Diffstat (limited to 'drivers')
220 files changed, 22418 insertions, 6671 deletions
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 80f0ec91e2cf..59f33fa6af3e 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -84,6 +84,8 @@ source "drivers/memstick/Kconfig"
 
 source "drivers/leds/Kconfig"
 
+source "drivers/accessibility/Kconfig"
+
 source "drivers/infiniband/Kconfig"
 
 source "drivers/edac/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index e5e394a7e6c0..f65deda72d61 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_WATCHDOG)	+= watchdog/
 obj-$(CONFIG_PHONE)		+= telephony/
 obj-$(CONFIG_MD)		+= md/
 obj-$(CONFIG_BT)		+= bluetooth/
+obj-$(CONFIG_ACCESSIBILITY)	+= accessibility/
 obj-$(CONFIG_ISDN)		+= isdn/
 obj-$(CONFIG_EDAC)		+= edac/
 obj-$(CONFIG_MCA)		+= mca/
diff --git a/drivers/accessibility/Kconfig b/drivers/accessibility/Kconfig
new file mode 100644
index 000000000000..1264c4b98094
--- /dev/null
+++ b/drivers/accessibility/Kconfig
@@ -0,0 +1,23 @@
+menuconfig ACCESSIBILITY
+	bool "Accessibility support"
+	---help---
+	  Enable a submenu where accessibility items may be enabled.
+
+	  If unsure, say N.
+
+if ACCESSIBILITY
+config A11Y_BRAILLE_CONSOLE
+	bool "Console on braille device"
+	depends on VT
+	depends on SERIAL_CORE_CONSOLE
+	---help---
+	  Enables console output on a braille device connected to a 8250
+	  serial port. For now only the VisioBraille device is supported.
+
+	  To actually enable it, you need to pass option
+		console=brl,ttyS0
+	  to the kernel. Options are the same as for serial console.
+
+	  If unsure, say N.
+
+endif # ACCESSIBILITY
diff --git a/drivers/accessibility/Makefile b/drivers/accessibility/Makefile
new file mode 100644
index 000000000000..72b01a46546f
--- /dev/null
+++ b/drivers/accessibility/Makefile
@@ -0,0 +1 @@
+obj-y	+= braille/
diff --git a/drivers/accessibility/braille/Makefile b/drivers/accessibility/braille/Makefile
new file mode 100644
index 000000000000..2e9f16c91347
--- /dev/null
+++ b/drivers/accessibility/braille/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_A11Y_BRAILLE_CONSOLE)	+= braille_console.o
diff --git a/drivers/accessibility/braille/braille_console.c b/drivers/accessibility/braille/braille_console.c
new file mode 100644
index 000000000000..0a5f6b2114c5
--- /dev/null
+++ b/drivers/accessibility/braille/braille_console.c
@@ -0,0 +1,397 @@
+/*
+ * Minimalistic braille device kernel support.
+ *
+ * By default, shows console messages on the braille device.
+ * Pressing Insert switches to VC browsing.
+ *
+ * Copyright (C) Samuel Thibault <samuel.thibault@ens-lyon.org>
+ *
+ * This program is free software ; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation ; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY ; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with the program ; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/autoconf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/console.h>
+#include <linux/notifier.h>
+
+#include <linux/selection.h>
+#include <linux/vt_kern.h>
+#include <linux/consolemap.h>
+
+#include <linux/keyboard.h>
+#include <linux/kbd_kern.h>
+#include <linux/input.h>
+
+MODULE_AUTHOR("samuel.thibault@ens-lyon.org");
+MODULE_DESCRIPTION("braille device");
+MODULE_LICENSE("GPL");
+
+/*
+ * Braille device support part.
+ */
+
+/* Emit various sounds */
+static int sound;
+module_param(sound, bool, 0);
+MODULE_PARM_DESC(sound, "emit sounds");
+
+static void beep(unsigned int freq)
+{
+	if (sound)
+		kd_mksound(freq, HZ/10);
+}
+
+/* mini console */
+#define WIDTH 40
+#define BRAILLE_KEY KEY_INSERT
+static u16 console_buf[WIDTH];
+static int console_cursor;
+
+/* mini view of VC */
+static int vc_x, vc_y, lastvc_x, lastvc_y;
+
+/* show console ? (or show VC) */
+static int console_show = 1;
+/* pending newline ? */
+static int console_newline = 1;
+static int lastVC = -1;
+
+static struct console *braille_co;
+
+/* Very VisioBraille-specific */
+static void braille_write(u16 *buf)
+{
+	static u16 lastwrite[WIDTH];
+	unsigned char data[1 + 1 + 2*WIDTH + 2 + 1], csum = 0, *c;
+	u16 out;
+	int i;
+
+	if (!braille_co)
+		return;
+
+	if (!memcmp(lastwrite, buf, WIDTH * sizeof(*buf)))
+		return;
+	memcpy(lastwrite, buf, WIDTH * sizeof(*buf));
+
+#define SOH 1
+#define STX 2
+#define ETX 2
+#define EOT 4
+#define ENQ 5
+	data[0] = STX;
+	data[1] = '>';
+	csum ^= '>';
+	c = &data[2];
+	for (i = 0; i < WIDTH; i++) {
+		out = buf[i];
+		if (out >= 0x100)
+			out = '?';
+		else if (out == 0x00)
+			out = ' ';
+		csum ^= out;
+		if (out <= 0x05) {
+			*c++ = SOH;
+			out |= 0x40;
+		}
+		*c++ = out;
+	}
+
+	if (csum <= 0x05) {
+		*c++ = SOH;
+		csum |= 0x40;
+	}
+	*c++ = csum;
+	*c++ = ETX;
+
+	braille_co->write(braille_co, data, c - data);
+}
+
+/* Follow the VC cursor*/
+static void vc_follow_cursor(struct vc_data *vc)
+{
+	vc_x = vc->vc_x - (vc->vc_x % WIDTH);
+	vc_y = vc->vc_y;
+	lastvc_x = vc->vc_x;
+	lastvc_y = vc->vc_y;
+}
+
+/* Maybe the VC cursor moved, if so follow it */
+static void vc_maybe_cursor_moved(struct vc_data *vc)
+{
+	if (vc->vc_x != lastvc_x || vc->vc_y != lastvc_y)
+		vc_follow_cursor(vc);
+}
+
+/* Show portion of VC at vc_x, vc_y */
+static void vc_refresh(struct vc_data *vc)
+{
+	u16 buf[WIDTH];
+	int i;
+
+	for (i = 0; i < WIDTH; i++) {
+		u16 glyph = screen_glyph(vc,
+				2 * (vc_x + i) + vc_y * vc->vc_size_row);
+		buf[i] = inverse_translate(vc, glyph, 1);
+	}
+	braille_write(buf);
+}
+
+/*
+ * Link to keyboard
+ */
+
+static int keyboard_notifier_call(struct notifier_block *blk,
+		unsigned long code, void *_param)
+{
+	struct keyboard_notifier_param *param = _param;
+	struct vc_data *vc = param->vc;
+	int ret = NOTIFY_OK;
+
+	if (!param->down)
+		return ret;
+
+	switch (code) {
+	case KBD_KEYCODE:
+		if (console_show) {
+			if (param->value == BRAILLE_KEY) {
+				console_show = 0;
+				beep(880);
+				vc_maybe_cursor_moved(vc);
+				vc_refresh(vc);
+				ret = NOTIFY_STOP;
+			}
+		} else {
+			ret = NOTIFY_STOP;
+			switch (param->value) {
+			case KEY_INSERT:
+				beep(440);
+				console_show = 1;
+				lastVC = -1;
+				braille_write(console_buf);
+				break;
+			case KEY_LEFT:
+				if (vc_x > 0) {
+					vc_x -= WIDTH;
+					if (vc_x < 0)
+						vc_x = 0;
+				} else if (vc_y >= 1) {
+					beep(880);
+					vc_y--;
+					vc_x = vc->vc_cols-WIDTH;
+				} else
+					beep(220);
+				break;
+			case KEY_RIGHT:
+				if (vc_x + WIDTH < vc->vc_cols) {
+					vc_x += WIDTH;
+				} else if (vc_y + 1 < vc->vc_rows) {
+					beep(880);
+					vc_y++;
+					vc_x = 0;
+				} else
+					beep(220);
+				break;
+			case KEY_DOWN:
+				if (vc_y + 1 < vc->vc_rows)
+					vc_y++;
+				else
+					beep(220);
+				break;
+			case KEY_UP:
+				if (vc_y >= 1)
+					vc_y--;
+				else
+					beep(220);
+				break;
+			case KEY_HOME:
+				vc_follow_cursor(vc);
+				break;
+			case KEY_PAGEUP:
+				vc_x = 0;
+				vc_y = 0;
+				break;
+			case KEY_PAGEDOWN:
+				vc_x = 0;
+				vc_y = vc->vc_rows-1;
+				break;
+			default:
+				ret = NOTIFY_OK;
+				break;
+			}
+			if (ret == NOTIFY_STOP)
+				vc_refresh(vc);
+		}
+		break;
+	case KBD_POST_KEYSYM:
+	{
+		unsigned char type = KTYP(param->value) - 0xf0;
+		if (type == KT_SPEC) {
+			unsigned char val = KVAL(param->value);
+			int on_off = -1;
+
+			switch (val) {
+			case KVAL(K_CAPS):
+				on_off = vc_kbd_led(kbd_table + fg_console,
+						VC_CAPSLOCK);
+				break;
+			case KVAL(K_NUM):
+				on_off = vc_kbd_led(kbd_table + fg_console,
+						VC_NUMLOCK);
+				break;
+			case KVAL(K_HOLD):
+				on_off = vc_kbd_led(kbd_table + fg_console,
+						VC_SCROLLOCK);
+				break;
+			}
+			if (on_off == 1)
+				beep(880);
+			else if (on_off == 0)
+				beep(440);
+		}
+	}
+	case KBD_UNBOUND_KEYCODE:
+	case KBD_UNICODE:
+	case KBD_KEYSYM:
+		/* Unused */
+		break;
+	}
+	return ret;
+}
+
+static struct notifier_block keyboard_notifier_block = {
+	.notifier_call = keyboard_notifier_call,
+};
+
+static int vt_notifier_call(struct notifier_block *blk,
+		unsigned long code, void *_param)
+{
+	struct vt_notifier_param *param = _param;
+	struct vc_data *vc = param->vc;
+	switch (code) {
+	case VT_ALLOCATE:
+		break;
+	case VT_DEALLOCATE:
+		break;
+	case VT_WRITE:
+	{
+		unsigned char c = param->c;
+		if (vc->vc_num != fg_console)
+			break;
+		switch (c) {
+		case '\b':
+		case 127:
+			if (console_cursor > 0) {
+				console_cursor--;
+				console_buf[console_cursor] = ' ';
+			}
+			break;
+		case '\n':
+		case '\v':
+		case '\f':
+		case '\r':
+			console_newline = 1;
+			break;
+		case '\t':
+			c = ' ';
+			/* Fallthrough */
+		default:
+			if (c < 32)
+				/* Ignore other control sequences */
+				break;
+			if (console_newline) {
+				memset(console_buf, 0, sizeof(console_buf));
+				console_cursor = 0;
+				console_newline = 0;
+			}
+			if (console_cursor == WIDTH)
+				memmove(console_buf, &console_buf[1],
+					(WIDTH-1) * sizeof(*console_buf));
+			else
+				console_cursor++;
+			console_buf[console_cursor-1] = c;
+			break;
+		}
+		if (console_show)
+			braille_write(console_buf);
+		else {
+			vc_maybe_cursor_moved(vc);
+			vc_refresh(vc);
+		}
+		break;
+	}
+	case VT_UPDATE:
+		/* Maybe a VT switch, flush */
+		if (console_show) {
+			if (vc->vc_num != lastVC) {
+				lastVC = vc->vc_num;
+				memset(console_buf, 0, sizeof(console_buf));
+				console_cursor = 0;
+				braille_write(console_buf);
+			}
+		} else {
+			vc_maybe_cursor_moved(vc);
+			vc_refresh(vc);
+		}
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block vt_notifier_block = {
+	.notifier_call = vt_notifier_call,
+};
+
+/*
+ * Called from printk.c when console=brl is given
+ */
+
+int braille_register_console(struct console *console, int index,
+		char *console_options, char *braille_options)
+{
+	int ret;
+	if (!console_options)
+		/* Only support VisioBraille for now */
+		console_options = "57600o8";
+	if (braille_co)
+		return -ENODEV;
+	if (console->setup) {
+		ret = console->setup(console, console_options);
+		if (ret != 0)
+			return ret;
+	}
+	console->flags |= CON_ENABLED;
+	console->index = index;
+	braille_co = console;
+	return 0;
+}
+
+int braille_unregister_console(struct console *console)
+{
+	if (braille_co != console)
+		return -EINVAL;
+	braille_co = NULL;
+	return 0;
+}
+
+static int __init braille_init(void)
+{
+	register_keyboard_notifier(&keyboard_notifier_block);
+	register_vt_notifier(&vt_notifier_block);
+	return 0;
+}
+
+console_initcall(braille_init);
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 5aa12b011a9a..6adb72a2f876 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -33,6 +33,7 @@
 #include <linux/interrupt.h>
 #include <linux/poison.h>
 #include <linux/bitrev.h>
+#include <linux/mutex.h>
 
 #include <asm/atomic.h>
 #include <asm/io.h>
@@ -1177,7 +1178,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 
     vcc->tx_frame_bits = tx_frame_bits;
 
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
    if (dev->rxer[vci]) {
      // RXer on the channel already, just modify rate...
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1203,7 +1204,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 	schedule();
      }
      dev->txer[vci].tx_present = 1;
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
  }
 
  if (rxtp->traffic_class != ATM_NONE) {
@@ -1211,7 +1212,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 
    vcc->rx_info.pool = pool;
 
-    down (&dev->vcc_sf); 
+    mutex_lock(&dev->vcc_sf);
    /* grow RX buffer pool */
    if (!dev->rxq[pool].buffers_wanted)
      dev->rxq[pool].buffers_wanted = rx_lats;
@@ -1237,7 +1238,7 @@ static int amb_open (struct atm_vcc * atm_vcc)
 	schedule();
    // this link allows RX frames through
    dev->rxer[vci] = atm_vcc;
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
  }
 
  // indicate readiness
@@ -1262,7 +1263,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
  if (atm_vcc->qos.txtp.traffic_class != ATM_NONE) {
    command cmd;
 
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
    if (dev->rxer[vci]) {
      // RXer still on the channel, just modify rate... XXX not really needed
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_RATE);
@@ -1277,7 +1278,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
    dev->txer[vci].tx_present = 0;
    while (command_do (dev, &cmd))
      schedule();
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
  }
 
  // disable RXing
@@ -1287,7 +1288,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
    // this is (the?) one reason why we need the amb_vcc struct
    unsigned char pool = vcc->rx_info.pool;
 
-    down (&dev->vcc_sf);
+    mutex_lock(&dev->vcc_sf);
    if (dev->txer[vci].tx_present) {
      // TXer still on the channel, just go to pool zero XXX not really needed
      cmd.request = cpu_to_be32 (SRB_MODIFY_VC_FLAGS);
@@ -1314,7 +1315,7 @@ static void amb_close (struct atm_vcc * atm_vcc) {
      dev->rxq[pool].buffers_wanted = 0;
      drain_rx_pool (dev, pool);
    }
-    up (&dev->vcc_sf);
+    mutex_unlock(&dev->vcc_sf);
  }
 
  // free our structure
@@ -2188,7 +2189,7 @@ static void setup_dev(amb_dev *dev, struct pci_dev *pci_dev)
 
  // semaphore for txer/rxer modifications - we cannot use a
  // spinlock as the critical region needs to switch processes
-  init_MUTEX (&dev->vcc_sf);
+  mutex_init(&dev->vcc_sf);
  // queue manipulation spinlocks; we want atomic reads and
  // writes to the queue descriptors (handles IRQ and SMP)
  // consider replacing "int pending" -> "atomic_t available"
diff --git a/drivers/atm/ambassador.h b/drivers/atm/ambassador.h
index ff2a303cbe00..df55fa8387dc 100644
--- a/drivers/atm/ambassador.h
+++ b/drivers/atm/ambassador.h
@@ -638,7 +638,7 @@ struct amb_dev {
  amb_txq txq;
  amb_rxq rxq[NUM_RX_POOLS];
 
-  struct semaphore vcc_sf;
+  struct mutex vcc_sf;
  amb_tx_info txer[NUM_VCS];
  struct atm_vcc * rxer[NUM_VCS];
  unsigned int tx_avail;
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 12fde2d03d69..39f3d1b3a213 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -77,6 +77,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       "Node %d PageTables: %8lu kB\n"
 		       "Node %d NFS_Unstable: %8lu kB\n"
 		       "Node %d Bounce: %8lu kB\n"
+		       "Node %d WritebackTmp: %8lu kB\n"
 		       "Node %d Slab: %8lu kB\n"
 		       "Node %d SReclaimable: %8lu kB\n"
 		       "Node %d SUnreclaim: %8lu kB\n",
@@ -99,6 +100,7 @@ static ssize_t node_read_meminfo(struct sys_device * dev, char * buf)
 		       nid, K(node_page_state(nid, NR_PAGETABLE)),
 		       nid, K(node_page_state(nid, NR_UNSTABLE_NFS)),
 		       nid, K(node_page_state(nid, NR_BOUNCE)),
+		       nid, K(node_page_state(nid, NR_WRITEBACK_TEMP)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE) +
 				node_page_state(nid, NR_SLAB_UNRECLAIMABLE)),
 		       nid, K(node_page_state(nid, NR_SLAB_RECLAIMABLE)),
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index e8e38faeafd8..a196ef7f147f 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -387,10 +387,14 @@ static struct block_device_operations brd_fops = {
  */
 static int rd_nr;
 int rd_size = CONFIG_BLK_DEV_RAM_SIZE;
+static int max_part;
+static int part_shift;
 module_param(rd_nr, int, 0);
 MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices");
 module_param(rd_size, int, 0);
 MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes.");
+module_param(max_part, int, 0);
+MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR);
 
@@ -435,11 +439,11 @@ static struct brd_device *brd_alloc(int i)
 	blk_queue_max_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
-	disk = brd->brd_disk = alloc_disk(1);
+	disk = brd->brd_disk = alloc_disk(1 << part_shift);
 	if (!disk)
 		goto out_free_queue;
 	disk->major = RAMDISK_MAJOR;
-	disk->first_minor = i;
+	disk->first_minor = i << part_shift;
 	disk->fops = &brd_fops;
 	disk->private_data = brd;
 	disk->queue = brd->brd_queue;
@@ -523,7 +527,12 @@ static int __init brd_init(void)
 	 * themselves and have kernel automatically instantiate actual
 	 * device on-demand.
 	 */
-	if (rd_nr > 1UL << MINORBITS)
+
+	part_shift = 0;
+	if (max_part > 0)
+		part_shift = fls(max_part);
+
+	if (rd_nr > 1UL << (MINORBITS - part_shift))
 		return -EINVAL;
 
 	if (rd_nr) {
@@ -531,7 +540,7 @@ static int __init brd_init(void)
 		range = rd_nr;
 	} else {
 		nr = CONFIG_BLK_DEV_RAM_COUNT;
-		range = 1UL << MINORBITS;
+		range = 1UL << (MINORBITS - part_shift);
 	}
 
 	if (register_blkdev(RAMDISK_MAJOR, "ramdisk"))
@@ -570,7 +579,7 @@ static void __exit brd_exit(void)
 	unsigned long range;
 	struct brd_device *brd, *next;
 
-	range = rd_nr ? rd_nr : 1UL << MINORBITS;
+	range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift);
 
 	list_for_each_entry_safe(brd, next, &brd_devices, brd_list)
 		brd_del_one(brd);
diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c
index 7e31d5f1bc8a..e5cd856a2fea 100644
--- a/drivers/bluetooth/hci_ldisc.c
+++ b/drivers/bluetooth/hci_ldisc.c
@@ -143,7 +143,7 @@ restart:
 		int len;
 
 		set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
-		len = tty->driver->write(tty, skb->data, skb->len);
+		len = tty->ops->write(tty, skb->data, skb->len);
 		hdev->stat.byte_tx += len;
 
 		skb_pull(skb, len);
@@ -190,8 +190,7 @@ static int hci_uart_flush(struct hci_dev *hdev)
 
 	/* Flush any pending characters in the driver and discipline. */
 	tty_ldisc_flush(tty);
-	if (tty->driver && tty->driver->flush_buffer)
-		tty->driver->flush_buffer(tty);
+	tty_driver_flush_buffer(tty);
 
 	if (test_bit(HCI_UART_PROTO_SET, &hu->flags))
 		hu->proto->flush(hu);
@@ -285,9 +284,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
 
 	if (tty->ldisc.flush_buffer)
 		tty->ldisc.flush_buffer(tty);
-
-	if (tty->driver && tty->driver->flush_buffer)
-		tty->driver->flush_buffer(tty);
+	tty_driver_flush_buffer(tty);
 
 	return 0;
 }
@@ -373,9 +370,7 @@ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *f
 	hu->hdev->stat.byte_rx += count;
 	spin_unlock(&hu->rx_lock);
 
-	if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) &&
-	    tty->driver->unthrottle)
-		tty->driver->unthrottle(tty);
+	tty_unthrottle(tty);
 }
 
 static int hci_uart_register_dev(struct hci_uart *hu)
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index c69f79598e47..99e6a406efb4 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -35,7 +35,7 @@
 
 //#define AGP_DEBUG 1
 #ifdef AGP_DEBUG
-#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __FUNCTION__ , ## y)
+#define DBG(x,y...) printk (KERN_DEBUG PFX "%s: " x "\n", __func__ , ## y)
 #else
 #define DBG(x,y...) do { } while (0)
 #endif
diff --git a/drivers/char/amiserial.c b/drivers/char/amiserial.c
index 3d468f502d2d..37457e5a4f2b 100644
--- a/drivers/char/amiserial.c
+++ b/drivers/char/amiserial.c
@@ -832,33 +832,34 @@ static void change_speed(struct async_struct *info,
 	local_irq_restore(flags);
 }
 
-static void rs_put_char(struct tty_struct *tty, unsigned char ch)
+static int rs_put_char(struct tty_struct *tty, unsigned char ch)
 {
 	struct async_struct *info;
 	unsigned long flags;
 
 	if (!tty)
-		return;
+		return 0;
 
 	info = tty->driver_data;
 
 	if (serial_paranoia_check(info, tty->name, "rs_put_char"))
-		return;
+		return 0;
 
 	if (!info->xmit.buf)
-		return;
+		return 0;
 
 	local_irq_save(flags);
 	if (CIRC_SPACE(info->xmit.head,
 		       info->xmit.tail,
 		       SERIAL_XMIT_SIZE) == 0) {
 		local_irq_restore(flags);
-		return;
+		return 0;
 	}
 
 	info->xmit.buf[info->xmit.head++] = ch;
 	info->xmit.head &= SERIAL_XMIT_SIZE-1;
 	local_irq_restore(flags);
+	return 1;
 }
 
 static void rs_flush_chars(struct tty_struct *tty)
@@ -1074,6 +1075,7 @@ static int get_serial_info(struct async_struct * info,
 	if (!retinfo)
 		return -EFAULT;
 	memset(&tmp, 0, sizeof(tmp));
+	lock_kernel();
 	tmp.type = state->type;
 	tmp.line = state->line;
 	tmp.port = state->port;
@@ -1084,6 +1086,7 @@ static int get_serial_info(struct async_struct * info,
 	tmp.close_delay = state->close_delay;
 	tmp.closing_wait = state->closing_wait;
 	tmp.custom_divisor = state->custom_divisor;
+	unlock_kernel();
 	if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
 		return -EFAULT;
 	return 0;
@@ -1099,13 +1102,17 @@ static int set_serial_info(struct async_struct * info,
 
 	if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
 		return -EFAULT;
+
+	lock_kernel();
 	state = info->state;
 	old_state = *state;
 
 	change_irq = new_serial.irq != state->irq;
 	change_port = (new_serial.port != state->port);
-	if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size))
+	if(change_irq || change_port || (new_serial.xmit_fifo_size != state->xmit_fifo_size)) {
+		unlock_kernel();
 		return -EINVAL;
+	}
 
 	if (!serial_isroot()) {
 		if ((new_serial.baud_base != state->baud_base) ||
@@ -1122,8 +1129,10 @@ static int set_serial_info(struct async_struct * info,
 		goto check_and_exit;
 	}
 
-	if (new_serial.baud_base < 9600)
+	if (new_serial.baud_base < 9600) {
+		unlock_kernel();
 		return -EINVAL;
+	}
 
 	/*
 	 * OK, past this point, all the error checking has been done.
@@ -1157,6 +1166,7 @@ check_and_exit:
 		}
 	} else
 		retval = startup(info);
+	unlock_kernel();
 	return retval;
 }
 
@@ -1496,8 +1506,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
 		rs_wait_until_sent(tty, info->timeout);
 	}
 	shutdown(info);
-	if (tty->driver->flush_buffer)
-		tty->driver->flush_buffer(tty);
+	rs_flush_buffer(tty);
 
 	tty_ldisc_flush(tty);
 	tty->closing = 0;
@@ -1530,6 +1539,8 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
 		return; /* Just in case.... */
 
 	orig_jiffies = jiffies;
+
+	lock_kernel();
 	/*
 	 * Set the check interval to be 1/5 of the estimated time to
 	 * send a single character, and make it at least 1.  The check
@@ -1570,6 +1581,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout)
 			break;
 	}
 	__set_current_state(TASK_RUNNING);
+	unlock_kernel();
 #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT
 	printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies);
 #endif
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index a7c4990b5b6b..31d08b641f5b 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -199,7 +199,7 @@ static int __init applicom_init(void)
 		if (pci_enable_device(dev))
 			return -EIO;
 
-		RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO);
+		RamIO = ioremap_nocache(pci_resource_start(dev, 0), LEN_RAM_IO);
 
 		if (!RamIO) {
 			printk(KERN_INFO "ac.o: Failed to ioremap PCI memory "
@@ -254,7 +254,7 @@ static int __init applicom_init(void)
 	/* Now try the specified ISA cards */
 
 	for (i = 0; i < MAX_ISA_BOARD; i++) {
-		RamIO = ioremap(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
+		RamIO = ioremap_nocache(mem + (LEN_RAM_IO * i), LEN_RAM_IO);
 
 		if (!RamIO) {
 			printk(KERN_INFO "ac.o: Failed to ioremap the ISA card's memory space (slot #%d)\n", i + 1);
diff --git a/drivers/char/consolemap.c b/drivers/char/consolemap.c
index 6b104e45a322..4246b8e36cb3 100644
--- a/drivers/char/consolemap.c
+++ b/drivers/char/consolemap.c
@@ -277,6 +277,7 @@ u16 inverse_translate(struct vc_data *conp, int glyph, int use_unicode)
 			return p->inverse_translations[m][glyph];
 	}
 }
+EXPORT_SYMBOL_GPL(inverse_translate);
 
 static void update_user_maps(void)
 {
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c index e4f579c3e245..ef73e72daedc 100644 --- a/drivers/char/cyclades.c +++ b/drivers/char/cyclades.c | |||
| @@ -21,7 +21,6 @@ | |||
| 21 | * | 21 | * |
| 22 | * This version supports shared IRQ's (only for PCI boards). | 22 | * This version supports shared IRQ's (only for PCI boards). |
| 23 | * | 23 | * |
| 24 | * $Log: cyclades.c,v $ | ||
| 25 | * Prevent users from opening non-existing Z ports. | 24 | * Prevent users from opening non-existing Z ports. |
| 26 | * | 25 | * |
| 27 | * Revision 2.3.2.8 2000/07/06 18:14:16 ivan | 26 | * Revision 2.3.2.8 2000/07/06 18:14:16 ivan |
| @@ -62,7 +61,7 @@ | |||
| 62 | * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined; | 61 | * Driver now makes sure that the constant SERIAL_XMIT_SIZE is defined; |
| 63 | * | 62 | * |
| 64 | * Revision 2.3.2.2 1999/10/01 11:27:43 ivan | 63 | * Revision 2.3.2.2 1999/10/01 11:27:43 ivan |
| 65 | * Fixed bug in cyz_poll that would make all ports but port 0 | 64 | * Fixed bug in cyz_poll that would make all ports but port 0 |
| 66 | * unable to transmit/receive data (Cyclades-Z only); | 65 | * unable to transmit/receive data (Cyclades-Z only); |
| 67 | * Implemented logic to prevent the RX buffer from being stuck with data | 66 | * Implemented logic to prevent the RX buffer from being stuck with data |
| 68 | * due to a driver / firmware race condition in interrupt op mode | 67 | * due to a driver / firmware race condition in interrupt op mode |
| @@ -83,25 +82,25 @@ | |||
| 83 | * Revision 2.3.1.1 1999/07/15 16:45:53 ivan | 82 | * Revision 2.3.1.1 1999/07/15 16:45:53 ivan |
| 84 | * Removed CY_PROC conditional compilation; | 83 | * Removed CY_PROC conditional compilation; |
| 85 | * Implemented SMP-awareness for the driver; | 84 | * Implemented SMP-awareness for the driver; |
| 86 | * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off] | 85 | * Implemented a new ISA IRQ autoprobe that uses the irq_probe_[on|off] |
| 87 | * functions; | 86 | * functions; |
| 88 | * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs | 87 | * The driver now accepts memory addresses (maddr=0xMMMMM) and IRQs |
| 89 | * (irq=NN) as parameters (only for ISA boards); | 88 | * (irq=NN) as parameters (only for ISA boards); |
| 90 | * Fixed bug in set_line_char that would prevent the Cyclades-Z | 89 | * Fixed bug in set_line_char that would prevent the Cyclades-Z |
| 91 | * ports from being configured at speeds above 115.2Kbps; | 90 | * ports from being configured at speeds above 115.2Kbps; |
| 92 | * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control | 91 | * Fixed bug in cy_set_termios that would prevent XON/XOFF flow control |
| 93 | * switching from working properly; | 92 | * switching from working properly; |
| 94 | * The driver now only prints IRQ info for the Cyclades-Z if it's | 93 | * The driver now only prints IRQ info for the Cyclades-Z if it's |
| 95 | * configured to work in interrupt mode; | 94 | * configured to work in interrupt mode; |
| 96 | * | 95 | * |
| 97 | * Revision 2.2.2.3 1999/06/28 11:13:29 ivan | 96 | * Revision 2.2.2.3 1999/06/28 11:13:29 ivan |
| 98 | * Added support for interrupt mode operation for the Z cards; | 97 | * Added support for interrupt mode operation for the Z cards; |
| 99 | * Removed the driver inactivity control for the Z; | 98 | * Removed the driver inactivity control for the Z; |
| 100 | * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when | 99 | * Added a missing MOD_DEC_USE_COUNT in the cy_open function for when |
| 101 | * the Z firmware is not loaded yet; | 100 | * the Z firmware is not loaded yet; |
| 102 | * Replaced the "manual" Z Tx flush buffer by a call to a FW command of | 101 | * Replaced the "manual" Z Tx flush buffer by a call to a FW command of |
| 103 | * same functionality; | 102 | * same functionality; |
| 104 | * Implemented workaround for IRQ setting loss on the PCI configuration | 103 | * Implemented workaround for IRQ setting loss on the PCI configuration |
| 105 | * registers after a PCI bridge EEPROM reload (affects PLX9060 only); | 104 | * registers after a PCI bridge EEPROM reload (affects PLX9060 only); |
| 106 | * | 105 | * |
| 107 | * Revision 2.2.2.2 1999/05/14 17:18:15 ivan | 106 | * Revision 2.2.2.2 1999/05/14 17:18:15 ivan |
| @@ -112,22 +111,22 @@ | |||
| 112 | * BREAK implementation changed in order to make use of the 'break_ctl' | 111 | * BREAK implementation changed in order to make use of the 'break_ctl' |
| 113 | * TTY facility; | 112 | * TTY facility; |
| 114 | * Fixed typo in TTY structure field 'driver_name'; | 113 | * Fixed typo in TTY structure field 'driver_name'; |
| 115 | * Included a PCI bridge reset and EEPROM reload in the board | 114 | * Included a PCI bridge reset and EEPROM reload in the board |
| 116 | * initialization code (for both Y and Z series). | 115 | * initialization code (for both Y and Z series). |
| 117 | * | 116 | * |
| 118 | * Revision 2.2.2.1 1999/04/08 16:17:43 ivan | 117 | * Revision 2.2.2.1 1999/04/08 16:17:43 ivan |
| 119 | * Fixed a bug in cy_wait_until_sent that was preventing the port to be | 118 | * Fixed a bug in cy_wait_until_sent that was preventing the port to be |
| 120 | * closed properly after a SIGINT; | 119 | * closed properly after a SIGINT; |
| 121 | * Module usage counter scheme revisited; | 120 | * Module usage counter scheme revisited; |
| 122 | * Added support to the upcoming Y PCI boards (i.e., support to additional | 121 | * Added support to the upcoming Y PCI boards (i.e., support to additional |
| 123 | * PCI Device ID's). | 122 | * PCI Device ID's). |
| 124 | * | 123 | * |
| 125 | * Revision 2.2.1.10 1999/01/20 16:14:29 ivan | 124 | * Revision 2.2.1.10 1999/01/20 16:14:29 ivan |
| 126 | * Removed all unnecessary page-alignement operations in ioremap calls | 125 | * Removed all unnecessary page-alignement operations in ioremap calls |
| 127 | * (ioremap is currently safe for these operations). | 126 | * (ioremap is currently safe for these operations). |
| 128 | * | 127 | * |
| 129 | * Revision 2.2.1.9 1998/12/30 18:18:30 ivan | 128 | * Revision 2.2.1.9 1998/12/30 18:18:30 ivan |
| 130 | * Changed access to PLX PCI bridge registers from I/O to MMIO, in | 129 | * Changed access to PLX PCI bridge registers from I/O to MMIO, in |
| 131 | * order to make PLX9050-based boards work with certain motherboards. | 130 | * order to make PLX9050-based boards work with certain motherboards. |
| 132 | * | 131 | * |
| 133 | * Revision 2.2.1.8 1998/11/13 12:46:20 ivan | 132 | * Revision 2.2.1.8 1998/11/13 12:46:20 ivan |
| @@ -148,7 +147,7 @@ | |||
| 148 | * Fixed Cyclom-4Yo hardware detection bug. | 147 | * Fixed Cyclom-4Yo hardware detection bug. |
| 149 | * | 148 | * |
| 150 | * Revision 2.2.1.4 1998/08/04 11:02:50 ivan | 149 | * Revision 2.2.1.4 1998/08/04 11:02:50 ivan |
| 151 | * /proc/cyclades implementation with great collaboration of | 150 | * /proc/cyclades implementation with great collaboration of |
| 152 | * Marc Lewis <marc@blarg.net>; | 151 | * Marc Lewis <marc@blarg.net>; |
| 153 | * cyy_interrupt was changed to avoid occurrence of kernel oopses | 152 | * cyy_interrupt was changed to avoid occurrence of kernel oopses |
| 154 | * during PPP operation. | 153 | * during PPP operation. |
| @@ -157,7 +156,7 @@ | |||
| 157 | * General code review in order to comply with 2.1 kernel standards; | 156 | * General code review in order to comply with 2.1 kernel standards; |
| 158 | * data loss prevention for slow devices revisited (cy_wait_until_sent | 157 | * data loss prevention for slow devices revisited (cy_wait_until_sent |
| 159 | * was created); | 158 | * was created); |
| 160 | * removed conditional compilation for new/old PCI structure support | 159 | * removed conditional compilation for new/old PCI structure support |
| 161 | * (now the driver only supports the new PCI structure). | 160 | * (now the driver only supports the new PCI structure). |
| 162 | * | 161 | * |
| 163 | * Revision 2.2.1.1 1998/03/19 16:43:12 ivan | 162 | * Revision 2.2.1.1 1998/03/19 16:43:12 ivan |
| @@ -168,7 +167,7 @@ | |||
| 168 | * cleaned up the data loss fix; | 167 | * cleaned up the data loss fix; |
| 169 | * fixed XON/XOFF handling once more (Cyclades-Z); | 168 | * fixed XON/XOFF handling once more (Cyclades-Z); |
| 170 | * general review of the driver routines; | 169 | * general review of the driver routines; |
| 171 | * introduction of a mechanism to prevent data loss with slow | 170 | * introduction of a mechanism to prevent data loss with slow |
| 172 | * printers, by forcing a delay before closing the port. | 171 | * printers, by forcing a delay before closing the port. |
| 173 | * | 172 | * |
| 174 | * Revision 2.1.1.2 1998/02/17 16:50:00 ivan | 173 | * Revision 2.1.1.2 1998/02/17 16:50:00 ivan |
| @@ -182,12 +181,12 @@ | |||
| 182 | * Code review for the module cleanup routine; | 181 | * Code review for the module cleanup routine; |
| 183 | * fixed RTS and DTR status report for new CD1400's in get_modem_info; | 182 | * fixed RTS and DTR status report for new CD1400's in get_modem_info; |
| 184 | * includes anonymous changes regarding signal_pending. | 183 | * includes anonymous changes regarding signal_pending. |
| 185 | * | 184 | * |
| 186 | * Revision 2.1 1997/11/01 17:42:41 ivan | 185 | * Revision 2.1 1997/11/01 17:42:41 ivan |
| 187 | * Changes in the driver to support Alpha systems (except 8Zo V_1); | 186 | * Changes in the driver to support Alpha systems (except 8Zo V_1); |
| 188 | * BREAK fix for the Cyclades-Z boards; | 187 | * BREAK fix for the Cyclades-Z boards; |
| 189 | * driver inactivity control by FW implemented; | 188 | * driver inactivity control by FW implemented; |
| 190 | * introduction of flag that allows driver to take advantage of | 189 | * introduction of flag that allows driver to take advantage of |
| 191 | * a special CD1400 feature related to HW flow control; | 190 | * a special CD1400 feature related to HW flow control; |
| 192 | * added support for the CD1400 rev. J (Cyclom-Y boards); | 191 | * added support for the CD1400 rev. J (Cyclom-Y boards); |
| 193 | * introduction of ioctls to: | 192 | * introduction of ioctls to: |
| @@ -196,17 +195,17 @@ | |||
| 196 | * - adjust the polling interval (Cyclades-Z); | 195 | * - adjust the polling interval (Cyclades-Z); |
| 197 | * | 196 | * |
| 198 | * Revision 1.36.4.33 1997/06/27 19:00:00 ivan | 197 | * Revision 1.36.4.33 1997/06/27 19:00:00 ivan |
| 199 | * Fixes related to kernel version conditional | 198 | * Fixes related to kernel version conditional |
| 200 | * compilation. | 199 | * compilation. |
| 201 | * | 200 | * |
| 202 | * Revision 1.36.4.32 1997/06/14 19:30:00 ivan | 201 | * Revision 1.36.4.32 1997/06/14 19:30:00 ivan |
| 203 | * Compatibility issues between kernels 2.0.x and | 202 | * Compatibility issues between kernels 2.0.x and |
| 204 | * 2.1.x (mainly related to clear_bit function). | 203 | * 2.1.x (mainly related to clear_bit function). |
| 205 | * | 204 | * |
| 206 | * Revision 1.36.4.31 1997/06/03 15:30:00 ivan | 205 | * Revision 1.36.4.31 1997/06/03 15:30:00 ivan |
| 207 | * Changes to define the memory window according to the | 206 | * Changes to define the memory window according to the |
| 208 | * board type. | 207 | * board type. |
| 209 | * | 208 | * |
| 210 | * Revision 1.36.4.30 1997/05/16 15:30:00 daniel | 209 | * Revision 1.36.4.30 1997/05/16 15:30:00 daniel |
| 211 | * Changes to support new cycladesZ boards. | 210 | * Changes to support new cycladesZ boards. |
| 212 | * | 211 | * |
| @@ -624,7 +623,7 @@ | |||
| 624 | #undef CY_PCI_DEBUG | 623 | #undef CY_PCI_DEBUG |
| 625 | 624 | ||
| 626 | /* | 625 | /* |
| 627 | * Include section | 626 | * Include section |
| 628 | */ | 627 | */ |
| 629 | #include <linux/module.h> | 628 | #include <linux/module.h> |
| 630 | #include <linux/errno.h> | 629 | #include <linux/errno.h> |
| @@ -649,9 +648,9 @@ | |||
| 649 | #include <linux/firmware.h> | 648 | #include <linux/firmware.h> |
| 650 | 649 | ||
| 651 | #include <asm/system.h> | 650 | #include <asm/system.h> |
| 652 | #include <asm/io.h> | 651 | #include <linux/io.h> |
| 653 | #include <asm/irq.h> | 652 | #include <asm/irq.h> |
| 654 | #include <asm/uaccess.h> | 653 | #include <linux/uaccess.h> |
| 655 | 654 | ||
| 656 | #include <linux/kernel.h> | 655 | #include <linux/kernel.h> |
| 657 | #include <linux/pci.h> | 656 | #include <linux/pci.h> |
| @@ -668,10 +667,10 @@ static void cy_send_xchar(struct tty_struct *tty, char ch); | |||
| 668 | ((readl(&((struct RUNTIME_9060 __iomem *) \ | 667 | ((readl(&((struct RUNTIME_9060 __iomem *) \ |
| 669 | ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0) | 668 | ((card).ctl_addr))->init_ctrl) & (1<<17)) != 0) |
| 670 | 669 | ||
| 671 | #define ISZLOADED(card) (((ZO_V1==readl(&((struct RUNTIME_9060 __iomem *) \ | 670 | #define ISZLOADED(card) (((ZO_V1 == readl(&((struct RUNTIME_9060 __iomem *) \ |
| 672 | ((card).ctl_addr))->mail_box_0)) || \ | 671 | ((card).ctl_addr))->mail_box_0)) || \ |
| 673 | Z_FPGA_CHECK(card)) && \ | 672 | Z_FPGA_CHECK(card)) && \ |
| 674 | (ZFIRM_ID==readl(&((struct FIRM_ID __iomem *) \ | 673 | (ZFIRM_ID == readl(&((struct FIRM_ID __iomem *) \ |
| 675 | ((card).base_addr+ID_ADDRESS))->signature))) | 674 | ((card).base_addr+ID_ADDRESS))->signature))) |
| 676 | 675 | ||
| 677 | #ifndef SERIAL_XMIT_SIZE | 676 | #ifndef SERIAL_XMIT_SIZE |
| @@ -809,12 +808,12 @@ static char baud_cor3[] = { /* receive threshold */ | |||
| 809 | 808 | ||
| 810 | /* | 809 | /* |
| 811 | * The Cyclades driver implements HW flow control as any serial driver. | 810 | * The Cyclades driver implements HW flow control as any serial driver. |
| 812 | * The cyclades_port structure member rflow and the vector rflow_thr | 811 | * The cyclades_port structure member rflow and the vector rflow_thr |
| 813 | * allows us to take advantage of a special feature in the CD1400 to avoid | 812 | * allows us to take advantage of a special feature in the CD1400 to avoid |
| 814 | * data loss even when the system interrupt latency is too high. These flags | 813 | * data loss even when the system interrupt latency is too high. These flags |
| 815 | * are to be used only with very special applications. Setting these flags | 814 | * are to be used only with very special applications. Setting these flags |
| 816 | * requires the use of a special cable (DTR and RTS reversed). In the new | 815 | * requires the use of a special cable (DTR and RTS reversed). In the new |
| 817 | * CD1400-based boards (rev. 6.00 or later), there is no need for special | 816 | * CD1400-based boards (rev. 6.00 or later), there is no need for special |
| 818 | * cables. | 817 | * cables. |
| 819 | */ | 818 | */ |
| 820 | 819 | ||
| @@ -841,14 +840,22 @@ static int cy_chip_offset[] = { 0x0000, | |||
| 841 | 840 | ||
| 842 | #ifdef CONFIG_PCI | 841 | #ifdef CONFIG_PCI |
| 843 | static struct pci_device_id cy_pci_dev_id[] __devinitdata = { | 842 | static struct pci_device_id cy_pci_dev_id[] __devinitdata = { |
| 844 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) }, /* PCI < 1Mb */ | 843 | /* PCI < 1Mb */ |
| 845 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) }, /* PCI > 1Mb */ | 844 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Lo) }, |
| 846 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) }, /* 4Y PCI < 1Mb */ | 845 | /* PCI > 1Mb */ |
| 847 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) }, /* 4Y PCI > 1Mb */ | 846 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Y_Hi) }, |
| 848 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) }, /* 8Y PCI < 1Mb */ | 847 | /* 4Y PCI < 1Mb */ |
| 849 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) }, /* 8Y PCI > 1Mb */ | 848 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Lo) }, |
| 850 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) }, /* Z PCI < 1Mb */ | 849 | /* 4Y PCI > 1Mb */ |
| 851 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) }, /* Z PCI > 1Mb */ | 850 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_4Y_Hi) }, |
| 851 | /* 8Y PCI < 1Mb */ | ||
| 852 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Lo) }, | ||
| 853 | /* 8Y PCI > 1Mb */ | ||
| 854 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_8Y_Hi) }, | ||
| 855 | /* Z PCI < 1Mb */ | ||
| 856 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Lo) }, | ||
| 857 | /* Z PCI > 1Mb */ | ||
| 858 | { PCI_DEVICE(PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_CYCLOM_Z_Hi) }, | ||
| 852 | { } /* end of table */ | 859 | { } /* end of table */ |
| 853 | }; | 860 | }; |
| 854 | MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); | 861 | MODULE_DEVICE_TABLE(pci, cy_pci_dev_id); |
| @@ -905,15 +912,14 @@ static inline int serial_paranoia_check(struct cyclades_port *info, | |||
| 905 | 912 | ||
| 906 | This function is only called from inside spinlock-protected code. | 913 | This function is only called from inside spinlock-protected code. |
| 907 | */ | 914 | */ |
| 908 | static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index) | 915 | static int cyy_issue_cmd(void __iomem *base_addr, u_char cmd, int index) |
| 909 | { | 916 | { |
| 910 | unsigned int i; | 917 | unsigned int i; |
| 911 | 918 | ||
| 912 | /* Check to see that the previous command has completed */ | 919 | /* Check to see that the previous command has completed */ |
| 913 | for (i = 0; i < 100; i++) { | 920 | for (i = 0; i < 100; i++) { |
| 914 | if (readb(base_addr + (CyCCR << index)) == 0) { | 921 | if (readb(base_addr + (CyCCR << index)) == 0) |
| 915 | break; | 922 | break; |
| 916 | } | ||
| 917 | udelay(10L); | 923 | udelay(10L); |
| 918 | } | 924 | } |
| 919 | /* if the CCR never cleared, the previous command | 925 | /* if the CCR never cleared, the previous command |
| @@ -929,7 +935,7 @@ static int cyy_issue_cmd(void __iomem * base_addr, u_char cmd, int index) | |||
| 929 | 935 | ||
| 930 | #ifdef CONFIG_ISA | 936 | #ifdef CONFIG_ISA |
| 931 | /* ISA interrupt detection code */ | 937 | /* ISA interrupt detection code */ |
| 932 | static unsigned detect_isa_irq(void __iomem * address) | 938 | static unsigned detect_isa_irq(void __iomem *address) |
| 933 | { | 939 | { |
| 934 | int irq; | 940 | int irq; |
| 935 | unsigned long irqs, flags; | 941 | unsigned long irqs, flags; |
| @@ -1038,7 +1044,7 @@ static void cyy_chip_rx(struct cyclades_card *cinfo, int chip, | |||
| 1038 | if (info->flags & ASYNC_SAK) | 1044 | if (info->flags & ASYNC_SAK) |
| 1039 | do_SAK(tty); | 1045 | do_SAK(tty); |
| 1040 | } else if (data & CyFRAME) { | 1046 | } else if (data & CyFRAME) { |
| 1041 | tty_insert_flip_char( tty, | 1047 | tty_insert_flip_char(tty, |
| 1042 | readb(base_addr + (CyRDSR << | 1048 | readb(base_addr + (CyRDSR << |
| 1043 | index)), TTY_FRAME); | 1049 | index)), TTY_FRAME); |
| 1044 | info->icount.rx++; | 1050 | info->icount.rx++; |
| @@ -1320,7 +1326,8 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id) | |||
| 1320 | 1326 | ||
| 1321 | if (unlikely(cinfo == NULL)) { | 1327 | if (unlikely(cinfo == NULL)) { |
| 1322 | #ifdef CY_DEBUG_INTERRUPTS | 1328 | #ifdef CY_DEBUG_INTERRUPTS |
| 1323 | printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n",irq); | 1329 | printk(KERN_DEBUG "cyy_interrupt: spurious interrupt %d\n", |
| 1330 | irq); | ||
| 1324 | #endif | 1331 | #endif |
| 1325 | return IRQ_NONE; /* spurious interrupt */ | 1332 | return IRQ_NONE; /* spurious interrupt */ |
| 1326 | } | 1333 | } |
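cyy_interrupt() keeps its early bail-out: a NULL dev_id means the interrupt was not raised by one of this driver's cards, so the handler returns IRQ_NONE; the change above merely wraps the debug printk to fit 80 columns. For readers unfamiliar with the convention, a minimal shared-IRQ handler skeleton (generic names, not this driver's structures) is:

#include <linux/interrupt.h>

struct example_card;			/* driver-private state, defined elsewhere */

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct example_card *card = dev_id;

	if (unlikely(card == NULL))
		return IRQ_NONE;	/* not ours; lets the core spot stuck IRQ lines */

	/* ...read the hardware status and service the card here... */

	return IRQ_HANDLED;		/* we consumed the interrupt */
}

/* Hooked up with something along the lines of:
 *	request_irq(irq, example_interrupt, IRQF_SHARED, "example", card);
 */

Returning IRQ_NONE for interrupts that are not yours is what allows several drivers to share one line and lets the kernel detect a screaming interrupt source.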
| @@ -1375,12 +1382,12 @@ static irqreturn_t cyy_interrupt(int irq, void *dev_id) | |||
| 1375 | 1382 | ||
| 1376 | /***********************************************************/ | 1383 | /***********************************************************/ |
| 1377 | /********* End of block of Cyclom-Y specific code **********/ | 1384 | /********* End of block of Cyclom-Y specific code **********/ |
| 1378 | /******** Start of block of Cyclades-Z specific code *********/ | 1385 | /******** Start of block of Cyclades-Z specific code *******/ |
| 1379 | /***********************************************************/ | 1386 | /***********************************************************/ |
| 1380 | 1387 | ||
| 1381 | static int | 1388 | static int |
| 1382 | cyz_fetch_msg(struct cyclades_card *cinfo, | 1389 | cyz_fetch_msg(struct cyclades_card *cinfo, |
| 1383 | __u32 * channel, __u8 * cmd, __u32 * param) | 1390 | __u32 *channel, __u8 *cmd, __u32 *param) |
| 1384 | { | 1391 | { |
| 1385 | struct FIRM_ID __iomem *firm_id; | 1392 | struct FIRM_ID __iomem *firm_id; |
| 1386 | struct ZFW_CTRL __iomem *zfw_ctrl; | 1393 | struct ZFW_CTRL __iomem *zfw_ctrl; |
| @@ -1388,9 +1395,8 @@ cyz_fetch_msg(struct cyclades_card *cinfo, | |||
| 1388 | unsigned long loc_doorbell; | 1395 | unsigned long loc_doorbell; |
| 1389 | 1396 | ||
| 1390 | firm_id = cinfo->base_addr + ID_ADDRESS; | 1397 | firm_id = cinfo->base_addr + ID_ADDRESS; |
| 1391 | if (!ISZLOADED(*cinfo)) { | 1398 | if (!ISZLOADED(*cinfo)) |
| 1392 | return -1; | 1399 | return -1; |
| 1393 | } | ||
| 1394 | zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 1400 | zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| 1395 | board_ctrl = &zfw_ctrl->board_ctrl; | 1401 | board_ctrl = &zfw_ctrl->board_ctrl; |
| 1396 | 1402 | ||
| @@ -1418,9 +1424,9 @@ cyz_issue_cmd(struct cyclades_card *cinfo, | |||
| 1418 | unsigned int index; | 1424 | unsigned int index; |
| 1419 | 1425 | ||
| 1420 | firm_id = cinfo->base_addr + ID_ADDRESS; | 1426 | firm_id = cinfo->base_addr + ID_ADDRESS; |
| 1421 | if (!ISZLOADED(*cinfo)) { | 1427 | if (!ISZLOADED(*cinfo)) |
| 1422 | return -1; | 1428 | return -1; |
| 1423 | } | 1429 | |
| 1424 | zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 1430 | zfw_ctrl = cinfo->base_addr + (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| 1425 | board_ctrl = &zfw_ctrl->board_ctrl; | 1431 | board_ctrl = &zfw_ctrl->board_ctrl; |
| 1426 | 1432 | ||
| @@ -1428,9 +1434,8 @@ cyz_issue_cmd(struct cyclades_card *cinfo, | |||
| 1428 | pci_doorbell = | 1434 | pci_doorbell = |
| 1429 | &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell; | 1435 | &((struct RUNTIME_9060 __iomem *)(cinfo->ctl_addr))->pci_doorbell; |
| 1430 | while ((readl(pci_doorbell) & 0xff) != 0) { | 1436 | while ((readl(pci_doorbell) & 0xff) != 0) { |
| 1431 | if (index++ == 1000) { | 1437 | if (index++ == 1000) |
| 1432 | return (int)(readl(pci_doorbell) & 0xff); | 1438 | return (int)(readl(pci_doorbell) & 0xff); |
| 1433 | } | ||
| 1434 | udelay(50L); | 1439 | udelay(50L); |
| 1435 | } | 1440 | } |
| 1436 | cy_writel(&board_ctrl->hcmd_channel, channel); | 1441 | cy_writel(&board_ctrl->hcmd_channel, channel); |
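In cyz_issue_cmd() the braces around the single-statement if disappear, but the doorbell wait itself is unchanged: poll the board's PCI doorbell register up to roughly a thousand times with a short delay, and hand back the stuck value if it never clears. The same bounded-poll shape in a standalone, userspace-flavoured sketch (the register read and the delay are stubs, purely illustrative):

#include <stdio.h>
#include <unistd.h>

/* Stand-ins for readl() and udelay(); a real driver reads MMIO here. */
static unsigned int read_doorbell(void)
{
	return 0;	/* pretend the firmware has already cleared the doorbell */
}

static void delay_us(unsigned int us)
{
	usleep(us);
}

/* 0 once the doorbell clears, otherwise the value still stuck after ~1000 tries. */
static int wait_doorbell_clear(void)
{
	unsigned int tries = 0;
	unsigned int val;

	for (;;) {
		val = read_doorbell() & 0xff;
		if (val == 0)
			return 0;
		if (tries++ == 1000)
			return (int)val;	/* give up; the caller reports the error */
		delay_us(50);
	}
}

int main(void)
{
	printf("doorbell wait returned %d\n", wait_doorbell_clear());
	return 0;
}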
| @@ -1504,7 +1509,8 @@ static void cyz_handle_rx(struct cyclades_port *info, | |||
| 1504 | while (len--) { | 1509 | while (len--) { |
| 1505 | data = readb(cinfo->base_addr + rx_bufaddr + | 1510 | data = readb(cinfo->base_addr + rx_bufaddr + |
| 1506 | new_rx_get); | 1511 | new_rx_get); |
| 1507 | new_rx_get = (new_rx_get + 1)& (rx_bufsize - 1); | 1512 | new_rx_get = (new_rx_get + 1) & |
| 1513 | (rx_bufsize - 1); | ||
| 1508 | tty_insert_flip_char(tty, data, TTY_NORMAL); | 1514 | tty_insert_flip_char(tty, data, TTY_NORMAL); |
| 1509 | info->idle_stats.recv_bytes++; | 1515 | info->idle_stats.recv_bytes++; |
| 1510 | info->icount.rx++; | 1516 | info->icount.rx++; |
| @@ -1636,7 +1642,8 @@ static void cyz_handle_cmd(struct cyclades_card *cinfo) | |||
| 1636 | special_count = 0; | 1642 | special_count = 0; |
| 1637 | delta_count = 0; | 1643 | delta_count = 0; |
| 1638 | info = &cinfo->ports[channel]; | 1644 | info = &cinfo->ports[channel]; |
| 1639 | if ((tty = info->tty) == NULL) | 1645 | tty = info->tty; |
| 1646 | if (tty == NULL) | ||
| 1640 | continue; | 1647 | continue; |
| 1641 | 1648 | ||
| 1642 | ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); | 1649 | ch_ctrl = &(zfw_ctrl->ch_ctrl[channel]); |
| @@ -1732,7 +1739,8 @@ static irqreturn_t cyz_interrupt(int irq, void *dev_id) | |||
| 1732 | 1739 | ||
| 1733 | if (unlikely(cinfo == NULL)) { | 1740 | if (unlikely(cinfo == NULL)) { |
| 1734 | #ifdef CY_DEBUG_INTERRUPTS | 1741 | #ifdef CY_DEBUG_INTERRUPTS |
| 1735 | printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n",irq); | 1742 | printk(KERN_DEBUG "cyz_interrupt: spurious interrupt %d\n", |
| 1743 | irq); | ||
| 1736 | #endif | 1744 | #endif |
| 1737 | return IRQ_NONE; /* spurious interrupt */ | 1745 | return IRQ_NONE; /* spurious interrupt */ |
| 1738 | } | 1746 | } |
| @@ -1851,9 +1859,8 @@ static int startup(struct cyclades_port *info) | |||
| 1851 | } | 1859 | } |
| 1852 | 1860 | ||
| 1853 | if (!info->type) { | 1861 | if (!info->type) { |
| 1854 | if (info->tty) { | 1862 | if (info->tty) |
| 1855 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 1863 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
| 1856 | } | ||
| 1857 | free_page(page); | 1864 | free_page(page); |
| 1858 | goto errout; | 1865 | goto errout; |
| 1859 | } | 1866 | } |
| @@ -1904,9 +1911,8 @@ static int startup(struct cyclades_port *info) | |||
| 1904 | readb(base_addr + (CySRER << index)) | CyRxData); | 1911 | readb(base_addr + (CySRER << index)) | CyRxData); |
| 1905 | info->flags |= ASYNC_INITIALIZED; | 1912 | info->flags |= ASYNC_INITIALIZED; |
| 1906 | 1913 | ||
| 1907 | if (info->tty) { | 1914 | if (info->tty) |
| 1908 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 1915 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
| 1909 | } | ||
| 1910 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | 1916 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; |
| 1911 | info->breakon = info->breakoff = 0; | 1917 | info->breakon = info->breakoff = 0; |
| 1912 | memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); | 1918 | memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); |
| @@ -1925,9 +1931,8 @@ static int startup(struct cyclades_port *info) | |||
| 1925 | base_addr = card->base_addr; | 1931 | base_addr = card->base_addr; |
| 1926 | 1932 | ||
| 1927 | firm_id = base_addr + ID_ADDRESS; | 1933 | firm_id = base_addr + ID_ADDRESS; |
| 1928 | if (!ISZLOADED(*card)) { | 1934 | if (!ISZLOADED(*card)) |
| 1929 | return -ENODEV; | 1935 | return -ENODEV; |
| 1930 | } | ||
| 1931 | 1936 | ||
| 1932 | zfw_ctrl = card->base_addr + | 1937 | zfw_ctrl = card->base_addr + |
| 1933 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 1938 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| @@ -1990,9 +1995,8 @@ static int startup(struct cyclades_port *info) | |||
| 1990 | /* enable send, recv, modem !!! */ | 1995 | /* enable send, recv, modem !!! */ |
| 1991 | 1996 | ||
| 1992 | info->flags |= ASYNC_INITIALIZED; | 1997 | info->flags |= ASYNC_INITIALIZED; |
| 1993 | if (info->tty) { | 1998 | if (info->tty) |
| 1994 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 1999 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
| 1995 | } | ||
| 1996 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | 2000 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; |
| 1997 | info->breakon = info->breakoff = 0; | 2001 | info->breakon = info->breakoff = 0; |
| 1998 | memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); | 2002 | memset((char *)&info->idle_stats, 0, sizeof(info->idle_stats)); |
| @@ -2061,9 +2065,8 @@ static void shutdown(struct cyclades_port *info) | |||
| 2061 | void __iomem *base_addr; | 2065 | void __iomem *base_addr; |
| 2062 | int chip, channel, index; | 2066 | int chip, channel, index; |
| 2063 | 2067 | ||
| 2064 | if (!(info->flags & ASYNC_INITIALIZED)) { | 2068 | if (!(info->flags & ASYNC_INITIALIZED)) |
| 2065 | return; | 2069 | return; |
| 2066 | } | ||
| 2067 | 2070 | ||
| 2068 | card = info->card; | 2071 | card = info->card; |
| 2069 | channel = info->line - card->first_line; | 2072 | channel = info->line - card->first_line; |
| @@ -2105,9 +2108,8 @@ static void shutdown(struct cyclades_port *info) | |||
| 2105 | /* it may be appropriate to clear _XMIT at | 2108 | /* it may be appropriate to clear _XMIT at |
| 2106 | some later date (after testing)!!! */ | 2109 | some later date (after testing)!!! */ |
| 2107 | 2110 | ||
| 2108 | if (info->tty) { | 2111 | if (info->tty) |
| 2109 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 2112 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
| 2110 | } | ||
| 2111 | info->flags &= ~ASYNC_INITIALIZED; | 2113 | info->flags &= ~ASYNC_INITIALIZED; |
| 2112 | spin_unlock_irqrestore(&card->card_lock, flags); | 2114 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 2113 | } else { | 2115 | } else { |
| @@ -2124,9 +2126,8 @@ static void shutdown(struct cyclades_port *info) | |||
| 2124 | #endif | 2126 | #endif |
| 2125 | 2127 | ||
| 2126 | firm_id = base_addr + ID_ADDRESS; | 2128 | firm_id = base_addr + ID_ADDRESS; |
| 2127 | if (!ISZLOADED(*card)) { | 2129 | if (!ISZLOADED(*card)) |
| 2128 | return; | 2130 | return; |
| 2129 | } | ||
| 2130 | 2131 | ||
| 2131 | zfw_ctrl = card->base_addr + | 2132 | zfw_ctrl = card->base_addr + |
| 2132 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 2133 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| @@ -2157,9 +2158,8 @@ static void shutdown(struct cyclades_port *info) | |||
| 2157 | #endif | 2158 | #endif |
| 2158 | } | 2159 | } |
| 2159 | 2160 | ||
| 2160 | if (info->tty) { | 2161 | if (info->tty) |
| 2161 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 2162 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
| 2162 | } | ||
| 2163 | info->flags &= ~ASYNC_INITIALIZED; | 2163 | info->flags &= ~ASYNC_INITIALIZED; |
| 2164 | 2164 | ||
| 2165 | spin_unlock_irqrestore(&card->card_lock, flags); | 2165 | spin_unlock_irqrestore(&card->card_lock, flags); |
| @@ -2204,7 +2204,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp, | |||
| 2204 | * If non-blocking mode is set, then make the check up front | 2204 | * If non-blocking mode is set, then make the check up front |
| 2205 | * and then exit. | 2205 | * and then exit. |
| 2206 | */ | 2206 | */ |
| 2207 | if ((filp->f_flags & O_NONBLOCK) || (tty->flags & (1 << TTY_IO_ERROR))) { | 2207 | if ((filp->f_flags & O_NONBLOCK) || |
| 2208 | (tty->flags & (1 << TTY_IO_ERROR))) { | ||
| 2208 | info->flags |= ASYNC_NORMAL_ACTIVE; | 2209 | info->flags |= ASYNC_NORMAL_ACTIVE; |
| 2209 | return 0; | 2210 | return 0; |
| 2210 | } | 2211 | } |
| @@ -2301,7 +2302,8 @@ block_til_ready(struct tty_struct *tty, struct file *filp, | |||
| 2301 | return -EINVAL; | 2302 | return -EINVAL; |
| 2302 | } | 2303 | } |
| 2303 | 2304 | ||
| 2304 | zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr)& 0xfffff); | 2305 | zfw_ctrl = base_addr + (readl(&firm_id->zfwctrl_addr) |
| 2306 | & 0xfffff); | ||
| 2305 | board_ctrl = &zfw_ctrl->board_ctrl; | 2307 | board_ctrl = &zfw_ctrl->board_ctrl; |
| 2306 | ch_ctrl = zfw_ctrl->ch_ctrl; | 2308 | ch_ctrl = zfw_ctrl->ch_ctrl; |
| 2307 | 2309 | ||
| @@ -2378,9 +2380,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp) | |||
| 2378 | int retval; | 2380 | int retval; |
| 2379 | 2381 | ||
| 2380 | line = tty->index; | 2382 | line = tty->index; |
| 2381 | if ((tty->index < 0) || (NR_PORTS <= line)) { | 2383 | if (tty->index < 0 || NR_PORTS <= line) |
| 2382 | return -ENODEV; | 2384 | return -ENODEV; |
| 2383 | } | 2385 | |
| 2384 | for (i = 0; i < NR_CARDS; i++) | 2386 | for (i = 0; i < NR_CARDS; i++) |
| 2385 | if (line < cy_card[i].first_line + cy_card[i].nports && | 2387 | if (line < cy_card[i].first_line + cy_card[i].nports && |
| 2386 | line >= cy_card[i].first_line) | 2388 | line >= cy_card[i].first_line) |
| @@ -2388,9 +2390,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp) | |||
| 2388 | if (i >= NR_CARDS) | 2390 | if (i >= NR_CARDS) |
| 2389 | return -ENODEV; | 2391 | return -ENODEV; |
| 2390 | info = &cy_card[i].ports[line - cy_card[i].first_line]; | 2392 | info = &cy_card[i].ports[line - cy_card[i].first_line]; |
| 2391 | if (info->line < 0) { | 2393 | if (info->line < 0) |
| 2392 | return -ENODEV; | 2394 | return -ENODEV; |
| 2393 | } | ||
| 2394 | 2395 | ||
| 2395 | /* If the card's firmware hasn't been loaded, | 2396 | /* If the card's firmware hasn't been loaded, |
| 2396 | treat it as absent from the system. This | 2397 | treat it as absent from the system. This |
| @@ -2456,9 +2457,9 @@ static int cy_open(struct tty_struct *tty, struct file *filp) | |||
| 2456 | #endif | 2457 | #endif |
| 2457 | tty->driver_data = info; | 2458 | tty->driver_data = info; |
| 2458 | info->tty = tty; | 2459 | info->tty = tty; |
| 2459 | if (serial_paranoia_check(info, tty->name, "cy_open")) { | 2460 | if (serial_paranoia_check(info, tty->name, "cy_open")) |
| 2460 | return -ENODEV; | 2461 | return -ENODEV; |
| 2461 | } | 2462 | |
| 2462 | #ifdef CY_DEBUG_OPEN | 2463 | #ifdef CY_DEBUG_OPEN |
| 2463 | printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, | 2464 | printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line, |
| 2464 | info->count); | 2465 | info->count); |
| @@ -2482,9 +2483,8 @@ static int cy_open(struct tty_struct *tty, struct file *filp) | |||
| 2482 | * Start up serial port | 2483 | * Start up serial port |
| 2483 | */ | 2484 | */ |
| 2484 | retval = startup(info); | 2485 | retval = startup(info); |
| 2485 | if (retval) { | 2486 | if (retval) |
| 2486 | return retval; | 2487 | return retval; |
| 2487 | } | ||
| 2488 | 2488 | ||
| 2489 | retval = block_til_ready(tty, filp, info); | 2489 | retval = block_til_ready(tty, filp, info); |
| 2490 | if (retval) { | 2490 | if (retval) { |
| @@ -2522,6 +2522,7 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 2522 | return; /* Just in case.... */ | 2522 | return; /* Just in case.... */ |
| 2523 | 2523 | ||
| 2524 | orig_jiffies = jiffies; | 2524 | orig_jiffies = jiffies; |
| 2525 | lock_kernel(); | ||
| 2525 | /* | 2526 | /* |
| 2526 | * Set the check interval to be 1/5 of the estimated time to | 2527 | * Set the check interval to be 1/5 of the estimated time to |
| 2527 | * send a single character, and make it at least 1. The check | 2528 | * send a single character, and make it at least 1. The check |
| @@ -2573,11 +2574,47 @@ static void cy_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 2573 | } | 2574 | } |
| 2574 | /* Run one more char cycle */ | 2575 | /* Run one more char cycle */ |
| 2575 | msleep_interruptible(jiffies_to_msecs(char_time * 5)); | 2576 | msleep_interruptible(jiffies_to_msecs(char_time * 5)); |
| 2577 | unlock_kernel(); | ||
| 2576 | #ifdef CY_DEBUG_WAIT_UNTIL_SENT | 2578 | #ifdef CY_DEBUG_WAIT_UNTIL_SENT |
| 2577 | printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies); | 2579 | printk(KERN_DEBUG "Clean (jiff=%lu)...done\n", jiffies); |
| 2578 | #endif | 2580 | #endif |
| 2579 | } | 2581 | } |
| 2580 | 2582 | ||
| 2583 | static void cy_flush_buffer(struct tty_struct *tty) | ||
| 2584 | { | ||
| 2585 | struct cyclades_port *info = tty->driver_data; | ||
| 2586 | struct cyclades_card *card; | ||
| 2587 | int channel, retval; | ||
| 2588 | unsigned long flags; | ||
| 2589 | |||
| 2590 | #ifdef CY_DEBUG_IO | ||
| 2591 | printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line); | ||
| 2592 | #endif | ||
| 2593 | |||
| 2594 | if (serial_paranoia_check(info, tty->name, "cy_flush_buffer")) | ||
| 2595 | return; | ||
| 2596 | |||
| 2597 | card = info->card; | ||
| 2598 | channel = info->line - card->first_line; | ||
| 2599 | |||
| 2600 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 2601 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | ||
| 2602 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 2603 | |||
| 2604 | if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board | ||
| 2605 | buffers as well */ | ||
| 2606 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 2607 | retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); | ||
| 2608 | if (retval != 0) { | ||
| 2609 | printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d " | ||
| 2610 | "was %x\n", info->line, retval); | ||
| 2611 | } | ||
| 2612 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 2613 | } | ||
| 2614 | tty_wakeup(tty); | ||
| 2615 | } /* cy_flush_buffer */ | ||
| 2616 | |||
| 2617 | |||
| 2581 | /* | 2618 | /* |
| 2582 | * This routine is called when a particular tty device is closed. | 2619 | * This routine is called when a particular tty device is closed. |
| 2583 | */ | 2620 | */ |
| @@ -2591,9 +2628,8 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
| 2591 | printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line); | 2628 | printk(KERN_DEBUG "cyc:cy_close ttyC%d\n", info->line); |
| 2592 | #endif | 2629 | #endif |
| 2593 | 2630 | ||
| 2594 | if (!info || serial_paranoia_check(info, tty->name, "cy_close")) { | 2631 | if (!info || serial_paranoia_check(info, tty->name, "cy_close")) |
| 2595 | return; | 2632 | return; |
| 2596 | } | ||
| 2597 | 2633 | ||
| 2598 | card = info->card; | 2634 | card = info->card; |
| 2599 | 2635 | ||
| @@ -2641,9 +2677,9 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
| 2641 | */ | 2677 | */ |
| 2642 | tty->closing = 1; | 2678 | tty->closing = 1; |
| 2643 | spin_unlock_irqrestore(&card->card_lock, flags); | 2679 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 2644 | if (info->closing_wait != CY_CLOSING_WAIT_NONE) { | 2680 | if (info->closing_wait != CY_CLOSING_WAIT_NONE) |
| 2645 | tty_wait_until_sent(tty, info->closing_wait); | 2681 | tty_wait_until_sent(tty, info->closing_wait); |
| 2646 | } | 2682 | |
| 2647 | spin_lock_irqsave(&card->card_lock, flags); | 2683 | spin_lock_irqsave(&card->card_lock, flags); |
| 2648 | 2684 | ||
| 2649 | if (!IS_CYC_Z(*card)) { | 2685 | if (!IS_CYC_Z(*card)) { |
| @@ -2657,15 +2693,16 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
| 2657 | cy_writeb(base_addr + (CySRER << index), | 2693 | cy_writeb(base_addr + (CySRER << index), |
| 2658 | readb(base_addr + (CySRER << index)) & ~CyRxData); | 2694 | readb(base_addr + (CySRER << index)) & ~CyRxData); |
| 2659 | if (info->flags & ASYNC_INITIALIZED) { | 2695 | if (info->flags & ASYNC_INITIALIZED) { |
| 2660 | /* Waiting for on-board buffers to be empty before closing | 2696 | /* Waiting for on-board buffers to be empty before |
| 2661 | the port */ | 2697 | closing the port */ |
| 2662 | spin_unlock_irqrestore(&card->card_lock, flags); | 2698 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 2663 | cy_wait_until_sent(tty, info->timeout); | 2699 | cy_wait_until_sent(tty, info->timeout); |
| 2664 | spin_lock_irqsave(&card->card_lock, flags); | 2700 | spin_lock_irqsave(&card->card_lock, flags); |
| 2665 | } | 2701 | } |
| 2666 | } else { | 2702 | } else { |
| 2667 | #ifdef Z_WAKE | 2703 | #ifdef Z_WAKE |
| 2668 | /* Waiting for on-board buffers to be empty before closing the port */ | 2704 | /* Waiting for on-board buffers to be empty before closing |
| 2705 | the port */ | ||
| 2669 | void __iomem *base_addr = card->base_addr; | 2706 | void __iomem *base_addr = card->base_addr; |
| 2670 | struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; | 2707 | struct FIRM_ID __iomem *firm_id = base_addr + ID_ADDRESS; |
| 2671 | struct ZFW_CTRL __iomem *zfw_ctrl = | 2708 | struct ZFW_CTRL __iomem *zfw_ctrl = |
| @@ -2689,8 +2726,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
| 2689 | 2726 | ||
| 2690 | spin_unlock_irqrestore(&card->card_lock, flags); | 2727 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 2691 | shutdown(info); | 2728 | shutdown(info); |
| 2692 | if (tty->driver->flush_buffer) | 2729 | cy_flush_buffer(tty); |
| 2693 | tty->driver->flush_buffer(tty); | ||
| 2694 | tty_ldisc_flush(tty); | 2730 | tty_ldisc_flush(tty); |
| 2695 | spin_lock_irqsave(&card->card_lock, flags); | 2731 | spin_lock_irqsave(&card->card_lock, flags); |
| 2696 | 2732 | ||
| @@ -2738,17 +2774,16 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
| 2738 | printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line); | 2774 | printk(KERN_DEBUG "cyc:cy_write ttyC%d\n", info->line); |
| 2739 | #endif | 2775 | #endif |
| 2740 | 2776 | ||
| 2741 | if (serial_paranoia_check(info, tty->name, "cy_write")) { | 2777 | if (serial_paranoia_check(info, tty->name, "cy_write")) |
| 2742 | return 0; | 2778 | return 0; |
| 2743 | } | ||
| 2744 | 2779 | ||
| 2745 | if (!info->xmit_buf) | 2780 | if (!info->xmit_buf) |
| 2746 | return 0; | 2781 | return 0; |
| 2747 | 2782 | ||
| 2748 | spin_lock_irqsave(&info->card->card_lock, flags); | 2783 | spin_lock_irqsave(&info->card->card_lock, flags); |
| 2749 | while (1) { | 2784 | while (1) { |
| 2750 | c = min(count, min((int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1), | 2785 | c = min(count, (int)(SERIAL_XMIT_SIZE - info->xmit_cnt - 1)); |
| 2751 | (int)(SERIAL_XMIT_SIZE - info->xmit_head))); | 2786 | c = min(c, (int)(SERIAL_XMIT_SIZE - info->xmit_head)); |
| 2752 | 2787 | ||
| 2753 | if (c <= 0) | 2788 | if (c <= 0) |
| 2754 | break; | 2789 | break; |
| @@ -2766,9 +2801,9 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
| 2766 | info->idle_stats.xmit_bytes += ret; | 2801 | info->idle_stats.xmit_bytes += ret; |
| 2767 | info->idle_stats.xmit_idle = jiffies; | 2802 | info->idle_stats.xmit_idle = jiffies; |
| 2768 | 2803 | ||
| 2769 | if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) { | 2804 | if (info->xmit_cnt && !tty->stopped && !tty->hw_stopped) |
| 2770 | start_xmit(info); | 2805 | start_xmit(info); |
| 2771 | } | 2806 | |
| 2772 | return ret; | 2807 | return ret; |
| 2773 | } /* cy_write */ | 2808 | } /* cy_write */ |
| 2774 | 2809 | ||
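Splitting the nested min() in cy_write() into two assignments does not change the arithmetic: each pass copies at most the smaller of the free space left in the software transmit ring (keeping one slot open) and the contiguous run up to the end of the buffer, so a single memcpy never wraps. A self-contained model of that chunking, assuming a power-of-two ring size in the spirit of SERIAL_XMIT_SIZE:

#include <stdio.h>
#include <string.h>

#define XMIT_SIZE 4096		/* stand-in for SERIAL_XMIT_SIZE (power of two) */

struct ring {
	char buf[XMIT_SIZE];
	int head;	/* next slot to fill */
	int tail;	/* next slot to drain (drain side omitted here) */
	int cnt;	/* bytes currently queued */
};

/* Copy as much of buf[0..count) as fits, in wrap-free chunks. */
static int ring_write(struct ring *r, const char *buf, int count)
{
	int written = 0;

	while (count > 0) {
		int c = count;

		if (c > XMIT_SIZE - r->cnt - 1)	/* free space, keeping one slot */
			c = XMIT_SIZE - r->cnt - 1;
		if (c > XMIT_SIZE - r->head)	/* contiguous space to the end */
			c = XMIT_SIZE - r->head;
		if (c <= 0)
			break;

		memcpy(r->buf + r->head, buf + written, c);
		r->head = (r->head + c) & (XMIT_SIZE - 1);
		r->cnt += c;
		written += c;
		count -= c;
	}
	return written;
}

int main(void)
{
	struct ring r = { .head = 4090, .tail = 4090, .cnt = 0 };

	/* Crosses the end of the buffer, so it is copied in two chunks. */
	printf("queued %d bytes\n", ring_write(&r, "hello, world", 12));
	return 0;
}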
| @@ -2779,7 +2814,7 @@ static int cy_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
| 2779 | * done stuffing characters into the driver. If there is no room | 2814 | * done stuffing characters into the driver. If there is no room |
| 2780 | * in the queue, the character is ignored. | 2815 | * in the queue, the character is ignored. |
| 2781 | */ | 2816 | */ |
| 2782 | static void cy_put_char(struct tty_struct *tty, unsigned char ch) | 2817 | static int cy_put_char(struct tty_struct *tty, unsigned char ch) |
| 2783 | { | 2818 | { |
| 2784 | struct cyclades_port *info = tty->driver_data; | 2819 | struct cyclades_port *info = tty->driver_data; |
| 2785 | unsigned long flags; | 2820 | unsigned long flags; |
| @@ -2789,15 +2824,15 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 2789 | #endif | 2824 | #endif |
| 2790 | 2825 | ||
| 2791 | if (serial_paranoia_check(info, tty->name, "cy_put_char")) | 2826 | if (serial_paranoia_check(info, tty->name, "cy_put_char")) |
| 2792 | return; | 2827 | return 0; |
| 2793 | 2828 | ||
| 2794 | if (!info->xmit_buf) | 2829 | if (!info->xmit_buf) |
| 2795 | return; | 2830 | return 0; |
| 2796 | 2831 | ||
| 2797 | spin_lock_irqsave(&info->card->card_lock, flags); | 2832 | spin_lock_irqsave(&info->card->card_lock, flags); |
| 2798 | if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) { | 2833 | if (info->xmit_cnt >= (int)(SERIAL_XMIT_SIZE - 1)) { |
| 2799 | spin_unlock_irqrestore(&info->card->card_lock, flags); | 2834 | spin_unlock_irqrestore(&info->card->card_lock, flags); |
| 2800 | return; | 2835 | return 0; |
| 2801 | } | 2836 | } |
| 2802 | 2837 | ||
| 2803 | info->xmit_buf[info->xmit_head++] = ch; | 2838 | info->xmit_buf[info->xmit_head++] = ch; |
| @@ -2806,11 +2841,12 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 2806 | info->idle_stats.xmit_bytes++; | 2841 | info->idle_stats.xmit_bytes++; |
| 2807 | info->idle_stats.xmit_idle = jiffies; | 2842 | info->idle_stats.xmit_idle = jiffies; |
| 2808 | spin_unlock_irqrestore(&info->card->card_lock, flags); | 2843 | spin_unlock_irqrestore(&info->card->card_lock, flags); |
| 2844 | return 1; | ||
| 2809 | } /* cy_put_char */ | 2845 | } /* cy_put_char */ |
| 2810 | 2846 | ||
| 2811 | /* | 2847 | /* |
| 2812 | * This routine is called by the kernel after it has written a | 2848 | * This routine is called by the kernel after it has written a |
| 2813 | * series of characters to the tty device using put_char(). | 2849 | * series of characters to the tty device using put_char(). |
| 2814 | */ | 2850 | */ |
| 2815 | static void cy_flush_chars(struct tty_struct *tty) | 2851 | static void cy_flush_chars(struct tty_struct *tty) |
| 2816 | { | 2852 | { |
| @@ -2882,6 +2918,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty) | |||
| 2882 | int char_count; | 2918 | int char_count; |
| 2883 | __u32 tx_put, tx_get, tx_bufsize; | 2919 | __u32 tx_put, tx_get, tx_bufsize; |
| 2884 | 2920 | ||
| 2921 | lock_kernel(); | ||
| 2885 | firm_id = card->base_addr + ID_ADDRESS; | 2922 | firm_id = card->base_addr + ID_ADDRESS; |
| 2886 | zfw_ctrl = card->base_addr + | 2923 | zfw_ctrl = card->base_addr + |
| 2887 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 2924 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| @@ -2899,6 +2936,7 @@ static int cy_chars_in_buffer(struct tty_struct *tty) | |||
| 2899 | printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", | 2936 | printk(KERN_DEBUG "cyc:cy_chars_in_buffer ttyC%d %d\n", |
| 2900 | info->line, info->xmit_cnt + char_count); | 2937 | info->line, info->xmit_cnt + char_count); |
| 2901 | #endif | 2938 | #endif |
| 2939 | unlock_kernel(); | ||
| 2902 | return info->xmit_cnt + char_count; | 2940 | return info->xmit_cnt + char_count; |
| 2903 | } | 2941 | } |
| 2904 | #endif /* Z_EXT_CHARS_IN_BUFFER */ | 2942 | #endif /* Z_EXT_CHARS_IN_BUFFER */ |
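For Z cards built with Z_EXT_CHARS_IN_BUFFER, cy_chars_in_buffer() now takes the big kernel lock while it reads the firmware's tx_put/tx_get indices and adds the on-board count to xmit_cnt. The occupancy itself is the usual circular-buffer difference between the put and get indices; a minimal sketch of that computation, with index values invented for the example:

#include <stdio.h>

/* Bytes still queued between a get (drain) and put (fill) index of a
 * circular buffer of size bufsize; works whether or not put has wrapped. */
static unsigned int chars_in_buffer(unsigned int put, unsigned int get,
				    unsigned int bufsize)
{
	if (put >= get)
		return put - get;
	return put - get + bufsize;	/* put wrapped past the end */
}

int main(void)
{
	printf("%u\n", chars_in_buffer(10, 250, 256));	/* 16 bytes queued */
	return 0;
}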
| @@ -2950,12 +2988,12 @@ static void set_line_char(struct cyclades_port *info) | |||
| 2950 | int baud, baud_rate = 0; | 2988 | int baud, baud_rate = 0; |
| 2951 | int i; | 2989 | int i; |
| 2952 | 2990 | ||
| 2953 | if (!info->tty || !info->tty->termios) { | 2991 | if (!info->tty || !info->tty->termios) |
| 2954 | return; | 2992 | return; |
| 2955 | } | 2993 | |
| 2956 | if (info->line == -1) { | 2994 | if (info->line == -1) |
| 2957 | return; | 2995 | return; |
| 2958 | } | 2996 | |
| 2959 | cflag = info->tty->termios->c_cflag; | 2997 | cflag = info->tty->termios->c_cflag; |
| 2960 | iflag = info->tty->termios->c_iflag; | 2998 | iflag = info->tty->termios->c_iflag; |
| 2961 | 2999 | ||
| @@ -2994,13 +3032,11 @@ static void set_line_char(struct cyclades_port *info) | |||
| 2994 | } | 3032 | } |
| 2995 | /* find the baud index */ | 3033 | /* find the baud index */ |
| 2996 | for (i = 0; i < 20; i++) { | 3034 | for (i = 0; i < 20; i++) { |
| 2997 | if (baud == baud_table[i]) { | 3035 | if (baud == baud_table[i]) |
| 2998 | break; | 3036 | break; |
| 2999 | } | ||
| 3000 | } | 3037 | } |
| 3001 | if (i == 20) { | 3038 | if (i == 20) |
| 3002 | i = 19; /* CD1400_MAX_SPEED */ | 3039 | i = 19; /* CD1400_MAX_SPEED */ |
| 3003 | } | ||
| 3004 | 3040 | ||
| 3005 | if (baud == 38400 && (info->flags & ASYNC_SPD_MASK) == | 3041 | if (baud == 38400 && (info->flags & ASYNC_SPD_MASK) == |
| 3006 | ASYNC_SPD_CUST) { | 3042 | ASYNC_SPD_CUST) { |
| @@ -3059,18 +3095,16 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3059 | info->cor1 = Cy_8_BITS; | 3095 | info->cor1 = Cy_8_BITS; |
| 3060 | break; | 3096 | break; |
| 3061 | } | 3097 | } |
| 3062 | if (cflag & CSTOPB) { | 3098 | if (cflag & CSTOPB) |
| 3063 | info->cor1 |= Cy_2_STOP; | 3099 | info->cor1 |= Cy_2_STOP; |
| 3064 | } | 3100 | |
| 3065 | if (cflag & PARENB) { | 3101 | if (cflag & PARENB) { |
| 3066 | if (cflag & PARODD) { | 3102 | if (cflag & PARODD) |
| 3067 | info->cor1 |= CyPARITY_O; | 3103 | info->cor1 |= CyPARITY_O; |
| 3068 | } else { | 3104 | else |
| 3069 | info->cor1 |= CyPARITY_E; | 3105 | info->cor1 |= CyPARITY_E; |
| 3070 | } | 3106 | } else |
| 3071 | } else { | ||
| 3072 | info->cor1 |= CyPARITY_NONE; | 3107 | info->cor1 |= CyPARITY_NONE; |
| 3073 | } | ||
| 3074 | 3108 | ||
| 3075 | /* CTS flow control flag */ | 3109 | /* CTS flow control flag */ |
| 3076 | if (cflag & CRTSCTS) { | 3110 | if (cflag & CRTSCTS) { |
| @@ -3123,7 +3157,8 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3123 | cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | | 3157 | cyy_issue_cmd(base_addr, CyCOR_CHANGE | CyCOR1ch | CyCOR2ch | |
| 3124 | CyCOR3ch, index); | 3158 | CyCOR3ch, index); |
| 3125 | 3159 | ||
| 3126 | cy_writeb(base_addr + (CyCAR << index), (u_char) channel); /* !!! Is this needed? */ | 3160 | /* !!! Is this needed? */ |
| 3161 | cy_writeb(base_addr + (CyCAR << index), (u_char) channel); | ||
| 3127 | cy_writeb(base_addr + (CyRTPR << index), | 3162 | cy_writeb(base_addr + (CyRTPR << index), |
| 3128 | (info->default_timeout ? info->default_timeout : 0x02)); | 3163 | (info->default_timeout ? info->default_timeout : 0x02)); |
| 3129 | /* 10ms rx timeout */ | 3164 | /* 10ms rx timeout */ |
| @@ -3191,9 +3226,8 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3191 | #endif | 3226 | #endif |
| 3192 | } | 3227 | } |
| 3193 | 3228 | ||
| 3194 | if (info->tty) { | 3229 | if (info->tty) |
| 3195 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 3230 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
| 3196 | } | ||
| 3197 | spin_unlock_irqrestore(&card->card_lock, flags); | 3231 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 3198 | 3232 | ||
| 3199 | } else { | 3233 | } else { |
| @@ -3206,9 +3240,8 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3206 | int retval; | 3240 | int retval; |
| 3207 | 3241 | ||
| 3208 | firm_id = card->base_addr + ID_ADDRESS; | 3242 | firm_id = card->base_addr + ID_ADDRESS; |
| 3209 | if (!ISZLOADED(*card)) { | 3243 | if (!ISZLOADED(*card)) |
| 3210 | return; | 3244 | return; |
| 3211 | } | ||
| 3212 | 3245 | ||
| 3213 | zfw_ctrl = card->base_addr + | 3246 | zfw_ctrl = card->base_addr + |
| 3214 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); | 3247 | (readl(&firm_id->zfwctrl_addr) & 0xfffff); |
| @@ -3268,14 +3301,12 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3268 | readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); | 3301 | readl(&ch_ctrl->comm_data_l) | C_DL_1STOP); |
| 3269 | } | 3302 | } |
| 3270 | if (cflag & PARENB) { | 3303 | if (cflag & PARENB) { |
| 3271 | if (cflag & PARODD) { | 3304 | if (cflag & PARODD) |
| 3272 | cy_writel(&ch_ctrl->comm_parity, C_PR_ODD); | 3305 | cy_writel(&ch_ctrl->comm_parity, C_PR_ODD); |
| 3273 | } else { | 3306 | else |
| 3274 | cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN); | 3307 | cy_writel(&ch_ctrl->comm_parity, C_PR_EVEN); |
| 3275 | } | 3308 | } else |
| 3276 | } else { | ||
| 3277 | cy_writel(&ch_ctrl->comm_parity, C_PR_NONE); | 3309 | cy_writel(&ch_ctrl->comm_parity, C_PR_NONE); |
| 3278 | } | ||
| 3279 | 3310 | ||
| 3280 | /* CTS flow control flag */ | 3311 | /* CTS flow control flag */ |
| 3281 | if (cflag & CRTSCTS) { | 3312 | if (cflag & CRTSCTS) { |
| @@ -3305,11 +3336,10 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3305 | } | 3336 | } |
| 3306 | 3337 | ||
| 3307 | /* CD sensitivity */ | 3338 | /* CD sensitivity */ |
| 3308 | if (cflag & CLOCAL) { | 3339 | if (cflag & CLOCAL) |
| 3309 | info->flags &= ~ASYNC_CHECK_CD; | 3340 | info->flags &= ~ASYNC_CHECK_CD; |
| 3310 | } else { | 3341 | else |
| 3311 | info->flags |= ASYNC_CHECK_CD; | 3342 | info->flags |= ASYNC_CHECK_CD; |
| 3312 | } | ||
| 3313 | 3343 | ||
| 3314 | if (baud == 0) { /* baud rate is zero, turn off line */ | 3344 | if (baud == 0) { /* baud rate is zero, turn off line */ |
| 3315 | cy_writel(&ch_ctrl->rs_control, | 3345 | cy_writel(&ch_ctrl->rs_control, |
| @@ -3325,21 +3355,20 @@ static void set_line_char(struct cyclades_port *info) | |||
| 3325 | #endif | 3355 | #endif |
| 3326 | } | 3356 | } |
| 3327 | 3357 | ||
| 3328 | retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM,0L); | 3358 | retval = cyz_issue_cmd(card, channel, C_CM_IOCTLM, 0L); |
| 3329 | if (retval != 0) { | 3359 | if (retval != 0) { |
| 3330 | printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " | 3360 | printk(KERN_ERR "cyc:set_line_char(2) retval on ttyC%d " |
| 3331 | "was %x\n", info->line, retval); | 3361 | "was %x\n", info->line, retval); |
| 3332 | } | 3362 | } |
| 3333 | 3363 | ||
| 3334 | if (info->tty) { | 3364 | if (info->tty) |
| 3335 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 3365 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
| 3336 | } | ||
| 3337 | } | 3366 | } |
| 3338 | } /* set_line_char */ | 3367 | } /* set_line_char */ |
| 3339 | 3368 | ||
| 3340 | static int | 3369 | static int |
| 3341 | get_serial_info(struct cyclades_port *info, | 3370 | get_serial_info(struct cyclades_port *info, |
| 3342 | struct serial_struct __user * retinfo) | 3371 | struct serial_struct __user *retinfo) |
| 3343 | { | 3372 | { |
| 3344 | struct serial_struct tmp; | 3373 | struct serial_struct tmp; |
| 3345 | struct cyclades_card *cinfo = info->card; | 3374 | struct cyclades_card *cinfo = info->card; |
| @@ -3363,7 +3392,7 @@ get_serial_info(struct cyclades_port *info, | |||
| 3363 | 3392 | ||
| 3364 | static int | 3393 | static int |
| 3365 | set_serial_info(struct cyclades_port *info, | 3394 | set_serial_info(struct cyclades_port *info, |
| 3366 | struct serial_struct __user * new_info) | 3395 | struct serial_struct __user *new_info) |
| 3367 | { | 3396 | { |
| 3368 | struct serial_struct new_serial; | 3397 | struct serial_struct new_serial; |
| 3369 | struct cyclades_port old_info; | 3398 | struct cyclades_port old_info; |
| @@ -3417,7 +3446,7 @@ check_and_exit: | |||
| 3417 | * transmit holding register is empty. This functionality | 3446 | * transmit holding register is empty. This functionality |
| 3418 | * allows an RS485 driver to be written in user space. | 3447 | * allows an RS485 driver to be written in user space. |
| 3419 | */ | 3448 | */ |
| 3420 | static int get_lsr_info(struct cyclades_port *info, unsigned int __user * value) | 3449 | static int get_lsr_info(struct cyclades_port *info, unsigned int __user *value) |
| 3421 | { | 3450 | { |
| 3422 | struct cyclades_card *card; | 3451 | struct cyclades_card *card; |
| 3423 | int chip, channel, index; | 3452 | int chip, channel, index; |
| @@ -3461,9 +3490,11 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 3461 | struct BOARD_CTRL __iomem *board_ctrl; | 3490 | struct BOARD_CTRL __iomem *board_ctrl; |
| 3462 | struct CH_CTRL __iomem *ch_ctrl; | 3491 | struct CH_CTRL __iomem *ch_ctrl; |
| 3463 | 3492 | ||
| 3464 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 3493 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 3465 | return -ENODEV; | 3494 | return -ENODEV; |
| 3466 | 3495 | ||
| 3496 | lock_kernel(); | ||
| 3497 | |||
| 3467 | card = info->card; | 3498 | card = info->card; |
| 3468 | channel = info->line - card->first_line; | 3499 | channel = info->line - card->first_line; |
| 3469 | if (!IS_CYC_Z(*card)) { | 3500 | if (!IS_CYC_Z(*card)) { |
| @@ -3506,10 +3537,12 @@ static int cy_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 3506 | ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0); | 3537 | ((lstatus & C_RS_CTS) ? TIOCM_CTS : 0); |
| 3507 | } else { | 3538 | } else { |
| 3508 | result = 0; | 3539 | result = 0; |
| 3540 | unlock_kernel(); | ||
| 3509 | return -ENODEV; | 3541 | return -ENODEV; |
| 3510 | } | 3542 | } |
| 3511 | 3543 | ||
| 3512 | } | 3544 | } |
| 3545 | unlock_kernel(); | ||
| 3513 | return result; | 3546 | return result; |
| 3514 | } /* cy_tiomget */ | 3547 | } /* cy_tiomget */ |
| 3515 | 3548 | ||
| @@ -3528,7 +3561,7 @@ cy_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 3528 | struct CH_CTRL __iomem *ch_ctrl; | 3561 | struct CH_CTRL __iomem *ch_ctrl; |
| 3529 | int retval; | 3562 | int retval; |
| 3530 | 3563 | ||
| 3531 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 3564 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 3532 | return -ENODEV; | 3565 | return -ENODEV; |
| 3533 | 3566 | ||
| 3534 | card = info->card; | 3567 | card = info->card; |
| @@ -3727,8 +3760,8 @@ static void cy_break(struct tty_struct *tty, int break_state) | |||
| 3727 | spin_unlock_irqrestore(&card->card_lock, flags); | 3760 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 3728 | } /* cy_break */ | 3761 | } /* cy_break */ |
| 3729 | 3762 | ||
| 3730 | static int | 3763 | static int get_mon_info(struct cyclades_port *info, |
| 3731 | get_mon_info(struct cyclades_port *info, struct cyclades_monitor __user * mon) | 3764 | struct cyclades_monitor __user *mon) |
| 3732 | { | 3765 | { |
| 3733 | 3766 | ||
| 3734 | if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor))) | 3767 | if (copy_to_user(mon, &info->mon, sizeof(struct cyclades_monitor))) |
| @@ -3767,8 +3800,8 @@ static int set_threshold(struct cyclades_port *info, unsigned long value) | |||
| 3767 | return 0; | 3800 | return 0; |
| 3768 | } /* set_threshold */ | 3801 | } /* set_threshold */ |
| 3769 | 3802 | ||
| 3770 | static int | 3803 | static int get_threshold(struct cyclades_port *info, |
| 3771 | get_threshold(struct cyclades_port *info, unsigned long __user * value) | 3804 | unsigned long __user *value) |
| 3772 | { | 3805 | { |
| 3773 | struct cyclades_card *card; | 3806 | struct cyclades_card *card; |
| 3774 | void __iomem *base_addr; | 3807 | void __iomem *base_addr; |
| @@ -3789,15 +3822,15 @@ get_threshold(struct cyclades_port *info, unsigned long __user * value) | |||
| 3789 | return 0; | 3822 | return 0; |
| 3790 | } /* get_threshold */ | 3823 | } /* get_threshold */ |
| 3791 | 3824 | ||
| 3792 | static int | 3825 | static int set_default_threshold(struct cyclades_port *info, |
| 3793 | set_default_threshold(struct cyclades_port *info, unsigned long value) | 3826 | unsigned long value) |
| 3794 | { | 3827 | { |
| 3795 | info->default_threshold = value & 0x0f; | 3828 | info->default_threshold = value & 0x0f; |
| 3796 | return 0; | 3829 | return 0; |
| 3797 | } /* set_default_threshold */ | 3830 | } /* set_default_threshold */ |
| 3798 | 3831 | ||
| 3799 | static int | 3832 | static int get_default_threshold(struct cyclades_port *info, |
| 3800 | get_default_threshold(struct cyclades_port *info, unsigned long __user * value) | 3833 | unsigned long __user *value) |
| 3801 | { | 3834 | { |
| 3802 | return put_user(info->default_threshold, value); | 3835 | return put_user(info->default_threshold, value); |
| 3803 | } /* get_default_threshold */ | 3836 | } /* get_default_threshold */ |
| @@ -3824,7 +3857,8 @@ static int set_timeout(struct cyclades_port *info, unsigned long value) | |||
| 3824 | return 0; | 3857 | return 0; |
| 3825 | } /* set_timeout */ | 3858 | } /* set_timeout */ |
| 3826 | 3859 | ||
| 3827 | static int get_timeout(struct cyclades_port *info, unsigned long __user * value) | 3860 | static int get_timeout(struct cyclades_port *info, |
| 3861 | unsigned long __user *value) | ||
| 3828 | { | 3862 | { |
| 3829 | struct cyclades_card *card; | 3863 | struct cyclades_card *card; |
| 3830 | void __iomem *base_addr; | 3864 | void __iomem *base_addr; |
| @@ -3851,8 +3885,8 @@ static int set_default_timeout(struct cyclades_port *info, unsigned long value) | |||
| 3851 | return 0; | 3885 | return 0; |
| 3852 | } /* set_default_timeout */ | 3886 | } /* set_default_timeout */ |
| 3853 | 3887 | ||
| 3854 | static int | 3888 | static int get_default_timeout(struct cyclades_port *info, |
| 3855 | get_default_timeout(struct cyclades_port *info, unsigned long __user * value) | 3889 | unsigned long __user *value) |
| 3856 | { | 3890 | { |
| 3857 | return put_user(info->default_timeout, value); | 3891 | return put_user(info->default_timeout, value); |
| 3858 | } /* get_default_timeout */ | 3892 | } /* get_default_timeout */ |
| @@ -3880,6 +3914,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 3880 | printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n", | 3914 | printk(KERN_DEBUG "cyc:cy_ioctl ttyC%d, cmd = %x arg = %lx\n", |
| 3881 | info->line, cmd, arg); | 3915 | info->line, cmd, arg); |
| 3882 | #endif | 3916 | #endif |
| 3917 | lock_kernel(); | ||
| 3883 | 3918 | ||
| 3884 | switch (cmd) { | 3919 | switch (cmd) { |
| 3885 | case CYGETMON: | 3920 | case CYGETMON: |
| @@ -3936,7 +3971,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 3936 | break; | 3971 | break; |
| 3937 | #endif /* CONFIG_CYZ_INTR */ | 3972 | #endif /* CONFIG_CYZ_INTR */ |
| 3938 | case CYSETWAIT: | 3973 | case CYSETWAIT: |
| 3939 | info->closing_wait = (unsigned short)arg *HZ / 100; | 3974 | info->closing_wait = (unsigned short)arg * HZ / 100; |
| 3940 | ret_val = 0; | 3975 | ret_val = 0; |
| 3941 | break; | 3976 | break; |
| 3942 | case CYGETWAIT: | 3977 | case CYGETWAIT: |
| @@ -3988,47 +4023,47 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 3988 | p_cuser = argp; | 4023 | p_cuser = argp; |
| 3989 | ret_val = put_user(cnow.cts, &p_cuser->cts); | 4024 | ret_val = put_user(cnow.cts, &p_cuser->cts); |
| 3990 | if (ret_val) | 4025 | if (ret_val) |
| 3991 | return ret_val; | 4026 | break; |
| 3992 | ret_val = put_user(cnow.dsr, &p_cuser->dsr); | 4027 | ret_val = put_user(cnow.dsr, &p_cuser->dsr); |
| 3993 | if (ret_val) | 4028 | if (ret_val) |
| 3994 | return ret_val; | 4029 | break; |
| 3995 | ret_val = put_user(cnow.rng, &p_cuser->rng); | 4030 | ret_val = put_user(cnow.rng, &p_cuser->rng); |
| 3996 | if (ret_val) | 4031 | if (ret_val) |
| 3997 | return ret_val; | 4032 | break; |
| 3998 | ret_val = put_user(cnow.dcd, &p_cuser->dcd); | 4033 | ret_val = put_user(cnow.dcd, &p_cuser->dcd); |
| 3999 | if (ret_val) | 4034 | if (ret_val) |
| 4000 | return ret_val; | 4035 | break; |
| 4001 | ret_val = put_user(cnow.rx, &p_cuser->rx); | 4036 | ret_val = put_user(cnow.rx, &p_cuser->rx); |
| 4002 | if (ret_val) | 4037 | if (ret_val) |
| 4003 | return ret_val; | 4038 | break; |
| 4004 | ret_val = put_user(cnow.tx, &p_cuser->tx); | 4039 | ret_val = put_user(cnow.tx, &p_cuser->tx); |
| 4005 | if (ret_val) | 4040 | if (ret_val) |
| 4006 | return ret_val; | 4041 | break; |
| 4007 | ret_val = put_user(cnow.frame, &p_cuser->frame); | 4042 | ret_val = put_user(cnow.frame, &p_cuser->frame); |
| 4008 | if (ret_val) | 4043 | if (ret_val) |
| 4009 | return ret_val; | 4044 | break; |
| 4010 | ret_val = put_user(cnow.overrun, &p_cuser->overrun); | 4045 | ret_val = put_user(cnow.overrun, &p_cuser->overrun); |
| 4011 | if (ret_val) | 4046 | if (ret_val) |
| 4012 | return ret_val; | 4047 | break; |
| 4013 | ret_val = put_user(cnow.parity, &p_cuser->parity); | 4048 | ret_val = put_user(cnow.parity, &p_cuser->parity); |
| 4014 | if (ret_val) | 4049 | if (ret_val) |
| 4015 | return ret_val; | 4050 | break; |
| 4016 | ret_val = put_user(cnow.brk, &p_cuser->brk); | 4051 | ret_val = put_user(cnow.brk, &p_cuser->brk); |
| 4017 | if (ret_val) | 4052 | if (ret_val) |
| 4018 | return ret_val; | 4053 | break; |
| 4019 | ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun); | 4054 | ret_val = put_user(cnow.buf_overrun, &p_cuser->buf_overrun); |
| 4020 | if (ret_val) | 4055 | if (ret_val) |
| 4021 | return ret_val; | 4056 | break; |
| 4022 | ret_val = 0; | 4057 | ret_val = 0; |
| 4023 | break; | 4058 | break; |
| 4024 | default: | 4059 | default: |
| 4025 | ret_val = -ENOIOCTLCMD; | 4060 | ret_val = -ENOIOCTLCMD; |
| 4026 | } | 4061 | } |
| 4062 | unlock_kernel(); | ||
| 4027 | 4063 | ||
| 4028 | #ifdef CY_DEBUG_OTHER | 4064 | #ifdef CY_DEBUG_OTHER |
| 4029 | printk(KERN_DEBUG "cyc:cy_ioctl done\n"); | 4065 | printk(KERN_DEBUG "cyc:cy_ioctl done\n"); |
| 4030 | #endif | 4066 | #endif |
| 4031 | |||
| 4032 | return ret_val; | 4067 | return ret_val; |
| 4033 | } /* cy_ioctl */ | 4068 | } /* cy_ioctl */ |
| 4034 | 4069 | ||
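The string of return ret_val statements in the TIOCGICOUNT branch becomes break for a reason: with lock_kernel() now taken at the top of cy_ioctl(), the only unlock_kernel() sits after the switch, so every path has to fall through to it instead of returning with the lock held. The same single-exit discipline, sketched with an ordinary mutex rather than the BKL (illustrative only, names invented):

#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int do_step(int which)
{
	return which == 2 ? -EIO : 0;	/* pretend the second step fails */
}

static int locked_ioctl(unsigned int cmd)
{
	int ret;

	pthread_mutex_lock(&lock);
	switch (cmd) {
	case 1:
		ret = do_step(1);
		if (ret)
			break;		/* not "return ret": the unlock below must still run */
		ret = do_step(2);
		break;
	default:
		ret = -ENOTTY;
	}
	pthread_mutex_unlock(&lock);	/* single exit: the lock is always dropped */
	return ret;
}

int main(void)
{
	return locked_ioctl(1) ? 1 : 0;
}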
| @@ -4113,9 +4148,8 @@ static void cy_throttle(struct tty_struct *tty) | |||
| 4113 | tty->ldisc.chars_in_buffer(tty), info->line); | 4148 | tty->ldisc.chars_in_buffer(tty), info->line); |
| 4114 | #endif | 4149 | #endif |
| 4115 | 4150 | ||
| 4116 | if (serial_paranoia_check(info, tty->name, "cy_throttle")) { | 4151 | if (serial_paranoia_check(info, tty->name, "cy_throttle")) |
| 4117 | return; | 4152 | return; |
| 4118 | } | ||
| 4119 | 4153 | ||
| 4120 | card = info->card; | 4154 | card = info->card; |
| 4121 | 4155 | ||
| @@ -4169,12 +4203,11 @@ static void cy_unthrottle(struct tty_struct *tty) | |||
| 4169 | char buf[64]; | 4203 | char buf[64]; |
| 4170 | 4204 | ||
| 4171 | printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n", | 4205 | printk(KERN_DEBUG "cyc:unthrottle %s: %ld...ttyC%d\n", |
| 4172 | tty_name(tty, buf), tty->ldisc.chars_in_buffer(tty),info->line); | 4206 | tty_name(tty, buf), tty_chars_in_buffer(tty), info->line); |
| 4173 | #endif | 4207 | #endif |
| 4174 | 4208 | ||
| 4175 | if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) { | 4209 | if (serial_paranoia_check(info, tty->name, "cy_unthrottle")) |
| 4176 | return; | 4210 | return; |
| 4177 | } | ||
| 4178 | 4211 | ||
| 4179 | if (I_IXOFF(tty)) { | 4212 | if (I_IXOFF(tty)) { |
| 4180 | if (info->x_char) | 4213 | if (info->x_char) |
| @@ -4269,47 +4302,14 @@ static void cy_start(struct tty_struct *tty) | |||
| 4269 | base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); | 4302 | base_addr = cinfo->base_addr + (cy_chip_offset[chip] << index); |
| 4270 | 4303 | ||
| 4271 | spin_lock_irqsave(&cinfo->card_lock, flags); | 4304 | spin_lock_irqsave(&cinfo->card_lock, flags); |
| 4272 | cy_writeb(base_addr + (CyCAR << index), (u_char) (channel & 0x0003)); /* index channel */ | 4305 | cy_writeb(base_addr + (CyCAR << index), |
| 4306 | (u_char) (channel & 0x0003)); /* index channel */ | ||
| 4273 | cy_writeb(base_addr + (CySRER << index), | 4307 | cy_writeb(base_addr + (CySRER << index), |
| 4274 | readb(base_addr + (CySRER << index)) | CyTxRdy); | 4308 | readb(base_addr + (CySRER << index)) | CyTxRdy); |
| 4275 | spin_unlock_irqrestore(&cinfo->card_lock, flags); | 4309 | spin_unlock_irqrestore(&cinfo->card_lock, flags); |
| 4276 | } | 4310 | } |
| 4277 | } /* cy_start */ | 4311 | } /* cy_start */ |
| 4278 | 4312 | ||
| 4279 | static void cy_flush_buffer(struct tty_struct *tty) | ||
| 4280 | { | ||
| 4281 | struct cyclades_port *info = tty->driver_data; | ||
| 4282 | struct cyclades_card *card; | ||
| 4283 | int channel, retval; | ||
| 4284 | unsigned long flags; | ||
| 4285 | |||
| 4286 | #ifdef CY_DEBUG_IO | ||
| 4287 | printk(KERN_DEBUG "cyc:cy_flush_buffer ttyC%d\n", info->line); | ||
| 4288 | #endif | ||
| 4289 | |||
| 4290 | if (serial_paranoia_check(info, tty->name, "cy_flush_buffer")) | ||
| 4291 | return; | ||
| 4292 | |||
| 4293 | card = info->card; | ||
| 4294 | channel = info->line - card->first_line; | ||
| 4295 | |||
| 4296 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 4297 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | ||
| 4298 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 4299 | |||
| 4300 | if (IS_CYC_Z(*card)) { /* If it is a Z card, flush the on-board | ||
| 4301 | buffers as well */ | ||
| 4302 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 4303 | retval = cyz_issue_cmd(card, channel, C_CM_FLUSH_TX, 0L); | ||
| 4304 | if (retval != 0) { | ||
| 4305 | printk(KERN_ERR "cyc: flush_buffer retval on ttyC%d " | ||
| 4306 | "was %x\n", info->line, retval); | ||
| 4307 | } | ||
| 4308 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 4309 | } | ||
| 4310 | tty_wakeup(tty); | ||
| 4311 | } /* cy_flush_buffer */ | ||
| 4312 | |||
| 4313 | /* | 4313 | /* |
| 4314 | * cy_hangup() --- called by tty_hangup() when a hangup is signaled. | 4314 | * cy_hangup() --- called by tty_hangup() when a hangup is signaled. |
| 4315 | */ | 4315 | */ |
| @@ -4406,10 +4406,11 @@ static int __devinit cy_init_card(struct cyclades_card *cinfo) | |||
| 4406 | info->cor3 = 0x08; /* _very_ small rcv threshold */ | 4406 | info->cor3 = 0x08; /* _very_ small rcv threshold */ |
| 4407 | 4407 | ||
| 4408 | chip_number = (port - cinfo->first_line) / 4; | 4408 | chip_number = (port - cinfo->first_line) / 4; |
| 4409 | if ((info->chip_rev = readb(cinfo->base_addr + | 4409 | info->chip_rev = readb(cinfo->base_addr + |
| 4410 | (cy_chip_offset[chip_number] << | 4410 | (cy_chip_offset[chip_number] << index) + |
| 4411 | index) + (CyGFRCR << index))) >= | 4411 | (CyGFRCR << index)); |
| 4412 | CD1400_REV_J) { | 4412 | |
| 4413 | if (info->chip_rev >= CD1400_REV_J) { | ||
| 4413 | /* It is a CD1400 rev. J or later */ | 4414 | /* It is a CD1400 rev. J or later */ |
| 4414 | info->tbpr = baud_bpr_60[13]; /* Tx BPR */ | 4415 | info->tbpr = baud_bpr_60[13]; /* Tx BPR */ |
| 4415 | info->tco = baud_co_60[13]; /* Tx CO */ | 4416 | info->tco = baud_co_60[13]; /* Tx CO */ |
| @@ -4454,7 +4455,8 @@ static unsigned short __devinit cyy_init_card(void __iomem *true_base_addr, | |||
| 4454 | /* Cy_ClrIntr is 0x1800 */ | 4455 | /* Cy_ClrIntr is 0x1800 */ |
| 4455 | udelay(500L); | 4456 | udelay(500L); |
| 4456 | 4457 | ||
| 4457 | for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; chip_number++) { | 4458 | for (chip_number = 0; chip_number < CyMAX_CHIPS_PER_CARD; |
| 4459 | chip_number++) { | ||
| 4458 | base_addr = | 4460 | base_addr = |
| 4459 | true_base_addr + (cy_chip_offset[chip_number] << index); | 4461 | true_base_addr + (cy_chip_offset[chip_number] << index); |
| 4460 | mdelay(1); | 4462 | mdelay(1); |
| @@ -4555,12 +4557,11 @@ static int __init cy_detect_isa(void) | |||
| 4555 | /* scan the address table probing for Cyclom-Y/ISA boards */ | 4557 | /* scan the address table probing for Cyclom-Y/ISA boards */ |
| 4556 | for (i = 0; i < NR_ISA_ADDRS; i++) { | 4558 | for (i = 0; i < NR_ISA_ADDRS; i++) { |
| 4557 | unsigned int isa_address = cy_isa_addresses[i]; | 4559 | unsigned int isa_address = cy_isa_addresses[i]; |
| 4558 | if (isa_address == 0x0000) { | 4560 | if (isa_address == 0x0000) |
| 4559 | return nboard; | 4561 | return nboard; |
| 4560 | } | ||
| 4561 | 4562 | ||
| 4562 | /* probe for CD1400... */ | 4563 | /* probe for CD1400... */ |
| 4563 | cy_isa_address = ioremap(isa_address, CyISA_Ywin); | 4564 | cy_isa_address = ioremap_nocache(isa_address, CyISA_Ywin); |
| 4564 | if (cy_isa_address == NULL) { | 4565 | if (cy_isa_address == NULL) { |
| 4565 | printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " | 4566 | printk(KERN_ERR "Cyclom-Y/ISA: can't remap base " |
| 4566 | "address\n"); | 4567 | "address\n"); |
| @@ -4847,12 +4848,10 @@ static int __devinit cyz_load_fw(struct pci_dev *pdev, void __iomem *base_addr, | |||
| 4847 | if (mailbox != 0) { | 4848 | if (mailbox != 0) { |
| 4848 | /* set window to last 512K of RAM */ | 4849 | /* set window to last 512K of RAM */ |
| 4849 | cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE); | 4850 | cy_writel(&ctl_addr->loc_addr_base, WIN_RAM + RAM_SIZE); |
| 4850 | //sleep(1); | ||
| 4851 | for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) | 4851 | for (tmp = base_addr; tmp < base_addr + RAM_SIZE; tmp++) |
| 4852 | cy_writeb(tmp, 255); | 4852 | cy_writeb(tmp, 255); |
| 4853 | /* set window to beginning of RAM */ | 4853 | /* set window to beginning of RAM */ |
| 4854 | cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); | 4854 | cy_writel(&ctl_addr->loc_addr_base, WIN_RAM); |
| 4855 | //sleep(1); | ||
| 4856 | } | 4855 | } |
| 4857 | 4856 | ||
| 4858 | retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL); | 4857 | retval = __cyz_load_fw(fw, "Cyclom-Z", mailbox, base_addr, NULL); |
| @@ -5382,7 +5381,8 @@ static void __exit cy_cleanup_module(void) | |||
| 5382 | del_timer_sync(&cyz_timerlist); | 5381 | del_timer_sync(&cyz_timerlist); |
| 5383 | #endif /* CONFIG_CYZ_INTR */ | 5382 | #endif /* CONFIG_CYZ_INTR */ |
| 5384 | 5383 | ||
| 5385 | if ((e1 = tty_unregister_driver(cy_serial_driver))) | 5384 | e1 = tty_unregister_driver(cy_serial_driver); |
| 5385 | if (e1) | ||
| 5386 | printk(KERN_ERR "failed to unregister Cyclades serial " | 5386 | printk(KERN_ERR "failed to unregister Cyclades serial " |
| 5387 | "driver(%d)\n", e1); | 5387 | "driver(%d)\n", e1); |
| 5388 | 5388 | ||
diff --git a/drivers/char/drm/drmP.h b/drivers/char/drm/drmP.h index ecee3547a13f..213b3ca3468e 100644 --- a/drivers/char/drm/drmP.h +++ b/drivers/char/drm/drmP.h | |||
| @@ -160,7 +160,7 @@ struct drm_device; | |||
| 160 | * \param arg arguments | 160 | * \param arg arguments |
| 161 | */ | 161 | */ |
| 162 | #define DRM_ERROR(fmt, arg...) \ | 162 | #define DRM_ERROR(fmt, arg...) \ |
| 163 | printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __FUNCTION__ , ##arg) | 163 | printk(KERN_ERR "[" DRM_NAME ":%s] *ERROR* " fmt , __func__ , ##arg) |
| 164 | 164 | ||
| 165 | /** | 165 | /** |
| 166 | * Memory error output. | 166 | * Memory error output. |
| @@ -170,7 +170,7 @@ struct drm_device; | |||
| 170 | * \param arg arguments | 170 | * \param arg arguments |
| 171 | */ | 171 | */ |
| 172 | #define DRM_MEM_ERROR(area, fmt, arg...) \ | 172 | #define DRM_MEM_ERROR(area, fmt, arg...) \ |
| 173 | printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __FUNCTION__, \ | 173 | printk(KERN_ERR "[" DRM_NAME ":%s:%s] *ERROR* " fmt , __func__, \ |
| 174 | drm_mem_stats[area].name , ##arg) | 174 | drm_mem_stats[area].name , ##arg) |
| 175 | 175 | ||
| 176 | #define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) | 176 | #define DRM_INFO(fmt, arg...) printk(KERN_INFO "[" DRM_NAME "] " fmt , ##arg) |
| @@ -187,7 +187,7 @@ struct drm_device; | |||
| 187 | if ( drm_debug ) \ | 187 | if ( drm_debug ) \ |
| 188 | printk(KERN_DEBUG \ | 188 | printk(KERN_DEBUG \ |
| 189 | "[" DRM_NAME ":%s] " fmt , \ | 189 | "[" DRM_NAME ":%s] " fmt , \ |
| 190 | __FUNCTION__ , ##arg); \ | 190 | __func__ , ##arg); \ |
| 191 | } while (0) | 191 | } while (0) |
| 192 | #else | 192 | #else |
| 193 | #define DRM_DEBUG(fmt, arg...) do { } while (0) | 193 | #define DRM_DEBUG(fmt, arg...) do { } while (0) |
| @@ -238,7 +238,7 @@ do { \ | |||
| 238 | if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ | 238 | if ( !_DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ) || \ |
| 239 | dev->lock.file_priv != file_priv ) { \ | 239 | dev->lock.file_priv != file_priv ) { \ |
| 240 | DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ | 240 | DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\ |
| 241 | __FUNCTION__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ | 241 | __func__, _DRM_LOCK_IS_HELD( dev->lock.hw_lock->lock ),\ |
| 242 | dev->lock.file_priv, file_priv ); \ | 242 | dev->lock.file_priv, file_priv ); \ |
| 243 | return -EINVAL; \ | 243 | return -EINVAL; \ |
| 244 | } \ | 244 | } \ |
diff --git a/drivers/char/drm/drm_sysfs.c b/drivers/char/drm/drm_sysfs.c index 7a1d9a782ddb..9a32169e88fb 100644 --- a/drivers/char/drm/drm_sysfs.c +++ b/drivers/char/drm/drm_sysfs.c | |||
| @@ -34,7 +34,7 @@ static int drm_sysfs_suspend(struct device *dev, pm_message_t state) | |||
| 34 | struct drm_minor *drm_minor = to_drm_minor(dev); | 34 | struct drm_minor *drm_minor = to_drm_minor(dev); |
| 35 | struct drm_device *drm_dev = drm_minor->dev; | 35 | struct drm_device *drm_dev = drm_minor->dev; |
| 36 | 36 | ||
| 37 | printk(KERN_ERR "%s\n", __FUNCTION__); | 37 | printk(KERN_ERR "%s\n", __func__); |
| 38 | 38 | ||
| 39 | if (drm_dev->driver->suspend) | 39 | if (drm_dev->driver->suspend) |
| 40 | return drm_dev->driver->suspend(drm_dev, state); | 40 | return drm_dev->driver->suspend(drm_dev, state); |
diff --git a/drivers/char/drm/i830_dma.c b/drivers/char/drm/i830_dma.c index 60c9376be486..a86ab30b4620 100644 --- a/drivers/char/drm/i830_dma.c +++ b/drivers/char/drm/i830_dma.c | |||
| @@ -692,7 +692,7 @@ static void i830EmitState(struct drm_device * dev) | |||
| 692 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; | 692 | drm_i830_sarea_t *sarea_priv = dev_priv->sarea_priv; |
| 693 | unsigned int dirty = sarea_priv->dirty; | 693 | unsigned int dirty = sarea_priv->dirty; |
| 694 | 694 | ||
| 695 | DRM_DEBUG("%s %x\n", __FUNCTION__, dirty); | 695 | DRM_DEBUG("%s %x\n", __func__, dirty); |
| 696 | 696 | ||
| 697 | if (dirty & I830_UPLOAD_BUFFERS) { | 697 | if (dirty & I830_UPLOAD_BUFFERS) { |
| 698 | i830EmitDestVerified(dev, sarea_priv->BufferState); | 698 | i830EmitDestVerified(dev, sarea_priv->BufferState); |
| @@ -1043,7 +1043,7 @@ static void i830_dma_dispatch_flip(struct drm_device * dev) | |||
| 1043 | RING_LOCALS; | 1043 | RING_LOCALS; |
| 1044 | 1044 | ||
| 1045 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", | 1045 | DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n", |
| 1046 | __FUNCTION__, | 1046 | __func__, |
| 1047 | dev_priv->current_page, | 1047 | dev_priv->current_page, |
| 1048 | dev_priv->sarea_priv->pf_current_page); | 1048 | dev_priv->sarea_priv->pf_current_page); |
| 1049 | 1049 | ||
| @@ -1206,7 +1206,7 @@ static void i830_dma_quiescent(struct drm_device * dev) | |||
| 1206 | OUT_RING(0); | 1206 | OUT_RING(0); |
| 1207 | ADVANCE_LP_RING(); | 1207 | ADVANCE_LP_RING(); |
| 1208 | 1208 | ||
| 1209 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); | 1209 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); |
| 1210 | } | 1210 | } |
| 1211 | 1211 | ||
| 1212 | static int i830_flush_queue(struct drm_device * dev) | 1212 | static int i830_flush_queue(struct drm_device * dev) |
| @@ -1223,7 +1223,7 @@ static int i830_flush_queue(struct drm_device * dev) | |||
| 1223 | OUT_RING(0); | 1223 | OUT_RING(0); |
| 1224 | ADVANCE_LP_RING(); | 1224 | ADVANCE_LP_RING(); |
| 1225 | 1225 | ||
| 1226 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); | 1226 | i830_wait_ring(dev, dev_priv->ring.Size - 8, __func__); |
| 1227 | 1227 | ||
| 1228 | for (i = 0; i < dma->buf_count; i++) { | 1228 | for (i = 0; i < dma->buf_count; i++) { |
| 1229 | struct drm_buf *buf = dma->buflist[i]; | 1229 | struct drm_buf *buf = dma->buflist[i]; |
| @@ -1344,7 +1344,7 @@ static void i830_do_init_pageflip(struct drm_device * dev) | |||
| 1344 | { | 1344 | { |
| 1345 | drm_i830_private_t *dev_priv = dev->dev_private; | 1345 | drm_i830_private_t *dev_priv = dev->dev_private; |
| 1346 | 1346 | ||
| 1347 | DRM_DEBUG("%s\n", __FUNCTION__); | 1347 | DRM_DEBUG("%s\n", __func__); |
| 1348 | dev_priv->page_flipping = 1; | 1348 | dev_priv->page_flipping = 1; |
| 1349 | dev_priv->current_page = 0; | 1349 | dev_priv->current_page = 0; |
| 1350 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; | 1350 | dev_priv->sarea_priv->pf_current_page = dev_priv->current_page; |
| @@ -1354,7 +1354,7 @@ static int i830_do_cleanup_pageflip(struct drm_device * dev) | |||
| 1354 | { | 1354 | { |
| 1355 | drm_i830_private_t *dev_priv = dev->dev_private; | 1355 | drm_i830_private_t *dev_priv = dev->dev_private; |
| 1356 | 1356 | ||
| 1357 | DRM_DEBUG("%s\n", __FUNCTION__); | 1357 | DRM_DEBUG("%s\n", __func__); |
| 1358 | if (dev_priv->current_page != 0) | 1358 | if (dev_priv->current_page != 0) |
| 1359 | i830_dma_dispatch_flip(dev); | 1359 | i830_dma_dispatch_flip(dev); |
| 1360 | 1360 | ||
| @@ -1367,7 +1367,7 @@ static int i830_flip_bufs(struct drm_device *dev, void *data, | |||
| 1367 | { | 1367 | { |
| 1368 | drm_i830_private_t *dev_priv = dev->dev_private; | 1368 | drm_i830_private_t *dev_priv = dev->dev_private; |
| 1369 | 1369 | ||
| 1370 | DRM_DEBUG("%s\n", __FUNCTION__); | 1370 | DRM_DEBUG("%s\n", __func__); |
| 1371 | 1371 | ||
| 1372 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 1372 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
| 1373 | 1373 | ||
| @@ -1437,7 +1437,7 @@ static int i830_getparam(struct drm_device *dev, void *data, | |||
| 1437 | int value; | 1437 | int value; |
| 1438 | 1438 | ||
| 1439 | if (!dev_priv) { | 1439 | if (!dev_priv) { |
| 1440 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1440 | DRM_ERROR("%s called with no initialization\n", __func__); |
| 1441 | return -EINVAL; | 1441 | return -EINVAL; |
| 1442 | } | 1442 | } |
| 1443 | 1443 | ||
| @@ -1464,7 +1464,7 @@ static int i830_setparam(struct drm_device *dev, void *data, | |||
| 1464 | drm_i830_setparam_t *param = data; | 1464 | drm_i830_setparam_t *param = data; |
| 1465 | 1465 | ||
| 1466 | if (!dev_priv) { | 1466 | if (!dev_priv) { |
| 1467 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 1467 | DRM_ERROR("%s called with no initialization\n", __func__); |
| 1468 | return -EINVAL; | 1468 | return -EINVAL; |
| 1469 | } | 1469 | } |
| 1470 | 1470 | ||
diff --git a/drivers/char/drm/i830_drv.h b/drivers/char/drm/i830_drv.h index 4caba8c54455..b5bf8cc0fdaa 100644 --- a/drivers/char/drm/i830_drv.h +++ b/drivers/char/drm/i830_drv.h | |||
| @@ -158,7 +158,7 @@ extern int i830_driver_device_is_agp(struct drm_device * dev); | |||
| 158 | if (I830_VERBOSE) \ | 158 | if (I830_VERBOSE) \ |
| 159 | printk("BEGIN_LP_RING(%d)\n", (n)); \ | 159 | printk("BEGIN_LP_RING(%d)\n", (n)); \ |
| 160 | if (dev_priv->ring.space < n*4) \ | 160 | if (dev_priv->ring.space < n*4) \ |
| 161 | i830_wait_ring(dev, n*4, __FUNCTION__); \ | 161 | i830_wait_ring(dev, n*4, __func__); \ |
| 162 | outcount = 0; \ | 162 | outcount = 0; \ |
| 163 | outring = dev_priv->ring.tail; \ | 163 | outring = dev_priv->ring.tail; \ |
| 164 | ringmask = dev_priv->ring.tail_mask; \ | 164 | ringmask = dev_priv->ring.tail_mask; \ |
diff --git a/drivers/char/drm/i830_irq.c b/drivers/char/drm/i830_irq.c index a33db5f0967f..91ec2bb497e9 100644 --- a/drivers/char/drm/i830_irq.c +++ b/drivers/char/drm/i830_irq.c | |||
| @@ -58,7 +58,7 @@ static int i830_emit_irq(struct drm_device * dev) | |||
| 58 | drm_i830_private_t *dev_priv = dev->dev_private; | 58 | drm_i830_private_t *dev_priv = dev->dev_private; |
| 59 | RING_LOCALS; | 59 | RING_LOCALS; |
| 60 | 60 | ||
| 61 | DRM_DEBUG("%s\n", __FUNCTION__); | 61 | DRM_DEBUG("%s\n", __func__); |
| 62 | 62 | ||
| 63 | atomic_inc(&dev_priv->irq_emitted); | 63 | atomic_inc(&dev_priv->irq_emitted); |
| 64 | 64 | ||
| @@ -77,7 +77,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr) | |||
| 77 | unsigned long end = jiffies + HZ * 3; | 77 | unsigned long end = jiffies + HZ * 3; |
| 78 | int ret = 0; | 78 | int ret = 0; |
| 79 | 79 | ||
| 80 | DRM_DEBUG("%s\n", __FUNCTION__); | 80 | DRM_DEBUG("%s\n", __func__); |
| 81 | 81 | ||
| 82 | if (atomic_read(&dev_priv->irq_received) >= irq_nr) | 82 | if (atomic_read(&dev_priv->irq_received) >= irq_nr) |
| 83 | return 0; | 83 | return 0; |
| @@ -124,7 +124,7 @@ int i830_irq_emit(struct drm_device *dev, void *data, | |||
| 124 | LOCK_TEST_WITH_RETURN(dev, file_priv); | 124 | LOCK_TEST_WITH_RETURN(dev, file_priv); |
| 125 | 125 | ||
| 126 | if (!dev_priv) { | 126 | if (!dev_priv) { |
| 127 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 127 | DRM_ERROR("%s called with no initialization\n", __func__); |
| 128 | return -EINVAL; | 128 | return -EINVAL; |
| 129 | } | 129 | } |
| 130 | 130 | ||
| @@ -147,7 +147,7 @@ int i830_irq_wait(struct drm_device *dev, void *data, | |||
| 147 | drm_i830_irq_wait_t *irqwait = data; | 147 | drm_i830_irq_wait_t *irqwait = data; |
| 148 | 148 | ||
| 149 | if (!dev_priv) { | 149 | if (!dev_priv) { |
| 150 | DRM_ERROR("%s called with no initialization\n", __FUNCTION__); | 150 | DRM_ERROR("%s called with no initialization\n", __func__); |
| 151 | return -EINVAL; | 151 | return -EINVAL; |
| 152 | } | 152 | } |
| 153 | 153 | ||
diff --git a/drivers/char/drm/i915_dma.c b/drivers/char/drm/i915_dma.c index ef7bf143a80c..f47e46e3529f 100644 --- a/drivers/char/drm/i915_dma.c +++ b/drivers/char/drm/i915_dma.c | |||
| @@ -194,7 +194,7 @@ static int i915_dma_resume(struct drm_device * dev) | |||
| 194 | { | 194 | { |
| 195 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 195 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
| 196 | 196 | ||
| 197 | DRM_DEBUG("%s\n", __FUNCTION__); | 197 | DRM_DEBUG("%s\n", __func__); |
| 198 | 198 | ||
| 199 | if (!dev_priv->sarea) { | 199 | if (!dev_priv->sarea) { |
| 200 | DRM_ERROR("can not find sarea!\n"); | 200 | DRM_ERROR("can not find sarea!\n"); |
| @@ -609,7 +609,7 @@ static int i915_quiescent(struct drm_device * dev) | |||
| 609 | drm_i915_private_t *dev_priv = dev->dev_private; | 609 | drm_i915_private_t *dev_priv = dev->dev_private; |
| 610 | 610 | ||
| 611 | i915_kernel_lost_context(dev); | 611 | i915_kernel_lost_context(dev); |
| 612 | return i915_wait_ring(dev, dev_priv->ring.Size - 8, __FUNCTION__); | 612 | return i915_wait_ring(dev, dev_priv->ring.Size - 8, __func__); |
| 613 | } | 613 | } |
| 614 | 614 | ||
| 615 | static int i915_flush_ioctl(struct drm_device *dev, void *data, | 615 | static int i915_flush_ioctl(struct drm_device *dev, void *data, |
diff --git a/drivers/char/drm/i915_drv.h b/drivers/char/drm/i915_drv.h index c614d78b3dfd..db7001f22561 100644 --- a/drivers/char/drm/i915_drv.h +++ b/drivers/char/drm/i915_drv.h | |||
| @@ -272,7 +272,7 @@ extern void i915_mem_release(struct drm_device * dev, | |||
| 272 | if (I915_VERBOSE) \ | 272 | if (I915_VERBOSE) \ |
| 273 | DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ | 273 | DRM_DEBUG("BEGIN_LP_RING(%d)\n", (n)); \ |
| 274 | if (dev_priv->ring.space < (n)*4) \ | 274 | if (dev_priv->ring.space < (n)*4) \ |
| 275 | i915_wait_ring(dev, (n)*4, __FUNCTION__); \ | 275 | i915_wait_ring(dev, (n)*4, __func__); \ |
| 276 | outcount = 0; \ | 276 | outcount = 0; \ |
| 277 | outring = dev_priv->ring.tail; \ | 277 | outring = dev_priv->ring.tail; \ |
| 278 | ringmask = dev_priv->ring.tail_mask; \ | 278 | ringmask = dev_priv->ring.tail_mask; \ |
diff --git a/drivers/char/drm/radeon_cp.c b/drivers/char/drm/radeon_cp.c index 9072e4a1894e..f6f6c92bf771 100644 --- a/drivers/char/drm/radeon_cp.c +++ b/drivers/char/drm/radeon_cp.c | |||
| @@ -894,7 +894,7 @@ static u32 RADEON_READ_IGPGART(drm_radeon_private_t *dev_priv, int addr) | |||
| 894 | #if RADEON_FIFO_DEBUG | 894 | #if RADEON_FIFO_DEBUG |
| 895 | static void radeon_status(drm_radeon_private_t * dev_priv) | 895 | static void radeon_status(drm_radeon_private_t * dev_priv) |
| 896 | { | 896 | { |
| 897 | printk("%s:\n", __FUNCTION__); | 897 | printk("%s:\n", __func__); |
| 898 | printk("RBBM_STATUS = 0x%08x\n", | 898 | printk("RBBM_STATUS = 0x%08x\n", |
| 899 | (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); | 899 | (unsigned int)RADEON_READ(RADEON_RBBM_STATUS)); |
| 900 | printk("CP_RB_RTPR = 0x%08x\n", | 900 | printk("CP_RB_RTPR = 0x%08x\n", |
diff --git a/drivers/char/ds1286.c b/drivers/char/ds1286.c index 59146e3365ba..ea35ab2c9909 100644 --- a/drivers/char/ds1286.c +++ b/drivers/char/ds1286.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <linux/spinlock.h> | 39 | #include <linux/spinlock.h> |
| 40 | #include <linux/bcd.h> | 40 | #include <linux/bcd.h> |
| 41 | #include <linux/proc_fs.h> | 41 | #include <linux/proc_fs.h> |
| 42 | #include <linux/jiffies.h> | ||
| 42 | 43 | ||
| 43 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
| 44 | #include <asm/system.h> | 45 | #include <asm/system.h> |
| @@ -451,7 +452,7 @@ static void ds1286_get_time(struct rtc_time *rtc_tm) | |||
| 451 | */ | 452 | */ |
| 452 | 453 | ||
| 453 | if (ds1286_is_updating() != 0) | 454 | if (ds1286_is_updating() != 0) |
| 454 | while (jiffies - uip_watchdog < 2*HZ/100) | 455 | while (time_before(jiffies, uip_watchdog + 2*HZ/100)) |
| 455 | barrier(); | 456 | barrier(); |
| 456 | 457 | ||
| 457 | /* | 458 | /* |
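The ds1286 change above converts an open-coded jiffies comparison to time_before() from <linux/jiffies.h>, which stays correct across a counter wrap because it compares the signed difference of the two values rather than the raw values. A self-contained model of that idea; tick_before() is a hypothetical 32-bit adaptation for the demo, not the kernel macro itself:

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "is a before b" test modeled on time_before(), pinned to a
 * 32-bit counter so the demo behaves the same on any host. */
#define tick_before(a, b)	((int32_t)((uint32_t)(a) - (uint32_t)(b)) < 0)

int main(void)
{
	uint32_t start    = 0xfffffff8u;	/* counter when the wait began */
	uint32_t deadline = start + 16;		/* wraps around to 0x00000008 */
	uint32_t now      = 0xfffffffcu;	/* 4 ticks later, inside the window */

	/* A direct comparison is fooled by the wrap: now > deadline here. */
	printf("naive now < deadline: %d\n", now < deadline);		/* 0, wrong */

	/* The signed-difference form keeps working across the wrap. */
	printf("tick_before:          %d\n", tick_before(now, deadline));	/* 1, right */

	return 0;
}
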
diff --git a/drivers/char/epca.c b/drivers/char/epca.c index ffd747c5dff0..60a4df7dac12 100644 --- a/drivers/char/epca.c +++ b/drivers/char/epca.c | |||
| @@ -38,8 +38,8 @@ | |||
| 38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
| 39 | #include <linux/ioport.h> | 39 | #include <linux/ioport.h> |
| 40 | #include <linux/interrupt.h> | 40 | #include <linux/interrupt.h> |
| 41 | #include <asm/uaccess.h> | 41 | #include <linux/uaccess.h> |
| 42 | #include <asm/io.h> | 42 | #include <linux/io.h> |
| 43 | #include <linux/spinlock.h> | 43 | #include <linux/spinlock.h> |
| 44 | #include <linux/pci.h> | 44 | #include <linux/pci.h> |
| 45 | #include "digiPCI.h" | 45 | #include "digiPCI.h" |
| @@ -73,7 +73,8 @@ static int invalid_lilo_config; | |||
| 73 | */ | 73 | */ |
| 74 | static DEFINE_SPINLOCK(epca_lock); | 74 | static DEFINE_SPINLOCK(epca_lock); |
| 75 | 75 | ||
| 76 | /* MAXBOARDS is typically 12, but ISA and EISA cards are restricted to 7 below. */ | 76 | /* MAXBOARDS is typically 12, but ISA and EISA cards are restricted |
| 77 | to 7 below. */ | ||
| 77 | static struct board_info boards[MAXBOARDS]; | 78 | static struct board_info boards[MAXBOARDS]; |
| 78 | 79 | ||
| 79 | static struct tty_driver *pc_driver; | 80 | static struct tty_driver *pc_driver; |
| @@ -157,13 +158,12 @@ static void epca_error(int, char *); | |||
| 157 | static void pc_close(struct tty_struct *, struct file *); | 158 | static void pc_close(struct tty_struct *, struct file *); |
| 158 | static void shutdown(struct channel *); | 159 | static void shutdown(struct channel *); |
| 159 | static void pc_hangup(struct tty_struct *); | 160 | static void pc_hangup(struct tty_struct *); |
| 160 | static void pc_put_char(struct tty_struct *, unsigned char); | ||
| 161 | static int pc_write_room(struct tty_struct *); | 161 | static int pc_write_room(struct tty_struct *); |
| 162 | static int pc_chars_in_buffer(struct tty_struct *); | 162 | static int pc_chars_in_buffer(struct tty_struct *); |
| 163 | static void pc_flush_buffer(struct tty_struct *); | 163 | static void pc_flush_buffer(struct tty_struct *); |
| 164 | static void pc_flush_chars(struct tty_struct *); | 164 | static void pc_flush_chars(struct tty_struct *); |
| 165 | static int block_til_ready(struct tty_struct *, struct file *, | 165 | static int block_til_ready(struct tty_struct *, struct file *, |
| 166 | struct channel *); | 166 | struct channel *); |
| 167 | static int pc_open(struct tty_struct *, struct file *); | 167 | static int pc_open(struct tty_struct *, struct file *); |
| 168 | static void post_fep_init(unsigned int crd); | 168 | static void post_fep_init(unsigned int crd); |
| 169 | static void epcapoll(unsigned long); | 169 | static void epcapoll(unsigned long); |
| @@ -175,18 +175,18 @@ static unsigned termios2digi_c(struct channel *ch, unsigned); | |||
| 175 | static void epcaparam(struct tty_struct *, struct channel *); | 175 | static void epcaparam(struct tty_struct *, struct channel *); |
| 176 | static void receive_data(struct channel *); | 176 | static void receive_data(struct channel *); |
| 177 | static int pc_ioctl(struct tty_struct *, struct file *, | 177 | static int pc_ioctl(struct tty_struct *, struct file *, |
| 178 | unsigned int, unsigned long); | 178 | unsigned int, unsigned long); |
| 179 | static int info_ioctl(struct tty_struct *, struct file *, | 179 | static int info_ioctl(struct tty_struct *, struct file *, |
| 180 | unsigned int, unsigned long); | 180 | unsigned int, unsigned long); |
| 181 | static void pc_set_termios(struct tty_struct *, struct ktermios *); | 181 | static void pc_set_termios(struct tty_struct *, struct ktermios *); |
| 182 | static void do_softint(struct work_struct *work); | 182 | static void do_softint(struct work_struct *work); |
| 183 | static void pc_stop(struct tty_struct *); | 183 | static void pc_stop(struct tty_struct *); |
| 184 | static void pc_start(struct tty_struct *); | 184 | static void pc_start(struct tty_struct *); |
| 185 | static void pc_throttle(struct tty_struct * tty); | 185 | static void pc_throttle(struct tty_struct *tty); |
| 186 | static void pc_unthrottle(struct tty_struct *tty); | 186 | static void pc_unthrottle(struct tty_struct *tty); |
| 187 | static void digi_send_break(struct channel *ch, int msec); | 187 | static void digi_send_break(struct channel *ch, int msec); |
| 188 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); | 188 | static void setup_empty_event(struct tty_struct *tty, struct channel *ch); |
| 189 | void epca_setup(char *, int *); | 189 | static void epca_setup(char *, int *); |
| 190 | 190 | ||
| 191 | static int pc_write(struct tty_struct *, const unsigned char *, int); | 191 | static int pc_write(struct tty_struct *, const unsigned char *, int); |
| 192 | static int pc_init(void); | 192 | static int pc_init(void); |
| @@ -243,7 +243,7 @@ static void assertmemoff(struct channel *ch) | |||
| 243 | /* PCXEM windowing is the same as that used in the PCXR and CX series cards. */ | 243 | /* PCXEM windowing is the same as that used in the PCXR and CX series cards. */ |
| 244 | static void pcxem_memwinon(struct board_info *b, unsigned int win) | 244 | static void pcxem_memwinon(struct board_info *b, unsigned int win) |
| 245 | { | 245 | { |
| 246 | outb_p(FEPWIN|win, b->port + 1); | 246 | outb_p(FEPWIN | win, b->port + 1); |
| 247 | } | 247 | } |
| 248 | 248 | ||
| 249 | static void pcxem_memwinoff(struct board_info *b, unsigned int win) | 249 | static void pcxem_memwinoff(struct board_info *b, unsigned int win) |
| @@ -253,7 +253,7 @@ static void pcxem_memwinoff(struct board_info *b, unsigned int win) | |||
| 253 | 253 | ||
| 254 | static void pcxem_globalwinon(struct channel *ch) | 254 | static void pcxem_globalwinon(struct channel *ch) |
| 255 | { | 255 | { |
| 256 | outb_p( FEPWIN, (int)ch->board->port + 1); | 256 | outb_p(FEPWIN, (int)ch->board->port + 1); |
| 257 | } | 257 | } |
| 258 | 258 | ||
| 259 | static void pcxem_rxwinon(struct channel *ch) | 259 | static void pcxem_rxwinon(struct channel *ch) |
| @@ -394,7 +394,7 @@ static struct channel *verifyChannel(struct tty_struct *tty) | |||
| 394 | */ | 394 | */ |
| 395 | if (tty) { | 395 | if (tty) { |
| 396 | struct channel *ch = (struct channel *)tty->driver_data; | 396 | struct channel *ch = (struct channel *)tty->driver_data; |
| 397 | if ((ch >= &digi_channels[0]) && (ch < &digi_channels[nbdevs])) { | 397 | if (ch >= &digi_channels[0] && ch < &digi_channels[nbdevs]) { |
| 398 | if (ch->magic == EPCA_MAGIC) | 398 | if (ch->magic == EPCA_MAGIC) |
| 399 | return ch; | 399 | return ch; |
| 400 | } | 400 | } |
| @@ -414,7 +414,7 @@ static void pc_sched_event(struct channel *ch, int event) | |||
| 414 | 414 | ||
| 415 | static void epca_error(int line, char *msg) | 415 | static void epca_error(int line, char *msg) |
| 416 | { | 416 | { |
| 417 | printk(KERN_ERR "epca_error (Digi): line = %d %s\n",line,msg); | 417 | printk(KERN_ERR "epca_error (Digi): line = %d %s\n", line, msg); |
| 418 | } | 418 | } |
| 419 | 419 | ||
| 420 | static void pc_close(struct tty_struct *tty, struct file *filp) | 420 | static void pc_close(struct tty_struct *tty, struct file *filp) |
| @@ -425,7 +425,8 @@ static void pc_close(struct tty_struct *tty, struct file *filp) | |||
| 425 | * verifyChannel returns the channel from the tty struct if it is | 425 | * verifyChannel returns the channel from the tty struct if it is |
| 426 | * valid. This serves as a sanity check. | 426 | * valid. This serves as a sanity check. |
| 427 | */ | 427 | */ |
| 428 | if ((ch = verifyChannel(tty)) != NULL) { | 428 | ch = verifyChannel(tty); |
| 429 | if (ch != NULL) { | ||
| 429 | spin_lock_irqsave(&epca_lock, flags); | 430 | spin_lock_irqsave(&epca_lock, flags); |
| 430 | if (tty_hung_up_p(filp)) { | 431 | if (tty_hung_up_p(filp)) { |
| 431 | spin_unlock_irqrestore(&epca_lock, flags); | 432 | spin_unlock_irqrestore(&epca_lock, flags); |
| @@ -440,7 +441,6 @@ static void pc_close(struct tty_struct *tty, struct file *filp) | |||
| 440 | spin_unlock_irqrestore(&epca_lock, flags); | 441 | spin_unlock_irqrestore(&epca_lock, flags); |
| 441 | return; | 442 | return; |
| 442 | } | 443 | } |
| 443 | |||
| 444 | /* Port open only once go ahead with shutdown & reset */ | 444 | /* Port open only once go ahead with shutdown & reset */ |
| 445 | BUG_ON(ch->count < 0); | 445 | BUG_ON(ch->count < 0); |
| 446 | 446 | ||
| @@ -455,12 +455,13 @@ static void pc_close(struct tty_struct *tty, struct file *filp) | |||
| 455 | spin_unlock_irqrestore(&epca_lock, flags); | 455 | spin_unlock_irqrestore(&epca_lock, flags); |
| 456 | 456 | ||
| 457 | if (ch->asyncflags & ASYNC_INITIALIZED) { | 457 | if (ch->asyncflags & ASYNC_INITIALIZED) { |
| 458 | /* Setup an event to indicate when the transmit buffer empties */ | 458 | /* Setup an event to indicate when the |
| 459 | transmit buffer empties */ | ||
| 459 | setup_empty_event(tty, ch); | 460 | setup_empty_event(tty, ch); |
| 460 | tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ | 461 | /* 30 seconds timeout */ |
| 462 | tty_wait_until_sent(tty, 3000); | ||
| 461 | } | 463 | } |
| 462 | if (tty->driver->flush_buffer) | 464 | pc_flush_buffer(tty); |
| 463 | tty->driver->flush_buffer(tty); | ||
| 464 | 465 | ||
| 465 | tty_ldisc_flush(tty); | 466 | tty_ldisc_flush(tty); |
| 466 | shutdown(ch); | 467 | shutdown(ch); |
| @@ -477,7 +478,7 @@ static void pc_close(struct tty_struct *tty, struct file *filp) | |||
| 477 | wake_up_interruptible(&ch->open_wait); | 478 | wake_up_interruptible(&ch->open_wait); |
| 478 | } | 479 | } |
| 479 | ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | | 480 | ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_INITIALIZED | |
| 480 | ASYNC_CLOSING); | 481 | ASYNC_CLOSING); |
| 481 | wake_up_interruptible(&ch->close_wait); | 482 | wake_up_interruptible(&ch->close_wait); |
| 482 | } | 483 | } |
| 483 | } | 484 | } |
| @@ -524,16 +525,15 @@ static void shutdown(struct channel *ch) | |||
| 524 | static void pc_hangup(struct tty_struct *tty) | 525 | static void pc_hangup(struct tty_struct *tty) |
| 525 | { | 526 | { |
| 526 | struct channel *ch; | 527 | struct channel *ch; |
| 527 | |||
| 528 | /* | 528 | /* |
| 529 | * verifyChannel returns the channel from the tty struct if it is | 529 | * verifyChannel returns the channel from the tty struct if it is |
| 530 | * valid. This serves as a sanity check. | 530 | * valid. This serves as a sanity check. |
| 531 | */ | 531 | */ |
| 532 | if ((ch = verifyChannel(tty)) != NULL) { | 532 | ch = verifyChannel(tty); |
| 533 | if (ch != NULL) { | ||
| 533 | unsigned long flags; | 534 | unsigned long flags; |
| 534 | 535 | ||
| 535 | if (tty->driver->flush_buffer) | 536 | pc_flush_buffer(tty); |
| 536 | tty->driver->flush_buffer(tty); | ||
| 537 | tty_ldisc_flush(tty); | 537 | tty_ldisc_flush(tty); |
| 538 | shutdown(ch); | 538 | shutdown(ch); |
| 539 | 539 | ||
| @@ -548,7 +548,7 @@ static void pc_hangup(struct tty_struct *tty) | |||
| 548 | } | 548 | } |
| 549 | 549 | ||
| 550 | static int pc_write(struct tty_struct *tty, | 550 | static int pc_write(struct tty_struct *tty, |
| 551 | const unsigned char *buf, int bytesAvailable) | 551 | const unsigned char *buf, int bytesAvailable) |
| 552 | { | 552 | { |
| 553 | unsigned int head, tail; | 553 | unsigned int head, tail; |
| 554 | int dataLen; | 554 | int dataLen; |
| @@ -572,7 +572,8 @@ static int pc_write(struct tty_struct *tty, | |||
| 572 | * verifyChannel returns the channel from the tty struct if it is | 572 | * verifyChannel returns the channel from the tty struct if it is |
| 573 | * valid. This serves as a sanity check. | 573 | * valid. This serves as a sanity check. |
| 574 | */ | 574 | */ |
| 575 | if ((ch = verifyChannel(tty)) == NULL) | 575 | ch = verifyChannel(tty); |
| 576 | if (ch == NULL) | ||
| 576 | return 0; | 577 | return 0; |
| 577 | 578 | ||
| 578 | /* Make a pointer to the channel data structure found on the board. */ | 579 | /* Make a pointer to the channel data structure found on the board. */ |
| @@ -645,26 +646,19 @@ static int pc_write(struct tty_struct *tty, | |||
| 645 | return amountCopied; | 646 | return amountCopied; |
| 646 | } | 647 | } |
| 647 | 648 | ||
| 648 | static void pc_put_char(struct tty_struct *tty, unsigned char c) | ||
| 649 | { | ||
| 650 | pc_write(tty, &c, 1); | ||
| 651 | } | ||
| 652 | |||
| 653 | static int pc_write_room(struct tty_struct *tty) | 649 | static int pc_write_room(struct tty_struct *tty) |
| 654 | { | 650 | { |
| 655 | int remain; | 651 | int remain = 0; |
| 656 | struct channel *ch; | 652 | struct channel *ch; |
| 657 | unsigned long flags; | 653 | unsigned long flags; |
| 658 | unsigned int head, tail; | 654 | unsigned int head, tail; |
| 659 | struct board_chan __iomem *bc; | 655 | struct board_chan __iomem *bc; |
| 660 | |||
| 661 | remain = 0; | ||
| 662 | |||
| 663 | /* | 656 | /* |
| 664 | * verifyChannel returns the channel from the tty struct if it is | 657 | * verifyChannel returns the channel from the tty struct if it is |
| 665 | * valid. This serves as a sanity check. | 658 | * valid. This serves as a sanity check. |
| 666 | */ | 659 | */ |
| 667 | if ((ch = verifyChannel(tty)) != NULL) { | 660 | ch = verifyChannel(tty); |
| 661 | if (ch != NULL) { | ||
| 668 | spin_lock_irqsave(&epca_lock, flags); | 662 | spin_lock_irqsave(&epca_lock, flags); |
| 669 | globalwinon(ch); | 663 | globalwinon(ch); |
| 670 | 664 | ||
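The hunk above removes epca's pc_put_char(), which only forwarded a single byte to pc_write(tty, &c, 1); a driver that omits put_char can have single characters delivered through its write path instead. A stand-alone model of that fallback; the demo_* types and names are stand-ins, not the kernel's tty structures:

#include <stdio.h>

struct demo_tty;

struct demo_tty_ops {
	int (*put_char)(struct demo_tty *tty, unsigned char ch);	/* may be NULL */
	int (*write)(struct demo_tty *tty, const unsigned char *buf, int count);
};

struct demo_tty {
	const struct demo_tty_ops *ops;
};

/* Push one byte: use put_char if the driver provides it, otherwise
 * fall back to a one-byte write -- which is all pc_put_char() did. */
static int demo_put_char(struct demo_tty *tty, unsigned char ch)
{
	if (tty->ops->put_char)
		return tty->ops->put_char(tty, ch);
	return tty->ops->write(tty, &ch, 1);
}

static int demo_write(struct demo_tty *tty, const unsigned char *buf, int count)
{
	(void)tty;
	return (int)fwrite(buf, 1, count, stdout);
}

static const struct demo_tty_ops demo_ops = {
	.put_char = NULL,		/* driver no longer supplies one */
	.write    = demo_write,
};

int main(void)
{
	struct demo_tty tty = { .ops = &demo_ops };

	demo_put_char(&tty, 'x');	/* routed through demo_write() */
	putchar('\n');
	return 0;
}
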
| @@ -676,8 +670,8 @@ static int pc_write_room(struct tty_struct *tty) | |||
| 676 | tail = readw(&bc->tout); | 670 | tail = readw(&bc->tout); |
| 677 | /* Wrap tail if necessary */ | 671 | /* Wrap tail if necessary */ |
| 678 | tail &= (ch->txbufsize - 1); | 672 | tail &= (ch->txbufsize - 1); |
| 679 | 673 | remain = tail - head - 1; | |
| 680 | if ((remain = tail - head - 1) < 0 ) | 674 | if (remain < 0) |
| 681 | remain += ch->txbufsize; | 675 | remain += ch->txbufsize; |
| 682 | 676 | ||
| 683 | if (remain && (ch->statusflags & LOWWAIT) == 0) { | 677 | if (remain && (ch->statusflags & LOWWAIT) == 0) { |
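pc_write_room() above computes the free space in the card's circular transmit buffer: with head (tin) and tail (tout) kept modulo the buffer size, the room left is tail - head - 1, wrapped back into range when negative, so one slot always stays unused and a full buffer is distinguishable from an empty one. A small stand-alone check of that arithmetic; write_room() and BUFSZ are illustrative names:

#include <stdio.h>

#define BUFSZ	256	/* stands in for ch->txbufsize */

/* Free space in a circular buffer, same arithmetic as pc_write_room(). */
static int write_room(unsigned int head, unsigned int tail)
{
	int remain = (int)tail - (int)head - 1;

	if (remain < 0)
		remain += BUFSZ;
	return remain;
}

int main(void)
{
	printf("%d\n", write_room(0, 0));	/* empty: 255 usable slots */
	printf("%d\n", write_room(10, 200));	/* 189 */
	printf("%d\n", write_room(200, 10));	/* wrapped: 65 */
	return 0;
}
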
| @@ -699,12 +693,12 @@ static int pc_chars_in_buffer(struct tty_struct *tty) | |||
| 699 | unsigned long flags; | 693 | unsigned long flags; |
| 700 | struct channel *ch; | 694 | struct channel *ch; |
| 701 | struct board_chan __iomem *bc; | 695 | struct board_chan __iomem *bc; |
| 702 | |||
| 703 | /* | 696 | /* |
| 704 | * verifyChannel returns the channel from the tty struct if it is | 697 | * verifyChannel returns the channel from the tty struct if it is |
| 705 | * valid. This serves as a sanity check. | 698 | * valid. This serves as a sanity check. |
| 706 | */ | 699 | */ |
| 707 | if ((ch = verifyChannel(tty)) == NULL) | 700 | ch = verifyChannel(tty); |
| 701 | if (ch == NULL) | ||
| 708 | return 0; | 702 | return 0; |
| 709 | 703 | ||
| 710 | spin_lock_irqsave(&epca_lock, flags); | 704 | spin_lock_irqsave(&epca_lock, flags); |
| @@ -715,7 +709,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty) | |||
| 715 | head = readw(&bc->tin); | 709 | head = readw(&bc->tin); |
| 716 | ctail = readw(&ch->mailbox->cout); | 710 | ctail = readw(&ch->mailbox->cout); |
| 717 | 711 | ||
| 718 | if (tail == head && readw(&ch->mailbox->cin) == ctail && readb(&bc->tbusy) == 0) | 712 | if (tail == head && readw(&ch->mailbox->cin) == ctail && |
| 713 | readb(&bc->tbusy) == 0) | ||
| 719 | chars = 0; | 714 | chars = 0; |
| 720 | else { /* Begin if some space on the card has been used */ | 715 | else { /* Begin if some space on the card has been used */ |
| 721 | head = readw(&bc->tin) & (ch->txbufsize - 1); | 716 | head = readw(&bc->tin) & (ch->txbufsize - 1); |
| @@ -725,7 +720,8 @@ static int pc_chars_in_buffer(struct tty_struct *tty) | |||
| 725 | * pc_write_room here we are finding the amount of bytes in the | 720 | * pc_write_room here we are finding the amount of bytes in the |
| 726 | * buffer filled. Not the amount of bytes empty. | 721 | * buffer filled. Not the amount of bytes empty. |
| 727 | */ | 722 | */ |
| 728 | if ((remain = tail - head - 1) < 0 ) | 723 | remain = tail - head - 1; |
| 724 | if (remain < 0) | ||
| 729 | remain += ch->txbufsize; | 725 | remain += ch->txbufsize; |
| 730 | chars = (int)(ch->txbufsize - remain); | 726 | chars = (int)(ch->txbufsize - remain); |
| 731 | /* | 727 | /* |
| @@ -736,7 +732,7 @@ static int pc_chars_in_buffer(struct tty_struct *tty) | |||
| 736 | * transmit buffer empties. | 732 | * transmit buffer empties. |
| 737 | */ | 733 | */ |
| 738 | if (!(ch->statusflags & EMPTYWAIT)) | 734 | if (!(ch->statusflags & EMPTYWAIT)) |
| 739 | setup_empty_event(tty,ch); | 735 | setup_empty_event(tty, ch); |
| 740 | } /* End if some space on the card has been used */ | 736 | } /* End if some space on the card has been used */ |
| 741 | memoff(ch); | 737 | memoff(ch); |
| 742 | spin_unlock_irqrestore(&epca_lock, flags); | 738 | spin_unlock_irqrestore(&epca_lock, flags); |
| @@ -754,7 +750,8 @@ static void pc_flush_buffer(struct tty_struct *tty) | |||
| 754 | * verifyChannel returns the channel from the tty struct if it is | 750 | * verifyChannel returns the channel from the tty struct if it is |
| 755 | * valid. This serves as a sanity check. | 751 | * valid. This serves as a sanity check. |
| 756 | */ | 752 | */ |
| 757 | if ((ch = verifyChannel(tty)) == NULL) | 753 | ch = verifyChannel(tty); |
| 754 | if (ch == NULL) | ||
| 758 | return; | 755 | return; |
| 759 | 756 | ||
| 760 | spin_lock_irqsave(&epca_lock, flags); | 757 | spin_lock_irqsave(&epca_lock, flags); |
| @@ -775,23 +772,25 @@ static void pc_flush_chars(struct tty_struct *tty) | |||
| 775 | * verifyChannel returns the channel from the tty struct if it is | 772 | * verifyChannel returns the channel from the tty struct if it is |
| 776 | * valid. This serves as a sanity check. | 773 | * valid. This serves as a sanity check. |
| 777 | */ | 774 | */ |
| 778 | if ((ch = verifyChannel(tty)) != NULL) { | 775 | ch = verifyChannel(tty); |
| 776 | if (ch != NULL) { | ||
| 779 | unsigned long flags; | 777 | unsigned long flags; |
| 780 | spin_lock_irqsave(&epca_lock, flags); | 778 | spin_lock_irqsave(&epca_lock, flags); |
| 781 | /* | 779 | /* |
| 782 | * If not already set and the transmitter is busy setup an | 780 | * If not already set and the transmitter is busy setup an |
| 783 | * event to indicate when the transmit empties. | 781 | * event to indicate when the transmit empties. |
| 784 | */ | 782 | */ |
| 785 | if ((ch->statusflags & TXBUSY) && !(ch->statusflags & EMPTYWAIT)) | 783 | if ((ch->statusflags & TXBUSY) && |
| 786 | setup_empty_event(tty,ch); | 784 | !(ch->statusflags & EMPTYWAIT)) |
| 785 | setup_empty_event(tty, ch); | ||
| 787 | spin_unlock_irqrestore(&epca_lock, flags); | 786 | spin_unlock_irqrestore(&epca_lock, flags); |
| 788 | } | 787 | } |
| 789 | } | 788 | } |
| 790 | 789 | ||
| 791 | static int block_til_ready(struct tty_struct *tty, | 790 | static int block_til_ready(struct tty_struct *tty, |
| 792 | struct file *filp, struct channel *ch) | 791 | struct file *filp, struct channel *ch) |
| 793 | { | 792 | { |
| 794 | DECLARE_WAITQUEUE(wait,current); | 793 | DECLARE_WAITQUEUE(wait, current); |
| 795 | int retval, do_clocal = 0; | 794 | int retval, do_clocal = 0; |
| 796 | unsigned long flags; | 795 | unsigned long flags; |
| 797 | 796 | ||
| @@ -839,8 +838,7 @@ static int block_til_ready(struct tty_struct *tty, | |||
| 839 | while (1) { | 838 | while (1) { |
| 840 | set_current_state(TASK_INTERRUPTIBLE); | 839 | set_current_state(TASK_INTERRUPTIBLE); |
| 841 | if (tty_hung_up_p(filp) || | 840 | if (tty_hung_up_p(filp) || |
| 842 | !(ch->asyncflags & ASYNC_INITIALIZED)) | 841 | !(ch->asyncflags & ASYNC_INITIALIZED)) { |
| 843 | { | ||
| 844 | if (ch->asyncflags & ASYNC_HUP_NOTIFY) | 842 | if (ch->asyncflags & ASYNC_HUP_NOTIFY) |
| 845 | retval = -EAGAIN; | 843 | retval = -EAGAIN; |
| 846 | else | 844 | else |
| @@ -880,7 +878,7 @@ static int block_til_ready(struct tty_struct *tty, | |||
| 880 | return 0; | 878 | return 0; |
| 881 | } | 879 | } |
| 882 | 880 | ||
| 883 | static int pc_open(struct tty_struct *tty, struct file * filp) | 881 | static int pc_open(struct tty_struct *tty, struct file *filp) |
| 884 | { | 882 | { |
| 885 | struct channel *ch; | 883 | struct channel *ch; |
| 886 | unsigned long flags; | 884 | unsigned long flags; |
| @@ -923,7 +921,8 @@ static int pc_open(struct tty_struct *tty, struct file * filp) | |||
| 923 | return(-ENODEV); | 921 | return(-ENODEV); |
| 924 | } | 922 | } |
| 925 | 923 | ||
| 926 | if ((bc = ch->brdchan) == 0) { | 924 | bc = ch->brdchan; |
| 925 | if (bc == NULL) { | ||
| 927 | tty->driver_data = NULL; | 926 | tty->driver_data = NULL; |
| 928 | return -ENODEV; | 927 | return -ENODEV; |
| 929 | } | 928 | } |
| @@ -964,7 +963,7 @@ static int pc_open(struct tty_struct *tty, struct file * filp) | |||
| 964 | * The below routine generally sets up parity, baud, flow control | 963 | * The below routine generally sets up parity, baud, flow control |
| 965 | * issues, etc.... It effect both control flags and input flags. | 964 | * issues, etc.... It effect both control flags and input flags. |
| 966 | */ | 965 | */ |
| 967 | epcaparam(tty,ch); | 966 | epcaparam(tty, ch); |
| 968 | ch->asyncflags |= ASYNC_INITIALIZED; | 967 | ch->asyncflags |= ASYNC_INITIALIZED; |
| 969 | memoff(ch); | 968 | memoff(ch); |
| 970 | spin_unlock_irqrestore(&epca_lock, flags); | 969 | spin_unlock_irqrestore(&epca_lock, flags); |
| @@ -1002,8 +1001,8 @@ static void __exit epca_module_exit(void) | |||
| 1002 | 1001 | ||
| 1003 | del_timer_sync(&epca_timer); | 1002 | del_timer_sync(&epca_timer); |
| 1004 | 1003 | ||
| 1005 | if (tty_unregister_driver(pc_driver) || tty_unregister_driver(pc_info)) | 1004 | if (tty_unregister_driver(pc_driver) || |
| 1006 | { | 1005 | tty_unregister_driver(pc_info)) { |
| 1007 | printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n"); | 1006 | printk(KERN_WARNING "epca: cleanup_module failed to un-register tty driver\n"); |
| 1008 | return; | 1007 | return; |
| 1009 | } | 1008 | } |
| @@ -1034,7 +1033,6 @@ static const struct tty_operations pc_ops = { | |||
| 1034 | .flush_buffer = pc_flush_buffer, | 1033 | .flush_buffer = pc_flush_buffer, |
| 1035 | .chars_in_buffer = pc_chars_in_buffer, | 1034 | .chars_in_buffer = pc_chars_in_buffer, |
| 1036 | .flush_chars = pc_flush_chars, | 1035 | .flush_chars = pc_flush_chars, |
| 1037 | .put_char = pc_put_char, | ||
| 1038 | .ioctl = pc_ioctl, | 1036 | .ioctl = pc_ioctl, |
| 1039 | .set_termios = pc_set_termios, | 1037 | .set_termios = pc_set_termios, |
| 1040 | .stop = pc_stop, | 1038 | .stop = pc_stop, |
| @@ -1044,7 +1042,7 @@ static const struct tty_operations pc_ops = { | |||
| 1044 | .hangup = pc_hangup, | 1042 | .hangup = pc_hangup, |
| 1045 | }; | 1043 | }; |
| 1046 | 1044 | ||
| 1047 | static int info_open(struct tty_struct *tty, struct file * filp) | 1045 | static int info_open(struct tty_struct *tty, struct file *filp) |
| 1048 | { | 1046 | { |
| 1049 | return 0; | 1047 | return 0; |
| 1050 | } | 1048 | } |
| @@ -1099,7 +1097,7 @@ static int __init pc_init(void) | |||
| 1099 | * Set up interrupt, we will worry about memory allocation in | 1097 | * Set up interrupt, we will worry about memory allocation in |
| 1100 | * post_fep_init. | 1098 | * post_fep_init. |
| 1101 | */ | 1099 | */ |
| 1102 | printk(KERN_INFO "DIGI epca driver version %s loaded.\n",VERSION); | 1100 | printk(KERN_INFO "DIGI epca driver version %s loaded.\n", VERSION); |
| 1103 | 1101 | ||
| 1104 | /* | 1102 | /* |
| 1105 | * NOTE : This code assumes that the number of ports found in the | 1103 | * NOTE : This code assumes that the number of ports found in the |
| @@ -1252,7 +1250,7 @@ static int __init pc_init(void) | |||
| 1252 | if ((board_id & 0x30) == 0x30) | 1250 | if ((board_id & 0x30) == 0x30) |
| 1253 | bd->memory_seg = 0x8000; | 1251 | bd->memory_seg = 0x8000; |
| 1254 | } else | 1252 | } else |
| 1255 | printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n",(int)bd->port); | 1253 | printk(KERN_ERR "epca: Board at 0x%x doesn't appear to be an XI\n", (int)bd->port); |
| 1256 | break; | 1254 | break; |
| 1257 | } | 1255 | } |
| 1258 | } | 1256 | } |
| @@ -1326,12 +1324,12 @@ static void post_fep_init(unsigned int crd) | |||
| 1326 | */ | 1324 | */ |
| 1327 | /* PCI cards are already remapped at this point ISA are not */ | 1325 | /* PCI cards are already remapped at this point ISA are not */ |
| 1328 | bd->numports = readw(bd->re_map_membase + XEMPORTS); | 1326 | bd->numports = readw(bd->re_map_membase + XEMPORTS); |
| 1329 | epcaassert(bd->numports <= 64,"PCI returned a invalid number of ports"); | 1327 | epcaassert(bd->numports <= 64, "PCI returned a invalid number of ports"); |
| 1330 | nbdevs += (bd->numports); | 1328 | nbdevs += (bd->numports); |
| 1331 | } else { | 1329 | } else { |
| 1332 | /* Fix up the mappings for ISA/EISA etc */ | 1330 | /* Fix up the mappings for ISA/EISA etc */ |
| 1333 | /* FIXME: 64K - can we be smarter ? */ | 1331 | /* FIXME: 64K - can we be smarter ? */ |
| 1334 | bd->re_map_membase = ioremap(bd->membase, 0x10000); | 1332 | bd->re_map_membase = ioremap_nocache(bd->membase, 0x10000); |
| 1335 | } | 1333 | } |
| 1336 | 1334 | ||
| 1337 | if (crd != 0) | 1335 | if (crd != 0) |
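The hunk above maps the ISA/EISA board window with ioremap_nocache() rather than ioremap(), so reads and writes to the card's shared memory bypass the CPU cache and always reach the device. A minimal kernel-module sketch of that pattern against this era's API; BOARD_MEMBASE is a made-up address used purely for illustration (a real driver would use the probed bd->membase):

#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/io.h>

#define BOARD_MEMBASE	0x000d0000UL	/* illustrative physical address */
#define BOARD_WINSIZE	0x10000		/* 64K window, as in post_fep_init() */

static void __iomem *base;

static int __init demo_init(void)
{
	/* Uncached mapping: readw()/writew() hit the card, not the cache. */
	base = ioremap_nocache(BOARD_MEMBASE, BOARD_WINSIZE);
	if (!base)
		return -ENOMEM;
	printk(KERN_INFO "demo: first word = %#x\n", readw(base));
	return 0;
}

static void __exit demo_exit(void)
{
	iounmap(base);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
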
| @@ -1362,7 +1360,8 @@ static void post_fep_init(unsigned int crd) | |||
| 1362 | * XEPORTS (address 0xc22) points at the number of channels the card | 1360 | * XEPORTS (address 0xc22) points at the number of channels the card |
| 1363 | * supports. (For 64XE, XI, XEM, and XR use 0xc02) | 1361 | * supports. (For 64XE, XI, XEM, and XR use 0xc02) |
| 1364 | */ | 1362 | */ |
| 1365 | if ((bd->type == PCXEVE || bd->type == PCXE) && (readw(memaddr + XEPORTS) < 3)) | 1363 | if ((bd->type == PCXEVE || bd->type == PCXE) && |
| 1364 | (readw(memaddr + XEPORTS) < 3)) | ||
| 1366 | shrinkmem = 1; | 1365 | shrinkmem = 1; |
| 1367 | if (bd->type < PCIXEM) | 1366 | if (bd->type < PCIXEM) |
| 1368 | if (!request_region((int)bd->port, 4, board_desc[bd->type])) | 1367 | if (!request_region((int)bd->port, 4, board_desc[bd->type])) |
| @@ -1461,10 +1460,12 @@ static void post_fep_init(unsigned int crd) | |||
| 1461 | 1460 | ||
| 1462 | case PCXEVE: | 1461 | case PCXEVE: |
| 1463 | case PCXE: | 1462 | case PCXE: |
| 1464 | ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) & 0x1fff); | 1463 | ch->txptr = memaddr + (((tseg - bd->memory_seg) << 4) |
| 1464 | & 0x1fff); | ||
| 1465 | ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9); | 1465 | ch->txwin = FEPWIN | ((tseg - bd->memory_seg) >> 9); |
| 1466 | ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) & 0x1fff); | 1466 | ch->rxptr = memaddr + (((rseg - bd->memory_seg) << 4) |
| 1467 | ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >>9 ); | 1467 | & 0x1fff); |
| 1468 | ch->rxwin = FEPWIN | ((rseg - bd->memory_seg) >> 9); | ||
| 1468 | break; | 1469 | break; |
| 1469 | 1470 | ||
| 1470 | case PCXI: | 1471 | case PCXI: |
| @@ -1518,8 +1519,9 @@ static void post_fep_init(unsigned int crd) | |||
| 1518 | } | 1519 | } |
| 1519 | 1520 | ||
| 1520 | printk(KERN_INFO | 1521 | printk(KERN_INFO |
| 1521 | "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", | 1522 | "Digi PC/Xx Driver V%s: %s I/O = 0x%lx Mem = 0x%lx Ports = %d\n", |
| 1522 | VERSION, board_desc[bd->type], (long)bd->port, (long)bd->membase, bd->numports); | 1523 | VERSION, board_desc[bd->type], (long)bd->port, |
| 1524 | (long)bd->membase, bd->numports); | ||
| 1523 | memwinoff(bd, 0); | 1525 | memwinoff(bd, 0); |
| 1524 | } | 1526 | } |
| 1525 | 1527 | ||
| @@ -1527,7 +1529,7 @@ static void epcapoll(unsigned long ignored) | |||
| 1527 | { | 1529 | { |
| 1528 | unsigned long flags; | 1530 | unsigned long flags; |
| 1529 | int crd; | 1531 | int crd; |
| 1530 | volatile unsigned int head, tail; | 1532 | unsigned int head, tail; |
| 1531 | struct channel *ch; | 1533 | struct channel *ch; |
| 1532 | struct board_info *bd; | 1534 | struct board_info *bd; |
| 1533 | 1535 | ||
| @@ -1593,7 +1595,9 @@ static void doevent(int crd) | |||
| 1593 | chan0 = card_ptr[crd]; | 1595 | chan0 = card_ptr[crd]; |
| 1594 | epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); | 1596 | epcaassert(chan0 <= &digi_channels[nbdevs - 1], "ch out of range"); |
| 1595 | assertgwinon(chan0); | 1597 | assertgwinon(chan0); |
| 1596 | while ((tail = readw(&chan0->mailbox->eout)) != (head = readw(&chan0->mailbox->ein))) { /* Begin while something in event queue */ | 1598 | while ((tail = readw(&chan0->mailbox->eout)) != |
| 1599 | (head = readw(&chan0->mailbox->ein))) { | ||
| 1600 | /* Begin while something in event queue */ | ||
| 1597 | assertgwinon(chan0); | 1601 | assertgwinon(chan0); |
| 1598 | eventbuf = bd->re_map_membase + tail + ISTART; | 1602 | eventbuf = bd->re_map_membase + tail + ISTART; |
| 1599 | /* Get the channel the event occurred on */ | 1603 | /* Get the channel the event occurred on */ |
| @@ -1617,7 +1621,8 @@ static void doevent(int crd) | |||
| 1617 | goto next; | 1621 | goto next; |
| 1618 | } | 1622 | } |
| 1619 | 1623 | ||
| 1620 | if ((bc = ch->brdchan) == NULL) | 1624 | bc = ch->brdchan; |
| 1625 | if (bc == NULL) | ||
| 1621 | goto next; | 1626 | goto next; |
| 1622 | 1627 | ||
| 1623 | if (event & DATA_IND) { /* Begin DATA_IND */ | 1628 | if (event & DATA_IND) { /* Begin DATA_IND */ |
| @@ -1629,10 +1634,11 @@ static void doevent(int crd) | |||
| 1629 | /* A modem signal change has been indicated */ | 1634 | /* A modem signal change has been indicated */ |
| 1630 | ch->imodem = mstat; | 1635 | ch->imodem = mstat; |
| 1631 | if (ch->asyncflags & ASYNC_CHECK_CD) { | 1636 | if (ch->asyncflags & ASYNC_CHECK_CD) { |
| 1632 | if (mstat & ch->dcd) /* We are now receiving dcd */ | 1637 | /* We are now receiving dcd */ |
| 1638 | if (mstat & ch->dcd) | ||
| 1633 | wake_up_interruptible(&ch->open_wait); | 1639 | wake_up_interruptible(&ch->open_wait); |
| 1634 | else | 1640 | else /* No dcd; hangup */ |
| 1635 | pc_sched_event(ch, EPCA_EVENT_HANGUP); /* No dcd; hangup */ | 1641 | pc_sched_event(ch, EPCA_EVENT_HANGUP); |
| 1636 | } | 1642 | } |
| 1637 | } | 1643 | } |
| 1638 | tty = ch->tty; | 1644 | tty = ch->tty; |
| @@ -1647,7 +1653,8 @@ static void doevent(int crd) | |||
| 1647 | tty_wakeup(tty); | 1653 | tty_wakeup(tty); |
| 1648 | } | 1654 | } |
| 1649 | } else if (event & EMPTYTX_IND) { | 1655 | } else if (event & EMPTYTX_IND) { |
| 1650 | /* This event is generated by setup_empty_event */ | 1656 | /* This event is generated by |
| 1657 | setup_empty_event */ | ||
| 1651 | ch->statusflags &= ~TXBUSY; | 1658 | ch->statusflags &= ~TXBUSY; |
| 1652 | if (ch->statusflags & EMPTYWAIT) { | 1659 | if (ch->statusflags & EMPTYWAIT) { |
| 1653 | ch->statusflags &= ~EMPTYWAIT; | 1660 | ch->statusflags &= ~EMPTYWAIT; |
| @@ -1655,7 +1662,7 @@ static void doevent(int crd) | |||
| 1655 | } | 1662 | } |
| 1656 | } | 1663 | } |
| 1657 | } | 1664 | } |
| 1658 | next: | 1665 | next: |
| 1659 | globalwinon(ch); | 1666 | globalwinon(ch); |
| 1660 | BUG_ON(!bc); | 1667 | BUG_ON(!bc); |
| 1661 | writew(1, &bc->idata); | 1668 | writew(1, &bc->idata); |
| @@ -1665,7 +1672,7 @@ static void doevent(int crd) | |||
| 1665 | } | 1672 | } |
| 1666 | 1673 | ||
| 1667 | static void fepcmd(struct channel *ch, int cmd, int word_or_byte, | 1674 | static void fepcmd(struct channel *ch, int cmd, int word_or_byte, |
| 1668 | int byte2, int ncmds, int bytecmd) | 1675 | int byte2, int ncmds, int bytecmd) |
| 1669 | { | 1676 | { |
| 1670 | unchar __iomem *memaddr; | 1677 | unchar __iomem *memaddr; |
| 1671 | unsigned int head, cmdTail, cmdStart, cmdMax; | 1678 | unsigned int head, cmdTail, cmdStart, cmdMax; |
| @@ -1690,8 +1697,10 @@ static void fepcmd(struct channel *ch, int cmd, int word_or_byte, | |||
| 1690 | memaddr = ch->board->re_map_membase; | 1697 | memaddr = ch->board->re_map_membase; |
| 1691 | 1698 | ||
| 1692 | if (head >= (cmdMax - cmdStart) || (head & 03)) { | 1699 | if (head >= (cmdMax - cmdStart) || (head & 03)) { |
| 1693 | printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", __LINE__, cmd, head); | 1700 | printk(KERN_ERR "line %d: Out of range, cmd = %x, head = %x\n", |
| 1694 | printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", __LINE__, cmdMax, cmdStart); | 1701 | __LINE__, cmd, head); |
| 1702 | printk(KERN_ERR "line %d: Out of range, cmdMax = %x, cmdStart = %x\n", | ||
| 1703 | __LINE__, cmdMax, cmdStart); | ||
| 1695 | return; | 1704 | return; |
| 1696 | } | 1705 | } |
| 1697 | if (bytecmd) { | 1706 | if (bytecmd) { |
| @@ -1770,7 +1779,7 @@ static unsigned termios2digi_h(struct channel *ch, unsigned cflag) | |||
| 1770 | static unsigned termios2digi_i(struct channel *ch, unsigned iflag) | 1779 | static unsigned termios2digi_i(struct channel *ch, unsigned iflag) |
| 1771 | { | 1780 | { |
| 1772 | unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | | 1781 | unsigned res = iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK | |
| 1773 | INPCK | ISTRIP|IXON|IXANY|IXOFF); | 1782 | INPCK | ISTRIP | IXON | IXANY | IXOFF); |
| 1774 | if (ch->digiext.digi_flags & DIGI_AIXON) | 1783 | if (ch->digiext.digi_flags & DIGI_AIXON) |
| 1775 | res |= IAIXON; | 1784 | res |= IAIXON; |
| 1776 | return res; | 1785 | return res; |
| @@ -1838,7 +1847,7 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch) | |||
| 1838 | unsigned mval, hflow, cflag, iflag; | 1847 | unsigned mval, hflow, cflag, iflag; |
| 1839 | 1848 | ||
| 1840 | bc = ch->brdchan; | 1849 | bc = ch->brdchan; |
| 1841 | epcaassert(bc !=0, "bc out of range"); | 1850 | epcaassert(bc != NULL, "bc out of range"); |
| 1842 | 1851 | ||
| 1843 | assertgwinon(ch); | 1852 | assertgwinon(ch); |
| 1844 | ts = tty->termios; | 1853 | ts = tty->termios; |
| @@ -1884,8 +1893,10 @@ static void epcaparam(struct tty_struct *tty, struct channel *ch) | |||
| 1884 | * Command sets channels iflag structure on the board. Such | 1893 | * Command sets channels iflag structure on the board. Such |
| 1885 | * things as input soft flow control, handling of parity | 1894 | * things as input soft flow control, handling of parity |
| 1886 | * errors, and break handling are all set here. | 1895 | * errors, and break handling are all set here. |
| 1896 | * | ||
| 1897 | * break handling, parity handling, input stripping, | ||
| 1898 | * flow control chars | ||
| 1887 | */ | 1899 | */ |
| 1888 | /* break handling, parity handling, input stripping, flow control chars */ | ||
| 1889 | fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); | 1900 | fepcmd(ch, SETIFLAGS, (unsigned int) ch->fepiflag, 0, 0, 0); |
| 1890 | } | 1901 | } |
| 1891 | /* | 1902 | /* |
| @@ -1981,7 +1992,7 @@ static void receive_data(struct channel *ch) | |||
| 1981 | return; | 1992 | return; |
| 1982 | 1993 | ||
| 1983 | /* If CREAD bit is off or device not open, set TX tail to head */ | 1994 | /* If CREAD bit is off or device not open, set TX tail to head */ |
| 1984 | if (!tty || !ts || !(ts->c_cflag & CREAD)) { | 1995 | if (!tty || !ts || !(ts->c_cflag & CREAD)) { |
| 1985 | writew(head, &bc->rout); | 1996 | writew(head, &bc->rout); |
| 1986 | return; | 1997 | return; |
| 1987 | } | 1998 | } |
| @@ -1991,18 +2002,21 @@ static void receive_data(struct channel *ch) | |||
| 1991 | 2002 | ||
| 1992 | if (readb(&bc->orun)) { | 2003 | if (readb(&bc->orun)) { |
| 1993 | writeb(0, &bc->orun); | 2004 | writeb(0, &bc->orun); |
| 1994 | printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n",tty->name); | 2005 | printk(KERN_WARNING "epca; overrun! DigiBoard device %s\n", |
| 2006 | tty->name); | ||
| 1995 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | 2007 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); |
| 1996 | } | 2008 | } |
| 1997 | rxwinon(ch); | 2009 | rxwinon(ch); |
| 1998 | while (bytesAvailable > 0) { /* Begin while there is data on the card */ | 2010 | while (bytesAvailable > 0) { |
| 2011 | /* Begin while there is data on the card */ | ||
| 1999 | wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; | 2012 | wrapgap = (head >= tail) ? head - tail : ch->rxbufsize - tail; |
| 2000 | /* | 2013 | /* |
| 2001 | * Even if head has wrapped around only report the amount of | 2014 | * Even if head has wrapped around only report the amount of |
| 2002 | * data to be equal to the size - tail. Remember memcpy can't | 2015 | * data to be equal to the size - tail. Remember memcpy can't |
| 2003 | * automaticly wrap around the receive buffer. | 2016 | * automaticly wrap around the receive buffer. |
| 2004 | */ | 2017 | */ |
| 2005 | dataToRead = (wrapgap < bytesAvailable) ? wrapgap : bytesAvailable; | 2018 | dataToRead = (wrapgap < bytesAvailable) ? wrapgap |
| 2019 | : bytesAvailable; | ||
| 2006 | /* Make sure we don't overflow the buffer */ | 2020 | /* Make sure we don't overflow the buffer */ |
| 2007 | dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead); | 2021 | dataToRead = tty_prepare_flip_string(tty, &rptr, dataToRead); |
| 2008 | if (dataToRead == 0) | 2022 | if (dataToRead == 0) |
| @@ -2153,14 +2167,14 @@ static int pc_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 2153 | * The below routine generally sets up parity, baud, flow control | 2167 | * The below routine generally sets up parity, baud, flow control |
| 2154 | * issues, etc.... It effect both control flags and input flags. | 2168 | * issues, etc.... It effect both control flags and input flags. |
| 2155 | */ | 2169 | */ |
| 2156 | epcaparam(tty,ch); | 2170 | epcaparam(tty, ch); |
| 2157 | memoff(ch); | 2171 | memoff(ch); |
| 2158 | spin_unlock_irqrestore(&epca_lock, flags); | 2172 | spin_unlock_irqrestore(&epca_lock, flags); |
| 2159 | return 0; | 2173 | return 0; |
| 2160 | } | 2174 | } |
| 2161 | 2175 | ||
| 2162 | static int pc_ioctl(struct tty_struct *tty, struct file * file, | 2176 | static int pc_ioctl(struct tty_struct *tty, struct file *file, |
| 2163 | unsigned int cmd, unsigned long arg) | 2177 | unsigned int cmd, unsigned long arg) |
| 2164 | { | 2178 | { |
| 2165 | digiflow_t dflow; | 2179 | digiflow_t dflow; |
| 2166 | int retval; | 2180 | int retval; |
| @@ -2175,7 +2189,6 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2175 | bc = ch->brdchan; | 2189 | bc = ch->brdchan; |
| 2176 | else | 2190 | else |
| 2177 | return -EINVAL; | 2191 | return -EINVAL; |
| 2178 | |||
| 2179 | /* | 2192 | /* |
| 2180 | * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in | 2193 | * For POSIX compliance we need to add more ioctls. See tty_ioctl.c in |
| 2181 | * /usr/src/linux/drivers/char for a good example. In particular think | 2194 | * /usr/src/linux/drivers/char for a good example. In particular think |
| @@ -2186,9 +2199,10 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2186 | retval = tty_check_change(tty); | 2199 | retval = tty_check_change(tty); |
| 2187 | if (retval) | 2200 | if (retval) |
| 2188 | return retval; | 2201 | return retval; |
| 2189 | /* Setup an event to indicate when the transmit buffer empties */ | 2202 | /* Setup an event to indicate when the transmit |
| 2203 | buffer empties */ | ||
| 2190 | spin_lock_irqsave(&epca_lock, flags); | 2204 | spin_lock_irqsave(&epca_lock, flags); |
| 2191 | setup_empty_event(tty,ch); | 2205 | setup_empty_event(tty, ch); |
| 2192 | spin_unlock_irqrestore(&epca_lock, flags); | 2206 | spin_unlock_irqrestore(&epca_lock, flags); |
| 2193 | tty_wait_until_sent(tty, 0); | 2207 | tty_wait_until_sent(tty, 0); |
| 2194 | if (!arg) | 2208 | if (!arg) |
| @@ -2198,29 +2212,14 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2198 | retval = tty_check_change(tty); | 2212 | retval = tty_check_change(tty); |
| 2199 | if (retval) | 2213 | if (retval) |
| 2200 | return retval; | 2214 | return retval; |
| 2201 | 2215 | /* Setup an event to indicate when the transmit buffer | |
| 2202 | /* Setup an event to indicate when the transmit buffer empties */ | 2216 | empties */ |
| 2203 | spin_lock_irqsave(&epca_lock, flags); | 2217 | spin_lock_irqsave(&epca_lock, flags); |
| 2204 | setup_empty_event(tty,ch); | 2218 | setup_empty_event(tty, ch); |
| 2205 | spin_unlock_irqrestore(&epca_lock, flags); | 2219 | spin_unlock_irqrestore(&epca_lock, flags); |
| 2206 | tty_wait_until_sent(tty, 0); | 2220 | tty_wait_until_sent(tty, 0); |
| 2207 | digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); | 2221 | digi_send_break(ch, arg ? arg*(HZ/10) : HZ/4); |
| 2208 | return 0; | 2222 | return 0; |
| 2209 | case TIOCGSOFTCAR: | ||
| 2210 | if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)arg)) | ||
| 2211 | return -EFAULT; | ||
| 2212 | return 0; | ||
| 2213 | case TIOCSSOFTCAR: | ||
| 2214 | { | ||
| 2215 | unsigned int value; | ||
| 2216 | |||
| 2217 | if (get_user(value, (unsigned __user *)argp)) | ||
| 2218 | return -EFAULT; | ||
| 2219 | tty->termios->c_cflag = | ||
| 2220 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 2221 | (value ? CLOCAL : 0)); | ||
| 2222 | return 0; | ||
| 2223 | } | ||
| 2224 | case TIOCMODG: | 2223 | case TIOCMODG: |
| 2225 | mflag = pc_tiocmget(tty, file); | 2224 | mflag = pc_tiocmget(tty, file); |
| 2226 | if (put_user(mflag, (unsigned long __user *)argp)) | 2225 | if (put_user(mflag, (unsigned long __user *)argp)) |
| @@ -2253,10 +2252,12 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2253 | break; | 2252 | break; |
| 2254 | case DIGI_SETAW: | 2253 | case DIGI_SETAW: |
| 2255 | case DIGI_SETAF: | 2254 | case DIGI_SETAF: |
| 2255 | lock_kernel(); | ||
| 2256 | if (cmd == DIGI_SETAW) { | 2256 | if (cmd == DIGI_SETAW) { |
| 2257 | /* Setup an event to indicate when the transmit buffer empties */ | 2257 | /* Setup an event to indicate when the transmit |
| 2258 | buffer empties */ | ||
| 2258 | spin_lock_irqsave(&epca_lock, flags); | 2259 | spin_lock_irqsave(&epca_lock, flags); |
| 2259 | setup_empty_event(tty,ch); | 2260 | setup_empty_event(tty, ch); |
| 2260 | spin_unlock_irqrestore(&epca_lock, flags); | 2261 | spin_unlock_irqrestore(&epca_lock, flags); |
| 2261 | tty_wait_until_sent(tty, 0); | 2262 | tty_wait_until_sent(tty, 0); |
| 2262 | } else { | 2263 | } else { |
| @@ -2264,6 +2265,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2264 | if (tty->ldisc.flush_buffer) | 2265 | if (tty->ldisc.flush_buffer) |
| 2265 | tty->ldisc.flush_buffer(tty); | 2266 | tty->ldisc.flush_buffer(tty); |
| 2266 | } | 2267 | } |
| 2268 | unlock_kernel(); | ||
| 2267 | /* Fall Thru */ | 2269 | /* Fall Thru */ |
| 2268 | case DIGI_SETA: | 2270 | case DIGI_SETA: |
| 2269 | if (copy_from_user(&ch->digiext, argp, sizeof(digi_t))) | 2271 | if (copy_from_user(&ch->digiext, argp, sizeof(digi_t))) |
| @@ -2285,7 +2287,7 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2285 | * control issues, etc.... It effect both control flags and | 2287 | * control issues, etc.... It effect both control flags and |
| 2286 | * input flags. | 2288 | * input flags. |
| 2287 | */ | 2289 | */ |
| 2288 | epcaparam(tty,ch); | 2290 | epcaparam(tty, ch); |
| 2289 | memoff(ch); | 2291 | memoff(ch); |
| 2290 | spin_unlock_irqrestore(&epca_lock, flags); | 2292 | spin_unlock_irqrestore(&epca_lock, flags); |
| 2291 | break; | 2293 | break; |
| @@ -2321,18 +2323,21 @@ static int pc_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2321 | if (copy_from_user(&dflow, argp, sizeof(dflow))) | 2323 | if (copy_from_user(&dflow, argp, sizeof(dflow))) |
| 2322 | return -EFAULT; | 2324 | return -EFAULT; |
| 2323 | 2325 | ||
| 2324 | if (dflow.startc != startc || dflow.stopc != stopc) { /* Begin if setflow toggled */ | 2326 | if (dflow.startc != startc || dflow.stopc != stopc) { |
| 2327 | /* Begin if setflow toggled */ | ||
| 2325 | spin_lock_irqsave(&epca_lock, flags); | 2328 | spin_lock_irqsave(&epca_lock, flags); |
| 2326 | globalwinon(ch); | 2329 | globalwinon(ch); |
| 2327 | 2330 | ||
| 2328 | if (cmd == DIGI_SETFLOW) { | 2331 | if (cmd == DIGI_SETFLOW) { |
| 2329 | ch->fepstartc = ch->startc = dflow.startc; | 2332 | ch->fepstartc = ch->startc = dflow.startc; |
| 2330 | ch->fepstopc = ch->stopc = dflow.stopc; | 2333 | ch->fepstopc = ch->stopc = dflow.stopc; |
| 2331 | fepcmd(ch, SONOFFC, ch->fepstartc, ch->fepstopc, 0, 1); | 2334 | fepcmd(ch, SONOFFC, ch->fepstartc, |
| 2335 | ch->fepstopc, 0, 1); | ||
| 2332 | } else { | 2336 | } else { |
| 2333 | ch->fepstartca = ch->startca = dflow.startc; | 2337 | ch->fepstartca = ch->startca = dflow.startc; |
| 2334 | ch->fepstopca = ch->stopca = dflow.stopc; | 2338 | ch->fepstopca = ch->stopca = dflow.stopc; |
| 2335 | fepcmd(ch, SAUXONOFFC, ch->fepstartca, ch->fepstopca, 0, 1); | 2339 | fepcmd(ch, SAUXONOFFC, ch->fepstartca, |
| 2340 | ch->fepstopca, 0, 1); | ||
| 2336 | } | 2341 | } |
| 2337 | 2342 | ||
| 2338 | if (ch->statusflags & TXSTOPPED) | 2343 | if (ch->statusflags & TXSTOPPED) |
| @@ -2356,7 +2361,9 @@ static void pc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 2356 | * verifyChannel returns the channel from the tty struct if it is | 2361 | * verifyChannel returns the channel from the tty struct if it is |
| 2357 | * valid. This serves as a sanity check. | 2362 | * valid. This serves as a sanity check. |
| 2358 | */ | 2363 | */ |
| 2359 | if ((ch = verifyChannel(tty)) != NULL) { /* Begin if channel valid */ | 2364 | ch = verifyChannel(tty); |
| 2365 | |||
| 2366 | if (ch != NULL) { /* Begin if channel valid */ | ||
| 2360 | spin_lock_irqsave(&epca_lock, flags); | 2367 | spin_lock_irqsave(&epca_lock, flags); |
| 2361 | globalwinon(ch); | 2368 | globalwinon(ch); |
| 2362 | epcaparam(tty, ch); | 2369 | epcaparam(tty, ch); |
| @@ -2383,7 +2390,7 @@ static void do_softint(struct work_struct *work) | |||
| 2383 | 2390 | ||
| 2384 | if (tty && tty->driver_data) { | 2391 | if (tty && tty->driver_data) { |
| 2385 | if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { | 2392 | if (test_and_clear_bit(EPCA_EVENT_HANGUP, &ch->event)) { |
| 2386 | tty_hangup(tty); /* FIXME: module removal race here - AKPM */ | 2393 | tty_hangup(tty); |
| 2387 | wake_up_interruptible(&ch->open_wait); | 2394 | wake_up_interruptible(&ch->open_wait); |
| 2388 | ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; | 2395 | ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; |
| 2389 | } | 2396 | } |
| @@ -2403,9 +2410,11 @@ static void pc_stop(struct tty_struct *tty) | |||
| 2403 | * verifyChannel returns the channel from the tty struct if it is | 2410 | * verifyChannel returns the channel from the tty struct if it is |
| 2404 | * valid. This serves as a sanity check. | 2411 | * valid. This serves as a sanity check. |
| 2405 | */ | 2412 | */ |
| 2406 | if ((ch = verifyChannel(tty)) != NULL) { | 2413 | ch = verifyChannel(tty); |
| 2414 | if (ch != NULL) { | ||
| 2407 | spin_lock_irqsave(&epca_lock, flags); | 2415 | spin_lock_irqsave(&epca_lock, flags); |
| 2408 | if ((ch->statusflags & TXSTOPPED) == 0) { /* Begin if transmit stop requested */ | 2416 | if ((ch->statusflags & TXSTOPPED) == 0) { |
| 2417 | /* Begin if transmit stop requested */ | ||
| 2409 | globalwinon(ch); | 2418 | globalwinon(ch); |
| 2410 | /* STOP transmitting now !! */ | 2419 | /* STOP transmitting now !! */ |
| 2411 | fepcmd(ch, PAUSETX, 0, 0, 0, 0); | 2420 | fepcmd(ch, PAUSETX, 0, 0, 0, 0); |
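pc_set_termios() and pc_stop() above, and pc_start(), pc_throttle() and pc_unthrottle() in the hunks that follow, all get the same cleanup: the assignment is lifted out of the if condition. The idiom, shown in isolation with the function body elided:

        /* Before */
        if ((ch = verifyChannel(tty)) != NULL) {
                /* ... body ... */
        }

        /* After: assignment and NULL test separated */
        ch = verifyChannel(tty);
        if (ch != NULL) {
                /* ... body unchanged ... */
        }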
| @@ -2423,11 +2432,14 @@ static void pc_start(struct tty_struct *tty) | |||
| 2423 | * verifyChannel returns the channel from the tty struct if it is | 2432 | * verifyChannel returns the channel from the tty struct if it is |
| 2424 | * valid. This serves as a sanity check. | 2433 | * valid. This serves as a sanity check. |
| 2425 | */ | 2434 | */ |
| 2426 | if ((ch = verifyChannel(tty)) != NULL) { | 2435 | ch = verifyChannel(tty); |
| 2436 | if (ch != NULL) { | ||
| 2427 | unsigned long flags; | 2437 | unsigned long flags; |
| 2428 | spin_lock_irqsave(&epca_lock, flags); | 2438 | spin_lock_irqsave(&epca_lock, flags); |
| 2429 | /* Just in case output was resumed because of a change in Digi-flow */ | 2439 | /* Just in case output was resumed because of a change |
| 2430 | if (ch->statusflags & TXSTOPPED) { /* Begin transmit resume requested */ | 2440 | in Digi-flow */ |
| 2441 | if (ch->statusflags & TXSTOPPED) { | ||
| 2442 | /* Begin transmit resume requested */ | ||
| 2431 | struct board_chan __iomem *bc; | 2443 | struct board_chan __iomem *bc; |
| 2432 | globalwinon(ch); | 2444 | globalwinon(ch); |
| 2433 | bc = ch->brdchan; | 2445 | bc = ch->brdchan; |
| @@ -2457,7 +2469,8 @@ static void pc_throttle(struct tty_struct *tty) | |||
| 2457 | * verifyChannel returns the channel from the tty struct if it is | 2469 | * verifyChannel returns the channel from the tty struct if it is |
| 2458 | * valid. This serves as a sanity check. | 2470 | * valid. This serves as a sanity check. |
| 2459 | */ | 2471 | */ |
| 2460 | if ((ch = verifyChannel(tty)) != NULL) { | 2472 | ch = verifyChannel(tty); |
| 2473 | if (ch != NULL) { | ||
| 2461 | spin_lock_irqsave(&epca_lock, flags); | 2474 | spin_lock_irqsave(&epca_lock, flags); |
| 2462 | if ((ch->statusflags & RXSTOPPED) == 0) { | 2475 | if ((ch->statusflags & RXSTOPPED) == 0) { |
| 2463 | globalwinon(ch); | 2476 | globalwinon(ch); |
| @@ -2477,8 +2490,10 @@ static void pc_unthrottle(struct tty_struct *tty) | |||
| 2477 | * verifyChannel returns the channel from the tty struct if it is | 2490 | * verifyChannel returns the channel from the tty struct if it is |
| 2478 | * valid. This serves as a sanity check. | 2491 | * valid. This serves as a sanity check. |
| 2479 | */ | 2492 | */ |
| 2480 | if ((ch = verifyChannel(tty)) != NULL) { | 2493 | ch = verifyChannel(tty); |
| 2481 | /* Just in case output was resumed because of a change in Digi-flow */ | 2494 | if (ch != NULL) { |
| 2495 | /* Just in case output was resumed because of a change | ||
| 2496 | in Digi-flow */ | ||
| 2482 | spin_lock_irqsave(&epca_lock, flags); | 2497 | spin_lock_irqsave(&epca_lock, flags); |
| 2483 | if (ch->statusflags & RXSTOPPED) { | 2498 | if (ch->statusflags & RXSTOPPED) { |
| 2484 | globalwinon(ch); | 2499 | globalwinon(ch); |
| @@ -2490,7 +2505,7 @@ static void pc_unthrottle(struct tty_struct *tty) | |||
| 2490 | } | 2505 | } |
| 2491 | } | 2506 | } |
| 2492 | 2507 | ||
| 2493 | void digi_send_break(struct channel *ch, int msec) | 2508 | static void digi_send_break(struct channel *ch, int msec) |
| 2494 | { | 2509 | { |
| 2495 | unsigned long flags; | 2510 | unsigned long flags; |
| 2496 | 2511 | ||
| @@ -2523,7 +2538,7 @@ static void setup_empty_event(struct tty_struct *tty, struct channel *ch) | |||
| 2523 | memoff(ch); | 2538 | memoff(ch); |
| 2524 | } | 2539 | } |
| 2525 | 2540 | ||
| 2526 | void epca_setup(char *str, int *ints) | 2541 | static void epca_setup(char *str, int *ints) |
| 2527 | { | 2542 | { |
| 2528 | struct board_info board; | 2543 | struct board_info board; |
| 2529 | int index, loop, last; | 2544 | int index, loop, last; |
| @@ -2552,14 +2567,16 @@ void epca_setup(char *str, int *ints) | |||
| 2552 | * instructing the driver to ignore epcaconfig.) For | 2567 | * instructing the driver to ignore epcaconfig.) For |
| 2553 | * this reason we check for 2. | 2568 | * this reason we check for 2. |
| 2554 | */ | 2569 | */ |
| 2555 | if (board.status == 2) { /* Begin ignore epcaconfig as well as lilo cmd line */ | 2570 | if (board.status == 2) { |
| 2571 | /* Begin ignore epcaconfig as well as lilo cmd line */ | ||
| 2556 | nbdevs = 0; | 2572 | nbdevs = 0; |
| 2557 | num_cards = 0; | 2573 | num_cards = 0; |
| 2558 | return; | 2574 | return; |
| 2559 | } /* End ignore epcaconfig as well as lilo cmd line */ | 2575 | } /* End ignore epcaconfig as well as lilo cmd line */ |
| 2560 | 2576 | ||
| 2561 | if (board.status > 2) { | 2577 | if (board.status > 2) { |
| 2562 | printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", board.status); | 2578 | printk(KERN_ERR "epca_setup: Invalid board status 0x%x\n", |
| 2579 | board.status); | ||
| 2563 | invalid_lilo_config = 1; | 2580 | invalid_lilo_config = 1; |
| 2564 | setup_error_code |= INVALID_BOARD_STATUS; | 2581 | setup_error_code |= INVALID_BOARD_STATUS; |
| 2565 | return; | 2582 | return; |
| @@ -2613,7 +2630,8 @@ void epca_setup(char *str, int *ints) | |||
| 2613 | case 6: | 2630 | case 6: |
| 2614 | board.membase = ints[index]; | 2631 | board.membase = ints[index]; |
| 2615 | if (ints[index] <= 0) { | 2632 | if (ints[index] <= 0) { |
| 2616 | printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n",(unsigned int)board.membase); | 2633 | printk(KERN_ERR "epca_setup: Invalid memory base 0x%x\n", |
| 2634 | (unsigned int)board.membase); | ||
| 2617 | invalid_lilo_config = 1; | 2635 | invalid_lilo_config = 1; |
| 2618 | setup_error_code |= INVALID_MEM_BASE; | 2636 | setup_error_code |= INVALID_MEM_BASE; |
| 2619 | return; | 2637 | return; |
| @@ -2744,7 +2762,7 @@ void epca_setup(char *str, int *ints) | |||
| 2744 | t2++; | 2762 | t2++; |
| 2745 | 2763 | ||
| 2746 | if (*t2) { | 2764 | if (*t2) { |
| 2747 | printk(KERN_ERR "epca_setup: Invalid memory base %s\n",str); | 2765 | printk(KERN_ERR "epca_setup: Invalid memory base %s\n", str); |
| 2748 | invalid_lilo_config = 1; | 2766 | invalid_lilo_config = 1; |
| 2749 | setup_error_code |= INVALID_MEM_BASE; | 2767 | setup_error_code |= INVALID_MEM_BASE; |
| 2750 | return; | 2768 | return; |
| @@ -2766,7 +2784,7 @@ void epca_setup(char *str, int *ints) | |||
| 2766 | 2784 | ||
| 2767 | /* I should REALLY validate the stuff here */ | 2785 | /* I should REALLY validate the stuff here */ |
| 2768 | /* Copies our local copy of board into boards */ | 2786 | /* Copies our local copy of board into boards */ |
| 2769 | memcpy((void *)&boards[num_cards],(void *)&board, sizeof(board)); | 2787 | memcpy((void *)&boards[num_cards], (void *)&board, sizeof(board)); |
| 2770 | /* Does this get called once per lilo arg are what ? */ | 2788 | /* Does this get called once per lilo arg are what ? */ |
| 2771 | printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", | 2789 | printk(KERN_INFO "PC/Xx: Added board %i, %s %i ports at 0x%4.4X base 0x%6.6X\n", |
| 2772 | num_cards, board_desc[board.type], | 2790 | num_cards, board_desc[board.type], |
| @@ -2807,9 +2825,9 @@ static int __devinit epca_init_one(struct pci_dev *pdev, | |||
| 2807 | if (board_idx >= MAXBOARDS) | 2825 | if (board_idx >= MAXBOARDS) |
| 2808 | goto err_out; | 2826 | goto err_out; |
| 2809 | 2827 | ||
| 2810 | addr = pci_resource_start (pdev, epca_info_tbl[info_idx].bar_idx); | 2828 | addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx); |
| 2811 | if (!addr) { | 2829 | if (!addr) { |
| 2812 | printk (KERN_ERR PFX "PCI region #%d not available (size 0)\n", | 2830 | printk(KERN_ERR PFX "PCI region #%d not available (size 0)\n", |
| 2813 | epca_info_tbl[info_idx].bar_idx); | 2831 | epca_info_tbl[info_idx].bar_idx); |
| 2814 | goto err_out; | 2832 | goto err_out; |
| 2815 | } | 2833 | } |
| @@ -2820,28 +2838,29 @@ static int __devinit epca_init_one(struct pci_dev *pdev, | |||
| 2820 | boards[board_idx].port = addr + PCI_IO_OFFSET; | 2838 | boards[board_idx].port = addr + PCI_IO_OFFSET; |
| 2821 | boards[board_idx].membase = addr; | 2839 | boards[board_idx].membase = addr; |
| 2822 | 2840 | ||
| 2823 | if (!request_mem_region (addr + PCI_IO_OFFSET, 0x200000, "epca")) { | 2841 | if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca")) { |
| 2824 | printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", | 2842 | printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", |
| 2825 | 0x200000, addr + PCI_IO_OFFSET); | 2843 | 0x200000, addr + PCI_IO_OFFSET); |
| 2826 | goto err_out; | 2844 | goto err_out; |
| 2827 | } | 2845 | } |
| 2828 | 2846 | ||
| 2829 | boards[board_idx].re_map_port = ioremap(addr + PCI_IO_OFFSET, 0x200000); | 2847 | boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET, |
| 2848 | 0x200000); | ||
| 2830 | if (!boards[board_idx].re_map_port) { | 2849 | if (!boards[board_idx].re_map_port) { |
| 2831 | printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", | 2850 | printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", |
| 2832 | 0x200000, addr + PCI_IO_OFFSET); | 2851 | 0x200000, addr + PCI_IO_OFFSET); |
| 2833 | goto err_out_free_pciio; | 2852 | goto err_out_free_pciio; |
| 2834 | } | 2853 | } |
| 2835 | 2854 | ||
| 2836 | if (!request_mem_region (addr, 0x200000, "epca")) { | 2855 | if (!request_mem_region(addr, 0x200000, "epca")) { |
| 2837 | printk (KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", | 2856 | printk(KERN_ERR PFX "resource 0x%x @ 0x%lx unavailable\n", |
| 2838 | 0x200000, addr); | 2857 | 0x200000, addr); |
| 2839 | goto err_out_free_iounmap; | 2858 | goto err_out_free_iounmap; |
| 2840 | } | 2859 | } |
| 2841 | 2860 | ||
| 2842 | boards[board_idx].re_map_membase = ioremap(addr, 0x200000); | 2861 | boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000); |
| 2843 | if (!boards[board_idx].re_map_membase) { | 2862 | if (!boards[board_idx].re_map_membase) { |
| 2844 | printk (KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", | 2863 | printk(KERN_ERR PFX "cannot map 0x%x @ 0x%lx\n", |
| 2845 | 0x200000, addr + PCI_IO_OFFSET); | 2864 | 0x200000, addr + PCI_IO_OFFSET); |
| 2846 | goto err_out_free_memregion; | 2865 | goto err_out_free_memregion; |
| 2847 | } | 2866 | } |
| @@ -2858,11 +2877,11 @@ static int __devinit epca_init_one(struct pci_dev *pdev, | |||
| 2858 | return 0; | 2877 | return 0; |
| 2859 | 2878 | ||
| 2860 | err_out_free_memregion: | 2879 | err_out_free_memregion: |
| 2861 | release_mem_region (addr, 0x200000); | 2880 | release_mem_region(addr, 0x200000); |
| 2862 | err_out_free_iounmap: | 2881 | err_out_free_iounmap: |
| 2863 | iounmap (boards[board_idx].re_map_port); | 2882 | iounmap(boards[board_idx].re_map_port); |
| 2864 | err_out_free_pciio: | 2883 | err_out_free_pciio: |
| 2865 | release_mem_region (addr + PCI_IO_OFFSET, 0x200000); | 2884 | release_mem_region(addr + PCI_IO_OFFSET, 0x200000); |
| 2866 | err_out: | 2885 | err_out: |
| 2867 | return -ENODEV; | 2886 | return -ENODEV; |
| 2868 | } | 2887 | } |
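The probe hunks above switch both mappings to ioremap_nocache(), making the uncached mapping of the card's shared memory explicit, and tidy the call spacing. Condensed from the hunks, the map/unwind ordering in epca_init_one() is (each error label undoes exactly the steps that succeeded before the failing one):

        addr = pci_resource_start(pdev, epca_info_tbl[info_idx].bar_idx);
        if (!addr)
                goto err_out;

        if (!request_mem_region(addr + PCI_IO_OFFSET, 0x200000, "epca"))
                goto err_out;
        boards[board_idx].re_map_port = ioremap_nocache(addr + PCI_IO_OFFSET, 0x200000);
        if (!boards[board_idx].re_map_port)
                goto err_out_free_pciio;

        if (!request_mem_region(addr, 0x200000, "epca"))
                goto err_out_free_iounmap;
        boards[board_idx].re_map_membase = ioremap_nocache(addr, 0x200000);
        if (!boards[board_idx].re_map_membase)
                goto err_out_free_memregion;

        /* ... board registration continues ... */

        err_out_free_memregion:
                release_mem_region(addr, 0x200000);
        err_out_free_iounmap:
                iounmap(boards[board_idx].re_map_port);
        err_out_free_pciio:
                release_mem_region(addr + PCI_IO_OFFSET, 0x200000);
        err_out:
                return -ENODEV;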
| @@ -2878,9 +2897,9 @@ static struct pci_device_id epca_pci_tbl[] = { | |||
| 2878 | 2897 | ||
| 2879 | MODULE_DEVICE_TABLE(pci, epca_pci_tbl); | 2898 | MODULE_DEVICE_TABLE(pci, epca_pci_tbl); |
| 2880 | 2899 | ||
| 2881 | int __init init_PCI (void) | 2900 | static int __init init_PCI(void) |
| 2882 | { | 2901 | { |
| 2883 | memset (&epca_driver, 0, sizeof (epca_driver)); | 2902 | memset(&epca_driver, 0, sizeof(epca_driver)); |
| 2884 | epca_driver.name = "epca"; | 2903 | epca_driver.name = "epca"; |
| 2885 | epca_driver.id_table = epca_pci_tbl; | 2904 | epca_driver.id_table = epca_pci_tbl; |
| 2886 | epca_driver.probe = epca_init_one; | 2905 | epca_driver.probe = epca_init_one; |
diff --git a/drivers/char/esp.c b/drivers/char/esp.c index f3fe62067344..84840ba13ff0 100644 --- a/drivers/char/esp.c +++ b/drivers/char/esp.c | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now | 8 | * Extensively rewritten by Theodore Ts'o, 8/16/92 -- 9/14/92. Now |
| 9 | * much more extensible to support other serial cards based on the | 9 | * much more extensible to support other serial cards based on the |
| 10 | * 16450/16550A UART's. Added support for the AST FourPort and the | 10 | * 16450/16550A UART's. Added support for the AST FourPort and the |
| 11 | * Accent Async board. | 11 | * Accent Async board. |
| 12 | * | 12 | * |
| 13 | * set_serial_info fixed to set the flags, custom divisor, and uart | 13 | * set_serial_info fixed to set the flags, custom divisor, and uart |
| 14 | * type fields. Fix suggested by Michael K. Johnson 12/12/92. | 14 | * type fields. Fix suggested by Michael K. Johnson 12/12/92. |
| @@ -61,11 +61,11 @@ | |||
| 61 | #include <linux/bitops.h> | 61 | #include <linux/bitops.h> |
| 62 | 62 | ||
| 63 | #include <asm/system.h> | 63 | #include <asm/system.h> |
| 64 | #include <asm/io.h> | 64 | #include <linux/io.h> |
| 65 | 65 | ||
| 66 | #include <asm/dma.h> | 66 | #include <asm/dma.h> |
| 67 | #include <linux/slab.h> | 67 | #include <linux/slab.h> |
| 68 | #include <asm/uaccess.h> | 68 | #include <linux/uaccess.h> |
| 69 | 69 | ||
| 70 | #include <linux/hayesesp.h> | 70 | #include <linux/hayesesp.h> |
| 71 | 71 | ||
| @@ -127,8 +127,10 @@ static struct tty_driver *esp_driver; | |||
| 127 | #undef SERIAL_DEBUG_FLOW | 127 | #undef SERIAL_DEBUG_FLOW |
| 128 | 128 | ||
| 129 | #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) | 129 | #if defined(MODULE) && defined(SERIAL_DEBUG_MCOUNT) |
| 130 | #define DBG_CNT(s) printk("(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ | 130 | #define DBG_CNT(s) printk(KERN_DEBUG "(%s): [%x] refc=%d, serc=%d, ttyc=%d -> %s\n", \ |
| 131 | tty->name, (info->flags), serial_driver.refcount,info->count,tty->count,s) | 131 | tty->name, info->flags, \ |
| 132 | serial_driver.refcount, \ | ||
| 133 | info->count, tty->count, s) | ||
| 132 | #else | 134 | #else |
| 133 | #define DBG_CNT(s) | 135 | #define DBG_CNT(s) |
| 134 | #endif | 136 | #endif |
| @@ -189,7 +191,7 @@ static inline void serial_out(struct esp_struct *info, int offset, | |||
| 189 | */ | 191 | */ |
| 190 | static void rs_stop(struct tty_struct *tty) | 192 | static void rs_stop(struct tty_struct *tty) |
| 191 | { | 193 | { |
| 192 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 194 | struct esp_struct *info = tty->driver_data; |
| 193 | unsigned long flags; | 195 | unsigned long flags; |
| 194 | 196 | ||
| 195 | if (serial_paranoia_check(info, tty->name, "rs_stop")) | 197 | if (serial_paranoia_check(info, tty->name, "rs_stop")) |
| @@ -206,12 +208,12 @@ static void rs_stop(struct tty_struct *tty) | |||
| 206 | 208 | ||
| 207 | static void rs_start(struct tty_struct *tty) | 209 | static void rs_start(struct tty_struct *tty) |
| 208 | { | 210 | { |
| 209 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 211 | struct esp_struct *info = tty->driver_data; |
| 210 | unsigned long flags; | 212 | unsigned long flags; |
| 211 | 213 | ||
| 212 | if (serial_paranoia_check(info, tty->name, "rs_start")) | 214 | if (serial_paranoia_check(info, tty->name, "rs_start")) |
| 213 | return; | 215 | return; |
| 214 | 216 | ||
| 215 | spin_lock_irqsave(&info->lock, flags); | 217 | spin_lock_irqsave(&info->lock, flags); |
| 216 | if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) { | 218 | if (info->xmit_cnt && info->xmit_buf && !(info->IER & UART_IER_THRI)) { |
| 217 | info->IER |= UART_IER_THRI; | 219 | info->IER |= UART_IER_THRI; |
| @@ -233,7 +235,7 @@ static void rs_start(struct tty_struct *tty) | |||
| 233 | * rs_interrupt() should try to keep the interrupt handler as fast as | 235 | * rs_interrupt() should try to keep the interrupt handler as fast as |
| 234 | * possible. After you are done making modifications, it is not a bad | 236 | * possible. After you are done making modifications, it is not a bad |
| 235 | * idea to do: | 237 | * idea to do: |
| 236 | * | 238 | * |
| 237 | * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c | 239 | * gcc -S -DKERNEL -Wall -Wstrict-prototypes -O6 -fomit-frame-pointer serial.c |
| 238 | * | 240 | * |
| 239 | * and look at the resulting assemble code in serial.s. | 241 | * and look at the resulting assemble code in serial.s. |
| @@ -290,7 +292,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes) | |||
| 290 | } | 292 | } |
| 291 | 293 | ||
| 292 | status_mask = (info->read_status_mask >> 2) & 0x07; | 294 | status_mask = (info->read_status_mask >> 2) & 0x07; |
| 293 | 295 | ||
| 294 | for (i = 0; i < num_bytes - 1; i += 2) { | 296 | for (i = 0; i < num_bytes - 1; i += 2) { |
| 295 | *((unsigned short *)(pio_buf->data + i)) = | 297 | *((unsigned short *)(pio_buf->data + i)) = |
| 296 | inw(info->port + UART_ESI_RX); | 298 | inw(info->port + UART_ESI_RX); |
| @@ -325,8 +327,7 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes) | |||
| 325 | flag = TTY_BREAK; | 327 | flag = TTY_BREAK; |
| 326 | if (info->flags & ASYNC_SAK) | 328 | if (info->flags & ASYNC_SAK) |
| 327 | do_SAK(tty); | 329 | do_SAK(tty); |
| 328 | } | 330 | } else if (err_buf->data[i] & 0x02) |
| 329 | else if (err_buf->data[i] & 0x02) | ||
| 330 | flag = TTY_FRAME; | 331 | flag = TTY_FRAME; |
| 331 | else if (err_buf->data[i] & 0x01) | 332 | else if (err_buf->data[i] & 0x01) |
| 332 | flag = TTY_PARITY; | 333 | flag = TTY_PARITY; |
| @@ -341,23 +342,29 @@ static inline void receive_chars_pio(struct esp_struct *info, int num_bytes) | |||
| 341 | release_pio_buffer(err_buf); | 342 | release_pio_buffer(err_buf); |
| 342 | } | 343 | } |
| 343 | 344 | ||
| 344 | static inline void receive_chars_dma(struct esp_struct *info, int num_bytes) | 345 | static void program_isa_dma(int dma, int dir, unsigned long addr, int len) |
| 345 | { | 346 | { |
| 346 | unsigned long flags; | 347 | unsigned long flags; |
| 348 | |||
| 349 | flags = claim_dma_lock(); | ||
| 350 | disable_dma(dma); | ||
| 351 | clear_dma_ff(dma); | ||
| 352 | set_dma_mode(dma, dir); | ||
| 353 | set_dma_addr(dma, addr); | ||
| 354 | set_dma_count(dma, len); | ||
| 355 | enable_dma(dma); | ||
| 356 | release_dma_lock(flags); | ||
| 357 | } | ||
| 358 | |||
| 359 | static void receive_chars_dma(struct esp_struct *info, int num_bytes) | ||
| 360 | { | ||
| 347 | info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; | 361 | info->stat_flags &= ~ESP_STAT_RX_TIMEOUT; |
| 348 | dma_bytes = num_bytes; | 362 | dma_bytes = num_bytes; |
| 349 | info->stat_flags |= ESP_STAT_DMA_RX; | 363 | info->stat_flags |= ESP_STAT_DMA_RX; |
| 350 | 364 | ||
| 351 | flags=claim_dma_lock(); | 365 | program_isa_dma(dma, DMA_MODE_READ, isa_virt_to_bus(dma_buffer), |
| 352 | disable_dma(dma); | 366 | dma_bytes); |
| 353 | clear_dma_ff(dma); | 367 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX); |
| 354 | set_dma_mode(dma, DMA_MODE_READ); | ||
| 355 | set_dma_addr(dma, isa_virt_to_bus(dma_buffer)); | ||
| 356 | set_dma_count(dma, dma_bytes); | ||
| 357 | enable_dma(dma); | ||
| 358 | release_dma_lock(flags); | ||
| 359 | |||
| 360 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_RX); | ||
| 361 | } | 368 | } |
| 362 | 369 | ||
| 363 | static inline void receive_chars_dma_done(struct esp_struct *info, | 370 | static inline void receive_chars_dma_done(struct esp_struct *info, |
| @@ -366,22 +373,22 @@ static inline void receive_chars_dma_done(struct esp_struct *info, | |||
| 366 | struct tty_struct *tty = info->tty; | 373 | struct tty_struct *tty = info->tty; |
| 367 | int num_bytes; | 374 | int num_bytes; |
| 368 | unsigned long flags; | 375 | unsigned long flags; |
| 369 | 376 | ||
| 370 | flags=claim_dma_lock(); | 377 | flags = claim_dma_lock(); |
| 371 | disable_dma(dma); | 378 | disable_dma(dma); |
| 372 | clear_dma_ff(dma); | 379 | clear_dma_ff(dma); |
| 373 | 380 | ||
| 374 | info->stat_flags &= ~ESP_STAT_DMA_RX; | 381 | info->stat_flags &= ~ESP_STAT_DMA_RX; |
| 375 | num_bytes = dma_bytes - get_dma_residue(dma); | 382 | num_bytes = dma_bytes - get_dma_residue(dma); |
| 376 | release_dma_lock(flags); | 383 | release_dma_lock(flags); |
| 377 | 384 | ||
| 378 | info->icount.rx += num_bytes; | 385 | info->icount.rx += num_bytes; |
| 379 | 386 | ||
| 380 | if (num_bytes > 0) { | 387 | if (num_bytes > 0) { |
| 381 | tty_insert_flip_string(tty, dma_buffer, num_bytes - 1); | 388 | tty_insert_flip_string(tty, dma_buffer, num_bytes - 1); |
| 382 | 389 | ||
| 383 | status &= (0x1c & info->read_status_mask); | 390 | status &= (0x1c & info->read_status_mask); |
| 384 | 391 | ||
| 385 | /* Is the status significant or do we throw the last byte ? */ | 392 | /* Is the status significant or do we throw the last byte ? */ |
| 386 | if (!(status & info->ignore_status_mask)) { | 393 | if (!(status & info->ignore_status_mask)) { |
| 387 | int statflag = 0; | 394 | int statflag = 0; |
| @@ -393,13 +400,13 @@ static inline void receive_chars_dma_done(struct esp_struct *info, | |||
| 393 | do_SAK(tty); | 400 | do_SAK(tty); |
| 394 | } else if (status & 0x08) { | 401 | } else if (status & 0x08) { |
| 395 | statflag = TTY_FRAME; | 402 | statflag = TTY_FRAME; |
| 396 | (info->icount.frame)++; | 403 | info->icount.frame++; |
| 397 | } | 404 | } else if (status & 0x04) { |
| 398 | else if (status & 0x04) { | ||
| 399 | statflag = TTY_PARITY; | 405 | statflag = TTY_PARITY; |
| 400 | (info->icount.parity)++; | 406 | info->icount.parity++; |
| 401 | } | 407 | } |
| 402 | tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], statflag); | 408 | tty_insert_flip_char(tty, dma_buffer[num_bytes - 1], |
| 409 | statflag); | ||
| 403 | } | 410 | } |
| 404 | tty_schedule_flip(tty); | 411 | tty_schedule_flip(tty); |
| 405 | } | 412 | } |
| @@ -484,8 +491,6 @@ static inline void transmit_chars_pio(struct esp_struct *info, | |||
| 484 | /* Caller must hold info->lock */ | 491 | /* Caller must hold info->lock */ |
| 485 | static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes) | 492 | static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes) |
| 486 | { | 493 | { |
| 487 | unsigned long flags; | ||
| 488 | |||
| 489 | dma_bytes = num_bytes; | 494 | dma_bytes = num_bytes; |
| 490 | 495 | ||
| 491 | if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) { | 496 | if (info->xmit_tail + dma_bytes <= ESP_XMIT_SIZE) { |
| @@ -517,26 +522,18 @@ static inline void transmit_chars_dma(struct esp_struct *info, int num_bytes) | |||
| 517 | } | 522 | } |
| 518 | 523 | ||
| 519 | info->stat_flags |= ESP_STAT_DMA_TX; | 524 | info->stat_flags |= ESP_STAT_DMA_TX; |
| 520 | 525 | ||
| 521 | flags=claim_dma_lock(); | 526 | program_isa_dma(dma, DMA_MODE_WRITE, isa_virt_to_bus(dma_buffer), |
| 522 | disable_dma(dma); | 527 | dma_bytes); |
| 523 | clear_dma_ff(dma); | 528 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); |
| 524 | set_dma_mode(dma, DMA_MODE_WRITE); | ||
| 525 | set_dma_addr(dma, isa_virt_to_bus(dma_buffer)); | ||
| 526 | set_dma_count(dma, dma_bytes); | ||
| 527 | enable_dma(dma); | ||
| 528 | release_dma_lock(flags); | ||
| 529 | |||
| 530 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); | ||
| 531 | } | 529 | } |
| 532 | 530 | ||
| 533 | static inline void transmit_chars_dma_done(struct esp_struct *info) | 531 | static inline void transmit_chars_dma_done(struct esp_struct *info) |
| 534 | { | 532 | { |
| 535 | int num_bytes; | 533 | int num_bytes; |
| 536 | unsigned long flags; | 534 | unsigned long flags; |
| 537 | |||
| 538 | 535 | ||
| 539 | flags=claim_dma_lock(); | 536 | flags = claim_dma_lock(); |
| 540 | disable_dma(dma); | 537 | disable_dma(dma); |
| 541 | clear_dma_ff(dma); | 538 | clear_dma_ff(dma); |
| 542 | 539 | ||
| @@ -547,27 +544,21 @@ static inline void transmit_chars_dma_done(struct esp_struct *info) | |||
| 547 | if (dma_bytes != num_bytes) { | 544 | if (dma_bytes != num_bytes) { |
| 548 | dma_bytes -= num_bytes; | 545 | dma_bytes -= num_bytes; |
| 549 | memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes); | 546 | memmove(dma_buffer, dma_buffer + num_bytes, dma_bytes); |
| 550 | 547 | ||
| 551 | flags=claim_dma_lock(); | 548 | program_isa_dma(dma, DMA_MODE_WRITE, |
| 552 | disable_dma(dma); | 549 | isa_virt_to_bus(dma_buffer), dma_bytes); |
| 553 | clear_dma_ff(dma); | 550 | |
| 554 | set_dma_mode(dma, DMA_MODE_WRITE); | 551 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); |
| 555 | set_dma_addr(dma, isa_virt_to_bus(dma_buffer)); | ||
| 556 | set_dma_count(dma, dma_bytes); | ||
| 557 | enable_dma(dma); | ||
| 558 | release_dma_lock(flags); | ||
| 559 | |||
| 560 | serial_out(info, UART_ESI_CMD1, ESI_START_DMA_TX); | ||
| 561 | } else { | 552 | } else { |
| 562 | dma_bytes = 0; | 553 | dma_bytes = 0; |
| 563 | info->stat_flags &= ~ESP_STAT_DMA_TX; | 554 | info->stat_flags &= ~ESP_STAT_DMA_TX; |
| 564 | } | 555 | } |
| 565 | } | 556 | } |
| 566 | 557 | ||
| 567 | static inline void check_modem_status(struct esp_struct *info) | 558 | static void check_modem_status(struct esp_struct *info) |
| 568 | { | 559 | { |
| 569 | int status; | 560 | int status; |
| 570 | 561 | ||
| 571 | serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); | 562 | serial_out(info, UART_ESI_CMD1, ESI_GET_UART_STAT); |
| 572 | status = serial_in(info, UART_ESI_STAT2); | 563 | status = serial_in(info, UART_ESI_STAT2); |
| 573 | 564 | ||
| @@ -588,7 +579,7 @@ static inline void check_modem_status(struct esp_struct *info) | |||
| 588 | #if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) | 579 | #if (defined(SERIAL_DEBUG_OPEN) || defined(SERIAL_DEBUG_INTR)) |
| 589 | printk("ttys%d CD now %s...", info->line, | 580 | printk("ttys%d CD now %s...", info->line, |
| 590 | (status & UART_MSR_DCD) ? "on" : "off"); | 581 | (status & UART_MSR_DCD) ? "on" : "off"); |
| 591 | #endif | 582 | #endif |
| 592 | if (status & UART_MSR_DCD) | 583 | if (status & UART_MSR_DCD) |
| 593 | wake_up_interruptible(&info->open_wait); | 584 | wake_up_interruptible(&info->open_wait); |
| 594 | else { | 585 | else { |
| @@ -605,7 +596,7 @@ static inline void check_modem_status(struct esp_struct *info) | |||
| 605 | */ | 596 | */ |
| 606 | static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | 597 | static irqreturn_t rs_interrupt_single(int irq, void *dev_id) |
| 607 | { | 598 | { |
| 608 | struct esp_struct * info; | 599 | struct esp_struct *info; |
| 609 | unsigned err_status; | 600 | unsigned err_status; |
| 610 | unsigned int scratch; | 601 | unsigned int scratch; |
| 611 | 602 | ||
| @@ -617,7 +608,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
| 617 | scratch = serial_in(info, UART_ESI_SID); | 608 | scratch = serial_in(info, UART_ESI_SID); |
| 618 | 609 | ||
| 619 | spin_lock(&info->lock); | 610 | spin_lock(&info->lock); |
| 620 | 611 | ||
| 621 | if (!info->tty) { | 612 | if (!info->tty) { |
| 622 | spin_unlock(&info->lock); | 613 | spin_unlock(&info->lock); |
| 623 | return IRQ_NONE; | 614 | return IRQ_NONE; |
| @@ -637,7 +628,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
| 637 | if (err_status & 0x80) /* Start break */ | 628 | if (err_status & 0x80) /* Start break */ |
| 638 | wake_up_interruptible(&info->break_wait); | 629 | wake_up_interruptible(&info->break_wait); |
| 639 | } | 630 | } |
| 640 | 631 | ||
| 641 | if ((scratch & 0x88) || /* DMA completed or timed out */ | 632 | if ((scratch & 0x88) || /* DMA completed or timed out */ |
| 642 | (err_status & 0x1c) /* receive error */) { | 633 | (err_status & 0x1c) /* receive error */) { |
| 643 | if (info->stat_flags & ESP_STAT_DMA_RX) | 634 | if (info->stat_flags & ESP_STAT_DMA_RX) |
| @@ -667,7 +658,7 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
| 667 | receive_chars_dma(info, num_bytes); | 658 | receive_chars_dma(info, num_bytes); |
| 668 | } | 659 | } |
| 669 | } | 660 | } |
| 670 | 661 | ||
| 671 | if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) && | 662 | if (!(info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) && |
| 672 | (scratch & 0x02) && (info->IER & UART_IER_THRI)) { | 663 | (scratch & 0x02) && (info->IER & UART_IER_THRI)) { |
| 673 | if ((info->xmit_cnt <= 0) || info->tty->stopped) { | 664 | if ((info->xmit_cnt <= 0) || info->tty->stopped) { |
| @@ -722,11 +713,11 @@ static irqreturn_t rs_interrupt_single(int irq, void *dev_id) | |||
| 722 | * --------------------------------------------------------------- | 713 | * --------------------------------------------------------------- |
| 723 | */ | 714 | */ |
| 724 | 715 | ||
| 725 | static inline void esp_basic_init(struct esp_struct * info) | 716 | static void esp_basic_init(struct esp_struct *info) |
| 726 | { | 717 | { |
| 727 | /* put ESPC in enhanced mode */ | 718 | /* put ESPC in enhanced mode */ |
| 728 | serial_out(info, UART_ESI_CMD1, ESI_SET_MODE); | 719 | serial_out(info, UART_ESI_CMD1, ESI_SET_MODE); |
| 729 | 720 | ||
| 730 | if (info->stat_flags & ESP_STAT_NEVER_DMA) | 721 | if (info->stat_flags & ESP_STAT_NEVER_DMA) |
| 731 | serial_out(info, UART_ESI_CMD2, 0x01); | 722 | serial_out(info, UART_ESI_CMD2, 0x01); |
| 732 | else | 723 | else |
| @@ -783,13 +774,13 @@ static inline void esp_basic_init(struct esp_struct * info) | |||
| 783 | serial_out(info, UART_ESI_CMD2, 0xff); | 774 | serial_out(info, UART_ESI_CMD2, 0xff); |
| 784 | } | 775 | } |
| 785 | 776 | ||
| 786 | static int startup(struct esp_struct * info) | 777 | static int startup(struct esp_struct *info) |
| 787 | { | 778 | { |
| 788 | unsigned long flags; | 779 | unsigned long flags; |
| 789 | int retval=0; | 780 | int retval = 0; |
| 790 | unsigned int num_chars; | 781 | unsigned int num_chars; |
| 791 | 782 | ||
| 792 | spin_lock_irqsave(&info->lock, flags); | 783 | spin_lock_irqsave(&info->lock, flags); |
| 793 | 784 | ||
| 794 | if (info->flags & ASYNC_INITIALIZED) | 785 | if (info->flags & ASYNC_INITIALIZED) |
| 795 | goto out; | 786 | goto out; |
| @@ -802,7 +793,8 @@ static int startup(struct esp_struct * info) | |||
| 802 | } | 793 | } |
| 803 | 794 | ||
| 804 | #ifdef SERIAL_DEBUG_OPEN | 795 | #ifdef SERIAL_DEBUG_OPEN |
| 805 | printk("starting up ttys%d (irq %d)...", info->line, info->irq); | 796 | printk(KERN_DEBUG "starting up ttys%d (irq %d)...", |
| 797 | info->line, info->irq); | ||
| 806 | #endif | 798 | #endif |
| 807 | 799 | ||
| 808 | /* Flush the RX buffer. Using the ESI flush command may cause */ | 800 | /* Flush the RX buffer. Using the ESI flush command may cause */ |
| @@ -863,7 +855,7 @@ static int startup(struct esp_struct * info) | |||
| 863 | dma_buffer = NULL; | 855 | dma_buffer = NULL; |
| 864 | info->stat_flags |= ESP_STAT_USE_PIO; | 856 | info->stat_flags |= ESP_STAT_USE_PIO; |
| 865 | } | 857 | } |
| 866 | 858 | ||
| 867 | } | 859 | } |
| 868 | 860 | ||
| 869 | info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; | 861 | info->MCR = UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2; |
| @@ -872,7 +864,7 @@ static int startup(struct esp_struct * info) | |||
| 872 | serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); | 864 | serial_out(info, UART_ESI_CMD1, ESI_WRITE_UART); |
| 873 | serial_out(info, UART_ESI_CMD2, UART_MCR); | 865 | serial_out(info, UART_ESI_CMD2, UART_MCR); |
| 874 | serial_out(info, UART_ESI_CMD2, info->MCR); | 866 | serial_out(info, UART_ESI_CMD2, info->MCR); |
| 875 | 867 | ||
| 876 | /* | 868 | /* |
| 877 | * Finally, enable interrupts | 869 | * Finally, enable interrupts |
| 878 | */ | 870 | */ |
| @@ -881,7 +873,7 @@ static int startup(struct esp_struct * info) | |||
| 881 | UART_IER_DMA_TC; | 873 | UART_IER_DMA_TC; |
| 882 | serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); | 874 | serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); |
| 883 | serial_out(info, UART_ESI_CMD2, info->IER); | 875 | serial_out(info, UART_ESI_CMD2, info->IER); |
| 884 | 876 | ||
| 885 | if (info->tty) | 877 | if (info->tty) |
| 886 | clear_bit(TTY_IO_ERROR, &info->tty->flags); | 878 | clear_bit(TTY_IO_ERROR, &info->tty->flags); |
| 887 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | 879 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; |
| @@ -900,7 +892,7 @@ static int startup(struct esp_struct * info) | |||
| 900 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) | 892 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) |
| 901 | info->tty->alt_speed = 460800; | 893 | info->tty->alt_speed = 460800; |
| 902 | } | 894 | } |
| 903 | 895 | ||
| 904 | /* | 896 | /* |
| 905 | * set the speed of the serial port | 897 | * set the speed of the serial port |
| 906 | */ | 898 | */ |
| @@ -918,7 +910,7 @@ out_unlocked: | |||
| 918 | * This routine will shutdown a serial port; interrupts are disabled, and | 910 | * This routine will shutdown a serial port; interrupts are disabled, and |
| 919 | * DTR is dropped if the hangup on close termio flag is on. | 911 | * DTR is dropped if the hangup on close termio flag is on. |
| 920 | */ | 912 | */ |
| 921 | static void shutdown(struct esp_struct * info) | 913 | static void shutdown(struct esp_struct *info) |
| 922 | { | 914 | { |
| 923 | unsigned long flags, f; | 915 | unsigned long flags, f; |
| 924 | 916 | ||
| @@ -929,7 +921,7 @@ static void shutdown(struct esp_struct * info) | |||
| 929 | printk("Shutting down serial port %d (irq %d)....", info->line, | 921 | printk("Shutting down serial port %d (irq %d)....", info->line, |
| 930 | info->irq); | 922 | info->irq); |
| 931 | #endif | 923 | #endif |
| 932 | 924 | ||
| 933 | spin_lock_irqsave(&info->lock, flags); | 925 | spin_lock_irqsave(&info->lock, flags); |
| 934 | /* | 926 | /* |
| 935 | * clear delta_msr_wait queue to avoid mem leaks: we may free the irq | 927 | * clear delta_msr_wait queue to avoid mem leaks: we may free the irq |
| @@ -941,14 +933,14 @@ static void shutdown(struct esp_struct * info) | |||
| 941 | /* stop a DMA transfer on the port being closed */ | 933 | /* stop a DMA transfer on the port being closed */ |
| 942 | /* DMA lock is higher priority always */ | 934 | /* DMA lock is higher priority always */ |
| 943 | if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) { | 935 | if (info->stat_flags & (ESP_STAT_DMA_RX | ESP_STAT_DMA_TX)) { |
| 944 | f=claim_dma_lock(); | 936 | f = claim_dma_lock(); |
| 945 | disable_dma(dma); | 937 | disable_dma(dma); |
| 946 | clear_dma_ff(dma); | 938 | clear_dma_ff(dma); |
| 947 | release_dma_lock(f); | 939 | release_dma_lock(f); |
| 948 | 940 | ||
| 949 | dma_bytes = 0; | 941 | dma_bytes = 0; |
| 950 | } | 942 | } |
| 951 | 943 | ||
| 952 | /* | 944 | /* |
| 953 | * Free the IRQ | 945 | * Free the IRQ |
| 954 | */ | 946 | */ |
| @@ -970,7 +962,7 @@ static void shutdown(struct esp_struct * info) | |||
| 970 | free_pages((unsigned long)dma_buffer, | 962 | free_pages((unsigned long)dma_buffer, |
| 971 | get_order(DMA_BUFFER_SZ)); | 963 | get_order(DMA_BUFFER_SZ)); |
| 972 | dma_buffer = NULL; | 964 | dma_buffer = NULL; |
| 973 | } | 965 | } |
| 974 | } | 966 | } |
| 975 | 967 | ||
| 976 | if (info->xmit_buf) { | 968 | if (info->xmit_buf) { |
| @@ -992,7 +984,7 @@ static void shutdown(struct esp_struct * info) | |||
| 992 | 984 | ||
| 993 | if (info->tty) | 985 | if (info->tty) |
| 994 | set_bit(TTY_IO_ERROR, &info->tty->flags); | 986 | set_bit(TTY_IO_ERROR, &info->tty->flags); |
| 995 | 987 | ||
| 996 | info->flags &= ~ASYNC_INITIALIZED; | 988 | info->flags &= ~ASYNC_INITIALIZED; |
| 997 | spin_unlock_irqrestore(&info->lock, flags); | 989 | spin_unlock_irqrestore(&info->lock, flags); |
| 998 | } | 990 | } |
| @@ -1005,7 +997,7 @@ static void change_speed(struct esp_struct *info) | |||
| 1005 | { | 997 | { |
| 1006 | unsigned short port; | 998 | unsigned short port; |
| 1007 | int quot = 0; | 999 | int quot = 0; |
| 1008 | unsigned cflag,cval; | 1000 | unsigned cflag, cval; |
| 1009 | int baud, bits; | 1001 | int baud, bits; |
| 1010 | unsigned char flow1 = 0, flow2 = 0; | 1002 | unsigned char flow1 = 0, flow2 = 0; |
| 1011 | unsigned long flags; | 1003 | unsigned long flags; |
| @@ -1014,14 +1006,14 @@ static void change_speed(struct esp_struct *info) | |||
| 1014 | return; | 1006 | return; |
| 1015 | cflag = info->tty->termios->c_cflag; | 1007 | cflag = info->tty->termios->c_cflag; |
| 1016 | port = info->port; | 1008 | port = info->port; |
| 1017 | 1009 | ||
| 1018 | /* byte size and parity */ | 1010 | /* byte size and parity */ |
| 1019 | switch (cflag & CSIZE) { | 1011 | switch (cflag & CSIZE) { |
| 1020 | case CS5: cval = 0x00; bits = 7; break; | 1012 | case CS5: cval = 0x00; bits = 7; break; |
| 1021 | case CS6: cval = 0x01; bits = 8; break; | 1013 | case CS6: cval = 0x01; bits = 8; break; |
| 1022 | case CS7: cval = 0x02; bits = 9; break; | 1014 | case CS7: cval = 0x02; bits = 9; break; |
| 1023 | case CS8: cval = 0x03; bits = 10; break; | 1015 | case CS8: cval = 0x03; bits = 10; break; |
| 1024 | default: cval = 0x00; bits = 7; break; | 1016 | default: cval = 0x00; bits = 7; break; |
| 1025 | } | 1017 | } |
| 1026 | if (cflag & CSTOPB) { | 1018 | if (cflag & CSTOPB) { |
| 1027 | cval |= 0x04; | 1019 | cval |= 0x04; |
| @@ -1037,14 +1029,12 @@ static void change_speed(struct esp_struct *info) | |||
| 1037 | if (cflag & CMSPAR) | 1029 | if (cflag & CMSPAR) |
| 1038 | cval |= UART_LCR_SPAR; | 1030 | cval |= UART_LCR_SPAR; |
| 1039 | #endif | 1031 | #endif |
| 1040 | |||
| 1041 | baud = tty_get_baud_rate(info->tty); | 1032 | baud = tty_get_baud_rate(info->tty); |
| 1042 | if (baud == 38400 && | 1033 | if (baud == 38400 && |
| 1043 | ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) | 1034 | ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST)) |
| 1044 | quot = info->custom_divisor; | 1035 | quot = info->custom_divisor; |
| 1045 | else { | 1036 | else { |
| 1046 | if (baud == 134) | 1037 | if (baud == 134) /* Special case since 134 is really 134.5 */ |
| 1047 | /* Special case since 134 is really 134.5 */ | ||
| 1048 | quot = (2*BASE_BAUD / 269); | 1038 | quot = (2*BASE_BAUD / 269); |
| 1049 | else if (baud) | 1039 | else if (baud) |
| 1050 | quot = BASE_BAUD / baud; | 1040 | quot = BASE_BAUD / baud; |
| @@ -1052,7 +1042,12 @@ static void change_speed(struct esp_struct *info) | |||
| 1052 | /* If the quotient is ever zero, default to 9600 bps */ | 1042 | /* If the quotient is ever zero, default to 9600 bps */ |
| 1053 | if (!quot) | 1043 | if (!quot) |
| 1054 | quot = BASE_BAUD / 9600; | 1044 | quot = BASE_BAUD / 9600; |
| 1055 | 1045 | ||
| 1046 | if (baud) { | ||
| 1047 | /* Actual rate */ | ||
| 1048 | baud = BASE_BAUD/quot; | ||
| 1049 | tty_encode_baud_rate(info->tty, baud, baud); | ||
| 1050 | } | ||
| 1056 | info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50); | 1051 | info->timeout = ((1024 * HZ * bits * quot) / BASE_BAUD) + (HZ / 50); |
| 1057 | 1052 | ||
| 1058 | /* CTS flow control flag and modem status interrupts */ | 1053 | /* CTS flow control flag and modem status interrupts */ |
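With the hunk above, change_speed() now reports the rate it can actually generate back to the tty layer instead of silently rounding: the divisor is computed from BASE_BAUD, then converted back and handed to tty_encode_baud_rate(). Condensed sketch (the ASYNC_SPD_CUST custom-divisor branch is left out):

        baud = tty_get_baud_rate(info->tty);
        if (baud == 134)                        /* 134 is really 134.5 */
                quot = (2 * BASE_BAUD / 269);
        else if (baud)
                quot = BASE_BAUD / baud;
        if (!quot)                              /* zero quotient: default to 9600 bps */
                quot = BASE_BAUD / 9600;

        if (baud) {
                baud = BASE_BAUD / quot;        /* rate the divisor really gives */
                tty_encode_baud_rate(info->tty, baud, baud);
        }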
| @@ -1066,10 +1061,8 @@ static void change_speed(struct esp_struct *info) | |||
| 1066 | info->flags &= ~ASYNC_CTS_FLOW; | 1061 | info->flags &= ~ASYNC_CTS_FLOW; |
| 1067 | if (cflag & CLOCAL) | 1062 | if (cflag & CLOCAL) |
| 1068 | info->flags &= ~ASYNC_CHECK_CD; | 1063 | info->flags &= ~ASYNC_CHECK_CD; |
| 1069 | else { | 1064 | else |
| 1070 | info->flags |= ASYNC_CHECK_CD; | 1065 | info->flags |= ASYNC_CHECK_CD; |
| 1071 | /* info->IER |= UART_IER_MSI; */ | ||
| 1072 | } | ||
| 1073 | 1066 | ||
| 1074 | /* | 1067 | /* |
| 1075 | * Set up parity check flag | 1068 | * Set up parity check flag |
| @@ -1079,7 +1072,7 @@ static void change_speed(struct esp_struct *info) | |||
| 1079 | info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; | 1072 | info->read_status_mask |= UART_LSR_FE | UART_LSR_PE; |
| 1080 | if (I_BRKINT(info->tty) || I_PARMRK(info->tty)) | 1073 | if (I_BRKINT(info->tty) || I_PARMRK(info->tty)) |
| 1081 | info->read_status_mask |= UART_LSR_BI; | 1074 | info->read_status_mask |= UART_LSR_BI; |
| 1082 | 1075 | ||
| 1083 | info->ignore_status_mask = 0; | 1076 | info->ignore_status_mask = 0; |
| 1084 | #if 0 | 1077 | #if 0 |
| 1085 | /* This should be safe, but for some broken bits of hardware... */ | 1078 | /* This should be safe, but for some broken bits of hardware... */ |
| @@ -1092,7 +1085,7 @@ static void change_speed(struct esp_struct *info) | |||
| 1092 | info->ignore_status_mask |= UART_LSR_BI; | 1085 | info->ignore_status_mask |= UART_LSR_BI; |
| 1093 | info->read_status_mask |= UART_LSR_BI; | 1086 | info->read_status_mask |= UART_LSR_BI; |
| 1094 | /* | 1087 | /* |
| 1095 | * If we're ignore parity and break indicators, ignore | 1088 | * If we're ignore parity and break indicators, ignore |
| 1096 | * overruns too. (For real raw support). | 1089 | * overruns too. (For real raw support). |
| 1097 | */ | 1090 | */ |
| 1098 | if (I_IGNPAR(info->tty)) { | 1091 | if (I_IGNPAR(info->tty)) { |
| @@ -1130,19 +1123,19 @@ static void change_speed(struct esp_struct *info) | |||
| 1130 | serial_out(info, UART_ESI_CMD2, 0x10); | 1123 | serial_out(info, UART_ESI_CMD2, 0x10); |
| 1131 | serial_out(info, UART_ESI_CMD2, 0x21); | 1124 | serial_out(info, UART_ESI_CMD2, 0x21); |
| 1132 | switch (cflag & CSIZE) { | 1125 | switch (cflag & CSIZE) { |
| 1133 | case CS5: | 1126 | case CS5: |
| 1134 | serial_out(info, UART_ESI_CMD2, 0x1f); | 1127 | serial_out(info, UART_ESI_CMD2, 0x1f); |
| 1135 | break; | 1128 | break; |
| 1136 | case CS6: | 1129 | case CS6: |
| 1137 | serial_out(info, UART_ESI_CMD2, 0x3f); | 1130 | serial_out(info, UART_ESI_CMD2, 0x3f); |
| 1138 | break; | 1131 | break; |
| 1139 | case CS7: | 1132 | case CS7: |
| 1140 | case CS8: | 1133 | case CS8: |
| 1141 | serial_out(info, UART_ESI_CMD2, 0x7f); | 1134 | serial_out(info, UART_ESI_CMD2, 0x7f); |
| 1142 | break; | 1135 | break; |
| 1143 | default: | 1136 | default: |
| 1144 | serial_out(info, UART_ESI_CMD2, 0xff); | 1137 | serial_out(info, UART_ESI_CMD2, 0xff); |
| 1145 | break; | 1138 | break; |
| 1146 | } | 1139 | } |
| 1147 | } | 1140 | } |
| 1148 | 1141 | ||
| @@ -1156,31 +1149,34 @@ static void change_speed(struct esp_struct *info) | |||
| 1156 | spin_unlock_irqrestore(&info->lock, flags); | 1149 | spin_unlock_irqrestore(&info->lock, flags); |
| 1157 | } | 1150 | } |
| 1158 | 1151 | ||
| 1159 | static void rs_put_char(struct tty_struct *tty, unsigned char ch) | 1152 | static int rs_put_char(struct tty_struct *tty, unsigned char ch) |
| 1160 | { | 1153 | { |
| 1161 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1154 | struct esp_struct *info = tty->driver_data; |
| 1162 | unsigned long flags; | 1155 | unsigned long flags; |
| 1156 | int ret = 0; | ||
| 1163 | 1157 | ||
| 1164 | if (serial_paranoia_check(info, tty->name, "rs_put_char")) | 1158 | if (serial_paranoia_check(info, tty->name, "rs_put_char")) |
| 1165 | return; | 1159 | return 0; |
| 1166 | 1160 | ||
| 1167 | if (!info->xmit_buf) | 1161 | if (!info->xmit_buf) |
| 1168 | return; | 1162 | return 0; |
| 1169 | 1163 | ||
| 1170 | spin_lock_irqsave(&info->lock, flags); | 1164 | spin_lock_irqsave(&info->lock, flags); |
| 1171 | if (info->xmit_cnt < ESP_XMIT_SIZE - 1) { | 1165 | if (info->xmit_cnt < ESP_XMIT_SIZE - 1) { |
| 1172 | info->xmit_buf[info->xmit_head++] = ch; | 1166 | info->xmit_buf[info->xmit_head++] = ch; |
| 1173 | info->xmit_head &= ESP_XMIT_SIZE-1; | 1167 | info->xmit_head &= ESP_XMIT_SIZE-1; |
| 1174 | info->xmit_cnt++; | 1168 | info->xmit_cnt++; |
| 1169 | ret = 1; | ||
| 1175 | } | 1170 | } |
| 1176 | spin_unlock_irqrestore(&info->lock, flags); | 1171 | spin_unlock_irqrestore(&info->lock, flags); |
| 1172 | return ret; | ||
| 1177 | } | 1173 | } |
| 1178 | 1174 | ||
| 1179 | static void rs_flush_chars(struct tty_struct *tty) | 1175 | static void rs_flush_chars(struct tty_struct *tty) |
| 1180 | { | 1176 | { |
| 1181 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1177 | struct esp_struct *info = tty->driver_data; |
| 1182 | unsigned long flags; | 1178 | unsigned long flags; |
| 1183 | 1179 | ||
| 1184 | if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) | 1180 | if (serial_paranoia_check(info, tty->name, "rs_flush_chars")) |
| 1185 | return; | 1181 | return; |
| 1186 | 1182 | ||
| @@ -1198,11 +1194,11 @@ out: | |||
| 1198 | spin_unlock_irqrestore(&info->lock, flags); | 1194 | spin_unlock_irqrestore(&info->lock, flags); |
| 1199 | } | 1195 | } |
| 1200 | 1196 | ||
| 1201 | static int rs_write(struct tty_struct * tty, | 1197 | static int rs_write(struct tty_struct *tty, |
| 1202 | const unsigned char *buf, int count) | 1198 | const unsigned char *buf, int count) |
| 1203 | { | 1199 | { |
| 1204 | int c, t, ret = 0; | 1200 | int c, t, ret = 0; |
| 1205 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1201 | struct esp_struct *info = tty->driver_data; |
| 1206 | unsigned long flags; | 1202 | unsigned long flags; |
| 1207 | 1203 | ||
| 1208 | if (serial_paranoia_check(info, tty->name, "rs_write")) | 1204 | if (serial_paranoia_check(info, tty->name, "rs_write")) |
| @@ -1210,19 +1206,19 @@ static int rs_write(struct tty_struct * tty, | |||
| 1210 | 1206 | ||
| 1211 | if (!info->xmit_buf) | 1207 | if (!info->xmit_buf) |
| 1212 | return 0; | 1208 | return 0; |
| 1213 | 1209 | ||
| 1214 | while (1) { | 1210 | while (1) { |
| 1215 | /* Thanks to R. Wolff for suggesting how to do this with */ | 1211 | /* Thanks to R. Wolff for suggesting how to do this with */ |
| 1216 | /* interrupts enabled */ | 1212 | /* interrupts enabled */ |
| 1217 | 1213 | ||
| 1218 | c = count; | 1214 | c = count; |
| 1219 | t = ESP_XMIT_SIZE - info->xmit_cnt - 1; | 1215 | t = ESP_XMIT_SIZE - info->xmit_cnt - 1; |
| 1220 | 1216 | ||
| 1221 | if (t < c) | 1217 | if (t < c) |
| 1222 | c = t; | 1218 | c = t; |
| 1223 | 1219 | ||
| 1224 | t = ESP_XMIT_SIZE - info->xmit_head; | 1220 | t = ESP_XMIT_SIZE - info->xmit_head; |
| 1225 | 1221 | ||
| 1226 | if (t < c) | 1222 | if (t < c) |
| 1227 | c = t; | 1223 | c = t; |
| 1228 | 1224 | ||
| @@ -1252,10 +1248,10 @@ static int rs_write(struct tty_struct * tty, | |||
| 1252 | 1248 | ||
| 1253 | static int rs_write_room(struct tty_struct *tty) | 1249 | static int rs_write_room(struct tty_struct *tty) |
| 1254 | { | 1250 | { |
| 1255 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1251 | struct esp_struct *info = tty->driver_data; |
| 1256 | int ret; | 1252 | int ret; |
| 1257 | unsigned long flags; | 1253 | unsigned long flags; |
| 1258 | 1254 | ||
| 1259 | if (serial_paranoia_check(info, tty->name, "rs_write_room")) | 1255 | if (serial_paranoia_check(info, tty->name, "rs_write_room")) |
| 1260 | return 0; | 1256 | return 0; |
| 1261 | 1257 | ||
| @@ -1270,8 +1266,8 @@ static int rs_write_room(struct tty_struct *tty) | |||
| 1270 | 1266 | ||
| 1271 | static int rs_chars_in_buffer(struct tty_struct *tty) | 1267 | static int rs_chars_in_buffer(struct tty_struct *tty) |
| 1272 | { | 1268 | { |
| 1273 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1269 | struct esp_struct *info = tty->driver_data; |
| 1274 | 1270 | ||
| 1275 | if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) | 1271 | if (serial_paranoia_check(info, tty->name, "rs_chars_in_buffer")) |
| 1276 | return 0; | 1272 | return 0; |
| 1277 | return info->xmit_cnt; | 1273 | return info->xmit_cnt; |
| @@ -1279,9 +1275,9 @@ static int rs_chars_in_buffer(struct tty_struct *tty) | |||
| 1279 | 1275 | ||
| 1280 | static void rs_flush_buffer(struct tty_struct *tty) | 1276 | static void rs_flush_buffer(struct tty_struct *tty) |
| 1281 | { | 1277 | { |
| 1282 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1278 | struct esp_struct *info = tty->driver_data; |
| 1283 | unsigned long flags; | 1279 | unsigned long flags; |
| 1284 | 1280 | ||
| 1285 | if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) | 1281 | if (serial_paranoia_check(info, tty->name, "rs_flush_buffer")) |
| 1286 | return; | 1282 | return; |
| 1287 | spin_lock_irqsave(&info->lock, flags); | 1283 | spin_lock_irqsave(&info->lock, flags); |
| @@ -1293,20 +1289,20 @@ static void rs_flush_buffer(struct tty_struct *tty) | |||
| 1293 | /* | 1289 | /* |
| 1294 | * ------------------------------------------------------------ | 1290 | * ------------------------------------------------------------ |
| 1295 | * rs_throttle() | 1291 | * rs_throttle() |
| 1296 | * | 1292 | * |
| 1297 | * This routine is called by the upper-layer tty layer to signal that | 1293 | * This routine is called by the upper-layer tty layer to signal that |
| 1298 | * incoming characters should be throttled. | 1294 | * incoming characters should be throttled. |
| 1299 | * ------------------------------------------------------------ | 1295 | * ------------------------------------------------------------ |
| 1300 | */ | 1296 | */ |
| 1301 | static void rs_throttle(struct tty_struct * tty) | 1297 | static void rs_throttle(struct tty_struct *tty) |
| 1302 | { | 1298 | { |
| 1303 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1299 | struct esp_struct *info = tty->driver_data; |
| 1304 | unsigned long flags; | 1300 | unsigned long flags; |
| 1305 | #ifdef SERIAL_DEBUG_THROTTLE | 1301 | #ifdef SERIAL_DEBUG_THROTTLE |
| 1306 | char buf[64]; | 1302 | char buf[64]; |
| 1307 | 1303 | ||
| 1308 | printk("throttle %s: %d....\n", tty_name(tty, buf), | 1304 | printk("throttle %s: %d....\n", tty_name(tty, buf), |
| 1309 | tty->ldisc.chars_in_buffer(tty)); | 1305 | tty_chars_in_buffer(tty)); |
| 1310 | #endif | 1306 | #endif |
| 1311 | 1307 | ||
| 1312 | if (serial_paranoia_check(info, tty->name, "rs_throttle")) | 1308 | if (serial_paranoia_check(info, tty->name, "rs_throttle")) |
| @@ -1321,20 +1317,20 @@ static void rs_throttle(struct tty_struct * tty) | |||
| 1321 | spin_unlock_irqrestore(&info->lock, flags); | 1317 | spin_unlock_irqrestore(&info->lock, flags); |
| 1322 | } | 1318 | } |
| 1323 | 1319 | ||
| 1324 | static void rs_unthrottle(struct tty_struct * tty) | 1320 | static void rs_unthrottle(struct tty_struct *tty) |
| 1325 | { | 1321 | { |
| 1326 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1322 | struct esp_struct *info = tty->driver_data; |
| 1327 | unsigned long flags; | 1323 | unsigned long flags; |
| 1328 | #ifdef SERIAL_DEBUG_THROTTLE | 1324 | #ifdef SERIAL_DEBUG_THROTTLE |
| 1329 | char buf[64]; | 1325 | char buf[64]; |
| 1330 | 1326 | ||
| 1331 | printk("unthrottle %s: %d....\n", tty_name(tty, buf), | 1327 | printk(KERN_DEBUG "unthrottle %s: %d....\n", tty_name(tty, buf), |
| 1332 | tty->ldisc.chars_in_buffer(tty)); | 1328 | tty_chars_in_buffer(tty)); |
| 1333 | #endif | 1329 | #endif |
| 1334 | 1330 | ||
| 1335 | if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) | 1331 | if (serial_paranoia_check(info, tty->name, "rs_unthrottle")) |
| 1336 | return; | 1332 | return; |
| 1337 | 1333 | ||
| 1338 | spin_lock_irqsave(&info->lock, flags); | 1334 | spin_lock_irqsave(&info->lock, flags); |
| 1339 | info->IER |= UART_IER_RDI; | 1335 | info->IER |= UART_IER_RDI; |
| 1340 | serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); | 1336 | serial_out(info, UART_ESI_CMD1, ESI_SET_SRV_MASK); |
| @@ -1350,11 +1346,12 @@ static void rs_unthrottle(struct tty_struct * tty) | |||
| 1350 | * ------------------------------------------------------------ | 1346 | * ------------------------------------------------------------ |
| 1351 | */ | 1347 | */ |
| 1352 | 1348 | ||
| 1353 | static int get_serial_info(struct esp_struct * info, | 1349 | static int get_serial_info(struct esp_struct *info, |
| 1354 | struct serial_struct __user *retinfo) | 1350 | struct serial_struct __user *retinfo) |
| 1355 | { | 1351 | { |
| 1356 | struct serial_struct tmp; | 1352 | struct serial_struct tmp; |
| 1357 | 1353 | ||
| 1354 | lock_kernel(); | ||
| 1358 | memset(&tmp, 0, sizeof(tmp)); | 1355 | memset(&tmp, 0, sizeof(tmp)); |
| 1359 | tmp.type = PORT_16550A; | 1356 | tmp.type = PORT_16550A; |
| 1360 | tmp.line = info->line; | 1357 | tmp.line = info->line; |
| @@ -1367,20 +1364,22 @@ static int get_serial_info(struct esp_struct * info, | |||
| 1367 | tmp.closing_wait = info->closing_wait; | 1364 | tmp.closing_wait = info->closing_wait; |
| 1368 | tmp.custom_divisor = info->custom_divisor; | 1365 | tmp.custom_divisor = info->custom_divisor; |
| 1369 | tmp.hub6 = 0; | 1366 | tmp.hub6 = 0; |
| 1370 | if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) | 1367 | unlock_kernel(); |
| 1368 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | ||
| 1371 | return -EFAULT; | 1369 | return -EFAULT; |
| 1372 | return 0; | 1370 | return 0; |
| 1373 | } | 1371 | } |
| 1374 | 1372 | ||
| 1375 | static int get_esp_config(struct esp_struct * info, | 1373 | static int get_esp_config(struct esp_struct *info, |
| 1376 | struct hayes_esp_config __user *retinfo) | 1374 | struct hayes_esp_config __user *retinfo) |
| 1377 | { | 1375 | { |
| 1378 | struct hayes_esp_config tmp; | 1376 | struct hayes_esp_config tmp; |
| 1379 | 1377 | ||
| 1380 | if (!retinfo) | 1378 | if (!retinfo) |
| 1381 | return -EFAULT; | 1379 | return -EFAULT; |
| 1382 | 1380 | ||
| 1383 | memset(&tmp, 0, sizeof(tmp)); | 1381 | memset(&tmp, 0, sizeof(tmp)); |
| 1382 | lock_kernel(); | ||
| 1384 | tmp.rx_timeout = info->config.rx_timeout; | 1383 | tmp.rx_timeout = info->config.rx_timeout; |
| 1385 | tmp.rx_trigger = info->config.rx_trigger; | 1384 | tmp.rx_trigger = info->config.rx_trigger; |
| 1386 | tmp.tx_trigger = info->config.tx_trigger; | 1385 | tmp.tx_trigger = info->config.tx_trigger; |
| @@ -1388,11 +1387,12 @@ static int get_esp_config(struct esp_struct * info, | |||
| 1388 | tmp.flow_on = info->config.flow_on; | 1387 | tmp.flow_on = info->config.flow_on; |
| 1389 | tmp.pio_threshold = info->config.pio_threshold; | 1388 | tmp.pio_threshold = info->config.pio_threshold; |
| 1390 | tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma); | 1389 | tmp.dma_channel = (info->stat_flags & ESP_STAT_NEVER_DMA ? 0 : dma); |
| 1390 | unlock_kernel(); | ||
| 1391 | 1391 | ||
| 1392 | return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; | 1392 | return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; |
| 1393 | } | 1393 | } |
| 1394 | 1394 | ||
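For readers following the locking change: the lock_kernel()/unlock_kernel() calls that appear in get_serial_info() and get_esp_config() above (and, further down, around the set_serial_info()/set_esp_config() calls in rs_ioctl()) are the BKL push-down pattern of this kernel series: the Big Kernel Lock is taken inside each ioctl helper rather than around the whole ioctl entry point, and is dropped before the copy to user space. A minimal sketch of that pattern, with a hypothetical port type and snapshot struct (not this driver's own structures), assuming the pre-2.6.39 <linux/smp_lock.h> API:

    #include <linux/smp_lock.h>      /* lock_kernel(), unlock_kernel() */
    #include <linux/uaccess.h>       /* copy_to_user() */
    #include <linux/string.h>        /* memset() */

    struct example_port {            /* hypothetical driver state */
            int line;
            int flags;
    };

    struct example_info {            /* hypothetical user-visible snapshot */
            int line;
            int flags;
    };

    static int example_get_info(struct example_port *p,
                                struct example_info __user *uinfo)
    {
            struct example_info tmp;

            memset(&tmp, 0, sizeof(tmp));
            lock_kernel();           /* serialize against other BKL users */
            tmp.line  = p->line;
            tmp.flags = p->flags;
            unlock_kernel();         /* drop the BKL before a possible page fault */
            if (copy_to_user(uinfo, &tmp, sizeof(tmp)))
                    return -EFAULT;
            return 0;
    }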
| 1395 | static int set_serial_info(struct esp_struct * info, | 1395 | static int set_serial_info(struct esp_struct *info, |
| 1396 | struct serial_struct __user *new_info) | 1396 | struct serial_struct __user *new_info) |
| 1397 | { | 1397 | { |
| 1398 | struct serial_struct new_serial; | 1398 | struct serial_struct new_serial; |
| @@ -1401,7 +1401,7 @@ static int set_serial_info(struct esp_struct * info, | |||
| 1401 | int retval = 0; | 1401 | int retval = 0; |
| 1402 | struct esp_struct *current_async; | 1402 | struct esp_struct *current_async; |
| 1403 | 1403 | ||
| 1404 | if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) | 1404 | if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) |
| 1405 | return -EFAULT; | 1405 | return -EFAULT; |
| 1406 | old_info = *info; | 1406 | old_info = *info; |
| 1407 | 1407 | ||
| @@ -1422,7 +1422,7 @@ static int set_serial_info(struct esp_struct * info, | |||
| 1422 | return -EINVAL; | 1422 | return -EINVAL; |
| 1423 | 1423 | ||
| 1424 | if (!capable(CAP_SYS_ADMIN)) { | 1424 | if (!capable(CAP_SYS_ADMIN)) { |
| 1425 | if (change_irq || | 1425 | if (change_irq || |
| 1426 | (new_serial.close_delay != info->close_delay) || | 1426 | (new_serial.close_delay != info->close_delay) || |
| 1427 | ((new_serial.flags & ~ASYNC_USR_MASK) != | 1427 | ((new_serial.flags & ~ASYNC_USR_MASK) != |
| 1428 | (info->flags & ~ASYNC_USR_MASK))) | 1428 | (info->flags & ~ASYNC_USR_MASK))) |
| @@ -1507,8 +1507,8 @@ static int set_serial_info(struct esp_struct * info, | |||
| 1507 | return retval; | 1507 | return retval; |
| 1508 | } | 1508 | } |
| 1509 | 1509 | ||
| 1510 | static int set_esp_config(struct esp_struct * info, | 1510 | static int set_esp_config(struct esp_struct *info, |
| 1511 | struct hayes_esp_config __user * new_info) | 1511 | struct hayes_esp_config __user *new_info) |
| 1512 | { | 1512 | { |
| 1513 | struct hayes_esp_config new_config; | 1513 | struct hayes_esp_config new_config; |
| 1514 | unsigned int change_dma; | 1514 | unsigned int change_dma; |
| @@ -1550,7 +1550,6 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1550 | if (new_config.dma_channel) { | 1550 | if (new_config.dma_channel) { |
| 1551 | /* PIO mode to DMA mode transition OR */ | 1551 | /* PIO mode to DMA mode transition OR */ |
| 1552 | /* change current DMA channel */ | 1552 | /* change current DMA channel */ |
| 1553 | |||
| 1554 | current_async = ports; | 1553 | current_async = ports; |
| 1555 | 1554 | ||
| 1556 | while (current_async) { | 1555 | while (current_async) { |
| @@ -1559,16 +1558,15 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1559 | return -EBUSY; | 1558 | return -EBUSY; |
| 1560 | } else if (current_async->count) | 1559 | } else if (current_async->count) |
| 1561 | return -EBUSY; | 1560 | return -EBUSY; |
| 1562 | 1561 | ||
| 1563 | current_async = | 1562 | current_async = current_async->next_port; |
| 1564 | current_async->next_port; | ||
| 1565 | } | 1563 | } |
| 1566 | 1564 | ||
| 1567 | shutdown(info); | 1565 | shutdown(info); |
| 1568 | dma = new_config.dma_channel; | 1566 | dma = new_config.dma_channel; |
| 1569 | info->stat_flags &= ~ESP_STAT_NEVER_DMA; | 1567 | info->stat_flags &= ~ESP_STAT_NEVER_DMA; |
| 1570 | 1568 | ||
| 1571 | /* all ports must use the same DMA channel */ | 1569 | /* all ports must use the same DMA channel */ |
| 1572 | 1570 | ||
| 1573 | spin_lock_irqsave(&info->lock, flags); | 1571 | spin_lock_irqsave(&info->lock, flags); |
| 1574 | current_async = ports; | 1572 | current_async = ports; |
| @@ -1580,7 +1578,6 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1580 | spin_unlock_irqrestore(&info->lock, flags); | 1578 | spin_unlock_irqrestore(&info->lock, flags); |
| 1581 | } else { | 1579 | } else { |
| 1582 | /* DMA mode to PIO mode only */ | 1580 | /* DMA mode to PIO mode only */ |
| 1583 | |||
| 1584 | if (info->count > 1) | 1581 | if (info->count > 1) |
| 1585 | return -EBUSY; | 1582 | return -EBUSY; |
| 1586 | 1583 | ||
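The two branches above encode the reconfiguration rule of set_esp_config(): switching to DMA, or changing the DMA channel, is refused with -EBUSY while any other port on the board chain is open, because (as the retained comment says) all ports must share the one DMA channel, whereas switching back to PIO only requires that the current port has a single opener. A condensed sketch of that check, with a hypothetical port type standing in for struct esp_struct:

    struct example_port {                    /* hypothetical stand-in */
            int count;                       /* open handles on this port */
            struct example_port *next_port;  /* singly linked board chain */
    };

    /* Return 1 when it is safe to retarget the shared DMA channel. */
    static int example_may_change_dma(struct example_port *ports,
                                      struct example_port *self)
    {
            struct example_port *p;

            for (p = ports; p; p = p->next_port) {
                    if (p == self) {
                            if (p->count > 1)   /* other openers of this port */
                                    return 0;
                    } else if (p->count) {      /* any opener of another port */
                            return 0;
                    }
            }
            return 1;
    }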
| @@ -1596,8 +1593,6 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1596 | 1593 | ||
| 1597 | if ((new_config.flow_off != info->config.flow_off) || | 1594 | if ((new_config.flow_off != info->config.flow_off) || |
| 1598 | (new_config.flow_on != info->config.flow_on)) { | 1595 | (new_config.flow_on != info->config.flow_on)) { |
| 1599 | unsigned long flags; | ||
| 1600 | |||
| 1601 | info->config.flow_off = new_config.flow_off; | 1596 | info->config.flow_off = new_config.flow_off; |
| 1602 | info->config.flow_on = new_config.flow_on; | 1597 | info->config.flow_on = new_config.flow_on; |
| 1603 | 1598 | ||
| @@ -1612,8 +1607,6 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1612 | 1607 | ||
| 1613 | if ((new_config.rx_trigger != info->config.rx_trigger) || | 1608 | if ((new_config.rx_trigger != info->config.rx_trigger) || |
| 1614 | (new_config.tx_trigger != info->config.tx_trigger)) { | 1609 | (new_config.tx_trigger != info->config.tx_trigger)) { |
| 1615 | unsigned long flags; | ||
| 1616 | |||
| 1617 | info->config.rx_trigger = new_config.rx_trigger; | 1610 | info->config.rx_trigger = new_config.rx_trigger; |
| 1618 | info->config.tx_trigger = new_config.tx_trigger; | 1611 | info->config.tx_trigger = new_config.tx_trigger; |
| 1619 | spin_lock_irqsave(&info->lock, flags); | 1612 | spin_lock_irqsave(&info->lock, flags); |
| @@ -1628,8 +1621,6 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1628 | } | 1621 | } |
| 1629 | 1622 | ||
| 1630 | if (new_config.rx_timeout != info->config.rx_timeout) { | 1623 | if (new_config.rx_timeout != info->config.rx_timeout) { |
| 1631 | unsigned long flags; | ||
| 1632 | |||
| 1633 | info->config.rx_timeout = new_config.rx_timeout; | 1624 | info->config.rx_timeout = new_config.rx_timeout; |
| 1634 | spin_lock_irqsave(&info->lock, flags); | 1625 | spin_lock_irqsave(&info->lock, flags); |
| 1635 | 1626 | ||
| @@ -1657,9 +1648,9 @@ static int set_esp_config(struct esp_struct * info, | |||
| 1657 | * release the bus after transmitting. This must be done when | 1648 | * release the bus after transmitting. This must be done when |
| 1658 | * the transmit shift register is empty, not be done when the | 1649 | * the transmit shift register is empty, not be done when the |
| 1659 | * transmit holding register is empty. This functionality | 1650 | * transmit holding register is empty. This functionality |
| 1660 | * allows an RS485 driver to be written in user space. | 1651 | * allows an RS485 driver to be written in user space. |
| 1661 | */ | 1652 | */ |
| 1662 | static int get_lsr_info(struct esp_struct * info, unsigned int __user *value) | 1653 | static int get_lsr_info(struct esp_struct *info, unsigned int __user *value) |
| 1663 | { | 1654 | { |
| 1664 | unsigned char status; | 1655 | unsigned char status; |
| 1665 | unsigned int result; | 1656 | unsigned int result; |
| @@ -1670,17 +1661,17 @@ static int get_lsr_info(struct esp_struct * info, unsigned int __user *value) | |||
| 1670 | status = serial_in(info, UART_ESI_STAT1); | 1661 | status = serial_in(info, UART_ESI_STAT1); |
| 1671 | spin_unlock_irqrestore(&info->lock, flags); | 1662 | spin_unlock_irqrestore(&info->lock, flags); |
| 1672 | result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); | 1663 | result = ((status & UART_LSR_TEMT) ? TIOCSER_TEMT : 0); |
| 1673 | return put_user(result,value); | 1664 | return put_user(result, value); |
| 1674 | } | 1665 | } |
| 1675 | 1666 | ||
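The comment above get_lsr_info() explains why the driver exposes the transmit-shift-register-empty bit: a user-space RS485 driver can poll it and only release the bus once the last bit has physically left the UART. A hedged user-space sketch of that idea (the RTS-as-transmit-enable wiring is an assumption for illustration, not something this driver mandates):

    #include <sys/ioctl.h>        /* TIOCSERGETLSR, TIOCMBIC, TIOCM_RTS */
    #include <linux/serial.h>     /* TIOCSER_TEMT */

    /* Wait until the transmit shift register is empty, then drop RTS,
     * which on the assumed wiring disables the RS485 transmitter. */
    static int rs485_end_of_frame(int fd)
    {
            unsigned int lsr = 0, rts = TIOCM_RTS;

            do {
                    if (ioctl(fd, TIOCSERGETLSR, &lsr) < 0)
                            return -1;
            } while (!(lsr & TIOCSER_TEMT));

            return ioctl(fd, TIOCMBIC, &rts);
    }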
| 1676 | 1667 | ||
| 1677 | static int esp_tiocmget(struct tty_struct *tty, struct file *file) | 1668 | static int esp_tiocmget(struct tty_struct *tty, struct file *file) |
| 1678 | { | 1669 | { |
| 1679 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 1670 | struct esp_struct *info = tty->driver_data; |
| 1680 | unsigned char control, status; | 1671 | unsigned char control, status; |
| 1681 | unsigned long flags; | 1672 | unsigned long flags; |
| 1682 | 1673 | ||
| 1683 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 1674 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 1684 | return -ENODEV; | 1675 | return -ENODEV; |
| 1685 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1676 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| 1686 | return -EIO; | 1677 | return -EIO; |
| @@ -1703,10 +1694,10 @@ static int esp_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 1703 | static int esp_tiocmset(struct tty_struct *tty, struct file *file, | 1694 | static int esp_tiocmset(struct tty_struct *tty, struct file *file, |
| 1704 | unsigned int set, unsigned int clear) | 1695 | unsigned int set, unsigned int clear) |
| 1705 | { | 1696 | { |
| 1706 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 1697 | struct esp_struct *info = tty->driver_data; |
| 1707 | unsigned long flags; | 1698 | unsigned long flags; |
| 1708 | 1699 | ||
| 1709 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 1700 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 1710 | return -ENODEV; | 1701 | return -ENODEV; |
| 1711 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1702 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| 1712 | return -EIO; | 1703 | return -EIO; |
| @@ -1736,9 +1727,9 @@ static int esp_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1736 | */ | 1727 | */ |
| 1737 | static void esp_break(struct tty_struct *tty, int break_state) | 1728 | static void esp_break(struct tty_struct *tty, int break_state) |
| 1738 | { | 1729 | { |
| 1739 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 1730 | struct esp_struct *info = tty->driver_data; |
| 1740 | unsigned long flags; | 1731 | unsigned long flags; |
| 1741 | 1732 | ||
| 1742 | if (serial_paranoia_check(info, tty->name, "esp_break")) | 1733 | if (serial_paranoia_check(info, tty->name, "esp_break")) |
| 1743 | return; | 1734 | return; |
| 1744 | 1735 | ||
| @@ -1758,14 +1749,15 @@ static void esp_break(struct tty_struct *tty, int break_state) | |||
| 1758 | } | 1749 | } |
| 1759 | } | 1750 | } |
| 1760 | 1751 | ||
| 1761 | static int rs_ioctl(struct tty_struct *tty, struct file * file, | 1752 | static int rs_ioctl(struct tty_struct *tty, struct file *file, |
| 1762 | unsigned int cmd, unsigned long arg) | 1753 | unsigned int cmd, unsigned long arg) |
| 1763 | { | 1754 | { |
| 1764 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 1755 | struct esp_struct *info = tty->driver_data; |
| 1765 | struct async_icount cprev, cnow; /* kernel counter temps */ | 1756 | struct async_icount cprev, cnow; /* kernel counter temps */ |
| 1766 | struct serial_icounter_struct __user *p_cuser; /* user space */ | 1757 | struct serial_icounter_struct __user *p_cuser; /* user space */ |
| 1767 | void __user *argp = (void __user *)arg; | 1758 | void __user *argp = (void __user *)arg; |
| 1768 | unsigned long flags; | 1759 | unsigned long flags; |
| 1760 | int ret; | ||
| 1769 | 1761 | ||
| 1770 | if (serial_paranoia_check(info, tty->name, "rs_ioctl")) | 1762 | if (serial_paranoia_check(info, tty->name, "rs_ioctl")) |
| 1771 | return -ENODEV; | 1763 | return -ENODEV; |
| @@ -1778,97 +1770,93 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, | |||
| 1778 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1770 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| 1779 | return -EIO; | 1771 | return -EIO; |
| 1780 | } | 1772 | } |
| 1781 | |||
| 1782 | switch (cmd) { | ||
| 1783 | case TIOCGSERIAL: | ||
| 1784 | return get_serial_info(info, argp); | ||
| 1785 | case TIOCSSERIAL: | ||
| 1786 | return set_serial_info(info, argp); | ||
| 1787 | case TIOCSERCONFIG: | ||
| 1788 | /* do not reconfigure after initial configuration */ | ||
| 1789 | return 0; | ||
| 1790 | |||
| 1791 | case TIOCSERGWILD: | ||
| 1792 | return put_user(0L, (unsigned long __user *)argp); | ||
| 1793 | 1773 | ||
| 1794 | case TIOCSERGETLSR: /* Get line status register */ | 1774 | switch (cmd) { |
| 1795 | return get_lsr_info(info, argp); | 1775 | case TIOCGSERIAL: |
| 1796 | 1776 | return get_serial_info(info, argp); | |
| 1797 | case TIOCSERSWILD: | 1777 | case TIOCSSERIAL: |
| 1798 | if (!capable(CAP_SYS_ADMIN)) | 1778 | lock_kernel(); |
| 1799 | return -EPERM; | 1779 | ret = set_serial_info(info, argp); |
| 1800 | return 0; | 1780 | unlock_kernel(); |
| 1801 | 1781 | return ret; | |
| 1802 | /* | 1782 | case TIOCSERGWILD: |
| 1803 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | 1783 | return put_user(0L, (unsigned long __user *)argp); |
| 1804 | * - mask passed in arg for lines of interest | 1784 | case TIOCSERGETLSR: /* Get line status register */ |
| 1805 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | 1785 | return get_lsr_info(info, argp); |
| 1806 | * Caller should use TIOCGICOUNT to see which one it was | 1786 | case TIOCSERSWILD: |
| 1807 | */ | 1787 | if (!capable(CAP_SYS_ADMIN)) |
| 1808 | case TIOCMIWAIT: | 1788 | return -EPERM; |
| 1789 | return 0; | ||
| 1790 | /* | ||
| 1791 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | ||
| 1792 | * - mask passed in arg for lines of interest | ||
| 1793 | * (use |'ed TIOCM_RNG/DSR/CD/CTS for masking) | ||
| 1794 | * Caller should use TIOCGICOUNT to see which one it was | ||
| 1795 | */ | ||
| 1796 | case TIOCMIWAIT: | ||
| 1797 | spin_lock_irqsave(&info->lock, flags); | ||
| 1798 | cprev = info->icount; /* note the counters on entry */ | ||
| 1799 | spin_unlock_irqrestore(&info->lock, flags); | ||
| 1800 | while (1) { | ||
| 1801 | /* FIXME: convert to new style wakeup */ | ||
| 1802 | interruptible_sleep_on(&info->delta_msr_wait); | ||
| 1803 | /* see if a signal did it */ | ||
| 1804 | if (signal_pending(current)) | ||
| 1805 | return -ERESTARTSYS; | ||
| 1809 | spin_lock_irqsave(&info->lock, flags); | 1806 | spin_lock_irqsave(&info->lock, flags); |
| 1810 | cprev = info->icount; /* note the counters on entry */ | 1807 | cnow = info->icount; /* atomic copy */ |
| 1811 | spin_unlock_irqrestore(&info->lock, flags); | 1808 | spin_unlock_irqrestore(&info->lock, flags); |
| 1812 | while (1) { | 1809 | if (cnow.rng == cprev.rng && |
| 1813 | /* FIXME: convert to new style wakeup */ | 1810 | cnow.dsr == cprev.dsr && |
| 1814 | interruptible_sleep_on(&info->delta_msr_wait); | 1811 | cnow.dcd == cprev.dcd && |
| 1815 | /* see if a signal did it */ | 1812 | cnow.cts == cprev.cts) |
| 1816 | if (signal_pending(current)) | 1813 | return -EIO; /* no change => error */ |
| 1817 | return -ERESTARTSYS; | 1814 | if (((arg & TIOCM_RNG) && |
| 1818 | spin_lock_irqsave(&info->lock, flags); | 1815 | (cnow.rng != cprev.rng)) || |
| 1819 | cnow = info->icount; /* atomic copy */ | 1816 | ((arg & TIOCM_DSR) && |
| 1820 | spin_unlock_irqrestore(&info->lock, flags); | 1817 | (cnow.dsr != cprev.dsr)) || |
| 1821 | if (cnow.rng == cprev.rng && | 1818 | ((arg & TIOCM_CD) && |
| 1822 | cnow.dsr == cprev.dsr && | 1819 | (cnow.dcd != cprev.dcd)) || |
| 1823 | cnow.dcd == cprev.dcd && | 1820 | ((arg & TIOCM_CTS) && |
| 1824 | cnow.cts == cprev.cts) | 1821 | (cnow.cts != cprev.cts))) { |
| 1825 | return -EIO; /* no change => error */ | 1822 | return 0; |
| 1826 | if (((arg & TIOCM_RNG) && | ||
| 1827 | (cnow.rng != cprev.rng)) || | ||
| 1828 | ((arg & TIOCM_DSR) && | ||
| 1829 | (cnow.dsr != cprev.dsr)) || | ||
| 1830 | ((arg & TIOCM_CD) && | ||
| 1831 | (cnow.dcd != cprev.dcd)) || | ||
| 1832 | ((arg & TIOCM_CTS) && | ||
| 1833 | (cnow.cts != cprev.cts)) ) { | ||
| 1834 | return 0; | ||
| 1835 | } | ||
| 1836 | cprev = cnow; | ||
| 1837 | } | 1823 | } |
| 1838 | /* NOTREACHED */ | 1824 | cprev = cnow; |
| 1839 | 1825 | } | |
| 1840 | /* | 1826 | /* NOTREACHED */ |
| 1841 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) | 1827 | /* |
| 1842 | * Return: write counters to the user passed counter struct | 1828 | * Get counter of input serial line interrupts (DCD,RI,DSR,CTS) |
| 1843 | * NB: both 1->0 and 0->1 transitions are counted except for | 1829 | * Return: write counters to the user passed counter struct |
| 1844 | * RI where only 0->1 is counted. | 1830 | * NB: both 1->0 and 0->1 transitions are counted except for |
| 1845 | */ | 1831 | * RI where only 0->1 is counted. |
| 1846 | case TIOCGICOUNT: | 1832 | */ |
| 1847 | spin_lock_irqsave(&info->lock, flags); | 1833 | case TIOCGICOUNT: |
| 1848 | cnow = info->icount; | 1834 | spin_lock_irqsave(&info->lock, flags); |
| 1849 | spin_unlock_irqrestore(&info->lock, flags); | 1835 | cnow = info->icount; |
| 1850 | p_cuser = argp; | 1836 | spin_unlock_irqrestore(&info->lock, flags); |
| 1851 | if (put_user(cnow.cts, &p_cuser->cts) || | 1837 | p_cuser = argp; |
| 1852 | put_user(cnow.dsr, &p_cuser->dsr) || | 1838 | if (put_user(cnow.cts, &p_cuser->cts) || |
| 1853 | put_user(cnow.rng, &p_cuser->rng) || | 1839 | put_user(cnow.dsr, &p_cuser->dsr) || |
| 1854 | put_user(cnow.dcd, &p_cuser->dcd)) | 1840 | put_user(cnow.rng, &p_cuser->rng) || |
| 1855 | return -EFAULT; | 1841 | put_user(cnow.dcd, &p_cuser->dcd)) |
| 1856 | 1842 | return -EFAULT; | |
| 1857 | return 0; | 1843 | return 0; |
| 1858 | case TIOCGHAYESESP: | 1844 | case TIOCGHAYESESP: |
| 1859 | return get_esp_config(info, argp); | 1845 | return get_esp_config(info, argp); |
| 1860 | case TIOCSHAYESESP: | 1846 | case TIOCSHAYESESP: |
| 1861 | return set_esp_config(info, argp); | 1847 | lock_kernel(); |
| 1862 | 1848 | ret = set_esp_config(info, argp); | |
| 1863 | default: | 1849 | unlock_kernel(); |
| 1864 | return -ENOIOCTLCMD; | 1850 | return ret; |
| 1865 | } | 1851 | default: |
| 1852 | return -ENOIOCTLCMD; | ||
| 1853 | } | ||
| 1866 | return 0; | 1854 | return 0; |
| 1867 | } | 1855 | } |
| 1868 | 1856 | ||
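As the re-indented comments in rs_ioctl() describe, TIOCMIWAIT sleeps until one of the modem lines selected by the mask changes, and TIOCGICOUNT reports the per-line transition counters so the caller can tell which line fired. A small hedged user-space sketch of that pairing:

    #include <sys/ioctl.h>        /* TIOCMIWAIT, TIOCGICOUNT, TIOCM_CD */
    #include <linux/serial.h>     /* struct serial_icounter_struct */

    /* Block until carrier detect changes state, then fetch the counters.
     * Returns 0 on success, -1 with errno set otherwise. */
    static int wait_for_dcd_change(int fd, struct serial_icounter_struct *ic)
    {
            if (ioctl(fd, TIOCMIWAIT, TIOCM_CD) < 0)
                    return -1;            /* signal or port error */
            return ioctl(fd, TIOCGICOUNT, ic);
    }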
| 1869 | static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 1857 | static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) |
| 1870 | { | 1858 | { |
| 1871 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 1859 | struct esp_struct *info = tty->driver_data; |
| 1872 | unsigned long flags; | 1860 | unsigned long flags; |
| 1873 | 1861 | ||
| 1874 | change_speed(info); | 1862 | change_speed(info); |
| @@ -1905,32 +1893,33 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 1905 | /* | 1893 | /* |
| 1906 | * ------------------------------------------------------------ | 1894 | * ------------------------------------------------------------ |
| 1907 | * rs_close() | 1895 | * rs_close() |
| 1908 | * | 1896 | * |
| 1909 | * This routine is called when the serial port gets closed. First, we | 1897 | * This routine is called when the serial port gets closed. First, we |
| 1910 | * wait for the last remaining data to be sent. Then, we unlink its | 1898 | * wait for the last remaining data to be sent. Then, we unlink its |
| 1911 | * async structure from the interrupt chain if necessary, and we free | 1899 | * async structure from the interrupt chain if necessary, and we free |
| 1912 | * that IRQ if nothing is left in the chain. | 1900 | * that IRQ if nothing is left in the chain. |
| 1913 | * ------------------------------------------------------------ | 1901 | * ------------------------------------------------------------ |
| 1914 | */ | 1902 | */ |
| 1915 | static void rs_close(struct tty_struct *tty, struct file * filp) | 1903 | static void rs_close(struct tty_struct *tty, struct file *filp) |
| 1916 | { | 1904 | { |
| 1917 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 1905 | struct esp_struct *info = tty->driver_data; |
| 1918 | unsigned long flags; | 1906 | unsigned long flags; |
| 1919 | 1907 | ||
| 1920 | if (!info || serial_paranoia_check(info, tty->name, "rs_close")) | 1908 | if (!info || serial_paranoia_check(info, tty->name, "rs_close")) |
| 1921 | return; | 1909 | return; |
| 1922 | 1910 | ||
| 1923 | spin_lock_irqsave(&info->lock, flags); | 1911 | spin_lock_irqsave(&info->lock, flags); |
| 1924 | 1912 | ||
| 1925 | if (tty_hung_up_p(filp)) { | 1913 | if (tty_hung_up_p(filp)) { |
| 1926 | DBG_CNT("before DEC-hung"); | 1914 | DBG_CNT("before DEC-hung"); |
| 1927 | goto out; | 1915 | goto out; |
| 1928 | } | 1916 | } |
| 1929 | 1917 | ||
| 1930 | #ifdef SERIAL_DEBUG_OPEN | 1918 | #ifdef SERIAL_DEBUG_OPEN |
| 1931 | printk("rs_close ttys%d, count = %d\n", info->line, info->count); | 1919 | printk(KERN_DEBUG "rs_close ttys%d, count = %d\n", |
| 1920 | info->line, info->count); | ||
| 1932 | #endif | 1921 | #endif |
| 1933 | if ((tty->count == 1) && (info->count != 1)) { | 1922 | if (tty->count == 1 && info->count != 1) { |
| 1934 | /* | 1923 | /* |
| 1935 | * Uh, oh. tty->count is 1, which means that the tty | 1924 | * Uh, oh. tty->count is 1, which means that the tty |
| 1936 | * structure will be freed. Info->count should always | 1925 | * structure will be freed. Info->count should always |
| @@ -1938,12 +1927,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
| 1938 | * one, we've got real problems, since it means the | 1927 | * one, we've got real problems, since it means the |
| 1939 | * serial port won't be shutdown. | 1928 | * serial port won't be shutdown. |
| 1940 | */ | 1929 | */ |
| 1941 | printk("rs_close: bad serial port count; tty->count is 1, " | 1930 | printk(KERN_DEBUG "rs_close: bad serial port count; tty->count is 1, info->count is %d\n", info->count); |
| 1942 | "info->count is %d\n", info->count); | ||
| 1943 | info->count = 1; | 1931 | info->count = 1; |
| 1944 | } | 1932 | } |
| 1945 | if (--info->count < 0) { | 1933 | if (--info->count < 0) { |
| 1946 | printk("rs_close: bad serial port count for ttys%d: %d\n", | 1934 | printk(KERN_ERR "rs_close: bad serial port count for ttys%d: %d\n", |
| 1947 | info->line, info->count); | 1935 | info->line, info->count); |
| 1948 | info->count = 0; | 1936 | info->count = 0; |
| 1949 | } | 1937 | } |
| @@ -1955,7 +1943,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
| 1955 | 1943 | ||
| 1956 | spin_unlock_irqrestore(&info->lock, flags); | 1944 | spin_unlock_irqrestore(&info->lock, flags); |
| 1957 | /* | 1945 | /* |
| 1958 | * Now we wait for the transmit buffer to clear; and we notify | 1946 | * Now we wait for the transmit buffer to clear; and we notify |
| 1959 | * the line discipline to only process XON/XOFF characters. | 1947 | * the line discipline to only process XON/XOFF characters. |
| 1960 | */ | 1948 | */ |
| 1961 | tty->closing = 1; | 1949 | tty->closing = 1; |
| @@ -1990,16 +1978,14 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
| 1990 | rs_wait_until_sent(tty, info->timeout); | 1978 | rs_wait_until_sent(tty, info->timeout); |
| 1991 | } | 1979 | } |
| 1992 | shutdown(info); | 1980 | shutdown(info); |
| 1993 | if (tty->driver->flush_buffer) | 1981 | rs_flush_buffer(tty); |
| 1994 | tty->driver->flush_buffer(tty); | ||
| 1995 | tty_ldisc_flush(tty); | 1982 | tty_ldisc_flush(tty); |
| 1996 | tty->closing = 0; | 1983 | tty->closing = 0; |
| 1997 | info->tty = NULL; | 1984 | info->tty = NULL; |
| 1998 | 1985 | ||
| 1999 | if (info->blocked_open) { | 1986 | if (info->blocked_open) { |
| 2000 | if (info->close_delay) { | 1987 | if (info->close_delay) |
| 2001 | msleep_interruptible(jiffies_to_msecs(info->close_delay)); | 1988 | msleep_interruptible(jiffies_to_msecs(info->close_delay)); |
| 2002 | } | ||
| 2003 | wake_up_interruptible(&info->open_wait); | 1989 | wake_up_interruptible(&info->open_wait); |
| 2004 | } | 1990 | } |
| 2005 | info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | 1991 | info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); |
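Taken together, the rs_close() hunks above follow the usual tty close sequence: fix up the reference count, mark the port closing, drain the transmitter, shut the hardware down, flush the driver and line-discipline buffers, honour close_delay, and finally wake any openers blocked in block_til_ready(). A condensed sketch of that ordering, with a hypothetical port type and helper functions standing in for the driver's own (not a buildable module):

    static void example_final_close(struct example_port *p, struct tty_struct *tty)
    {
            p->flags |= ASYNC_CLOSING;      /* refuse new activity */
            tty->closing = 1;

            example_wait_until_sent(p);     /* let queued bytes drain */
            example_shutdown(p);            /* free IRQ/DMA, power down */
            example_flush_buffer(p);        /* discard unsent driver data */
            tty_ldisc_flush(tty);           /* discard unread ldisc data */

            tty->closing = 0;
            p->tty = NULL;

            if (p->blocked_open) {
                    if (p->close_delay)
                            msleep_interruptible(jiffies_to_msecs(p->close_delay));
                    wake_up_interruptible(&p->open_wait);
            }
            p->flags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING);
    }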
| @@ -2012,7 +1998,7 @@ out: | |||
| 2012 | 1998 | ||
| 2013 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | 1999 | static void rs_wait_until_sent(struct tty_struct *tty, int timeout) |
| 2014 | { | 2000 | { |
| 2015 | struct esp_struct *info = (struct esp_struct *)tty->driver_data; | 2001 | struct esp_struct *info = tty->driver_data; |
| 2016 | unsigned long orig_jiffies, char_time; | 2002 | unsigned long orig_jiffies, char_time; |
| 2017 | unsigned long flags; | 2003 | unsigned long flags; |
| 2018 | 2004 | ||
| @@ -2036,10 +2022,10 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 2036 | msleep_interruptible(jiffies_to_msecs(char_time)); | 2022 | msleep_interruptible(jiffies_to_msecs(char_time)); |
| 2037 | 2023 | ||
| 2038 | if (signal_pending(current)) | 2024 | if (signal_pending(current)) |
| 2039 | break; | 2025 | return; |
| 2040 | 2026 | ||
| 2041 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) | 2027 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) |
| 2042 | break; | 2028 | return; |
| 2043 | 2029 | ||
| 2044 | spin_lock_irqsave(&info->lock, flags); | 2030 | spin_lock_irqsave(&info->lock, flags); |
| 2045 | serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); | 2031 | serial_out(info, UART_ESI_CMD1, ESI_NO_COMMAND); |
| @@ -2054,11 +2040,11 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 2054 | */ | 2040 | */ |
| 2055 | static void esp_hangup(struct tty_struct *tty) | 2041 | static void esp_hangup(struct tty_struct *tty) |
| 2056 | { | 2042 | { |
| 2057 | struct esp_struct * info = (struct esp_struct *)tty->driver_data; | 2043 | struct esp_struct *info = tty->driver_data; |
| 2058 | 2044 | ||
| 2059 | if (serial_paranoia_check(info, tty->name, "esp_hangup")) | 2045 | if (serial_paranoia_check(info, tty->name, "esp_hangup")) |
| 2060 | return; | 2046 | return; |
| 2061 | 2047 | ||
| 2062 | rs_flush_buffer(tty); | 2048 | rs_flush_buffer(tty); |
| 2063 | shutdown(info); | 2049 | shutdown(info); |
| 2064 | info->count = 0; | 2050 | info->count = 0; |
| @@ -2072,7 +2058,7 @@ static void esp_hangup(struct tty_struct *tty) | |||
| 2072 | * esp_open() and friends | 2058 | * esp_open() and friends |
| 2073 | * ------------------------------------------------------------ | 2059 | * ------------------------------------------------------------ |
| 2074 | */ | 2060 | */ |
| 2075 | static int block_til_ready(struct tty_struct *tty, struct file * filp, | 2061 | static int block_til_ready(struct tty_struct *tty, struct file *filp, |
| 2076 | struct esp_struct *info) | 2062 | struct esp_struct *info) |
| 2077 | { | 2063 | { |
| 2078 | DECLARE_WAITQUEUE(wait, current); | 2064 | DECLARE_WAITQUEUE(wait, current); |
| @@ -2121,11 +2107,11 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 2121 | retval = 0; | 2107 | retval = 0; |
| 2122 | add_wait_queue(&info->open_wait, &wait); | 2108 | add_wait_queue(&info->open_wait, &wait); |
| 2123 | #ifdef SERIAL_DEBUG_OPEN | 2109 | #ifdef SERIAL_DEBUG_OPEN |
| 2124 | printk("block_til_ready before block: ttys%d, count = %d\n", | 2110 | printk(KERN_DEBUG "block_til_ready before block: ttys%d, count = %d\n", |
| 2125 | info->line, info->count); | 2111 | info->line, info->count); |
| 2126 | #endif | 2112 | #endif |
| 2127 | spin_lock_irqsave(&info->lock, flags); | 2113 | spin_lock_irqsave(&info->lock, flags); |
| 2128 | if (!tty_hung_up_p(filp)) | 2114 | if (!tty_hung_up_p(filp)) |
| 2129 | info->count--; | 2115 | info->count--; |
| 2130 | info->blocked_open++; | 2116 | info->blocked_open++; |
| 2131 | while (1) { | 2117 | while (1) { |
| @@ -2147,7 +2133,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 2147 | if (info->flags & ASYNC_HUP_NOTIFY) | 2133 | if (info->flags & ASYNC_HUP_NOTIFY) |
| 2148 | retval = -EAGAIN; | 2134 | retval = -EAGAIN; |
| 2149 | else | 2135 | else |
| 2150 | retval = -ERESTARTSYS; | 2136 | retval = -ERESTARTSYS; |
| 2151 | #else | 2137 | #else |
| 2152 | retval = -EAGAIN; | 2138 | retval = -EAGAIN; |
| 2153 | #endif | 2139 | #endif |
| @@ -2166,7 +2152,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 2166 | break; | 2152 | break; |
| 2167 | } | 2153 | } |
| 2168 | #ifdef SERIAL_DEBUG_OPEN | 2154 | #ifdef SERIAL_DEBUG_OPEN |
| 2169 | printk("block_til_ready blocking: ttys%d, count = %d\n", | 2155 | printk(KERN_DEBUG "block_til_ready blocking: ttys%d, count = %d\n", |
| 2170 | info->line, info->count); | 2156 | info->line, info->count); |
| 2171 | #endif | 2157 | #endif |
| 2172 | spin_unlock_irqrestore(&info->lock, flags); | 2158 | spin_unlock_irqrestore(&info->lock, flags); |
| @@ -2180,14 +2166,14 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 2180 | info->blocked_open--; | 2166 | info->blocked_open--; |
| 2181 | spin_unlock_irqrestore(&info->lock, flags); | 2167 | spin_unlock_irqrestore(&info->lock, flags); |
| 2182 | #ifdef SERIAL_DEBUG_OPEN | 2168 | #ifdef SERIAL_DEBUG_OPEN |
| 2183 | printk("block_til_ready after blocking: ttys%d, count = %d\n", | 2169 | printk(KERN_DEBUG "block_til_ready after blocking: ttys%d, count = %d\n", |
| 2184 | info->line, info->count); | 2170 | info->line, info->count); |
| 2185 | #endif | 2171 | #endif |
| 2186 | if (retval) | 2172 | if (retval) |
| 2187 | return retval; | 2173 | return retval; |
| 2188 | info->flags |= ASYNC_NORMAL_ACTIVE; | 2174 | info->flags |= ASYNC_NORMAL_ACTIVE; |
| 2189 | return 0; | 2175 | return 0; |
| 2190 | } | 2176 | } |
| 2191 | 2177 | ||
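block_til_ready(), whose debug printks gain KERN_DEBUG levels above, is the classic carrier-wait loop: the opener parks itself on open_wait, drops its count reference, and sleeps until DCD appears, a hangup occurs, or a signal interrupts it. Its skeleton, reduced to the plain wait-queue pattern with a hypothetical port type and carrier check (the real code also tracks info->count and info->blocked_open under info->lock):

    static int example_wait_for_carrier(struct example_port *p)
    {
            DECLARE_WAITQUEUE(wait, current);
            int retval = 0;

            add_wait_queue(&p->open_wait, &wait);
            while (1) {
                    /* set the task state before testing the condition so a
                     * wakeup between the two cannot be lost */
                    set_current_state(TASK_INTERRUPTIBLE);
                    if (example_carrier_raised(p))  /* DCD up: proceed */
                            break;
                    if (signal_pending(current)) {  /* interrupted open() */
                            retval = -ERESTARTSYS;
                            break;
                    }
                    schedule();
            }
            set_current_state(TASK_RUNNING);
            remove_wait_queue(&p->open_wait, &wait);
            return retval;
    }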
| 2192 | /* | 2178 | /* |
| 2193 | * This routine is called whenever a serial port is opened. It | 2179 | * This routine is called whenever a serial port is opened. It |
| @@ -2195,7 +2181,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 2195 | * the IRQ chain. It also performs the serial-specific | 2181 | * the IRQ chain. It also performs the serial-specific |
| 2196 | * initialization for the tty structure. | 2182 | * initialization for the tty structure. |
| 2197 | */ | 2183 | */ |
| 2198 | static int esp_open(struct tty_struct *tty, struct file * filp) | 2184 | static int esp_open(struct tty_struct *tty, struct file *filp) |
| 2199 | { | 2185 | { |
| 2200 | struct esp_struct *info; | 2186 | struct esp_struct *info; |
| 2201 | int retval, line; | 2187 | int retval, line; |
| @@ -2218,7 +2204,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp) | |||
| 2218 | } | 2204 | } |
| 2219 | 2205 | ||
| 2220 | #ifdef SERIAL_DEBUG_OPEN | 2206 | #ifdef SERIAL_DEBUG_OPEN |
| 2221 | printk("esp_open %s, count = %d\n", tty->name, info->count); | 2207 | printk(KERN_DEBUG "esp_open %s, count = %d\n", tty->name, info->count); |
| 2222 | #endif | 2208 | #endif |
| 2223 | spin_lock_irqsave(&info->lock, flags); | 2209 | spin_lock_irqsave(&info->lock, flags); |
| 2224 | info->count++; | 2210 | info->count++; |
| @@ -2226,7 +2212,7 @@ static int esp_open(struct tty_struct *tty, struct file * filp) | |||
| 2226 | info->tty = tty; | 2212 | info->tty = tty; |
| 2227 | 2213 | ||
| 2228 | spin_unlock_irqrestore(&info->lock, flags); | 2214 | spin_unlock_irqrestore(&info->lock, flags); |
| 2229 | 2215 | ||
| 2230 | /* | 2216 | /* |
| 2231 | * Start up serial port | 2217 | * Start up serial port |
| 2232 | */ | 2218 | */ |
| @@ -2237,14 +2223,13 @@ static int esp_open(struct tty_struct *tty, struct file * filp) | |||
| 2237 | retval = block_til_ready(tty, filp, info); | 2223 | retval = block_til_ready(tty, filp, info); |
| 2238 | if (retval) { | 2224 | if (retval) { |
| 2239 | #ifdef SERIAL_DEBUG_OPEN | 2225 | #ifdef SERIAL_DEBUG_OPEN |
| 2240 | printk("esp_open returning after block_til_ready with %d\n", | 2226 | printk(KERN_DEBUG "esp_open returning after block_til_ready with %d\n", |
| 2241 | retval); | 2227 | retval); |
| 2242 | #endif | 2228 | #endif |
| 2243 | return retval; | 2229 | return retval; |
| 2244 | } | 2230 | } |
| 2245 | |||
| 2246 | #ifdef SERIAL_DEBUG_OPEN | 2231 | #ifdef SERIAL_DEBUG_OPEN |
| 2247 | printk("esp_open %s successful...", tty->name); | 2232 | printk(KERN_DEBUG "esp_open %s successful...", tty->name); |
| 2248 | #endif | 2233 | #endif |
| 2249 | return 0; | 2234 | return 0; |
| 2250 | } | 2235 | } |
| @@ -2262,10 +2247,10 @@ static int esp_open(struct tty_struct *tty, struct file * filp) | |||
| 2262 | * number, and identifies which options were configured into this | 2247 | * number, and identifies which options were configured into this |
| 2263 | * driver. | 2248 | * driver. |
| 2264 | */ | 2249 | */ |
| 2265 | 2250 | ||
| 2266 | static inline void show_serial_version(void) | 2251 | static void show_serial_version(void) |
| 2267 | { | 2252 | { |
| 2268 | printk(KERN_INFO "%s version %s (DMA %u)\n", | 2253 | printk(KERN_INFO "%s version %s (DMA %u)\n", |
| 2269 | serial_name, serial_version, dma); | 2254 | serial_name, serial_version, dma); |
| 2270 | } | 2255 | } |
| 2271 | 2256 | ||
| @@ -2273,7 +2258,7 @@ static inline void show_serial_version(void) | |||
| 2273 | * This routine is called by espserial_init() to initialize a specific serial | 2258 | * This routine is called by espserial_init() to initialize a specific serial |
| 2274 | * port. | 2259 | * port. |
| 2275 | */ | 2260 | */ |
| 2276 | static inline int autoconfig(struct esp_struct * info) | 2261 | static int autoconfig(struct esp_struct *info) |
| 2277 | { | 2262 | { |
| 2278 | int port_detected = 0; | 2263 | int port_detected = 0; |
| 2279 | unsigned long flags; | 2264 | unsigned long flags; |
| @@ -2349,14 +2334,14 @@ static const struct tty_operations esp_ops = { | |||
| 2349 | static int __init espserial_init(void) | 2334 | static int __init espserial_init(void) |
| 2350 | { | 2335 | { |
| 2351 | int i, offset; | 2336 | int i, offset; |
| 2352 | struct esp_struct * info; | 2337 | struct esp_struct *info; |
| 2353 | struct esp_struct *last_primary = NULL; | 2338 | struct esp_struct *last_primary = NULL; |
| 2354 | int esp[] = {0x100,0x140,0x180,0x200,0x240,0x280,0x300,0x380}; | 2339 | int esp[] = { 0x100, 0x140, 0x180, 0x200, 0x240, 0x280, 0x300, 0x380 }; |
| 2355 | 2340 | ||
| 2356 | esp_driver = alloc_tty_driver(NR_PORTS); | 2341 | esp_driver = alloc_tty_driver(NR_PORTS); |
| 2357 | if (!esp_driver) | 2342 | if (!esp_driver) |
| 2358 | return -ENOMEM; | 2343 | return -ENOMEM; |
| 2359 | 2344 | ||
| 2360 | for (i = 0; i < NR_PRIMARY; i++) { | 2345 | for (i = 0; i < NR_PRIMARY; i++) { |
| 2361 | if (irq[i] != 0) { | 2346 | if (irq[i] != 0) { |
| 2362 | if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) || | 2347 | if ((irq[i] < 2) || (irq[i] > 15) || (irq[i] == 6) || |
| @@ -2378,20 +2363,20 @@ static int __init espserial_init(void) | |||
| 2378 | 2363 | ||
| 2379 | if ((flow_off < 1) || (flow_off > 1023)) | 2364 | if ((flow_off < 1) || (flow_off > 1023)) |
| 2380 | flow_off = 1016; | 2365 | flow_off = 1016; |
| 2381 | 2366 | ||
| 2382 | if ((flow_on < 1) || (flow_on > 1023)) | 2367 | if ((flow_on < 1) || (flow_on > 1023)) |
| 2383 | flow_on = 944; | 2368 | flow_on = 944; |
| 2384 | 2369 | ||
| 2385 | if ((rx_timeout < 0) || (rx_timeout > 255)) | 2370 | if ((rx_timeout < 0) || (rx_timeout > 255)) |
| 2386 | rx_timeout = 128; | 2371 | rx_timeout = 128; |
| 2387 | 2372 | ||
| 2388 | if (flow_on >= flow_off) | 2373 | if (flow_on >= flow_off) |
| 2389 | flow_on = flow_off - 1; | 2374 | flow_on = flow_off - 1; |
| 2390 | 2375 | ||
| 2391 | show_serial_version(); | 2376 | show_serial_version(); |
| 2392 | 2377 | ||
| 2393 | /* Initialize the tty_driver structure */ | 2378 | /* Initialize the tty_driver structure */ |
| 2394 | 2379 | ||
| 2395 | esp_driver->owner = THIS_MODULE; | 2380 | esp_driver->owner = THIS_MODULE; |
| 2396 | esp_driver->name = "ttyP"; | 2381 | esp_driver->name = "ttyP"; |
| 2397 | esp_driver->major = ESP_IN_MAJOR; | 2382 | esp_driver->major = ESP_IN_MAJOR; |
| @@ -2401,10 +2386,11 @@ static int __init espserial_init(void) | |||
| 2401 | esp_driver->init_termios = tty_std_termios; | 2386 | esp_driver->init_termios = tty_std_termios; |
| 2402 | esp_driver->init_termios.c_cflag = | 2387 | esp_driver->init_termios.c_cflag = |
| 2403 | B9600 | CS8 | CREAD | HUPCL | CLOCAL; | 2388 | B9600 | CS8 | CREAD | HUPCL | CLOCAL; |
| 2389 | esp_driver->init_termios.c_ispeed = 9600; | ||
| 2390 | esp_driver->init_termios.c_ospeed = 9600; | ||
| 2404 | esp_driver->flags = TTY_DRIVER_REAL_RAW; | 2391 | esp_driver->flags = TTY_DRIVER_REAL_RAW; |
| 2405 | tty_set_operations(esp_driver, &esp_ops); | 2392 | tty_set_operations(esp_driver, &esp_ops); |
| 2406 | if (tty_register_driver(esp_driver)) | 2393 | if (tty_register_driver(esp_driver)) { |
| 2407 | { | ||
| 2408 | printk(KERN_ERR "Couldn't register esp serial driver"); | 2394 | printk(KERN_ERR "Couldn't register esp serial driver"); |
| 2409 | put_tty_driver(esp_driver); | 2395 | put_tty_driver(esp_driver); |
| 2410 | return 1; | 2396 | return 1; |
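The two added init_termios lines give the default ktermios numeric c_ispeed/c_ospeed values that match the B9600 bits in c_cflag, which the tty core's speed handling expects. As a hedged aside, kernels of this vintage also provide a helper that keeps the two representations consistent; the fragment below shows that alternative (it is an assumption for illustration, not what the patch does):

    #include <linux/tty.h>

    /* drv is a struct tty_driver * obtained from alloc_tty_driver(). */
    drv->init_termios = tty_std_termios;
    drv->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
    tty_termios_encode_baud_rate(&drv->init_termios, 9600, 9600);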
| @@ -2412,8 +2398,7 @@ static int __init espserial_init(void) | |||
| 2412 | 2398 | ||
| 2413 | info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); | 2399 | info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); |
| 2414 | 2400 | ||
| 2415 | if (!info) | 2401 | if (!info) { |
| 2416 | { | ||
| 2417 | printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); | 2402 | printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); |
| 2418 | tty_unregister_driver(esp_driver); | 2403 | tty_unregister_driver(esp_driver); |
| 2419 | put_tty_driver(esp_driver); | 2404 | put_tty_driver(esp_driver); |
| @@ -2476,10 +2461,8 @@ static int __init espserial_init(void) | |||
| 2476 | info->stat_flags |= ESP_STAT_NEVER_DMA; | 2461 | info->stat_flags |= ESP_STAT_NEVER_DMA; |
| 2477 | 2462 | ||
| 2478 | info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); | 2463 | info = kzalloc(sizeof(struct esp_struct), GFP_KERNEL); |
| 2479 | if (!info) | 2464 | if (!info) { |
| 2480 | { | 2465 | printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); |
| 2481 | printk(KERN_ERR "Couldn't allocate memory for esp serial device information\n"); | ||
| 2482 | |||
| 2483 | /* allow use of the already detected ports */ | 2466 | /* allow use of the already detected ports */ |
| 2484 | return 0; | 2467 | return 0; |
| 2485 | } | 2468 | } |
| @@ -2503,22 +2486,20 @@ static int __init espserial_init(void) | |||
| 2503 | return 0; | 2486 | return 0; |
| 2504 | } | 2487 | } |
| 2505 | 2488 | ||
| 2506 | static void __exit espserial_exit(void) | 2489 | static void __exit espserial_exit(void) |
| 2507 | { | 2490 | { |
| 2508 | int e1; | 2491 | int e1; |
| 2509 | struct esp_struct *temp_async; | 2492 | struct esp_struct *temp_async; |
| 2510 | struct esp_pio_buffer *pio_buf; | 2493 | struct esp_pio_buffer *pio_buf; |
| 2511 | 2494 | ||
| 2512 | /* printk("Unloading %s: version %s\n", serial_name, serial_version); */ | 2495 | e1 = tty_unregister_driver(esp_driver); |
| 2513 | if ((e1 = tty_unregister_driver(esp_driver))) | 2496 | if (e1) |
| 2514 | printk("SERIAL: failed to unregister serial driver (%d)\n", | 2497 | printk(KERN_ERR "esp: failed to unregister driver (%d)\n", e1); |
| 2515 | e1); | ||
| 2516 | put_tty_driver(esp_driver); | 2498 | put_tty_driver(esp_driver); |
| 2517 | 2499 | ||
| 2518 | while (ports) { | 2500 | while (ports) { |
| 2519 | if (ports->port) { | 2501 | if (ports->port) |
| 2520 | release_region(ports->port, REGION_SIZE); | 2502 | release_region(ports->port, REGION_SIZE); |
| 2521 | } | ||
| 2522 | temp_async = ports->next_port; | 2503 | temp_async = ports->next_port; |
| 2523 | kfree(ports); | 2504 | kfree(ports); |
| 2524 | ports = temp_async; | 2505 | ports = temp_async; |
diff --git a/drivers/char/generic_serial.c b/drivers/char/generic_serial.c index 7ed7da1d99cf..252f73e48596 100644 --- a/drivers/char/generic_serial.c +++ b/drivers/char/generic_serial.c | |||
| @@ -40,27 +40,27 @@ static int gs_debug; | |||
| 40 | #define gs_dprintk(f, str...) /* nothing */ | 40 | #define gs_dprintk(f, str...) /* nothing */ |
| 41 | #endif | 41 | #endif |
| 42 | 42 | ||
| 43 | #define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __FUNCTION__) | 43 | #define func_enter() gs_dprintk (GS_DEBUG_FLOW, "gs: enter %s\n", __func__) |
| 44 | #define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __FUNCTION__) | 44 | #define func_exit() gs_dprintk (GS_DEBUG_FLOW, "gs: exit %s\n", __func__) |
| 45 | 45 | ||
| 46 | #define RS_EVENT_WRITE_WAKEUP 1 | 46 | #define RS_EVENT_WRITE_WAKEUP 1 |
| 47 | 47 | ||
| 48 | module_param(gs_debug, int, 0644); | 48 | module_param(gs_debug, int, 0644); |
| 49 | 49 | ||
| 50 | 50 | ||
| 51 | void gs_put_char(struct tty_struct * tty, unsigned char ch) | 51 | int gs_put_char(struct tty_struct * tty, unsigned char ch) |
| 52 | { | 52 | { |
| 53 | struct gs_port *port; | 53 | struct gs_port *port; |
| 54 | 54 | ||
| 55 | func_enter (); | 55 | func_enter (); |
| 56 | 56 | ||
| 57 | if (!tty) return; | 57 | if (!tty) return 0; |
| 58 | 58 | ||
| 59 | port = tty->driver_data; | 59 | port = tty->driver_data; |
| 60 | 60 | ||
| 61 | if (!port) return; | 61 | if (!port) return 0; |
| 62 | 62 | ||
| 63 | if (! (port->flags & ASYNC_INITIALIZED)) return; | 63 | if (! (port->flags & ASYNC_INITIALIZED)) return 0; |
| 64 | 64 | ||
| 65 | /* Take a lock on the serial tranmit buffer! */ | 65 | /* Take a lock on the serial tranmit buffer! */ |
| 66 | mutex_lock(& port->port_write_mutex); | 66 | mutex_lock(& port->port_write_mutex); |
| @@ -68,7 +68,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch) | |||
| 68 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { | 68 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { |
| 69 | /* Sorry, buffer is full, drop character. Update statistics???? -- REW */ | 69 | /* Sorry, buffer is full, drop character. Update statistics???? -- REW */ |
| 70 | mutex_unlock(&port->port_write_mutex); | 70 | mutex_unlock(&port->port_write_mutex); |
| 71 | return; | 71 | return 0; |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | port->xmit_buf[port->xmit_head++] = ch; | 74 | port->xmit_buf[port->xmit_head++] = ch; |
| @@ -77,6 +77,7 @@ void gs_put_char(struct tty_struct * tty, unsigned char ch) | |||
| 77 | 77 | ||
| 78 | mutex_unlock(&port->port_write_mutex); | 78 | mutex_unlock(&port->port_write_mutex); |
| 79 | func_exit (); | 79 | func_exit (); |
| 80 | return 1; | ||
| 80 | } | 81 | } |
| 81 | 82 | ||
| 82 | 83 | ||
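gs_put_char() changes from void to int here to match the new put_char semantics in the tty layer of this series: a non-zero return means the byte was queued, zero means it was dropped (no tty, no port, port not initialised, or transmit buffer full). A hedged sketch of a caller honouring the new contract (the caller itself is hypothetical):

    #include <linux/tty.h>
    #include <linux/kernel.h>           /* pr_debug() */
    #include <linux/generic_serial.h>   /* gs_put_char() */

    /* Hypothetical caller: report, rather than silently lose, a dropped byte. */
    static int example_emit(struct tty_struct *tty, unsigned char ch)
    {
            int queued = gs_put_char(tty, ch);  /* 1 = queued, 0 = dropped */

            if (!queued)
                    pr_debug("example: transmit buffer full, byte dropped\n");
            return queued;
    }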
| @@ -586,8 +587,7 @@ void gs_close(struct tty_struct * tty, struct file * filp) | |||
| 586 | 587 | ||
| 587 | port->flags &= ~GS_ACTIVE; | 588 | port->flags &= ~GS_ACTIVE; |
| 588 | 589 | ||
| 589 | if (tty->driver->flush_buffer) | 590 | gs_flush_buffer(tty); |
| 590 | tty->driver->flush_buffer(tty); | ||
| 591 | 591 | ||
| 592 | tty_ldisc_flush(tty); | 592 | tty_ldisc_flush(tty); |
| 593 | tty->closing = 0; | 593 | tty->closing = 0; |
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 1399971be689..e7fb0bca3667 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c | |||
| @@ -308,7 +308,7 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) | |||
| 308 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, | 308 | if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, |
| 309 | PAGE_SIZE, vma->vm_page_prot)) { | 309 | PAGE_SIZE, vma->vm_page_prot)) { |
| 310 | printk(KERN_ERR "%s: io_remap_pfn_range failed\n", | 310 | printk(KERN_ERR "%s: io_remap_pfn_range failed\n", |
| 311 | __FUNCTION__); | 311 | __func__); |
| 312 | return -EAGAIN; | 312 | return -EAGAIN; |
| 313 | } | 313 | } |
| 314 | 314 | ||
| @@ -748,7 +748,7 @@ int hpet_alloc(struct hpet_data *hdp) | |||
| 748 | */ | 748 | */ |
| 749 | if (hpet_is_known(hdp)) { | 749 | if (hpet_is_known(hdp)) { |
| 750 | printk(KERN_DEBUG "%s: duplicate HPET ignored\n", | 750 | printk(KERN_DEBUG "%s: duplicate HPET ignored\n", |
| 751 | __FUNCTION__); | 751 | __func__); |
| 752 | return 0; | 752 | return 0; |
| 753 | } | 753 | } |
| 754 | 754 | ||
| @@ -869,7 +869,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
| 869 | 869 | ||
| 870 | if (hpet_is_known(hdp)) { | 870 | if (hpet_is_known(hdp)) { |
| 871 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", | 871 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", |
| 872 | __FUNCTION__, hdp->hd_phys_address); | 872 | __func__, hdp->hd_phys_address); |
| 873 | iounmap(hdp->hd_address); | 873 | iounmap(hdp->hd_address); |
| 874 | return AE_ALREADY_EXISTS; | 874 | return AE_ALREADY_EXISTS; |
| 875 | } | 875 | } |
| @@ -886,7 +886,7 @@ static acpi_status hpet_resources(struct acpi_resource *res, void *data) | |||
| 886 | 886 | ||
| 887 | if (hpet_is_known(hdp)) { | 887 | if (hpet_is_known(hdp)) { |
| 888 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", | 888 | printk(KERN_DEBUG "%s: 0x%lx is busy\n", |
| 889 | __FUNCTION__, hdp->hd_phys_address); | 889 | __func__, hdp->hd_phys_address); |
| 890 | iounmap(hdp->hd_address); | 890 | iounmap(hdp->hd_address); |
| 891 | return AE_ALREADY_EXISTS; | 891 | return AE_ALREADY_EXISTS; |
| 892 | } | 892 | } |
| @@ -925,7 +925,7 @@ static int hpet_acpi_add(struct acpi_device *device) | |||
| 925 | return -ENODEV; | 925 | return -ENODEV; |
| 926 | 926 | ||
| 927 | if (!data.hd_address || !data.hd_nirqs) { | 927 | if (!data.hd_address || !data.hd_nirqs) { |
| 928 | printk("%s: no address or irqs in _CRS\n", __FUNCTION__); | 928 | printk("%s: no address or irqs in _CRS\n", __func__); |
| 929 | return -ENODEV; | 929 | return -ENODEV; |
| 930 | } | 930 | } |
| 931 | 931 | ||
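The hpet.c hunks above, like the generic_serial.c and hvsi.c ones nearby, are a mechanical conversion from the GCC-specific __FUNCTION__ to the C99 predefined identifier __func__; both expand to the name of the enclosing function, but only __func__ is standard C. A one-function illustration:

    #include <linux/kernel.h>

    static void example_probe(void)
    {
            /* prints: "example: example_probe called" at debug level */
            printk(KERN_DEBUG "example: %s called\n", __func__);
    }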
diff --git a/drivers/char/hvsi.c b/drivers/char/hvsi.c index d5a752da322f..59c6f9ab94e4 100644 --- a/drivers/char/hvsi.c +++ b/drivers/char/hvsi.c | |||
| @@ -246,7 +246,7 @@ static void compact_inbuf(struct hvsi_struct *hp, uint8_t *read_to) | |||
| 246 | { | 246 | { |
| 247 | int remaining = (int)(hp->inbuf_end - read_to); | 247 | int remaining = (int)(hp->inbuf_end - read_to); |
| 248 | 248 | ||
| 249 | pr_debug("%s: %i chars remain\n", __FUNCTION__, remaining); | 249 | pr_debug("%s: %i chars remain\n", __func__, remaining); |
| 250 | 250 | ||
| 251 | if (read_to != hp->inbuf) | 251 | if (read_to != hp->inbuf) |
| 252 | memmove(hp->inbuf, read_to, remaining); | 252 | memmove(hp->inbuf, read_to, remaining); |
| @@ -365,7 +365,7 @@ static int hvsi_version_respond(struct hvsi_struct *hp, uint16_t query_seqno) | |||
| 365 | packet.u.version = HVSI_VERSION; | 365 | packet.u.version = HVSI_VERSION; |
| 366 | packet.query_seqno = query_seqno+1; | 366 | packet.query_seqno = query_seqno+1; |
| 367 | 367 | ||
| 368 | pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); | 368 | pr_debug("%s: sending %i bytes\n", __func__, packet.len); |
| 369 | dbg_dump_hex((uint8_t*)&packet, packet.len); | 369 | dbg_dump_hex((uint8_t*)&packet, packet.len); |
| 370 | 370 | ||
| 371 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); | 371 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); |
| @@ -437,7 +437,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp, | |||
| 437 | return NULL; | 437 | return NULL; |
| 438 | 438 | ||
| 439 | if (overflow > 0) { | 439 | if (overflow > 0) { |
| 440 | pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __FUNCTION__); | 440 | pr_debug("%s: got >TTY_THRESHOLD_THROTTLE bytes\n", __func__); |
| 441 | datalen = TTY_THRESHOLD_THROTTLE; | 441 | datalen = TTY_THRESHOLD_THROTTLE; |
| 442 | } | 442 | } |
| 443 | 443 | ||
| @@ -448,7 +448,7 @@ static struct tty_struct *hvsi_recv_data(struct hvsi_struct *hp, | |||
| 448 | * we still have more data to deliver, so we need to save off the | 448 | * we still have more data to deliver, so we need to save off the |
| 449 | * overflow and send it later | 449 | * overflow and send it later |
| 450 | */ | 450 | */ |
| 451 | pr_debug("%s: deferring overflow\n", __FUNCTION__); | 451 | pr_debug("%s: deferring overflow\n", __func__); |
| 452 | memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow); | 452 | memcpy(hp->throttle_buf, data + TTY_THRESHOLD_THROTTLE, overflow); |
| 453 | hp->n_throttle = overflow; | 453 | hp->n_throttle = overflow; |
| 454 | } | 454 | } |
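hvsi_recv_data() above caps a single delivery at TTY_THRESHOLD_THROTTLE bytes and, as the comment explains, parks any excess in throttle_buf so it can be replayed once the tty layer unthrottles the port. In outline, with a hypothetical port structure standing in for struct hvsi_struct (a sketch, not hvsi's exact code):

    static void example_receive(struct example_port *p,
                                const unsigned char *data, int len)
    {
            int deliver = len;

            if (deliver > TTY_THRESHOLD_THROTTLE)
                    deliver = TTY_THRESHOLD_THROTTLE;

            tty_insert_flip_string(p->tty, data, deliver);

            if (len > deliver) {
                    /* stashed here, replayed later from the unthrottle hook */
                    memcpy(p->throttle_buf, data + deliver, len - deliver);
                    p->n_throttle = len - deliver;
            }
    }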
| @@ -474,11 +474,11 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip, | |||
| 474 | 474 | ||
| 475 | chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ); | 475 | chunklen = hvsi_read(hp, hp->inbuf_end, HVSI_MAX_READ); |
| 476 | if (chunklen == 0) { | 476 | if (chunklen == 0) { |
| 477 | pr_debug("%s: 0-length read\n", __FUNCTION__); | 477 | pr_debug("%s: 0-length read\n", __func__); |
| 478 | return 0; | 478 | return 0; |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | pr_debug("%s: got %i bytes\n", __FUNCTION__, chunklen); | 481 | pr_debug("%s: got %i bytes\n", __func__, chunklen); |
| 482 | dbg_dump_hex(hp->inbuf_end, chunklen); | 482 | dbg_dump_hex(hp->inbuf_end, chunklen); |
| 483 | 483 | ||
| 484 | hp->inbuf_end += chunklen; | 484 | hp->inbuf_end += chunklen; |
| @@ -495,7 +495,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip, | |||
| 495 | continue; | 495 | continue; |
| 496 | } | 496 | } |
| 497 | 497 | ||
| 498 | pr_debug("%s: handling %i-byte packet\n", __FUNCTION__, | 498 | pr_debug("%s: handling %i-byte packet\n", __func__, |
| 499 | len_packet(packet)); | 499 | len_packet(packet)); |
| 500 | dbg_dump_packet(packet); | 500 | dbg_dump_packet(packet); |
| 501 | 501 | ||
| @@ -526,7 +526,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip, | |||
| 526 | packet += len_packet(packet); | 526 | packet += len_packet(packet); |
| 527 | 527 | ||
| 528 | if (*hangup || *handshake) { | 528 | if (*hangup || *handshake) { |
| 529 | pr_debug("%s: hangup or handshake\n", __FUNCTION__); | 529 | pr_debug("%s: hangup or handshake\n", __func__); |
| 530 | /* | 530 | /* |
| 531 | * we need to send the hangup now before receiving any more data. | 531 | * we need to send the hangup now before receiving any more data. |
| 532 | * If we get "data, hangup, data", we can't deliver the second | 532 | * If we get "data, hangup, data", we can't deliver the second |
| @@ -543,7 +543,7 @@ static int hvsi_load_chunk(struct hvsi_struct *hp, struct tty_struct **flip, | |||
| 543 | 543 | ||
| 544 | static void hvsi_send_overflow(struct hvsi_struct *hp) | 544 | static void hvsi_send_overflow(struct hvsi_struct *hp) |
| 545 | { | 545 | { |
| 546 | pr_debug("%s: delivering %i bytes overflow\n", __FUNCTION__, | 546 | pr_debug("%s: delivering %i bytes overflow\n", __func__, |
| 547 | hp->n_throttle); | 547 | hp->n_throttle); |
| 548 | 548 | ||
| 549 | hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle); | 549 | hvsi_insert_chars(hp, hp->throttle_buf, hp->n_throttle); |
| @@ -563,7 +563,7 @@ static irqreturn_t hvsi_interrupt(int irq, void *arg) | |||
| 563 | unsigned long flags; | 563 | unsigned long flags; |
| 564 | int again = 1; | 564 | int again = 1; |
| 565 | 565 | ||
| 566 | pr_debug("%s\n", __FUNCTION__); | 566 | pr_debug("%s\n", __func__); |
| 567 | 567 | ||
| 568 | while (again) { | 568 | while (again) { |
| 569 | spin_lock_irqsave(&hp->lock, flags); | 569 | spin_lock_irqsave(&hp->lock, flags); |
| @@ -647,7 +647,7 @@ static int hvsi_query(struct hvsi_struct *hp, uint16_t verb) | |||
| 647 | packet.seqno = atomic_inc_return(&hp->seqno); | 647 | packet.seqno = atomic_inc_return(&hp->seqno); |
| 648 | packet.verb = verb; | 648 | packet.verb = verb; |
| 649 | 649 | ||
| 650 | pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); | 650 | pr_debug("%s: sending %i bytes\n", __func__, packet.len); |
| 651 | dbg_dump_hex((uint8_t*)&packet, packet.len); | 651 | dbg_dump_hex((uint8_t*)&packet, packet.len); |
| 652 | 652 | ||
| 653 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); | 653 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); |
| @@ -674,7 +674,7 @@ static int hvsi_get_mctrl(struct hvsi_struct *hp) | |||
| 674 | return ret; | 674 | return ret; |
| 675 | } | 675 | } |
| 676 | 676 | ||
| 677 | pr_debug("%s: mctrl 0x%x\n", __FUNCTION__, hp->mctrl); | 677 | pr_debug("%s: mctrl 0x%x\n", __func__, hp->mctrl); |
| 678 | 678 | ||
| 679 | return 0; | 679 | return 0; |
| 680 | } | 680 | } |
| @@ -694,7 +694,7 @@ static int hvsi_set_mctrl(struct hvsi_struct *hp, uint16_t mctrl) | |||
| 694 | if (mctrl & TIOCM_DTR) | 694 | if (mctrl & TIOCM_DTR) |
| 695 | packet.word = HVSI_TSDTR; | 695 | packet.word = HVSI_TSDTR; |
| 696 | 696 | ||
| 697 | pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); | 697 | pr_debug("%s: sending %i bytes\n", __func__, packet.len); |
| 698 | dbg_dump_hex((uint8_t*)&packet, packet.len); | 698 | dbg_dump_hex((uint8_t*)&packet, packet.len); |
| 699 | 699 | ||
| 700 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); | 700 | wrote = hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); |
| @@ -790,7 +790,7 @@ static void hvsi_close_protocol(struct hvsi_struct *hp) | |||
| 790 | packet.len = 6; | 790 | packet.len = 6; |
| 791 | packet.verb = VSV_CLOSE_PROTOCOL; | 791 | packet.verb = VSV_CLOSE_PROTOCOL; |
| 792 | 792 | ||
| 793 | pr_debug("%s: sending %i bytes\n", __FUNCTION__, packet.len); | 793 | pr_debug("%s: sending %i bytes\n", __func__, packet.len); |
| 794 | dbg_dump_hex((uint8_t*)&packet, packet.len); | 794 | dbg_dump_hex((uint8_t*)&packet, packet.len); |
| 795 | 795 | ||
| 796 | hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); | 796 | hvc_put_chars(hp->vtermno, (char *)&packet, packet.len); |
| @@ -803,7 +803,7 @@ static int hvsi_open(struct tty_struct *tty, struct file *filp) | |||
| 803 | int line = tty->index; | 803 | int line = tty->index; |
| 804 | int ret; | 804 | int ret; |
| 805 | 805 | ||
| 806 | pr_debug("%s\n", __FUNCTION__); | 806 | pr_debug("%s\n", __func__); |
| 807 | 807 | ||
| 808 | if (line < 0 || line >= hvsi_count) | 808 | if (line < 0 || line >= hvsi_count) |
| 809 | return -ENODEV; | 809 | return -ENODEV; |
| @@ -868,7 +868,7 @@ static void hvsi_close(struct tty_struct *tty, struct file *filp) | |||
| 868 | struct hvsi_struct *hp = tty->driver_data; | 868 | struct hvsi_struct *hp = tty->driver_data; |
| 869 | unsigned long flags; | 869 | unsigned long flags; |
| 870 | 870 | ||
| 871 | pr_debug("%s\n", __FUNCTION__); | 871 | pr_debug("%s\n", __func__); |
| 872 | 872 | ||
| 873 | if (tty_hung_up_p(filp)) | 873 | if (tty_hung_up_p(filp)) |
| 874 | return; | 874 | return; |
| @@ -920,7 +920,7 @@ static void hvsi_hangup(struct tty_struct *tty) | |||
| 920 | struct hvsi_struct *hp = tty->driver_data; | 920 | struct hvsi_struct *hp = tty->driver_data; |
| 921 | unsigned long flags; | 921 | unsigned long flags; |
| 922 | 922 | ||
| 923 | pr_debug("%s\n", __FUNCTION__); | 923 | pr_debug("%s\n", __func__); |
| 924 | 924 | ||
| 925 | spin_lock_irqsave(&hp->lock, flags); | 925 | spin_lock_irqsave(&hp->lock, flags); |
| 926 | 926 | ||
| @@ -942,7 +942,7 @@ static void hvsi_push(struct hvsi_struct *hp) | |||
| 942 | n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); | 942 | n = hvsi_put_chars(hp, hp->outbuf, hp->n_outbuf); |
| 943 | if (n > 0) { | 943 | if (n > 0) { |
| 944 | /* success */ | 944 | /* success */ |
| 945 | pr_debug("%s: wrote %i chars\n", __FUNCTION__, n); | 945 | pr_debug("%s: wrote %i chars\n", __func__, n); |
| 946 | hp->n_outbuf = 0; | 946 | hp->n_outbuf = 0; |
| 947 | } else if (n == -EIO) { | 947 | } else if (n == -EIO) { |
| 948 | __set_state(hp, HVSI_FSP_DIED); | 948 | __set_state(hp, HVSI_FSP_DIED); |
| @@ -965,7 +965,7 @@ static void hvsi_write_worker(struct work_struct *work) | |||
| 965 | 965 | ||
| 966 | spin_lock_irqsave(&hp->lock, flags); | 966 | spin_lock_irqsave(&hp->lock, flags); |
| 967 | 967 | ||
| 968 | pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf); | 968 | pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); |
| 969 | 969 | ||
| 970 | if (!is_open(hp)) { | 970 | if (!is_open(hp)) { |
| 971 | /* | 971 | /* |
| @@ -983,7 +983,7 @@ static void hvsi_write_worker(struct work_struct *work) | |||
| 983 | schedule_delayed_work(&hp->writer, 10); | 983 | schedule_delayed_work(&hp->writer, 10); |
| 984 | else { | 984 | else { |
| 985 | #ifdef DEBUG | 985 | #ifdef DEBUG |
| 986 | pr_debug("%s: outbuf emptied after %li jiffies\n", __FUNCTION__, | 986 | pr_debug("%s: outbuf emptied after %li jiffies\n", __func__, |
| 987 | jiffies - start_j); | 987 | jiffies - start_j); |
| 988 | start_j = 0; | 988 | start_j = 0; |
| 989 | #endif /* DEBUG */ | 989 | #endif /* DEBUG */ |
| @@ -1020,11 +1020,11 @@ static int hvsi_write(struct tty_struct *tty, | |||
| 1020 | 1020 | ||
| 1021 | spin_lock_irqsave(&hp->lock, flags); | 1021 | spin_lock_irqsave(&hp->lock, flags); |
| 1022 | 1022 | ||
| 1023 | pr_debug("%s: %i chars in buffer\n", __FUNCTION__, hp->n_outbuf); | 1023 | pr_debug("%s: %i chars in buffer\n", __func__, hp->n_outbuf); |
| 1024 | 1024 | ||
| 1025 | if (!is_open(hp)) { | 1025 | if (!is_open(hp)) { |
| 1026 | /* we're either closing or not yet open; don't accept data */ | 1026 | /* we're either closing or not yet open; don't accept data */ |
| 1027 | pr_debug("%s: not open\n", __FUNCTION__); | 1027 | pr_debug("%s: not open\n", __func__); |
| 1028 | goto out; | 1028 | goto out; |
| 1029 | } | 1029 | } |
| 1030 | 1030 | ||
| @@ -1058,7 +1058,7 @@ out: | |||
| 1058 | spin_unlock_irqrestore(&hp->lock, flags); | 1058 | spin_unlock_irqrestore(&hp->lock, flags); |
| 1059 | 1059 | ||
| 1060 | if (total != origcount) | 1060 | if (total != origcount) |
| 1061 | pr_debug("%s: wanted %i, only wrote %i\n", __FUNCTION__, origcount, | 1061 | pr_debug("%s: wanted %i, only wrote %i\n", __func__, origcount, |
| 1062 | total); | 1062 | total); |
| 1063 | 1063 | ||
| 1064 | return total; | 1064 | return total; |
| @@ -1072,7 +1072,7 @@ static void hvsi_throttle(struct tty_struct *tty) | |||
| 1072 | { | 1072 | { |
| 1073 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; | 1073 | struct hvsi_struct *hp = (struct hvsi_struct *)tty->driver_data; |
| 1074 | 1074 | ||
| 1075 | pr_debug("%s\n", __FUNCTION__); | 1075 | pr_debug("%s\n", __func__); |
| 1076 | 1076 | ||
| 1077 | h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); | 1077 | h_vio_signal(hp->vtermno, VIO_IRQ_DISABLE); |
| 1078 | } | 1078 | } |
| @@ -1083,7 +1083,7 @@ static void hvsi_unthrottle(struct tty_struct *tty) | |||
| 1083 | unsigned long flags; | 1083 | unsigned long flags; |
| 1084 | int shouldflip = 0; | 1084 | int shouldflip = 0; |
| 1085 | 1085 | ||
| 1086 | pr_debug("%s\n", __FUNCTION__); | 1086 | pr_debug("%s\n", __func__); |
| 1087 | 1087 | ||
| 1088 | spin_lock_irqsave(&hp->lock, flags); | 1088 | spin_lock_irqsave(&hp->lock, flags); |
| 1089 | if (hp->n_throttle) { | 1089 | if (hp->n_throttle) { |
| @@ -1302,7 +1302,7 @@ static int __init hvsi_console_init(void) | |||
| 1302 | hp->virq = irq_create_mapping(NULL, irq[0]); | 1302 | hp->virq = irq_create_mapping(NULL, irq[0]); |
| 1303 | if (hp->virq == NO_IRQ) { | 1303 | if (hp->virq == NO_IRQ) { |
| 1304 | printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", | 1304 | printk(KERN_ERR "%s: couldn't create irq mapping for 0x%x\n", |
| 1305 | __FUNCTION__, irq[0]); | 1305 | __func__, irq[0]); |
| 1306 | continue; | 1306 | continue; |
| 1307 | } | 1307 | } |
| 1308 | 1308 | ||
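The hvsi.c hunks above are a mechanical substitution: the GCC-specific __FUNCTION__ identifier is replaced by the C99-standard __func__ in pr_debug() calls, with no behavioural change. Below is a minimal userspace sketch of the same idiom; pr_debug() is stood in for by a plain printf wrapper and hypothetical_open() is an invented name, both purely for illustration.

#include <stdio.h>

#define pr_debug(...) printf(__VA_ARGS__)	/* stand-in for the kernel macro */

static void hypothetical_open(int line)
{
	/* __func__ expands to "hypothetical_open" at compile time */
	pr_debug("%s: opening line %d\n", __func__, line);
}

int main(void)
{
	hypothetical_open(3);
	return 0;
}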
diff --git a/drivers/char/ip2/i2ellis.c b/drivers/char/ip2/i2ellis.c index 61ef013b8445..3601017f58cf 100644 --- a/drivers/char/ip2/i2ellis.c +++ b/drivers/char/ip2/i2ellis.c | |||
| @@ -53,7 +53,7 @@ static int ii2Safe; // Safe I/O address for delay routine | |||
| 53 | 53 | ||
| 54 | static int iiDelayed; // Set when the iiResetDelay function is | 54 | static int iiDelayed; // Set when the iiResetDelay function is |
| 55 | // called. Cleared when ANY board is reset. | 55 | // called. Cleared when ANY board is reset. |
| 56 | static rwlock_t Dl_spinlock; | 56 | static DEFINE_RWLOCK(Dl_spinlock); |
| 57 | 57 | ||
| 58 | //******** | 58 | //******** |
| 59 | //* Code * | 59 | //* Code * |
| @@ -82,7 +82,6 @@ static rwlock_t Dl_spinlock; | |||
| 82 | static void | 82 | static void |
| 83 | iiEllisInit(void) | 83 | iiEllisInit(void) |
| 84 | { | 84 | { |
| 85 | LOCK_INIT(&Dl_spinlock); | ||
| 86 | } | 85 | } |
| 87 | 86 | ||
| 88 | //****************************************************************************** | 87 | //****************************************************************************** |
| @@ -132,7 +131,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay ) | |||
| 132 | || (address & 0x7) | 131 | || (address & 0x7) |
| 133 | ) | 132 | ) |
| 134 | { | 133 | { |
| 135 | COMPLETE(pB,I2EE_BADADDR); | 134 | I2_COMPLETE(pB, I2EE_BADADDR); |
| 136 | } | 135 | } |
| 137 | 136 | ||
| 138 | // Initialize accelerators | 137 | // Initialize accelerators |
| @@ -152,7 +151,7 @@ iiSetAddress( i2eBordStrPtr pB, int address, delayFunc_t delay ) | |||
| 152 | pB->i2eValid = I2E_MAGIC; | 151 | pB->i2eValid = I2E_MAGIC; |
| 153 | pB->i2eState = II_STATE_COLD; | 152 | pB->i2eState = II_STATE_COLD; |
| 154 | 153 | ||
| 155 | COMPLETE(pB, I2EE_GOOD); | 154 | I2_COMPLETE(pB, I2EE_GOOD); |
| 156 | } | 155 | } |
| 157 | 156 | ||
| 158 | //****************************************************************************** | 157 | //****************************************************************************** |
| @@ -177,12 +176,12 @@ iiReset(i2eBordStrPtr pB) | |||
| 177 | // Magic number should be set, else even the address is suspect | 176 | // Magic number should be set, else even the address is suspect |
| 178 | if (pB->i2eValid != I2E_MAGIC) | 177 | if (pB->i2eValid != I2E_MAGIC) |
| 179 | { | 178 | { |
| 180 | COMPLETE(pB, I2EE_BADMAGIC); | 179 | I2_COMPLETE(pB, I2EE_BADMAGIC); |
| 181 | } | 180 | } |
| 182 | 181 | ||
| 183 | OUTB(pB->i2eBase + FIFO_RESET, 0); // Any data will do | 182 | outb(0, pB->i2eBase + FIFO_RESET); /* Any data will do */ |
| 184 | iiDelay(pB, 50); // Pause between resets | 183 | iiDelay(pB, 50); // Pause between resets |
| 185 | OUTB(pB->i2eBase + FIFO_RESET, 0); // Second reset | 184 | outb(0, pB->i2eBase + FIFO_RESET); /* Second reset */ |
| 186 | 185 | ||
| 187 | // We must wait before even attempting to read anything from the FIFO: the | 186 | // We must wait before even attempting to read anything from the FIFO: the |
| 188 | // board's P.O.S.T may actually attempt to read and write its end of the | 187 | // board's P.O.S.T may actually attempt to read and write its end of the |
| @@ -203,7 +202,7 @@ iiReset(i2eBordStrPtr pB) | |||
| 203 | // Ensure anything which would have been of use to standard loadware is | 202 | // Ensure anything which would have been of use to standard loadware is |
| 204 | // blanked out, since board has now forgotten everything!. | 203 | // blanked out, since board has now forgotten everything!. |
| 205 | 204 | ||
| 206 | pB->i2eUsingIrq = IRQ_UNDEFINED; // Not set up to use an interrupt yet | 205 | pB->i2eUsingIrq = I2_IRQ_UNDEFINED; /* to not use an interrupt so far */ |
| 207 | pB->i2eWaitingForEmptyFifo = 0; | 206 | pB->i2eWaitingForEmptyFifo = 0; |
| 208 | pB->i2eOutMailWaiting = 0; | 207 | pB->i2eOutMailWaiting = 0; |
| 209 | pB->i2eChannelPtr = NULL; | 208 | pB->i2eChannelPtr = NULL; |
| @@ -215,7 +214,7 @@ iiReset(i2eBordStrPtr pB) | |||
| 215 | pB->i2eFatalTrap = NULL; | 214 | pB->i2eFatalTrap = NULL; |
| 216 | pB->i2eFatal = 0; | 215 | pB->i2eFatal = 0; |
| 217 | 216 | ||
| 218 | COMPLETE(pB, I2EE_GOOD); | 217 | I2_COMPLETE(pB, I2EE_GOOD); |
| 219 | } | 218 | } |
| 220 | 219 | ||
| 221 | //****************************************************************************** | 220 | //****************************************************************************** |
| @@ -235,14 +234,14 @@ static int | |||
| 235 | iiResetDelay(i2eBordStrPtr pB) | 234 | iiResetDelay(i2eBordStrPtr pB) |
| 236 | { | 235 | { |
| 237 | if (pB->i2eValid != I2E_MAGIC) { | 236 | if (pB->i2eValid != I2E_MAGIC) { |
| 238 | COMPLETE(pB, I2EE_BADMAGIC); | 237 | I2_COMPLETE(pB, I2EE_BADMAGIC); |
| 239 | } | 238 | } |
| 240 | if (pB->i2eState != II_STATE_RESET) { | 239 | if (pB->i2eState != II_STATE_RESET) { |
| 241 | COMPLETE(pB, I2EE_BADSTATE); | 240 | I2_COMPLETE(pB, I2EE_BADSTATE); |
| 242 | } | 241 | } |
| 243 | iiDelay(pB,2000); /* Now we wait for two seconds. */ | 242 | iiDelay(pB,2000); /* Now we wait for two seconds. */ |
| 244 | iiDelayed = 1; /* Delay has been called: ok to initialize */ | 243 | iiDelayed = 1; /* Delay has been called: ok to initialize */ |
| 245 | COMPLETE(pB, I2EE_GOOD); | 244 | I2_COMPLETE(pB, I2EE_GOOD); |
| 246 | } | 245 | } |
| 247 | 246 | ||
| 248 | //****************************************************************************** | 247 | //****************************************************************************** |
| @@ -273,12 +272,12 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 273 | 272 | ||
| 274 | if (pB->i2eValid != I2E_MAGIC) | 273 | if (pB->i2eValid != I2E_MAGIC) |
| 275 | { | 274 | { |
| 276 | COMPLETE(pB, I2EE_BADMAGIC); | 275 | I2_COMPLETE(pB, I2EE_BADMAGIC); |
| 277 | } | 276 | } |
| 278 | 277 | ||
| 279 | if (pB->i2eState != II_STATE_RESET || !iiDelayed) | 278 | if (pB->i2eState != II_STATE_RESET || !iiDelayed) |
| 280 | { | 279 | { |
| 281 | COMPLETE(pB, I2EE_BADSTATE); | 280 | I2_COMPLETE(pB, I2EE_BADSTATE); |
| 282 | } | 281 | } |
| 283 | 282 | ||
| 284 | // In case there is a failure short of our completely reading the power-up | 283 | // In case there is a failure short of our completely reading the power-up |
| @@ -291,13 +290,12 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 291 | for (itemp = 0; itemp < sizeof(porStr); itemp++) | 290 | for (itemp = 0; itemp < sizeof(porStr); itemp++) |
| 292 | { | 291 | { |
| 293 | // We expect the entire message is ready. | 292 | // We expect the entire message is ready. |
| 294 | if (HAS_NO_INPUT(pB)) | 293 | if (!I2_HAS_INPUT(pB)) { |
| 295 | { | ||
| 296 | pB->i2ePomSize = itemp; | 294 | pB->i2ePomSize = itemp; |
| 297 | COMPLETE(pB, I2EE_PORM_SHORT); | 295 | I2_COMPLETE(pB, I2EE_PORM_SHORT); |
| 298 | } | 296 | } |
| 299 | 297 | ||
| 300 | pB->i2ePom.c[itemp] = c = BYTE_FROM(pB); | 298 | pB->i2ePom.c[itemp] = c = inb(pB->i2eData); |
| 301 | 299 | ||
| 302 | // We check the magic numbers as soon as they are supposed to be read | 300 | // We check the magic numbers as soon as they are supposed to be read |
| 303 | // (rather than after) to minimize effect of reading something we | 301 | // (rather than after) to minimize effect of reading something we |
| @@ -306,22 +304,22 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 306 | (itemp == POR_2_INDEX && c != POR_MAGIC_2)) | 304 | (itemp == POR_2_INDEX && c != POR_MAGIC_2)) |
| 307 | { | 305 | { |
| 308 | pB->i2ePomSize = itemp+1; | 306 | pB->i2ePomSize = itemp+1; |
| 309 | COMPLETE(pB, I2EE_BADMAGIC); | 307 | I2_COMPLETE(pB, I2EE_BADMAGIC); |
| 310 | } | 308 | } |
| 311 | } | 309 | } |
| 312 | 310 | ||
| 313 | pB->i2ePomSize = itemp; | 311 | pB->i2ePomSize = itemp; |
| 314 | 312 | ||
| 315 | // Ensure that this was all the data... | 313 | // Ensure that this was all the data... |
| 316 | if (HAS_INPUT(pB)) | 314 | if (I2_HAS_INPUT(pB)) |
| 317 | COMPLETE(pB, I2EE_PORM_LONG); | 315 | I2_COMPLETE(pB, I2EE_PORM_LONG); |
| 318 | 316 | ||
| 319 | // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper: | 317 | // For now, we'll fail to initialize if P.O.S.T reports bad chip mapper: |
| 320 | // Implying we will not be able to download any code either: That's ok: the | 318 | // Implying we will not be able to download any code either: That's ok: the |
| 321 | // condition is pretty explicit. | 319 | // condition is pretty explicit. |
| 322 | if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER) | 320 | if (pB->i2ePom.e.porDiag1 & POR_BAD_MAPPER) |
| 323 | { | 321 | { |
| 324 | COMPLETE(pB, I2EE_POSTERR); | 322 | I2_COMPLETE(pB, I2EE_POSTERR); |
| 325 | } | 323 | } |
| 326 | 324 | ||
| 327 | // Determine anything which must be done differently depending on the family | 325 | // Determine anything which must be done differently depending on the family |
| @@ -332,7 +330,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 332 | 330 | ||
| 333 | pB->i2eFifoStyle = FIFO_II; | 331 | pB->i2eFifoStyle = FIFO_II; |
| 334 | pB->i2eFifoSize = 512; // 512 bytes, always | 332 | pB->i2eFifoSize = 512; // 512 bytes, always |
| 335 | pB->i2eDataWidth16 = NO; | 333 | pB->i2eDataWidth16 = false; |
| 336 | 334 | ||
| 337 | pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit | 335 | pB->i2eMaxIrq = 15; // Because board cannot tell us it is in an 8-bit |
| 338 | // slot, we do allow it to be done (documentation!) | 336 | // slot, we do allow it to be done (documentation!) |
| @@ -354,7 +352,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 354 | // should always be consistent for IntelliPort-II. Ditto below... | 352 | // should always be consistent for IntelliPort-II. Ditto below... |
| 355 | if (pB->i2ePom.e.porPorts1 != 4) | 353 | if (pB->i2ePom.e.porPorts1 != 4) |
| 356 | { | 354 | { |
| 357 | COMPLETE(pB, I2EE_INCONSIST); | 355 | I2_COMPLETE(pB, I2EE_INCONSIST); |
| 358 | } | 356 | } |
| 359 | break; | 357 | break; |
| 360 | 358 | ||
| @@ -364,7 +362,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 364 | pB->i2eChannelMap[0] = 0xff; // Eight port | 362 | pB->i2eChannelMap[0] = 0xff; // Eight port |
| 365 | if (pB->i2ePom.e.porPorts1 != 8) | 363 | if (pB->i2ePom.e.porPorts1 != 8) |
| 366 | { | 364 | { |
| 367 | COMPLETE(pB, I2EE_INCONSIST); | 365 | I2_COMPLETE(pB, I2EE_INCONSIST); |
| 368 | } | 366 | } |
| 369 | break; | 367 | break; |
| 370 | 368 | ||
| @@ -373,7 +371,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 373 | pB->i2eChannelMap[0] = 0x3f; // Six Port | 371 | pB->i2eChannelMap[0] = 0x3f; // Six Port |
| 374 | if (pB->i2ePom.e.porPorts1 != 6) | 372 | if (pB->i2ePom.e.porPorts1 != 6) |
| 375 | { | 373 | { |
| 376 | COMPLETE(pB, I2EE_INCONSIST); | 374 | I2_COMPLETE(pB, I2EE_INCONSIST); |
| 377 | } | 375 | } |
| 378 | break; | 376 | break; |
| 379 | } | 377 | } |
| @@ -402,7 +400,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 402 | 400 | ||
| 403 | if (itemp < 8 || itemp > 15) | 401 | if (itemp < 8 || itemp > 15) |
| 404 | { | 402 | { |
| 405 | COMPLETE(pB, I2EE_INCONSIST); | 403 | I2_COMPLETE(pB, I2EE_INCONSIST); |
| 406 | } | 404 | } |
| 407 | pB->i2eFifoSize = (1 << itemp); | 405 | pB->i2eFifoSize = (1 << itemp); |
| 408 | 406 | ||
| @@ -450,26 +448,26 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 450 | switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) ) | 448 | switch (pB->i2ePom.e.porBus & (POR_BUS_SLOT16 | POR_BUS_DIP16) ) |
| 451 | { | 449 | { |
| 452 | case POR_BUS_SLOT16 | POR_BUS_DIP16: | 450 | case POR_BUS_SLOT16 | POR_BUS_DIP16: |
| 453 | pB->i2eDataWidth16 = YES; | 451 | pB->i2eDataWidth16 = true; |
| 454 | pB->i2eMaxIrq = 15; | 452 | pB->i2eMaxIrq = 15; |
| 455 | break; | 453 | break; |
| 456 | 454 | ||
| 457 | case POR_BUS_SLOT16: | 455 | case POR_BUS_SLOT16: |
| 458 | pB->i2eDataWidth16 = NO; | 456 | pB->i2eDataWidth16 = false; |
| 459 | pB->i2eMaxIrq = 15; | 457 | pB->i2eMaxIrq = 15; |
| 460 | break; | 458 | break; |
| 461 | 459 | ||
| 462 | case 0: | 460 | case 0: |
| 463 | case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care. | 461 | case POR_BUS_DIP16: // In an 8-bit slot, DIP switch don't care. |
| 464 | default: | 462 | default: |
| 465 | pB->i2eDataWidth16 = NO; | 463 | pB->i2eDataWidth16 = false; |
| 466 | pB->i2eMaxIrq = 7; | 464 | pB->i2eMaxIrq = 7; |
| 467 | break; | 465 | break; |
| 468 | } | 466 | } |
| 469 | break; // POR_ID_FIIEX case | 467 | break; // POR_ID_FIIEX case |
| 470 | 468 | ||
| 471 | default: // Unknown type of board | 469 | default: // Unknown type of board |
| 472 | COMPLETE(pB, I2EE_BAD_FAMILY); | 470 | I2_COMPLETE(pB, I2EE_BAD_FAMILY); |
| 473 | break; | 471 | break; |
| 474 | } // End the switch based on family | 472 | } // End the switch based on family |
| 475 | 473 | ||
| @@ -483,17 +481,14 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 483 | { | 481 | { |
| 484 | case POR_BUS_T_ISA: | 482 | case POR_BUS_T_ISA: |
| 485 | case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok. | 483 | case POR_BUS_T_UNK: // If the type of bus is undeclared, assume ok. |
| 486 | pB->i2eChangeIrq = YES; | ||
| 487 | break; | ||
| 488 | case POR_BUS_T_MCA: | 484 | case POR_BUS_T_MCA: |
| 489 | case POR_BUS_T_EISA: | 485 | case POR_BUS_T_EISA: |
| 490 | pB->i2eChangeIrq = NO; | ||
| 491 | break; | 486 | break; |
| 492 | default: | 487 | default: |
| 493 | COMPLETE(pB, I2EE_BADBUS); | 488 | I2_COMPLETE(pB, I2EE_BADBUS); |
| 494 | } | 489 | } |
| 495 | 490 | ||
| 496 | if (pB->i2eDataWidth16 == YES) | 491 | if (pB->i2eDataWidth16) |
| 497 | { | 492 | { |
| 498 | pB->i2eWriteBuf = iiWriteBuf16; | 493 | pB->i2eWriteBuf = iiWriteBuf16; |
| 499 | pB->i2eReadBuf = iiReadBuf16; | 494 | pB->i2eReadBuf = iiReadBuf16; |
| @@ -529,7 +524,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 529 | break; | 524 | break; |
| 530 | 525 | ||
| 531 | default: | 526 | default: |
| 532 | COMPLETE(pB, I2EE_INCONSIST); | 527 | I2_COMPLETE(pB, I2EE_INCONSIST); |
| 533 | } | 528 | } |
| 534 | 529 | ||
| 535 | // Initialize state information. | 530 | // Initialize state information. |
| @@ -549,7 +544,7 @@ iiInitialize(i2eBordStrPtr pB) | |||
| 549 | // Everything is ok now, return with good status/ | 544 | // Everything is ok now, return with good status/ |
| 550 | 545 | ||
| 551 | pB->i2eValid = I2E_MAGIC; | 546 | pB->i2eValid = I2E_MAGIC; |
| 552 | COMPLETE(pB, I2EE_GOOD); | 547 | I2_COMPLETE(pB, I2EE_GOOD); |
| 553 | } | 548 | } |
| 554 | 549 | ||
| 555 | //****************************************************************************** | 550 | //****************************************************************************** |
| @@ -658,7 +653,7 @@ ii2DelayIO(unsigned int mseconds) | |||
| 658 | while(mseconds--) { | 653 | while(mseconds--) { |
| 659 | int i = ii2DelValue; | 654 | int i = ii2DelValue; |
| 660 | while ( i-- ) { | 655 | while ( i-- ) { |
| 661 | INB ( ii2Safe ); | 656 | inb(ii2Safe); |
| 662 | } | 657 | } |
| 663 | } | 658 | } |
| 664 | } | 659 | } |
| @@ -709,11 +704,11 @@ iiWriteBuf16(i2eBordStrPtr pB, unsigned char *address, int count) | |||
| 709 | { | 704 | { |
| 710 | // Rudimentary sanity checking here. | 705 | // Rudimentary sanity checking here. |
| 711 | if (pB->i2eValid != I2E_MAGIC) | 706 | if (pB->i2eValid != I2E_MAGIC) |
| 712 | COMPLETE(pB, I2EE_INVALID); | 707 | I2_COMPLETE(pB, I2EE_INVALID); |
| 713 | 708 | ||
| 714 | OUTSW ( pB->i2eData, address, count); | 709 | I2_OUTSW(pB->i2eData, address, count); |
| 715 | 710 | ||
| 716 | COMPLETE(pB, I2EE_GOOD); | 711 | I2_COMPLETE(pB, I2EE_GOOD); |
| 717 | } | 712 | } |
| 718 | 713 | ||
| 719 | //****************************************************************************** | 714 | //****************************************************************************** |
| @@ -738,11 +733,11 @@ iiWriteBuf8(i2eBordStrPtr pB, unsigned char *address, int count) | |||
| 738 | { | 733 | { |
| 739 | /* Rudimentary sanity checking here */ | 734 | /* Rudimentary sanity checking here */ |
| 740 | if (pB->i2eValid != I2E_MAGIC) | 735 | if (pB->i2eValid != I2E_MAGIC) |
| 741 | COMPLETE(pB, I2EE_INVALID); | 736 | I2_COMPLETE(pB, I2EE_INVALID); |
| 742 | 737 | ||
| 743 | OUTSB ( pB->i2eData, address, count ); | 738 | I2_OUTSB(pB->i2eData, address, count); |
| 744 | 739 | ||
| 745 | COMPLETE(pB, I2EE_GOOD); | 740 | I2_COMPLETE(pB, I2EE_GOOD); |
| 746 | } | 741 | } |
| 747 | 742 | ||
| 748 | //****************************************************************************** | 743 | //****************************************************************************** |
| @@ -767,11 +762,11 @@ iiReadBuf16(i2eBordStrPtr pB, unsigned char *address, int count) | |||
| 767 | { | 762 | { |
| 768 | // Rudimentary sanity checking here. | 763 | // Rudimentary sanity checking here. |
| 769 | if (pB->i2eValid != I2E_MAGIC) | 764 | if (pB->i2eValid != I2E_MAGIC) |
| 770 | COMPLETE(pB, I2EE_INVALID); | 765 | I2_COMPLETE(pB, I2EE_INVALID); |
| 771 | 766 | ||
| 772 | INSW ( pB->i2eData, address, count); | 767 | I2_INSW(pB->i2eData, address, count); |
| 773 | 768 | ||
| 774 | COMPLETE(pB, I2EE_GOOD); | 769 | I2_COMPLETE(pB, I2EE_GOOD); |
| 775 | } | 770 | } |
| 776 | 771 | ||
| 777 | //****************************************************************************** | 772 | //****************************************************************************** |
| @@ -796,11 +791,11 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count) | |||
| 796 | { | 791 | { |
| 797 | // Rudimentary sanity checking here. | 792 | // Rudimentary sanity checking here. |
| 798 | if (pB->i2eValid != I2E_MAGIC) | 793 | if (pB->i2eValid != I2E_MAGIC) |
| 799 | COMPLETE(pB, I2EE_INVALID); | 794 | I2_COMPLETE(pB, I2EE_INVALID); |
| 800 | 795 | ||
| 801 | INSB ( pB->i2eData, address, count); | 796 | I2_INSB(pB->i2eData, address, count); |
| 802 | 797 | ||
| 803 | COMPLETE(pB, I2EE_GOOD); | 798 | I2_COMPLETE(pB, I2EE_GOOD); |
| 804 | } | 799 | } |
| 805 | 800 | ||
| 806 | //****************************************************************************** | 801 | //****************************************************************************** |
| @@ -820,7 +815,7 @@ iiReadBuf8(i2eBordStrPtr pB, unsigned char *address, int count) | |||
| 820 | static unsigned short | 815 | static unsigned short |
| 821 | iiReadWord16(i2eBordStrPtr pB) | 816 | iiReadWord16(i2eBordStrPtr pB) |
| 822 | { | 817 | { |
| 823 | return (unsigned short)( INW(pB->i2eData) ); | 818 | return inw(pB->i2eData); |
| 824 | } | 819 | } |
| 825 | 820 | ||
| 826 | //****************************************************************************** | 821 | //****************************************************************************** |
| @@ -842,9 +837,9 @@ iiReadWord8(i2eBordStrPtr pB) | |||
| 842 | { | 837 | { |
| 843 | unsigned short urs; | 838 | unsigned short urs; |
| 844 | 839 | ||
| 845 | urs = INB ( pB->i2eData ); | 840 | urs = inb(pB->i2eData); |
| 846 | 841 | ||
| 847 | return ( ( INB ( pB->i2eData ) << 8 ) | urs ); | 842 | return (inb(pB->i2eData) << 8) | urs; |
| 848 | } | 843 | } |
| 849 | 844 | ||
| 850 | //****************************************************************************** | 845 | //****************************************************************************** |
| @@ -865,7 +860,7 @@ iiReadWord8(i2eBordStrPtr pB) | |||
| 865 | static void | 860 | static void |
| 866 | iiWriteWord16(i2eBordStrPtr pB, unsigned short value) | 861 | iiWriteWord16(i2eBordStrPtr pB, unsigned short value) |
| 867 | { | 862 | { |
| 868 | WORD_TO(pB, (int)value); | 863 | outw((int)value, pB->i2eData); |
| 869 | } | 864 | } |
| 870 | 865 | ||
| 871 | //****************************************************************************** | 866 | //****************************************************************************** |
| @@ -886,8 +881,8 @@ iiWriteWord16(i2eBordStrPtr pB, unsigned short value) | |||
| 886 | static void | 881 | static void |
| 887 | iiWriteWord8(i2eBordStrPtr pB, unsigned short value) | 882 | iiWriteWord8(i2eBordStrPtr pB, unsigned short value) |
| 888 | { | 883 | { |
| 889 | BYTE_TO(pB, (char)value); | 884 | outb((char)value, pB->i2eData); |
| 890 | BYTE_TO(pB, (char)(value >> 8) ); | 885 | outb((char)(value >> 8), pB->i2eData); |
| 891 | } | 886 | } |
| 892 | 887 | ||
| 893 | //****************************************************************************** | 888 | //****************************************************************************** |
| @@ -939,30 +934,30 @@ iiWaitForTxEmptyII(i2eBordStrPtr pB, int mSdelay) | |||
| 939 | // interrupts of any kind. | 934 | // interrupts of any kind. |
| 940 | 935 | ||
| 941 | 936 | ||
| 942 | WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags) | 937 | write_lock_irqsave(&Dl_spinlock, flags); |
| 943 | OUTB(pB->i2ePointer, SEL_COMMAND); | 938 | outb(SEL_COMMAND, pB->i2ePointer); |
| 944 | OUTB(pB->i2ePointer, SEL_CMD_SH); | 939 | outb(SEL_CMD_SH, pB->i2ePointer); |
| 945 | 940 | ||
| 946 | itemp = INB(pB->i2eStatus); | 941 | itemp = inb(pB->i2eStatus); |
| 947 | 942 | ||
| 948 | OUTB(pB->i2ePointer, SEL_COMMAND); | 943 | outb(SEL_COMMAND, pB->i2ePointer); |
| 949 | OUTB(pB->i2ePointer, SEL_CMD_UNSH); | 944 | outb(SEL_CMD_UNSH, pB->i2ePointer); |
| 950 | 945 | ||
| 951 | if (itemp & ST_IN_EMPTY) | 946 | if (itemp & ST_IN_EMPTY) |
| 952 | { | 947 | { |
| 953 | UPDATE_FIFO_ROOM(pB); | 948 | I2_UPDATE_FIFO_ROOM(pB); |
| 954 | WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) | 949 | write_unlock_irqrestore(&Dl_spinlock, flags); |
| 955 | COMPLETE(pB, I2EE_GOOD); | 950 | I2_COMPLETE(pB, I2EE_GOOD); |
| 956 | } | 951 | } |
| 957 | 952 | ||
| 958 | WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) | 953 | write_unlock_irqrestore(&Dl_spinlock, flags); |
| 959 | 954 | ||
| 960 | if (mSdelay-- == 0) | 955 | if (mSdelay-- == 0) |
| 961 | break; | 956 | break; |
| 962 | 957 | ||
| 963 | iiDelay(pB, 1); /* 1 mS granularity on checking condition */ | 958 | iiDelay(pB, 1); /* 1 mS granularity on checking condition */ |
| 964 | } | 959 | } |
| 965 | COMPLETE(pB, I2EE_TXE_TIME); | 960 | I2_COMPLETE(pB, I2EE_TXE_TIME); |
| 966 | } | 961 | } |
| 967 | 962 | ||
| 968 | //****************************************************************************** | 963 | //****************************************************************************** |
| @@ -1002,21 +997,21 @@ iiWaitForTxEmptyIIEX(i2eBordStrPtr pB, int mSdelay) | |||
| 1002 | // you will generally not want to service interrupts or in any way | 997 | // you will generally not want to service interrupts or in any way |
| 1003 | // disrupt the assumptions implicit in the larger context. | 998 | // disrupt the assumptions implicit in the larger context. |
| 1004 | 999 | ||
| 1005 | WRITE_LOCK_IRQSAVE(&Dl_spinlock,flags) | 1000 | write_lock_irqsave(&Dl_spinlock, flags); |
| 1006 | 1001 | ||
| 1007 | if (INB(pB->i2eStatus) & STE_OUT_MT) { | 1002 | if (inb(pB->i2eStatus) & STE_OUT_MT) { |
| 1008 | UPDATE_FIFO_ROOM(pB); | 1003 | I2_UPDATE_FIFO_ROOM(pB); |
| 1009 | WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) | 1004 | write_unlock_irqrestore(&Dl_spinlock, flags); |
| 1010 | COMPLETE(pB, I2EE_GOOD); | 1005 | I2_COMPLETE(pB, I2EE_GOOD); |
| 1011 | } | 1006 | } |
| 1012 | WRITE_UNLOCK_IRQRESTORE(&Dl_spinlock,flags) | 1007 | write_unlock_irqrestore(&Dl_spinlock, flags); |
| 1013 | 1008 | ||
| 1014 | if (mSdelay-- == 0) | 1009 | if (mSdelay-- == 0) |
| 1015 | break; | 1010 | break; |
| 1016 | 1011 | ||
| 1017 | iiDelay(pB, 1); // 1 mS granularity on checking condition | 1012 | iiDelay(pB, 1); // 1 mS granularity on checking condition |
| 1018 | } | 1013 | } |
| 1019 | COMPLETE(pB, I2EE_TXE_TIME); | 1014 | I2_COMPLETE(pB, I2EE_TXE_TIME); |
| 1020 | } | 1015 | } |
| 1021 | 1016 | ||
| 1022 | //****************************************************************************** | 1017 | //****************************************************************************** |
| @@ -1038,8 +1033,8 @@ static int | |||
| 1038 | iiTxMailEmptyII(i2eBordStrPtr pB) | 1033 | iiTxMailEmptyII(i2eBordStrPtr pB) |
| 1039 | { | 1034 | { |
| 1040 | int port = pB->i2ePointer; | 1035 | int port = pB->i2ePointer; |
| 1041 | OUTB ( port, SEL_OUTMAIL ); | 1036 | outb(SEL_OUTMAIL, port); |
| 1042 | return ( INB(port) == 0 ); | 1037 | return inb(port) == 0; |
| 1043 | } | 1038 | } |
| 1044 | 1039 | ||
| 1045 | //****************************************************************************** | 1040 | //****************************************************************************** |
| @@ -1060,7 +1055,7 @@ iiTxMailEmptyII(i2eBordStrPtr pB) | |||
| 1060 | static int | 1055 | static int |
| 1061 | iiTxMailEmptyIIEX(i2eBordStrPtr pB) | 1056 | iiTxMailEmptyIIEX(i2eBordStrPtr pB) |
| 1062 | { | 1057 | { |
| 1063 | return !(INB(pB->i2eStatus) & STE_OUT_MAIL); | 1058 | return !(inb(pB->i2eStatus) & STE_OUT_MAIL); |
| 1064 | } | 1059 | } |
| 1065 | 1060 | ||
| 1066 | //****************************************************************************** | 1061 | //****************************************************************************** |
| @@ -1084,10 +1079,10 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail) | |||
| 1084 | { | 1079 | { |
| 1085 | int port = pB->i2ePointer; | 1080 | int port = pB->i2ePointer; |
| 1086 | 1081 | ||
| 1087 | OUTB(port, SEL_OUTMAIL); | 1082 | outb(SEL_OUTMAIL, port); |
| 1088 | if (INB(port) == 0) { | 1083 | if (inb(port) == 0) { |
| 1089 | OUTB(port, SEL_OUTMAIL); | 1084 | outb(SEL_OUTMAIL, port); |
| 1090 | OUTB(port, mail); | 1085 | outb(mail, port); |
| 1091 | return 1; | 1086 | return 1; |
| 1092 | } | 1087 | } |
| 1093 | return 0; | 1088 | return 0; |
| @@ -1112,10 +1107,9 @@ iiTrySendMailII(i2eBordStrPtr pB, unsigned char mail) | |||
| 1112 | static int | 1107 | static int |
| 1113 | iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail) | 1108 | iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail) |
| 1114 | { | 1109 | { |
| 1115 | if(INB(pB->i2eStatus) & STE_OUT_MAIL) { | 1110 | if (inb(pB->i2eStatus) & STE_OUT_MAIL) |
| 1116 | return 0; | 1111 | return 0; |
| 1117 | } | 1112 | outb(mail, pB->i2eXMail); |
| 1118 | OUTB(pB->i2eXMail, mail); | ||
| 1119 | return 1; | 1113 | return 1; |
| 1120 | } | 1114 | } |
| 1121 | 1115 | ||
| @@ -1136,9 +1130,9 @@ iiTrySendMailIIEX(i2eBordStrPtr pB, unsigned char mail) | |||
| 1136 | static unsigned short | 1130 | static unsigned short |
| 1137 | iiGetMailII(i2eBordStrPtr pB) | 1131 | iiGetMailII(i2eBordStrPtr pB) |
| 1138 | { | 1132 | { |
| 1139 | if (HAS_MAIL(pB)) { | 1133 | if (I2_HAS_MAIL(pB)) { |
| 1140 | OUTB(pB->i2ePointer, SEL_INMAIL); | 1134 | outb(SEL_INMAIL, pB->i2ePointer); |
| 1141 | return INB(pB->i2ePointer); | 1135 | return inb(pB->i2ePointer); |
| 1142 | } else { | 1136 | } else { |
| 1143 | return NO_MAIL_HERE; | 1137 | return NO_MAIL_HERE; |
| 1144 | } | 1138 | } |
| @@ -1161,11 +1155,10 @@ iiGetMailII(i2eBordStrPtr pB) | |||
| 1161 | static unsigned short | 1155 | static unsigned short |
| 1162 | iiGetMailIIEX(i2eBordStrPtr pB) | 1156 | iiGetMailIIEX(i2eBordStrPtr pB) |
| 1163 | { | 1157 | { |
| 1164 | if (HAS_MAIL(pB)) { | 1158 | if (I2_HAS_MAIL(pB)) |
| 1165 | return INB(pB->i2eXMail); | 1159 | return inb(pB->i2eXMail); |
| 1166 | } else { | 1160 | else |
| 1167 | return NO_MAIL_HERE; | 1161 | return NO_MAIL_HERE; |
| 1168 | } | ||
| 1169 | } | 1162 | } |
| 1170 | 1163 | ||
| 1171 | //****************************************************************************** | 1164 | //****************************************************************************** |
| @@ -1184,8 +1177,8 @@ iiGetMailIIEX(i2eBordStrPtr pB) | |||
| 1184 | static void | 1177 | static void |
| 1185 | iiEnableMailIrqII(i2eBordStrPtr pB) | 1178 | iiEnableMailIrqII(i2eBordStrPtr pB) |
| 1186 | { | 1179 | { |
| 1187 | OUTB(pB->i2ePointer, SEL_MASK); | 1180 | outb(SEL_MASK, pB->i2ePointer); |
| 1188 | OUTB(pB->i2ePointer, ST_IN_MAIL); | 1181 | outb(ST_IN_MAIL, pB->i2ePointer); |
| 1189 | } | 1182 | } |
| 1190 | 1183 | ||
| 1191 | //****************************************************************************** | 1184 | //****************************************************************************** |
| @@ -1204,7 +1197,7 @@ iiEnableMailIrqII(i2eBordStrPtr pB) | |||
| 1204 | static void | 1197 | static void |
| 1205 | iiEnableMailIrqIIEX(i2eBordStrPtr pB) | 1198 | iiEnableMailIrqIIEX(i2eBordStrPtr pB) |
| 1206 | { | 1199 | { |
| 1207 | OUTB(pB->i2eXMask, MX_IN_MAIL); | 1200 | outb(MX_IN_MAIL, pB->i2eXMask); |
| 1208 | } | 1201 | } |
| 1209 | 1202 | ||
| 1210 | //****************************************************************************** | 1203 | //****************************************************************************** |
| @@ -1223,8 +1216,8 @@ iiEnableMailIrqIIEX(i2eBordStrPtr pB) | |||
| 1223 | static void | 1216 | static void |
| 1224 | iiWriteMaskII(i2eBordStrPtr pB, unsigned char value) | 1217 | iiWriteMaskII(i2eBordStrPtr pB, unsigned char value) |
| 1225 | { | 1218 | { |
| 1226 | OUTB(pB->i2ePointer, SEL_MASK); | 1219 | outb(SEL_MASK, pB->i2ePointer); |
| 1227 | OUTB(pB->i2ePointer, value); | 1220 | outb(value, pB->i2ePointer); |
| 1228 | } | 1221 | } |
| 1229 | 1222 | ||
| 1230 | //****************************************************************************** | 1223 | //****************************************************************************** |
| @@ -1243,7 +1236,7 @@ iiWriteMaskII(i2eBordStrPtr pB, unsigned char value) | |||
| 1243 | static void | 1236 | static void |
| 1244 | iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value) | 1237 | iiWriteMaskIIEX(i2eBordStrPtr pB, unsigned char value) |
| 1245 | { | 1238 | { |
| 1246 | OUTB(pB->i2eXMask, value); | 1239 | outb(value, pB->i2eXMask); |
| 1247 | } | 1240 | } |
| 1248 | 1241 | ||
| 1249 | //****************************************************************************** | 1242 | //****************************************************************************** |
| @@ -1354,9 +1347,8 @@ iiDownloadBlock ( i2eBordStrPtr pB, loadHdrStrPtr pSource, int isStandard) | |||
| 1354 | // immediately and be harmless, though not strictly necessary. | 1347 | // immediately and be harmless, though not strictly necessary. |
| 1355 | itemp = MAX_DLOAD_ACK_TIME/10; | 1348 | itemp = MAX_DLOAD_ACK_TIME/10; |
| 1356 | while (--itemp) { | 1349 | while (--itemp) { |
| 1357 | if (HAS_INPUT(pB)) { | 1350 | if (I2_HAS_INPUT(pB)) { |
| 1358 | switch(BYTE_FROM(pB)) | 1351 | switch (inb(pB->i2eData)) { |
| 1359 | { | ||
| 1360 | case LOADWARE_OK: | 1352 | case LOADWARE_OK: |
| 1361 | pB->i2eState = | 1353 | pB->i2eState = |
| 1362 | isStandard ? II_STATE_STDLOADED :II_STATE_LOADED; | 1354 | isStandard ? II_STATE_STDLOADED :II_STATE_LOADED; |
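Two patterns recur in the i2ellis.c conversion above: the module-level lock is now declared with DEFINE_RWLOCK(), which initializes it at compile time and leaves iiEllisInit() empty, and the driver's private OUTB(port, value)/INB(port) wrappers give way to the kernel's outb(value, port)/inb(port), whose argument order is reversed. The sketch below is only a userspace analogy for the static-initialization point, using POSIX rwlocks; it is not kernel code, and the names are invented.

#include <pthread.h>
#include <stdio.h>

/* analogous to DEFINE_RWLOCK(Dl_spinlock): defined and initialized in one step,
 * so no separate runtime init call is needed */
static pthread_rwlock_t dl_lock = PTHREAD_RWLOCK_INITIALIZER;

static int shared_counter;

static void bump(void)
{
	pthread_rwlock_wrlock(&dl_lock);	/* roughly like write_lock_irqsave() */
	shared_counter++;
	pthread_rwlock_unlock(&dl_lock);	/* roughly like write_unlock_irqrestore() */
}

int main(void)
{
	bump();
	printf("counter = %d\n", shared_counter);
	return 0;
}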
diff --git a/drivers/char/ip2/i2ellis.h b/drivers/char/ip2/i2ellis.h index 433305062fb8..c88a64e527aa 100644 --- a/drivers/char/ip2/i2ellis.h +++ b/drivers/char/ip2/i2ellis.h | |||
| @@ -185,10 +185,6 @@ typedef struct _i2eBordStr | |||
| 185 | // The highest allowable IRQ, based on the | 185 | // The highest allowable IRQ, based on the |
| 186 | // slot size. | 186 | // slot size. |
| 187 | 187 | ||
| 188 | unsigned char i2eChangeIrq; | ||
| 189 | // Whether tis valid to change IRQ's | ||
| 190 | // ISA = ok, EISA, MicroChannel, no | ||
| 191 | |||
| 192 | // Accelerators for various addresses on the board | 188 | // Accelerators for various addresses on the board |
| 193 | int i2eBase; // I/O Address of the Board | 189 | int i2eBase; // I/O Address of the Board |
| 194 | int i2eData; // From here data transfers happen | 190 | int i2eData; // From here data transfers happen |
| @@ -431,12 +427,6 @@ typedef struct _i2eBordStr | |||
| 431 | // Manifests for i2eBordStr: | 427 | // Manifests for i2eBordStr: |
| 432 | //------------------------------------------- | 428 | //------------------------------------------- |
| 433 | 429 | ||
| 434 | #define YES 1 | ||
| 435 | #define NO 0 | ||
| 436 | |||
| 437 | #define NULLFUNC (void (*)(void))0 | ||
| 438 | #define NULLPTR (void *)0 | ||
| 439 | |||
| 440 | typedef void (*delayFunc_t)(unsigned int); | 430 | typedef void (*delayFunc_t)(unsigned int); |
| 441 | 431 | ||
| 442 | // i2eValid | 432 | // i2eValid |
| @@ -494,8 +484,8 @@ typedef void (*delayFunc_t)(unsigned int); | |||
| 494 | 484 | ||
| 495 | // i2eUsingIrq | 485 | // i2eUsingIrq |
| 496 | // | 486 | // |
| 497 | #define IRQ_UNDEFINED 0x1352 // No valid irq (or polling = 0) can ever | 487 | #define I2_IRQ_UNDEFINED 0x1352 /* No valid irq (or polling = 0) can |
| 498 | // promote to this! | 488 | * ever promote to this! */ |
| 499 | //------------------------------------------ | 489 | //------------------------------------------ |
| 500 | // Handy Macros for i2ellis.c and others | 490 | // Handy Macros for i2ellis.c and others |
| 501 | // Note these are common to -II and -IIEX | 491 | // Note these are common to -II and -IIEX |
| @@ -504,41 +494,14 @@ typedef void (*delayFunc_t)(unsigned int); | |||
| 504 | // Given a pointer to the board structure, does the input FIFO have any data or | 494 | // Given a pointer to the board structure, does the input FIFO have any data or |
| 505 | // not? | 495 | // not? |
| 506 | // | 496 | // |
| 507 | #define HAS_INPUT(pB) !(INB(pB->i2eStatus) & ST_IN_EMPTY) | 497 | #define I2_HAS_INPUT(pB) !(inb(pB->i2eStatus) & ST_IN_EMPTY) |
| 508 | #define HAS_NO_INPUT(pB) (INB(pB->i2eStatus) & ST_IN_EMPTY) | ||
| 509 | |||
| 510 | // Given a pointer to board structure, read a byte or word from the fifo | ||
| 511 | // | ||
| 512 | #define BYTE_FROM(pB) (unsigned char)INB(pB->i2eData) | ||
| 513 | #define WORD_FROM(pB) (unsigned short)INW(pB->i2eData) | ||
| 514 | |||
| 515 | // Given a pointer to board structure, is there room for any data to be written | ||
| 516 | // to the data fifo? | ||
| 517 | // | ||
| 518 | #define HAS_OUTROOM(pB) !(INB(pB->i2eStatus) & ST_OUT_FULL) | ||
| 519 | #define HAS_NO_OUTROOM(pB) (INB(pB->i2eStatus) & ST_OUT_FULL) | ||
| 520 | |||
| 521 | // Given a pointer to board structure, write a single byte to the fifo | ||
| 522 | // structure. Note that for 16-bit interfaces, the high order byte is undefined | ||
| 523 | // and unknown. | ||
| 524 | // | ||
| 525 | #define BYTE_TO(pB, c) OUTB(pB->i2eData,(c)) | ||
| 526 | |||
| 527 | // Write a word to the fifo structure. For 8-bit interfaces, this may have | ||
| 528 | // unknown results. | ||
| 529 | // | ||
| 530 | #define WORD_TO(pB, c) OUTW(pB->i2eData,(c)) | ||
| 531 | 498 | ||
| 532 | // Given a pointer to the board structure, is there anything in the incoming | 499 | // Given a pointer to the board structure, is there anything in the incoming |
| 533 | // mailbox? | 500 | // mailbox? |
| 534 | // | 501 | // |
| 535 | #define HAS_MAIL(pB) (INB(pB->i2eStatus) & ST_IN_MAIL) | 502 | #define I2_HAS_MAIL(pB) (inb(pB->i2eStatus) & ST_IN_MAIL) |
| 536 | 503 | ||
| 537 | #define UPDATE_FIFO_ROOM(pB) (pB)->i2eFifoRemains=(pB)->i2eFifoSize | 504 | #define I2_UPDATE_FIFO_ROOM(pB) ((pB)->i2eFifoRemains = (pB)->i2eFifoSize) |
| 538 | |||
| 539 | // Handy macro to round up a number (like the buffer write and read routines do) | ||
| 540 | // | ||
| 541 | #define ROUNDUP(number) (((number)+1) & (~1)) | ||
| 542 | 505 | ||
| 543 | //------------------------------------------ | 506 | //------------------------------------------ |
| 544 | // Function Declarations for i2ellis.c | 507 | // Function Declarations for i2ellis.c |
| @@ -593,20 +556,11 @@ static int iiDownloadBlock(i2eBordStrPtr, loadHdrStrPtr, int); | |||
| 593 | // | 556 | // |
| 594 | static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int); | 557 | static int iiDownloadAll(i2eBordStrPtr, loadHdrStrPtr, int, int); |
| 595 | 558 | ||
| 596 | // Called indirectly always. Needed externally so the routine might be | ||
| 597 | // SPECIFIED as an argument to iiReset() | ||
| 598 | // | ||
| 599 | //static void ii2DelayIO(unsigned int); // N-millisecond delay using | ||
| 600 | //hardware spin | ||
| 601 | //static void ii2DelayTimer(unsigned int); // N-millisecond delay using Linux | ||
| 602 | //timer | ||
| 603 | |||
| 604 | // Many functions defined here return True if good, False otherwise, with an | 559 | // Many functions defined here return True if good, False otherwise, with an |
| 605 | // error code in i2eError field. Here is a handy macro for setting the error | 560 | // error code in i2eError field. Here is a handy macro for setting the error |
| 606 | // code and returning. | 561 | // code and returning. |
| 607 | // | 562 | // |
| 608 | #define COMPLETE(pB,code) \ | 563 | #define I2_COMPLETE(pB,code) do { \ |
| 609 | do { \ | ||
| 610 | pB->i2eError = code; \ | 564 | pB->i2eError = code; \ |
| 611 | return (code == I2EE_GOOD);\ | 565 | return (code == I2EE_GOOD);\ |
| 612 | } while (0) | 566 | } while (0) |
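The I2_COMPLETE() definition above keeps the do { ... } while (0) wrapper so the macro expands to a single statement and remains safe in an unbraced if arm, which is exactly how the driver uses it. A self-contained sketch of the same set-error-and-return pattern, with invented names and error codes:

#include <stdio.h>

#define EE_GOOD		0
#define EE_BADMAGIC	1

struct board {
	int error;	/* last completion code, like i2eError */
	int magic;
};

/* record the completion code and return true only on success */
#define BOARD_COMPLETE(pb, code) do {		\
	(pb)->error = (code);			\
	return ((code) == EE_GOOD);		\
} while (0)

static int check_board(struct board *pb)
{
	if (pb->magic != 0x1234)
		BOARD_COMPLETE(pb, EE_BADMAGIC);	/* single statement: no braces needed */
	BOARD_COMPLETE(pb, EE_GOOD);
}

int main(void)
{
	struct board b = { .magic = 0x1234 };
	int ok = check_board(&b);

	printf("ok=%d error=%d\n", ok, b.error);
	return 0;
}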
diff --git a/drivers/char/ip2/i2hw.h b/drivers/char/ip2/i2hw.h index 15fe04e748f4..8aa6e7ab8d5b 100644 --- a/drivers/char/ip2/i2hw.h +++ b/drivers/char/ip2/i2hw.h | |||
| @@ -129,7 +129,6 @@ registers, use byte operations only. | |||
| 129 | //------------------------------------------------ | 129 | //------------------------------------------------ |
| 130 | // | 130 | // |
| 131 | #include "ip2types.h" | 131 | #include "ip2types.h" |
| 132 | #include "i2os.h" /* For any o.s., compiler, or host-related issues */ | ||
| 133 | 132 | ||
| 134 | //------------------------------------------------------------------------- | 133 | //------------------------------------------------------------------------- |
| 135 | // Manifests for the I/O map: | 134 | // Manifests for the I/O map: |
| @@ -644,5 +643,10 @@ typedef union _loadHdrStr | |||
| 644 | #define ABS_BIGGEST_BOX 16 // Absolute the most ports per box | 643 | #define ABS_BIGGEST_BOX 16 // Absolute the most ports per box |
| 645 | #define ABS_MOST_PORTS (ABS_MAX_BOXES * ABS_BIGGEST_BOX) | 644 | #define ABS_MOST_PORTS (ABS_MAX_BOXES * ABS_BIGGEST_BOX) |
| 646 | 645 | ||
| 646 | #define I2_OUTSW(port, addr, count) outsw((port), (addr), (((count)+1)/2)) | ||
| 647 | #define I2_OUTSB(port, addr, count) outsb((port), (addr), (((count)+1))&-2) | ||
| 648 | #define I2_INSW(port, addr, count) insw((port), (addr), (((count)+1)/2)) | ||
| 649 | #define I2_INSB(port, addr, count) insb((port), (addr), (((count)+1))&-2) | ||
| 650 | |||
| 647 | #endif // I2HW_H | 651 | #endif // I2HW_H |
| 648 | 652 | ||
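The new I2_OUTSW/I2_INSW helpers above round a byte count up to whole 16-bit words ((count+1)/2) before calling outsw()/insw(), while the byte variants round it up to an even byte count ((count+1) & -2). The sketch below exercises only that rounding arithmetic; the port I/O itself is omitted since it only makes sense inside the kernel.

#include <stdio.h>

static unsigned int words_for(unsigned int count)
{
	return (count + 1) / 2;		/* as in I2_OUTSW/I2_INSW */
}

static unsigned int even_bytes_for(unsigned int count)
{
	return (count + 1) & ~1u;	/* (count+1) & -2, written unsigned */
}

int main(void)
{
	unsigned int counts[] = { 1, 2, 7, 8 };
	unsigned int i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++)
		printf("count=%u -> %u words, %u even bytes\n",
		       counts[i], words_for(counts[i]), even_bytes_for(counts[i]));
	return 0;
}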
diff --git a/drivers/char/ip2/i2lib.c b/drivers/char/ip2/i2lib.c index 9c25320121ef..938879cc7bcc 100644 --- a/drivers/char/ip2/i2lib.c +++ b/drivers/char/ip2/i2lib.c | |||
| @@ -227,17 +227,17 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) | |||
| 227 | i2ChanStrPtr *ppCh; | 227 | i2ChanStrPtr *ppCh; |
| 228 | 228 | ||
| 229 | if (pB->i2eValid != I2E_MAGIC) { | 229 | if (pB->i2eValid != I2E_MAGIC) { |
| 230 | COMPLETE(pB, I2EE_BADMAGIC); | 230 | I2_COMPLETE(pB, I2EE_BADMAGIC); |
| 231 | } | 231 | } |
| 232 | if (pB->i2eState != II_STATE_STDLOADED) { | 232 | if (pB->i2eState != II_STATE_STDLOADED) { |
| 233 | COMPLETE(pB, I2EE_BADSTATE); | 233 | I2_COMPLETE(pB, I2EE_BADSTATE); |
| 234 | } | 234 | } |
| 235 | 235 | ||
| 236 | LOCK_INIT(&pB->read_fifo_spinlock); | 236 | rwlock_init(&pB->read_fifo_spinlock); |
| 237 | LOCK_INIT(&pB->write_fifo_spinlock); | 237 | rwlock_init(&pB->write_fifo_spinlock); |
| 238 | LOCK_INIT(&pB->Dbuf_spinlock); | 238 | rwlock_init(&pB->Dbuf_spinlock); |
| 239 | LOCK_INIT(&pB->Bbuf_spinlock); | 239 | rwlock_init(&pB->Bbuf_spinlock); |
| 240 | LOCK_INIT(&pB->Fbuf_spinlock); | 240 | rwlock_init(&pB->Fbuf_spinlock); |
| 241 | 241 | ||
| 242 | // NO LOCK needed yet - this is init | 242 | // NO LOCK needed yet - this is init |
| 243 | 243 | ||
| @@ -259,10 +259,10 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) | |||
| 259 | if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) { | 259 | if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) { |
| 260 | continue; | 260 | continue; |
| 261 | } | 261 | } |
| 262 | LOCK_INIT(&pCh->Ibuf_spinlock); | 262 | rwlock_init(&pCh->Ibuf_spinlock); |
| 263 | LOCK_INIT(&pCh->Obuf_spinlock); | 263 | rwlock_init(&pCh->Obuf_spinlock); |
| 264 | LOCK_INIT(&pCh->Cbuf_spinlock); | 264 | rwlock_init(&pCh->Cbuf_spinlock); |
| 265 | LOCK_INIT(&pCh->Pbuf_spinlock); | 265 | rwlock_init(&pCh->Pbuf_spinlock); |
| 266 | // NO LOCK needed yet - this is init | 266 | // NO LOCK needed yet - this is init |
| 267 | // Set up validity flag according to support level | 267 | // Set up validity flag according to support level |
| 268 | if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) { | 268 | if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) { |
| @@ -347,7 +347,7 @@ i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh) | |||
| 347 | } | 347 | } |
| 348 | // No need to check for wrap here; this is initialization. | 348 | // No need to check for wrap here; this is initialization. |
| 349 | pB->i2Fbuf_stuff = stuffIndex; | 349 | pB->i2Fbuf_stuff = stuffIndex; |
| 350 | COMPLETE(pB, I2EE_GOOD); | 350 | I2_COMPLETE(pB, I2EE_GOOD); |
| 351 | 351 | ||
| 352 | } | 352 | } |
| 353 | 353 | ||
| @@ -374,7 +374,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type) | |||
| 374 | 374 | ||
| 375 | case NEED_INLINE: | 375 | case NEED_INLINE: |
| 376 | 376 | ||
| 377 | WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags); | 377 | write_lock_irqsave(&pB->Dbuf_spinlock, flags); |
| 378 | if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip) | 378 | if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip) |
| 379 | { | 379 | { |
| 380 | queueIndex = pB->i2Dbuf_strip; | 380 | queueIndex = pB->i2Dbuf_strip; |
| @@ -386,12 +386,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type) | |||
| 386 | pB->i2Dbuf_strip = queueIndex; | 386 | pB->i2Dbuf_strip = queueIndex; |
| 387 | pCh->channelNeeds &= ~NEED_INLINE; | 387 | pCh->channelNeeds &= ~NEED_INLINE; |
| 388 | } | 388 | } |
| 389 | WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); | 389 | write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); |
| 390 | break; | 390 | break; |
| 391 | 391 | ||
| 392 | case NEED_BYPASS: | 392 | case NEED_BYPASS: |
| 393 | 393 | ||
| 394 | WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags); | 394 | write_lock_irqsave(&pB->Bbuf_spinlock, flags); |
| 395 | if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) | 395 | if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) |
| 396 | { | 396 | { |
| 397 | queueIndex = pB->i2Bbuf_strip; | 397 | queueIndex = pB->i2Bbuf_strip; |
| @@ -403,12 +403,12 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type) | |||
| 403 | pB->i2Bbuf_strip = queueIndex; | 403 | pB->i2Bbuf_strip = queueIndex; |
| 404 | pCh->channelNeeds &= ~NEED_BYPASS; | 404 | pCh->channelNeeds &= ~NEED_BYPASS; |
| 405 | } | 405 | } |
| 406 | WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); | 406 | write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); |
| 407 | break; | 407 | break; |
| 408 | 408 | ||
| 409 | case NEED_FLOW: | 409 | case NEED_FLOW: |
| 410 | 410 | ||
| 411 | WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags); | 411 | write_lock_irqsave(&pB->Fbuf_spinlock, flags); |
| 412 | if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) | 412 | if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) |
| 413 | { | 413 | { |
| 414 | queueIndex = pB->i2Fbuf_strip; | 414 | queueIndex = pB->i2Fbuf_strip; |
| @@ -420,7 +420,7 @@ i2DeQueueNeeds(i2eBordStrPtr pB, int type) | |||
| 420 | pB->i2Fbuf_strip = queueIndex; | 420 | pB->i2Fbuf_strip = queueIndex; |
| 421 | pCh->channelNeeds &= ~NEED_FLOW; | 421 | pCh->channelNeeds &= ~NEED_FLOW; |
| 422 | } | 422 | } |
| 423 | WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); | 423 | write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); |
| 424 | break; | 424 | break; |
| 425 | default: | 425 | default: |
| 426 | printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type); | 426 | printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type); |
| @@ -453,7 +453,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) | |||
| 453 | 453 | ||
| 454 | case NEED_INLINE: | 454 | case NEED_INLINE: |
| 455 | 455 | ||
| 456 | WRITE_LOCK_IRQSAVE(&pB->Dbuf_spinlock,flags); | 456 | write_lock_irqsave(&pB->Dbuf_spinlock, flags); |
| 457 | if ( !(pCh->channelNeeds & NEED_INLINE) ) | 457 | if ( !(pCh->channelNeeds & NEED_INLINE) ) |
| 458 | { | 458 | { |
| 459 | pCh->channelNeeds |= NEED_INLINE; | 459 | pCh->channelNeeds |= NEED_INLINE; |
| @@ -463,12 +463,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) | |||
| 463 | queueIndex = 0; | 463 | queueIndex = 0; |
| 464 | pB->i2Dbuf_stuff = queueIndex; | 464 | pB->i2Dbuf_stuff = queueIndex; |
| 465 | } | 465 | } |
| 466 | WRITE_UNLOCK_IRQRESTORE(&pB->Dbuf_spinlock,flags); | 466 | write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); |
| 467 | break; | 467 | break; |
| 468 | 468 | ||
| 469 | case NEED_BYPASS: | 469 | case NEED_BYPASS: |
| 470 | 470 | ||
| 471 | WRITE_LOCK_IRQSAVE(&pB->Bbuf_spinlock,flags); | 471 | write_lock_irqsave(&pB->Bbuf_spinlock, flags); |
| 472 | if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS)) | 472 | if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS)) |
| 473 | { | 473 | { |
| 474 | pCh->channelNeeds |= NEED_BYPASS; | 474 | pCh->channelNeeds |= NEED_BYPASS; |
| @@ -478,12 +478,12 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) | |||
| 478 | queueIndex = 0; | 478 | queueIndex = 0; |
| 479 | pB->i2Bbuf_stuff = queueIndex; | 479 | pB->i2Bbuf_stuff = queueIndex; |
| 480 | } | 480 | } |
| 481 | WRITE_UNLOCK_IRQRESTORE(&pB->Bbuf_spinlock,flags); | 481 | write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); |
| 482 | break; | 482 | break; |
| 483 | 483 | ||
| 484 | case NEED_FLOW: | 484 | case NEED_FLOW: |
| 485 | 485 | ||
| 486 | WRITE_LOCK_IRQSAVE(&pB->Fbuf_spinlock,flags); | 486 | write_lock_irqsave(&pB->Fbuf_spinlock, flags); |
| 487 | if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW)) | 487 | if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW)) |
| 488 | { | 488 | { |
| 489 | pCh->channelNeeds |= NEED_FLOW; | 489 | pCh->channelNeeds |= NEED_FLOW; |
| @@ -493,7 +493,7 @@ i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) | |||
| 493 | queueIndex = 0; | 493 | queueIndex = 0; |
| 494 | pB->i2Fbuf_stuff = queueIndex; | 494 | pB->i2Fbuf_stuff = queueIndex; |
| 495 | } | 495 | } |
| 496 | WRITE_UNLOCK_IRQRESTORE(&pB->Fbuf_spinlock,flags); | 496 | write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); |
| 497 | break; | 497 | break; |
| 498 | 498 | ||
| 499 | case NEED_CREDIT: | 499 | case NEED_CREDIT: |
| @@ -562,9 +562,8 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 562 | pB = pCh->pMyBord; | 562 | pB = pCh->pMyBord; |
| 563 | 563 | ||
| 564 | // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT | 564 | // Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT |
| 565 | if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == IRQ_UNDEFINED) { | 565 | if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED) |
| 566 | return -2; | 566 | return -2; |
| 567 | } | ||
| 568 | // If the board has gone fatal, return bad, and also hit the trap routine if | 567 | // If the board has gone fatal, return bad, and also hit the trap routine if |
| 569 | // it exists. | 568 | // it exists. |
| 570 | if (pB->i2eFatal) { | 569 | if (pB->i2eFatal) { |
| @@ -620,13 +619,13 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 620 | switch(type) { | 619 | switch(type) { |
| 621 | case PTYPE_INLINE: | 620 | case PTYPE_INLINE: |
| 622 | lock_var_p = &pCh->Obuf_spinlock; | 621 | lock_var_p = &pCh->Obuf_spinlock; |
| 623 | WRITE_LOCK_IRQSAVE(lock_var_p,flags); | 622 | write_lock_irqsave(lock_var_p, flags); |
| 624 | stuffIndex = pCh->Obuf_stuff; | 623 | stuffIndex = pCh->Obuf_stuff; |
| 625 | bufroom = pCh->Obuf_strip - stuffIndex; | 624 | bufroom = pCh->Obuf_strip - stuffIndex; |
| 626 | break; | 625 | break; |
| 627 | case PTYPE_BYPASS: | 626 | case PTYPE_BYPASS: |
| 628 | lock_var_p = &pCh->Cbuf_spinlock; | 627 | lock_var_p = &pCh->Cbuf_spinlock; |
| 629 | WRITE_LOCK_IRQSAVE(lock_var_p,flags); | 628 | write_lock_irqsave(lock_var_p, flags); |
| 630 | stuffIndex = pCh->Cbuf_stuff; | 629 | stuffIndex = pCh->Cbuf_stuff; |
| 631 | bufroom = pCh->Cbuf_strip - stuffIndex; | 630 | bufroom = pCh->Cbuf_strip - stuffIndex; |
| 632 | break; | 631 | break; |
| @@ -645,7 +644,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 645 | break; /* from for()- Enough room: goto proceed */ | 644 | break; /* from for()- Enough room: goto proceed */ |
| 646 | } | 645 | } |
| 647 | ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); | 646 | ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); |
| 648 | WRITE_UNLOCK_IRQRESTORE(lock_var_p, flags); | 647 | write_unlock_irqrestore(lock_var_p, flags); |
| 649 | } else | 648 | } else |
| 650 | ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); | 649 | ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize); |
| 651 | 650 | ||
| @@ -747,7 +746,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 747 | { | 746 | { |
| 748 | case PTYPE_INLINE: | 747 | case PTYPE_INLINE: |
| 749 | pCh->Obuf_stuff = stuffIndex; // Store buffer pointer | 748 | pCh->Obuf_stuff = stuffIndex; // Store buffer pointer |
| 750 | WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 749 | write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 751 | 750 | ||
| 752 | pB->debugInlineQueued++; | 751 | pB->debugInlineQueued++; |
| 753 | // Add the channel pointer to list of channels needing service (first | 752 | // Add the channel pointer to list of channels needing service (first |
| @@ -757,7 +756,7 @@ i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands, | |||
| 757 | 756 | ||
| 758 | case PTYPE_BYPASS: | 757 | case PTYPE_BYPASS: |
| 759 | pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer | 758 | pCh->Cbuf_stuff = stuffIndex; // Store buffer pointer |
| 760 | WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags); | 759 | write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags); |
| 761 | 760 | ||
| 762 | pB->debugBypassQueued++; | 761 | pB->debugBypassQueued++; |
| 763 | // Add the channel pointer to list of channels needing service (first | 762 | // Add the channel pointer to list of channels needing service (first |
| @@ -840,7 +839,7 @@ i2Input(i2ChanStrPtr pCh) | |||
| 840 | count = -1; | 839 | count = -1; |
| 841 | goto i2Input_exit; | 840 | goto i2Input_exit; |
| 842 | } | 841 | } |
| 843 | WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); | 842 | write_lock_irqsave(&pCh->Ibuf_spinlock, flags); |
| 844 | 843 | ||
| 845 | // initialize some accelerators and private copies | 844 | // initialize some accelerators and private copies |
| 846 | stripIndex = pCh->Ibuf_strip; | 845 | stripIndex = pCh->Ibuf_strip; |
| @@ -850,7 +849,7 @@ i2Input(i2ChanStrPtr pCh) | |||
| 850 | // If buffer is empty or requested data count was 0, (trivial case) return | 849 | // If buffer is empty or requested data count was 0, (trivial case) return |
| 851 | // without any further thought. | 850 | // without any further thought. |
| 852 | if ( count == 0 ) { | 851 | if ( count == 0 ) { |
| 853 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 852 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 854 | goto i2Input_exit; | 853 | goto i2Input_exit; |
| 855 | } | 854 | } |
| 856 | // Adjust for buffer wrap | 855 | // Adjust for buffer wrap |
| @@ -891,10 +890,10 @@ i2Input(i2ChanStrPtr pCh) | |||
| 891 | 890 | ||
| 892 | if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) { | 891 | if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) { |
| 893 | pCh->sinceLastFlow -= pCh->whenSendFlow; | 892 | pCh->sinceLastFlow -= pCh->whenSendFlow; |
| 894 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 893 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 895 | i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); | 894 | i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); |
| 896 | } else { | 895 | } else { |
| 897 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 896 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 898 | } | 897 | } |
| 899 | 898 | ||
| 900 | i2Input_exit: | 899 | i2Input_exit: |
| @@ -926,7 +925,7 @@ i2InputFlush(i2ChanStrPtr pCh) | |||
| 926 | 925 | ||
| 927 | ip2trace (CHANN, ITRC_INPUT, 10, 0); | 926 | ip2trace (CHANN, ITRC_INPUT, 10, 0); |
| 928 | 927 | ||
| 929 | WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); | 928 | write_lock_irqsave(&pCh->Ibuf_spinlock, flags); |
| 930 | count = pCh->Ibuf_stuff - pCh->Ibuf_strip; | 929 | count = pCh->Ibuf_stuff - pCh->Ibuf_strip; |
| 931 | 930 | ||
| 932 | // Adjust for buffer wrap | 931 | // Adjust for buffer wrap |
| @@ -947,10 +946,10 @@ i2InputFlush(i2ChanStrPtr pCh) | |||
| 947 | if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow ) | 946 | if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow ) |
| 948 | { | 947 | { |
| 949 | pCh->sinceLastFlow -= pCh->whenSendFlow; | 948 | pCh->sinceLastFlow -= pCh->whenSendFlow; |
| 950 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 949 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 951 | i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); | 950 | i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); |
| 952 | } else { | 951 | } else { |
| 953 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 952 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 954 | } | 953 | } |
| 955 | 954 | ||
| 956 | ip2trace (CHANN, ITRC_INPUT, 19, 1, count); | 955 | ip2trace (CHANN, ITRC_INPUT, 19, 1, count); |
| @@ -979,9 +978,9 @@ i2InputAvailable(i2ChanStrPtr pCh) | |||
| 979 | 978 | ||
| 980 | 979 | ||
| 981 | // initialize some accelerators and private copies | 980 | // initialize some accelerators and private copies |
| 982 | READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags); | 981 | read_lock_irqsave(&pCh->Ibuf_spinlock, flags); |
| 983 | count = pCh->Ibuf_stuff - pCh->Ibuf_strip; | 982 | count = pCh->Ibuf_stuff - pCh->Ibuf_strip; |
| 984 | READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags); | 983 | read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 985 | 984 | ||
| 986 | // Adjust for buffer wrap | 985 | // Adjust for buffer wrap |
| 987 | if (count < 0) | 986 | if (count < 0) |
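The i2lib.c hunks map the driver's READ_LOCK_IRQSAVE/WRITE_LOCK_IRQSAVE wrappers directly onto read_lock_irqsave()/write_lock_irqsave(): read locks where the code only inspects buffer indices (as in i2InputAvailable above), write locks where it updates them. A rough userspace analogy with POSIX rwlocks follows, showing only the reader/writer split; there is no equivalent of the irqsave flags here and the names are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t buf_lock = PTHREAD_RWLOCK_INITIALIZER;
static int stuff_idx, strip_idx;	/* stand-ins for Ibuf_stuff/Ibuf_strip */

static int bytes_available(void)
{
	int count;

	pthread_rwlock_rdlock(&buf_lock);	/* read-only peek at the indices */
	count = stuff_idx - strip_idx;
	pthread_rwlock_unlock(&buf_lock);
	return count;
}

static void produce(int n)
{
	pthread_rwlock_wrlock(&buf_lock);	/* an index is modified */
	stuff_idx += n;
	pthread_rwlock_unlock(&buf_lock);
}

int main(void)
{
	produce(5);
	printf("available = %d\n", bytes_available());
	return 0;
}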
| @@ -1045,9 +1044,9 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count) | |||
| 1045 | while ( count > 0 ) { | 1044 | while ( count > 0 ) { |
| 1046 | 1045 | ||
| 1047 | // How much room in output buffer is there? | 1046 | // How much room in output buffer is there? |
| 1048 | READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); | 1047 | read_lock_irqsave(&pCh->Obuf_spinlock, flags); |
| 1049 | amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; | 1048 | amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; |
| 1050 | READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 1049 | read_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 1051 | if (amountToMove < 0) { | 1050 | if (amountToMove < 0) { |
| 1052 | amountToMove += OBUF_SIZE; | 1051 | amountToMove += OBUF_SIZE; |
| 1053 | } | 1052 | } |
| @@ -1075,7 +1074,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count) | |||
| 1075 | if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) ) | 1074 | if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) ) |
| 1076 | && amountToMove > 0 ) | 1075 | && amountToMove > 0 ) |
| 1077 | { | 1076 | { |
| 1078 | WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); | 1077 | write_lock_irqsave(&pCh->Obuf_spinlock, flags); |
| 1079 | stuffIndex = pCh->Obuf_stuff; | 1078 | stuffIndex = pCh->Obuf_stuff; |
| 1080 | 1079 | ||
| 1081 | // Had room to move some data: don't know whether the block size, | 1080 | // Had room to move some data: don't know whether the block size, |
| @@ -1102,7 +1101,7 @@ i2Output(i2ChanStrPtr pCh, const char *pSource, int count) | |||
| 1102 | } | 1101 | } |
| 1103 | pCh->Obuf_stuff = stuffIndex; | 1102 | pCh->Obuf_stuff = stuffIndex; |
| 1104 | 1103 | ||
| 1105 | WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 1104 | write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 1106 | 1105 | ||
| 1107 | ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex ); | 1106 | ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex ); |
| 1108 | 1107 | ||
| @@ -1352,9 +1351,9 @@ i2OutputFree(i2ChanStrPtr pCh) | |||
| 1352 | if ( !i2Validate ( pCh ) ) { | 1351 | if ( !i2Validate ( pCh ) ) { |
| 1353 | return -1; | 1352 | return -1; |
| 1354 | } | 1353 | } |
| 1355 | READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); | 1354 | read_lock_irqsave(&pCh->Obuf_spinlock, flags); |
| 1356 | amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; | 1355 | amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; |
| 1357 | READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 1356 | read_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 1358 | 1357 | ||
| 1359 | if (amountToMove < 0) { | 1358 | if (amountToMove < 0) { |
| 1360 | amountToMove += OBUF_SIZE; | 1359 | amountToMove += OBUF_SIZE; |
| @@ -1464,11 +1463,11 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1464 | 1463 | ||
| 1465 | // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 ); | 1464 | // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 ); |
| 1466 | 1465 | ||
| 1467 | while (HAS_INPUT(pB)) { | 1466 | while (I2_HAS_INPUT(pB)) { |
| 1468 | // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 ); | 1467 | // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 ); |
| 1469 | 1468 | ||
| 1470 | // Process packet from fifo a one atomic unit | 1469 | // Process packet from fifo a one atomic unit |
| 1471 | WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock,bflags); | 1470 | write_lock_irqsave(&pB->read_fifo_spinlock, bflags); |
| 1472 | 1471 | ||
| 1473 | // The first word (or two bytes) will have channel number and type of | 1472 | // The first word (or two bytes) will have channel number and type of |
| 1474 | // packet, possibly other information | 1473 | // packet, possibly other information |
| @@ -1490,7 +1489,8 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1490 | // sick! | 1489 | // sick! |
| 1491 | if ( ((unsigned int)count) > IBUF_SIZE ) { | 1490 | if ( ((unsigned int)count) > IBUF_SIZE ) { |
| 1492 | pB->i2eFatal = 2; | 1491 | pB->i2eFatal = 2; |
| 1493 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); | 1492 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1493 | bflags); | ||
| 1494 | return; /* Bail out ASAP */ | 1494 | return; /* Bail out ASAP */ |
| 1495 | } | 1495 | } |
| 1496 | // Channel is illegally big ? | 1496 | // Channel is illegally big ? |
| @@ -1498,7 +1498,8 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1498 | (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel]))) | 1498 | (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel]))) |
| 1499 | { | 1499 | { |
| 1500 | iiReadBuf(pB, junkBuffer, count); | 1500 | iiReadBuf(pB, junkBuffer, count); |
| 1501 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); | 1501 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1502 | bflags); | ||
| 1502 | break; /* From switch: ready for next packet */ | 1503 | break; /* From switch: ready for next packet */ |
| 1503 | } | 1504 | } |
| 1504 | 1505 | ||
| @@ -1512,14 +1513,15 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1512 | if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY) | 1513 | if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY) |
| 1513 | { | 1514 | { |
| 1514 | pCh->hotKeyIn = iiReadWord(pB) & 0xff; | 1515 | pCh->hotKeyIn = iiReadWord(pB) & 0xff; |
| 1515 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); | 1516 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1517 | bflags); | ||
| 1516 | i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK); | 1518 | i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK); |
| 1517 | break; /* From the switch: ready for next packet */ | 1519 | break; /* From the switch: ready for next packet */ |
| 1518 | } | 1520 | } |
| 1519 | 1521 | ||
| 1520 | // Normal data! We crudely assume there is room for the data in our | 1522 | // Normal data! We crudely assume there is room for the data in our |
| 1521 | // buffer because the board wouldn't have exceeded his credit limit. | 1523 | // buffer because the board wouldn't have exceeded his credit limit. |
| 1522 | WRITE_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,cflags); | 1524 | write_lock_irqsave(&pCh->Ibuf_spinlock, cflags); |
| 1523 | // We have 2 locks now | 1525 | // We have 2 locks now |
| 1524 | stuffIndex = pCh->Ibuf_stuff; | 1526 | stuffIndex = pCh->Ibuf_stuff; |
| 1525 | amountToRead = IBUF_SIZE - stuffIndex; | 1527 | amountToRead = IBUF_SIZE - stuffIndex; |
| @@ -1562,8 +1564,9 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1562 | 1564 | ||
| 1563 | // Update stuff index | 1565 | // Update stuff index |
| 1564 | pCh->Ibuf_stuff = stuffIndex; | 1566 | pCh->Ibuf_stuff = stuffIndex; |
| 1565 | WRITE_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,cflags); | 1567 | write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags); |
| 1566 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); | 1568 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1569 | bflags); | ||
| 1567 | 1570 | ||
| 1568 | #ifdef USE_IQ | 1571 | #ifdef USE_IQ |
| 1569 | schedule_work(&pCh->tqueue_input); | 1572 | schedule_work(&pCh->tqueue_input); |
| @@ -1585,7 +1588,8 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1585 | 1588 | ||
| 1586 | iiReadBuf(pB, cmdBuffer, count); | 1589 | iiReadBuf(pB, cmdBuffer, count); |
| 1587 | // We can release early with buffer grab | 1590 | // We can release early with buffer grab |
| 1588 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock,bflags); | 1591 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1592 | bflags); | ||
| 1589 | 1593 | ||
| 1590 | pc = cmdBuffer; | 1594 | pc = cmdBuffer; |
| 1591 | pcLimit = &(cmdBuffer[count]); | 1595 | pcLimit = &(cmdBuffer[count]); |
| @@ -1830,12 +1834,12 @@ i2StripFifo(i2eBordStrPtr pB) | |||
| 1830 | default: // Neither packet? should be impossible | 1834 | default: // Neither packet? should be impossible |
| 1831 | ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1, | 1835 | ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1, |
| 1832 | PTYPE_OF(pB->i2eLeadoffWord) ); | 1836 | PTYPE_OF(pB->i2eLeadoffWord) ); |
| 1833 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, | 1837 | write_unlock_irqrestore(&pB->read_fifo_spinlock, |
| 1834 | bflags); | 1838 | bflags); |
| 1835 | 1839 | ||
| 1836 | break; | 1840 | break; |
| 1837 | } // End of switch on type of packets | 1841 | } // End of switch on type of packets |
| 1838 | } //while(board HAS_INPUT) | 1842 | } /*while(board I2_HAS_INPUT)*/ |
| 1839 | 1843 | ||
| 1840 | ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 ); | 1844 | ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 ); |
| 1841 | 1845 | ||
| @@ -1858,7 +1862,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve) | |||
| 1858 | { | 1862 | { |
| 1859 | int rc = 0; | 1863 | int rc = 0; |
| 1860 | unsigned long flags; | 1864 | unsigned long flags; |
| 1861 | WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); | 1865 | write_lock_irqsave(&pB->write_fifo_spinlock, flags); |
| 1862 | if (!pB->i2eWaitingForEmptyFifo) { | 1866 | if (!pB->i2eWaitingForEmptyFifo) { |
| 1863 | if (pB->i2eFifoRemains > (count+reserve)) { | 1867 | if (pB->i2eFifoRemains > (count+reserve)) { |
| 1864 | pB->i2eFifoRemains -= count; | 1868 | pB->i2eFifoRemains -= count; |
| @@ -1867,7 +1871,7 @@ i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve) | |||
| 1867 | rc = count; | 1871 | rc = count; |
| 1868 | } | 1872 | } |
| 1869 | } | 1873 | } |
| 1870 | WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); | 1874 | write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); |
| 1871 | return rc; | 1875 | return rc; |
| 1872 | } | 1876 | } |
| 1873 | //****************************************************************************** | 1877 | //****************************************************************************** |
| @@ -1898,7 +1902,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB) | |||
| 1898 | while ( --bailout && notClogged && | 1902 | while ( --bailout && notClogged && |
| 1899 | (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS)))) | 1903 | (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS)))) |
| 1900 | { | 1904 | { |
| 1901 | WRITE_LOCK_IRQSAVE(&pCh->Cbuf_spinlock,flags); | 1905 | write_lock_irqsave(&pCh->Cbuf_spinlock, flags); |
| 1902 | stripIndex = pCh->Cbuf_strip; | 1906 | stripIndex = pCh->Cbuf_strip; |
| 1903 | 1907 | ||
| 1904 | // as long as there are packets for this channel... | 1908 | // as long as there are packets for this channel... |
| @@ -1906,7 +1910,7 @@ i2StuffFifoBypass(i2eBordStrPtr pB) | |||
| 1906 | while (stripIndex != pCh->Cbuf_stuff) { | 1910 | while (stripIndex != pCh->Cbuf_stuff) { |
| 1907 | pRemove = &(pCh->Cbuf[stripIndex]); | 1911 | pRemove = &(pCh->Cbuf[stripIndex]); |
| 1908 | packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader); | 1912 | packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader); |
| 1909 | paddedSize = ROUNDUP(packetSize); | 1913 | paddedSize = roundup(packetSize, 2); |
| 1910 | 1914 | ||
| 1911 | if (paddedSize > 0) { | 1915 | if (paddedSize > 0) { |
| 1912 | if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) { | 1916 | if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) { |
| @@ -1930,7 +1934,7 @@ WriteDBGBuf("BYPS", pRemove, paddedSize); | |||
| 1930 | // Done with this channel. Move to next, removing this one from | 1934 | // Done with this channel. Move to next, removing this one from |
| 1931 | // the queue of channels if we cleaned it out (i.e., didn't get clogged. | 1935 | // the queue of channels if we cleaned it out (i.e., didn't get clogged. |
| 1932 | pCh->Cbuf_strip = stripIndex; | 1936 | pCh->Cbuf_strip = stripIndex; |
| 1933 | WRITE_UNLOCK_IRQRESTORE(&pCh->Cbuf_spinlock,flags); | 1937 | write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags); |
| 1934 | } // Either clogged or finished all the work | 1938 | } // Either clogged or finished all the work |
| 1935 | 1939 | ||
| 1936 | #ifdef IP2DEBUG_TRACE | 1940 | #ifdef IP2DEBUG_TRACE |
| @@ -1954,7 +1958,7 @@ static inline void | |||
| 1954 | i2StuffFifoFlow(i2eBordStrPtr pB) | 1958 | i2StuffFifoFlow(i2eBordStrPtr pB) |
| 1955 | { | 1959 | { |
| 1956 | i2ChanStrPtr pCh; | 1960 | i2ChanStrPtr pCh; |
| 1957 | unsigned short paddedSize = ROUNDUP(sizeof(flowIn)); | 1961 | unsigned short paddedSize = roundup(sizeof(flowIn), 2); |
| 1958 | 1962 | ||
| 1959 | ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2, | 1963 | ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2, |
| 1960 | pB->i2eFifoRemains, paddedSize ); | 1964 | pB->i2eFifoRemains, paddedSize ); |
| @@ -2010,7 +2014,7 @@ i2StuffFifoInline(i2eBordStrPtr pB) | |||
| 2010 | while ( --bailout && notClogged && | 2014 | while ( --bailout && notClogged && |
| 2011 | (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) ) | 2015 | (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) ) |
| 2012 | { | 2016 | { |
| 2013 | WRITE_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); | 2017 | write_lock_irqsave(&pCh->Obuf_spinlock, flags); |
| 2014 | stripIndex = pCh->Obuf_strip; | 2018 | stripIndex = pCh->Obuf_strip; |
| 2015 | 2019 | ||
| 2016 | ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff ); | 2020 | ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff ); |
| @@ -2031,7 +2035,7 @@ i2StuffFifoInline(i2eBordStrPtr pB) | |||
| 2031 | packetSize = flowsize + sizeof(i2CmdHeader); | 2035 | packetSize = flowsize + sizeof(i2CmdHeader); |
| 2032 | } | 2036 | } |
| 2033 | flowsize = CREDIT_USAGE(flowsize); | 2037 | flowsize = CREDIT_USAGE(flowsize); |
| 2034 | paddedSize = ROUNDUP(packetSize); | 2038 | paddedSize = roundup(packetSize, 2); |
| 2035 | 2039 | ||
| 2036 | ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize ); | 2040 | ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize ); |
| 2037 | 2041 | ||
| @@ -2086,7 +2090,7 @@ WriteDBGBuf("DATA", pRemove, paddedSize); | |||
| 2086 | // Done with this channel. Move to next, removing this one from the | 2090 | // Done with this channel. Move to next, removing this one from the |
| 2087 | // queue of channels if we cleaned it out (i.e., didn't get clogged. | 2091 | // queue of channels if we cleaned it out (i.e., didn't get clogged. |
| 2088 | pCh->Obuf_strip = stripIndex; | 2092 | pCh->Obuf_strip = stripIndex; |
| 2089 | WRITE_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 2093 | write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 2090 | if ( notClogged ) | 2094 | if ( notClogged ) |
| 2091 | { | 2095 | { |
| 2092 | 2096 | ||
| @@ -2190,10 +2194,11 @@ i2ServiceBoard ( i2eBordStrPtr pB ) | |||
| 2190 | 2194 | ||
| 2191 | if (inmail & MB_OUT_STRIPPED) { | 2195 | if (inmail & MB_OUT_STRIPPED) { |
| 2192 | pB->i2eFifoOutInts++; | 2196 | pB->i2eFifoOutInts++; |
| 2193 | WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); | 2197 | write_lock_irqsave(&pB->write_fifo_spinlock, flags); |
| 2194 | pB->i2eFifoRemains = pB->i2eFifoSize; | 2198 | pB->i2eFifoRemains = pB->i2eFifoSize; |
| 2195 | pB->i2eWaitingForEmptyFifo = 0; | 2199 | pB->i2eWaitingForEmptyFifo = 0; |
| 2196 | WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); | 2200 | write_unlock_irqrestore(&pB->write_fifo_spinlock, |
| 2201 | flags); | ||
| 2197 | 2202 | ||
| 2198 | ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains ); | 2203 | ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains ); |
| 2199 | 2204 | ||
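The i2lib.c hunks above are largely mechanical: the driver-private READ_/WRITE_LOCK_IRQSAVE and *_UNLOCK_IRQRESTORE wrappers (defined in i2os.h, which is deleted just below) become direct calls to the generic rwlock API, HAS_INPUT becomes I2_HAS_INPUT, and the private ROUNDUP macro becomes roundup(size, 2) from <linux/kernel.h>. A minimal sketch of the resulting locking and padding pattern, using illustrative demo_* names rather than code from the patch:

#include <linux/kernel.h>	/* roundup() */
#include <linux/spinlock.h>	/* rwlock_t and the irqsave/irqrestore variants */

struct demo_channel {
	rwlock_t	Ibuf_spinlock;
	int		sinceLastFlow;
	int		whenSendFlow;
};

static void demo_account_flow(struct demo_channel *ch, int count)
{
	unsigned long flags;

	/* was WRITE_LOCK_IRQSAVE(&ch->Ibuf_spinlock, flags) */
	write_lock_irqsave(&ch->Ibuf_spinlock, flags);
	ch->sinceLastFlow += count;
	/* was WRITE_UNLOCK_IRQRESTORE(&ch->Ibuf_spinlock, flags) */
	write_unlock_irqrestore(&ch->Ibuf_spinlock, flags);
}

/* ROUNDUP() padded packet sizes to a 2-byte boundary; roundup(size, 2) is the generic equivalent. */
static inline unsigned short demo_padded_size(unsigned short packet_size)
{
	return roundup(packet_size, 2);
}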
diff --git a/drivers/char/ip2/i2os.h b/drivers/char/ip2/i2os.h deleted file mode 100644 index eff9b542d699..000000000000 --- a/drivers/char/ip2/i2os.h +++ /dev/null | |||
| @@ -1,127 +0,0 @@ | |||
| 1 | /******************************************************************************* | ||
| 2 | * | ||
| 3 | * (c) 1999 by Computone Corporation | ||
| 4 | * | ||
| 5 | ******************************************************************************** | ||
| 6 | * | ||
| 7 | * | ||
| 8 | * PACKAGE: Linux tty Device Driver for IntelliPort II family of multiport | ||
| 9 | * serial I/O controllers. | ||
| 10 | * | ||
| 11 | * DESCRIPTION: Defines, definitions and includes which are heavily dependent | ||
| 12 | * on O/S, host, compiler, etc. This file is tailored for: | ||
| 13 | * Linux v2.0.0 and later | ||
| 14 | * Gnu gcc c2.7.2 | ||
| 15 | * 80x86 architecture | ||
| 16 | * | ||
| 17 | *******************************************************************************/ | ||
| 18 | |||
| 19 | #ifndef I2OS_H /* To prevent multiple includes */ | ||
| 20 | #define I2OS_H 1 | ||
| 21 | |||
| 22 | //------------------------------------------------- | ||
| 23 | // Required Includes | ||
| 24 | //------------------------------------------------- | ||
| 25 | |||
| 26 | #include "ip2types.h" | ||
| 27 | #include <asm/io.h> /* For inb, etc */ | ||
| 28 | |||
| 29 | //------------------------------------ | ||
| 30 | // Defines for I/O instructions: | ||
| 31 | //------------------------------------ | ||
| 32 | |||
| 33 | #define INB(port) inb(port) | ||
| 34 | #define OUTB(port,value) outb((value),(port)) | ||
| 35 | #define INW(port) inw(port) | ||
| 36 | #define OUTW(port,value) outw((value),(port)) | ||
| 37 | #define OUTSW(port,addr,count) outsw((port),(addr),(((count)+1)/2)) | ||
| 38 | #define OUTSB(port,addr,count) outsb((port),(addr),(((count)+1))&-2) | ||
| 39 | #define INSW(port,addr,count) insw((port),(addr),(((count)+1)/2)) | ||
| 40 | #define INSB(port,addr,count) insb((port),(addr),(((count)+1))&-2) | ||
| 41 | |||
| 42 | //-------------------------------------------- | ||
| 43 | // Interrupt control | ||
| 44 | //-------------------------------------------- | ||
| 45 | |||
| 46 | #define LOCK_INIT(a) rwlock_init(a) | ||
| 47 | |||
| 48 | #define SAVE_AND_DISABLE_INTS(a,b) { \ | ||
| 49 | /* printk("get_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 50 | spin_lock_irqsave(a,b); \ | ||
| 51 | } | ||
| 52 | |||
| 53 | #define RESTORE_INTS(a,b) { \ | ||
| 54 | /* printk("rel_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 55 | spin_unlock_irqrestore(a,b); \ | ||
| 56 | } | ||
| 57 | |||
| 58 | #define READ_LOCK_IRQSAVE(a,b) { \ | ||
| 59 | /* printk("get_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 60 | read_lock_irqsave(a,b); \ | ||
| 61 | } | ||
| 62 | |||
| 63 | #define READ_UNLOCK_IRQRESTORE(a,b) { \ | ||
| 64 | /* printk("rel_read_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 65 | read_unlock_irqrestore(a,b); \ | ||
| 66 | } | ||
| 67 | |||
| 68 | #define WRITE_LOCK_IRQSAVE(a,b) { \ | ||
| 69 | /* printk("get_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 70 | write_lock_irqsave(a,b); \ | ||
| 71 | } | ||
| 72 | |||
| 73 | #define WRITE_UNLOCK_IRQRESTORE(a,b) { \ | ||
| 74 | /* printk("rel_write_lock: 0x%x,%4d,%s\n",(int)a,__LINE__,__FILE__);*/ \ | ||
| 75 | write_unlock_irqrestore(a,b); \ | ||
| 76 | } | ||
| 77 | |||
| 78 | |||
| 79 | //------------------------------------------------------------------------------ | ||
| 80 | // Hardware-delay loop | ||
| 81 | // | ||
| 82 | // Probably used in only one place (see i2ellis.c) but this helps keep things | ||
| 83 | // together. Note we have unwound the IN instructions. On machines with a | ||
| 84 | // reasonable cache, the eight instructions (1 byte each) should fit in cache | ||
| 85 | // nicely, and on un-cached machines, the code-fetch would tend not to dominate. | ||
| 86 | // Note that cx is shifted so that "count" still reflects the total number of | ||
| 87 | // iterations assuming no unwinding. | ||
| 88 | //------------------------------------------------------------------------------ | ||
| 89 | |||
| 90 | //#define DELAY1MS(port,count,label) | ||
| 91 | |||
| 92 | //------------------------------------------------------------------------------ | ||
| 93 | // Macros to switch to a new stack, saving stack pointers, and to restore the | ||
| 94 | // old stack (Used, for example, in i2lib.c) "heap" is the address of some | ||
| 95 | // buffer which will become the new stack (working down from highest address). | ||
| 96 | // The two words at the two lowest addresses in this stack are for storing the | ||
| 97 | // SS and SP. | ||
| 98 | //------------------------------------------------------------------------------ | ||
| 99 | |||
| 100 | //#define TO_NEW_STACK(heap,size) | ||
| 101 | //#define TO_OLD_STACK(heap) | ||
| 102 | |||
| 103 | //------------------------------------------------------------------------------ | ||
| 104 | // Macros to save the original IRQ vectors and masks, and to patch in new ones. | ||
| 105 | //------------------------------------------------------------------------------ | ||
| 106 | |||
| 107 | //#define SAVE_IRQ_MASKS(dest) | ||
| 108 | //#define WRITE_IRQ_MASKS(src) | ||
| 109 | //#define SAVE_IRQ_VECTOR(value,dest) | ||
| 110 | //#define WRITE_IRQ_VECTOR(value,src) | ||
| 111 | |||
| 112 | //------------------------------------------------------------------------------ | ||
| 113 | // Macro to copy data from one far pointer to another. | ||
| 114 | //------------------------------------------------------------------------------ | ||
| 115 | |||
| 116 | #define I2_MOVE_DATA(fpSource,fpDest,count) memmove(fpDest,fpSource,count); | ||
| 117 | |||
| 118 | //------------------------------------------------------------------------------ | ||
| 119 | // Macros to issue eoi's to host interrupt control (IBM AT 8259-style). | ||
| 120 | //------------------------------------------------------------------------------ | ||
| 121 | |||
| 122 | //#define MASTER_EOI | ||
| 123 | //#define SLAVE_EOI | ||
| 124 | |||
| 125 | #endif /* I2OS_H */ | ||
| 126 | |||
| 127 | |||
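With i2os.h gone, its thin I/O and locking wrappers no longer have a definition; the locking side is shown converted in i2lib.c above, and the I/O side maps one-to-one onto the standard port accessors, as in this illustrative sketch (not code from the patch):

#include <asm/io.h>	/* inb(), outb() */

static inline unsigned char demo_read_port(unsigned long port)
{
	return inb(port);		/* was INB(port) */
}

static inline void demo_write_port(unsigned long port, unsigned char value)
{
	outb(value, port);		/* was OUTB(port, value): note the argument order */
}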
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index 0a61856c631f..70957acaa960 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
| @@ -169,7 +169,7 @@ static int Fip_firmware_size; | |||
| 169 | static int ip2_open(PTTY, struct file *); | 169 | static int ip2_open(PTTY, struct file *); |
| 170 | static void ip2_close(PTTY, struct file *); | 170 | static void ip2_close(PTTY, struct file *); |
| 171 | static int ip2_write(PTTY, const unsigned char *, int); | 171 | static int ip2_write(PTTY, const unsigned char *, int); |
| 172 | static void ip2_putchar(PTTY, unsigned char); | 172 | static int ip2_putchar(PTTY, unsigned char); |
| 173 | static void ip2_flush_chars(PTTY); | 173 | static void ip2_flush_chars(PTTY); |
| 174 | static int ip2_write_room(PTTY); | 174 | static int ip2_write_room(PTTY); |
| 175 | static int ip2_chars_in_buf(PTTY); | 175 | static int ip2_chars_in_buf(PTTY); |
| @@ -355,14 +355,15 @@ have_requested_irq( char irq ) | |||
| 355 | /* the driver initialisation function and returns what it returns. */ | 355 | /* the driver initialisation function and returns what it returns. */ |
| 356 | /******************************************************************************/ | 356 | /******************************************************************************/ |
| 357 | #ifdef MODULE | 357 | #ifdef MODULE |
| 358 | int | 358 | static int __init |
| 359 | init_module(void) | 359 | ip2_init_module(void) |
| 360 | { | 360 | { |
| 361 | #ifdef IP2DEBUG_INIT | 361 | #ifdef IP2DEBUG_INIT |
| 362 | printk (KERN_DEBUG "Loading module ...\n" ); | 362 | printk (KERN_DEBUG "Loading module ...\n" ); |
| 363 | #endif | 363 | #endif |
| 364 | return 0; | 364 | return 0; |
| 365 | } | 365 | } |
| 366 | module_init(ip2_init_module); | ||
| 366 | #endif /* MODULE */ | 367 | #endif /* MODULE */ |
| 367 | 368 | ||
| 368 | /******************************************************************************/ | 369 | /******************************************************************************/ |
| @@ -381,8 +382,8 @@ init_module(void) | |||
| 381 | /* driver should be returned since it may be unloaded from memory. */ | 382 | /* driver should be returned since it may be unloaded from memory. */ |
| 382 | /******************************************************************************/ | 383 | /******************************************************************************/ |
| 383 | #ifdef MODULE | 384 | #ifdef MODULE |
| 384 | void | 385 | void __exit |
| 385 | cleanup_module(void) | 386 | ip2_cleanup_module(void) |
| 386 | { | 387 | { |
| 387 | int err; | 388 | int err; |
| 388 | int i; | 389 | int i; |
| @@ -452,6 +453,7 @@ cleanup_module(void) | |||
| 452 | printk (KERN_DEBUG "IP2 Unloaded\n" ); | 453 | printk (KERN_DEBUG "IP2 Unloaded\n" ); |
| 453 | #endif | 454 | #endif |
| 454 | } | 455 | } |
| 456 | module_exit(ip2_cleanup_module); | ||
| 455 | #endif /* MODULE */ | 457 | #endif /* MODULE */ |
| 456 | 458 | ||
| 457 | static const struct tty_operations ip2_ops = { | 459 | static const struct tty_operations ip2_ops = { |
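The two ip2main.c hunks above replace the legacy exported init_module()/cleanup_module() entry points with functions marked __init/__exit and registered through module_init()/module_exit(). A generic sketch of that registration pattern, illustrative only and not the driver's actual bodies:

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int __init demo_init(void)
{
	printk(KERN_DEBUG "demo: loading module ...\n");
	return 0;		/* 0 on success, negative errno on failure */
}

static void __exit demo_exit(void)
{
	printk(KERN_DEBUG "demo: unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");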
| @@ -1050,9 +1052,9 @@ set_irq( int boardnum, int boardIrq ) | |||
| 1050 | * Write to FIFO; don't bother to adjust fifo capacity for this, since | 1052 | * Write to FIFO; don't bother to adjust fifo capacity for this, since |
| 1051 | * board will respond almost immediately after SendMail hit. | 1053 | * board will respond almost immediately after SendMail hit. |
| 1052 | */ | 1054 | */ |
| 1053 | WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); | 1055 | write_lock_irqsave(&pB->write_fifo_spinlock, flags); |
| 1054 | iiWriteBuf(pB, tempCommand, 4); | 1056 | iiWriteBuf(pB, tempCommand, 4); |
| 1055 | WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); | 1057 | write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); |
| 1056 | pB->i2eUsingIrq = boardIrq; | 1058 | pB->i2eUsingIrq = boardIrq; |
| 1057 | pB->i2eOutMailWaiting |= MB_OUT_STUFFED; | 1059 | pB->i2eOutMailWaiting |= MB_OUT_STUFFED; |
| 1058 | 1060 | ||
| @@ -1070,9 +1072,9 @@ set_irq( int boardnum, int boardIrq ) | |||
| 1070 | (CMD_OF(tempCommand))[4] = 64; // chars | 1072 | (CMD_OF(tempCommand))[4] = 64; // chars |
| 1071 | 1073 | ||
| 1072 | (CMD_OF(tempCommand))[5] = 87; // HW_TEST | 1074 | (CMD_OF(tempCommand))[5] = 87; // HW_TEST |
| 1073 | WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); | 1075 | write_lock_irqsave(&pB->write_fifo_spinlock, flags); |
| 1074 | iiWriteBuf(pB, tempCommand, 8); | 1076 | iiWriteBuf(pB, tempCommand, 8); |
| 1075 | WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); | 1077 | write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); |
| 1076 | 1078 | ||
| 1077 | CHANNEL_OF(tempCommand) = 0; | 1079 | CHANNEL_OF(tempCommand) = 0; |
| 1078 | PTYPE_OF(tempCommand) = PTYPE_BYPASS; | 1080 | PTYPE_OF(tempCommand) = PTYPE_BYPASS; |
| @@ -1087,9 +1089,9 @@ set_irq( int boardnum, int boardIrq ) | |||
| 1087 | CMD_COUNT_OF(tempCommand) = 2; | 1089 | CMD_COUNT_OF(tempCommand) = 2; |
| 1088 | (CMD_OF(tempCommand))[0] = 44; /* get ping */ | 1090 | (CMD_OF(tempCommand))[0] = 44; /* get ping */ |
| 1089 | (CMD_OF(tempCommand))[1] = 200; /* 200 ms */ | 1091 | (CMD_OF(tempCommand))[1] = 200; /* 200 ms */ |
| 1090 | WRITE_LOCK_IRQSAVE(&pB->write_fifo_spinlock,flags); | 1092 | write_lock_irqsave(&pB->write_fifo_spinlock, flags); |
| 1091 | iiWriteBuf(pB, tempCommand, 4); | 1093 | iiWriteBuf(pB, tempCommand, 4); |
| 1092 | WRITE_UNLOCK_IRQRESTORE(&pB->write_fifo_spinlock,flags); | 1094 | write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); |
| 1093 | #endif | 1095 | #endif |
| 1094 | 1096 | ||
| 1095 | iiEnableMailIrq(pB); | 1097 | iiEnableMailIrq(pB); |
| @@ -1268,12 +1270,12 @@ static void do_input(struct work_struct *work) | |||
| 1268 | 1270 | ||
| 1269 | // Data input | 1271 | // Data input |
| 1270 | if ( pCh->pTTY != NULL ) { | 1272 | if ( pCh->pTTY != NULL ) { |
| 1271 | READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags) | 1273 | read_lock_irqsave(&pCh->Ibuf_spinlock, flags); |
| 1272 | if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) { | 1274 | if (!pCh->throttled && (pCh->Ibuf_stuff != pCh->Ibuf_strip)) { |
| 1273 | READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) | 1275 | read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 1274 | i2Input( pCh ); | 1276 | i2Input( pCh ); |
| 1275 | } else | 1277 | } else |
| 1276 | READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) | 1278 | read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 1277 | } else { | 1279 | } else { |
| 1278 | ip2trace(CHANN, ITRC_INPUT, 22, 0 ); | 1280 | ip2trace(CHANN, ITRC_INPUT, 22, 0 ); |
| 1279 | 1281 | ||
| @@ -1614,10 +1616,8 @@ ip2_close( PTTY tty, struct file *pFile ) | |||
| 1614 | 1616 | ||
| 1615 | serviceOutgoingFifo ( pCh->pMyBord ); | 1617 | serviceOutgoingFifo ( pCh->pMyBord ); |
| 1616 | 1618 | ||
| 1617 | if ( tty->driver->flush_buffer ) | 1619 | tty_ldisc_flush(tty); |
| 1618 | tty->driver->flush_buffer(tty); | 1620 | tty_driver_flush_buffer(tty); |
| 1619 | if ( tty->ldisc.flush_buffer ) | ||
| 1620 | tty->ldisc.flush_buffer(tty); | ||
| 1621 | tty->closing = 0; | 1621 | tty->closing = 0; |
| 1622 | 1622 | ||
| 1623 | pCh->pTTY = NULL; | 1623 | pCh->pTTY = NULL; |
| @@ -1717,9 +1717,9 @@ ip2_write( PTTY tty, const unsigned char *pData, int count) | |||
| 1717 | ip2_flush_chars( tty ); | 1717 | ip2_flush_chars( tty ); |
| 1718 | 1718 | ||
| 1719 | /* This is the actual move bit. Make sure it does what we need!!!!! */ | 1719 | /* This is the actual move bit. Make sure it does what we need!!!!! */ |
| 1720 | WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1720 | write_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1721 | bytesSent = i2Output( pCh, pData, count); | 1721 | bytesSent = i2Output( pCh, pData, count); |
| 1722 | WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1722 | write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1723 | 1723 | ||
| 1724 | ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent ); | 1724 | ip2trace (CHANN, ITRC_WRITE, ITRC_RETURN, 1, bytesSent ); |
| 1725 | 1725 | ||
| @@ -1736,7 +1736,7 @@ ip2_write( PTTY tty, const unsigned char *pData, int count) | |||
| 1736 | /* */ | 1736 | /* */ |
| 1737 | /* */ | 1737 | /* */ |
| 1738 | /******************************************************************************/ | 1738 | /******************************************************************************/ |
| 1739 | static void | 1739 | static int |
| 1740 | ip2_putchar( PTTY tty, unsigned char ch ) | 1740 | ip2_putchar( PTTY tty, unsigned char ch ) |
| 1741 | { | 1741 | { |
| 1742 | i2ChanStrPtr pCh = tty->driver_data; | 1742 | i2ChanStrPtr pCh = tty->driver_data; |
| @@ -1744,13 +1744,14 @@ ip2_putchar( PTTY tty, unsigned char ch ) | |||
| 1744 | 1744 | ||
| 1745 | // ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch ); | 1745 | // ip2trace (CHANN, ITRC_PUTC, ITRC_ENTER, 1, ch ); |
| 1746 | 1746 | ||
| 1747 | WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1747 | write_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1748 | pCh->Pbuf[pCh->Pbuf_stuff++] = ch; | 1748 | pCh->Pbuf[pCh->Pbuf_stuff++] = ch; |
| 1749 | if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) { | 1749 | if ( pCh->Pbuf_stuff == sizeof pCh->Pbuf ) { |
| 1750 | WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1750 | write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1751 | ip2_flush_chars( tty ); | 1751 | ip2_flush_chars( tty ); |
| 1752 | } else | 1752 | } else |
| 1753 | WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1753 | write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1754 | return 1; | ||
| 1754 | 1755 | ||
| 1755 | // ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch ); | 1756 | // ip2trace (CHANN, ITRC_PUTC, ITRC_RETURN, 1, ch ); |
| 1756 | } | 1757 | } |
| @@ -1770,7 +1771,7 @@ ip2_flush_chars( PTTY tty ) | |||
| 1770 | i2ChanStrPtr pCh = tty->driver_data; | 1771 | i2ChanStrPtr pCh = tty->driver_data; |
| 1771 | unsigned long flags; | 1772 | unsigned long flags; |
| 1772 | 1773 | ||
| 1773 | WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1774 | write_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1774 | if ( pCh->Pbuf_stuff ) { | 1775 | if ( pCh->Pbuf_stuff ) { |
| 1775 | 1776 | ||
| 1776 | // ip2trace (CHANN, ITRC_PUTC, 10, 1, strip ); | 1777 | // ip2trace (CHANN, ITRC_PUTC, 10, 1, strip ); |
| @@ -1784,7 +1785,7 @@ ip2_flush_chars( PTTY tty ) | |||
| 1784 | } | 1785 | } |
| 1785 | pCh->Pbuf_stuff -= strip; | 1786 | pCh->Pbuf_stuff -= strip; |
| 1786 | } | 1787 | } |
| 1787 | WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1788 | write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1788 | } | 1789 | } |
| 1789 | 1790 | ||
| 1790 | /******************************************************************************/ | 1791 | /******************************************************************************/ |
| @@ -1802,9 +1803,9 @@ ip2_write_room ( PTTY tty ) | |||
| 1802 | i2ChanStrPtr pCh = tty->driver_data; | 1803 | i2ChanStrPtr pCh = tty->driver_data; |
| 1803 | unsigned long flags; | 1804 | unsigned long flags; |
| 1804 | 1805 | ||
| 1805 | READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1806 | read_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1806 | bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff; | 1807 | bytesFree = i2OutputFree( pCh ) - pCh->Pbuf_stuff; |
| 1807 | READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1808 | read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1808 | 1809 | ||
| 1809 | ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree ); | 1810 | ip2trace (CHANN, ITRC_WRITE, 11, 1, bytesFree ); |
| 1810 | 1811 | ||
| @@ -1834,12 +1835,12 @@ ip2_chars_in_buf ( PTTY tty ) | |||
| 1834 | pCh->Obuf_char_count + pCh->Pbuf_stuff, | 1835 | pCh->Obuf_char_count + pCh->Pbuf_stuff, |
| 1835 | pCh->Obuf_char_count, pCh->Pbuf_stuff ); | 1836 | pCh->Obuf_char_count, pCh->Pbuf_stuff ); |
| 1836 | #endif | 1837 | #endif |
| 1837 | READ_LOCK_IRQSAVE(&pCh->Obuf_spinlock,flags); | 1838 | read_lock_irqsave(&pCh->Obuf_spinlock, flags); |
| 1838 | rc = pCh->Obuf_char_count; | 1839 | rc = pCh->Obuf_char_count; |
| 1839 | READ_UNLOCK_IRQRESTORE(&pCh->Obuf_spinlock,flags); | 1840 | read_unlock_irqrestore(&pCh->Obuf_spinlock, flags); |
| 1840 | READ_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1841 | read_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1841 | rc += pCh->Pbuf_stuff; | 1842 | rc += pCh->Pbuf_stuff; |
| 1842 | READ_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1843 | read_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1843 | return rc; | 1844 | return rc; |
| 1844 | } | 1845 | } |
| 1845 | 1846 | ||
| @@ -1863,9 +1864,9 @@ ip2_flush_buffer( PTTY tty ) | |||
| 1863 | #ifdef IP2DEBUG_WRITE | 1864 | #ifdef IP2DEBUG_WRITE |
| 1864 | printk (KERN_DEBUG "IP2: flush buffer\n" ); | 1865 | printk (KERN_DEBUG "IP2: flush buffer\n" ); |
| 1865 | #endif | 1866 | #endif |
| 1866 | WRITE_LOCK_IRQSAVE(&pCh->Pbuf_spinlock,flags); | 1867 | write_lock_irqsave(&pCh->Pbuf_spinlock, flags); |
| 1867 | pCh->Pbuf_stuff = 0; | 1868 | pCh->Pbuf_stuff = 0; |
| 1868 | WRITE_UNLOCK_IRQRESTORE(&pCh->Pbuf_spinlock,flags); | 1869 | write_unlock_irqrestore(&pCh->Pbuf_spinlock, flags); |
| 1869 | i2FlushOutput( pCh ); | 1870 | i2FlushOutput( pCh ); |
| 1870 | ip2_owake(tty); | 1871 | ip2_owake(tty); |
| 1871 | 1872 | ||
| @@ -1951,15 +1952,15 @@ ip2_unthrottle ( PTTY tty ) | |||
| 1951 | pCh->throttled = 0; | 1952 | pCh->throttled = 0; |
| 1952 | i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME); | 1953 | i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_RESUME); |
| 1953 | serviceOutgoingFifo( pCh->pMyBord ); | 1954 | serviceOutgoingFifo( pCh->pMyBord ); |
| 1954 | READ_LOCK_IRQSAVE(&pCh->Ibuf_spinlock,flags) | 1955 | read_lock_irqsave(&pCh->Ibuf_spinlock, flags); |
| 1955 | if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) { | 1956 | if ( pCh->Ibuf_stuff != pCh->Ibuf_strip ) { |
| 1956 | READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) | 1957 | read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 1957 | #ifdef IP2DEBUG_READ | 1958 | #ifdef IP2DEBUG_READ |
| 1958 | printk (KERN_DEBUG "i2Input called from unthrottle\n" ); | 1959 | printk (KERN_DEBUG "i2Input called from unthrottle\n" ); |
| 1959 | #endif | 1960 | #endif |
| 1960 | i2Input( pCh ); | 1961 | i2Input( pCh ); |
| 1961 | } else | 1962 | } else |
| 1962 | READ_UNLOCK_IRQRESTORE(&pCh->Ibuf_spinlock,flags) | 1963 | read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); |
| 1963 | } | 1964 | } |
| 1964 | 1965 | ||
| 1965 | static void | 1966 | static void |
| @@ -2202,9 +2203,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg ) | |||
| 2202 | * for masking). Caller should use TIOCGICOUNT to see which one it was | 2203 | * for masking). Caller should use TIOCGICOUNT to see which one it was |
| 2203 | */ | 2204 | */ |
| 2204 | case TIOCMIWAIT: | 2205 | case TIOCMIWAIT: |
| 2205 | WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); | 2206 | write_lock_irqsave(&pB->read_fifo_spinlock, flags); |
| 2206 | cprev = pCh->icount; /* note the counters on entry */ | 2207 | cprev = pCh->icount; /* note the counters on entry */ |
| 2207 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); | 2208 | write_unlock_irqrestore(&pB->read_fifo_spinlock, flags); |
| 2208 | i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4, | 2209 | i2QueueCommands(PTYPE_BYPASS, pCh, 100, 4, |
| 2209 | CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP); | 2210 | CMD_DCD_REP, CMD_CTS_REP, CMD_DSR_REP, CMD_RI_REP); |
| 2210 | init_waitqueue_entry(&wait, current); | 2211 | init_waitqueue_entry(&wait, current); |
| @@ -2224,9 +2225,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg ) | |||
| 2224 | rc = -ERESTARTSYS; | 2225 | rc = -ERESTARTSYS; |
| 2225 | break; | 2226 | break; |
| 2226 | } | 2227 | } |
| 2227 | WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); | 2228 | write_lock_irqsave(&pB->read_fifo_spinlock, flags); |
| 2228 | cnow = pCh->icount; /* atomic copy */ | 2229 | cnow = pCh->icount; /* atomic copy */ |
| 2229 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); | 2230 | write_unlock_irqrestore(&pB->read_fifo_spinlock, flags); |
| 2230 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && | 2231 | if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && |
| 2231 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { | 2232 | cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { |
| 2232 | rc = -EIO; /* no change => rc */ | 2233 | rc = -EIO; /* no change => rc */ |
| @@ -2264,9 +2265,9 @@ ip2_ioctl ( PTTY tty, struct file *pFile, UINT cmd, ULONG arg ) | |||
| 2264 | case TIOCGICOUNT: | 2265 | case TIOCGICOUNT: |
| 2265 | ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc ); | 2266 | ip2trace (CHANN, ITRC_IOCTL, 11, 1, rc ); |
| 2266 | 2267 | ||
| 2267 | WRITE_LOCK_IRQSAVE(&pB->read_fifo_spinlock, flags); | 2268 | write_lock_irqsave(&pB->read_fifo_spinlock, flags); |
| 2268 | cnow = pCh->icount; | 2269 | cnow = pCh->icount; |
| 2269 | WRITE_UNLOCK_IRQRESTORE(&pB->read_fifo_spinlock, flags); | 2270 | write_unlock_irqrestore(&pB->read_fifo_spinlock, flags); |
| 2270 | p_cuser = argp; | 2271 | p_cuser = argp; |
| 2271 | rc = put_user(cnow.cts, &p_cuser->cts); | 2272 | rc = put_user(cnow.cts, &p_cuser->cts); |
| 2272 | rc = put_user(cnow.dsr, &p_cuser->dsr); | 2273 | rc = put_user(cnow.dsr, &p_cuser->dsr); |
| @@ -2872,7 +2873,7 @@ ip2_ipl_ioctl ( struct inode *pInode, struct file *pFile, UINT cmd, ULONG arg ) | |||
| 2872 | case 65: /* Board - ip2stat */ | 2873 | case 65: /* Board - ip2stat */ |
| 2873 | if ( pB ) { | 2874 | if ( pB ) { |
| 2874 | rc = copy_to_user(argp, pB, sizeof(i2eBordStr)); | 2875 | rc = copy_to_user(argp, pB, sizeof(i2eBordStr)); |
| 2875 | rc = put_user(INB(pB->i2eStatus), | 2876 | rc = put_user(inb(pB->i2eStatus), |
| 2876 | (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) ); | 2877 | (ULONG __user *)(arg + (ULONG)(&pB->i2eStatus) - (ULONG)pB ) ); |
| 2877 | } else { | 2878 | } else { |
| 2878 | rc = -ENODEV; | 2879 | rc = -ENODEV; |
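One pattern worth noting before the isicom.c portion: ip2_close() above stops poking tty->driver->flush_buffer and tty->ldisc.flush_buffer directly and calls the core helpers instead, which handle the missing-method checks internally (isicom_close() below takes the related route of calling its own isicom_flush_buffer() directly). An illustrative sketch of the converted close-path flush, not the drivers' actual code:

#include <linux/tty.h>

static void demo_close_flush(struct tty_struct *tty)
{
	tty_ldisc_flush(tty);		/* discard data queued at the line discipline */
	tty_driver_flush_buffer(tty);	/* discard data still queued in the driver */
}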
diff --git a/drivers/char/isicom.c b/drivers/char/isicom.c index eba2883b630e..4f3cefa8eb0e 100644 --- a/drivers/char/isicom.c +++ b/drivers/char/isicom.c | |||
| @@ -126,8 +126,8 @@ | |||
| 126 | #include <linux/delay.h> | 126 | #include <linux/delay.h> |
| 127 | #include <linux/ioport.h> | 127 | #include <linux/ioport.h> |
| 128 | 128 | ||
| 129 | #include <asm/uaccess.h> | 129 | #include <linux/uaccess.h> |
| 130 | #include <asm/io.h> | 130 | #include <linux/io.h> |
| 131 | #include <asm/system.h> | 131 | #include <asm/system.h> |
| 132 | 132 | ||
| 133 | #include <linux/pci.h> | 133 | #include <linux/pci.h> |
| @@ -189,7 +189,7 @@ struct isi_board { | |||
| 189 | unsigned short status; | 189 | unsigned short status; |
| 190 | unsigned short port_status; /* each bit for each port */ | 190 | unsigned short port_status; /* each bit for each port */ |
| 191 | unsigned short shift_count; | 191 | unsigned short shift_count; |
| 192 | struct isi_port * ports; | 192 | struct isi_port *ports; |
| 193 | signed char count; | 193 | signed char count; |
| 194 | spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */ | 194 | spinlock_t card_lock; /* Card wide lock 11/5/00 -sameer */ |
| 195 | unsigned long flags; | 195 | unsigned long flags; |
| @@ -205,11 +205,11 @@ struct isi_port { | |||
| 205 | u16 channel; | 205 | u16 channel; |
| 206 | u16 status; | 206 | u16 status; |
| 207 | u16 closing_wait; | 207 | u16 closing_wait; |
| 208 | struct isi_board * card; | 208 | struct isi_board *card; |
| 209 | struct tty_struct * tty; | 209 | struct tty_struct *tty; |
| 210 | wait_queue_head_t close_wait; | 210 | wait_queue_head_t close_wait; |
| 211 | wait_queue_head_t open_wait; | 211 | wait_queue_head_t open_wait; |
| 212 | unsigned char * xmit_buf; | 212 | unsigned char *xmit_buf; |
| 213 | int xmit_head; | 213 | int xmit_head; |
| 214 | int xmit_tail; | 214 | int xmit_tail; |
| 215 | int xmit_cnt; | 215 | int xmit_cnt; |
| @@ -405,7 +405,7 @@ static void isicom_tx(unsigned long _data) | |||
| 405 | 405 | ||
| 406 | /* find next active board */ | 406 | /* find next active board */ |
| 407 | card = (prev_card + 1) & 0x0003; | 407 | card = (prev_card + 1) & 0x0003; |
| 408 | while(count-- > 0) { | 408 | while (count-- > 0) { |
| 409 | if (isi_card[card].status & BOARD_ACTIVE) | 409 | if (isi_card[card].status & BOARD_ACTIVE) |
| 410 | break; | 410 | break; |
| 411 | card = (card + 1) & 0x0003; | 411 | card = (card + 1) & 0x0003; |
| @@ -428,7 +428,7 @@ static void isicom_tx(unsigned long _data) | |||
| 428 | if (retries >= 100) | 428 | if (retries >= 100) |
| 429 | goto unlock; | 429 | goto unlock; |
| 430 | 430 | ||
| 431 | for (;count > 0;count--, port++) { | 431 | for (; count > 0; count--, port++) { |
| 432 | /* port not active or tx disabled to force flow control */ | 432 | /* port not active or tx disabled to force flow control */ |
| 433 | if (!(port->flags & ASYNC_INITIALIZED) || | 433 | if (!(port->flags & ASYNC_INITIALIZED) || |
| 434 | !(port->status & ISI_TXOK)) | 434 | !(port->status & ISI_TXOK)) |
| @@ -471,9 +471,10 @@ static void isicom_tx(unsigned long _data) | |||
| 471 | break; | 471 | break; |
| 472 | } | 472 | } |
| 473 | } | 473 | } |
| 474 | if (cnt <= 0) break; | 474 | if (cnt <= 0) |
| 475 | break; | ||
| 475 | word_count = cnt >> 1; | 476 | word_count = cnt >> 1; |
| 476 | outsw(base, port->xmit_buf+port->xmit_tail,word_count); | 477 | outsw(base, port->xmit_buf+port->xmit_tail, word_count); |
| 477 | port->xmit_tail = (port->xmit_tail | 478 | port->xmit_tail = (port->xmit_tail |
| 478 | + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1); | 479 | + (word_count << 1)) & (SERIAL_XMIT_SIZE - 1); |
| 479 | txcount -= (word_count << 1); | 480 | txcount -= (word_count << 1); |
| @@ -556,7 +557,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) | |||
| 556 | tty = port->tty; | 557 | tty = port->tty; |
| 557 | if (tty == NULL) { | 558 | if (tty == NULL) { |
| 558 | word_count = byte_count >> 1; | 559 | word_count = byte_count >> 1; |
| 559 | while(byte_count > 1) { | 560 | while (byte_count > 1) { |
| 560 | inw(base); | 561 | inw(base); |
| 561 | byte_count -= 2; | 562 | byte_count -= 2; |
| 562 | } | 563 | } |
| @@ -569,7 +570,7 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) | |||
| 569 | 570 | ||
| 570 | if (header & 0x8000) { /* Status Packet */ | 571 | if (header & 0x8000) { /* Status Packet */ |
| 571 | header = inw(base); | 572 | header = inw(base); |
| 572 | switch(header & 0xff) { | 573 | switch (header & 0xff) { |
| 573 | case 0: /* Change in EIA signals */ | 574 | case 0: /* Change in EIA signals */ |
| 574 | if (port->flags & ASYNC_CHECK_CD) { | 575 | if (port->flags & ASYNC_CHECK_CD) { |
| 575 | if (port->status & ISI_DCD) { | 576 | if (port->status & ISI_DCD) { |
| @@ -656,7 +657,8 @@ static irqreturn_t isicom_interrupt(int irq, void *dev_id) | |||
| 656 | if (byte_count > 0) { | 657 | if (byte_count > 0) { |
| 657 | pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping " | 658 | pr_dbg("Intr(0x%lx:%d): Flip buffer overflow! dropping " |
| 658 | "bytes...\n", base, channel + 1); | 659 | "bytes...\n", base, channel + 1); |
| 659 | while(byte_count > 0) { /* drain out unread xtra data */ | 660 | /* drain out unread xtra data */ |
| 661 | while (byte_count > 0) { | ||
| 660 | inw(base); | 662 | inw(base); |
| 661 | byte_count -= 2; | 663 | byte_count -= 2; |
| 662 | } | 664 | } |
| @@ -679,8 +681,11 @@ static void isicom_config_port(struct isi_port *port) | |||
| 679 | shift_count = card->shift_count; | 681 | shift_count = card->shift_count; |
| 680 | unsigned char flow_ctrl; | 682 | unsigned char flow_ctrl; |
| 681 | 683 | ||
| 682 | if (!(tty = port->tty) || !tty->termios) | 684 | tty = port->tty; |
| 685 | |||
| 686 | if (tty == NULL) | ||
| 683 | return; | 687 | return; |
| 688 | /* FIXME: Switch to new tty baud API */ | ||
| 684 | baud = C_BAUD(tty); | 689 | baud = C_BAUD(tty); |
| 685 | if (baud & CBAUDEX) { | 690 | if (baud & CBAUDEX) { |
| 686 | baud &= ~CBAUDEX; | 691 | baud &= ~CBAUDEX; |
| @@ -706,7 +711,7 @@ static void isicom_config_port(struct isi_port *port) | |||
| 706 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) | 711 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) |
| 707 | baud++; /* 57.6 Kbps */ | 712 | baud++; /* 57.6 Kbps */ |
| 708 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) | 713 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) |
| 709 | baud +=2; /* 115 Kbps */ | 714 | baud += 2; /* 115 Kbps */ |
| 710 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) | 715 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_SHI) |
| 711 | baud += 3; /* 230 kbps*/ | 716 | baud += 3; /* 230 kbps*/ |
| 712 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) | 717 | if ((port->flags & ASYNC_SPD_MASK) == ASYNC_SPD_WARP) |
| @@ -716,15 +721,14 @@ static void isicom_config_port(struct isi_port *port) | |||
| 716 | /* hang up */ | 721 | /* hang up */ |
| 717 | drop_dtr(port); | 722 | drop_dtr(port); |
| 718 | return; | 723 | return; |
| 719 | } | 724 | } else |
| 720 | else | ||
| 721 | raise_dtr(port); | 725 | raise_dtr(port); |
| 722 | 726 | ||
| 723 | if (WaitTillCardIsFree(base) == 0) { | 727 | if (WaitTillCardIsFree(base) == 0) { |
| 724 | outw(0x8000 | (channel << shift_count) |0x03, base); | 728 | outw(0x8000 | (channel << shift_count) | 0x03, base); |
| 725 | outw(linuxb_to_isib[baud] << 8 | 0x03, base); | 729 | outw(linuxb_to_isib[baud] << 8 | 0x03, base); |
| 726 | channel_setup = 0; | 730 | channel_setup = 0; |
| 727 | switch(C_CSIZE(tty)) { | 731 | switch (C_CSIZE(tty)) { |
| 728 | case CS5: | 732 | case CS5: |
| 729 | channel_setup |= ISICOM_CS5; | 733 | channel_setup |= ISICOM_CS5; |
| 730 | break; | 734 | break; |
| @@ -767,7 +771,7 @@ static void isicom_config_port(struct isi_port *port) | |||
| 767 | flow_ctrl |= ISICOM_INITIATE_XONXOFF; | 771 | flow_ctrl |= ISICOM_INITIATE_XONXOFF; |
| 768 | 772 | ||
| 769 | if (WaitTillCardIsFree(base) == 0) { | 773 | if (WaitTillCardIsFree(base) == 0) { |
| 770 | outw(0x8000 | (channel << shift_count) |0x04, base); | 774 | outw(0x8000 | (channel << shift_count) | 0x04, base); |
| 771 | outw(flow_ctrl << 8 | 0x05, base); | 775 | outw(flow_ctrl << 8 | 0x05, base); |
| 772 | outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base); | 776 | outw((STOP_CHAR(tty)) << 8 | (START_CHAR(tty)), base); |
| 773 | InterruptTheCard(base); | 777 | InterruptTheCard(base); |
| @@ -805,20 +809,17 @@ static int isicom_setup_port(struct isi_port *port) | |||
| 805 | struct isi_board *card = port->card; | 809 | struct isi_board *card = port->card; |
| 806 | unsigned long flags; | 810 | unsigned long flags; |
| 807 | 811 | ||
| 808 | if (port->flags & ASYNC_INITIALIZED) { | 812 | if (port->flags & ASYNC_INITIALIZED) |
| 809 | return 0; | 813 | return 0; |
| 810 | } | ||
| 811 | if (!port->xmit_buf) { | 814 | if (!port->xmit_buf) { |
| 812 | unsigned long page; | 815 | /* Relies on BKL */ |
| 813 | 816 | unsigned long page = get_zeroed_page(GFP_KERNEL); | |
| 814 | if (!(page = get_zeroed_page(GFP_KERNEL))) | 817 | if (page == 0) |
| 815 | return -ENOMEM; | 818 | return -ENOMEM; |
| 816 | 819 | if (port->xmit_buf) | |
| 817 | if (port->xmit_buf) { | ||
| 818 | free_page(page); | 820 | free_page(page); |
| 819 | return -ERESTARTSYS; | 821 | else |
| 820 | } | 822 | port->xmit_buf = (unsigned char *) page; |
| 821 | port->xmit_buf = (unsigned char *) page; | ||
| 822 | } | 823 | } |
| 823 | 824 | ||
| 824 | spin_lock_irqsave(&card->card_lock, flags); | 825 | spin_lock_irqsave(&card->card_lock, flags); |
| @@ -949,21 +950,18 @@ static int isicom_open(struct tty_struct *tty, struct file *filp) | |||
| 949 | port->count++; | 950 | port->count++; |
| 950 | tty->driver_data = port; | 951 | tty->driver_data = port; |
| 951 | port->tty = tty; | 952 | port->tty = tty; |
| 952 | if ((error = isicom_setup_port(port))!=0) | 953 | error = isicom_setup_port(port); |
| 953 | return error; | 954 | if (error == 0) |
| 954 | if ((error = block_til_ready(tty, filp, port))!=0) | 955 | error = block_til_ready(tty, filp, port); |
| 955 | return error; | 956 | return error; |
| 956 | |||
| 957 | return 0; | ||
| 958 | } | 957 | } |
| 959 | 958 | ||
| 960 | /* close et all */ | 959 | /* close et all */ |
| 961 | 960 | ||
| 962 | static inline void isicom_shutdown_board(struct isi_board *bp) | 961 | static inline void isicom_shutdown_board(struct isi_board *bp) |
| 963 | { | 962 | { |
| 964 | if (bp->status & BOARD_ACTIVE) { | 963 | if (bp->status & BOARD_ACTIVE) |
| 965 | bp->status &= ~BOARD_ACTIVE; | 964 | bp->status &= ~BOARD_ACTIVE; |
| 966 | } | ||
| 967 | } | 965 | } |
| 968 | 966 | ||
| 969 | /* card->lock HAS to be held */ | 967 | /* card->lock HAS to be held */ |
| @@ -1012,6 +1010,22 @@ static void isicom_shutdown_port(struct isi_port *port) | |||
| 1012 | } | 1010 | } |
| 1013 | } | 1011 | } |
| 1014 | 1012 | ||
| 1013 | static void isicom_flush_buffer(struct tty_struct *tty) | ||
| 1014 | { | ||
| 1015 | struct isi_port *port = tty->driver_data; | ||
| 1016 | struct isi_board *card = port->card; | ||
| 1017 | unsigned long flags; | ||
| 1018 | |||
| 1019 | if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer")) | ||
| 1020 | return; | ||
| 1021 | |||
| 1022 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 1023 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1024 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 1025 | |||
| 1026 | tty_wakeup(tty); | ||
| 1027 | } | ||
| 1028 | |||
| 1015 | static void isicom_close(struct tty_struct *tty, struct file *filp) | 1029 | static void isicom_close(struct tty_struct *tty, struct file *filp) |
| 1016 | { | 1030 | { |
| 1017 | struct isi_port *port = tty->driver_data; | 1031 | struct isi_port *port = tty->driver_data; |
| @@ -1065,8 +1079,7 @@ static void isicom_close(struct tty_struct *tty, struct file *filp) | |||
| 1065 | isicom_shutdown_port(port); | 1079 | isicom_shutdown_port(port); |
| 1066 | spin_unlock_irqrestore(&card->card_lock, flags); | 1080 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 1067 | 1081 | ||
| 1068 | if (tty->driver->flush_buffer) | 1082 | isicom_flush_buffer(tty); |
| 1069 | tty->driver->flush_buffer(tty); | ||
| 1070 | tty_ldisc_flush(tty); | 1083 | tty_ldisc_flush(tty); |
| 1071 | 1084 | ||
| 1072 | spin_lock_irqsave(&card->card_lock, flags); | 1085 | spin_lock_irqsave(&card->card_lock, flags); |
| @@ -1104,7 +1117,7 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf, | |||
| 1104 | 1117 | ||
| 1105 | spin_lock_irqsave(&card->card_lock, flags); | 1118 | spin_lock_irqsave(&card->card_lock, flags); |
| 1106 | 1119 | ||
| 1107 | while(1) { | 1120 | while (1) { |
| 1108 | cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt | 1121 | cnt = min_t(int, count, min(SERIAL_XMIT_SIZE - port->xmit_cnt |
| 1109 | - 1, SERIAL_XMIT_SIZE - port->xmit_head)); | 1122 | - 1, SERIAL_XMIT_SIZE - port->xmit_head)); |
| 1110 | if (cnt <= 0) | 1123 | if (cnt <= 0) |
| @@ -1125,28 +1138,29 @@ static int isicom_write(struct tty_struct *tty, const unsigned char *buf, | |||
| 1125 | } | 1138 | } |
| 1126 | 1139 | ||
| 1127 | /* put_char et all */ | 1140 | /* put_char et all */ |
| 1128 | static void isicom_put_char(struct tty_struct *tty, unsigned char ch) | 1141 | static int isicom_put_char(struct tty_struct *tty, unsigned char ch) |
| 1129 | { | 1142 | { |
| 1130 | struct isi_port *port = tty->driver_data; | 1143 | struct isi_port *port = tty->driver_data; |
| 1131 | struct isi_board *card = port->card; | 1144 | struct isi_board *card = port->card; |
| 1132 | unsigned long flags; | 1145 | unsigned long flags; |
| 1133 | 1146 | ||
| 1134 | if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) | 1147 | if (isicom_paranoia_check(port, tty->name, "isicom_put_char")) |
| 1135 | return; | 1148 | return 0; |
| 1136 | 1149 | ||
| 1137 | if (!port->xmit_buf) | 1150 | if (!port->xmit_buf) |
| 1138 | return; | 1151 | return 0; |
| 1139 | 1152 | ||
| 1140 | spin_lock_irqsave(&card->card_lock, flags); | 1153 | spin_lock_irqsave(&card->card_lock, flags); |
| 1141 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { | 1154 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) { |
| 1142 | spin_unlock_irqrestore(&card->card_lock, flags); | 1155 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 1143 | return; | 1156 | return 0; |
| 1144 | } | 1157 | } |
| 1145 | 1158 | ||
| 1146 | port->xmit_buf[port->xmit_head++] = ch; | 1159 | port->xmit_buf[port->xmit_head++] = ch; |
| 1147 | port->xmit_head &= (SERIAL_XMIT_SIZE - 1); | 1160 | port->xmit_head &= (SERIAL_XMIT_SIZE - 1); |
| 1148 | port->xmit_cnt++; | 1161 | port->xmit_cnt++; |
| 1149 | spin_unlock_irqrestore(&card->card_lock, flags); | 1162 | spin_unlock_irqrestore(&card->card_lock, flags); |
| 1163 | return 1; | ||
| 1150 | } | 1164 | } |
| 1151 | 1165 | ||
| 1152 | /* flush_chars et all */ | 1166 | /* flush_chars et all */ |
| @@ -1258,6 +1272,8 @@ static int isicom_set_serial_info(struct isi_port *port, | |||
| 1258 | if (copy_from_user(&newinfo, info, sizeof(newinfo))) | 1272 | if (copy_from_user(&newinfo, info, sizeof(newinfo))) |
| 1259 | return -EFAULT; | 1273 | return -EFAULT; |
| 1260 | 1274 | ||
| 1275 | lock_kernel(); | ||
| 1276 | |||
| 1261 | reconfig_port = ((port->flags & ASYNC_SPD_MASK) != | 1277 | reconfig_port = ((port->flags & ASYNC_SPD_MASK) != |
| 1262 | (newinfo.flags & ASYNC_SPD_MASK)); | 1278 | (newinfo.flags & ASYNC_SPD_MASK)); |
| 1263 | 1279 | ||
| @@ -1265,12 +1281,13 @@ static int isicom_set_serial_info(struct isi_port *port, | |||
| 1265 | if ((newinfo.close_delay != port->close_delay) || | 1281 | if ((newinfo.close_delay != port->close_delay) || |
| 1266 | (newinfo.closing_wait != port->closing_wait) || | 1282 | (newinfo.closing_wait != port->closing_wait) || |
| 1267 | ((newinfo.flags & ~ASYNC_USR_MASK) != | 1283 | ((newinfo.flags & ~ASYNC_USR_MASK) != |
| 1268 | (port->flags & ~ASYNC_USR_MASK))) | 1284 | (port->flags & ~ASYNC_USR_MASK))) { |
| 1285 | unlock_kernel(); | ||
| 1269 | return -EPERM; | 1286 | return -EPERM; |
| 1270 | port->flags = ((port->flags & ~ ASYNC_USR_MASK) | | 1287 | } |
| 1288 | port->flags = ((port->flags & ~ASYNC_USR_MASK) | | ||
| 1271 | (newinfo.flags & ASYNC_USR_MASK)); | 1289 | (newinfo.flags & ASYNC_USR_MASK)); |
| 1272 | } | 1290 | } else { |
| 1273 | else { | ||
| 1274 | port->close_delay = newinfo.close_delay; | 1291 | port->close_delay = newinfo.close_delay; |
| 1275 | port->closing_wait = newinfo.closing_wait; | 1292 | port->closing_wait = newinfo.closing_wait; |
| 1276 | port->flags = ((port->flags & ~ASYNC_FLAGS) | | 1293 | port->flags = ((port->flags & ~ASYNC_FLAGS) | |
| @@ -1282,6 +1299,7 @@ static int isicom_set_serial_info(struct isi_port *port, | |||
| 1282 | isicom_config_port(port); | 1299 | isicom_config_port(port); |
| 1283 | spin_unlock_irqrestore(&port->card->card_lock, flags); | 1300 | spin_unlock_irqrestore(&port->card->card_lock, flags); |
| 1284 | } | 1301 | } |
| 1302 | unlock_kernel(); | ||
| 1285 | return 0; | 1303 | return 0; |
| 1286 | } | 1304 | } |
| 1287 | 1305 | ||
| @@ -1290,6 +1308,7 @@ static int isicom_get_serial_info(struct isi_port *port, | |||
| 1290 | { | 1308 | { |
| 1291 | struct serial_struct out_info; | 1309 | struct serial_struct out_info; |
| 1292 | 1310 | ||
| 1311 | lock_kernel(); | ||
| 1293 | memset(&out_info, 0, sizeof(out_info)); | 1312 | memset(&out_info, 0, sizeof(out_info)); |
| 1294 | /* out_info.type = ? */ | 1313 | /* out_info.type = ? */ |
| 1295 | out_info.line = port - isi_ports; | 1314 | out_info.line = port - isi_ports; |
| @@ -1299,6 +1318,7 @@ static int isicom_get_serial_info(struct isi_port *port, | |||
| 1299 | /* out_info.baud_base = ? */ | 1318 | /* out_info.baud_base = ? */ |
| 1300 | out_info.close_delay = port->close_delay; | 1319 | out_info.close_delay = port->close_delay; |
| 1301 | out_info.closing_wait = port->closing_wait; | 1320 | out_info.closing_wait = port->closing_wait; |
| 1321 | unlock_kernel(); | ||
| 1302 | if (copy_to_user(info, &out_info, sizeof(out_info))) | 1322 | if (copy_to_user(info, &out_info, sizeof(out_info))) |
| 1303 | return -EFAULT; | 1323 | return -EFAULT; |
| 1304 | return 0; | 1324 | return 0; |
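The lock_kernel()/unlock_kernel() pairs added to isicom_set_serial_info() and isicom_get_serial_info() keep the serial_struct get/set explicitly serialized, presumably to preserve the existing mutual exclusion as the implicit Big Kernel Lock coverage of the ioctl path is reduced. A stripped-down sketch of the pattern, with an illustrative demo_serial_port structure standing in for the driver's port state:

#include <linux/smp_lock.h>	/* lock_kernel()/unlock_kernel() */
#include <linux/serial.h>
#include <linux/uaccess.h>
#include <linux/string.h>

struct demo_serial_port {
	unsigned short	close_delay;
	unsigned short	closing_wait;
};

static int demo_get_serial_info(struct demo_serial_port *port,
				struct serial_struct __user *info)
{
	struct serial_struct out;

	memset(&out, 0, sizeof(out));
	lock_kernel();			/* serialize against the set path */
	out.close_delay = port->close_delay;
	out.closing_wait = port->closing_wait;
	unlock_kernel();

	return copy_to_user(info, &out, sizeof(out)) ? -EFAULT : 0;
}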
| @@ -1314,7 +1334,7 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp, | |||
| 1314 | if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) | 1334 | if (isicom_paranoia_check(port, tty->name, "isicom_ioctl")) |
| 1315 | return -ENODEV; | 1335 | return -ENODEV; |
| 1316 | 1336 | ||
| 1317 | switch(cmd) { | 1337 | switch (cmd) { |
| 1318 | case TCSBRK: | 1338 | case TCSBRK: |
| 1319 | retval = tty_check_change(tty); | 1339 | retval = tty_check_change(tty); |
| 1320 | if (retval) | 1340 | if (retval) |
| @@ -1331,19 +1351,6 @@ static int isicom_ioctl(struct tty_struct *tty, struct file *filp, | |||
| 1331 | tty_wait_until_sent(tty, 0); | 1351 | tty_wait_until_sent(tty, 0); |
| 1332 | isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4); | 1352 | isicom_send_break(port, arg ? arg * (HZ/10) : HZ/4); |
| 1333 | return 0; | 1353 | return 0; |
| 1334 | |||
| 1335 | case TIOCGSOFTCAR: | ||
| 1336 | return put_user(C_CLOCAL(tty) ? 1 : 0, | ||
| 1337 | (unsigned long __user *)argp); | ||
| 1338 | |||
| 1339 | case TIOCSSOFTCAR: | ||
| 1340 | if (get_user(arg, (unsigned long __user *) argp)) | ||
| 1341 | return -EFAULT; | ||
| 1342 | tty->termios->c_cflag = | ||
| 1343 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1344 | (arg ? CLOCAL : 0)); | ||
| 1345 | return 0; | ||
| 1346 | |||
| 1347 | case TIOCGSERIAL: | 1354 | case TIOCGSERIAL: |
| 1348 | return isicom_get_serial_info(port, argp); | 1355 | return isicom_get_serial_info(port, argp); |
| 1349 | 1356 | ||
| @@ -1453,22 +1460,6 @@ static void isicom_hangup(struct tty_struct *tty) | |||
| 1453 | wake_up_interruptible(&port->open_wait); | 1460 | wake_up_interruptible(&port->open_wait); |
| 1454 | } | 1461 | } |
| 1455 | 1462 | ||
| 1456 | /* flush_buffer et all */ | ||
| 1457 | static void isicom_flush_buffer(struct tty_struct *tty) | ||
| 1458 | { | ||
| 1459 | struct isi_port *port = tty->driver_data; | ||
| 1460 | struct isi_board *card = port->card; | ||
| 1461 | unsigned long flags; | ||
| 1462 | |||
| 1463 | if (isicom_paranoia_check(port, tty->name, "isicom_flush_buffer")) | ||
| 1464 | return; | ||
| 1465 | |||
| 1466 | spin_lock_irqsave(&card->card_lock, flags); | ||
| 1467 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1468 | spin_unlock_irqrestore(&card->card_lock, flags); | ||
| 1469 | |||
| 1470 | tty_wakeup(tty); | ||
| 1471 | } | ||
| 1472 | 1463 | ||
| 1473 | /* | 1464 | /* |
| 1474 | * Driver init and deinit functions | 1465 | * Driver init and deinit functions |
| @@ -1592,7 +1583,7 @@ static int __devinit load_firmware(struct pci_dev *pdev, | |||
| 1592 | default: | 1583 | default: |
| 1593 | dev_err(&pdev->dev, "Unknown signature.\n"); | 1584 | dev_err(&pdev->dev, "Unknown signature.\n"); |
| 1594 | goto end; | 1585 | goto end; |
| 1595 | } | 1586 | } |
| 1596 | 1587 | ||
| 1597 | retval = request_firmware(&fw, name, &pdev->dev); | 1588 | retval = request_firmware(&fw, name, &pdev->dev); |
| 1598 | if (retval) | 1589 | if (retval) |
| @@ -1620,7 +1611,8 @@ static int __devinit load_firmware(struct pci_dev *pdev, | |||
| 1620 | if (WaitTillCardIsFree(base)) | 1611 | if (WaitTillCardIsFree(base)) |
| 1621 | goto errrelfw; | 1612 | goto errrelfw; |
| 1622 | 1613 | ||
| 1623 | if ((status = inw(base + 0x4)) != 0) { | 1614 | status = inw(base + 0x4); |
| 1615 | if (status != 0) { | ||
| 1624 | dev_warn(&pdev->dev, "Card%d rejected load header:\n" | 1616 | dev_warn(&pdev->dev, "Card%d rejected load header:\n" |
| 1625 | KERN_WARNING "Address:0x%x\n" | 1617 | KERN_WARNING "Address:0x%x\n" |
| 1626 | KERN_WARNING "Count:0x%x\n" | 1618 | KERN_WARNING "Count:0x%x\n" |
| @@ -1637,12 +1629,13 @@ static int __devinit load_firmware(struct pci_dev *pdev, | |||
| 1637 | if (WaitTillCardIsFree(base)) | 1629 | if (WaitTillCardIsFree(base)) |
| 1638 | goto errrelfw; | 1630 | goto errrelfw; |
| 1639 | 1631 | ||
| 1640 | if ((status = inw(base + 0x4)) != 0) { | 1632 | status = inw(base + 0x4); |
| 1633 | if (status != 0) { | ||
| 1641 | dev_err(&pdev->dev, "Card%d got out of sync.Card " | 1634 | dev_err(&pdev->dev, "Card%d got out of sync.Card " |
| 1642 | "Status:0x%x\n", index + 1, status); | 1635 | "Status:0x%x\n", index + 1, status); |
| 1643 | goto errrelfw; | 1636 | goto errrelfw; |
| 1644 | } | 1637 | } |
| 1645 | } | 1638 | } |
| 1646 | 1639 | ||
| 1647 | /* XXX: should we test it by reading it back and comparing with original like | 1640 | /* XXX: should we test it by reading it back and comparing with original like |
| 1648 | * in load firmware package? */ | 1641 | * in load firmware package? */ |
| @@ -1666,7 +1659,8 @@ static int __devinit load_firmware(struct pci_dev *pdev, | |||
| 1666 | if (WaitTillCardIsFree(base)) | 1659 | if (WaitTillCardIsFree(base)) |
| 1667 | goto errrelfw; | 1660 | goto errrelfw; |
| 1668 | 1661 | ||
| 1669 | if ((status = inw(base + 0x4)) != 0) { | 1662 | status = inw(base + 0x4); |
| 1663 | if (status != 0) { | ||
| 1670 | dev_warn(&pdev->dev, "Card%d rejected verify header:\n" | 1664 | dev_warn(&pdev->dev, "Card%d rejected verify header:\n" |
| 1671 | KERN_WARNING "Address:0x%x\n" | 1665 | KERN_WARNING "Address:0x%x\n" |
| 1672 | KERN_WARNING "Count:0x%x\n" | 1666 | KERN_WARNING "Count:0x%x\n" |
| @@ -1699,7 +1693,8 @@ static int __devinit load_firmware(struct pci_dev *pdev, | |||
| 1699 | if (WaitTillCardIsFree(base)) | 1693 | if (WaitTillCardIsFree(base)) |
| 1700 | goto errrelfw; | 1694 | goto errrelfw; |
| 1701 | 1695 | ||
| 1702 | if ((status = inw(base + 0x4)) != 0) { | 1696 | status = inw(base + 0x4); |
| 1697 | if (status != 0) { | ||
| 1703 | dev_err(&pdev->dev, "Card%d verify got out of sync. " | 1698 | dev_err(&pdev->dev, "Card%d verify got out of sync. " |
| 1704 | "Card Status:0x%x\n", index + 1, status); | 1699 | "Card Status:0x%x\n", index + 1, status); |
| 1705 | goto errrelfw; | 1700 | goto errrelfw; |
| @@ -1764,7 +1759,7 @@ static int __devinit isicom_probe(struct pci_dev *pdev, | |||
| 1764 | index + 1); | 1759 | index + 1); |
| 1765 | retval = -EBUSY; | 1760 | retval = -EBUSY; |
| 1766 | goto errdec; | 1761 | goto errdec; |
| 1767 | } | 1762 | } |
| 1768 | 1763 | ||
| 1769 | retval = request_irq(board->irq, isicom_interrupt, | 1764 | retval = request_irq(board->irq, isicom_interrupt, |
| 1770 | IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board); | 1765 | IRQF_SHARED | IRQF_DISABLED, ISICOM_NAME, board); |
| @@ -1818,7 +1813,7 @@ static int __init isicom_init(void) | |||
| 1818 | int retval, idx, channel; | 1813 | int retval, idx, channel; |
| 1819 | struct isi_port *port; | 1814 | struct isi_port *port; |
| 1820 | 1815 | ||
| 1821 | for(idx = 0; idx < BOARD_COUNT; idx++) { | 1816 | for (idx = 0; idx < BOARD_COUNT; idx++) { |
| 1822 | port = &isi_ports[idx * 16]; | 1817 | port = &isi_ports[idx * 16]; |
| 1823 | isi_card[idx].ports = port; | 1818 | isi_card[idx].ports = port; |
| 1824 | spin_lock_init(&isi_card[idx].card_lock); | 1819 | spin_lock_init(&isi_card[idx].card_lock); |
| @@ -1832,7 +1827,7 @@ static int __init isicom_init(void) | |||
| 1832 | init_waitqueue_head(&port->open_wait); | 1827 | init_waitqueue_head(&port->open_wait); |
| 1833 | init_waitqueue_head(&port->close_wait); | 1828 | init_waitqueue_head(&port->close_wait); |
| 1834 | /* . . . */ | 1829 | /* . . . */ |
| 1835 | } | 1830 | } |
| 1836 | isi_card[idx].base = 0; | 1831 | isi_card[idx].base = 0; |
| 1837 | isi_card[idx].irq = 0; | 1832 | isi_card[idx].irq = 0; |
| 1838 | } | 1833 | } |
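The isicom hunks above do two things: they push the big kernel lock out of the common ioctl path and down into isicom_set_serial_info()/isicom_get_serial_info(), and they drop the driver-private TIOCGSOFTCAR/TIOCSSOFTCAR cases (which the tty core handles generically) together with the local flush_buffer helper. Below is a minimal sketch of the locking shape being applied, using a hypothetical do_set_serial_info() on a made-up example_port structure rather than the driver's real types: copy from user space before taking the lock, then make sure every early return drops it, mirroring the -EPERM branch visible above. This is an illustration of the pattern, not the driver's exact code.

    #include <linux/errno.h>
    #include <linux/serial.h>
    #include <linux/smp_lock.h>
    #include <linux/uaccess.h>

    struct example_port {                   /* stand-in for struct isi_port */
            unsigned int flags;
            unsigned int close_delay, closing_wait;
    };

    static int do_set_serial_info(struct example_port *port,
                    struct serial_struct __user *info)
    {
            struct serial_struct newinfo;

            if (copy_from_user(&newinfo, info, sizeof(newinfo)))
                    return -EFAULT;         /* nothing locked yet */

            lock_kernel();                  /* BKL pushed down from the dispatcher */

            if (newinfo.close_delay != port->close_delay ||
                            newinfo.closing_wait != port->closing_wait ||
                            (newinfo.flags & ~ASYNC_USR_MASK) !=
                            (port->flags & ~ASYNC_USR_MASK)) {
                    unlock_kernel();        /* every error path must unlock */
                    return -EPERM;
            }

            port->flags = (port->flags & ~ASYNC_USR_MASK) |
                            (newinfo.flags & ASYNC_USR_MASK);

            unlock_kernel();
            return 0;
    }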
diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c index c645455c3fd1..7c8b62f162bf 100644 --- a/drivers/char/istallion.c +++ b/drivers/char/istallion.c | |||
| @@ -1682,16 +1682,6 @@ static int stli_ioctl(struct tty_struct *tty, struct file *file, unsigned int cm | |||
| 1682 | rc = 0; | 1682 | rc = 0; |
| 1683 | 1683 | ||
| 1684 | switch (cmd) { | 1684 | switch (cmd) { |
| 1685 | case TIOCGSOFTCAR: | ||
| 1686 | rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0), | ||
| 1687 | (unsigned __user *) arg); | ||
| 1688 | break; | ||
| 1689 | case TIOCSSOFTCAR: | ||
| 1690 | if ((rc = get_user(ival, (unsigned __user *) arg)) == 0) | ||
| 1691 | tty->termios->c_cflag = | ||
| 1692 | (tty->termios->c_cflag & ~CLOCAL) | | ||
| 1693 | (ival ? CLOCAL : 0); | ||
| 1694 | break; | ||
| 1695 | case TIOCGSERIAL: | 1685 | case TIOCGSERIAL: |
| 1696 | rc = stli_getserial(portp, argp); | 1686 | rc = stli_getserial(portp, argp); |
| 1697 | break; | 1687 | break; |
| @@ -3267,7 +3257,7 @@ static int stli_initecp(struct stlibrd *brdp) | |||
| 3267 | */ | 3257 | */ |
| 3268 | EBRDINIT(brdp); | 3258 | EBRDINIT(brdp); |
| 3269 | 3259 | ||
| 3270 | brdp->membase = ioremap(brdp->memaddr, brdp->memsize); | 3260 | brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize); |
| 3271 | if (brdp->membase == NULL) { | 3261 | if (brdp->membase == NULL) { |
| 3272 | retval = -ENOMEM; | 3262 | retval = -ENOMEM; |
| 3273 | goto err_reg; | 3263 | goto err_reg; |
| @@ -3424,7 +3414,7 @@ static int stli_initonb(struct stlibrd *brdp) | |||
| 3424 | */ | 3414 | */ |
| 3425 | EBRDINIT(brdp); | 3415 | EBRDINIT(brdp); |
| 3426 | 3416 | ||
| 3427 | brdp->membase = ioremap(brdp->memaddr, brdp->memsize); | 3417 | brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize); |
| 3428 | if (brdp->membase == NULL) { | 3418 | if (brdp->membase == NULL) { |
| 3429 | retval = -ENOMEM; | 3419 | retval = -ENOMEM; |
| 3430 | goto err_reg; | 3420 | goto err_reg; |
| @@ -3675,7 +3665,7 @@ static int stli_eisamemprobe(struct stlibrd *brdp) | |||
| 3675 | */ | 3665 | */ |
| 3676 | for (i = 0; (i < stli_eisamempsize); i++) { | 3666 | for (i = 0; (i < stli_eisamempsize); i++) { |
| 3677 | brdp->memaddr = stli_eisamemprobeaddrs[i]; | 3667 | brdp->memaddr = stli_eisamemprobeaddrs[i]; |
| 3678 | brdp->membase = ioremap(brdp->memaddr, brdp->memsize); | 3668 | brdp->membase = ioremap_nocache(brdp->memaddr, brdp->memsize); |
| 3679 | if (brdp->membase == NULL) | 3669 | if (brdp->membase == NULL) |
| 3680 | continue; | 3670 | continue; |
| 3681 | 3671 | ||
| @@ -4433,6 +4423,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un | |||
| 4433 | done = 0; | 4423 | done = 0; |
| 4434 | rc = 0; | 4424 | rc = 0; |
| 4435 | 4425 | ||
| 4426 | lock_kernel(); | ||
| 4427 | |||
| 4436 | switch (cmd) { | 4428 | switch (cmd) { |
| 4437 | case COM_GETPORTSTATS: | 4429 | case COM_GETPORTSTATS: |
| 4438 | rc = stli_getportstats(NULL, argp); | 4430 | rc = stli_getportstats(NULL, argp); |
| @@ -4455,6 +4447,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un | |||
| 4455 | done++; | 4447 | done++; |
| 4456 | break; | 4448 | break; |
| 4457 | } | 4449 | } |
| 4450 | unlock_kernel(); | ||
| 4458 | 4451 | ||
| 4459 | if (done) | 4452 | if (done) |
| 4460 | return rc; | 4453 | return rc; |
| @@ -4472,6 +4465,8 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un | |||
| 4472 | if (brdp->state == 0) | 4465 | if (brdp->state == 0) |
| 4473 | return -ENODEV; | 4466 | return -ENODEV; |
| 4474 | 4467 | ||
| 4468 | lock_kernel(); | ||
| 4469 | |||
| 4475 | switch (cmd) { | 4470 | switch (cmd) { |
| 4476 | case STL_BINTR: | 4471 | case STL_BINTR: |
| 4477 | EBRDINTR(brdp); | 4472 | EBRDINTR(brdp); |
| @@ -4494,6 +4489,7 @@ static int stli_memioctl(struct inode *ip, struct file *fp, unsigned int cmd, un | |||
| 4494 | rc = -ENOIOCTLCMD; | 4489 | rc = -ENOIOCTLCMD; |
| 4495 | break; | 4490 | break; |
| 4496 | } | 4491 | } |
| 4492 | unlock_kernel(); | ||
| 4497 | return rc; | 4493 | return rc; |
| 4498 | } | 4494 | } |
| 4499 | 4495 | ||
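istallion gets the same two treatments: the per-driver soft-carrier ioctls go away, and stli_memioctl() takes the big kernel lock around both of its command blocks, unlocking before the returns. The third change, switching the board window mappings from ioremap() to ioremap_nocache(), keeps the CPU from caching shared memory that the card writes behind its back. A small sketch of that mapping pattern follows; map_board_window() and the one-byte signature probe are invented for the example and only stand in for the checks stli_eisamemprobe() performs on the real hardware.

    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/types.h>

    /* Map a board's shared-memory window uncached and sanity-check it.
     * memaddr/memsize mirror the stlibrd fields used above; 'sig' is a
     * made-up signature byte for the sake of the example. */
    static void __iomem *map_board_window(unsigned long memaddr,
                    unsigned long memsize, u8 sig)
    {
            void __iomem *base = ioremap_nocache(memaddr, memsize);

            if (base == NULL)
                    return NULL;

            if (readb(base) != sig) {       /* probe failed: unmap, move on */
                    iounmap(base);
                    return NULL;
            }
            return base;
    }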
diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c index 60b934adea65..7f7e798c1384 100644 --- a/drivers/char/keyboard.c +++ b/drivers/char/keyboard.c | |||
| @@ -110,6 +110,7 @@ const int max_vals[] = { | |||
| 110 | const int NR_TYPES = ARRAY_SIZE(max_vals); | 110 | const int NR_TYPES = ARRAY_SIZE(max_vals); |
| 111 | 111 | ||
| 112 | struct kbd_struct kbd_table[MAX_NR_CONSOLES]; | 112 | struct kbd_struct kbd_table[MAX_NR_CONSOLES]; |
| 113 | EXPORT_SYMBOL_GPL(kbd_table); | ||
| 113 | static struct kbd_struct *kbd = kbd_table; | 114 | static struct kbd_struct *kbd = kbd_table; |
| 114 | 115 | ||
| 115 | struct vt_spawn_console vt_spawn_con = { | 116 | struct vt_spawn_console vt_spawn_con = { |
| @@ -260,6 +261,7 @@ void kd_mksound(unsigned int hz, unsigned int ticks) | |||
| 260 | } else | 261 | } else |
| 261 | kd_nosound(0); | 262 | kd_nosound(0); |
| 262 | } | 263 | } |
| 264 | EXPORT_SYMBOL(kd_mksound); | ||
| 263 | 265 | ||
| 264 | /* | 266 | /* |
| 265 | * Setting the keyboard rate. | 267 | * Setting the keyboard rate. |
| @@ -1230,7 +1232,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw) | |||
| 1230 | 1232 | ||
| 1231 | if (rep && | 1233 | if (rep && |
| 1232 | (!vc_kbd_mode(kbd, VC_REPEAT) || | 1234 | (!vc_kbd_mode(kbd, VC_REPEAT) || |
| 1233 | (tty && !L_ECHO(tty) && tty->driver->chars_in_buffer(tty)))) { | 1235 | (tty && !L_ECHO(tty) && tty_chars_in_buffer(tty)))) { |
| 1234 | /* | 1236 | /* |
| 1235 | * Don't repeat a key if the input buffers are not empty and the | 1237 | * Don't repeat a key if the input buffers are not empty and the |
| 1236 | * characters get aren't echoed locally. This makes key repeat | 1238 | * characters get aren't echoed locally. This makes key repeat |
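The keyboard changes are small: kbd_table and kd_mksound are exported so code built outside keyboard.c can reach them, and the key-repeat test now goes through the tty_chars_in_buffer() wrapper instead of dereferencing the driver method directly. The sketch below shows what such a wrapper buys, assuming the method still lives in the driver structure as it does in the replaced line; example_chars_in_buffer() is an illustrative stand-in for the core helper, not a claim about its exact definition.

    #include <linux/tty.h>

    /* Callers like kbd_keycode() no longer need to know whether the
     * driver implements chars_in_buffer() at all; a driver without the
     * hook is simply treated as having an empty buffer. */
    static int example_chars_in_buffer(struct tty_struct *tty)
    {
            if (tty->driver->chars_in_buffer)
                    return tty->driver->chars_in_buffer(tty);
            return 0;
    }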
diff --git a/drivers/char/mmtimer.c b/drivers/char/mmtimer.c index e60a74c66e3d..d83db5d880e0 100644 --- a/drivers/char/mmtimer.c +++ b/drivers/char/mmtimer.c | |||
| @@ -74,9 +74,8 @@ static const struct file_operations mmtimer_fops = { | |||
| 74 | * We only have comparison registers RTC1-4 currently available per | 74 | * We only have comparison registers RTC1-4 currently available per |
| 75 | * node. RTC0 is used by SAL. | 75 | * node. RTC0 is used by SAL. |
| 76 | */ | 76 | */ |
| 77 | #define NUM_COMPARATORS 3 | ||
| 78 | /* Check for an RTC interrupt pending */ | 77 | /* Check for an RTC interrupt pending */ |
| 79 | static int inline mmtimer_int_pending(int comparator) | 78 | static int mmtimer_int_pending(int comparator) |
| 80 | { | 79 | { |
| 81 | if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & | 80 | if (HUB_L((unsigned long *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED)) & |
| 82 | SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) | 81 | SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator) |
| @@ -84,15 +83,16 @@ static int inline mmtimer_int_pending(int comparator) | |||
| 84 | else | 83 | else |
| 85 | return 0; | 84 | return 0; |
| 86 | } | 85 | } |
| 86 | |||
| 87 | /* Clear the RTC interrupt pending bit */ | 87 | /* Clear the RTC interrupt pending bit */ |
| 88 | static void inline mmtimer_clr_int_pending(int comparator) | 88 | static void mmtimer_clr_int_pending(int comparator) |
| 89 | { | 89 | { |
| 90 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), | 90 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_EVENT_OCCURRED_ALIAS), |
| 91 | SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); | 91 | SH_EVENT_OCCURRED_RTC1_INT_MASK << comparator); |
| 92 | } | 92 | } |
| 93 | 93 | ||
| 94 | /* Setup timer on comparator RTC1 */ | 94 | /* Setup timer on comparator RTC1 */ |
| 95 | static void inline mmtimer_setup_int_0(u64 expires) | 95 | static void mmtimer_setup_int_0(int cpu, u64 expires) |
| 96 | { | 96 | { |
| 97 | u64 val; | 97 | u64 val; |
| 98 | 98 | ||
| @@ -106,7 +106,7 @@ static void inline mmtimer_setup_int_0(u64 expires) | |||
| 106 | mmtimer_clr_int_pending(0); | 106 | mmtimer_clr_int_pending(0); |
| 107 | 107 | ||
| 108 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | | 108 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC1_INT_CONFIG_IDX_SHFT) | |
| 109 | ((u64)cpu_physical_id(smp_processor_id()) << | 109 | ((u64)cpu_physical_id(cpu) << |
| 110 | SH_RTC1_INT_CONFIG_PID_SHFT); | 110 | SH_RTC1_INT_CONFIG_PID_SHFT); |
| 111 | 111 | ||
| 112 | /* Set configuration */ | 112 | /* Set configuration */ |
| @@ -122,7 +122,7 @@ static void inline mmtimer_setup_int_0(u64 expires) | |||
| 122 | } | 122 | } |
| 123 | 123 | ||
| 124 | /* Setup timer on comparator RTC2 */ | 124 | /* Setup timer on comparator RTC2 */ |
| 125 | static void inline mmtimer_setup_int_1(u64 expires) | 125 | static void mmtimer_setup_int_1(int cpu, u64 expires) |
| 126 | { | 126 | { |
| 127 | u64 val; | 127 | u64 val; |
| 128 | 128 | ||
| @@ -133,7 +133,7 @@ static void inline mmtimer_setup_int_1(u64 expires) | |||
| 133 | mmtimer_clr_int_pending(1); | 133 | mmtimer_clr_int_pending(1); |
| 134 | 134 | ||
| 135 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | | 135 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC2_INT_CONFIG_IDX_SHFT) | |
| 136 | ((u64)cpu_physical_id(smp_processor_id()) << | 136 | ((u64)cpu_physical_id(cpu) << |
| 137 | SH_RTC2_INT_CONFIG_PID_SHFT); | 137 | SH_RTC2_INT_CONFIG_PID_SHFT); |
| 138 | 138 | ||
| 139 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); | 139 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC2_INT_CONFIG), val); |
| @@ -144,7 +144,7 @@ static void inline mmtimer_setup_int_1(u64 expires) | |||
| 144 | } | 144 | } |
| 145 | 145 | ||
| 146 | /* Setup timer on comparator RTC3 */ | 146 | /* Setup timer on comparator RTC3 */ |
| 147 | static void inline mmtimer_setup_int_2(u64 expires) | 147 | static void mmtimer_setup_int_2(int cpu, u64 expires) |
| 148 | { | 148 | { |
| 149 | u64 val; | 149 | u64 val; |
| 150 | 150 | ||
| @@ -155,7 +155,7 @@ static void inline mmtimer_setup_int_2(u64 expires) | |||
| 155 | mmtimer_clr_int_pending(2); | 155 | mmtimer_clr_int_pending(2); |
| 156 | 156 | ||
| 157 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | | 157 | val = ((u64)SGI_MMTIMER_VECTOR << SH_RTC3_INT_CONFIG_IDX_SHFT) | |
| 158 | ((u64)cpu_physical_id(smp_processor_id()) << | 158 | ((u64)cpu_physical_id(cpu) << |
| 159 | SH_RTC3_INT_CONFIG_PID_SHFT); | 159 | SH_RTC3_INT_CONFIG_PID_SHFT); |
| 160 | 160 | ||
| 161 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); | 161 | HUB_S((u64 *)LOCAL_MMR_ADDR(SH_RTC3_INT_CONFIG), val); |
| @@ -170,22 +170,22 @@ static void inline mmtimer_setup_int_2(u64 expires) | |||
| 170 | * in order to insure that the setup succeeds in a deterministic time frame. | 170 | * in order to insure that the setup succeeds in a deterministic time frame. |
| 171 | * It will check if the interrupt setup succeeded. | 171 | * It will check if the interrupt setup succeeded. |
| 172 | */ | 172 | */ |
| 173 | static int inline mmtimer_setup(int comparator, unsigned long expires) | 173 | static int mmtimer_setup(int cpu, int comparator, unsigned long expires) |
| 174 | { | 174 | { |
| 175 | 175 | ||
| 176 | switch (comparator) { | 176 | switch (comparator) { |
| 177 | case 0: | 177 | case 0: |
| 178 | mmtimer_setup_int_0(expires); | 178 | mmtimer_setup_int_0(cpu, expires); |
| 179 | break; | 179 | break; |
| 180 | case 1: | 180 | case 1: |
| 181 | mmtimer_setup_int_1(expires); | 181 | mmtimer_setup_int_1(cpu, expires); |
| 182 | break; | 182 | break; |
| 183 | case 2: | 183 | case 2: |
| 184 | mmtimer_setup_int_2(expires); | 184 | mmtimer_setup_int_2(cpu, expires); |
| 185 | break; | 185 | break; |
| 186 | } | 186 | } |
| 187 | /* We might've missed our expiration time */ | 187 | /* We might've missed our expiration time */ |
| 188 | if (rtc_time() < expires) | 188 | if (rtc_time() <= expires) |
| 189 | return 1; | 189 | return 1; |
| 190 | 190 | ||
| 191 | /* | 191 | /* |
| @@ -195,7 +195,7 @@ static int inline mmtimer_setup(int comparator, unsigned long expires) | |||
| 195 | return mmtimer_int_pending(comparator); | 195 | return mmtimer_int_pending(comparator); |
| 196 | } | 196 | } |
| 197 | 197 | ||
| 198 | static int inline mmtimer_disable_int(long nasid, int comparator) | 198 | static int mmtimer_disable_int(long nasid, int comparator) |
| 199 | { | 199 | { |
| 200 | switch (comparator) { | 200 | switch (comparator) { |
| 201 | case 0: | 201 | case 0: |
| @@ -216,18 +216,124 @@ static int inline mmtimer_disable_int(long nasid, int comparator) | |||
| 216 | return 0; | 216 | return 0; |
| 217 | } | 217 | } |
| 218 | 218 | ||
| 219 | #define TIMER_OFF 0xbadcabLL | 219 | #define COMPARATOR 1 /* The comparator to use */ |
| 220 | 220 | ||
| 221 | /* There is one of these for each comparator */ | 221 | #define TIMER_OFF 0xbadcabLL /* Timer is not setup */ |
| 222 | typedef struct mmtimer { | 222 | #define TIMER_SET 0 /* Comparator is set for this timer */ |
| 223 | spinlock_t lock ____cacheline_aligned; | 223 | |
| 224 | /* There is one of these for each timer */ | ||
| 225 | struct mmtimer { | ||
| 226 | struct rb_node list; | ||
| 224 | struct k_itimer *timer; | 227 | struct k_itimer *timer; |
| 225 | int i; | ||
| 226 | int cpu; | 228 | int cpu; |
| 229 | }; | ||
| 230 | |||
| 231 | struct mmtimer_node { | ||
| 232 | spinlock_t lock ____cacheline_aligned; | ||
| 233 | struct rb_root timer_head; | ||
| 234 | struct rb_node *next; | ||
| 227 | struct tasklet_struct tasklet; | 235 | struct tasklet_struct tasklet; |
| 228 | } mmtimer_t; | 236 | }; |
| 237 | static struct mmtimer_node *timers; | ||
| 238 | |||
| 239 | |||
| 240 | /* | ||
| 241 | * Add a new mmtimer struct to the node's mmtimer list. | ||
| 242 | * This function assumes the struct mmtimer_node is locked. | ||
| 243 | */ | ||
| 244 | static void mmtimer_add_list(struct mmtimer *n) | ||
| 245 | { | ||
| 246 | int nodeid = n->timer->it.mmtimer.node; | ||
| 247 | unsigned long expires = n->timer->it.mmtimer.expires; | ||
| 248 | struct rb_node **link = &timers[nodeid].timer_head.rb_node; | ||
| 249 | struct rb_node *parent = NULL; | ||
| 250 | struct mmtimer *x; | ||
| 251 | |||
| 252 | /* | ||
| 253 | * Find the right place in the rbtree: | ||
| 254 | */ | ||
| 255 | while (*link) { | ||
| 256 | parent = *link; | ||
| 257 | x = rb_entry(parent, struct mmtimer, list); | ||
| 258 | |||
| 259 | if (expires < x->timer->it.mmtimer.expires) | ||
| 260 | link = &(*link)->rb_left; | ||
| 261 | else | ||
| 262 | link = &(*link)->rb_right; | ||
| 263 | } | ||
| 264 | |||
| 265 | /* | ||
| 266 | * Insert the timer to the rbtree and check whether it | ||
| 267 | * replaces the first pending timer | ||
| 268 | */ | ||
| 269 | rb_link_node(&n->list, parent, link); | ||
| 270 | rb_insert_color(&n->list, &timers[nodeid].timer_head); | ||
| 271 | |||
| 272 | if (!timers[nodeid].next || expires < rb_entry(timers[nodeid].next, | ||
| 273 | struct mmtimer, list)->timer->it.mmtimer.expires) | ||
| 274 | timers[nodeid].next = &n->list; | ||
| 275 | } | ||
| 276 | |||
| 277 | /* | ||
| 278 | * Set the comparator for the next timer. | ||
| 279 | * This function assumes the struct mmtimer_node is locked. | ||
| 280 | */ | ||
| 281 | static void mmtimer_set_next_timer(int nodeid) | ||
| 282 | { | ||
| 283 | struct mmtimer_node *n = &timers[nodeid]; | ||
| 284 | struct mmtimer *x; | ||
| 285 | struct k_itimer *t; | ||
| 286 | int o; | ||
| 287 | |||
| 288 | restart: | ||
| 289 | if (n->next == NULL) | ||
| 290 | return; | ||
| 229 | 291 | ||
| 230 | static mmtimer_t ** timers; | 292 | x = rb_entry(n->next, struct mmtimer, list); |
| 293 | t = x->timer; | ||
| 294 | if (!t->it.mmtimer.incr) { | ||
| 295 | /* Not an interval timer */ | ||
| 296 | if (!mmtimer_setup(x->cpu, COMPARATOR, | ||
| 297 | t->it.mmtimer.expires)) { | ||
| 298 | /* Late setup, fire now */ | ||
| 299 | tasklet_schedule(&n->tasklet); | ||
| 300 | } | ||
| 301 | return; | ||
| 302 | } | ||
| 303 | |||
| 304 | /* Interval timer */ | ||
| 305 | o = 0; | ||
| 306 | while (!mmtimer_setup(x->cpu, COMPARATOR, t->it.mmtimer.expires)) { | ||
| 307 | unsigned long e, e1; | ||
| 308 | struct rb_node *next; | ||
| 309 | t->it.mmtimer.expires += t->it.mmtimer.incr << o; | ||
| 310 | t->it_overrun += 1 << o; | ||
| 311 | o++; | ||
| 312 | if (o > 20) { | ||
| 313 | printk(KERN_ALERT "mmtimer: cannot reschedule timer\n"); | ||
| 314 | t->it.mmtimer.clock = TIMER_OFF; | ||
| 315 | n->next = rb_next(&x->list); | ||
| 316 | rb_erase(&x->list, &n->timer_head); | ||
| 317 | kfree(x); | ||
| 318 | goto restart; | ||
| 319 | } | ||
| 320 | |||
| 321 | e = t->it.mmtimer.expires; | ||
| 322 | next = rb_next(&x->list); | ||
| 323 | |||
| 324 | if (next == NULL) | ||
| 325 | continue; | ||
| 326 | |||
| 327 | e1 = rb_entry(next, struct mmtimer, list)-> | ||
| 328 | timer->it.mmtimer.expires; | ||
| 329 | if (e > e1) { | ||
| 330 | n->next = next; | ||
| 331 | rb_erase(&x->list, &n->timer_head); | ||
| 332 | mmtimer_add_list(x); | ||
| 333 | goto restart; | ||
| 334 | } | ||
| 335 | } | ||
| 336 | } | ||
| 231 | 337 | ||
| 232 | /** | 338 | /** |
| 233 | * mmtimer_ioctl - ioctl interface for /dev/mmtimer | 339 | * mmtimer_ioctl - ioctl interface for /dev/mmtimer |
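The catch-up arithmetic in mmtimer_set_next_timer() above carries over from the old reschedule_periodic_timer(): each failed attempt to arm the comparator pushes the expiry forward by incr << o periods and adds 1 << o to it_overrun, so a timer that has fallen behind converges in at most about twenty doublings. What is new is that after those twenty attempts the timer is switched off and freed, and that a pushed-forward expiry which overtakes the next pending timer forces a re-sort of the tree. A throwaway user-space model of just the doubling; try_arm(), now and the constants are all invented stand-ins, with try_arm() playing the role of mmtimer_setup().

    #include <stdio.h>

    static unsigned long now = 1000;        /* stand-in for rtc_time() */

    static int try_arm(unsigned long expires)
    {
            return expires > now;           /* 1 = armed in time, 0 = too late */
    }

    int main(void)
    {
            unsigned long expires = 100, incr = 10, overrun = 0;
            int o = 0;

            while (!try_arm(expires) && o <= 20) {
                    expires += incr << o;   /* skip 1, 2, 4, ... periods ahead */
                    overrun += 1UL << o;
                    o++;
            }
            printf("expires=%lu overruns=%lu attempts=%d\n",
                            expires, overrun, o);
            return 0;
    }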
| @@ -390,35 +496,6 @@ static int sgi_clock_set(clockid_t clockid, struct timespec *tp) | |||
| 390 | return 0; | 496 | return 0; |
| 391 | } | 497 | } |
| 392 | 498 | ||
| 393 | /* | ||
| 394 | * Schedule the next periodic interrupt. This function will attempt | ||
| 395 | * to schedule a periodic interrupt later if necessary. If the scheduling | ||
| 396 | * of an interrupt fails then the time to skip is lengthened | ||
| 397 | * exponentially in order to ensure that the next interrupt | ||
| 398 | * can be properly scheduled.. | ||
| 399 | */ | ||
| 400 | static int inline reschedule_periodic_timer(mmtimer_t *x) | ||
| 401 | { | ||
| 402 | int n; | ||
| 403 | struct k_itimer *t = x->timer; | ||
| 404 | |||
| 405 | t->it.mmtimer.clock = x->i; | ||
| 406 | t->it_overrun--; | ||
| 407 | |||
| 408 | n = 0; | ||
| 409 | do { | ||
| 410 | |||
| 411 | t->it.mmtimer.expires += t->it.mmtimer.incr << n; | ||
| 412 | t->it_overrun += 1 << n; | ||
| 413 | n++; | ||
| 414 | if (n > 20) | ||
| 415 | return 1; | ||
| 416 | |||
| 417 | } while (!mmtimer_setup(x->i, t->it.mmtimer.expires)); | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | } | ||
| 421 | |||
| 422 | /** | 499 | /** |
| 423 | * mmtimer_interrupt - timer interrupt handler | 500 | * mmtimer_interrupt - timer interrupt handler |
| 424 | * @irq: irq received | 501 | * @irq: irq received |
| @@ -435,71 +512,75 @@ static int inline reschedule_periodic_timer(mmtimer_t *x) | |||
| 435 | static irqreturn_t | 512 | static irqreturn_t |
| 436 | mmtimer_interrupt(int irq, void *dev_id) | 513 | mmtimer_interrupt(int irq, void *dev_id) |
| 437 | { | 514 | { |
| 438 | int i; | ||
| 439 | unsigned long expires = 0; | 515 | unsigned long expires = 0; |
| 440 | int result = IRQ_NONE; | 516 | int result = IRQ_NONE; |
| 441 | unsigned indx = cpu_to_node(smp_processor_id()); | 517 | unsigned indx = cpu_to_node(smp_processor_id()); |
| 518 | struct mmtimer *base; | ||
| 442 | 519 | ||
| 443 | /* | 520 | spin_lock(&timers[indx].lock); |
| 444 | * Do this once for each comparison register | 521 | base = rb_entry(timers[indx].next, struct mmtimer, list); |
| 445 | */ | 522 | if (base == NULL) { |
| 446 | for (i = 0; i < NUM_COMPARATORS; i++) { | 523 | spin_unlock(&timers[indx].lock); |
| 447 | mmtimer_t *base = timers[indx] + i; | 524 | return result; |
| 448 | /* Make sure this doesn't get reused before tasklet_sched */ | 525 | } |
| 449 | spin_lock(&base->lock); | 526 | |
| 450 | if (base->cpu == smp_processor_id()) { | 527 | if (base->cpu == smp_processor_id()) { |
| 451 | if (base->timer) | 528 | if (base->timer) |
| 452 | expires = base->timer->it.mmtimer.expires; | 529 | expires = base->timer->it.mmtimer.expires; |
| 453 | /* expires test won't work with shared irqs */ | 530 | /* expires test won't work with shared irqs */ |
| 454 | if ((mmtimer_int_pending(i) > 0) || | 531 | if ((mmtimer_int_pending(COMPARATOR) > 0) || |
| 455 | (expires && (expires < rtc_time()))) { | 532 | (expires && (expires <= rtc_time()))) { |
| 456 | mmtimer_clr_int_pending(i); | 533 | mmtimer_clr_int_pending(COMPARATOR); |
| 457 | tasklet_schedule(&base->tasklet); | 534 | tasklet_schedule(&timers[indx].tasklet); |
| 458 | result = IRQ_HANDLED; | 535 | result = IRQ_HANDLED; |
| 459 | } | ||
| 460 | } | 536 | } |
| 461 | spin_unlock(&base->lock); | ||
| 462 | expires = 0; | ||
| 463 | } | 537 | } |
| 538 | spin_unlock(&timers[indx].lock); | ||
| 464 | return result; | 539 | return result; |
| 465 | } | 540 | } |
| 466 | 541 | ||
| 467 | void mmtimer_tasklet(unsigned long data) { | 542 | static void mmtimer_tasklet(unsigned long data) |
| 468 | mmtimer_t *x = (mmtimer_t *)data; | 543 | { |
| 469 | struct k_itimer *t = x->timer; | 544 | int nodeid = data; |
| 545 | struct mmtimer_node *mn = &timers[nodeid]; | ||
| 546 | struct mmtimer *x = rb_entry(mn->next, struct mmtimer, list); | ||
| 547 | struct k_itimer *t; | ||
| 470 | unsigned long flags; | 548 | unsigned long flags; |
| 471 | 549 | ||
| 472 | if (t == NULL) | ||
| 473 | return; | ||
| 474 | |||
| 475 | /* Send signal and deal with periodic signals */ | 550 | /* Send signal and deal with periodic signals */ |
| 476 | spin_lock_irqsave(&t->it_lock, flags); | 551 | spin_lock_irqsave(&mn->lock, flags); |
| 477 | spin_lock(&x->lock); | 552 | if (!mn->next) |
| 478 | /* If timer was deleted between interrupt and here, leave */ | ||
| 479 | if (t != x->timer) | ||
| 480 | goto out; | 553 | goto out; |
| 481 | t->it_overrun = 0; | ||
| 482 | 554 | ||
| 483 | if (posix_timer_event(t, 0) != 0) { | 555 | x = rb_entry(mn->next, struct mmtimer, list); |
| 556 | t = x->timer; | ||
| 557 | |||
| 558 | if (t->it.mmtimer.clock == TIMER_OFF) | ||
| 559 | goto out; | ||
| 560 | |||
| 561 | t->it_overrun = 0; | ||
| 484 | 562 | ||
| 485 | // printk(KERN_WARNING "mmtimer: cannot deliver signal.\n"); | 563 | mn->next = rb_next(&x->list); |
| 564 | rb_erase(&x->list, &mn->timer_head); | ||
| 486 | 565 | ||
| 566 | if (posix_timer_event(t, 0) != 0) | ||
| 487 | t->it_overrun++; | 567 | t->it_overrun++; |
| 488 | } | 568 | |
| 489 | if(t->it.mmtimer.incr) { | 569 | if(t->it.mmtimer.incr) { |
| 490 | /* Periodic timer */ | 570 | t->it.mmtimer.expires += t->it.mmtimer.incr; |
| 491 | if (reschedule_periodic_timer(x)) { | 571 | mmtimer_add_list(x); |
| 492 | printk(KERN_WARNING "mmtimer: unable to reschedule\n"); | ||
| 493 | x->timer = NULL; | ||
| 494 | } | ||
| 495 | } else { | 572 | } else { |
| 496 | /* Ensure we don't false trigger in mmtimer_interrupt */ | 573 | /* Ensure we don't false trigger in mmtimer_interrupt */ |
| 574 | t->it.mmtimer.clock = TIMER_OFF; | ||
| 497 | t->it.mmtimer.expires = 0; | 575 | t->it.mmtimer.expires = 0; |
| 576 | kfree(x); | ||
| 498 | } | 577 | } |
| 578 | /* Set comparator for next timer, if there is one */ | ||
| 579 | mmtimer_set_next_timer(nodeid); | ||
| 580 | |||
| 499 | t->it_overrun_last = t->it_overrun; | 581 | t->it_overrun_last = t->it_overrun; |
| 500 | out: | 582 | out: |
| 501 | spin_unlock(&x->lock); | 583 | spin_unlock_irqrestore(&mn->lock, flags); |
| 502 | spin_unlock_irqrestore(&t->it_lock, flags); | ||
| 503 | } | 584 | } |
| 504 | 585 | ||
| 505 | static int sgi_timer_create(struct k_itimer *timer) | 586 | static int sgi_timer_create(struct k_itimer *timer) |
| @@ -516,19 +597,50 @@ static int sgi_timer_create(struct k_itimer *timer) | |||
| 516 | */ | 597 | */ |
| 517 | static int sgi_timer_del(struct k_itimer *timr) | 598 | static int sgi_timer_del(struct k_itimer *timr) |
| 518 | { | 599 | { |
| 519 | int i = timr->it.mmtimer.clock; | ||
| 520 | cnodeid_t nodeid = timr->it.mmtimer.node; | 600 | cnodeid_t nodeid = timr->it.mmtimer.node; |
| 521 | mmtimer_t *t = timers[nodeid] + i; | ||
| 522 | unsigned long irqflags; | 601 | unsigned long irqflags; |
| 523 | 602 | ||
| 524 | if (i != TIMER_OFF) { | 603 | spin_lock_irqsave(&timers[nodeid].lock, irqflags); |
| 525 | spin_lock_irqsave(&t->lock, irqflags); | 604 | if (timr->it.mmtimer.clock != TIMER_OFF) { |
| 526 | mmtimer_disable_int(cnodeid_to_nasid(nodeid),i); | 605 | unsigned long expires = timr->it.mmtimer.expires; |
| 527 | t->timer = NULL; | 606 | struct rb_node *n = timers[nodeid].timer_head.rb_node; |
| 607 | struct mmtimer *uninitialized_var(t); | ||
| 608 | int r = 0; | ||
| 609 | |||
| 528 | timr->it.mmtimer.clock = TIMER_OFF; | 610 | timr->it.mmtimer.clock = TIMER_OFF; |
| 529 | timr->it.mmtimer.expires = 0; | 611 | timr->it.mmtimer.expires = 0; |
| 530 | spin_unlock_irqrestore(&t->lock, irqflags); | 612 | |
| 613 | while (n) { | ||
| 614 | t = rb_entry(n, struct mmtimer, list); | ||
| 615 | if (t->timer == timr) | ||
| 616 | break; | ||
| 617 | |||
| 618 | if (expires < t->timer->it.mmtimer.expires) | ||
| 619 | n = n->rb_left; | ||
| 620 | else | ||
| 621 | n = n->rb_right; | ||
| 622 | } | ||
| 623 | |||
| 624 | if (!n) { | ||
| 625 | spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); | ||
| 626 | return 0; | ||
| 627 | } | ||
| 628 | |||
| 629 | if (timers[nodeid].next == n) { | ||
| 630 | timers[nodeid].next = rb_next(n); | ||
| 631 | r = 1; | ||
| 632 | } | ||
| 633 | |||
| 634 | rb_erase(n, &timers[nodeid].timer_head); | ||
| 635 | kfree(t); | ||
| 636 | |||
| 637 | if (r) { | ||
| 638 | mmtimer_disable_int(cnodeid_to_nasid(nodeid), | ||
| 639 | COMPARATOR); | ||
| 640 | mmtimer_set_next_timer(nodeid); | ||
| 641 | } | ||
| 531 | } | 642 | } |
| 643 | spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); | ||
| 532 | return 0; | 644 | return 0; |
| 533 | } | 645 | } |
| 534 | 646 | ||
| @@ -557,12 +669,11 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, | |||
| 557 | struct itimerspec * new_setting, | 669 | struct itimerspec * new_setting, |
| 558 | struct itimerspec * old_setting) | 670 | struct itimerspec * old_setting) |
| 559 | { | 671 | { |
| 560 | |||
| 561 | int i; | ||
| 562 | unsigned long when, period, irqflags; | 672 | unsigned long when, period, irqflags; |
| 563 | int err = 0; | 673 | int err = 0; |
| 564 | cnodeid_t nodeid; | 674 | cnodeid_t nodeid; |
| 565 | mmtimer_t *base; | 675 | struct mmtimer *base; |
| 676 | struct rb_node *n; | ||
| 566 | 677 | ||
| 567 | if (old_setting) | 678 | if (old_setting) |
| 568 | sgi_timer_get(timr, old_setting); | 679 | sgi_timer_get(timr, old_setting); |
| @@ -575,6 +686,10 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, | |||
| 575 | /* Clear timer */ | 686 | /* Clear timer */ |
| 576 | return 0; | 687 | return 0; |
| 577 | 688 | ||
| 689 | base = kmalloc(sizeof(struct mmtimer), GFP_KERNEL); | ||
| 690 | if (base == NULL) | ||
| 691 | return -ENOMEM; | ||
| 692 | |||
| 578 | if (flags & TIMER_ABSTIME) { | 693 | if (flags & TIMER_ABSTIME) { |
| 579 | struct timespec n; | 694 | struct timespec n; |
| 580 | unsigned long now; | 695 | unsigned long now; |
| @@ -604,47 +719,38 @@ static int sgi_timer_set(struct k_itimer *timr, int flags, | |||
| 604 | preempt_disable(); | 719 | preempt_disable(); |
| 605 | 720 | ||
| 606 | nodeid = cpu_to_node(smp_processor_id()); | 721 | nodeid = cpu_to_node(smp_processor_id()); |
| 607 | retry: | ||
| 608 | /* Don't use an allocated timer, or a deleted one that's pending */ | ||
| 609 | for(i = 0; i< NUM_COMPARATORS; i++) { | ||
| 610 | base = timers[nodeid] + i; | ||
| 611 | if (!base->timer && !base->tasklet.state) { | ||
| 612 | break; | ||
| 613 | } | ||
| 614 | } | ||
| 615 | |||
| 616 | if (i == NUM_COMPARATORS) { | ||
| 617 | preempt_enable(); | ||
| 618 | return -EBUSY; | ||
| 619 | } | ||
| 620 | 722 | ||
| 621 | spin_lock_irqsave(&base->lock, irqflags); | 723 | /* Lock the node timer structure */ |
| 724 | spin_lock_irqsave(&timers[nodeid].lock, irqflags); | ||
| 622 | 725 | ||
| 623 | if (base->timer || base->tasklet.state != 0) { | ||
| 624 | spin_unlock_irqrestore(&base->lock, irqflags); | ||
| 625 | goto retry; | ||
| 626 | } | ||
| 627 | base->timer = timr; | 726 | base->timer = timr; |
| 628 | base->cpu = smp_processor_id(); | 727 | base->cpu = smp_processor_id(); |
| 629 | 728 | ||
| 630 | timr->it.mmtimer.clock = i; | 729 | timr->it.mmtimer.clock = TIMER_SET; |
| 631 | timr->it.mmtimer.node = nodeid; | 730 | timr->it.mmtimer.node = nodeid; |
| 632 | timr->it.mmtimer.incr = period; | 731 | timr->it.mmtimer.incr = period; |
| 633 | timr->it.mmtimer.expires = when; | 732 | timr->it.mmtimer.expires = when; |
| 634 | 733 | ||
| 635 | if (period == 0) { | 734 | n = timers[nodeid].next; |
| 636 | if (!mmtimer_setup(i, when)) { | 735 | |
| 637 | mmtimer_disable_int(-1, i); | 736 | /* Add the new struct mmtimer to node's timer list */ |
| 638 | posix_timer_event(timr, 0); | 737 | mmtimer_add_list(base); |
| 639 | timr->it.mmtimer.expires = 0; | 738 | |
| 640 | } | 739 | if (timers[nodeid].next == n) { |
| 641 | } else { | 740 | /* No need to reprogram comparator for now */ |
| 642 | timr->it.mmtimer.expires -= period; | 741 | spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); |
| 643 | if (reschedule_periodic_timer(base)) | 742 | preempt_enable(); |
| 644 | err = -EINVAL; | 743 | return err; |
| 645 | } | 744 | } |
| 646 | 745 | ||
| 647 | spin_unlock_irqrestore(&base->lock, irqflags); | 746 | /* We need to reprogram the comparator */ |
| 747 | if (n) | ||
| 748 | mmtimer_disable_int(cnodeid_to_nasid(nodeid), COMPARATOR); | ||
| 749 | |||
| 750 | mmtimer_set_next_timer(nodeid); | ||
| 751 | |||
| 752 | /* Unlock the node timer structure */ | ||
| 753 | spin_unlock_irqrestore(&timers[nodeid].lock, irqflags); | ||
| 648 | 754 | ||
| 649 | preempt_enable(); | 755 | preempt_enable(); |
| 650 | 756 | ||
| @@ -669,7 +775,6 @@ static struct k_clock sgi_clock = { | |||
| 669 | */ | 775 | */ |
| 670 | static int __init mmtimer_init(void) | 776 | static int __init mmtimer_init(void) |
| 671 | { | 777 | { |
| 672 | unsigned i; | ||
| 673 | cnodeid_t node, maxn = -1; | 778 | cnodeid_t node, maxn = -1; |
| 674 | 779 | ||
| 675 | if (!ia64_platform_is("sn2")) | 780 | if (!ia64_platform_is("sn2")) |
| @@ -706,31 +811,18 @@ static int __init mmtimer_init(void) | |||
| 706 | maxn++; | 811 | maxn++; |
| 707 | 812 | ||
| 708 | /* Allocate list of node ptrs to mmtimer_t's */ | 813 | /* Allocate list of node ptrs to mmtimer_t's */ |
| 709 | timers = kzalloc(sizeof(mmtimer_t *)*maxn, GFP_KERNEL); | 814 | timers = kzalloc(sizeof(struct mmtimer_node)*maxn, GFP_KERNEL); |
| 710 | if (timers == NULL) { | 815 | if (timers == NULL) { |
| 711 | printk(KERN_ERR "%s: failed to allocate memory for device\n", | 816 | printk(KERN_ERR "%s: failed to allocate memory for device\n", |
| 712 | MMTIMER_NAME); | 817 | MMTIMER_NAME); |
| 713 | goto out3; | 818 | goto out3; |
| 714 | } | 819 | } |
| 715 | 820 | ||
| 716 | /* Allocate mmtimer_t's for each online node */ | 821 | /* Initialize struct mmtimer's for each online node */ |
| 717 | for_each_online_node(node) { | 822 | for_each_online_node(node) { |
| 718 | timers[node] = kmalloc_node(sizeof(mmtimer_t)*NUM_COMPARATORS, GFP_KERNEL, node); | 823 | spin_lock_init(&timers[node].lock); |
| 719 | if (timers[node] == NULL) { | 824 | tasklet_init(&timers[node].tasklet, mmtimer_tasklet, |
| 720 | printk(KERN_ERR "%s: failed to allocate memory for device\n", | 825 | (unsigned long) node); |
| 721 | MMTIMER_NAME); | ||
| 722 | goto out4; | ||
| 723 | } | ||
| 724 | for (i=0; i< NUM_COMPARATORS; i++) { | ||
| 725 | mmtimer_t * base = timers[node] + i; | ||
| 726 | |||
| 727 | spin_lock_init(&base->lock); | ||
| 728 | base->timer = NULL; | ||
| 729 | base->cpu = 0; | ||
| 730 | base->i = i; | ||
| 731 | tasklet_init(&base->tasklet, mmtimer_tasklet, | ||
| 732 | (unsigned long) (base)); | ||
| 733 | } | ||
| 734 | } | 826 | } |
| 735 | 827 | ||
| 736 | sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; | 828 | sgi_clock_period = sgi_clock.res = NSEC_PER_SEC / sn_rtc_cycles_per_second; |
| @@ -741,11 +833,8 @@ static int __init mmtimer_init(void) | |||
| 741 | 833 | ||
| 742 | return 0; | 834 | return 0; |
| 743 | 835 | ||
| 744 | out4: | ||
| 745 | for_each_online_node(node) { | ||
| 746 | kfree(timers[node]); | ||
| 747 | } | ||
| 748 | out3: | 836 | out3: |
| 837 | kfree(timers); | ||
| 749 | misc_deregister(&mmtimer_miscdev); | 838 | misc_deregister(&mmtimer_miscdev); |
| 750 | out2: | 839 | out2: |
| 751 | free_irq(SGI_MMTIMER_VECTOR, NULL); | 840 | free_irq(SGI_MMTIMER_VECTOR, NULL); |
| @@ -754,4 +843,3 @@ out1: | |||
| 754 | } | 843 | } |
| 755 | 844 | ||
| 756 | module_init(mmtimer_init); | 845 | module_init(mmtimer_init); |
| 757 | |||
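The net effect of the mmtimer rework above: instead of one statically allocated slot per hardware comparator (and -EBUSY once three timers exist on a node), each node now keeps an rbtree of struct mmtimer sorted by expiry, timers[node].next caches the earliest entry, and the single comparator (COMPARATOR 1) is reprogrammed only when that earliest entry changes. The following deliberately simplified user-space model shows the bookkeeping; a sorted singly linked list stands in for the rbtree, and arm_comparator() stands in for mmtimer_setup(). All names here (toy_timer, toy_add, arm_comparator) are invented for the illustration. The "did the head change?" test at the end is the same one sgi_timer_set() uses above to decide whether the comparator must be disabled and reprogrammed.

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_timer {
            unsigned long expires;
            struct toy_timer *next;
    };

    static struct toy_timer *head;          /* earliest-expiring timer */

    static void arm_comparator(unsigned long expires)
    {
            printf("program comparator for %lu\n", expires);
    }

    static void toy_add(struct toy_timer *t)
    {
            struct toy_timer **link = &head;

            /* keep the list sorted by expiry, ties go to the right */
            while (*link && (*link)->expires <= t->expires)
                    link = &(*link)->next;
            t->next = *link;
            *link = t;

            /* only a new earliest timer requires reprogramming */
            if (head == t)
                    arm_comparator(t->expires);
    }

    int main(void)
    {
            struct toy_timer a = { 300 }, b = { 100 }, c = { 200 };

            toy_add(&a);            /* arms for 300 */
            toy_add(&b);            /* new earliest: re-arms for 100 */
            toy_add(&c);            /* 200 is not earliest: no re-arm */
            return 0;
    }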
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 64b7b2b18352..d57d3a61919b 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
| @@ -2,7 +2,8 @@ | |||
| 2 | /* | 2 | /* |
| 3 | * moxa.c -- MOXA Intellio family multiport serial driver. | 3 | * moxa.c -- MOXA Intellio family multiport serial driver. |
| 4 | * | 4 | * |
| 5 | * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com.tw). | 5 | * Copyright (C) 1999-2000 Moxa Technologies (support@moxa.com). |
| 6 | * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com> | ||
| 6 | * | 7 | * |
| 7 | * This code is loosely based on the Linux serial driver, written by | 8 | * This code is loosely based on the Linux serial driver, written by |
| 8 | * Linus Torvalds, Theodore T'so and others. | 9 | * Linus Torvalds, Theodore T'so and others. |
| @@ -25,6 +26,7 @@ | |||
| 25 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
| 26 | #include <linux/ioport.h> | 27 | #include <linux/ioport.h> |
| 27 | #include <linux/errno.h> | 28 | #include <linux/errno.h> |
| 29 | #include <linux/firmware.h> | ||
| 28 | #include <linux/signal.h> | 30 | #include <linux/signal.h> |
| 29 | #include <linux/sched.h> | 31 | #include <linux/sched.h> |
| 30 | #include <linux/timer.h> | 32 | #include <linux/timer.h> |
| @@ -41,21 +43,26 @@ | |||
| 41 | #include <linux/pci.h> | 43 | #include <linux/pci.h> |
| 42 | #include <linux/init.h> | 44 | #include <linux/init.h> |
| 43 | #include <linux/bitops.h> | 45 | #include <linux/bitops.h> |
| 44 | #include <linux/completion.h> | ||
| 45 | 46 | ||
| 46 | #include <asm/system.h> | 47 | #include <asm/system.h> |
| 47 | #include <asm/io.h> | 48 | #include <asm/io.h> |
| 48 | #include <asm/uaccess.h> | 49 | #include <asm/uaccess.h> |
| 49 | 50 | ||
| 50 | #define MOXA_VERSION "5.1k" | 51 | #include "moxa.h" |
| 52 | |||
| 53 | #define MOXA_VERSION "6.0k" | ||
| 54 | |||
| 55 | #define MOXA_FW_HDRLEN 32 | ||
| 51 | 56 | ||
| 52 | #define MOXAMAJOR 172 | 57 | #define MOXAMAJOR 172 |
| 53 | #define MOXACUMAJOR 173 | ||
| 54 | 58 | ||
| 55 | #define MAX_BOARDS 4 /* Don't change this value */ | 59 | #define MAX_BOARDS 4 /* Don't change this value */ |
| 56 | #define MAX_PORTS_PER_BOARD 32 /* Don't change this value */ | 60 | #define MAX_PORTS_PER_BOARD 32 /* Don't change this value */ |
| 57 | #define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD) | 61 | #define MAX_PORTS (MAX_BOARDS * MAX_PORTS_PER_BOARD) |
| 58 | 62 | ||
| 63 | #define MOXA_IS_320(brd) ((brd)->boardType == MOXA_BOARD_C320_ISA || \ | ||
| 64 | (brd)->boardType == MOXA_BOARD_C320_PCI) | ||
| 65 | |||
| 59 | /* | 66 | /* |
| 60 | * Define the Moxa PCI vendor and device IDs. | 67 | * Define the Moxa PCI vendor and device IDs. |
| 61 | */ | 68 | */ |
| @@ -92,24 +99,16 @@ static struct pci_device_id moxa_pcibrds[] = { | |||
| 92 | MODULE_DEVICE_TABLE(pci, moxa_pcibrds); | 99 | MODULE_DEVICE_TABLE(pci, moxa_pcibrds); |
| 93 | #endif /* CONFIG_PCI */ | 100 | #endif /* CONFIG_PCI */ |
| 94 | 101 | ||
| 95 | struct moxa_isa_board_conf { | 102 | struct moxa_port; |
| 96 | int boardType; | ||
| 97 | int numPorts; | ||
| 98 | unsigned long baseAddr; | ||
| 99 | }; | ||
| 100 | |||
| 101 | static struct moxa_isa_board_conf moxa_isa_boards[] = | ||
| 102 | { | ||
| 103 | /* {MOXA_BOARD_C218_ISA,8,0xDC000}, */ | ||
| 104 | }; | ||
| 105 | 103 | ||
| 106 | static struct moxa_board_conf { | 104 | static struct moxa_board_conf { |
| 107 | int boardType; | 105 | int boardType; |
| 108 | int numPorts; | 106 | int numPorts; |
| 109 | unsigned long baseAddr; | ||
| 110 | int busType; | 107 | int busType; |
| 111 | 108 | ||
| 112 | int loadstat; | 109 | unsigned int ready; |
| 110 | |||
| 111 | struct moxa_port *ports; | ||
| 113 | 112 | ||
| 114 | void __iomem *basemem; | 113 | void __iomem *basemem; |
| 115 | void __iomem *intNdx; | 114 | void __iomem *intNdx; |
| @@ -131,30 +130,27 @@ struct moxaq_str { | |||
| 131 | }; | 130 | }; |
| 132 | 131 | ||
| 133 | struct moxa_port { | 132 | struct moxa_port { |
| 133 | struct moxa_board_conf *board; | ||
| 134 | struct tty_struct *tty; | ||
| 135 | void __iomem *tableAddr; | ||
| 136 | |||
| 134 | int type; | 137 | int type; |
| 135 | int port; | ||
| 136 | int close_delay; | 138 | int close_delay; |
| 137 | unsigned short closing_wait; | 139 | unsigned int count; |
| 138 | int count; | ||
| 139 | int blocked_open; | ||
| 140 | long event; /* long req'd for set_bit --RR */ | ||
| 141 | int asyncflags; | 140 | int asyncflags; |
| 142 | unsigned long statusflags; | ||
| 143 | struct tty_struct *tty; | ||
| 144 | int cflag; | 141 | int cflag; |
| 142 | unsigned long statusflags; | ||
| 145 | wait_queue_head_t open_wait; | 143 | wait_queue_head_t open_wait; |
| 146 | struct completion close_wait; | ||
| 147 | |||
| 148 | struct timer_list emptyTimer; | ||
| 149 | 144 | ||
| 150 | char chkPort; | 145 | u8 DCDState; |
| 151 | char lineCtrl; | 146 | u8 lineCtrl; |
| 152 | void __iomem *tableAddr; | 147 | u8 lowChkFlag; |
| 153 | long curBaud; | 148 | }; |
| 154 | char DCDState; | ||
| 155 | char lowChkFlag; | ||
| 156 | 149 | ||
| 157 | ushort breakCnt; | 150 | struct mon_str { |
| 151 | int tick; | ||
| 152 | int rxcnt[MAX_PORTS]; | ||
| 153 | int txcnt[MAX_PORTS]; | ||
| 158 | }; | 154 | }; |
| 159 | 155 | ||
| 160 | /* statusflags */ | 156 | /* statusflags */ |
| @@ -168,20 +164,27 @@ struct moxa_port { | |||
| 168 | #define WAKEUP_CHARS 256 | 164 | #define WAKEUP_CHARS 256 |
| 169 | 165 | ||
| 170 | static int ttymajor = MOXAMAJOR; | 166 | static int ttymajor = MOXAMAJOR; |
| 167 | static struct mon_str moxaLog; | ||
| 168 | static unsigned int moxaFuncTout = HZ / 2; | ||
| 169 | static unsigned int moxaLowWaterChk; | ||
| 170 | static DEFINE_MUTEX(moxa_openlock); | ||
| 171 | /* Variables for insmod */ | 171 | /* Variables for insmod */ |
| 172 | #ifdef MODULE | 172 | #ifdef MODULE |
| 173 | static int baseaddr[4]; | 173 | static unsigned long baseaddr[MAX_BOARDS]; |
| 174 | static int type[4]; | 174 | static unsigned int type[MAX_BOARDS]; |
| 175 | static int numports[4]; | 175 | static unsigned int numports[MAX_BOARDS]; |
| 176 | #endif | 176 | #endif |
| 177 | 177 | ||
| 178 | MODULE_AUTHOR("William Chen"); | 178 | MODULE_AUTHOR("William Chen"); |
| 179 | MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); | 179 | MODULE_DESCRIPTION("MOXA Intellio Family Multiport Board Device Driver"); |
| 180 | MODULE_LICENSE("GPL"); | 180 | MODULE_LICENSE("GPL"); |
| 181 | #ifdef MODULE | 181 | #ifdef MODULE |
| 182 | module_param_array(type, int, NULL, 0); | 182 | module_param_array(type, uint, NULL, 0); |
| 183 | module_param_array(baseaddr, int, NULL, 0); | 183 | MODULE_PARM_DESC(type, "card type: C218=2, C320=4"); |
| 184 | module_param_array(numports, int, NULL, 0); | 184 | module_param_array(baseaddr, ulong, NULL, 0); |
| 185 | MODULE_PARM_DESC(baseaddr, "base address"); | ||
| 186 | module_param_array(numports, uint, NULL, 0); | ||
| 187 | MODULE_PARM_DESC(numports, "numports (ignored for C218)"); | ||
| 185 | #endif | 188 | #endif |
| 186 | module_param(ttymajor, int, 0); | 189 | module_param(ttymajor, int, 0); |
| 187 | 190 | ||
| @@ -194,9 +197,6 @@ static int moxa_write(struct tty_struct *, const unsigned char *, int); | |||
| 194 | static int moxa_write_room(struct tty_struct *); | 197 | static int moxa_write_room(struct tty_struct *); |
| 195 | static void moxa_flush_buffer(struct tty_struct *); | 198 | static void moxa_flush_buffer(struct tty_struct *); |
| 196 | static int moxa_chars_in_buffer(struct tty_struct *); | 199 | static int moxa_chars_in_buffer(struct tty_struct *); |
| 197 | static void moxa_flush_chars(struct tty_struct *); | ||
| 198 | static void moxa_put_char(struct tty_struct *, unsigned char); | ||
| 199 | static int moxa_ioctl(struct tty_struct *, struct file *, unsigned int, unsigned long); | ||
| 200 | static void moxa_throttle(struct tty_struct *); | 200 | static void moxa_throttle(struct tty_struct *); |
| 201 | static void moxa_unthrottle(struct tty_struct *); | 201 | static void moxa_unthrottle(struct tty_struct *); |
| 202 | static void moxa_set_termios(struct tty_struct *, struct ktermios *); | 202 | static void moxa_set_termios(struct tty_struct *, struct ktermios *); |
| @@ -208,44 +208,183 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 208 | unsigned int set, unsigned int clear); | 208 | unsigned int set, unsigned int clear); |
| 209 | static void moxa_poll(unsigned long); | 209 | static void moxa_poll(unsigned long); |
| 210 | static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); | 210 | static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); |
| 211 | static int moxa_block_till_ready(struct tty_struct *, struct file *, | ||
| 212 | struct moxa_port *); | ||
| 213 | static void moxa_setup_empty_event(struct tty_struct *); | 211 | static void moxa_setup_empty_event(struct tty_struct *); |
| 214 | static void moxa_check_xmit_empty(unsigned long); | ||
| 215 | static void moxa_shut_down(struct moxa_port *); | 212 | static void moxa_shut_down(struct moxa_port *); |
| 216 | static void moxa_receive_data(struct moxa_port *); | ||
| 217 | /* | 213 | /* |
| 218 | * moxa board interface functions: | 214 | * moxa board interface functions: |
| 219 | */ | 215 | */ |
| 220 | static void MoxaDriverInit(void); | 216 | static void MoxaPortEnable(struct moxa_port *); |
| 221 | static int MoxaDriverIoctl(unsigned int, unsigned long, int); | 217 | static void MoxaPortDisable(struct moxa_port *); |
| 222 | static int MoxaDriverPoll(void); | 218 | static int MoxaPortSetTermio(struct moxa_port *, struct ktermios *, speed_t); |
| 223 | static int MoxaPortsOfCard(int); | 219 | static int MoxaPortGetLineOut(struct moxa_port *, int *, int *); |
| 224 | static int MoxaPortIsValid(int); | 220 | static void MoxaPortLineCtrl(struct moxa_port *, int, int); |
| 225 | static void MoxaPortEnable(int); | 221 | static void MoxaPortFlowCtrl(struct moxa_port *, int, int, int, int, int); |
| 226 | static void MoxaPortDisable(int); | 222 | static int MoxaPortLineStatus(struct moxa_port *); |
| 227 | static long MoxaPortGetMaxBaud(int); | 223 | static void MoxaPortFlushData(struct moxa_port *, int); |
| 228 | static long MoxaPortSetBaud(int, long); | 224 | static int MoxaPortWriteData(struct moxa_port *, const unsigned char *, int); |
| 229 | static int MoxaPortSetTermio(int, struct ktermios *, speed_t); | 225 | static int MoxaPortReadData(struct moxa_port *); |
| 230 | static int MoxaPortGetLineOut(int, int *, int *); | 226 | static int MoxaPortTxQueue(struct moxa_port *); |
| 231 | static void MoxaPortLineCtrl(int, int, int); | 227 | static int MoxaPortRxQueue(struct moxa_port *); |
| 232 | static void MoxaPortFlowCtrl(int, int, int, int, int, int); | 228 | static int MoxaPortTxFree(struct moxa_port *); |
| 233 | static int MoxaPortLineStatus(int); | 229 | static void MoxaPortTxDisable(struct moxa_port *); |
| 234 | static int MoxaPortDCDChange(int); | 230 | static void MoxaPortTxEnable(struct moxa_port *); |
| 235 | static int MoxaPortDCDON(int); | ||
| 236 | static void MoxaPortFlushData(int, int); | ||
| 237 | static int MoxaPortWriteData(int, unsigned char *, int); | ||
| 238 | static int MoxaPortReadData(int, struct tty_struct *tty); | ||
| 239 | static int MoxaPortTxQueue(int); | ||
| 240 | static int MoxaPortRxQueue(int); | ||
| 241 | static int MoxaPortTxFree(int); | ||
| 242 | static void MoxaPortTxDisable(int); | ||
| 243 | static void MoxaPortTxEnable(int); | ||
| 244 | static int MoxaPortResetBrkCnt(int); | ||
| 245 | static void MoxaPortSendBreak(int, int); | ||
| 246 | static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *); | 231 | static int moxa_get_serial_info(struct moxa_port *, struct serial_struct __user *); |
| 247 | static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *); | 232 | static int moxa_set_serial_info(struct moxa_port *, struct serial_struct __user *); |
| 248 | static void MoxaSetFifo(int port, int enable); | 233 | static void MoxaSetFifo(struct moxa_port *port, int enable); |
| 234 | |||
| 235 | /* | ||
| 236 | * I/O functions | ||
| 237 | */ | ||
| 238 | |||
| 239 | static void moxa_wait_finish(void __iomem *ofsAddr) | ||
| 240 | { | ||
| 241 | unsigned long end = jiffies + moxaFuncTout; | ||
| 242 | |||
| 243 | while (readw(ofsAddr + FuncCode) != 0) | ||
| 244 | if (time_after(jiffies, end)) | ||
| 245 | return; | ||
| 246 | if (readw(ofsAddr + FuncCode) != 0 && printk_ratelimit()) | ||
| 247 | printk(KERN_WARNING "moxa function expired\n"); | ||
| 248 | } | ||
| 249 | |||
| 250 | static void moxafunc(void __iomem *ofsAddr, u16 cmd, u16 arg) | ||
| 251 | { | ||
| 252 | writew(arg, ofsAddr + FuncArg); | ||
| 253 | writew(cmd, ofsAddr + FuncCode); | ||
| 254 | moxa_wait_finish(ofsAddr); | ||
| 255 | } | ||
| 256 | |||
| 257 | static void moxa_low_water_check(void __iomem *ofsAddr) | ||
| 258 | { | ||
| 259 | u16 rptr, wptr, mask, len; | ||
| 260 | |||
| 261 | if (readb(ofsAddr + FlagStat) & Xoff_state) { | ||
| 262 | rptr = readw(ofsAddr + RXrptr); | ||
| 263 | wptr = readw(ofsAddr + RXwptr); | ||
| 264 | mask = readw(ofsAddr + RX_mask); | ||
| 265 | len = (wptr - rptr) & mask; | ||
| 266 | if (len <= Low_water) | ||
| 267 | moxafunc(ofsAddr, FC_SendXon, 0); | ||
| 268 | } | ||
| 269 | } | ||
| 270 | |||
| 271 | /* | ||
| 272 | * TTY operations | ||
| 273 | */ | ||
| 274 | |||
| 275 | static int moxa_ioctl(struct tty_struct *tty, struct file *file, | ||
| 276 | unsigned int cmd, unsigned long arg) | ||
| 277 | { | ||
| 278 | struct moxa_port *ch = tty->driver_data; | ||
| 279 | void __user *argp = (void __user *)arg; | ||
| 280 | int status, ret = 0; | ||
| 281 | |||
| 282 | if (tty->index == MAX_PORTS) { | ||
| 283 | if (cmd != MOXA_GETDATACOUNT && cmd != MOXA_GET_IOQUEUE && | ||
| 284 | cmd != MOXA_GETMSTATUS) | ||
| 285 | return -EINVAL; | ||
| 286 | } else if (!ch) | ||
| 287 | return -ENODEV; | ||
| 288 | |||
| 289 | switch (cmd) { | ||
| 290 | case MOXA_GETDATACOUNT: | ||
| 291 | moxaLog.tick = jiffies; | ||
| 292 | if (copy_to_user(argp, &moxaLog, sizeof(moxaLog))) | ||
| 293 | ret = -EFAULT; | ||
| 294 | break; | ||
| 295 | case MOXA_FLUSH_QUEUE: | ||
| 296 | MoxaPortFlushData(ch, arg); | ||
| 297 | break; | ||
| 298 | case MOXA_GET_IOQUEUE: { | ||
| 299 | struct moxaq_str __user *argm = argp; | ||
| 300 | struct moxaq_str tmp; | ||
| 301 | struct moxa_port *p; | ||
| 302 | unsigned int i, j; | ||
| 303 | |||
| 304 | mutex_lock(&moxa_openlock); | ||
| 305 | for (i = 0; i < MAX_BOARDS; i++) { | ||
| 306 | p = moxa_boards[i].ports; | ||
| 307 | for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { | ||
| 308 | memset(&tmp, 0, sizeof(tmp)); | ||
| 309 | if (moxa_boards[i].ready) { | ||
| 310 | tmp.inq = MoxaPortRxQueue(p); | ||
| 311 | tmp.outq = MoxaPortTxQueue(p); | ||
| 312 | } | ||
| 313 | if (copy_to_user(argm, &tmp, sizeof(tmp))) { | ||
| 314 | mutex_unlock(&moxa_openlock); | ||
| 315 | return -EFAULT; | ||
| 316 | } | ||
| 317 | } | ||
| 318 | } | ||
| 319 | mutex_unlock(&moxa_openlock); | ||
| 320 | break; | ||
| 321 | } case MOXA_GET_OQUEUE: | ||
| 322 | status = MoxaPortTxQueue(ch); | ||
| 323 | ret = put_user(status, (unsigned long __user *)argp); | ||
| 324 | break; | ||
| 325 | case MOXA_GET_IQUEUE: | ||
| 326 | status = MoxaPortRxQueue(ch); | ||
| 327 | ret = put_user(status, (unsigned long __user *)argp); | ||
| 328 | break; | ||
| 329 | case MOXA_GETMSTATUS: { | ||
| 330 | struct mxser_mstatus __user *argm = argp; | ||
| 331 | struct mxser_mstatus tmp; | ||
| 332 | struct moxa_port *p; | ||
| 333 | unsigned int i, j; | ||
| 334 | |||
| 335 | mutex_lock(&moxa_openlock); | ||
| 336 | for (i = 0; i < MAX_BOARDS; i++) { | ||
| 337 | p = moxa_boards[i].ports; | ||
| 338 | for (j = 0; j < MAX_PORTS_PER_BOARD; j++, p++, argm++) { | ||
| 339 | memset(&tmp, 0, sizeof(tmp)); | ||
| 340 | if (!moxa_boards[i].ready) | ||
| 341 | goto copy; | ||
| 342 | |||
| 343 | status = MoxaPortLineStatus(p); | ||
| 344 | if (status & 1) | ||
| 345 | tmp.cts = 1; | ||
| 346 | if (status & 2) | ||
| 347 | tmp.dsr = 1; | ||
| 348 | if (status & 4) | ||
| 349 | tmp.dcd = 1; | ||
| 350 | |||
| 351 | if (!p->tty || !p->tty->termios) | ||
| 352 | tmp.cflag = p->cflag; | ||
| 353 | else | ||
| 354 | tmp.cflag = p->tty->termios->c_cflag; | ||
| 355 | copy: | ||
| 356 | if (copy_to_user(argm, &tmp, sizeof(tmp))) { | ||
| 357 | mutex_unlock(&moxa_openlock); | ||
| 358 | return -EFAULT; | ||
| 359 | } | ||
| 360 | } | ||
| 361 | } | ||
| 362 | mutex_unlock(&moxa_openlock); | ||
| 363 | break; | ||
| 364 | } | ||
| 365 | case TIOCGSERIAL: | ||
| 366 | mutex_lock(&moxa_openlock); | ||
| 367 | ret = moxa_get_serial_info(ch, argp); | ||
| 368 | mutex_unlock(&moxa_openlock); | ||
| 369 | break; | ||
| 370 | case TIOCSSERIAL: | ||
| 371 | mutex_lock(&moxa_openlock); | ||
| 372 | ret = moxa_set_serial_info(ch, argp); | ||
| 373 | mutex_unlock(&moxa_openlock); | ||
| 374 | break; | ||
| 375 | default: | ||
| 376 | ret = -ENOIOCTLCMD; | ||
| 377 | } | ||
| 378 | return ret; | ||
| 379 | } | ||
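The MOXA_GETDATACOUNT, MOXA_GET_IOQUEUE and MOXA_GETMSTATUS commands above are the only ones accepted on the extra control tty (tty->index == MAX_PORTS). A minimal user-space sketch of querying the per-port queue counters might look like the following; the "/dev/moxactl" node name and the "moxa.h" header providing MOXA_GET_IOQUEUE, struct moxaq_str, MAX_BOARDS and MAX_PORTS_PER_BOARD are assumptions for illustration, not something this patch defines.

	/* Illustrative user-space sketch, not part of the patch. */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "moxa.h"	/* assumed copy of the driver's private header */

	int main(void)
	{
		struct moxaq_str q[MAX_BOARDS * MAX_PORTS_PER_BOARD];
		int fd = open("/dev/moxactl", O_RDONLY);	/* control node name is an assumption */

		if (fd < 0 || ioctl(fd, MOXA_GET_IOQUEUE, q) < 0) {
			perror("moxa");
			return 1;
		}
		printf("port 0: inq=%u outq=%u\n",
		       (unsigned)q[0].inq, (unsigned)q[0].outq);
		close(fd);
		return 0;
	}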
| 380 | |||
| 381 | static void moxa_break_ctl(struct tty_struct *tty, int state) | ||
| 382 | { | ||
| 383 | struct moxa_port *port = tty->driver_data; | ||
| 384 | |||
| 385 | moxafunc(port->tableAddr, state ? FC_SendBreak : FC_StopBreak, | ||
| 386 | Magic_code); | ||
| 387 | } | ||
| 249 | 388 | ||
| 250 | static const struct tty_operations moxa_ops = { | 389 | static const struct tty_operations moxa_ops = { |
| 251 | .open = moxa_open, | 390 | .open = moxa_open, |
| @@ -254,8 +393,6 @@ static const struct tty_operations moxa_ops = { | |||
| 254 | .write_room = moxa_write_room, | 393 | .write_room = moxa_write_room, |
| 255 | .flush_buffer = moxa_flush_buffer, | 394 | .flush_buffer = moxa_flush_buffer, |
| 256 | .chars_in_buffer = moxa_chars_in_buffer, | 395 | .chars_in_buffer = moxa_chars_in_buffer, |
| 257 | .flush_chars = moxa_flush_chars, | ||
| 258 | .put_char = moxa_put_char, | ||
| 259 | .ioctl = moxa_ioctl, | 396 | .ioctl = moxa_ioctl, |
| 260 | .throttle = moxa_throttle, | 397 | .throttle = moxa_throttle, |
| 261 | .unthrottle = moxa_unthrottle, | 398 | .unthrottle = moxa_unthrottle, |
| @@ -263,15 +400,509 @@ static const struct tty_operations moxa_ops = { | |||
| 263 | .stop = moxa_stop, | 400 | .stop = moxa_stop, |
| 264 | .start = moxa_start, | 401 | .start = moxa_start, |
| 265 | .hangup = moxa_hangup, | 402 | .hangup = moxa_hangup, |
| 403 | .break_ctl = moxa_break_ctl, | ||
| 266 | .tiocmget = moxa_tiocmget, | 404 | .tiocmget = moxa_tiocmget, |
| 267 | .tiocmset = moxa_tiocmset, | 405 | .tiocmset = moxa_tiocmset, |
| 268 | }; | 406 | }; |
| 269 | 407 | ||
| 270 | static struct tty_driver *moxaDriver; | 408 | static struct tty_driver *moxaDriver; |
| 271 | static struct moxa_port moxa_ports[MAX_PORTS]; | ||
| 272 | static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); | 409 | static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); |
| 273 | static DEFINE_SPINLOCK(moxa_lock); | 410 | static DEFINE_SPINLOCK(moxa_lock); |
| 274 | 411 | ||
| 412 | /* | ||
| 413 | * HW init | ||
| 414 | */ | ||
| 415 | |||
| 416 | static int moxa_check_fw_model(struct moxa_board_conf *brd, u8 model) | ||
| 417 | { | ||
| 418 | switch (brd->boardType) { | ||
| 419 | case MOXA_BOARD_C218_ISA: | ||
| 420 | case MOXA_BOARD_C218_PCI: | ||
| 421 | if (model != 1) | ||
| 422 | goto err; | ||
| 423 | break; | ||
| 424 | case MOXA_BOARD_CP204J: | ||
| 425 | if (model != 3) | ||
| 426 | goto err; | ||
| 427 | break; | ||
| 428 | default: | ||
| 429 | if (model != 2) | ||
| 430 | goto err; | ||
| 431 | break; | ||
| 432 | } | ||
| 433 | return 0; | ||
| 434 | err: | ||
| 435 | return -EINVAL; | ||
| 436 | } | ||
| 437 | |||
| 438 | static int moxa_check_fw(const void *ptr) | ||
| 439 | { | ||
| 440 | const __le16 *lptr = ptr; | ||
| 441 | |||
| 442 | if (*lptr != cpu_to_le16(0x7980)) | ||
| 443 | return -EINVAL; | ||
| 444 | |||
| 445 | return 0; | ||
| 446 | } | ||
| 447 | |||
| 448 | static int moxa_load_bios(struct moxa_board_conf *brd, const u8 *buf, | ||
| 449 | size_t len) | ||
| 450 | { | ||
| 451 | void __iomem *baseAddr = brd->basemem; | ||
| 452 | u16 tmp; | ||
| 453 | |||
| 454 | writeb(HW_reset, baseAddr + Control_reg); /* reset */ | ||
| 455 | msleep(10); | ||
| 456 | memset_io(baseAddr, 0, 4096); | ||
| 457 | memcpy_toio(baseAddr, buf, len); /* download BIOS */ | ||
| 458 | writeb(0, baseAddr + Control_reg); /* restart */ | ||
| 459 | |||
| 460 | msleep(2000); | ||
| 461 | |||
| 462 | switch (brd->boardType) { | ||
| 463 | case MOXA_BOARD_C218_ISA: | ||
| 464 | case MOXA_BOARD_C218_PCI: | ||
| 465 | tmp = readw(baseAddr + C218_key); | ||
| 466 | if (tmp != C218_KeyCode) | ||
| 467 | goto err; | ||
| 468 | break; | ||
| 469 | case MOXA_BOARD_CP204J: | ||
| 470 | tmp = readw(baseAddr + C218_key); | ||
| 471 | if (tmp != CP204J_KeyCode) | ||
| 472 | goto err; | ||
| 473 | break; | ||
| 474 | default: | ||
| 475 | tmp = readw(baseAddr + C320_key); | ||
| 476 | if (tmp != C320_KeyCode) | ||
| 477 | goto err; | ||
| 478 | tmp = readw(baseAddr + C320_status); | ||
| 479 | if (tmp != STS_init) { | ||
| 480 | printk(KERN_ERR "MOXA: bios upload failed -- CPU/Basic " | ||
| 481 | "module not found\n"); | ||
| 482 | return -EIO; | ||
| 483 | } | ||
| 484 | break; | ||
| 485 | } | ||
| 486 | |||
| 487 | return 0; | ||
| 488 | err: | ||
| 489 | printk(KERN_ERR "MOXA: bios upload failed -- board not found\n"); | ||
| 490 | return -EIO; | ||
| 491 | } | ||
| 492 | |||
| 493 | static int moxa_load_320b(struct moxa_board_conf *brd, const u8 *ptr, | ||
| 494 | size_t len) | ||
| 495 | { | ||
| 496 | void __iomem *baseAddr = brd->basemem; | ||
| 497 | |||
| 498 | if (len < 7168) { | ||
| 499 | printk(KERN_ERR "MOXA: invalid 320 bios -- too short\n"); | ||
| 500 | return -EINVAL; | ||
| 501 | } | ||
| 502 | |||
| 503 | writew(len - 7168 - 2, baseAddr + C320bapi_len); | ||
| 504 | writeb(1, baseAddr + Control_reg); /* Select Page 1 */ | ||
| 505 | memcpy_toio(baseAddr + DynPage_addr, ptr, 7168); | ||
| 506 | writeb(2, baseAddr + Control_reg); /* Select Page 2 */ | ||
| 507 | memcpy_toio(baseAddr + DynPage_addr, ptr + 7168, len - 7168); | ||
| 508 | |||
| 509 | return 0; | ||
| 510 | } | ||
| 511 | |||
| 512 | static int moxa_real_load_code(struct moxa_board_conf *brd, const void *ptr, | ||
| 513 | size_t len) | ||
| 514 | { | ||
| 515 | void __iomem *baseAddr = brd->basemem; | ||
| 516 | const u16 *uptr = ptr; | ||
| 517 | size_t wlen, len2, j; | ||
| 518 | unsigned long key, loadbuf, loadlen, checksum, checksum_ok; | ||
| 519 | unsigned int i, retry; | ||
| 520 | u16 usum, keycode; | ||
| 521 | |||
| 522 | keycode = (brd->boardType == MOXA_BOARD_CP204J) ? CP204J_KeyCode : | ||
| 523 | C218_KeyCode; | ||
| 524 | |||
| 525 | switch (brd->boardType) { | ||
| 526 | case MOXA_BOARD_CP204J: | ||
| 527 | case MOXA_BOARD_C218_ISA: | ||
| 528 | case MOXA_BOARD_C218_PCI: | ||
| 529 | key = C218_key; | ||
| 530 | loadbuf = C218_LoadBuf; | ||
| 531 | loadlen = C218DLoad_len; | ||
| 532 | checksum = C218check_sum; | ||
| 533 | checksum_ok = C218chksum_ok; | ||
| 534 | break; | ||
| 535 | default: | ||
| 536 | key = C320_key; | ||
| 537 | keycode = C320_KeyCode; | ||
| 538 | loadbuf = C320_LoadBuf; | ||
| 539 | loadlen = C320DLoad_len; | ||
| 540 | checksum = C320check_sum; | ||
| 541 | checksum_ok = C320chksum_ok; | ||
| 542 | break; | ||
| 543 | } | ||
| 544 | |||
| 545 | usum = 0; | ||
| 546 | wlen = len >> 1; | ||
| 547 | for (i = 0; i < wlen; i++) | ||
| 548 | usum += le16_to_cpu(uptr[i]); | ||
| 549 | retry = 0; | ||
| 550 | do { | ||
| 551 | wlen = len >> 1; | ||
| 552 | j = 0; | ||
| 553 | while (wlen) { | ||
| 554 | len2 = (wlen > 2048) ? 2048 : wlen; | ||
| 555 | wlen -= len2; | ||
| 556 | memcpy_toio(baseAddr + loadbuf, ptr + j, len2 << 1); | ||
| 557 | j += len2 << 1; | ||
| 558 | |||
| 559 | writew(len2, baseAddr + loadlen); | ||
| 560 | writew(0, baseAddr + key); | ||
| 561 | for (i = 0; i < 100; i++) { | ||
| 562 | if (readw(baseAddr + key) == keycode) | ||
| 563 | break; | ||
| 564 | msleep(10); | ||
| 565 | } | ||
| 566 | if (readw(baseAddr + key) != keycode) | ||
| 567 | return -EIO; | ||
| 568 | } | ||
| 569 | writew(0, baseAddr + loadlen); | ||
| 570 | writew(usum, baseAddr + checksum); | ||
| 571 | writew(0, baseAddr + key); | ||
| 572 | for (i = 0; i < 100; i++) { | ||
| 573 | if (readw(baseAddr + key) == keycode) | ||
| 574 | break; | ||
| 575 | msleep(10); | ||
| 576 | } | ||
| 577 | retry++; | ||
| 578 | } while ((readb(baseAddr + checksum_ok) != 1) && (retry < 3)); | ||
| 579 | if (readb(baseAddr + checksum_ok) != 1) | ||
| 580 | return -EIO; | ||
| 581 | |||
| 582 | writew(0, baseAddr + key); | ||
| 583 | for (i = 0; i < 600; i++) { | ||
| 584 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 585 | break; | ||
| 586 | msleep(10); | ||
| 587 | } | ||
| 588 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 589 | return -EIO; | ||
| 590 | |||
| 591 | if (MOXA_IS_320(brd)) { | ||
| 592 | if (brd->busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */ | ||
| 593 | writew(0x3800, baseAddr + TMS320_PORT1); | ||
| 594 | writew(0x3900, baseAddr + TMS320_PORT2); | ||
| 595 | writew(28499, baseAddr + TMS320_CLOCK); | ||
| 596 | } else { | ||
| 597 | writew(0x3200, baseAddr + TMS320_PORT1); | ||
| 598 | writew(0x3400, baseAddr + TMS320_PORT2); | ||
| 599 | writew(19999, baseAddr + TMS320_CLOCK); | ||
| 600 | } | ||
| 601 | } | ||
| 602 | writew(1, baseAddr + Disable_IRQ); | ||
| 603 | writew(0, baseAddr + Magic_no); | ||
| 604 | for (i = 0; i < 500; i++) { | ||
| 605 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 606 | break; | ||
| 607 | msleep(10); | ||
| 608 | } | ||
| 609 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 610 | return -EIO; | ||
| 611 | |||
| 612 | if (MOXA_IS_320(brd)) { | ||
| 613 | j = readw(baseAddr + Module_cnt); | ||
| 614 | if (j <= 0) | ||
| 615 | return -EIO; | ||
| 616 | brd->numPorts = j * 8; | ||
| 617 | writew(j, baseAddr + Module_no); | ||
| 618 | writew(0, baseAddr + Magic_no); | ||
| 619 | for (i = 0; i < 600; i++) { | ||
| 620 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 621 | break; | ||
| 622 | msleep(10); | ||
| 623 | } | ||
| 624 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 625 | return -EIO; | ||
| 626 | } | ||
| 627 | brd->intNdx = baseAddr + IRQindex; | ||
| 628 | brd->intPend = baseAddr + IRQpending; | ||
| 629 | brd->intTable = baseAddr + IRQtable; | ||
| 630 | |||
| 631 | return 0; | ||
| 632 | } | ||
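moxa_real_load_code() repeats the same handshake at each step of the download: clear the key word to kick the card, then poll it until the firmware writes the expected key code back. A hedged sketch of that pattern factored into a stand-alone helper is shown below; moxa_wait_key() is a hypothetical name, not a function added by this patch, and it returns -ETIMEDOUT where the code above uses -EIO.

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>

	/* Hypothetical helper: kick the mailbox, then poll until the firmware
	 * acknowledges with the expected key code, sleeping 10 ms per try. */
	static int moxa_wait_key(void __iomem *key, u16 keycode, unsigned int tries)
	{
		writew(0, key);				/* request: clear the key word */
		while (tries--) {
			if (readw(key) == keycode)	/* firmware acknowledged */
				return 0;
			msleep(10);
		}
		return -ETIMEDOUT;
	}

With such a helper, the 2 KiB-chunk loop above would reduce to memcpy_toio() plus one moxa_wait_key() call per chunk.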
| 633 | |||
| 634 | static int moxa_load_code(struct moxa_board_conf *brd, const void *ptr, | ||
| 635 | size_t len) | ||
| 636 | { | ||
| 637 | void __iomem *ofsAddr, *baseAddr = brd->basemem; | ||
| 638 | struct moxa_port *port; | ||
| 639 | int retval, i; | ||
| 640 | |||
| 641 | if (len % 2) { | ||
| 642 | printk(KERN_ERR "MOXA: bios length is not even\n"); | ||
| 643 | return -EINVAL; | ||
| 644 | } | ||
| 645 | |||
| 646 | retval = moxa_real_load_code(brd, ptr, len); /* may change numPorts */ | ||
| 647 | if (retval) | ||
| 648 | return retval; | ||
| 649 | |||
| 650 | switch (brd->boardType) { | ||
| 651 | case MOXA_BOARD_C218_ISA: | ||
| 652 | case MOXA_BOARD_C218_PCI: | ||
| 653 | case MOXA_BOARD_CP204J: | ||
| 654 | port = brd->ports; | ||
| 655 | for (i = 0; i < brd->numPorts; i++, port++) { | ||
| 656 | port->board = brd; | ||
| 657 | port->DCDState = 0; | ||
| 658 | port->tableAddr = baseAddr + Extern_table + | ||
| 659 | Extern_size * i; | ||
| 660 | ofsAddr = port->tableAddr; | ||
| 661 | writew(C218rx_mask, ofsAddr + RX_mask); | ||
| 662 | writew(C218tx_mask, ofsAddr + TX_mask); | ||
| 663 | writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb); | ||
| 664 | writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb); | ||
| 665 | |||
| 666 | writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb); | ||
| 667 | writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb); | ||
| 668 | |||
| 669 | } | ||
| 670 | break; | ||
| 671 | default: | ||
| 672 | port = brd->ports; | ||
| 673 | for (i = 0; i < brd->numPorts; i++, port++) { | ||
| 674 | port->board = brd; | ||
| 675 | port->DCDState = 0; | ||
| 676 | port->tableAddr = baseAddr + Extern_table + | ||
| 677 | Extern_size * i; | ||
| 678 | ofsAddr = port->tableAddr; | ||
| 679 | switch (brd->numPorts) { | ||
| 680 | case 8: | ||
| 681 | writew(C320p8rx_mask, ofsAddr + RX_mask); | ||
| 682 | writew(C320p8tx_mask, ofsAddr + TX_mask); | ||
| 683 | writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb); | ||
| 684 | writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb); | ||
| 685 | writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb); | ||
| 686 | writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb); | ||
| 687 | |||
| 688 | break; | ||
| 689 | case 16: | ||
| 690 | writew(C320p16rx_mask, ofsAddr + RX_mask); | ||
| 691 | writew(C320p16tx_mask, ofsAddr + TX_mask); | ||
| 692 | writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb); | ||
| 693 | writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb); | ||
| 694 | writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb); | ||
| 695 | writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb); | ||
| 696 | break; | ||
| 697 | |||
| 698 | case 24: | ||
| 699 | writew(C320p24rx_mask, ofsAddr + RX_mask); | ||
| 700 | writew(C320p24tx_mask, ofsAddr + TX_mask); | ||
| 701 | writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb); | ||
| 702 | writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb); | ||
| 703 | writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb); | ||
| 704 | writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); | ||
| 705 | break; | ||
| 706 | case 32: | ||
| 707 | writew(C320p32rx_mask, ofsAddr + RX_mask); | ||
| 708 | writew(C320p32tx_mask, ofsAddr + TX_mask); | ||
| 709 | writew(C320p32tx_ofs, ofsAddr + Ofs_txb); | ||
| 710 | writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb); | ||
| 711 | writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb); | ||
| 712 | writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb); | ||
| 713 | writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); | ||
| 714 | break; | ||
| 715 | } | ||
| 716 | } | ||
| 717 | break; | ||
| 718 | } | ||
| 719 | return 0; | ||
| 720 | } | ||
| 721 | |||
| 722 | static int moxa_load_fw(struct moxa_board_conf *brd, const struct firmware *fw) | ||
| 723 | { | ||
| 724 | void *ptr = fw->data; | ||
| 725 | char rsn[64]; | ||
| 726 | u16 lens[5]; | ||
| 727 | size_t len; | ||
| 728 | unsigned int a, lenp, lencnt; | ||
| 729 | int ret = -EINVAL; | ||
| 730 | struct { | ||
| 731 | __le32 magic; /* 0x34303430 */ | ||
| 732 | u8 reserved1[2]; | ||
| 733 | u8 type; /* UNIX = 3 */ | ||
| 734 | u8 model; /* C218T=1, C320T=2, CP204=3 */ | ||
| 735 | u8 reserved2[8]; | ||
| 736 | __le16 len[5]; | ||
| 737 | } *hdr = ptr; | ||
| 738 | |||
| 739 | BUILD_BUG_ON(ARRAY_SIZE(hdr->len) != ARRAY_SIZE(lens)); | ||
| 740 | |||
| 741 | if (fw->size < MOXA_FW_HDRLEN) { | ||
| 742 | strcpy(rsn, "too short (even header won't fit)"); | ||
| 743 | goto err; | ||
| 744 | } | ||
| 745 | if (hdr->magic != cpu_to_le32(0x30343034)) { | ||
| 746 | sprintf(rsn, "bad magic: %.8x", le32_to_cpu(hdr->magic)); | ||
| 747 | goto err; | ||
| 748 | } | ||
| 749 | if (hdr->type != 3) { | ||
| 750 | sprintf(rsn, "not for linux, type is %u", hdr->type); | ||
| 751 | goto err; | ||
| 752 | } | ||
| 753 | if (moxa_check_fw_model(brd, hdr->model)) { | ||
| 754 | sprintf(rsn, "not for this card, model is %u", hdr->model); | ||
| 755 | goto err; | ||
| 756 | } | ||
| 757 | |||
| 758 | len = MOXA_FW_HDRLEN; | ||
| 759 | lencnt = hdr->model == 2 ? 5 : 3; | ||
| 760 | for (a = 0; a < ARRAY_SIZE(lens); a++) { | ||
| 761 | lens[a] = le16_to_cpu(hdr->len[a]); | ||
| 762 | if (lens[a] && len + lens[a] <= fw->size && | ||
| 763 | moxa_check_fw(&fw->data[len])) | ||
| 764 | printk(KERN_WARNING "MOXA firmware: unexpected input " | ||
| 765 | "at offset %u, but going on\n", (u32)len); | ||
| 766 | if (!lens[a] && a < lencnt) { | ||
| 767 | sprintf(rsn, "too few entries in fw file"); | ||
| 768 | goto err; | ||
| 769 | } | ||
| 770 | len += lens[a]; | ||
| 771 | } | ||
| 772 | |||
| 773 | if (len != fw->size) { | ||
| 774 | sprintf(rsn, "bad length: %u (should be %u)", (u32)fw->size, | ||
| 775 | (u32)len); | ||
| 776 | goto err; | ||
| 777 | } | ||
| 778 | |||
| 779 | ptr += MOXA_FW_HDRLEN; | ||
| 780 | lenp = 0; /* bios */ | ||
| 781 | |||
| 782 | strcpy(rsn, "read above"); | ||
| 783 | |||
| 784 | ret = moxa_load_bios(brd, ptr, lens[lenp]); | ||
| 785 | if (ret) | ||
| 786 | goto err; | ||
| 787 | |||
| 788 | /* we skip the tty section (lens[1]), since we don't need it */ | ||
| 789 | ptr += lens[lenp] + lens[lenp + 1]; | ||
| 790 | lenp += 2; /* comm */ | ||
| 791 | |||
| 792 | if (hdr->model == 2) { | ||
| 793 | ret = moxa_load_320b(brd, ptr, lens[lenp]); | ||
| 794 | if (ret) | ||
| 795 | goto err; | ||
| 796 | /* skip another tty */ | ||
| 797 | ptr += lens[lenp] + lens[lenp + 1]; | ||
| 798 | lenp += 2; | ||
| 799 | } | ||
| 800 | |||
| 801 | ret = moxa_load_code(brd, ptr, lens[lenp]); | ||
| 802 | if (ret) | ||
| 803 | goto err; | ||
| 804 | |||
| 805 | return 0; | ||
| 806 | err: | ||
| 807 | printk(KERN_ERR "firmware failed to load, reason: %s\n", rsn); | ||
| 808 | return ret; | ||
| 809 | } | ||
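moxa_load_fw() expects a .cod image laid out as a fixed header followed by length-prefixed sections: len[0] is the BIOS, len[1] an unused tty section, and the last used section is the communication firmware passed to moxa_load_code(); C320-class images (model 2) carry an extra pair, the 320 BIOS extension and another skipped tty section, in between. A small user-space sketch that dumps the section table follows; it relies only on the header layout declared above (magic at offset 0, type at 6, model at 7, five little-endian 16-bit lengths from offset 16) and makes no claim about the value of MOXA_FW_HDRLEN.

	/* Illustrative user-space sketch, not part of the patch: print the
	 * section table of a MOXA .cod firmware image. */
	#include <stdio.h>
	#include <stdint.h>

	int main(int argc, char **argv)
	{
		uint8_t hdr[26];
		FILE *f = NULL;
		int i;

		if (argc < 2 || !(f = fopen(argv[1], "rb")) ||
		    fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
			fprintf(stderr, "usage: %s firmware.cod\n", argv[0]);
			return 1;
		}
		printf("magic 0x%02x%02x%02x%02x, type %u, model %u\n",
		       hdr[3], hdr[2], hdr[1], hdr[0], hdr[6], hdr[7]);
		for (i = 0; i < 5; i++)	/* five little-endian u16 lengths at offset 16 */
			printf("section %d: %u bytes\n", i,
			       (unsigned)(hdr[16 + 2 * i] | (hdr[17 + 2 * i] << 8)));
		fclose(f);
		return 0;
	}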
| 810 | |||
| 811 | static int moxa_init_board(struct moxa_board_conf *brd, struct device *dev) | ||
| 812 | { | ||
| 813 | const struct firmware *fw; | ||
| 814 | const char *file; | ||
| 815 | struct moxa_port *p; | ||
| 816 | unsigned int i; | ||
| 817 | int ret; | ||
| 818 | |||
| 819 | brd->ports = kcalloc(MAX_PORTS_PER_BOARD, sizeof(*brd->ports), | ||
| 820 | GFP_KERNEL); | ||
| 821 | if (brd->ports == NULL) { | ||
| 822 | printk(KERN_ERR "cannot allocate memory for ports\n"); | ||
| 823 | ret = -ENOMEM; | ||
| 824 | goto err; | ||
| 825 | } | ||
| 826 | |||
| 827 | for (i = 0, p = brd->ports; i < MAX_PORTS_PER_BOARD; i++, p++) { | ||
| 828 | p->type = PORT_16550A; | ||
| 829 | p->close_delay = 5 * HZ / 10; | ||
| 830 | p->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; | ||
| 831 | init_waitqueue_head(&p->open_wait); | ||
| 832 | } | ||
| 833 | |||
| 834 | switch (brd->boardType) { | ||
| 835 | case MOXA_BOARD_C218_ISA: | ||
| 836 | case MOXA_BOARD_C218_PCI: | ||
| 837 | file = "c218tunx.cod"; | ||
| 838 | break; | ||
| 839 | case MOXA_BOARD_CP204J: | ||
| 840 | file = "cp204unx.cod"; | ||
| 841 | break; | ||
| 842 | default: | ||
| 843 | file = "c320tunx.cod"; | ||
| 844 | break; | ||
| 845 | } | ||
| 846 | |||
| 847 | ret = request_firmware(&fw, file, dev); | ||
| 848 | if (ret) { | ||
| 849 | printk(KERN_ERR "MOXA: request_firmware failed. Make sure " | ||
| 850 | "you've placed '%s' file into your firmware " | ||
| 851 | "loader directory (e.g. /lib/firmware)\n", | ||
| 852 | file); | ||
| 853 | goto err_free; | ||
| 854 | } | ||
| 855 | |||
| 856 | ret = moxa_load_fw(brd, fw); | ||
| 857 | |||
| 858 | release_firmware(fw); | ||
| 859 | |||
| 860 | if (ret) | ||
| 861 | goto err_free; | ||
| 862 | |||
| 863 | spin_lock_bh(&moxa_lock); | ||
| 864 | brd->ready = 1; | ||
| 865 | if (!timer_pending(&moxaTimer)) | ||
| 866 | mod_timer(&moxaTimer, jiffies + HZ / 50); | ||
| 867 | spin_unlock_bh(&moxa_lock); | ||
| 868 | |||
| 869 | return 0; | ||
| 870 | err_free: | ||
| 871 | kfree(brd->ports); | ||
| 872 | err: | ||
| 873 | return ret; | ||
| 874 | } | ||
| 875 | |||
| 876 | static void moxa_board_deinit(struct moxa_board_conf *brd) | ||
| 877 | { | ||
| 878 | unsigned int a, opened; | ||
| 879 | |||
| 880 | mutex_lock(&moxa_openlock); | ||
| 881 | spin_lock_bh(&moxa_lock); | ||
| 882 | brd->ready = 0; | ||
| 883 | spin_unlock_bh(&moxa_lock); | ||
| 884 | |||
| 885 | /* PCI hot-unplug support */ | ||
| 886 | for (a = 0; a < brd->numPorts; a++) | ||
| 887 | if (brd->ports[a].asyncflags & ASYNC_INITIALIZED) | ||
| 888 | tty_hangup(brd->ports[a].tty); | ||
| 889 | while (1) { | ||
| 890 | opened = 0; | ||
| 891 | for (a = 0; a < brd->numPorts; a++) | ||
| 892 | if (brd->ports[a].asyncflags & ASYNC_INITIALIZED) | ||
| 893 | opened++; | ||
| 894 | mutex_unlock(&moxa_openlock); | ||
| 895 | if (!opened) | ||
| 896 | break; | ||
| 897 | msleep(50); | ||
| 898 | mutex_lock(&moxa_openlock); | ||
| 899 | } | ||
| 900 | |||
| 901 | iounmap(brd->basemem); | ||
| 902 | brd->basemem = NULL; | ||
| 903 | kfree(brd->ports); | ||
| 904 | } | ||
| 905 | |||
| 275 | #ifdef CONFIG_PCI | 906 | #ifdef CONFIG_PCI |
| 276 | static int __devinit moxa_pci_probe(struct pci_dev *pdev, | 907 | static int __devinit moxa_pci_probe(struct pci_dev *pdev, |
| 277 | const struct pci_device_id *ent) | 908 | const struct pci_device_id *ent) |
| @@ -299,10 +930,17 @@ static int __devinit moxa_pci_probe(struct pci_dev *pdev, | |||
| 299 | } | 930 | } |
| 300 | 931 | ||
| 301 | board = &moxa_boards[i]; | 932 | board = &moxa_boards[i]; |
| 302 | board->basemem = pci_iomap(pdev, 2, 0x4000); | 933 | |
| 934 | retval = pci_request_region(pdev, 2, "moxa-base"); | ||
| 935 | if (retval) { | ||
| 936 | dev_err(&pdev->dev, "can't request pci region 2\n"); | ||
| 937 | goto err; | ||
| 938 | } | ||
| 939 | |||
| 940 | board->basemem = ioremap_nocache(pci_resource_start(pdev, 2), 0x4000); | ||
| 303 | if (board->basemem == NULL) { | 941 | if (board->basemem == NULL) { |
| 304 | dev_err(&pdev->dev, "can't remap io space 2\n"); | 942 | dev_err(&pdev->dev, "can't remap io space 2\n"); |
| 305 | goto err; | 943 | goto err_reg; |
| 306 | } | 944 | } |
| 307 | 945 | ||
| 308 | board->boardType = board_type; | 946 | board->boardType = board_type; |
| @@ -321,9 +959,21 @@ static int __devinit moxa_pci_probe(struct pci_dev *pdev, | |||
| 321 | } | 959 | } |
| 322 | board->busType = MOXA_BUS_TYPE_PCI; | 960 | board->busType = MOXA_BUS_TYPE_PCI; |
| 323 | 961 | ||
| 962 | retval = moxa_init_board(board, &pdev->dev); | ||
| 963 | if (retval) | ||
| 964 | goto err_base; | ||
| 965 | |||
| 324 | pci_set_drvdata(pdev, board); | 966 | pci_set_drvdata(pdev, board); |
| 325 | 967 | ||
| 326 | return (0); | 968 | dev_info(&pdev->dev, "board '%s' ready (%u ports, firmware loaded)\n", |
| 969 | moxa_brdname[board_type - 1], board->numPorts); | ||
| 970 | |||
| 971 | return 0; | ||
| 972 | err_base: | ||
| 973 | iounmap(board->basemem); | ||
| 974 | board->basemem = NULL; | ||
| 975 | err_reg: | ||
| 976 | pci_release_region(pdev, 2); | ||
| 327 | err: | 977 | err: |
| 328 | return retval; | 978 | return retval; |
| 329 | } | 979 | } |
| @@ -332,8 +982,9 @@ static void __devexit moxa_pci_remove(struct pci_dev *pdev) | |||
| 332 | { | 982 | { |
| 333 | struct moxa_board_conf *brd = pci_get_drvdata(pdev); | 983 | struct moxa_board_conf *brd = pci_get_drvdata(pdev); |
| 334 | 984 | ||
| 335 | pci_iounmap(pdev, brd->basemem); | 985 | moxa_board_deinit(brd); |
| 336 | brd->basemem = NULL; | 986 | |
| 987 | pci_release_region(pdev, 2); | ||
| 337 | } | 988 | } |
| 338 | 989 | ||
| 339 | static struct pci_driver moxa_pci_driver = { | 990 | static struct pci_driver moxa_pci_driver = { |
| @@ -346,8 +997,8 @@ static struct pci_driver moxa_pci_driver = { | |||
| 346 | 997 | ||
| 347 | static int __init moxa_init(void) | 998 | static int __init moxa_init(void) |
| 348 | { | 999 | { |
| 349 | int i, numBoards, retval = 0; | 1000 | unsigned int isabrds = 0; |
| 350 | struct moxa_port *ch; | 1001 | int retval = 0; |
| 351 | 1002 | ||
| 352 | printk(KERN_INFO "MOXA Intellio family driver version %s\n", | 1003 | printk(KERN_INFO "MOXA Intellio family driver version %s\n", |
| 353 | MOXA_VERSION); | 1004 | MOXA_VERSION); |
| @@ -368,154 +1019,176 @@ static int __init moxa_init(void) | |||
| 368 | moxaDriver->flags = TTY_DRIVER_REAL_RAW; | 1019 | moxaDriver->flags = TTY_DRIVER_REAL_RAW; |
| 369 | tty_set_operations(moxaDriver, &moxa_ops); | 1020 | tty_set_operations(moxaDriver, &moxa_ops); |
| 370 | 1021 | ||
| 371 | for (i = 0, ch = moxa_ports; i < MAX_PORTS; i++, ch++) { | ||
| 372 | ch->type = PORT_16550A; | ||
| 373 | ch->port = i; | ||
| 374 | ch->close_delay = 5 * HZ / 10; | ||
| 375 | ch->closing_wait = 30 * HZ; | ||
| 376 | ch->cflag = B9600 | CS8 | CREAD | CLOCAL | HUPCL; | ||
| 377 | init_waitqueue_head(&ch->open_wait); | ||
| 378 | init_completion(&ch->close_wait); | ||
| 379 | |||
| 380 | setup_timer(&ch->emptyTimer, moxa_check_xmit_empty, | ||
| 381 | (unsigned long)ch); | ||
| 382 | } | ||
| 383 | |||
| 384 | pr_debug("Moxa tty devices major number = %d\n", ttymajor); | ||
| 385 | |||
| 386 | if (tty_register_driver(moxaDriver)) { | 1022 | if (tty_register_driver(moxaDriver)) { |
| 387 | printk(KERN_ERR "Couldn't install MOXA Smartio family driver !\n"); | 1023 | printk(KERN_ERR "can't register MOXA Smartio tty driver!\n"); |
| 388 | put_tty_driver(moxaDriver); | 1024 | put_tty_driver(moxaDriver); |
| 389 | return -1; | 1025 | return -1; |
| 390 | } | 1026 | } |
| 391 | 1027 | ||
| 392 | mod_timer(&moxaTimer, jiffies + HZ / 50); | 1028 | /* Find the boards defined from module args. */ |
| 393 | |||
| 394 | /* Find the boards defined in source code */ | ||
| 395 | numBoards = 0; | ||
| 396 | for (i = 0; i < MAX_BOARDS; i++) { | ||
| 397 | if ((moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) || | ||
| 398 | (moxa_isa_boards[i].boardType == MOXA_BOARD_C320_ISA)) { | ||
| 399 | moxa_boards[numBoards].boardType = moxa_isa_boards[i].boardType; | ||
| 400 | if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) | ||
| 401 | moxa_boards[numBoards].numPorts = 8; | ||
| 402 | else | ||
| 403 | moxa_boards[numBoards].numPorts = moxa_isa_boards[i].numPorts; | ||
| 404 | moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA; | ||
| 405 | moxa_boards[numBoards].baseAddr = moxa_isa_boards[i].baseAddr; | ||
| 406 | pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", | ||
| 407 | numBoards + 1, | ||
| 408 | moxa_brdname[moxa_boards[numBoards].boardType-1], | ||
| 409 | moxa_boards[numBoards].baseAddr); | ||
| 410 | numBoards++; | ||
| 411 | } | ||
| 412 | } | ||
| 413 | /* Find the boards defined form module args. */ | ||
| 414 | #ifdef MODULE | 1029 | #ifdef MODULE |
| 1030 | { | ||
| 1031 | struct moxa_board_conf *brd = moxa_boards; | ||
| 1032 | unsigned int i; | ||
| 415 | for (i = 0; i < MAX_BOARDS; i++) { | 1033 | for (i = 0; i < MAX_BOARDS; i++) { |
| 416 | if ((type[i] == MOXA_BOARD_C218_ISA) || | 1034 | if (!baseaddr[i]) |
| 417 | (type[i] == MOXA_BOARD_C320_ISA)) { | 1035 | break; |
| 1036 | if (type[i] == MOXA_BOARD_C218_ISA || | ||
| 1037 | type[i] == MOXA_BOARD_C320_ISA) { | ||
| 418 | pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", | 1038 | pr_debug("Moxa board %2d: %s board(baseAddr=%lx)\n", |
| 419 | numBoards + 1, moxa_brdname[type[i] - 1], | 1039 | isabrds + 1, moxa_brdname[type[i] - 1], |
| 420 | (unsigned long)baseaddr[i]); | 1040 | baseaddr[i]); |
| 421 | if (numBoards >= MAX_BOARDS) { | 1041 | brd->boardType = type[i]; |
| 422 | printk(KERN_WARNING "More than %d MOXA " | 1042 | brd->numPorts = type[i] == MOXA_BOARD_C218_ISA ? 8 : |
| 423 | "Intellio family boards found. Board " | 1043 | numports[i]; |
| 424 | "is ignored.\n", MAX_BOARDS); | 1044 | brd->busType = MOXA_BUS_TYPE_ISA; |
| 1045 | brd->basemem = ioremap_nocache(baseaddr[i], 0x4000); | ||
| 1046 | if (!brd->basemem) { | ||
| 1047 | printk(KERN_ERR "MOXA: can't remap %lx\n", | ||
| 1048 | baseaddr[i]); | ||
| 425 | continue; | 1049 | continue; |
| 426 | } | 1050 | } |
| 427 | moxa_boards[numBoards].boardType = type[i]; | 1051 | if (moxa_init_board(brd, NULL)) { |
| 428 | if (moxa_isa_boards[i].boardType == MOXA_BOARD_C218_ISA) | 1052 | iounmap(brd->basemem); |
| 429 | moxa_boards[numBoards].numPorts = 8; | 1053 | brd->basemem = NULL; |
| 430 | else | 1054 | continue; |
| 431 | moxa_boards[numBoards].numPorts = numports[i]; | 1055 | } |
| 432 | moxa_boards[numBoards].busType = MOXA_BUS_TYPE_ISA; | 1056 | |
| 433 | moxa_boards[numBoards].baseAddr = baseaddr[i]; | 1057 | printk(KERN_INFO "MOXA isa board found at 0x%.8lx and " |
| 434 | numBoards++; | 1058 | "ready (%u ports, firmware loaded)\n", |
| 1059 | baseaddr[i], brd->numPorts); | ||
| 1060 | |||
| 1061 | brd++; | ||
| 1062 | isabrds++; | ||
| 435 | } | 1063 | } |
| 436 | } | 1064 | } |
| 1065 | } | ||
| 437 | #endif | 1066 | #endif |
| 438 | 1067 | ||
| 439 | #ifdef CONFIG_PCI | 1068 | #ifdef CONFIG_PCI |
| 440 | retval = pci_register_driver(&moxa_pci_driver); | 1069 | retval = pci_register_driver(&moxa_pci_driver); |
| 441 | if (retval) { | 1070 | if (retval) { |
| 442 | printk(KERN_ERR "Can't register moxa pci driver!\n"); | 1071 | printk(KERN_ERR "Can't register MOXA pci driver!\n"); |
| 443 | if (numBoards) | 1072 | if (isabrds) |
| 444 | retval = 0; | 1073 | retval = 0; |
| 445 | } | 1074 | } |
| 446 | #endif | 1075 | #endif |
| 447 | 1076 | ||
| 448 | for (i = 0; i < numBoards; i++) { | ||
| 449 | moxa_boards[i].basemem = ioremap(moxa_boards[i].baseAddr, | ||
| 450 | 0x4000); | ||
| 451 | } | ||
| 452 | |||
| 453 | return retval; | 1077 | return retval; |
| 454 | } | 1078 | } |
| 455 | 1079 | ||
| 456 | static void __exit moxa_exit(void) | 1080 | static void __exit moxa_exit(void) |
| 457 | { | 1081 | { |
| 458 | int i; | 1082 | unsigned int i; |
| 459 | 1083 | ||
| 460 | del_timer_sync(&moxaTimer); | 1084 | #ifdef CONFIG_PCI |
| 1085 | pci_unregister_driver(&moxa_pci_driver); | ||
| 1086 | #endif | ||
| 1087 | |||
| 1088 | for (i = 0; i < MAX_BOARDS; i++) /* ISA boards */ | ||
| 1089 | if (moxa_boards[i].ready) | ||
| 1090 | moxa_board_deinit(&moxa_boards[i]); | ||
| 461 | 1091 | ||
| 462 | for (i = 0; i < MAX_PORTS; i++) | 1092 | del_timer_sync(&moxaTimer); |
| 463 | del_timer_sync(&moxa_ports[i].emptyTimer); | ||
| 464 | 1093 | ||
| 465 | if (tty_unregister_driver(moxaDriver)) | 1094 | if (tty_unregister_driver(moxaDriver)) |
| 466 | printk(KERN_ERR "Couldn't unregister MOXA Intellio family " | 1095 | printk(KERN_ERR "Couldn't unregister MOXA Intellio family " |
| 467 | "serial driver\n"); | 1096 | "serial driver\n"); |
| 468 | put_tty_driver(moxaDriver); | 1097 | put_tty_driver(moxaDriver); |
| 469 | |||
| 470 | #ifdef CONFIG_PCI | ||
| 471 | pci_unregister_driver(&moxa_pci_driver); | ||
| 472 | #endif | ||
| 473 | |||
| 474 | for (i = 0; i < MAX_BOARDS; i++) | ||
| 475 | if (moxa_boards[i].basemem) | ||
| 476 | iounmap(moxa_boards[i].basemem); | ||
| 477 | } | 1098 | } |
| 478 | 1099 | ||
| 479 | module_init(moxa_init); | 1100 | module_init(moxa_init); |
| 480 | module_exit(moxa_exit); | 1101 | module_exit(moxa_exit); |
| 481 | 1102 | ||
| 1103 | static void moxa_close_port(struct moxa_port *ch) | ||
| 1104 | { | ||
| 1105 | moxa_shut_down(ch); | ||
| 1106 | MoxaPortFlushData(ch, 2); | ||
| 1107 | ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; | ||
| 1108 | ch->tty->driver_data = NULL; | ||
| 1109 | ch->tty = NULL; | ||
| 1110 | } | ||
| 1111 | |||
| 1112 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | ||
| 1113 | struct moxa_port *ch) | ||
| 1114 | { | ||
| 1115 | DEFINE_WAIT(wait); | ||
| 1116 | int retval = 0; | ||
| 1117 | u8 dcd; | ||
| 1118 | |||
| 1119 | while (1) { | ||
| 1120 | prepare_to_wait(&ch->open_wait, &wait, TASK_INTERRUPTIBLE); | ||
| 1121 | if (tty_hung_up_p(filp)) { | ||
| 1122 | #ifdef SERIAL_DO_RESTART | ||
| 1123 | retval = -ERESTARTSYS; | ||
| 1124 | #else | ||
| 1125 | retval = -EAGAIN; | ||
| 1126 | #endif | ||
| 1127 | break; | ||
| 1128 | } | ||
| 1129 | spin_lock_bh(&moxa_lock); | ||
| 1130 | dcd = ch->DCDState; | ||
| 1131 | spin_unlock_bh(&moxa_lock); | ||
| 1132 | if (dcd) | ||
| 1133 | break; | ||
| 1134 | |||
| 1135 | if (signal_pending(current)) { | ||
| 1136 | retval = -ERESTARTSYS; | ||
| 1137 | break; | ||
| 1138 | } | ||
| 1139 | schedule(); | ||
| 1140 | } | ||
| 1141 | finish_wait(&ch->open_wait, &wait); | ||
| 1142 | |||
| 1143 | return retval; | ||
| 1144 | } | ||
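moxa_block_till_ready() is built on the prepare_to_wait()/finish_wait() idiom: re-arm the wait on every pass, re-check the condition, and only then schedule(). A generic, hedged sketch of that idiom with the condition abstracted out follows; wait_for_cond() is a hypothetical helper, not part of the driver.

	#include <linux/types.h>
	#include <linux/wait.h>
	#include <linux/sched.h>
	#include <linux/errno.h>

	/* Hypothetical sketch: sleep until cond(arg) holds (e.g. DCD asserted)
	 * or a signal is delivered, using prepare_to_wait()/finish_wait(). */
	static int wait_for_cond(wait_queue_head_t *wq, bool (*cond)(void *), void *arg)
	{
		DEFINE_WAIT(wait);
		int ret = 0;

		for (;;) {
			prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
			if (cond(arg))
				break;
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}
		finish_wait(wq, &wait);
		return ret;
	}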
| 1145 | |||
| 482 | static int moxa_open(struct tty_struct *tty, struct file *filp) | 1146 | static int moxa_open(struct tty_struct *tty, struct file *filp) |
| 483 | { | 1147 | { |
| 1148 | struct moxa_board_conf *brd; | ||
| 484 | struct moxa_port *ch; | 1149 | struct moxa_port *ch; |
| 485 | int port; | 1150 | int port; |
| 486 | int retval; | 1151 | int retval; |
| 487 | 1152 | ||
| 488 | port = tty->index; | 1153 | port = tty->index; |
| 489 | if (port == MAX_PORTS) { | 1154 | if (port == MAX_PORTS) { |
| 490 | return (0); | 1155 | return capable(CAP_SYS_ADMIN) ? 0 : -EPERM; |
| 491 | } | 1156 | } |
| 492 | if (!MoxaPortIsValid(port)) { | 1157 | if (mutex_lock_interruptible(&moxa_openlock)) |
| 493 | tty->driver_data = NULL; | 1158 | return -ERESTARTSYS; |
| 494 | return (-ENODEV); | 1159 | brd = &moxa_boards[port / MAX_PORTS_PER_BOARD]; |
| 1160 | if (!brd->ready) { | ||
| 1161 | mutex_unlock(&moxa_openlock); | ||
| 1162 | return -ENODEV; | ||
| 495 | } | 1163 | } |
| 496 | 1164 | ||
| 497 | ch = &moxa_ports[port]; | 1165 | ch = &brd->ports[port % MAX_PORTS_PER_BOARD]; |
| 498 | ch->count++; | 1166 | ch->count++; |
| 499 | tty->driver_data = ch; | 1167 | tty->driver_data = ch; |
| 500 | ch->tty = tty; | 1168 | ch->tty = tty; |
| 501 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) { | 1169 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) { |
| 502 | ch->statusflags = 0; | 1170 | ch->statusflags = 0; |
| 503 | moxa_set_tty_param(tty, tty->termios); | 1171 | moxa_set_tty_param(tty, tty->termios); |
| 504 | MoxaPortLineCtrl(ch->port, 1, 1); | 1172 | MoxaPortLineCtrl(ch, 1, 1); |
| 505 | MoxaPortEnable(ch->port); | 1173 | MoxaPortEnable(ch); |
| 1174 | MoxaSetFifo(ch, ch->type == PORT_16550A); | ||
| 506 | ch->asyncflags |= ASYNC_INITIALIZED; | 1175 | ch->asyncflags |= ASYNC_INITIALIZED; |
| 507 | } | 1176 | } |
| 508 | retval = moxa_block_till_ready(tty, filp, ch); | 1177 | mutex_unlock(&moxa_openlock); |
| 509 | 1178 | ||
| 510 | moxa_unthrottle(tty); | 1179 | retval = 0; |
| 511 | 1180 | if (!(filp->f_flags & O_NONBLOCK) && !C_CLOCAL(tty)) | |
| 512 | if (ch->type == PORT_16550A) { | 1181 | retval = moxa_block_till_ready(tty, filp, ch); |
| 513 | MoxaSetFifo(ch->port, 1); | 1182 | mutex_lock(&moxa_openlock); |
| 514 | } else { | 1183 | if (retval) { |
| 515 | MoxaSetFifo(ch->port, 0); | 1184 | if (ch->count) /* 0 means already hung up... */ |
| 516 | } | 1185 | if (--ch->count == 0) |
| 1186 | moxa_close_port(ch); | ||
| 1187 | } else | ||
| 1188 | ch->asyncflags |= ASYNC_NORMAL_ACTIVE; | ||
| 1189 | mutex_unlock(&moxa_openlock); | ||
| 517 | 1190 | ||
| 518 | return (retval); | 1191 | return retval; |
| 519 | } | 1192 | } |
| 520 | 1193 | ||
| 521 | static void moxa_close(struct tty_struct *tty, struct file *filp) | 1194 | static void moxa_close(struct tty_struct *tty, struct file *filp) |
| @@ -524,23 +1197,14 @@ static void moxa_close(struct tty_struct *tty, struct file *filp) | |||
| 524 | int port; | 1197 | int port; |
| 525 | 1198 | ||
| 526 | port = tty->index; | 1199 | port = tty->index; |
| 527 | if (port == MAX_PORTS) { | 1200 | if (port == MAX_PORTS || tty_hung_up_p(filp)) |
| 528 | return; | ||
| 529 | } | ||
| 530 | if (!MoxaPortIsValid(port)) { | ||
| 531 | pr_debug("Invalid portno in moxa_close\n"); | ||
| 532 | tty->driver_data = NULL; | ||
| 533 | return; | ||
| 534 | } | ||
| 535 | if (tty->driver_data == NULL) { | ||
| 536 | return; | 1201 | return; |
| 537 | } | ||
| 538 | if (tty_hung_up_p(filp)) { | ||
| 539 | return; | ||
| 540 | } | ||
| 541 | ch = (struct moxa_port *) tty->driver_data; | ||
| 542 | 1202 | ||
| 543 | if ((tty->count == 1) && (ch->count != 1)) { | 1203 | mutex_lock(&moxa_openlock); |
| 1204 | ch = tty->driver_data; | ||
| 1205 | if (ch == NULL) | ||
| 1206 | goto unlock; | ||
| 1207 | if (tty->count == 1 && ch->count != 1) { | ||
| 544 | printk(KERN_WARNING "moxa_close: bad serial port count; " | 1208 | printk(KERN_WARNING "moxa_close: bad serial port count; " |
| 545 | "tty->count is 1, ch->count is %d\n", ch->count); | 1209 | "tty->count is 1, ch->count is %d\n", ch->count); |
| 546 | ch->count = 1; | 1210 | ch->count = 1; |
| @@ -550,59 +1214,35 @@ static void moxa_close(struct tty_struct *tty, struct file *filp) | |||
| 550 | "device=%s\n", tty->name); | 1214 | "device=%s\n", tty->name); |
| 551 | ch->count = 0; | 1215 | ch->count = 0; |
| 552 | } | 1216 | } |
| 553 | if (ch->count) { | 1217 | if (ch->count) |
| 554 | return; | 1218 | goto unlock; |
| 555 | } | ||
| 556 | ch->asyncflags |= ASYNC_CLOSING; | ||
| 557 | 1219 | ||
| 558 | ch->cflag = tty->termios->c_cflag; | 1220 | ch->cflag = tty->termios->c_cflag; |
| 559 | if (ch->asyncflags & ASYNC_INITIALIZED) { | 1221 | if (ch->asyncflags & ASYNC_INITIALIZED) { |
| 560 | moxa_setup_empty_event(tty); | 1222 | moxa_setup_empty_event(tty); |
| 561 | tty_wait_until_sent(tty, 30 * HZ); /* 30 seconds timeout */ | 1223 | tty_wait_until_sent(tty, 30 * HZ); /* 30 seconds timeout */ |
| 562 | del_timer_sync(&moxa_ports[ch->port].emptyTimer); | ||
| 563 | } | ||
| 564 | moxa_shut_down(ch); | ||
| 565 | MoxaPortFlushData(port, 2); | ||
| 566 | |||
| 567 | if (tty->driver->flush_buffer) | ||
| 568 | tty->driver->flush_buffer(tty); | ||
| 569 | tty_ldisc_flush(tty); | ||
| 570 | |||
| 571 | tty->closing = 0; | ||
| 572 | ch->event = 0; | ||
| 573 | ch->tty = NULL; | ||
| 574 | if (ch->blocked_open) { | ||
| 575 | if (ch->close_delay) { | ||
| 576 | msleep_interruptible(jiffies_to_msecs(ch->close_delay)); | ||
| 577 | } | ||
| 578 | wake_up_interruptible(&ch->open_wait); | ||
| 579 | } | 1224 | } |
| 580 | ch->asyncflags &= ~(ASYNC_NORMAL_ACTIVE | ASYNC_CLOSING); | 1225 | |
| 581 | complete_all(&ch->close_wait); | 1226 | moxa_close_port(ch); |
| 1227 | unlock: | ||
| 1228 | mutex_unlock(&moxa_openlock); | ||
| 582 | } | 1229 | } |
| 583 | 1230 | ||
| 584 | static int moxa_write(struct tty_struct *tty, | 1231 | static int moxa_write(struct tty_struct *tty, |
| 585 | const unsigned char *buf, int count) | 1232 | const unsigned char *buf, int count) |
| 586 | { | 1233 | { |
| 587 | struct moxa_port *ch; | 1234 | struct moxa_port *ch = tty->driver_data; |
| 588 | int len, port; | 1235 | int len; |
| 589 | unsigned long flags; | ||
| 590 | 1236 | ||
| 591 | ch = (struct moxa_port *) tty->driver_data; | ||
| 592 | if (ch == NULL) | 1237 | if (ch == NULL) |
| 593 | return (0); | 1238 | return 0; |
| 594 | port = ch->port; | ||
| 595 | 1239 | ||
| 596 | spin_lock_irqsave(&moxa_lock, flags); | 1240 | spin_lock_bh(&moxa_lock); |
| 597 | len = MoxaPortWriteData(port, (unsigned char *) buf, count); | 1241 | len = MoxaPortWriteData(ch, buf, count); |
| 598 | spin_unlock_irqrestore(&moxa_lock, flags); | 1242 | spin_unlock_bh(&moxa_lock); |
| 599 | 1243 | ||
| 600 | /********************************************* | ||
| 601 | if ( !(ch->statusflags & LOWWAIT) && | ||
| 602 | ((len != count) || (MoxaPortTxFree(port) <= 100)) ) | ||
| 603 | ************************************************/ | ||
| 604 | ch->statusflags |= LOWWAIT; | 1244 | ch->statusflags |= LOWWAIT; |
| 605 | return (len); | 1245 | return len; |
| 606 | } | 1246 | } |
| 607 | 1247 | ||
| 608 | static int moxa_write_room(struct tty_struct *tty) | 1248 | static int moxa_write_room(struct tty_struct *tty) |
| @@ -610,27 +1250,27 @@ static int moxa_write_room(struct tty_struct *tty) | |||
| 610 | struct moxa_port *ch; | 1250 | struct moxa_port *ch; |
| 611 | 1251 | ||
| 612 | if (tty->stopped) | 1252 | if (tty->stopped) |
| 613 | return (0); | 1253 | return 0; |
| 614 | ch = (struct moxa_port *) tty->driver_data; | 1254 | ch = tty->driver_data; |
| 615 | if (ch == NULL) | 1255 | if (ch == NULL) |
| 616 | return (0); | 1256 | return 0; |
| 617 | return (MoxaPortTxFree(ch->port)); | 1257 | return MoxaPortTxFree(ch); |
| 618 | } | 1258 | } |
| 619 | 1259 | ||
| 620 | static void moxa_flush_buffer(struct tty_struct *tty) | 1260 | static void moxa_flush_buffer(struct tty_struct *tty) |
| 621 | { | 1261 | { |
| 622 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1262 | struct moxa_port *ch = tty->driver_data; |
| 623 | 1263 | ||
| 624 | if (ch == NULL) | 1264 | if (ch == NULL) |
| 625 | return; | 1265 | return; |
| 626 | MoxaPortFlushData(ch->port, 1); | 1266 | MoxaPortFlushData(ch, 1); |
| 627 | tty_wakeup(tty); | 1267 | tty_wakeup(tty); |
| 628 | } | 1268 | } |
| 629 | 1269 | ||
| 630 | static int moxa_chars_in_buffer(struct tty_struct *tty) | 1270 | static int moxa_chars_in_buffer(struct tty_struct *tty) |
| 631 | { | 1271 | { |
| 1272 | struct moxa_port *ch = tty->driver_data; | ||
| 632 | int chars; | 1273 | int chars; |
| 633 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | ||
| 634 | 1274 | ||
| 635 | /* | 1275 | /* |
| 636 | * Sigh...I have to check if driver_data is NULL here, because | 1276 | * Sigh...I have to check if driver_data is NULL here, because |
| @@ -639,8 +1279,9 @@ static int moxa_chars_in_buffer(struct tty_struct *tty) | |||
| 639 | * routine. And since the open() failed, we return 0 here. TDJ | 1279 | * routine. And since the open() failed, we return 0 here. TDJ |
| 640 | */ | 1280 | */ |
| 641 | if (ch == NULL) | 1281 | if (ch == NULL) |
| 642 | return (0); | 1282 | return 0; |
| 643 | chars = MoxaPortTxQueue(ch->port); | 1283 | lock_kernel(); |
| 1284 | chars = MoxaPortTxQueue(ch); | ||
| 644 | if (chars) { | 1285 | if (chars) { |
| 645 | /* | 1286 | /* |
| 646 | * Make it possible to wakeup anything waiting for output | 1287 | * Make it possible to wakeup anything waiting for output |
| @@ -649,73 +1290,54 @@ static int moxa_chars_in_buffer(struct tty_struct *tty) | |||
| 649 | if (!(ch->statusflags & EMPTYWAIT)) | 1290 | if (!(ch->statusflags & EMPTYWAIT)) |
| 650 | moxa_setup_empty_event(tty); | 1291 | moxa_setup_empty_event(tty); |
| 651 | } | 1292 | } |
| 652 | return (chars); | 1293 | unlock_kernel(); |
| 653 | } | 1294 | return chars; |
| 654 | |||
| 655 | static void moxa_flush_chars(struct tty_struct *tty) | ||
| 656 | { | ||
| 657 | /* | ||
| 658 | * Don't think I need this, because this is called to empty the TX | ||
| 659 | * buffer for the 16450, 16550, etc. | ||
| 660 | */ | ||
| 661 | } | ||
| 662 | |||
| 663 | static void moxa_put_char(struct tty_struct *tty, unsigned char c) | ||
| 664 | { | ||
| 665 | struct moxa_port *ch; | ||
| 666 | int port; | ||
| 667 | unsigned long flags; | ||
| 668 | |||
| 669 | ch = (struct moxa_port *) tty->driver_data; | ||
| 670 | if (ch == NULL) | ||
| 671 | return; | ||
| 672 | port = ch->port; | ||
| 673 | spin_lock_irqsave(&moxa_lock, flags); | ||
| 674 | MoxaPortWriteData(port, &c, 1); | ||
| 675 | spin_unlock_irqrestore(&moxa_lock, flags); | ||
| 676 | /************************************************ | ||
| 677 | if ( !(ch->statusflags & LOWWAIT) && (MoxaPortTxFree(port) <= 100) ) | ||
| 678 | *************************************************/ | ||
| 679 | ch->statusflags |= LOWWAIT; | ||
| 680 | } | 1295 | } |
| 681 | 1296 | ||
| 682 | static int moxa_tiocmget(struct tty_struct *tty, struct file *file) | 1297 | static int moxa_tiocmget(struct tty_struct *tty, struct file *file) |
| 683 | { | 1298 | { |
| 684 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1299 | struct moxa_port *ch; |
| 685 | int port; | ||
| 686 | int flag = 0, dtr, rts; | 1300 | int flag = 0, dtr, rts; |
| 687 | 1301 | ||
| 688 | port = tty->index; | 1302 | mutex_lock(&moxa_openlock); |
| 689 | if ((port != MAX_PORTS) && (!ch)) | 1303 | ch = tty->driver_data; |
| 690 | return (-EINVAL); | 1304 | if (!ch) { |
| 1305 | mutex_unlock(&moxa_openlock); | ||
| 1306 | return -EINVAL; | ||
| 1307 | } | ||
| 691 | 1308 | ||
| 692 | MoxaPortGetLineOut(ch->port, &dtr, &rts); | 1309 | MoxaPortGetLineOut(ch, &dtr, &rts); |
| 693 | if (dtr) | 1310 | if (dtr) |
| 694 | flag |= TIOCM_DTR; | 1311 | flag |= TIOCM_DTR; |
| 695 | if (rts) | 1312 | if (rts) |
| 696 | flag |= TIOCM_RTS; | 1313 | flag |= TIOCM_RTS; |
| 697 | dtr = MoxaPortLineStatus(ch->port); | 1314 | dtr = MoxaPortLineStatus(ch); |
| 698 | if (dtr & 1) | 1315 | if (dtr & 1) |
| 699 | flag |= TIOCM_CTS; | 1316 | flag |= TIOCM_CTS; |
| 700 | if (dtr & 2) | 1317 | if (dtr & 2) |
| 701 | flag |= TIOCM_DSR; | 1318 | flag |= TIOCM_DSR; |
| 702 | if (dtr & 4) | 1319 | if (dtr & 4) |
| 703 | flag |= TIOCM_CD; | 1320 | flag |= TIOCM_CD; |
| 1321 | mutex_unlock(&moxa_openlock); | ||
| 704 | return flag; | 1322 | return flag; |
| 705 | } | 1323 | } |
| 706 | 1324 | ||
| 707 | static int moxa_tiocmset(struct tty_struct *tty, struct file *file, | 1325 | static int moxa_tiocmset(struct tty_struct *tty, struct file *file, |
| 708 | unsigned int set, unsigned int clear) | 1326 | unsigned int set, unsigned int clear) |
| 709 | { | 1327 | { |
| 710 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1328 | struct moxa_port *ch; |
| 711 | int port; | 1329 | int port; |
| 712 | int dtr, rts; | 1330 | int dtr, rts; |
| 713 | 1331 | ||
| 714 | port = tty->index; | 1332 | port = tty->index; |
| 715 | if ((port != MAX_PORTS) && (!ch)) | 1333 | mutex_lock(&moxa_openlock); |
| 716 | return (-EINVAL); | 1334 | ch = tty->driver_data; |
| 1335 | if (!ch) { | ||
| 1336 | mutex_unlock(&moxa_openlock); | ||
| 1337 | return -EINVAL; | ||
| 1338 | } | ||
| 717 | 1339 | ||
| 718 | MoxaPortGetLineOut(ch->port, &dtr, &rts); | 1340 | MoxaPortGetLineOut(ch, &dtr, &rts); |
| 719 | if (set & TIOCM_RTS) | 1341 | if (set & TIOCM_RTS) |
| 720 | rts = 1; | 1342 | rts = 1; |
| 721 | if (set & TIOCM_DTR) | 1343 | if (set & TIOCM_DTR) |
| @@ -724,105 +1346,51 @@ static int moxa_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 724 | rts = 0; | 1346 | rts = 0; |
| 725 | if (clear & TIOCM_DTR) | 1347 | if (clear & TIOCM_DTR) |
| 726 | dtr = 0; | 1348 | dtr = 0; |
| 727 | MoxaPortLineCtrl(ch->port, dtr, rts); | 1349 | MoxaPortLineCtrl(ch, dtr, rts); |
| 1350 | mutex_unlock(&moxa_openlock); | ||
| 728 | return 0; | 1351 | return 0; |
| 729 | } | 1352 | } |
| 730 | 1353 | ||
| 731 | static int moxa_ioctl(struct tty_struct *tty, struct file *file, | ||
| 732 | unsigned int cmd, unsigned long arg) | ||
| 733 | { | ||
| 734 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | ||
| 735 | register int port; | ||
| 736 | void __user *argp = (void __user *)arg; | ||
| 737 | int retval; | ||
| 738 | |||
| 739 | port = tty->index; | ||
| 740 | if ((port != MAX_PORTS) && (!ch)) | ||
| 741 | return (-EINVAL); | ||
| 742 | |||
| 743 | switch (cmd) { | ||
| 744 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | ||
| 745 | retval = tty_check_change(tty); | ||
| 746 | if (retval) | ||
| 747 | return (retval); | ||
| 748 | moxa_setup_empty_event(tty); | ||
| 749 | tty_wait_until_sent(tty, 0); | ||
| 750 | if (!arg) | ||
| 751 | MoxaPortSendBreak(ch->port, 0); | ||
| 752 | return (0); | ||
| 753 | case TCSBRKP: /* support for POSIX tcsendbreak() */ | ||
| 754 | retval = tty_check_change(tty); | ||
| 755 | if (retval) | ||
| 756 | return (retval); | ||
| 757 | moxa_setup_empty_event(tty); | ||
| 758 | tty_wait_until_sent(tty, 0); | ||
| 759 | MoxaPortSendBreak(ch->port, arg); | ||
| 760 | return (0); | ||
| 761 | case TIOCGSOFTCAR: | ||
| 762 | return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned long __user *) argp); | ||
| 763 | case TIOCSSOFTCAR: | ||
| 764 | if(get_user(retval, (unsigned long __user *) argp)) | ||
| 765 | return -EFAULT; | ||
| 766 | arg = retval; | ||
| 767 | tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 768 | (arg ? CLOCAL : 0)); | ||
| 769 | if (C_CLOCAL(tty)) | ||
| 770 | ch->asyncflags &= ~ASYNC_CHECK_CD; | ||
| 771 | else | ||
| 772 | ch->asyncflags |= ASYNC_CHECK_CD; | ||
| 773 | return (0); | ||
| 774 | case TIOCGSERIAL: | ||
| 775 | return moxa_get_serial_info(ch, argp); | ||
| 776 | |||
| 777 | case TIOCSSERIAL: | ||
| 778 | return moxa_set_serial_info(ch, argp); | ||
| 779 | default: | ||
| 780 | retval = MoxaDriverIoctl(cmd, arg, port); | ||
| 781 | } | ||
| 782 | return (retval); | ||
| 783 | } | ||
| 784 | |||
| 785 | static void moxa_throttle(struct tty_struct *tty) | 1354 | static void moxa_throttle(struct tty_struct *tty) |
| 786 | { | 1355 | { |
| 787 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1356 | struct moxa_port *ch = tty->driver_data; |
| 788 | 1357 | ||
| 789 | ch->statusflags |= THROTTLE; | 1358 | ch->statusflags |= THROTTLE; |
| 790 | } | 1359 | } |
| 791 | 1360 | ||
| 792 | static void moxa_unthrottle(struct tty_struct *tty) | 1361 | static void moxa_unthrottle(struct tty_struct *tty) |
| 793 | { | 1362 | { |
| 794 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1363 | struct moxa_port *ch = tty->driver_data; |
| 795 | 1364 | ||
| 796 | ch->statusflags &= ~THROTTLE; | 1365 | ch->statusflags &= ~THROTTLE; |
| 797 | } | 1366 | } |
| 798 | 1367 | ||
| 799 | static void moxa_set_termios(struct tty_struct *tty, | 1368 | static void moxa_set_termios(struct tty_struct *tty, |
| 800 | struct ktermios *old_termios) | 1369 | struct ktermios *old_termios) |
| 801 | { | 1370 | { |
| 802 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1371 | struct moxa_port *ch = tty->driver_data; |
| 803 | 1372 | ||
| 804 | if (ch == NULL) | 1373 | if (ch == NULL) |
| 805 | return; | 1374 | return; |
| 806 | moxa_set_tty_param(tty, old_termios); | 1375 | moxa_set_tty_param(tty, old_termios); |
| 807 | if (!(old_termios->c_cflag & CLOCAL) && | 1376 | if (!(old_termios->c_cflag & CLOCAL) && C_CLOCAL(tty)) |
| 808 | (tty->termios->c_cflag & CLOCAL)) | ||
| 809 | wake_up_interruptible(&ch->open_wait); | 1377 | wake_up_interruptible(&ch->open_wait); |
| 810 | } | 1378 | } |
| 811 | 1379 | ||
| 812 | static void moxa_stop(struct tty_struct *tty) | 1380 | static void moxa_stop(struct tty_struct *tty) |
| 813 | { | 1381 | { |
| 814 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1382 | struct moxa_port *ch = tty->driver_data; |
| 815 | 1383 | ||
| 816 | if (ch == NULL) | 1384 | if (ch == NULL) |
| 817 | return; | 1385 | return; |
| 818 | MoxaPortTxDisable(ch->port); | 1386 | MoxaPortTxDisable(ch); |
| 819 | ch->statusflags |= TXSTOPPED; | 1387 | ch->statusflags |= TXSTOPPED; |
| 820 | } | 1388 | } |
| 821 | 1389 | ||
| 822 | 1390 | ||
| 823 | static void moxa_start(struct tty_struct *tty) | 1391 | static void moxa_start(struct tty_struct *tty) |
| 824 | { | 1392 | { |
| 825 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1393 | struct moxa_port *ch = tty->driver_data; |
| 826 | 1394 | ||
| 827 | if (ch == NULL) | 1395 | if (ch == NULL) |
| 828 | return; | 1396 | return; |
| @@ -830,91 +1398,143 @@ static void moxa_start(struct tty_struct *tty) | |||
| 830 | if (!(ch->statusflags & TXSTOPPED)) | 1398 | if (!(ch->statusflags & TXSTOPPED)) |
| 831 | return; | 1399 | return; |
| 832 | 1400 | ||
| 833 | MoxaPortTxEnable(ch->port); | 1401 | MoxaPortTxEnable(ch); |
| 834 | ch->statusflags &= ~TXSTOPPED; | 1402 | ch->statusflags &= ~TXSTOPPED; |
| 835 | } | 1403 | } |
| 836 | 1404 | ||
| 837 | static void moxa_hangup(struct tty_struct *tty) | 1405 | static void moxa_hangup(struct tty_struct *tty) |
| 838 | { | 1406 | { |
| 839 | struct moxa_port *ch = (struct moxa_port *) tty->driver_data; | 1407 | struct moxa_port *ch; |
| 840 | 1408 | ||
| 841 | moxa_flush_buffer(tty); | 1409 | mutex_lock(&moxa_openlock); |
| 842 | moxa_shut_down(ch); | 1410 | ch = tty->driver_data; |
| 843 | ch->event = 0; | 1411 | if (ch == NULL) { |
| 1412 | mutex_unlock(&moxa_openlock); | ||
| 1413 | return; | ||
| 1414 | } | ||
| 844 | ch->count = 0; | 1415 | ch->count = 0; |
| 845 | ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; | 1416 | moxa_close_port(ch); |
| 846 | ch->tty = NULL; | 1417 | mutex_unlock(&moxa_openlock); |
| 1418 | |||
| 847 | wake_up_interruptible(&ch->open_wait); | 1419 | wake_up_interruptible(&ch->open_wait); |
| 848 | } | 1420 | } |
| 849 | 1421 | ||
| 850 | static void moxa_poll(unsigned long ignored) | 1422 | static void moxa_new_dcdstate(struct moxa_port *p, u8 dcd) |
| 851 | { | 1423 | { |
| 852 | register int card; | 1424 | dcd = !!dcd; |
| 853 | struct moxa_port *ch; | ||
| 854 | struct tty_struct *tp; | ||
| 855 | int i, ports; | ||
| 856 | 1425 | ||
| 857 | del_timer(&moxaTimer); | 1426 | if (dcd != p->DCDState && p->tty && C_CLOCAL(p->tty)) { |
| 1427 | if (!dcd) | ||
| 1428 | tty_hangup(p->tty); | ||
| 1429 | } | ||
| 1430 | p->DCDState = dcd; | ||
| 1431 | } | ||
| 858 | 1432 | ||
| 859 | if (MoxaDriverPoll() < 0) { | 1433 | static int moxa_poll_port(struct moxa_port *p, unsigned int handle, |
| 860 | mod_timer(&moxaTimer, jiffies + HZ / 50); | 1434 | u16 __iomem *ip) |
| 861 | return; | 1435 | { |
| 1436 | struct tty_struct *tty = p->tty; | ||
| 1437 | void __iomem *ofsAddr; | ||
| 1438 | unsigned int inited = p->asyncflags & ASYNC_INITIALIZED; | ||
| 1439 | u16 intr; | ||
| 1440 | |||
| 1441 | if (tty) { | ||
| 1442 | if ((p->statusflags & EMPTYWAIT) && | ||
| 1443 | MoxaPortTxQueue(p) == 0) { | ||
| 1444 | p->statusflags &= ~EMPTYWAIT; | ||
| 1445 | tty_wakeup(tty); | ||
| 1446 | } | ||
| 1447 | if ((p->statusflags & LOWWAIT) && !tty->stopped && | ||
| 1448 | MoxaPortTxQueue(p) <= WAKEUP_CHARS) { | ||
| 1449 | p->statusflags &= ~LOWWAIT; | ||
| 1450 | tty_wakeup(tty); | ||
| 1451 | } | ||
| 1452 | |||
| 1453 | if (inited && !(p->statusflags & THROTTLE) && | ||
| 1454 | MoxaPortRxQueue(p) > 0) { /* RX */ | ||
| 1455 | MoxaPortReadData(p); | ||
| 1456 | tty_schedule_flip(tty); | ||
| 1457 | } | ||
| 1458 | } else { | ||
| 1459 | p->statusflags &= ~EMPTYWAIT; | ||
| 1460 | MoxaPortFlushData(p, 0); /* flush RX */ | ||
| 862 | } | 1461 | } |
| 1462 | |||
| 1463 | if (!handle) /* nothing else to do */ | ||
| 1464 | return 0; | ||
| 1465 | |||
| 1466 | intr = readw(ip); /* port irq status */ | ||
| 1467 | if (intr == 0) | ||
| 1468 | return 0; | ||
| 1469 | |||
| 1470 | writew(0, ip); /* ACK port */ | ||
| 1471 | ofsAddr = p->tableAddr; | ||
| 1472 | if (intr & IntrTx) /* disable tx intr */ | ||
| 1473 | writew(readw(ofsAddr + HostStat) & ~WakeupTx, | ||
| 1474 | ofsAddr + HostStat); | ||
| 1475 | |||
| 1476 | if (!inited) | ||
| 1477 | return 0; | ||
| 1478 | |||
| 1479 | if (tty && (intr & IntrBreak) && !I_IGNBRK(tty)) { /* BREAK */ | ||
| 1480 | tty_insert_flip_char(tty, 0, TTY_BREAK); | ||
| 1481 | tty_schedule_flip(tty); | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | if (intr & IntrLine) | ||
| 1485 | moxa_new_dcdstate(p, readb(ofsAddr + FlagStat) & DCD_state); | ||
| 1486 | |||
| 1487 | return 0; | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | static void moxa_poll(unsigned long ignored) | ||
| 1491 | { | ||
| 1492 | struct moxa_board_conf *brd; | ||
| 1493 | u16 __iomem *ip; | ||
| 1494 | unsigned int card, port, served = 0; | ||
| 1495 | |||
| 1496 | spin_lock(&moxa_lock); | ||
| 863 | for (card = 0; card < MAX_BOARDS; card++) { | 1497 | for (card = 0; card < MAX_BOARDS; card++) { |
| 864 | if ((ports = MoxaPortsOfCard(card)) <= 0) | 1498 | brd = &moxa_boards[card]; |
| 1499 | if (!brd->ready) | ||
| 865 | continue; | 1500 | continue; |
| 866 | ch = &moxa_ports[card * MAX_PORTS_PER_BOARD]; | 1501 | |
| 867 | for (i = 0; i < ports; i++, ch++) { | 1502 | served++; |
| 868 | if ((ch->asyncflags & ASYNC_INITIALIZED) == 0) | 1503 | |
| 869 | continue; | 1504 | ip = NULL; |
| 870 | if (!(ch->statusflags & THROTTLE) && | 1505 | if (readb(brd->intPend) == 0xff) |
| 871 | (MoxaPortRxQueue(ch->port) > 0)) | 1506 | ip = brd->intTable + readb(brd->intNdx); |
| 872 | moxa_receive_data(ch); | 1507 | |
| 873 | if ((tp = ch->tty) == 0) | 1508 | for (port = 0; port < brd->numPorts; port++) |
| 874 | continue; | 1509 | moxa_poll_port(&brd->ports[port], !!ip, ip + port); |
| 875 | if (ch->statusflags & LOWWAIT) { | 1510 | |
| 876 | if (MoxaPortTxQueue(ch->port) <= WAKEUP_CHARS) { | 1511 | if (ip) |
| 877 | if (!tp->stopped) { | 1512 | writeb(0, brd->intPend); /* ACK */ |
| 878 | ch->statusflags &= ~LOWWAIT; | 1513 | |
| 879 | tty_wakeup(tp); | 1514 | if (moxaLowWaterChk) { |
| 880 | } | 1515 | struct moxa_port *p = brd->ports; |
| 881 | } | 1516 | for (port = 0; port < brd->numPorts; port++, p++) |
| 882 | } | 1517 | if (p->lowChkFlag) { |
| 883 | if (!I_IGNBRK(tp) && (MoxaPortResetBrkCnt(ch->port) > 0)) { | 1518 | p->lowChkFlag = 0; |
| 884 | tty_insert_flip_char(tp, 0, TTY_BREAK); | 1519 | moxa_low_water_check(p->tableAddr); |
| 885 | tty_schedule_flip(tp); | ||
| 886 | } | ||
| 887 | if (MoxaPortDCDChange(ch->port)) { | ||
| 888 | if (ch->asyncflags & ASYNC_CHECK_CD) { | ||
| 889 | if (MoxaPortDCDON(ch->port)) | ||
| 890 | wake_up_interruptible(&ch->open_wait); | ||
| 891 | else { | ||
| 892 | tty_hangup(tp); | ||
| 893 | wake_up_interruptible(&ch->open_wait); | ||
| 894 | ch->asyncflags &= ~ASYNC_NORMAL_ACTIVE; | ||
| 895 | } | ||
| 896 | } | 1520 | } |
| 897 | } | ||
| 898 | } | 1521 | } |
| 899 | } | 1522 | } |
| 1523 | moxaLowWaterChk = 0; | ||
| 900 | 1524 | ||
| 901 | mod_timer(&moxaTimer, jiffies + HZ / 50); | 1525 | if (served) |
| 1526 | mod_timer(&moxaTimer, jiffies + HZ / 50); | ||
| 1527 | spin_unlock(&moxa_lock); | ||
| 902 | } | 1528 | } |
| 903 | 1529 | ||
| 904 | /******************************************************************************/ | 1530 | /******************************************************************************/ |
| 905 | 1531 | ||
| 906 | static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) | 1532 | static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) |
| 907 | { | 1533 | { |
| 908 | register struct ktermios *ts; | 1534 | register struct ktermios *ts = tty->termios; |
| 909 | struct moxa_port *ch; | 1535 | struct moxa_port *ch = tty->driver_data; |
| 910 | int rts, cts, txflow, rxflow, xany, baud; | 1536 | int rts, cts, txflow, rxflow, xany, baud; |
| 911 | 1537 | ||
| 912 | ch = (struct moxa_port *) tty->driver_data; | ||
| 913 | ts = tty->termios; | ||
| 914 | if (ts->c_cflag & CLOCAL) | ||
| 915 | ch->asyncflags &= ~ASYNC_CHECK_CD; | ||
| 916 | else | ||
| 917 | ch->asyncflags |= ASYNC_CHECK_CD; | ||
| 918 | rts = cts = txflow = rxflow = xany = 0; | 1538 | rts = cts = txflow = rxflow = xany = 0; |
| 919 | if (ts->c_cflag & CRTSCTS) | 1539 | if (ts->c_cflag & CRTSCTS) |
| 920 | rts = cts = 1; | 1540 | rts = cts = 1; |
| @@ -927,776 +1547,60 @@ static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_term | |||
| 927 | 1547 | ||
| 928 | /* Clear the features we don't support */ | 1548 | /* Clear the features we don't support */ |
| 929 | ts->c_cflag &= ~CMSPAR; | 1549 | ts->c_cflag &= ~CMSPAR; |
| 930 | MoxaPortFlowCtrl(ch->port, rts, cts, txflow, rxflow, xany); | 1550 | MoxaPortFlowCtrl(ch, rts, cts, txflow, rxflow, xany); |
| 931 | baud = MoxaPortSetTermio(ch->port, ts, tty_get_baud_rate(tty)); | 1551 | baud = MoxaPortSetTermio(ch, ts, tty_get_baud_rate(tty)); |
| 932 | if (baud == -1) | 1552 | if (baud == -1) |
| 933 | baud = tty_termios_baud_rate(old_termios); | 1553 | baud = tty_termios_baud_rate(old_termios); |
| 934 | /* Don't put the baud rate into the termios data */ | 1554 | /* Don't put the baud rate into the termios data */ |
| 935 | tty_encode_baud_rate(tty, baud, baud); | 1555 | tty_encode_baud_rate(tty, baud, baud); |
| 936 | } | 1556 | } |
| 937 | 1557 | ||
| 938 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | ||
| 939 | struct moxa_port *ch) | ||
| 940 | { | ||
| 941 | DECLARE_WAITQUEUE(wait,current); | ||
| 942 | unsigned long flags; | ||
| 943 | int retval; | ||
| 944 | int do_clocal = C_CLOCAL(tty); | ||
| 945 | |||
| 946 | /* | ||
| 947 | * If the device is in the middle of being closed, then block | ||
| 948 | * until it's done, and then try again. | ||
| 949 | */ | ||
| 950 | if (tty_hung_up_p(filp) || (ch->asyncflags & ASYNC_CLOSING)) { | ||
| 951 | if (ch->asyncflags & ASYNC_CLOSING) | ||
| 952 | wait_for_completion_interruptible(&ch->close_wait); | ||
| 953 | #ifdef SERIAL_DO_RESTART | ||
| 954 | if (ch->asyncflags & ASYNC_HUP_NOTIFY) | ||
| 955 | return (-EAGAIN); | ||
| 956 | else | ||
| 957 | return (-ERESTARTSYS); | ||
| 958 | #else | ||
| 959 | return (-EAGAIN); | ||
| 960 | #endif | ||
| 961 | } | ||
| 962 | /* | ||
| 963 | * If non-blocking mode is set, then make the check up front | ||
| 964 | * and then exit. | ||
| 965 | */ | ||
| 966 | if (filp->f_flags & O_NONBLOCK) { | ||
| 967 | ch->asyncflags |= ASYNC_NORMAL_ACTIVE; | ||
| 968 | return (0); | ||
| 969 | } | ||
| 970 | /* | ||
| 971 | * Block waiting for the carrier detect and the line to become free | ||
| 972 | */ | ||
| 973 | retval = 0; | ||
| 974 | add_wait_queue(&ch->open_wait, &wait); | ||
| 975 | pr_debug("block_til_ready before block: ttys%d, count = %d\n", | ||
| 976 | ch->port, ch->count); | ||
| 977 | spin_lock_irqsave(&moxa_lock, flags); | ||
| 978 | if (!tty_hung_up_p(filp)) | ||
| 979 | ch->count--; | ||
| 980 | ch->blocked_open++; | ||
| 981 | spin_unlock_irqrestore(&moxa_lock, flags); | ||
| 982 | |||
| 983 | while (1) { | ||
| 984 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 985 | if (tty_hung_up_p(filp) || | ||
| 986 | !(ch->asyncflags & ASYNC_INITIALIZED)) { | ||
| 987 | #ifdef SERIAL_DO_RESTART | ||
| 988 | if (ch->asyncflags & ASYNC_HUP_NOTIFY) | ||
| 989 | retval = -EAGAIN; | ||
| 990 | else | ||
| 991 | retval = -ERESTARTSYS; | ||
| 992 | #else | ||
| 993 | retval = -EAGAIN; | ||
| 994 | #endif | ||
| 995 | break; | ||
| 996 | } | ||
| 997 | if (!(ch->asyncflags & ASYNC_CLOSING) && (do_clocal || | ||
| 998 | MoxaPortDCDON(ch->port))) | ||
| 999 | break; | ||
| 1000 | |||
| 1001 | if (signal_pending(current)) { | ||
| 1002 | retval = -ERESTARTSYS; | ||
| 1003 | break; | ||
| 1004 | } | ||
| 1005 | schedule(); | ||
| 1006 | } | ||
| 1007 | set_current_state(TASK_RUNNING); | ||
| 1008 | remove_wait_queue(&ch->open_wait, &wait); | ||
| 1009 | |||
| 1010 | spin_lock_irqsave(&moxa_lock, flags); | ||
| 1011 | if (!tty_hung_up_p(filp)) | ||
| 1012 | ch->count++; | ||
| 1013 | ch->blocked_open--; | ||
| 1014 | spin_unlock_irqrestore(&moxa_lock, flags); | ||
| 1015 | pr_debug("block_til_ready after blocking: ttys%d, count = %d\n", | ||
| 1016 | ch->port, ch->count); | ||
| 1017 | if (retval) | ||
| 1018 | return (retval); | ||
| 1019 | /* FIXME: review to see if we need to use set_bit on these */ | ||
| 1020 | ch->asyncflags |= ASYNC_NORMAL_ACTIVE; | ||
| 1021 | return 0; | ||
| 1022 | } | ||
| 1023 | |||
| 1024 | static void moxa_setup_empty_event(struct tty_struct *tty) | 1558 | static void moxa_setup_empty_event(struct tty_struct *tty) |
| 1025 | { | 1559 | { |
| 1026 | struct moxa_port *ch = tty->driver_data; | 1560 | struct moxa_port *ch = tty->driver_data; |
| 1027 | unsigned long flags; | ||
| 1028 | 1561 | ||
| 1029 | spin_lock_irqsave(&moxa_lock, flags); | 1562 | spin_lock_bh(&moxa_lock); |
| 1030 | ch->statusflags |= EMPTYWAIT; | 1563 | ch->statusflags |= EMPTYWAIT; |
| 1031 | mod_timer(&moxa_ports[ch->port].emptyTimer, jiffies + HZ); | 1564 | spin_unlock_bh(&moxa_lock); |
| 1032 | spin_unlock_irqrestore(&moxa_lock, flags); | ||
| 1033 | } | ||
| 1034 | |||
| 1035 | static void moxa_check_xmit_empty(unsigned long data) | ||
| 1036 | { | ||
| 1037 | struct moxa_port *ch; | ||
| 1038 | |||
| 1039 | ch = (struct moxa_port *) data; | ||
| 1040 | if (ch->tty && (ch->statusflags & EMPTYWAIT)) { | ||
| 1041 | if (MoxaPortTxQueue(ch->port) == 0) { | ||
| 1042 | ch->statusflags &= ~EMPTYWAIT; | ||
| 1043 | tty_wakeup(ch->tty); | ||
| 1044 | return; | ||
| 1045 | } | ||
| 1046 | mod_timer(&moxa_ports[ch->port].emptyTimer, | ||
| 1047 | round_jiffies(jiffies + HZ)); | ||
| 1048 | } else | ||
| 1049 | ch->statusflags &= ~EMPTYWAIT; | ||
| 1050 | } | 1565 | } |
| 1051 | 1566 | ||
| 1052 | static void moxa_shut_down(struct moxa_port *ch) | 1567 | static void moxa_shut_down(struct moxa_port *ch) |
| 1053 | { | 1568 | { |
| 1054 | struct tty_struct *tp; | 1569 | struct tty_struct *tp = ch->tty; |
| 1055 | 1570 | ||
| 1056 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) | 1571 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) |
| 1057 | return; | 1572 | return; |
| 1058 | 1573 | ||
| 1059 | tp = ch->tty; | 1574 | MoxaPortDisable(ch); |
| 1060 | |||
| 1061 | MoxaPortDisable(ch->port); | ||
| 1062 | 1575 | ||
| 1063 | /* | 1576 | /* |
| 1064 | * If we're a modem control device and HUPCL is on, drop RTS & DTR. | 1577 | * If we're a modem control device and HUPCL is on, drop RTS & DTR. |
| 1065 | */ | 1578 | */ |
| 1066 | if (tp->termios->c_cflag & HUPCL) | 1579 | if (C_HUPCL(tp)) |
| 1067 | MoxaPortLineCtrl(ch->port, 0, 0); | 1580 | MoxaPortLineCtrl(ch, 0, 0); |
| 1068 | 1581 | ||
| 1582 | spin_lock_bh(&moxa_lock); | ||
| 1069 | ch->asyncflags &= ~ASYNC_INITIALIZED; | 1583 | ch->asyncflags &= ~ASYNC_INITIALIZED; |
| 1584 | spin_unlock_bh(&moxa_lock); | ||
| 1070 | } | 1585 | } |
| 1071 | 1586 | ||
| 1072 | static void moxa_receive_data(struct moxa_port *ch) | ||
| 1073 | { | ||
| 1074 | struct tty_struct *tp; | ||
| 1075 | struct ktermios *ts; | ||
| 1076 | unsigned long flags; | ||
| 1077 | |||
| 1078 | ts = NULL; | ||
| 1079 | tp = ch->tty; | ||
| 1080 | if (tp) | ||
| 1081 | ts = tp->termios; | ||
| 1082 | /************************************************** | ||
| 1083 | if ( !tp || !ts || !(ts->c_cflag & CREAD) ) { | ||
| 1084 | *****************************************************/ | ||
| 1085 | if (!tp || !ts) { | ||
| 1086 | MoxaPortFlushData(ch->port, 0); | ||
| 1087 | return; | ||
| 1088 | } | ||
| 1089 | spin_lock_irqsave(&moxa_lock, flags); | ||
| 1090 | MoxaPortReadData(ch->port, tp); | ||
| 1091 | spin_unlock_irqrestore(&moxa_lock, flags); | ||
| 1092 | tty_schedule_flip(tp); | ||
| 1093 | } | ||
| 1094 | |||
| 1095 | #define Magic_code 0x404 | ||
| 1096 | |||
| 1097 | /* | ||
| 1098 | * System Configuration | ||
| 1099 | */ | ||
| 1100 | /* | ||
| 1101 | * for C218 BIOS initialization | ||
| 1102 | */ | ||
| 1103 | #define C218_ConfBase 0x800 | ||
| 1104 | #define C218_status (C218_ConfBase + 0) /* BIOS running status */ | ||
| 1105 | #define C218_diag (C218_ConfBase + 2) /* diagnostic status */ | ||
| 1106 | #define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */ | ||
| 1107 | #define C218DLoad_len (C218_ConfBase + 6) /* WORD */ | ||
| 1108 | #define C218check_sum (C218_ConfBase + 8) /* BYTE */ | ||
| 1109 | #define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */ | ||
| 1110 | #define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */ | ||
| 1111 | #define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */ | ||
| 1112 | #define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */ | ||
| 1113 | #define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */ | ||
| 1114 | |||
| 1115 | #define C218_LoadBuf 0x0F00 | ||
| 1116 | #define C218_KeyCode 0x218 | ||
| 1117 | #define CP204J_KeyCode 0x204 | ||
| 1118 | |||
| 1119 | /* | ||
| 1120 | * for C320 BIOS initialization | ||
| 1121 | */ | ||
| 1122 | #define C320_ConfBase 0x800 | ||
| 1123 | #define C320_LoadBuf 0x0f00 | ||
| 1124 | #define STS_init 0x05 /* for C320_status */ | ||
| 1125 | |||
| 1126 | #define C320_status C320_ConfBase + 0 /* BIOS running status */ | ||
| 1127 | #define C320_diag C320_ConfBase + 2 /* diagnostic status */ | ||
| 1128 | #define C320_key C320_ConfBase + 4 /* WORD (0320H for C320) */ | ||
| 1129 | #define C320DLoad_len C320_ConfBase + 6 /* WORD */ | ||
| 1130 | #define C320check_sum C320_ConfBase + 8 /* WORD */ | ||
| 1131 | #define C320chksum_ok C320_ConfBase + 0x0a /* WORD (1:ok) */ | ||
| 1132 | #define C320bapi_len C320_ConfBase + 0x0c /* WORD */ | ||
| 1133 | #define C320UART_no C320_ConfBase + 0x0e /* WORD */ | ||
| 1134 | |||
| 1135 | #define C320_KeyCode 0x320 | ||
| 1136 | |||
| 1137 | #define FixPage_addr 0x0000 /* starting addr of static page */ | ||
| 1138 | #define DynPage_addr 0x2000 /* starting addr of dynamic page */ | ||
| 1139 | #define C218_start 0x3000 /* starting addr of C218 BIOS prg */ | ||
| 1140 | #define Control_reg 0x1ff0 /* select page and reset control */ | ||
| 1141 | #define HW_reset 0x80 | ||
| 1142 | |||
| 1143 | /* | ||
| 1144 | * Function Codes | ||
| 1145 | */ | ||
| 1146 | #define FC_CardReset 0x80 | ||
| 1147 | #define FC_ChannelReset 1 /* C320 firmware not supported */ | ||
| 1148 | #define FC_EnableCH 2 | ||
| 1149 | #define FC_DisableCH 3 | ||
| 1150 | #define FC_SetParam 4 | ||
| 1151 | #define FC_SetMode 5 | ||
| 1152 | #define FC_SetRate 6 | ||
| 1153 | #define FC_LineControl 7 | ||
| 1154 | #define FC_LineStatus 8 | ||
| 1155 | #define FC_XmitControl 9 | ||
| 1156 | #define FC_FlushQueue 10 | ||
| 1157 | #define FC_SendBreak 11 | ||
| 1158 | #define FC_StopBreak 12 | ||
| 1159 | #define FC_LoopbackON 13 | ||
| 1160 | #define FC_LoopbackOFF 14 | ||
| 1161 | #define FC_ClrIrqTable 15 | ||
| 1162 | #define FC_SendXon 16 | ||
| 1163 | #define FC_SetTermIrq 17 /* C320 firmware not supported */ | ||
| 1164 | #define FC_SetCntIrq 18 /* C320 firmware not supported */ | ||
| 1165 | #define FC_SetBreakIrq 19 | ||
| 1166 | #define FC_SetLineIrq 20 | ||
| 1167 | #define FC_SetFlowCtl 21 | ||
| 1168 | #define FC_GenIrq 22 | ||
| 1169 | #define FC_InCD180 23 | ||
| 1170 | #define FC_OutCD180 24 | ||
| 1171 | #define FC_InUARTreg 23 | ||
| 1172 | #define FC_OutUARTreg 24 | ||
| 1173 | #define FC_SetXonXoff 25 | ||
| 1174 | #define FC_OutCD180CCR 26 | ||
| 1175 | #define FC_ExtIQueue 27 | ||
| 1176 | #define FC_ExtOQueue 28 | ||
| 1177 | #define FC_ClrLineIrq 29 | ||
| 1178 | #define FC_HWFlowCtl 30 | ||
| 1179 | #define FC_GetClockRate 35 | ||
| 1180 | #define FC_SetBaud 36 | ||
| 1181 | #define FC_SetDataMode 41 | ||
| 1182 | #define FC_GetCCSR 43 | ||
| 1183 | #define FC_GetDataError 45 | ||
| 1184 | #define FC_RxControl 50 | ||
| 1185 | #define FC_ImmSend 51 | ||
| 1186 | #define FC_SetXonState 52 | ||
| 1187 | #define FC_SetXoffState 53 | ||
| 1188 | #define FC_SetRxFIFOTrig 54 | ||
| 1189 | #define FC_SetTxFIFOCnt 55 | ||
| 1190 | #define FC_UnixRate 56 | ||
| 1191 | #define FC_UnixResetTimer 57 | ||
| 1192 | |||
| 1193 | #define RxFIFOTrig1 0 | ||
| 1194 | #define RxFIFOTrig4 1 | ||
| 1195 | #define RxFIFOTrig8 2 | ||
| 1196 | #define RxFIFOTrig14 3 | ||
| 1197 | |||
| 1198 | /* | ||
| 1199 | * Dual-Ported RAM | ||
| 1200 | */ | ||
| 1201 | #define DRAM_global 0 | ||
| 1202 | #define INT_data (DRAM_global + 0) | ||
| 1203 | #define Config_base (DRAM_global + 0x108) | ||
| 1204 | |||
| 1205 | #define IRQindex (INT_data + 0) | ||
| 1206 | #define IRQpending (INT_data + 4) | ||
| 1207 | #define IRQtable (INT_data + 8) | ||
| 1208 | |||
| 1209 | /* | ||
| 1210 | * Interrupt Status | ||
| 1211 | */ | ||
| 1212 | #define IntrRx 0x01 /* receiver data O.K. */ | ||
| 1213 | #define IntrTx 0x02 /* transmit buffer empty */ | ||
| 1214 | #define IntrFunc 0x04 /* function complete */ | ||
| 1215 | #define IntrBreak 0x08 /* received break */ | ||
| 1216 | #define IntrLine 0x10 /* line status change | ||
| 1217 | for transmitter */ | ||
| 1218 | #define IntrIntr 0x20 /* received INTR code */ | ||
| 1219 | #define IntrQuit 0x40 /* received QUIT code */ | ||
| 1220 | #define IntrEOF 0x80 /* received EOF code */ | ||
| 1221 | |||
| 1222 | #define IntrRxTrigger 0x100 /* rx data count reaches trigger value */ | ||
| 1223 | #define IntrTxTrigger 0x200 /* tx data count below trigger value */ | ||
| 1224 | |||
| 1225 | #define Magic_no (Config_base + 0) | ||
| 1226 | #define Card_model_no (Config_base + 2) | ||
| 1227 | #define Total_ports (Config_base + 4) | ||
| 1228 | #define Module_cnt (Config_base + 8) | ||
| 1229 | #define Module_no (Config_base + 10) | ||
| 1230 | #define Timer_10ms (Config_base + 14) | ||
| 1231 | #define Disable_IRQ (Config_base + 20) | ||
| 1232 | #define TMS320_PORT1 (Config_base + 22) | ||
| 1233 | #define TMS320_PORT2 (Config_base + 24) | ||
| 1234 | #define TMS320_CLOCK (Config_base + 26) | ||
| 1235 | |||
| 1236 | /* | ||
| 1237 | * DATA BUFFER in DRAM | ||
| 1238 | */ | ||
| 1239 | #define Extern_table 0x400 /* Base address of the external table | ||
| 1240 | (24 words * 64) total 3K bytes | ||
| 1241 | (24 words * 128) total 6K bytes */ | ||
| 1242 | #define Extern_size 0x60 /* 96 bytes */ | ||
| 1243 | #define RXrptr 0x00 /* read pointer for RX buffer */ | ||
| 1244 | #define RXwptr 0x02 /* write pointer for RX buffer */ | ||
| 1245 | #define TXrptr 0x04 /* read pointer for TX buffer */ | ||
| 1246 | #define TXwptr 0x06 /* write pointer for TX buffer */ | ||
| 1247 | #define HostStat 0x08 /* IRQ flag and general flag */ | ||
| 1248 | #define FlagStat 0x0A | ||
| 1249 | #define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */ | ||
| 1250 | /* x x x x | | | | */ | ||
| 1251 | /* | | | + CTS flow */ | ||
| 1252 | /* | | +--- RTS flow */ | ||
| 1253 | /* | +------ TX Xon/Xoff */ | ||
| 1254 | /* +--------- RX Xon/Xoff */ | ||
| 1255 | #define Break_cnt 0x0E /* received break count */ | ||
| 1256 | #define CD180TXirq 0x10 /* if non-0: enable TX irq */ | ||
| 1257 | #define RX_mask 0x12 | ||
| 1258 | #define TX_mask 0x14 | ||
| 1259 | #define Ofs_rxb 0x16 | ||
| 1260 | #define Ofs_txb 0x18 | ||
| 1261 | #define Page_rxb 0x1A | ||
| 1262 | #define Page_txb 0x1C | ||
| 1263 | #define EndPage_rxb 0x1E | ||
| 1264 | #define EndPage_txb 0x20 | ||
| 1265 | #define Data_error 0x22 | ||
| 1266 | #define RxTrigger 0x28 | ||
| 1267 | #define TxTrigger 0x2a | ||
| 1268 | |||
| 1269 | #define rRXwptr 0x34 | ||
| 1270 | #define Low_water 0x36 | ||
| 1271 | |||
| 1272 | #define FuncCode 0x40 | ||
| 1273 | #define FuncArg 0x42 | ||
| 1274 | #define FuncArg1 0x44 | ||
| 1275 | |||
| 1276 | #define C218rx_size 0x2000 /* 8K bytes */ | ||
| 1277 | #define C218tx_size 0x8000 /* 32K bytes */ | ||
| 1278 | |||
| 1279 | #define C218rx_mask (C218rx_size - 1) | ||
| 1280 | #define C218tx_mask (C218tx_size - 1) | ||
| 1281 | |||
| 1282 | #define C320p8rx_size 0x2000 | ||
| 1283 | #define C320p8tx_size 0x8000 | ||
| 1284 | #define C320p8rx_mask (C320p8rx_size - 1) | ||
| 1285 | #define C320p8tx_mask (C320p8tx_size - 1) | ||
| 1286 | |||
| 1287 | #define C320p16rx_size 0x2000 | ||
| 1288 | #define C320p16tx_size 0x4000 | ||
| 1289 | #define C320p16rx_mask (C320p16rx_size - 1) | ||
| 1290 | #define C320p16tx_mask (C320p16tx_size - 1) | ||
| 1291 | |||
| 1292 | #define C320p24rx_size 0x2000 | ||
| 1293 | #define C320p24tx_size 0x2000 | ||
| 1294 | #define C320p24rx_mask (C320p24rx_size - 1) | ||
| 1295 | #define C320p24tx_mask (C320p24tx_size - 1) | ||
| 1296 | |||
| 1297 | #define C320p32rx_size 0x1000 | ||
| 1298 | #define C320p32tx_size 0x1000 | ||
| 1299 | #define C320p32rx_mask (C320p32rx_size - 1) | ||
| 1300 | #define C320p32tx_mask (C320p32tx_size - 1) | ||
| 1301 | |||
| 1302 | #define Page_size 0x2000 | ||
| 1303 | #define Page_mask (Page_size - 1) | ||
| 1304 | #define C218rx_spage 3 | ||
| 1305 | #define C218tx_spage 4 | ||
| 1306 | #define C218rx_pageno 1 | ||
| 1307 | #define C218tx_pageno 4 | ||
| 1308 | #define C218buf_pageno 5 | ||
| 1309 | |||
| 1310 | #define C320p8rx_spage 3 | ||
| 1311 | #define C320p8tx_spage 4 | ||
| 1312 | #define C320p8rx_pgno 1 | ||
| 1313 | #define C320p8tx_pgno 4 | ||
| 1314 | #define C320p8buf_pgno 5 | ||
| 1315 | |||
| 1316 | #define C320p16rx_spage 3 | ||
| 1317 | #define C320p16tx_spage 4 | ||
| 1318 | #define C320p16rx_pgno 1 | ||
| 1319 | #define C320p16tx_pgno 2 | ||
| 1320 | #define C320p16buf_pgno 3 | ||
| 1321 | |||
| 1322 | #define C320p24rx_spage 3 | ||
| 1323 | #define C320p24tx_spage 4 | ||
| 1324 | #define C320p24rx_pgno 1 | ||
| 1325 | #define C320p24tx_pgno 1 | ||
| 1326 | #define C320p24buf_pgno 2 | ||
| 1327 | |||
| 1328 | #define C320p32rx_spage 3 | ||
| 1329 | #define C320p32tx_ofs C320p32rx_size | ||
| 1330 | #define C320p32tx_spage 3 | ||
| 1331 | #define C320p32buf_pgno 1 | ||
| 1332 | |||
| 1333 | /* | ||
| 1334 | * Host Status | ||
| 1335 | */ | ||
| 1336 | #define WakeupRx 0x01 | ||
| 1337 | #define WakeupTx 0x02 | ||
| 1338 | #define WakeupBreak 0x08 | ||
| 1339 | #define WakeupLine 0x10 | ||
| 1340 | #define WakeupIntr 0x20 | ||
| 1341 | #define WakeupQuit 0x40 | ||
| 1342 | #define WakeupEOF 0x80 /* used in VTIME control */ | ||
| 1343 | #define WakeupRxTrigger 0x100 | ||
| 1344 | #define WakeupTxTrigger 0x200 | ||
| 1345 | /* | ||
| 1346 | * Flag status | ||
| 1347 | */ | ||
| 1348 | #define Rx_over 0x01 | ||
| 1349 | #define Xoff_state 0x02 | ||
| 1350 | #define Tx_flowOff 0x04 | ||
| 1351 | #define Tx_enable 0x08 | ||
| 1352 | #define CTS_state 0x10 | ||
| 1353 | #define DSR_state 0x20 | ||
| 1354 | #define DCD_state 0x80 | ||
| 1355 | /* | ||
| 1356 | * FlowControl | ||
| 1357 | */ | ||
| 1358 | #define CTS_FlowCtl 1 | ||
| 1359 | #define RTS_FlowCtl 2 | ||
| 1360 | #define Tx_FlowCtl 4 | ||
| 1361 | #define Rx_FlowCtl 8 | ||
| 1362 | #define IXM_IXANY 0x10 | ||
| 1363 | |||
| 1364 | #define LowWater 128 | ||
| 1365 | |||
| 1366 | #define DTR_ON 1 | ||
| 1367 | #define RTS_ON 2 | ||
| 1368 | #define CTS_ON 1 | ||
| 1369 | #define DSR_ON 2 | ||
| 1370 | #define DCD_ON 8 | ||
| 1371 | |||
| 1372 | /* mode definition */ | ||
| 1373 | #define MX_CS8 0x03 | ||
| 1374 | #define MX_CS7 0x02 | ||
| 1375 | #define MX_CS6 0x01 | ||
| 1376 | #define MX_CS5 0x00 | ||
| 1377 | |||
| 1378 | #define MX_STOP1 0x00 | ||
| 1379 | #define MX_STOP15 0x04 | ||
| 1380 | #define MX_STOP2 0x08 | ||
| 1381 | |||
| 1382 | #define MX_PARNONE 0x00 | ||
| 1383 | #define MX_PAREVEN 0x40 | ||
| 1384 | #define MX_PARODD 0xC0 | ||
| 1385 | |||
| 1386 | /* | ||
| 1387 | * Query | ||
| 1388 | */ | ||
| 1389 | |||
| 1390 | struct mon_str { | ||
| 1391 | int tick; | ||
| 1392 | int rxcnt[MAX_PORTS]; | ||
| 1393 | int txcnt[MAX_PORTS]; | ||
| 1394 | }; | ||
| 1395 | |||
| 1396 | #define DCD_changed 0x01 | ||
| 1397 | #define DCD_oldstate 0x80 | ||
| 1398 | |||
| 1399 | static unsigned char moxaBuff[10240]; | ||
| 1400 | static int moxaLowWaterChk; | ||
| 1401 | static int moxaCard; | ||
| 1402 | static struct mon_str moxaLog; | ||
| 1403 | static int moxaFuncTout = HZ / 2; | ||
| 1404 | |||
| 1405 | static void moxafunc(void __iomem *, int, ushort); | ||
| 1406 | static void moxa_wait_finish(void __iomem *); | ||
| 1407 | static void moxa_low_water_check(void __iomem *); | ||
| 1408 | static int moxaloadbios(int, unsigned char __user *, int); | ||
| 1409 | static int moxafindcard(int); | ||
| 1410 | static int moxaload320b(int, unsigned char __user *, int); | ||
| 1411 | static int moxaloadcode(int, unsigned char __user *, int); | ||
| 1412 | static int moxaloadc218(int, void __iomem *, int); | ||
| 1413 | static int moxaloadc320(int, void __iomem *, int, int *); | ||
| 1414 | |||
| 1415 | /***************************************************************************** | 1587 | /***************************************************************************** |
| 1416 | * Driver level functions: * | 1588 | * Driver level functions: * |
| 1417 | * 1. MoxaDriverInit(void); * | ||
| 1418 | * 2. MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port); * | ||
| 1419 | * 3. MoxaDriverPoll(void); * | ||
| 1420 | *****************************************************************************/ | 1589 | *****************************************************************************/ |
| 1421 | void MoxaDriverInit(void) | ||
| 1422 | { | ||
| 1423 | struct moxa_port *p; | ||
| 1424 | unsigned int i; | ||
| 1425 | 1590 | ||
| 1426 | moxaFuncTout = HZ / 2; /* 500 milliseconds */ | 1591 | static void MoxaPortFlushData(struct moxa_port *port, int mode) |
| 1427 | moxaCard = 0; | ||
| 1428 | moxaLog.tick = 0; | ||
| 1429 | moxaLowWaterChk = 0; | ||
| 1430 | for (i = 0; i < MAX_PORTS; i++) { | ||
| 1431 | p = &moxa_ports[i]; | ||
| 1432 | p->chkPort = 0; | ||
| 1433 | p->lowChkFlag = 0; | ||
| 1434 | p->lineCtrl = 0; | ||
| 1435 | moxaLog.rxcnt[i] = 0; | ||
| 1436 | moxaLog.txcnt[i] = 0; | ||
| 1437 | } | ||
| 1438 | } | ||
| 1439 | |||
| 1440 | #define MOXA 0x400 | ||
| 1441 | #define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */ | ||
| 1442 | #define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */ | ||
| 1443 | #define MOXA_INIT_DRIVER (MOXA + 6) /* moxaCard=0 */ | ||
| 1444 | #define MOXA_LOAD_BIOS (MOXA + 9) /* download BIOS */ | ||
| 1445 | #define MOXA_FIND_BOARD (MOXA + 10) /* check whether a MOXA card exists */ | ||
| 1446 | #define MOXA_LOAD_C320B (MOXA + 11) /* download 320B firmware */ | ||
| 1447 | #define MOXA_LOAD_CODE (MOXA + 12) /* download firmware */ | ||
| 1448 | #define MOXA_GETDATACOUNT (MOXA + 23) | ||
| 1449 | #define MOXA_GET_IOQUEUE (MOXA + 27) | ||
| 1450 | #define MOXA_FLUSH_QUEUE (MOXA + 28) | ||
| 1451 | #define MOXA_GET_CONF (MOXA + 35) /* configuration */ | ||
| 1452 | #define MOXA_GET_MAJOR (MOXA + 63) | ||
| 1453 | #define MOXA_GET_CUMAJOR (MOXA + 64) | ||
| 1454 | #define MOXA_GETMSTATUS (MOXA + 65) | ||
| 1455 | |||
| 1456 | struct dl_str { | ||
| 1457 | char __user *buf; | ||
| 1458 | int len; | ||
| 1459 | int cardno; | ||
| 1460 | }; | ||
| 1461 | |||
| 1462 | static struct dl_str dltmp; | ||
| 1463 | |||
| 1464 | void MoxaPortFlushData(int port, int mode) | ||
| 1465 | { | 1592 | { |
| 1466 | void __iomem *ofsAddr; | 1593 | void __iomem *ofsAddr; |
| 1467 | if ((mode < 0) || (mode > 2)) | 1594 | if (mode < 0 || mode > 2) |
| 1468 | return; | 1595 | return; |
| 1469 | ofsAddr = moxa_ports[port].tableAddr; | 1596 | ofsAddr = port->tableAddr; |
| 1470 | moxafunc(ofsAddr, FC_FlushQueue, mode); | 1597 | moxafunc(ofsAddr, FC_FlushQueue, mode); |
| 1471 | if (mode != 1) { | 1598 | if (mode != 1) { |
| 1472 | moxa_ports[port].lowChkFlag = 0; | 1599 | port->lowChkFlag = 0; |
| 1473 | moxa_low_water_check(ofsAddr); | 1600 | moxa_low_water_check(ofsAddr); |
| 1474 | } | 1601 | } |
| 1475 | } | 1602 | } |
| 1476 | 1603 | ||
| 1477 | int MoxaDriverIoctl(unsigned int cmd, unsigned long arg, int port) | ||
| 1478 | { | ||
| 1479 | int i; | ||
| 1480 | int status; | ||
| 1481 | int MoxaPortTxQueue(int), MoxaPortRxQueue(int); | ||
| 1482 | void __user *argp = (void __user *)arg; | ||
| 1483 | |||
| 1484 | if (port == MAX_PORTS) { | ||
| 1485 | if ((cmd != MOXA_GET_CONF) && (cmd != MOXA_INIT_DRIVER) && | ||
| 1486 | (cmd != MOXA_LOAD_BIOS) && (cmd != MOXA_FIND_BOARD) && (cmd != MOXA_LOAD_C320B) && | ||
| 1487 | (cmd != MOXA_LOAD_CODE) && (cmd != MOXA_GETDATACOUNT) && | ||
| 1488 | (cmd != MOXA_GET_IOQUEUE) && (cmd != MOXA_GET_MAJOR) && | ||
| 1489 | (cmd != MOXA_GET_CUMAJOR) && (cmd != MOXA_GETMSTATUS)) | ||
| 1490 | return (-EINVAL); | ||
| 1491 | } | ||
| 1492 | switch (cmd) { | ||
| 1493 | case MOXA_GET_CONF: | ||
| 1494 | if(copy_to_user(argp, &moxa_boards, MAX_BOARDS * | ||
| 1495 | sizeof(struct moxa_board_conf))) | ||
| 1496 | return -EFAULT; | ||
| 1497 | return (0); | ||
| 1498 | case MOXA_INIT_DRIVER: | ||
| 1499 | if ((int) arg == 0x404) | ||
| 1500 | MoxaDriverInit(); | ||
| 1501 | return (0); | ||
| 1502 | case MOXA_GETDATACOUNT: | ||
| 1503 | moxaLog.tick = jiffies; | ||
| 1504 | if(copy_to_user(argp, &moxaLog, sizeof(struct mon_str))) | ||
| 1505 | return -EFAULT; | ||
| 1506 | return (0); | ||
| 1507 | case MOXA_FLUSH_QUEUE: | ||
| 1508 | MoxaPortFlushData(port, arg); | ||
| 1509 | return (0); | ||
| 1510 | case MOXA_GET_IOQUEUE: { | ||
| 1511 | struct moxaq_str __user *argm = argp; | ||
| 1512 | struct moxaq_str tmp; | ||
| 1513 | |||
| 1514 | for (i = 0; i < MAX_PORTS; i++, argm++) { | ||
| 1515 | memset(&tmp, 0, sizeof(tmp)); | ||
| 1516 | if (moxa_ports[i].chkPort) { | ||
| 1517 | tmp.inq = MoxaPortRxQueue(i); | ||
| 1518 | tmp.outq = MoxaPortTxQueue(i); | ||
| 1519 | } | ||
| 1520 | if (copy_to_user(argm, &tmp, sizeof(tmp))) | ||
| 1521 | return -EFAULT; | ||
| 1522 | } | ||
| 1523 | return (0); | ||
| 1524 | } case MOXA_GET_OQUEUE: | ||
| 1525 | i = MoxaPortTxQueue(port); | ||
| 1526 | return put_user(i, (unsigned long __user *)argp); | ||
| 1527 | case MOXA_GET_IQUEUE: | ||
| 1528 | i = MoxaPortRxQueue(port); | ||
| 1529 | return put_user(i, (unsigned long __user *)argp); | ||
| 1530 | case MOXA_GET_MAJOR: | ||
| 1531 | if(copy_to_user(argp, &ttymajor, sizeof(int))) | ||
| 1532 | return -EFAULT; | ||
| 1533 | return 0; | ||
| 1534 | case MOXA_GET_CUMAJOR: | ||
| 1535 | i = 0; | ||
| 1536 | if(copy_to_user(argp, &i, sizeof(int))) | ||
| 1537 | return -EFAULT; | ||
| 1538 | return 0; | ||
| 1539 | case MOXA_GETMSTATUS: { | ||
| 1540 | struct mxser_mstatus __user *argm = argp; | ||
| 1541 | struct mxser_mstatus tmp; | ||
| 1542 | struct moxa_port *p; | ||
| 1543 | |||
| 1544 | for (i = 0; i < MAX_PORTS; i++, argm++) { | ||
| 1545 | p = &moxa_ports[i]; | ||
| 1546 | memset(&tmp, 0, sizeof(tmp)); | ||
| 1547 | if (!p->chkPort) { | ||
| 1548 | goto copy; | ||
| 1549 | } else { | ||
| 1550 | status = MoxaPortLineStatus(p->port); | ||
| 1551 | if (status & 1) | ||
| 1552 | tmp.cts = 1; | ||
| 1553 | if (status & 2) | ||
| 1554 | tmp.dsr = 1; | ||
| 1555 | if (status & 4) | ||
| 1556 | tmp.dcd = 1; | ||
| 1557 | } | ||
| 1558 | |||
| 1559 | if (!p->tty || !p->tty->termios) | ||
| 1560 | tmp.cflag = p->cflag; | ||
| 1561 | else | ||
| 1562 | tmp.cflag = p->tty->termios->c_cflag; | ||
| 1563 | copy: | ||
| 1564 | if (copy_to_user(argm, &tmp, sizeof(tmp))) | ||
| 1565 | return -EFAULT; | ||
| 1566 | } | ||
| 1567 | return 0; | ||
| 1568 | } default: | ||
| 1569 | return (-ENOIOCTLCMD); | ||
| 1570 | case MOXA_LOAD_BIOS: | ||
| 1571 | case MOXA_FIND_BOARD: | ||
| 1572 | case MOXA_LOAD_C320B: | ||
| 1573 | case MOXA_LOAD_CODE: | ||
| 1574 | if (!capable(CAP_SYS_RAWIO)) | ||
| 1575 | return -EPERM; | ||
| 1576 | break; | ||
| 1577 | } | ||
| 1578 | |||
| 1579 | if(copy_from_user(&dltmp, argp, sizeof(struct dl_str))) | ||
| 1580 | return -EFAULT; | ||
| 1581 | if(dltmp.cardno < 0 || dltmp.cardno >= MAX_BOARDS || dltmp.len < 0) | ||
| 1582 | return -EINVAL; | ||
| 1583 | |||
| 1584 | switch(cmd) | ||
| 1585 | { | ||
| 1586 | case MOXA_LOAD_BIOS: | ||
| 1587 | i = moxaloadbios(dltmp.cardno, dltmp.buf, dltmp.len); | ||
| 1588 | return (i); | ||
| 1589 | case MOXA_FIND_BOARD: | ||
| 1590 | return moxafindcard(dltmp.cardno); | ||
| 1591 | case MOXA_LOAD_C320B: | ||
| 1592 | moxaload320b(dltmp.cardno, dltmp.buf, dltmp.len); | ||
| 1593 | default: /* to keep gcc happy */ | ||
| 1594 | return (0); | ||
| 1595 | case MOXA_LOAD_CODE: | ||
| 1596 | i = moxaloadcode(dltmp.cardno, dltmp.buf, dltmp.len); | ||
| 1597 | if (i == -1) | ||
| 1598 | return (-EFAULT); | ||
| 1599 | return (i); | ||
| 1600 | |||
| 1601 | } | ||
| 1602 | } | ||
| 1603 | |||
| 1604 | int MoxaDriverPoll(void) | ||
| 1605 | { | ||
| 1606 | struct moxa_board_conf *brd; | ||
| 1607 | register ushort temp; | ||
| 1608 | register int card; | ||
| 1609 | void __iomem *ofsAddr; | ||
| 1610 | void __iomem *ip; | ||
| 1611 | int port, p, ports; | ||
| 1612 | |||
| 1613 | if (moxaCard == 0) | ||
| 1614 | return (-1); | ||
| 1615 | for (card = 0; card < MAX_BOARDS; card++) { | ||
| 1616 | brd = &moxa_boards[card]; | ||
| 1617 | if (brd->loadstat == 0) | ||
| 1618 | continue; | ||
| 1619 | if ((ports = brd->numPorts) == 0) | ||
| 1620 | continue; | ||
| 1621 | if (readb(brd->intPend) == 0xff) { | ||
| 1622 | ip = brd->intTable + readb(brd->intNdx); | ||
| 1623 | p = card * MAX_PORTS_PER_BOARD; | ||
| 1624 | ports <<= 1; | ||
| 1625 | for (port = 0; port < ports; port += 2, p++) { | ||
| 1626 | if ((temp = readw(ip + port)) != 0) { | ||
| 1627 | writew(0, ip + port); | ||
| 1628 | ofsAddr = moxa_ports[p].tableAddr; | ||
| 1629 | if (temp & IntrTx) | ||
| 1630 | writew(readw(ofsAddr + HostStat) & ~WakeupTx, ofsAddr + HostStat); | ||
| 1631 | if (temp & IntrBreak) { | ||
| 1632 | moxa_ports[p].breakCnt++; | ||
| 1633 | } | ||
| 1634 | if (temp & IntrLine) { | ||
| 1635 | if (readb(ofsAddr + FlagStat) & DCD_state) { | ||
| 1636 | if ((moxa_ports[p].DCDState & DCD_oldstate) == 0) | ||
| 1637 | moxa_ports[p].DCDState = (DCD_oldstate | | ||
| 1638 | DCD_changed); | ||
| 1639 | } else { | ||
| 1640 | if (moxa_ports[p].DCDState & DCD_oldstate) | ||
| 1641 | moxa_ports[p].DCDState = DCD_changed; | ||
| 1642 | } | ||
| 1643 | } | ||
| 1644 | } | ||
| 1645 | } | ||
| 1646 | writeb(0, brd->intPend); | ||
| 1647 | } | ||
| 1648 | if (moxaLowWaterChk) { | ||
| 1649 | p = card * MAX_PORTS_PER_BOARD; | ||
| 1650 | for (port = 0; port < ports; port++, p++) { | ||
| 1651 | if (moxa_ports[p].lowChkFlag) { | ||
| 1652 | moxa_ports[p].lowChkFlag = 0; | ||
| 1653 | ofsAddr = moxa_ports[p].tableAddr; | ||
| 1654 | moxa_low_water_check(ofsAddr); | ||
| 1655 | } | ||
| 1656 | } | ||
| 1657 | } | ||
| 1658 | } | ||
| 1659 | moxaLowWaterChk = 0; | ||
| 1660 | return (0); | ||
| 1661 | } | ||
| 1662 | |||
| 1663 | /***************************************************************************** | ||
| 1664 | * Card level function: * | ||
| 1665 | * 1. MoxaPortsOfCard(int cardno); * | ||
| 1666 | *****************************************************************************/ | ||
| 1667 | int MoxaPortsOfCard(int cardno) | ||
| 1668 | { | ||
| 1669 | |||
| 1670 | if (moxa_boards[cardno].boardType == 0) | ||
| 1671 | return (0); | ||
| 1672 | return (moxa_boards[cardno].numPorts); | ||
| 1673 | } | ||
| 1674 | |||
| 1675 | /***************************************************************************** | ||
| 1676 | * Port level functions: * | ||
| 1677 | * 1. MoxaPortIsValid(int port); * | ||
| 1678 | * 2. MoxaPortEnable(int port); * | ||
| 1679 | * 3. MoxaPortDisable(int port); * | ||
| 1680 | * 4. MoxaPortGetMaxBaud(int port); * | ||
| 1681 | * 6. MoxaPortSetBaud(int port, long baud); * | ||
| 1682 | * 8. MoxaPortSetTermio(int port, unsigned char *termio); * | ||
| 1683 | * 9. MoxaPortGetLineOut(int port, int *dtrState, int *rtsState); * | ||
| 1684 | * 10. MoxaPortLineCtrl(int port, int dtrState, int rtsState); * | ||
| 1685 | * 11. MoxaPortFlowCtrl(int port, int rts, int cts, int rx, int tx,int xany); * | ||
| 1686 | * 12. MoxaPortLineStatus(int port); * | ||
| 1687 | * 13. MoxaPortDCDChange(int port); * | ||
| 1688 | * 14. MoxaPortDCDON(int port); * | ||
| 1689 | * 15. MoxaPortFlushData(int port, int mode); * | ||
| 1690 | * 16. MoxaPortWriteData(int port, unsigned char * buffer, int length); * | ||
| 1691 | * 17. MoxaPortReadData(int port, struct tty_struct *tty); * | ||
| 1692 | * 20. MoxaPortTxQueue(int port); * | ||
| 1693 | * 21. MoxaPortTxFree(int port); * | ||
| 1694 | * 22. MoxaPortRxQueue(int port); * | ||
| 1695 | * 24. MoxaPortTxDisable(int port); * | ||
| 1696 | * 25. MoxaPortTxEnable(int port); * | ||
| 1697 | * 27. MoxaPortResetBrkCnt(int port); * | ||
| 1698 | * 30. MoxaPortSendBreak(int port, int ticks); * | ||
| 1699 | *****************************************************************************/ | ||
| 1700 | /* | 1604 | /* |
| 1701 | * Moxa Port Number Description: | 1605 | * Moxa Port Number Description: |
| 1702 | * | 1606 | * |
| @@ -1733,33 +1637,6 @@ int MoxaPortsOfCard(int cardno) | |||
| 1733 | * -ENOIOCTLCMD | 1637 | * -ENOIOCTLCMD |
| 1734 | * | 1638 | * |
| 1735 | * | 1639 | * |
| 1736 | * Function 3: Moxa driver polling process routine. | ||
| 1737 | * Syntax: | ||
| 1738 | * int MoxaDriverPoll(void); | ||
| 1739 | * | ||
| 1740 | * return: 0 : polling O.K. | ||
| 1741 | * -1 : no Moxa card present. | ||
| 1742 | * | ||
| 1743 | * | ||
| 1744 | * Function 4: Get the ports of this card. | ||
| 1745 | * Syntax: | ||
| 1746 | * int MoxaPortsOfCard(int cardno); | ||
| 1747 | * | ||
| 1748 | * int cardno : card number (0 - 3) | ||
| 1749 | * | ||
| 1750 | * return: 0 : this card is invalid | ||
| 1751 | * 8/16/24/32 | ||
| 1752 | * | ||
| 1753 | * | ||
| 1754 | * Function 5: Check whether this port is valid or invalid | ||
| 1755 | * Syntax: | ||
| 1756 | * int MoxaPortIsValid(int port); | ||
| 1757 | * int port : port number (0 - 127, ref port description) | ||
| 1758 | * | ||
| 1759 | * return: 0 : this port is invalid | ||
| 1760 | * 1 : this port is valid | ||
| 1761 | * | ||
| 1762 | * | ||
| 1763 | * Function 6: Enable this port to start Tx/Rx data. | 1640 | * Function 6: Enable this port to start Tx/Rx data. |
| 1764 | * Syntax: | 1641 | * Syntax: |
| 1765 | * void MoxaPortEnable(int port); | 1642 | * void MoxaPortEnable(int port); |
| @@ -1772,18 +1649,9 @@ int MoxaPortsOfCard(int cardno) | |||
| 1772 | * int port : port number (0 - 127) | 1649 | * int port : port number (0 - 127) |
| 1773 | * | 1650 | * |
| 1774 | * | 1651 | * |
| 1775 | * Function 8: Get the maximum available baud rate of this port. | ||
| 1776 | * Syntax: | ||
| 1777 | * long MoxaPortGetMaxBaud(int port); | ||
| 1778 | * int port : port number (0 - 127) | ||
| 1779 | * | ||
| 1780 | * return: 0 : this port is invalid | ||
| 1781 | * 38400/57600/115200 bps | ||
| 1782 | * | ||
| 1783 | * | ||
| 1784 | * Function 10: Setting baud rate of this port. | 1652 | * Function 10: Setting baud rate of this port. |
| 1785 | * Syntax: | 1653 | * Syntax: |
| 1786 | * long MoxaPortSetBaud(int port, long baud); | 1654 | * speed_t MoxaPortSetBaud(int port, speed_t baud); |
| 1787 | * int port : port number (0 - 127) | 1655 | * int port : port number (0 - 127) |
| 1788 | * long baud : baud rate (50 - 115200) | 1656 | * long baud : baud rate (50 - 115200) |
| 1789 | * | 1657 | * |
| @@ -1850,25 +1718,6 @@ int MoxaPortsOfCard(int cardno) | |||
| 1850 | * Bit 2 - DCD state (0: off, 1: on) | 1718 | * Bit 2 - DCD state (0: off, 1: on) |
| 1851 | * | 1719 | * |
| 1852 | * | 1720 | * |
| 1853 | * Function 17: Check whether the DCD state has changed since the last read | ||
| 1854 | * of this function. | ||
| 1855 | * Syntax: | ||
| 1856 | * int MoxaPortDCDChange(int port); | ||
| 1857 | * int port : port number (0 - 127) | ||
| 1858 | * | ||
| 1859 | * return: 0 : not changed | ||
| 1860 | * 1 : DCD has changed | ||
| 1861 | * | ||
| 1862 | * | ||
| 1863 | * Function 18: Check whether the current DCD state is ON or not. | ||
| 1864 | * Syntax: | ||
| 1865 | * int MoxaPortDCDON(int port); | ||
| 1866 | * int port : port number (0 - 127) | ||
| 1867 | * | ||
| 1868 | * return: 0 : DCD off | ||
| 1869 | * 1 : DCD on | ||
| 1870 | * | ||
| 1871 | * | ||
| 1872 | * Function 19: Flush the Rx/Tx buffer data of this port. | 1721 | * Function 19: Flush the Rx/Tx buffer data of this port. |
| 1873 | * Syntax: | 1722 | * Syntax: |
| 1874 | * void MoxaPortFlushData(int port, int mode); | 1723 | * void MoxaPortFlushData(int port, int mode); |
| @@ -1942,40 +1791,20 @@ int MoxaPortsOfCard(int cardno) | |||
| 1942 | * return: 0 - .. : BREAK signal count | 1791 | * return: 0 - .. : BREAK signal count |
| 1943 | * | 1792 | * |
| 1944 | * | 1793 | * |
| 1945 | * Function 34: Send out a BREAK signal. | ||
| 1946 | * Syntax: | ||
| 1947 | * void MoxaPortSendBreak(int port, int ms100); | ||
| 1948 | * int port : port number (0 - 127) | ||
| 1949 | * int ms100 : break signal time interval. | ||
| 1950 | * unit: 100 milliseconds. If ms100 == 0, it will | ||
| 1951 | * send out an approximately 250 ms BREAK signal. | ||
| 1952 | * | ||
| 1953 | */ | 1794 | */ |
| 1954 | int MoxaPortIsValid(int port) | ||
| 1955 | { | ||
| 1956 | |||
| 1957 | if (moxaCard == 0) | ||
| 1958 | return (0); | ||
| 1959 | if (moxa_ports[port].chkPort == 0) | ||
| 1960 | return (0); | ||
| 1961 | return (1); | ||
| 1962 | } | ||
| 1963 | 1795 | ||
| 1964 | void MoxaPortEnable(int port) | 1796 | static void MoxaPortEnable(struct moxa_port *port) |
| 1965 | { | 1797 | { |
| 1966 | void __iomem *ofsAddr; | 1798 | void __iomem *ofsAddr; |
| 1967 | int MoxaPortLineStatus(int); | 1799 | u16 lowwater = 512; |
| 1968 | short lowwater = 512; | ||
| 1969 | 1800 | ||
| 1970 | ofsAddr = moxa_ports[port].tableAddr; | 1801 | ofsAddr = port->tableAddr; |
| 1971 | writew(lowwater, ofsAddr + Low_water); | 1802 | writew(lowwater, ofsAddr + Low_water); |
| 1972 | moxa_ports[port].breakCnt = 0; | 1803 | if (MOXA_IS_320(port->board)) |
| 1973 | if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) || | ||
| 1974 | (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) { | ||
| 1975 | moxafunc(ofsAddr, FC_SetBreakIrq, 0); | 1804 | moxafunc(ofsAddr, FC_SetBreakIrq, 0); |
| 1976 | } else { | 1805 | else |
| 1977 | writew(readw(ofsAddr + HostStat) | WakeupBreak, ofsAddr + HostStat); | 1806 | writew(readw(ofsAddr + HostStat) | WakeupBreak, |
| 1978 | } | 1807 | ofsAddr + HostStat); |
| 1979 | 1808 | ||
| 1980 | moxafunc(ofsAddr, FC_SetLineIrq, Magic_code); | 1809 | moxafunc(ofsAddr, FC_SetLineIrq, Magic_code); |
| 1981 | moxafunc(ofsAddr, FC_FlushQueue, 2); | 1810 | moxafunc(ofsAddr, FC_FlushQueue, 2); |
| @@ -1984,9 +1813,9 @@ void MoxaPortEnable(int port) | |||
| 1984 | MoxaPortLineStatus(port); | 1813 | MoxaPortLineStatus(port); |
| 1985 | } | 1814 | } |
| 1986 | 1815 | ||
| 1987 | void MoxaPortDisable(int port) | 1816 | static void MoxaPortDisable(struct moxa_port *port) |
| 1988 | { | 1817 | { |
| 1989 | void __iomem *ofsAddr = moxa_ports[port].tableAddr; | 1818 | void __iomem *ofsAddr = port->tableAddr; |
| 1990 | 1819 | ||
| 1991 | moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */ | 1820 | moxafunc(ofsAddr, FC_SetFlowCtl, 0); /* disable flow control */ |
| 1992 | moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code); | 1821 | moxafunc(ofsAddr, FC_ClrLineIrq, Magic_code); |
| @@ -1994,49 +1823,32 @@ void MoxaPortDisable(int port) | |||
| 1994 | moxafunc(ofsAddr, FC_DisableCH, Magic_code); | 1823 | moxafunc(ofsAddr, FC_DisableCH, Magic_code); |
| 1995 | } | 1824 | } |
| 1996 | 1825 | ||
| 1997 | long MoxaPortGetMaxBaud(int port) | 1826 | static speed_t MoxaPortSetBaud(struct moxa_port *port, speed_t baud) |
| 1998 | { | ||
| 1999 | if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) || | ||
| 2000 | (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) | ||
| 2001 | return (460800L); | ||
| 2002 | else | ||
| 2003 | return (921600L); | ||
| 2004 | } | ||
| 2005 | |||
| 2006 | |||
| 2007 | long MoxaPortSetBaud(int port, long baud) | ||
| 2008 | { | 1827 | { |
| 2009 | void __iomem *ofsAddr; | 1828 | void __iomem *ofsAddr = port->tableAddr; |
| 2010 | long max, clock; | 1829 | unsigned int clock, val; |
| 2011 | unsigned int val; | 1830 | speed_t max; |
| 2012 | 1831 | ||
| 2013 | if ((baud < 50L) || ((max = MoxaPortGetMaxBaud(port)) == 0)) | 1832 | max = MOXA_IS_320(port->board) ? 460800 : 921600; |
| 2014 | return (0); | 1833 | if (baud < 50) |
| 2015 | ofsAddr = moxa_ports[port].tableAddr; | 1834 | return 0; |
| 2016 | if (baud > max) | 1835 | if (baud > max) |
| 2017 | baud = max; | 1836 | baud = max; |
| 2018 | if (max == 38400L) | 1837 | clock = 921600; |
| 2019 | clock = 614400L; /* for 9.8304 Mhz : max. 38400 bps */ | ||
| 2020 | else if (max == 57600L) | ||
| 2021 | clock = 691200L; /* for 11.0592 Mhz : max. 57600 bps */ | ||
| 2022 | else | ||
| 2023 | clock = 921600L; /* for 14.7456 Mhz : max. 115200 bps */ | ||
| 2024 | val = clock / baud; | 1838 | val = clock / baud; |
| 2025 | moxafunc(ofsAddr, FC_SetBaud, val); | 1839 | moxafunc(ofsAddr, FC_SetBaud, val); |
| 2026 | baud = clock / val; | 1840 | baud = clock / val; |
| 2027 | moxa_ports[port].curBaud = baud; | 1841 | return baud; |
| 2028 | return (baud); | ||
| 2029 | } | 1842 | } |
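The reworked MoxaPortSetBaud() above boils down to an integer divisor against a fixed 921600 Hz clock: the divisor is written with FC_SetBaud and the function returns clock / divisor, so the rate actually programmed can differ from the requested one whenever the division is inexact. A minimal userspace sketch of that rounding, for illustration only (the helper name is invented; the constants mirror the diff, using the non-C320 maximum):

#include <stdio.h>

/* Illustrative only: mirrors the divisor arithmetic in MoxaPortSetBaud(). */
static unsigned int moxa_effective_baud(unsigned int requested)
{
	const unsigned int clock = 921600;	/* fixed clock used by the new code */
	unsigned int val;

	if (requested < 50)
		return 0;			/* the driver rejects rates below 50 bps */
	if (requested > clock)
		requested = clock;		/* clamped to the board maximum */
	val = clock / requested;		/* divisor passed to FC_SetBaud */
	return clock / val;			/* rate the firmware really uses */
}

int main(void)
{
	/* 38400 divides 921600 evenly; 250000 does not and becomes 307200. */
	printf("%u -> %u\n", 38400, moxa_effective_baud(38400));
	printf("%u -> %u\n", 250000, moxa_effective_baud(250000));
	return 0;
}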
| 2030 | 1843 | ||
| 2031 | int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) | 1844 | static int MoxaPortSetTermio(struct moxa_port *port, struct ktermios *termio, |
| 1845 | speed_t baud) | ||
| 2032 | { | 1846 | { |
| 2033 | void __iomem *ofsAddr; | 1847 | void __iomem *ofsAddr; |
| 2034 | tcflag_t cflag; | 1848 | tcflag_t cflag; |
| 2035 | tcflag_t mode = 0; | 1849 | tcflag_t mode = 0; |
| 2036 | 1850 | ||
| 2037 | if (moxa_ports[port].chkPort == 0 || termio == 0) | 1851 | ofsAddr = port->tableAddr; |
| 2038 | return (-1); | ||
| 2039 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2040 | cflag = termio->c_cflag; /* termio->c_cflag */ | 1852 | cflag = termio->c_cflag; /* termio->c_cflag */ |
| 2041 | 1853 | ||
| 2042 | mode = termio->c_cflag & CSIZE; | 1854 | mode = termio->c_cflag & CSIZE; |
| @@ -2065,13 +1877,11 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) | |||
| 2065 | } else | 1877 | } else |
| 2066 | mode |= MX_PARNONE; | 1878 | mode |= MX_PARNONE; |
| 2067 | 1879 | ||
| 2068 | moxafunc(ofsAddr, FC_SetDataMode, (ushort) mode); | 1880 | moxafunc(ofsAddr, FC_SetDataMode, (u16)mode); |
| 1881 | |||
| 1882 | if (MOXA_IS_320(port->board) && baud >= 921600) | ||
| 1883 | return -1; | ||
| 2069 | 1884 | ||
| 2070 | if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) || | ||
| 2071 | (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) { | ||
| 2072 | if (baud >= 921600L) | ||
| 2073 | return (-1); | ||
| 2074 | } | ||
| 2075 | baud = MoxaPortSetBaud(port, baud); | 1885 | baud = MoxaPortSetBaud(port, baud); |
| 2076 | 1886 | ||
| 2077 | if (termio->c_iflag & (IXON | IXOFF | IXANY)) { | 1887 | if (termio->c_iflag & (IXON | IXOFF | IXANY)) { |
| @@ -2081,51 +1891,37 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) | |||
| 2081 | moxa_wait_finish(ofsAddr); | 1891 | moxa_wait_finish(ofsAddr); |
| 2082 | 1892 | ||
| 2083 | } | 1893 | } |
| 2084 | return (baud); | 1894 | return baud; |
| 2085 | } | 1895 | } |
| 2086 | 1896 | ||
| 2087 | int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState) | 1897 | static int MoxaPortGetLineOut(struct moxa_port *port, int *dtrState, |
| 1898 | int *rtsState) | ||
| 2088 | { | 1899 | { |
| 1900 | if (dtrState) | ||
| 1901 | *dtrState = !!(port->lineCtrl & DTR_ON); | ||
| 1902 | if (rtsState) | ||
| 1903 | *rtsState = !!(port->lineCtrl & RTS_ON); | ||
| 2089 | 1904 | ||
| 2090 | if (!MoxaPortIsValid(port)) | 1905 | return 0; |
| 2091 | return (-1); | ||
| 2092 | if (dtrState) { | ||
| 2093 | if (moxa_ports[port].lineCtrl & DTR_ON) | ||
| 2094 | *dtrState = 1; | ||
| 2095 | else | ||
| 2096 | *dtrState = 0; | ||
| 2097 | } | ||
| 2098 | if (rtsState) { | ||
| 2099 | if (moxa_ports[port].lineCtrl & RTS_ON) | ||
| 2100 | *rtsState = 1; | ||
| 2101 | else | ||
| 2102 | *rtsState = 0; | ||
| 2103 | } | ||
| 2104 | return (0); | ||
| 2105 | } | 1906 | } |
| 2106 | 1907 | ||
| 2107 | void MoxaPortLineCtrl(int port, int dtr, int rts) | 1908 | static void MoxaPortLineCtrl(struct moxa_port *port, int dtr, int rts) |
| 2108 | { | 1909 | { |
| 2109 | void __iomem *ofsAddr; | 1910 | u8 mode = 0; |
| 2110 | int mode; | ||
| 2111 | 1911 | ||
| 2112 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2113 | mode = 0; | ||
| 2114 | if (dtr) | 1912 | if (dtr) |
| 2115 | mode |= DTR_ON; | 1913 | mode |= DTR_ON; |
| 2116 | if (rts) | 1914 | if (rts) |
| 2117 | mode |= RTS_ON; | 1915 | mode |= RTS_ON; |
| 2118 | moxa_ports[port].lineCtrl = mode; | 1916 | port->lineCtrl = mode; |
| 2119 | moxafunc(ofsAddr, FC_LineControl, mode); | 1917 | moxafunc(port->tableAddr, FC_LineControl, mode); |
| 2120 | } | 1918 | } |
| 2121 | 1919 | ||
| 2122 | void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int txany) | 1920 | static void MoxaPortFlowCtrl(struct moxa_port *port, int rts, int cts, |
| 1921 | int txflow, int rxflow, int txany) | ||
| 2123 | { | 1922 | { |
| 2124 | void __iomem *ofsAddr; | 1923 | int mode = 0; |
| 2125 | int mode; | ||
| 2126 | 1924 | ||
| 2127 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2128 | mode = 0; | ||
| 2129 | if (rts) | 1925 | if (rts) |
| 2130 | mode |= RTS_FlowCtl; | 1926 | mode |= RTS_FlowCtl; |
| 2131 | if (cts) | 1927 | if (cts) |
| @@ -2136,81 +1932,50 @@ void MoxaPortFlowCtrl(int port, int rts, int cts, int txflow, int rxflow, int tx | |||
| 2136 | mode |= Rx_FlowCtl; | 1932 | mode |= Rx_FlowCtl; |
| 2137 | if (txany) | 1933 | if (txany) |
| 2138 | mode |= IXM_IXANY; | 1934 | mode |= IXM_IXANY; |
| 2139 | moxafunc(ofsAddr, FC_SetFlowCtl, mode); | 1935 | moxafunc(port->tableAddr, FC_SetFlowCtl, mode); |
| 2140 | } | 1936 | } |
| 2141 | 1937 | ||
| 2142 | int MoxaPortLineStatus(int port) | 1938 | static int MoxaPortLineStatus(struct moxa_port *port) |
| 2143 | { | 1939 | { |
| 2144 | void __iomem *ofsAddr; | 1940 | void __iomem *ofsAddr; |
| 2145 | int val; | 1941 | int val; |
| 2146 | 1942 | ||
| 2147 | ofsAddr = moxa_ports[port].tableAddr; | 1943 | ofsAddr = port->tableAddr; |
| 2148 | if ((moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_ISA) || | 1944 | if (MOXA_IS_320(port->board)) { |
| 2149 | (moxa_boards[port / MAX_PORTS_PER_BOARD].boardType == MOXA_BOARD_C320_PCI)) { | ||
| 2150 | moxafunc(ofsAddr, FC_LineStatus, 0); | 1945 | moxafunc(ofsAddr, FC_LineStatus, 0); |
| 2151 | val = readw(ofsAddr + FuncArg); | 1946 | val = readw(ofsAddr + FuncArg); |
| 2152 | } else { | 1947 | } else { |
| 2153 | val = readw(ofsAddr + FlagStat) >> 4; | 1948 | val = readw(ofsAddr + FlagStat) >> 4; |
| 2154 | } | 1949 | } |
| 2155 | val &= 0x0B; | 1950 | val &= 0x0B; |
| 2156 | if (val & 8) { | 1951 | if (val & 8) |
| 2157 | val |= 4; | 1952 | val |= 4; |
| 2158 | if ((moxa_ports[port].DCDState & DCD_oldstate) == 0) | 1953 | spin_lock_bh(&moxa_lock); |
| 2159 | moxa_ports[port].DCDState = (DCD_oldstate | DCD_changed); | 1954 | moxa_new_dcdstate(port, val & 8); |
| 2160 | } else { | 1955 | spin_unlock_bh(&moxa_lock); |
| 2161 | if (moxa_ports[port].DCDState & DCD_oldstate) | ||
| 2162 | moxa_ports[port].DCDState = DCD_changed; | ||
| 2163 | } | ||
| 2164 | val &= 7; | 1956 | val &= 7; |
| 2165 | return (val); | 1957 | return val; |
| 2166 | } | ||
| 2167 | |||
| 2168 | int MoxaPortDCDChange(int port) | ||
| 2169 | { | ||
| 2170 | int n; | ||
| 2171 | |||
| 2172 | if (moxa_ports[port].chkPort == 0) | ||
| 2173 | return (0); | ||
| 2174 | n = moxa_ports[port].DCDState; | ||
| 2175 | moxa_ports[port].DCDState &= ~DCD_changed; | ||
| 2176 | n &= DCD_changed; | ||
| 2177 | return (n); | ||
| 2178 | } | ||
| 2179 | |||
| 2180 | int MoxaPortDCDON(int port) | ||
| 2181 | { | ||
| 2182 | int n; | ||
| 2183 | |||
| 2184 | if (moxa_ports[port].chkPort == 0) | ||
| 2185 | return (0); | ||
| 2186 | if (moxa_ports[port].DCDState & DCD_oldstate) | ||
| 2187 | n = 1; | ||
| 2188 | else | ||
| 2189 | n = 0; | ||
| 2190 | return (n); | ||
| 2191 | } | 1958 | } |
| 2192 | 1959 | ||
| 2193 | int MoxaPortWriteData(int port, unsigned char * buffer, int len) | 1960 | static int MoxaPortWriteData(struct moxa_port *port, |
| 1961 | const unsigned char *buffer, int len) | ||
| 2194 | { | 1962 | { |
| 2195 | int c, total, i; | ||
| 2196 | ushort tail; | ||
| 2197 | int cnt; | ||
| 2198 | ushort head, tx_mask, spage, epage; | ||
| 2199 | ushort pageno, pageofs, bufhead; | ||
| 2200 | void __iomem *baseAddr, *ofsAddr, *ofs; | 1963 | void __iomem *baseAddr, *ofsAddr, *ofs; |
| 1964 | unsigned int c, total; | ||
| 1965 | u16 head, tail, tx_mask, spage, epage; | ||
| 1966 | u16 pageno, pageofs, bufhead; | ||
| 2201 | 1967 | ||
| 2202 | ofsAddr = moxa_ports[port].tableAddr; | 1968 | ofsAddr = port->tableAddr; |
| 2203 | baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem; | 1969 | baseAddr = port->board->basemem; |
| 2204 | tx_mask = readw(ofsAddr + TX_mask); | 1970 | tx_mask = readw(ofsAddr + TX_mask); |
| 2205 | spage = readw(ofsAddr + Page_txb); | 1971 | spage = readw(ofsAddr + Page_txb); |
| 2206 | epage = readw(ofsAddr + EndPage_txb); | 1972 | epage = readw(ofsAddr + EndPage_txb); |
| 2207 | tail = readw(ofsAddr + TXwptr); | 1973 | tail = readw(ofsAddr + TXwptr); |
| 2208 | head = readw(ofsAddr + TXrptr); | 1974 | head = readw(ofsAddr + TXrptr); |
| 2209 | c = (head > tail) ? (head - tail - 1) | 1975 | c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask); |
| 2210 | : (head - tail + tx_mask); | ||
| 2211 | if (c > len) | 1976 | if (c > len) |
| 2212 | c = len; | 1977 | c = len; |
| 2213 | moxaLog.txcnt[port] += c; | 1978 | moxaLog.txcnt[port->tty->index] += c; |
| 2214 | total = c; | 1979 | total = c; |
| 2215 | if (spage == epage) { | 1980 | if (spage == epage) { |
| 2216 | bufhead = readw(ofsAddr + Ofs_txb); | 1981 | bufhead = readw(ofsAddr + Ofs_txb); |
| @@ -2222,249 +1987,179 @@ int MoxaPortWriteData(int port, unsigned char * buffer, int len) | |||
| 2222 | len = tx_mask + 1 - tail; | 1987 | len = tx_mask + 1 - tail; |
| 2223 | len = (c > len) ? len : c; | 1988 | len = (c > len) ? len : c; |
| 2224 | ofs = baseAddr + DynPage_addr + bufhead + tail; | 1989 | ofs = baseAddr + DynPage_addr + bufhead + tail; |
| 2225 | for (i = 0; i < len; i++) | 1990 | memcpy_toio(ofs, buffer, len); |
| 2226 | writeb(*buffer++, ofs + i); | 1991 | buffer += len; |
| 2227 | tail = (tail + len) & tx_mask; | 1992 | tail = (tail + len) & tx_mask; |
| 2228 | c -= len; | 1993 | c -= len; |
| 2229 | } | 1994 | } |
| 2230 | writew(tail, ofsAddr + TXwptr); | ||
| 2231 | } else { | 1995 | } else { |
| 2232 | len = c; | ||
| 2233 | pageno = spage + (tail >> 13); | 1996 | pageno = spage + (tail >> 13); |
| 2234 | pageofs = tail & Page_mask; | 1997 | pageofs = tail & Page_mask; |
| 2235 | do { | 1998 | while (c > 0) { |
| 2236 | cnt = Page_size - pageofs; | 1999 | len = Page_size - pageofs; |
| 2237 | if (cnt > c) | 2000 | if (len > c) |
| 2238 | cnt = c; | 2001 | len = c; |
| 2239 | c -= cnt; | ||
| 2240 | writeb(pageno, baseAddr + Control_reg); | 2002 | writeb(pageno, baseAddr + Control_reg); |
| 2241 | ofs = baseAddr + DynPage_addr + pageofs; | 2003 | ofs = baseAddr + DynPage_addr + pageofs; |
| 2242 | for (i = 0; i < cnt; i++) | 2004 | memcpy_toio(ofs, buffer, len); |
| 2243 | writeb(*buffer++, ofs + i); | 2005 | buffer += len; |
| 2244 | if (c == 0) { | ||
| 2245 | writew((tail + len) & tx_mask, ofsAddr + TXwptr); | ||
| 2246 | break; | ||
| 2247 | } | ||
| 2248 | if (++pageno == epage) | 2006 | if (++pageno == epage) |
| 2249 | pageno = spage; | 2007 | pageno = spage; |
| 2250 | pageofs = 0; | 2008 | pageofs = 0; |
| 2251 | } while (1); | 2009 | c -= len; |
| 2010 | } | ||
| 2011 | tail = (tail + total) & tx_mask; | ||
| 2252 | } | 2012 | } |
| 2013 | writew(tail, ofsAddr + TXwptr); | ||
| 2253 | writeb(1, ofsAddr + CD180TXirq); /* start to send */ | 2014 | writeb(1, ofsAddr + CD180TXirq); /* start to send */ |
| 2254 | return (total); | 2015 | return total; |
| 2255 | } | 2016 | } |
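MoxaPortWriteData() above treats the card's transmit area as a power-of-two ring buffer: TX_mask is the buffer size minus one, and the expression (head - tail - 1) versus (head - tail + tx_mask) leaves one byte permanently unused so that a full buffer is never mistaken for an empty one. A small standalone sketch of that free-space calculation (names are illustrative; the 32 KB size comes from the C218tx_size define earlier in the diff):

#include <assert.h>
#include <stdio.h>

/* Illustrative ring-buffer free-space test, as used in MoxaPortWriteData(). */
static int tx_free_space(int head /* TXrptr */, int tail /* TXwptr */,
			 int mask /* TX_mask = size - 1 */)
{
	return (head > tail) ? (head - tail - 1) : (head - tail + mask);
}

int main(void)
{
	const int mask = 0x8000 - 1;	/* 32 KB TX buffer, C218tx_size - 1 */

	/* Empty ring: everything except the reserved byte is writable. */
	assert(tx_free_space(0, 0, mask) == mask);
	/* Writer one byte ahead of the reader: one byte less is free. */
	assert(tx_free_space(0, 1, mask) == mask - 1);
	/* Reader ahead of the writer after a wrap. */
	assert(tx_free_space(5, 2, mask) == 2);
	printf("ok\n");
	return 0;
}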
| 2256 | 2017 | ||
| 2257 | int MoxaPortReadData(int port, struct tty_struct *tty) | 2018 | static int MoxaPortReadData(struct moxa_port *port) |
| 2258 | { | 2019 | { |
| 2259 | register ushort head, pageofs; | 2020 | struct tty_struct *tty = port->tty; |
| 2260 | int i, count, cnt, len, total, remain; | 2021 | unsigned char *dst; |
| 2261 | ushort tail, rx_mask, spage, epage; | ||
| 2262 | ushort pageno, bufhead; | ||
| 2263 | void __iomem *baseAddr, *ofsAddr, *ofs; | 2022 | void __iomem *baseAddr, *ofsAddr, *ofs; |
| 2023 | unsigned int count, len, total; | ||
| 2024 | u16 tail, rx_mask, spage, epage; | ||
| 2025 | u16 pageno, pageofs, bufhead, head; | ||
| 2264 | 2026 | ||
| 2265 | ofsAddr = moxa_ports[port].tableAddr; | 2027 | ofsAddr = port->tableAddr; |
| 2266 | baseAddr = moxa_boards[port / MAX_PORTS_PER_BOARD].basemem; | 2028 | baseAddr = port->board->basemem; |
| 2267 | head = readw(ofsAddr + RXrptr); | 2029 | head = readw(ofsAddr + RXrptr); |
| 2268 | tail = readw(ofsAddr + RXwptr); | 2030 | tail = readw(ofsAddr + RXwptr); |
| 2269 | rx_mask = readw(ofsAddr + RX_mask); | 2031 | rx_mask = readw(ofsAddr + RX_mask); |
| 2270 | spage = readw(ofsAddr + Page_rxb); | 2032 | spage = readw(ofsAddr + Page_rxb); |
| 2271 | epage = readw(ofsAddr + EndPage_rxb); | 2033 | epage = readw(ofsAddr + EndPage_rxb); |
| 2272 | count = (tail >= head) ? (tail - head) | 2034 | count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1); |
| 2273 | : (tail - head + rx_mask + 1); | ||
| 2274 | if (count == 0) | 2035 | if (count == 0) |
| 2275 | return 0; | 2036 | return 0; |
| 2276 | 2037 | ||
| 2277 | total = count; | 2038 | total = count; |
| 2278 | remain = count - total; | 2039 | moxaLog.rxcnt[tty->index] += total; |
| 2279 | moxaLog.rxcnt[port] += total; | ||
| 2280 | count = total; | ||
| 2281 | if (spage == epage) { | 2040 | if (spage == epage) { |
| 2282 | bufhead = readw(ofsAddr + Ofs_rxb); | 2041 | bufhead = readw(ofsAddr + Ofs_rxb); |
| 2283 | writew(spage, baseAddr + Control_reg); | 2042 | writew(spage, baseAddr + Control_reg); |
| 2284 | while (count > 0) { | 2043 | while (count > 0) { |
| 2285 | if (tail >= head) | ||
| 2286 | len = tail - head; | ||
| 2287 | else | ||
| 2288 | len = rx_mask + 1 - head; | ||
| 2289 | len = (count > len) ? len : count; | ||
| 2290 | ofs = baseAddr + DynPage_addr + bufhead + head; | 2044 | ofs = baseAddr + DynPage_addr + bufhead + head; |
| 2291 | for (i = 0; i < len; i++) | 2045 | len = (tail >= head) ? (tail - head) : |
| 2292 | tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL); | 2046 | (rx_mask + 1 - head); |
| 2047 | len = tty_prepare_flip_string(tty, &dst, | ||
| 2048 | min(len, count)); | ||
| 2049 | memcpy_fromio(dst, ofs, len); | ||
| 2293 | head = (head + len) & rx_mask; | 2050 | head = (head + len) & rx_mask; |
| 2294 | count -= len; | 2051 | count -= len; |
| 2295 | } | 2052 | } |
| 2296 | writew(head, ofsAddr + RXrptr); | ||
| 2297 | } else { | 2053 | } else { |
| 2298 | len = count; | ||
| 2299 | pageno = spage + (head >> 13); | 2054 | pageno = spage + (head >> 13); |
| 2300 | pageofs = head & Page_mask; | 2055 | pageofs = head & Page_mask; |
| 2301 | do { | 2056 | while (count > 0) { |
| 2302 | cnt = Page_size - pageofs; | ||
| 2303 | if (cnt > count) | ||
| 2304 | cnt = count; | ||
| 2305 | count -= cnt; | ||
| 2306 | writew(pageno, baseAddr + Control_reg); | 2057 | writew(pageno, baseAddr + Control_reg); |
| 2307 | ofs = baseAddr + DynPage_addr + pageofs; | 2058 | ofs = baseAddr + DynPage_addr + pageofs; |
| 2308 | for (i = 0; i < cnt; i++) | 2059 | len = tty_prepare_flip_string(tty, &dst, |
| 2309 | tty_insert_flip_char(tty, readb(ofs + i), TTY_NORMAL); | 2060 | min(Page_size - pageofs, count)); |
| 2310 | if (count == 0) { | 2061 | memcpy_fromio(dst, ofs, len); |
| 2311 | writew((head + len) & rx_mask, ofsAddr + RXrptr); | 2062 | |
| 2312 | break; | 2063 | count -= len; |
| 2313 | } | 2064 | pageofs = (pageofs + len) & Page_mask; |
| 2314 | if (++pageno == epage) | 2065 | if (pageofs == 0 && ++pageno == epage) |
| 2315 | pageno = spage; | 2066 | pageno = spage; |
| 2316 | pageofs = 0; | 2067 | } |
| 2317 | } while (1); | 2068 | head = (head + total) & rx_mask; |
| 2318 | } | 2069 | } |
| 2319 | if ((readb(ofsAddr + FlagStat) & Xoff_state) && (remain < LowWater)) { | 2070 | writew(head, ofsAddr + RXrptr); |
| 2071 | if (readb(ofsAddr + FlagStat) & Xoff_state) { | ||
| 2320 | moxaLowWaterChk = 1; | 2072 | moxaLowWaterChk = 1; |
| 2321 | moxa_ports[port].lowChkFlag = 1; | 2073 | port->lowChkFlag = 1; |
| 2322 | } | 2074 | } |
| 2323 | return (total); | 2075 | return total; |
| 2324 | } | 2076 | } |
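When the receive buffer spans more than one window, the loop above converts the ring offset into a bank-switched address in two steps: pageno = spage + (head >> 13) picks the 8 KB page that gets mapped in via Control_reg, and head & Page_mask (Page_size is 0x2000) gives the byte offset inside that page. A tiny sketch of the decomposition, reusing the constants from the diff (the example values are made up):

#include <stdio.h>

#define Page_size 0x2000		/* 8 KB dual-ported RAM window */
#define Page_mask (Page_size - 1)

int main(void)
{
	unsigned int spage = 3;		/* e.g. C218rx_spage */
	unsigned int head = 0x2345;	/* ring-buffer read offset */

	/* 0x2345 >> 13 == 1, so the data sits in the second RX page... */
	unsigned int pageno = spage + (head >> 13);
	/* ...at byte offset 0x0345 inside that 8 KB window. */
	unsigned int pageofs = head & Page_mask;

	printf("page %u, offset 0x%04x\n", pageno, pageofs);
	return 0;
}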
| 2325 | 2077 | ||
| 2326 | 2078 | ||
| 2327 | int MoxaPortTxQueue(int port) | 2079 | static int MoxaPortTxQueue(struct moxa_port *port) |
| 2328 | { | 2080 | { |
| 2329 | void __iomem *ofsAddr; | 2081 | void __iomem *ofsAddr = port->tableAddr; |
| 2330 | ushort rptr, wptr, mask; | 2082 | u16 rptr, wptr, mask; |
| 2331 | int len; | ||
| 2332 | 2083 | ||
| 2333 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2334 | rptr = readw(ofsAddr + TXrptr); | 2084 | rptr = readw(ofsAddr + TXrptr); |
| 2335 | wptr = readw(ofsAddr + TXwptr); | 2085 | wptr = readw(ofsAddr + TXwptr); |
| 2336 | mask = readw(ofsAddr + TX_mask); | 2086 | mask = readw(ofsAddr + TX_mask); |
| 2337 | len = (wptr - rptr) & mask; | 2087 | return (wptr - rptr) & mask; |
| 2338 | return (len); | ||
| 2339 | } | 2088 | } |
| 2340 | 2089 | ||
| 2341 | int MoxaPortTxFree(int port) | 2090 | static int MoxaPortTxFree(struct moxa_port *port) |
| 2342 | { | 2091 | { |
| 2343 | void __iomem *ofsAddr; | 2092 | void __iomem *ofsAddr = port->tableAddr; |
| 2344 | ushort rptr, wptr, mask; | 2093 | u16 rptr, wptr, mask; |
| 2345 | int len; | ||
| 2346 | 2094 | ||
| 2347 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2348 | rptr = readw(ofsAddr + TXrptr); | 2095 | rptr = readw(ofsAddr + TXrptr); |
| 2349 | wptr = readw(ofsAddr + TXwptr); | 2096 | wptr = readw(ofsAddr + TXwptr); |
| 2350 | mask = readw(ofsAddr + TX_mask); | 2097 | mask = readw(ofsAddr + TX_mask); |
| 2351 | len = mask - ((wptr - rptr) & mask); | 2098 | return mask - ((wptr - rptr) & mask); |
| 2352 | return (len); | ||
| 2353 | } | 2099 | } |
| 2354 | 2100 | ||
| 2355 | int MoxaPortRxQueue(int port) | 2101 | static int MoxaPortRxQueue(struct moxa_port *port) |
| 2356 | { | 2102 | { |
| 2357 | void __iomem *ofsAddr; | 2103 | void __iomem *ofsAddr = port->tableAddr; |
| 2358 | ushort rptr, wptr, mask; | 2104 | u16 rptr, wptr, mask; |
| 2359 | int len; | ||
| 2360 | 2105 | ||
| 2361 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2362 | rptr = readw(ofsAddr + RXrptr); | 2106 | rptr = readw(ofsAddr + RXrptr); |
| 2363 | wptr = readw(ofsAddr + RXwptr); | 2107 | wptr = readw(ofsAddr + RXwptr); |
| 2364 | mask = readw(ofsAddr + RX_mask); | 2108 | mask = readw(ofsAddr + RX_mask); |
| 2365 | len = (wptr - rptr) & mask; | 2109 | return (wptr - rptr) & mask; |
| 2366 | return (len); | ||
| 2367 | } | 2110 | } |
| 2368 | 2111 | ||
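MoxaPortTxQueue(), MoxaPortTxFree() and MoxaPortRxQueue() above all reduce to the same arithmetic on the firmware's ring descriptors; because the ring size is a power of two, masking the pointer difference handles wrap-around of the 16-bit pointers. Illustrative helpers (not part of the driver):

    /* Ring occupancy / free space as computed by the queue helpers above.
     * 'mask' is ring_size - 1. */
    static inline unsigned int ring_used(u16 rptr, u16 wptr, u16 mask)
    {
            return (wptr - rptr) & mask;
    }

    static inline unsigned int ring_free(u16 rptr, u16 wptr, u16 mask)
    {
            /* one byte short of the full size, as in MoxaPortTxFree() */
            return mask - ring_used(rptr, wptr, mask);
    }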
| 2369 | 2112 | static void MoxaPortTxDisable(struct moxa_port *port) | |
| 2370 | void MoxaPortTxDisable(int port) | ||
| 2371 | { | 2113 | { |
| 2372 | void __iomem *ofsAddr; | 2114 | moxafunc(port->tableAddr, FC_SetXoffState, Magic_code); |
| 2373 | |||
| 2374 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2375 | moxafunc(ofsAddr, FC_SetXoffState, Magic_code); | ||
| 2376 | } | 2115 | } |
| 2377 | 2116 | ||
| 2378 | void MoxaPortTxEnable(int port) | 2117 | static void MoxaPortTxEnable(struct moxa_port *port) |
| 2379 | { | 2118 | { |
| 2380 | void __iomem *ofsAddr; | 2119 | moxafunc(port->tableAddr, FC_SetXonState, Magic_code); |
| 2381 | |||
| 2382 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2383 | moxafunc(ofsAddr, FC_SetXonState, Magic_code); | ||
| 2384 | } | ||
| 2385 | |||
| 2386 | |||
| 2387 | int MoxaPortResetBrkCnt(int port) | ||
| 2388 | { | ||
| 2389 | ushort cnt; | ||
| 2390 | cnt = moxa_ports[port].breakCnt; | ||
| 2391 | moxa_ports[port].breakCnt = 0; | ||
| 2392 | return (cnt); | ||
| 2393 | } | ||
| 2394 | |||
| 2395 | |||
| 2396 | void MoxaPortSendBreak(int port, int ms100) | ||
| 2397 | { | ||
| 2398 | void __iomem *ofsAddr; | ||
| 2399 | |||
| 2400 | ofsAddr = moxa_ports[port].tableAddr; | ||
| 2401 | if (ms100) { | ||
| 2402 | moxafunc(ofsAddr, FC_SendBreak, Magic_code); | ||
| 2403 | msleep(ms100 * 10); | ||
| 2404 | } else { | ||
| 2405 | moxafunc(ofsAddr, FC_SendBreak, Magic_code); | ||
| 2406 | msleep(250); | ||
| 2407 | } | ||
| 2408 | moxafunc(ofsAddr, FC_StopBreak, Magic_code); | ||
| 2409 | } | 2120 | } |
| 2410 | 2121 | ||
| 2411 | static int moxa_get_serial_info(struct moxa_port *info, | 2122 | static int moxa_get_serial_info(struct moxa_port *info, |
| 2412 | struct serial_struct __user *retinfo) | 2123 | struct serial_struct __user *retinfo) |
| 2413 | { | 2124 | { |
| 2414 | struct serial_struct tmp; | 2125 | struct serial_struct tmp = { |
| 2415 | 2126 | .type = info->type, | |
| 2416 | memset(&tmp, 0, sizeof(tmp)); | 2127 | .line = info->tty->index, |
| 2417 | tmp.type = info->type; | 2128 | .flags = info->asyncflags, |
| 2418 | tmp.line = info->port; | 2129 | .baud_base = 921600, |
| 2419 | tmp.port = 0; | 2130 | .close_delay = info->close_delay |
| 2420 | tmp.irq = 0; | 2131 | }; |
| 2421 | tmp.flags = info->asyncflags; | 2132 | return copy_to_user(retinfo, &tmp, sizeof(*retinfo)) ? -EFAULT : 0; |
| 2422 | tmp.baud_base = 921600; | ||
| 2423 | tmp.close_delay = info->close_delay; | ||
| 2424 | tmp.closing_wait = info->closing_wait; | ||
| 2425 | tmp.custom_divisor = 0; | ||
| 2426 | tmp.hub6 = 0; | ||
| 2427 | if(copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | ||
| 2428 | return -EFAULT; | ||
| 2429 | return (0); | ||
| 2430 | } | 2133 | } |
| 2431 | 2134 | ||
| 2432 | 2135 | ||
| 2433 | static int moxa_set_serial_info(struct moxa_port *info, | 2136 | static int moxa_set_serial_info(struct moxa_port *info, |
| 2434 | struct serial_struct __user *new_info) | 2137 | struct serial_struct __user *new_info) |
| 2435 | { | 2138 | { |
| 2436 | struct serial_struct new_serial; | 2139 | struct serial_struct new_serial; |
| 2437 | 2140 | ||
| 2438 | if(copy_from_user(&new_serial, new_info, sizeof(new_serial))) | 2141 | if (copy_from_user(&new_serial, new_info, sizeof(new_serial))) |
| 2439 | return -EFAULT; | 2142 | return -EFAULT; |
| 2440 | 2143 | ||
| 2441 | if ((new_serial.irq != 0) || | 2144 | if (new_serial.irq != 0 || new_serial.port != 0 || |
| 2442 | (new_serial.port != 0) || | 2145 | new_serial.custom_divisor != 0 || |
| 2443 | // (new_serial.type != info->type) || | 2146 | new_serial.baud_base != 921600) |
| 2444 | (new_serial.custom_divisor != 0) || | 2147 | return -EPERM; |
| 2445 | (new_serial.baud_base != 921600)) | ||
| 2446 | return (-EPERM); | ||
| 2447 | 2148 | ||
| 2448 | if (!capable(CAP_SYS_ADMIN)) { | 2149 | if (!capable(CAP_SYS_ADMIN)) { |
| 2449 | if (((new_serial.flags & ~ASYNC_USR_MASK) != | 2150 | if (((new_serial.flags & ~ASYNC_USR_MASK) != |
| 2450 | (info->asyncflags & ~ASYNC_USR_MASK))) | 2151 | (info->asyncflags & ~ASYNC_USR_MASK))) |
| 2451 | return (-EPERM); | 2152 | return -EPERM; |
| 2452 | } else { | 2153 | } else |
| 2453 | info->close_delay = new_serial.close_delay * HZ / 100; | 2154 | info->close_delay = new_serial.close_delay * HZ / 100; |
| 2454 | info->closing_wait = new_serial.closing_wait * HZ / 100; | ||
| 2455 | } | ||
| 2456 | 2155 | ||
| 2457 | new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS); | 2156 | new_serial.flags = (new_serial.flags & ~ASYNC_FLAGS); |
| 2458 | new_serial.flags |= (info->asyncflags & ASYNC_FLAGS); | 2157 | new_serial.flags |= (info->asyncflags & ASYNC_FLAGS); |
| 2459 | 2158 | ||
| 2460 | if (new_serial.type == PORT_16550A) { | 2159 | MoxaSetFifo(info, new_serial.type == PORT_16550A); |
| 2461 | MoxaSetFifo(info->port, 1); | ||
| 2462 | } else { | ||
| 2463 | MoxaSetFifo(info->port, 0); | ||
| 2464 | } | ||
| 2465 | 2160 | ||
| 2466 | info->type = new_serial.type; | 2161 | info->type = new_serial.type; |
| 2467 | return (0); | 2162 | return 0; |
| 2468 | } | 2163 | } |
| 2469 | 2164 | ||
| 2470 | 2165 | ||
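The TIOCGSERIAL/TIOCSSERIAL helpers above now build the serial_struct with a designated initializer, reject fields the board cannot change (irq, port, custom_divisor, any baud_base other than 921600) and only let CAP_SYS_ADMIN adjust close_delay. From userspace the interface is the ordinary serial ioctl; a small query example follows, where the device node name is an assumption (substitute whatever node the driver actually creates).

    /* Userspace sketch: read the fixed board parameters reported by
     * moxa_get_serial_info().  "/dev/ttyMX0" is a guess at the node name. */
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/serial.h>

    int main(void)
    {
            struct serial_struct ss;
            int fd = open("/dev/ttyMX0", O_RDONLY | O_NONBLOCK);

            if (fd < 0 || ioctl(fd, TIOCGSERIAL, &ss) < 0) {
                    perror("TIOCGSERIAL");
                    return 1;
            }
            printf("type=%d baud_base=%d flags=%#x close_delay=%d\n",
                   ss.type, ss.baud_base, ss.flags, ss.close_delay);
            close(fd);
            return 0;
    }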
| @@ -2472,374 +2167,10 @@ static int moxa_set_serial_info(struct moxa_port *info, | |||
| 2472 | /***************************************************************************** | 2167 | /***************************************************************************** |
| 2473 | * Static local functions: * | 2168 | * Static local functions: * |
| 2474 | *****************************************************************************/ | 2169 | *****************************************************************************/ |
| 2475 | static void moxafunc(void __iomem *ofsAddr, int cmd, ushort arg) | ||
| 2476 | { | ||
| 2477 | |||
| 2478 | writew(arg, ofsAddr + FuncArg); | ||
| 2479 | writew(cmd, ofsAddr + FuncCode); | ||
| 2480 | moxa_wait_finish(ofsAddr); | ||
| 2481 | } | ||
| 2482 | |||
| 2483 | static void moxa_wait_finish(void __iomem *ofsAddr) | ||
| 2484 | { | ||
| 2485 | unsigned long i, j; | ||
| 2486 | |||
| 2487 | i = jiffies; | ||
| 2488 | while (readw(ofsAddr + FuncCode) != 0) { | ||
| 2489 | j = jiffies; | ||
| 2490 | if ((j - i) > moxaFuncTout) { | ||
| 2491 | return; | ||
| 2492 | } | ||
| 2493 | } | ||
| 2494 | } | ||
| 2495 | |||
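moxafunc() and moxa_wait_finish(), removed just above, form a two-word mailbox: write the argument and the function code into the port descriptor, then busy-wait until the firmware clears the code word, giving up after moxaFuncTout jiffies. The open-coded jiffies subtraction is the kind of loop normally written with time_after(); a hedged sketch follows, and returning -ETIMEDOUT is an addition, since the original gives up silently.

    /* Mailbox-call sketch, not the driver's code.
     * Needs <linux/jiffies.h>, <linux/io.h>. */
    static int mailbox_call(void __iomem *ofs, u16 cmd, u16 arg,
                            unsigned long timeout)
    {
            unsigned long deadline = jiffies + timeout;

            writew(arg, ofs + FuncArg);
            writew(cmd, ofs + FuncCode);

            while (readw(ofs + FuncCode) != 0) {
                    if (time_after(jiffies, deadline))
                            return -ETIMEDOUT;
                    cpu_relax();
            }
            return 0;
    }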
| 2496 | static void moxa_low_water_check(void __iomem *ofsAddr) | ||
| 2497 | { | ||
| 2498 | int len; | ||
| 2499 | ushort rptr, wptr, mask; | ||
| 2500 | |||
| 2501 | if (readb(ofsAddr + FlagStat) & Xoff_state) { | ||
| 2502 | rptr = readw(ofsAddr + RXrptr); | ||
| 2503 | wptr = readw(ofsAddr + RXwptr); | ||
| 2504 | mask = readw(ofsAddr + RX_mask); | ||
| 2505 | len = (wptr - rptr) & mask; | ||
| 2506 | if (len <= Low_water) | ||
| 2507 | moxafunc(ofsAddr, FC_SendXon, 0); | ||
| 2508 | } | ||
| 2509 | } | ||
| 2510 | |||
| 2511 | static int moxaloadbios(int cardno, unsigned char __user *tmp, int len) | ||
| 2512 | { | ||
| 2513 | void __iomem *baseAddr; | ||
| 2514 | int i; | ||
| 2515 | |||
| 2516 | if(len < 0 || len > sizeof(moxaBuff)) | ||
| 2517 | return -EINVAL; | ||
| 2518 | if(copy_from_user(moxaBuff, tmp, len)) | ||
| 2519 | return -EFAULT; | ||
| 2520 | baseAddr = moxa_boards[cardno].basemem; | ||
| 2521 | writeb(HW_reset, baseAddr + Control_reg); /* reset */ | ||
| 2522 | msleep(10); | ||
| 2523 | for (i = 0; i < 4096; i++) | ||
| 2524 | writeb(0, baseAddr + i); /* clear fix page */ | ||
| 2525 | for (i = 0; i < len; i++) | ||
| 2526 | writeb(moxaBuff[i], baseAddr + i); /* download BIOS */ | ||
| 2527 | writeb(0, baseAddr + Control_reg); /* restart */ | ||
| 2528 | return (0); | ||
| 2529 | } | ||
| 2530 | |||
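moxaloadbios() above resets the board, clears the 4 KB fixed page, copies the BIOS image into it one byte at a time, then releases the reset so the BIOS starts. The same sequence expressed with memset_io()/memcpy_toio() is sketched below; whether the hardware tolerates the wider accesses those helpers may issue is an assumption, and the original deliberately sticks to writeb().

    /* Illustration only: reset / clear / download / restart, with the two
     * byte loops replaced by memset_io() and memcpy_toio().  Access width
     * is an assumption (see note above).  Needs <linux/delay.h>, <linux/io.h>. */
    static void bios_download_sketch(void __iomem *base,
                                     const u8 *image, size_t len)
    {
            writeb(HW_reset, base + Control_reg);   /* hold the board in reset */
            msleep(10);

            memset_io(base, 0, 4096);               /* clear the fixed page */
            memcpy_toio(base, image, len);          /* download the BIOS image */

            writeb(0, base + Control_reg);          /* release reset, BIOS runs */
    }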
| 2531 | static int moxafindcard(int cardno) | ||
| 2532 | { | ||
| 2533 | void __iomem *baseAddr; | ||
| 2534 | ushort tmp; | ||
| 2535 | |||
| 2536 | baseAddr = moxa_boards[cardno].basemem; | ||
| 2537 | switch (moxa_boards[cardno].boardType) { | ||
| 2538 | case MOXA_BOARD_C218_ISA: | ||
| 2539 | case MOXA_BOARD_C218_PCI: | ||
| 2540 | if ((tmp = readw(baseAddr + C218_key)) != C218_KeyCode) { | ||
| 2541 | return (-1); | ||
| 2542 | } | ||
| 2543 | break; | ||
| 2544 | case MOXA_BOARD_CP204J: | ||
| 2545 | if ((tmp = readw(baseAddr + C218_key)) != CP204J_KeyCode) { | ||
| 2546 | return (-1); | ||
| 2547 | } | ||
| 2548 | break; | ||
| 2549 | default: | ||
| 2550 | if ((tmp = readw(baseAddr + C320_key)) != C320_KeyCode) { | ||
| 2551 | return (-1); | ||
| 2552 | } | ||
| 2553 | if ((tmp = readw(baseAddr + C320_status)) != STS_init) { | ||
| 2554 | return (-2); | ||
| 2555 | } | ||
| 2556 | } | ||
| 2557 | return (0); | ||
| 2558 | } | ||
| 2559 | |||
| 2560 | static int moxaload320b(int cardno, unsigned char __user *tmp, int len) | ||
| 2561 | { | ||
| 2562 | void __iomem *baseAddr; | ||
| 2563 | int i; | ||
| 2564 | |||
| 2565 | if(len < 0 || len > sizeof(moxaBuff)) | ||
| 2566 | return -EINVAL; | ||
| 2567 | if(copy_from_user(moxaBuff, tmp, len)) | ||
| 2568 | return -EFAULT; | ||
| 2569 | baseAddr = moxa_boards[cardno].basemem; | ||
| 2570 | writew(len - 7168 - 2, baseAddr + C320bapi_len); | ||
| 2571 | writeb(1, baseAddr + Control_reg); /* Select Page 1 */ | ||
| 2572 | for (i = 0; i < 7168; i++) | ||
| 2573 | writeb(moxaBuff[i], baseAddr + DynPage_addr + i); | ||
| 2574 | writeb(2, baseAddr + Control_reg); /* Select Page 2 */ | ||
| 2575 | for (i = 0; i < (len - 7168); i++) | ||
| 2576 | writeb(moxaBuff[i + 7168], baseAddr + DynPage_addr + i); | ||
| 2577 | return (0); | ||
| 2578 | } | ||
| 2579 | |||
| 2580 | static int moxaloadcode(int cardno, unsigned char __user *tmp, int len) | ||
| 2581 | { | ||
| 2582 | void __iomem *baseAddr, *ofsAddr; | ||
| 2583 | int retval, port, i; | ||
| 2584 | |||
| 2585 | if(len < 0 || len > sizeof(moxaBuff)) | ||
| 2586 | return -EINVAL; | ||
| 2587 | if(copy_from_user(moxaBuff, tmp, len)) | ||
| 2588 | return -EFAULT; | ||
| 2589 | baseAddr = moxa_boards[cardno].basemem; | ||
| 2590 | switch (moxa_boards[cardno].boardType) { | ||
| 2591 | case MOXA_BOARD_C218_ISA: | ||
| 2592 | case MOXA_BOARD_C218_PCI: | ||
| 2593 | case MOXA_BOARD_CP204J: | ||
| 2594 | retval = moxaloadc218(cardno, baseAddr, len); | ||
| 2595 | if (retval) | ||
| 2596 | return (retval); | ||
| 2597 | port = cardno * MAX_PORTS_PER_BOARD; | ||
| 2598 | for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) { | ||
| 2599 | struct moxa_port *p = &moxa_ports[port]; | ||
| 2600 | |||
| 2601 | p->chkPort = 1; | ||
| 2602 | p->curBaud = 9600L; | ||
| 2603 | p->DCDState = 0; | ||
| 2604 | p->tableAddr = baseAddr + Extern_table + Extern_size * i; | ||
| 2605 | ofsAddr = p->tableAddr; | ||
| 2606 | writew(C218rx_mask, ofsAddr + RX_mask); | ||
| 2607 | writew(C218tx_mask, ofsAddr + TX_mask); | ||
| 2608 | writew(C218rx_spage + i * C218buf_pageno, ofsAddr + Page_rxb); | ||
| 2609 | writew(readw(ofsAddr + Page_rxb) + C218rx_pageno, ofsAddr + EndPage_rxb); | ||
| 2610 | |||
| 2611 | writew(C218tx_spage + i * C218buf_pageno, ofsAddr + Page_txb); | ||
| 2612 | writew(readw(ofsAddr + Page_txb) + C218tx_pageno, ofsAddr + EndPage_txb); | ||
| 2613 | |||
| 2614 | } | ||
| 2615 | break; | ||
| 2616 | default: | ||
| 2617 | retval = moxaloadc320(cardno, baseAddr, len, | ||
| 2618 | &moxa_boards[cardno].numPorts); | ||
| 2619 | if (retval) | ||
| 2620 | return (retval); | ||
| 2621 | port = cardno * MAX_PORTS_PER_BOARD; | ||
| 2622 | for (i = 0; i < moxa_boards[cardno].numPorts; i++, port++) { | ||
| 2623 | struct moxa_port *p = &moxa_ports[port]; | ||
| 2624 | |||
| 2625 | p->chkPort = 1; | ||
| 2626 | p->curBaud = 9600L; | ||
| 2627 | p->DCDState = 0; | ||
| 2628 | p->tableAddr = baseAddr + Extern_table + Extern_size * i; | ||
| 2629 | ofsAddr = p->tableAddr; | ||
| 2630 | if (moxa_boards[cardno].numPorts == 8) { | ||
| 2631 | writew(C320p8rx_mask, ofsAddr + RX_mask); | ||
| 2632 | writew(C320p8tx_mask, ofsAddr + TX_mask); | ||
| 2633 | writew(C320p8rx_spage + i * C320p8buf_pgno, ofsAddr + Page_rxb); | ||
| 2634 | writew(readw(ofsAddr + Page_rxb) + C320p8rx_pgno, ofsAddr + EndPage_rxb); | ||
| 2635 | writew(C320p8tx_spage + i * C320p8buf_pgno, ofsAddr + Page_txb); | ||
| 2636 | writew(readw(ofsAddr + Page_txb) + C320p8tx_pgno, ofsAddr + EndPage_txb); | ||
| 2637 | |||
| 2638 | } else if (moxa_boards[cardno].numPorts == 16) { | ||
| 2639 | writew(C320p16rx_mask, ofsAddr + RX_mask); | ||
| 2640 | writew(C320p16tx_mask, ofsAddr + TX_mask); | ||
| 2641 | writew(C320p16rx_spage + i * C320p16buf_pgno, ofsAddr + Page_rxb); | ||
| 2642 | writew(readw(ofsAddr + Page_rxb) + C320p16rx_pgno, ofsAddr + EndPage_rxb); | ||
| 2643 | writew(C320p16tx_spage + i * C320p16buf_pgno, ofsAddr + Page_txb); | ||
| 2644 | writew(readw(ofsAddr + Page_txb) + C320p16tx_pgno, ofsAddr + EndPage_txb); | ||
| 2645 | |||
| 2646 | } else if (moxa_boards[cardno].numPorts == 24) { | ||
| 2647 | writew(C320p24rx_mask, ofsAddr + RX_mask); | ||
| 2648 | writew(C320p24tx_mask, ofsAddr + TX_mask); | ||
| 2649 | writew(C320p24rx_spage + i * C320p24buf_pgno, ofsAddr + Page_rxb); | ||
| 2650 | writew(readw(ofsAddr + Page_rxb) + C320p24rx_pgno, ofsAddr + EndPage_rxb); | ||
| 2651 | writew(C320p24tx_spage + i * C320p24buf_pgno, ofsAddr + Page_txb); | ||
| 2652 | writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); | ||
| 2653 | } else if (moxa_boards[cardno].numPorts == 32) { | ||
| 2654 | writew(C320p32rx_mask, ofsAddr + RX_mask); | ||
| 2655 | writew(C320p32tx_mask, ofsAddr + TX_mask); | ||
| 2656 | writew(C320p32tx_ofs, ofsAddr + Ofs_txb); | ||
| 2657 | writew(C320p32rx_spage + i * C320p32buf_pgno, ofsAddr + Page_rxb); | ||
| 2658 | writew(readb(ofsAddr + Page_rxb), ofsAddr + EndPage_rxb); | ||
| 2659 | writew(C320p32tx_spage + i * C320p32buf_pgno, ofsAddr + Page_txb); | ||
| 2660 | writew(readw(ofsAddr + Page_txb), ofsAddr + EndPage_txb); | ||
| 2661 | } | ||
| 2662 | } | ||
| 2663 | break; | ||
| 2664 | } | ||
| 2665 | moxa_boards[cardno].loadstat = 1; | ||
| 2666 | return (0); | ||
| 2667 | } | ||
| 2668 | |||
| 2669 | static int moxaloadc218(int cardno, void __iomem *baseAddr, int len) | ||
| 2670 | { | ||
| 2671 | char retry; | ||
| 2672 | int i, j, len1, len2; | ||
| 2673 | ushort usum, *ptr, keycode; | ||
| 2674 | |||
| 2675 | if (moxa_boards[cardno].boardType == MOXA_BOARD_CP204J) | ||
| 2676 | keycode = CP204J_KeyCode; | ||
| 2677 | else | ||
| 2678 | keycode = C218_KeyCode; | ||
| 2679 | usum = 0; | ||
| 2680 | len1 = len >> 1; | ||
| 2681 | ptr = (ushort *) moxaBuff; | ||
| 2682 | for (i = 0; i < len1; i++) | ||
| 2683 | usum += le16_to_cpu(*(ptr + i)); | ||
| 2684 | retry = 0; | ||
| 2685 | do { | ||
| 2686 | len1 = len >> 1; | ||
| 2687 | j = 0; | ||
| 2688 | while (len1) { | ||
| 2689 | len2 = (len1 > 2048) ? 2048 : len1; | ||
| 2690 | len1 -= len2; | ||
| 2691 | for (i = 0; i < len2 << 1; i++) | ||
| 2692 | writeb(moxaBuff[i + j], baseAddr + C218_LoadBuf + i); | ||
| 2693 | j += i; | ||
| 2694 | |||
| 2695 | writew(len2, baseAddr + C218DLoad_len); | ||
| 2696 | writew(0, baseAddr + C218_key); | ||
| 2697 | for (i = 0; i < 100; i++) { | ||
| 2698 | if (readw(baseAddr + C218_key) == keycode) | ||
| 2699 | break; | ||
| 2700 | msleep(10); | ||
| 2701 | } | ||
| 2702 | if (readw(baseAddr + C218_key) != keycode) { | ||
| 2703 | return (-1); | ||
| 2704 | } | ||
| 2705 | } | ||
| 2706 | writew(0, baseAddr + C218DLoad_len); | ||
| 2707 | writew(usum, baseAddr + C218check_sum); | ||
| 2708 | writew(0, baseAddr + C218_key); | ||
| 2709 | for (i = 0; i < 100; i++) { | ||
| 2710 | if (readw(baseAddr + C218_key) == keycode) | ||
| 2711 | break; | ||
| 2712 | msleep(10); | ||
| 2713 | } | ||
| 2714 | retry++; | ||
| 2715 | } while ((readb(baseAddr + C218chksum_ok) != 1) && (retry < 3)); | ||
| 2716 | if (readb(baseAddr + C218chksum_ok) != 1) { | ||
| 2717 | return (-1); | ||
| 2718 | } | ||
| 2719 | writew(0, baseAddr + C218_key); | ||
| 2720 | for (i = 0; i < 100; i++) { | ||
| 2721 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 2722 | break; | ||
| 2723 | msleep(10); | ||
| 2724 | } | ||
| 2725 | if (readw(baseAddr + Magic_no) != Magic_code) { | ||
| 2726 | return (-1); | ||
| 2727 | } | ||
| 2728 | writew(1, baseAddr + Disable_IRQ); | ||
| 2729 | writew(0, baseAddr + Magic_no); | ||
| 2730 | for (i = 0; i < 100; i++) { | ||
| 2731 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 2732 | break; | ||
| 2733 | msleep(10); | ||
| 2734 | } | ||
| 2735 | if (readw(baseAddr + Magic_no) != Magic_code) { | ||
| 2736 | return (-1); | ||
| 2737 | } | ||
| 2738 | moxaCard = 1; | ||
| 2739 | moxa_boards[cardno].intNdx = baseAddr + IRQindex; | ||
| 2740 | moxa_boards[cardno].intPend = baseAddr + IRQpending; | ||
| 2741 | moxa_boards[cardno].intTable = baseAddr + IRQtable; | ||
| 2742 | return (0); | ||
| 2743 | } | ||
| 2744 | |||
| 2745 | static int moxaloadc320(int cardno, void __iomem *baseAddr, int len, int *numPorts) | ||
| 2746 | { | ||
| 2747 | ushort usum; | ||
| 2748 | int i, j, wlen, len2, retry; | ||
| 2749 | ushort *uptr; | ||
| 2750 | |||
| 2751 | usum = 0; | ||
| 2752 | wlen = len >> 1; | ||
| 2753 | uptr = (ushort *) moxaBuff; | ||
| 2754 | for (i = 0; i < wlen; i++) | ||
| 2755 | usum += le16_to_cpu(uptr[i]); | ||
| 2756 | retry = 0; | ||
| 2757 | j = 0; | ||
| 2758 | do { | ||
| 2759 | while (wlen) { | ||
| 2760 | if (wlen > 2048) | ||
| 2761 | len2 = 2048; | ||
| 2762 | else | ||
| 2763 | len2 = wlen; | ||
| 2764 | wlen -= len2; | ||
| 2765 | len2 <<= 1; | ||
| 2766 | for (i = 0; i < len2; i++) | ||
| 2767 | writeb(moxaBuff[j + i], baseAddr + C320_LoadBuf + i); | ||
| 2768 | len2 >>= 1; | ||
| 2769 | j += i; | ||
| 2770 | writew(len2, baseAddr + C320DLoad_len); | ||
| 2771 | writew(0, baseAddr + C320_key); | ||
| 2772 | for (i = 0; i < 10; i++) { | ||
| 2773 | if (readw(baseAddr + C320_key) == C320_KeyCode) | ||
| 2774 | break; | ||
| 2775 | msleep(10); | ||
| 2776 | } | ||
| 2777 | if (readw(baseAddr + C320_key) != C320_KeyCode) | ||
| 2778 | return (-1); | ||
| 2779 | } | ||
| 2780 | writew(0, baseAddr + C320DLoad_len); | ||
| 2781 | writew(usum, baseAddr + C320check_sum); | ||
| 2782 | writew(0, baseAddr + C320_key); | ||
| 2783 | for (i = 0; i < 10; i++) { | ||
| 2784 | if (readw(baseAddr + C320_key) == C320_KeyCode) | ||
| 2785 | break; | ||
| 2786 | msleep(10); | ||
| 2787 | } | ||
| 2788 | retry++; | ||
| 2789 | } while ((readb(baseAddr + C320chksum_ok) != 1) && (retry < 3)); | ||
| 2790 | if (readb(baseAddr + C320chksum_ok) != 1) | ||
| 2791 | return (-1); | ||
| 2792 | writew(0, baseAddr + C320_key); | ||
| 2793 | for (i = 0; i < 600; i++) { | ||
| 2794 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 2795 | break; | ||
| 2796 | msleep(10); | ||
| 2797 | } | ||
| 2798 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 2799 | return (-100); | ||
| 2800 | |||
| 2801 | if (moxa_boards[cardno].busType == MOXA_BUS_TYPE_PCI) { /* ASIC board */ | ||
| 2802 | writew(0x3800, baseAddr + TMS320_PORT1); | ||
| 2803 | writew(0x3900, baseAddr + TMS320_PORT2); | ||
| 2804 | writew(28499, baseAddr + TMS320_CLOCK); | ||
| 2805 | } else { | ||
| 2806 | writew(0x3200, baseAddr + TMS320_PORT1); | ||
| 2807 | writew(0x3400, baseAddr + TMS320_PORT2); | ||
| 2808 | writew(19999, baseAddr + TMS320_CLOCK); | ||
| 2809 | } | ||
| 2810 | writew(1, baseAddr + Disable_IRQ); | ||
| 2811 | writew(0, baseAddr + Magic_no); | ||
| 2812 | for (i = 0; i < 500; i++) { | ||
| 2813 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 2814 | break; | ||
| 2815 | msleep(10); | ||
| 2816 | } | ||
| 2817 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 2818 | return (-102); | ||
| 2819 | |||
| 2820 | j = readw(baseAddr + Module_cnt); | ||
| 2821 | if (j <= 0) | ||
| 2822 | return (-101); | ||
| 2823 | *numPorts = j * 8; | ||
| 2824 | writew(j, baseAddr + Module_no); | ||
| 2825 | writew(0, baseAddr + Magic_no); | ||
| 2826 | for (i = 0; i < 600; i++) { | ||
| 2827 | if (readw(baseAddr + Magic_no) == Magic_code) | ||
| 2828 | break; | ||
| 2829 | msleep(10); | ||
| 2830 | } | ||
| 2831 | if (readw(baseAddr + Magic_no) != Magic_code) | ||
| 2832 | return (-102); | ||
| 2833 | moxaCard = 1; | ||
| 2834 | moxa_boards[cardno].intNdx = baseAddr + IRQindex; | ||
| 2835 | moxa_boards[cardno].intPend = baseAddr + IRQpending; | ||
| 2836 | moxa_boards[cardno].intTable = baseAddr + IRQtable; | ||
| 2837 | return (0); | ||
| 2838 | } | ||
| 2839 | 2170 | ||
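Both loaders above, moxaloadc218() and moxaloadc320(), end with the same shape of wait: write or clear a key/magic word, then poll it with msleep() until the firmware echoes the expected value, giving up after a fixed number of tries. A generic helper in that spirit is sketched below purely as illustration; the drivers return their own distinct error codes (-1, -100, -102), and the later readx_poll_timeout() style helpers did not exist yet, which is why the loops are open-coded.

    /* Poll-a-word sketch, not part of the driver.
     * Needs <linux/delay.h>, <linux/io.h>. */
    static int wait_for_word(void __iomem *addr, u16 expected,
                             unsigned int tries, unsigned int delay_ms)
    {
            while (tries--) {
                    if (readw(addr) == expected)
                            return 0;
                    msleep(delay_ms);
            }
            return readw(addr) == expected ? 0 : -ETIMEDOUT;
    }

    /* e.g. the Magic_no waits above would become
     *     wait_for_word(baseAddr + Magic_no, Magic_code, 600, 10);      */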
| 2840 | static void MoxaSetFifo(int port, int enable) | 2171 | static void MoxaSetFifo(struct moxa_port *port, int enable) |
| 2841 | { | 2172 | { |
| 2842 | void __iomem *ofsAddr = moxa_ports[port].tableAddr; | 2173 | void __iomem *ofsAddr = port->tableAddr; |
| 2843 | 2174 | ||
| 2844 | if (!enable) { | 2175 | if (!enable) { |
| 2845 | moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0); | 2176 | moxafunc(ofsAddr, FC_SetRxFIFOTrig, 0); |
diff --git a/drivers/char/moxa.h b/drivers/char/moxa.h new file mode 100644 index 000000000000..87d16ce57be7 --- /dev/null +++ b/drivers/char/moxa.h | |||
| @@ -0,0 +1,304 @@ | |||
| 1 | #ifndef MOXA_H_FILE | ||
| 2 | #define MOXA_H_FILE | ||
| 3 | |||
| 4 | #define MOXA 0x400 | ||
| 5 | #define MOXA_GET_IQUEUE (MOXA + 1) /* get input buffered count */ | ||
| 6 | #define MOXA_GET_OQUEUE (MOXA + 2) /* get output buffered count */ | ||
| 7 | #define MOXA_GETDATACOUNT (MOXA + 23) | ||
| 8 | #define MOXA_GET_IOQUEUE (MOXA + 27) | ||
| 9 | #define MOXA_FLUSH_QUEUE (MOXA + 28) | ||
| 10 | #define MOXA_GETMSTATUS (MOXA + 65) | ||
| 11 | |||
| 12 | /* | ||
| 13 | * System Configuration | ||
| 14 | */ | ||
| 15 | |||
| 16 | #define Magic_code 0x404 | ||
| 17 | |||
| 18 | /* | ||
| 19 | * for C218 BIOS initialization | ||
| 20 | */ | ||
| 21 | #define C218_ConfBase 0x800 | ||
| 22 | #define C218_status (C218_ConfBase + 0) /* BIOS running status */ | ||
| 23 | #define C218_diag (C218_ConfBase + 2) /* diagnostic status */ | ||
| 24 | #define C218_key (C218_ConfBase + 4) /* WORD (0x218 for C218) */ | ||
| 25 | #define C218DLoad_len (C218_ConfBase + 6) /* WORD */ | ||
| 26 | #define C218check_sum (C218_ConfBase + 8) /* BYTE */ | ||
| 27 | #define C218chksum_ok (C218_ConfBase + 0x0a) /* BYTE (1:ok) */ | ||
| 28 | #define C218_TestRx (C218_ConfBase + 0x10) /* 8 bytes for 8 ports */ | ||
| 29 | #define C218_TestTx (C218_ConfBase + 0x18) /* 8 bytes for 8 ports */ | ||
| 30 | #define C218_RXerr (C218_ConfBase + 0x20) /* 8 bytes for 8 ports */ | ||
| 31 | #define C218_ErrFlag (C218_ConfBase + 0x28) /* 8 bytes for 8 ports */ | ||
| 32 | |||
| 33 | #define C218_LoadBuf 0x0F00 | ||
| 34 | #define C218_KeyCode 0x218 | ||
| 35 | #define CP204J_KeyCode 0x204 | ||
| 36 | |||
| 37 | /* | ||
| 38 | * for C320 BIOS initialization | ||
| 39 | */ | ||
| 40 | #define C320_ConfBase 0x800 | ||
| 41 | #define C320_LoadBuf 0x0f00 | ||
| 42 | #define STS_init 0x05 /* for C320_status */ | ||
| 43 | |||
| 44 | #define C320_status C320_ConfBase + 0 /* BIOS running status */ | ||
| 45 | #define C320_diag C320_ConfBase + 2 /* diagnostic status */ | ||
| 46 | #define C320_key C320_ConfBase + 4 /* WORD (0320H for C320) */ | ||
| 47 | #define C320DLoad_len C320_ConfBase + 6 /* WORD */ | ||
| 48 | #define C320check_sum C320_ConfBase + 8 /* WORD */ | ||
| 49 | #define C320chksum_ok C320_ConfBase + 0x0a /* WORD (1:ok) */ | ||
| 50 | #define C320bapi_len C320_ConfBase + 0x0c /* WORD */ | ||
| 51 | #define C320UART_no C320_ConfBase + 0x0e /* WORD */ | ||
| 52 | |||
| 53 | #define C320_KeyCode 0x320 | ||
| 54 | |||
| 55 | #define FixPage_addr 0x0000 /* starting addr of static page */ | ||
| 56 | #define DynPage_addr 0x2000 /* starting addr of dynamic page */ | ||
| 57 | #define C218_start 0x3000 /* starting addr of C218 BIOS prg */ | ||
| 58 | #define Control_reg 0x1ff0 /* select page and reset control */ | ||
| 59 | #define HW_reset 0x80 | ||
| 60 | |||
| 61 | /* | ||
| 62 | * Function Codes | ||
| 63 | */ | ||
| 64 | #define FC_CardReset 0x80 | ||
| 65 | #define FC_ChannelReset 1 /* C320 firmware not supported */ | ||
| 66 | #define FC_EnableCH 2 | ||
| 67 | #define FC_DisableCH 3 | ||
| 68 | #define FC_SetParam 4 | ||
| 69 | #define FC_SetMode 5 | ||
| 70 | #define FC_SetRate 6 | ||
| 71 | #define FC_LineControl 7 | ||
| 72 | #define FC_LineStatus 8 | ||
| 73 | #define FC_XmitControl 9 | ||
| 74 | #define FC_FlushQueue 10 | ||
| 75 | #define FC_SendBreak 11 | ||
| 76 | #define FC_StopBreak 12 | ||
| 77 | #define FC_LoopbackON 13 | ||
| 78 | #define FC_LoopbackOFF 14 | ||
| 79 | #define FC_ClrIrqTable 15 | ||
| 80 | #define FC_SendXon 16 | ||
| 81 | #define FC_SetTermIrq 17 /* C320 firmware not supported */ | ||
| 82 | #define FC_SetCntIrq 18 /* C320 firmware not supported */ | ||
| 83 | #define FC_SetBreakIrq 19 | ||
| 84 | #define FC_SetLineIrq 20 | ||
| 85 | #define FC_SetFlowCtl 21 | ||
| 86 | #define FC_GenIrq 22 | ||
| 87 | #define FC_InCD180 23 | ||
| 88 | #define FC_OutCD180 24 | ||
| 89 | #define FC_InUARTreg 23 | ||
| 90 | #define FC_OutUARTreg 24 | ||
| 91 | #define FC_SetXonXoff 25 | ||
| 92 | #define FC_OutCD180CCR 26 | ||
| 93 | #define FC_ExtIQueue 27 | ||
| 94 | #define FC_ExtOQueue 28 | ||
| 95 | #define FC_ClrLineIrq 29 | ||
| 96 | #define FC_HWFlowCtl 30 | ||
| 97 | #define FC_GetClockRate 35 | ||
| 98 | #define FC_SetBaud 36 | ||
| 99 | #define FC_SetDataMode 41 | ||
| 100 | #define FC_GetCCSR 43 | ||
| 101 | #define FC_GetDataError 45 | ||
| 102 | #define FC_RxControl 50 | ||
| 103 | #define FC_ImmSend 51 | ||
| 104 | #define FC_SetXonState 52 | ||
| 105 | #define FC_SetXoffState 53 | ||
| 106 | #define FC_SetRxFIFOTrig 54 | ||
| 107 | #define FC_SetTxFIFOCnt 55 | ||
| 108 | #define FC_UnixRate 56 | ||
| 109 | #define FC_UnixResetTimer 57 | ||
| 110 | |||
| 111 | #define RxFIFOTrig1 0 | ||
| 112 | #define RxFIFOTrig4 1 | ||
| 113 | #define RxFIFOTrig8 2 | ||
| 114 | #define RxFIFOTrig14 3 | ||
| 115 | |||
| 116 | /* | ||
| 117 | * Dual-Ported RAM | ||
| 118 | */ | ||
| 119 | #define DRAM_global 0 | ||
| 120 | #define INT_data (DRAM_global + 0) | ||
| 121 | #define Config_base (DRAM_global + 0x108) | ||
| 122 | |||
| 123 | #define IRQindex (INT_data + 0) | ||
| 124 | #define IRQpending (INT_data + 4) | ||
| 125 | #define IRQtable (INT_data + 8) | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Interrupt Status | ||
| 129 | */ | ||
| 130 | #define IntrRx 0x01 /* receiver data O.K. */ | ||
| 131 | #define IntrTx 0x02 /* transmit buffer empty */ | ||
| 132 | #define IntrFunc 0x04 /* function complete */ | ||
| 133 | #define IntrBreak 0x08 /* received break */ | ||
| 134 | #define IntrLine 0x10 /* line status change | ||
| 135 | for transmitter */ | ||
| 136 | #define IntrIntr 0x20 /* received INTR code */ | ||
| 137 | #define IntrQuit 0x40 /* received QUIT code */ | ||
| 138 | #define IntrEOF 0x80 /* received EOF code */ | ||
| 139 | |||
| 140 | #define IntrRxTrigger 0x100 /* rx data count reach tigger value */ | ||
| 141 | #define IntrTxTrigger 0x200 /* tx data count below trigger value */ | ||
| 142 | |||
| 143 | #define Magic_no (Config_base + 0) | ||
| 144 | #define Card_model_no (Config_base + 2) | ||
| 145 | #define Total_ports (Config_base + 4) | ||
| 146 | #define Module_cnt (Config_base + 8) | ||
| 147 | #define Module_no (Config_base + 10) | ||
| 148 | #define Timer_10ms (Config_base + 14) | ||
| 149 | #define Disable_IRQ (Config_base + 20) | ||
| 150 | #define TMS320_PORT1 (Config_base + 22) | ||
| 151 | #define TMS320_PORT2 (Config_base + 24) | ||
| 152 | #define TMS320_CLOCK (Config_base + 26) | ||
| 153 | |||
| 154 | /* | ||
| 155 | * DATA BUFFER in DRAM | ||
| 156 | */ | ||
| 157 | #define Extern_table 0x400 /* Base address of the external table | ||
| 158 | (24 words * 64) total 3K bytes | ||
| 159 | (24 words * 128) total 6K bytes */ | ||
| 160 | #define Extern_size 0x60 /* 96 bytes */ | ||
| 161 | #define RXrptr 0x00 /* read pointer for RX buffer */ | ||
| 162 | #define RXwptr 0x02 /* write pointer for RX buffer */ | ||
| 163 | #define TXrptr 0x04 /* read pointer for TX buffer */ | ||
| 164 | #define TXwptr 0x06 /* write pointer for TX buffer */ | ||
| 165 | #define HostStat 0x08 /* IRQ flag and general flag */ | ||
| 166 | #define FlagStat 0x0A | ||
| 167 | #define FlowControl 0x0C /* B7 B6 B5 B4 B3 B2 B1 B0 */ | ||
| 168 | /* x x x x | | | | */ | ||
| 169 | /* | | | + CTS flow */ | ||
| 170 | /* | | +--- RTS flow */ | ||
| 171 | /* | +------ TX Xon/Xoff */ | ||
| 172 | /* +--------- RX Xon/Xoff */ | ||
| 173 | #define Break_cnt 0x0E /* received break count */ | ||
| 174 | #define CD180TXirq 0x10 /* if non-0: enable TX irq */ | ||
| 175 | #define RX_mask 0x12 | ||
| 176 | #define TX_mask 0x14 | ||
| 177 | #define Ofs_rxb 0x16 | ||
| 178 | #define Ofs_txb 0x18 | ||
| 179 | #define Page_rxb 0x1A | ||
| 180 | #define Page_txb 0x1C | ||
| 181 | #define EndPage_rxb 0x1E | ||
| 182 | #define EndPage_txb 0x20 | ||
| 183 | #define Data_error 0x22 | ||
| 184 | #define RxTrigger 0x28 | ||
| 185 | #define TxTrigger 0x2a | ||
| 186 | |||
| 187 | #define rRXwptr 0x34 | ||
| 188 | #define Low_water 0x36 | ||
| 189 | |||
| 190 | #define FuncCode 0x40 | ||
| 191 | #define FuncArg 0x42 | ||
| 192 | #define FuncArg1 0x44 | ||
| 193 | |||
| 194 | #define C218rx_size 0x2000 /* 8K bytes */ | ||
| 195 | #define C218tx_size 0x8000 /* 32K bytes */ | ||
| 196 | |||
| 197 | #define C218rx_mask (C218rx_size - 1) | ||
| 198 | #define C218tx_mask (C218tx_size - 1) | ||
| 199 | |||
| 200 | #define C320p8rx_size 0x2000 | ||
| 201 | #define C320p8tx_size 0x8000 | ||
| 202 | #define C320p8rx_mask (C320p8rx_size - 1) | ||
| 203 | #define C320p8tx_mask (C320p8tx_size - 1) | ||
| 204 | |||
| 205 | #define C320p16rx_size 0x2000 | ||
| 206 | #define C320p16tx_size 0x4000 | ||
| 207 | #define C320p16rx_mask (C320p16rx_size - 1) | ||
| 208 | #define C320p16tx_mask (C320p16tx_size - 1) | ||
| 209 | |||
| 210 | #define C320p24rx_size 0x2000 | ||
| 211 | #define C320p24tx_size 0x2000 | ||
| 212 | #define C320p24rx_mask (C320p24rx_size - 1) | ||
| 213 | #define C320p24tx_mask (C320p24tx_size - 1) | ||
| 214 | |||
| 215 | #define C320p32rx_size 0x1000 | ||
| 216 | #define C320p32tx_size 0x1000 | ||
| 217 | #define C320p32rx_mask (C320p32rx_size - 1) | ||
| 218 | #define C320p32tx_mask (C320p32tx_size - 1) | ||
| 219 | |||
| 220 | #define Page_size 0x2000U | ||
| 221 | #define Page_mask (Page_size - 1) | ||
| 222 | #define C218rx_spage 3 | ||
| 223 | #define C218tx_spage 4 | ||
| 224 | #define C218rx_pageno 1 | ||
| 225 | #define C218tx_pageno 4 | ||
| 226 | #define C218buf_pageno 5 | ||
| 227 | |||
| 228 | #define C320p8rx_spage 3 | ||
| 229 | #define C320p8tx_spage 4 | ||
| 230 | #define C320p8rx_pgno 1 | ||
| 231 | #define C320p8tx_pgno 4 | ||
| 232 | #define C320p8buf_pgno 5 | ||
| 233 | |||
| 234 | #define C320p16rx_spage 3 | ||
| 235 | #define C320p16tx_spage 4 | ||
| 236 | #define C320p16rx_pgno 1 | ||
| 237 | #define C320p16tx_pgno 2 | ||
| 238 | #define C320p16buf_pgno 3 | ||
| 239 | |||
| 240 | #define C320p24rx_spage 3 | ||
| 241 | #define C320p24tx_spage 4 | ||
| 242 | #define C320p24rx_pgno 1 | ||
| 243 | #define C320p24tx_pgno 1 | ||
| 244 | #define C320p24buf_pgno 2 | ||
| 245 | |||
| 246 | #define C320p32rx_spage 3 | ||
| 247 | #define C320p32tx_ofs C320p32rx_size | ||
| 248 | #define C320p32tx_spage 3 | ||
| 249 | #define C320p32buf_pgno 1 | ||
| 250 | |||
| 251 | /* | ||
| 252 | * Host Status | ||
| 253 | */ | ||
| 254 | #define WakeupRx 0x01 | ||
| 255 | #define WakeupTx 0x02 | ||
| 256 | #define WakeupBreak 0x08 | ||
| 257 | #define WakeupLine 0x10 | ||
| 258 | #define WakeupIntr 0x20 | ||
| 259 | #define WakeupQuit 0x40 | ||
| 260 | #define WakeupEOF 0x80 /* used in VTIME control */ | ||
| 261 | #define WakeupRxTrigger 0x100 | ||
| 262 | #define WakeupTxTrigger 0x200 | ||
| 263 | /* | ||
| 264 | * Flag status | ||
| 265 | */ | ||
| 266 | #define Rx_over 0x01 | ||
| 267 | #define Xoff_state 0x02 | ||
| 268 | #define Tx_flowOff 0x04 | ||
| 269 | #define Tx_enable 0x08 | ||
| 270 | #define CTS_state 0x10 | ||
| 271 | #define DSR_state 0x20 | ||
| 272 | #define DCD_state 0x80 | ||
| 273 | /* | ||
| 274 | * FlowControl | ||
| 275 | */ | ||
| 276 | #define CTS_FlowCtl 1 | ||
| 277 | #define RTS_FlowCtl 2 | ||
| 278 | #define Tx_FlowCtl 4 | ||
| 279 | #define Rx_FlowCtl 8 | ||
| 280 | #define IXM_IXANY 0x10 | ||
| 281 | |||
| 282 | #define LowWater 128 | ||
| 283 | |||
| 284 | #define DTR_ON 1 | ||
| 285 | #define RTS_ON 2 | ||
| 286 | #define CTS_ON 1 | ||
| 287 | #define DSR_ON 2 | ||
| 288 | #define DCD_ON 8 | ||
| 289 | |||
| 290 | /* mode definition */ | ||
| 291 | #define MX_CS8 0x03 | ||
| 292 | #define MX_CS7 0x02 | ||
| 293 | #define MX_CS6 0x01 | ||
| 294 | #define MX_CS5 0x00 | ||
| 295 | |||
| 296 | #define MX_STOP1 0x00 | ||
| 297 | #define MX_STOP15 0x04 | ||
| 298 | #define MX_STOP2 0x08 | ||
| 299 | |||
| 300 | #define MX_PARNONE 0x00 | ||
| 301 | #define MX_PAREVEN 0x40 | ||
| 302 | #define MX_PARODD 0xC0 | ||
| 303 | |||
| 304 | #endif | ||
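The new header lays the dual-ported RAM out as one 0x60-byte descriptor per port starting at Extern_table, which is exactly what the removed setup code earlier computes with baseAddr + Extern_table + Extern_size * i. A short sketch of addressing a port descriptor and decoding a few FlagStat bits with the constants from this header; the helper names are made up for the example.

    /* Sketch: locate port i's descriptor in dual-ported RAM and decode
     * some FlagStat bits, using only constants from moxa.h above. */
    static void __iomem *port_desc(void __iomem *base, unsigned int i)
    {
            return base + Extern_table + Extern_size * i;
    }

    static void dump_port_flags(void __iomem *base, unsigned int i)
    {
            u8 flags = readb(port_desc(base, i) + FlagStat);

            pr_debug("port %u:%s%s%s\n", i,
                     (flags & CTS_state)  ? " CTS"  : "",
                     (flags & DCD_state)  ? " DCD"  : "",
                     (flags & Xoff_state) ? " XOFF" : "");
    }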
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c index 68c2e9234691..4b81a85c5b53 100644 --- a/drivers/char/mxser.c +++ b/drivers/char/mxser.c | |||
| @@ -307,6 +307,200 @@ static unsigned char mxser_msr[MXSER_PORTS + 1]; | |||
| 307 | static struct mxser_mon_ext mon_data_ext; | 307 | static struct mxser_mon_ext mon_data_ext; |
| 308 | static int mxser_set_baud_method[MXSER_PORTS + 1]; | 308 | static int mxser_set_baud_method[MXSER_PORTS + 1]; |
| 309 | 309 | ||
| 310 | static void mxser_enable_must_enchance_mode(unsigned long baseio) | ||
| 311 | { | ||
| 312 | u8 oldlcr; | ||
| 313 | u8 efr; | ||
| 314 | |||
| 315 | oldlcr = inb(baseio + UART_LCR); | ||
| 316 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 317 | |||
| 318 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 319 | efr |= MOXA_MUST_EFR_EFRB_ENABLE; | ||
| 320 | |||
| 321 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 322 | outb(oldlcr, baseio + UART_LCR); | ||
| 323 | } | ||
| 324 | |||
| 325 | static void mxser_disable_must_enchance_mode(unsigned long baseio) | ||
| 326 | { | ||
| 327 | u8 oldlcr; | ||
| 328 | u8 efr; | ||
| 329 | |||
| 330 | oldlcr = inb(baseio + UART_LCR); | ||
| 331 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 332 | |||
| 333 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 334 | efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; | ||
| 335 | |||
| 336 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 337 | outb(oldlcr, baseio + UART_LCR); | ||
| 338 | } | ||
| 339 | |||
| 340 | static void mxser_set_must_xon1_value(unsigned long baseio, u8 value) | ||
| 341 | { | ||
| 342 | u8 oldlcr; | ||
| 343 | u8 efr; | ||
| 344 | |||
| 345 | oldlcr = inb(baseio + UART_LCR); | ||
| 346 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 347 | |||
| 348 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 349 | efr &= ~MOXA_MUST_EFR_BANK_MASK; | ||
| 350 | efr |= MOXA_MUST_EFR_BANK0; | ||
| 351 | |||
| 352 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 353 | outb(value, baseio + MOXA_MUST_XON1_REGISTER); | ||
| 354 | outb(oldlcr, baseio + UART_LCR); | ||
| 355 | } | ||
| 356 | |||
| 357 | static void mxser_set_must_xoff1_value(unsigned long baseio, u8 value) | ||
| 358 | { | ||
| 359 | u8 oldlcr; | ||
| 360 | u8 efr; | ||
| 361 | |||
| 362 | oldlcr = inb(baseio + UART_LCR); | ||
| 363 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 364 | |||
| 365 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 366 | efr &= ~MOXA_MUST_EFR_BANK_MASK; | ||
| 367 | efr |= MOXA_MUST_EFR_BANK0; | ||
| 368 | |||
| 369 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 370 | outb(value, baseio + MOXA_MUST_XOFF1_REGISTER); | ||
| 371 | outb(oldlcr, baseio + UART_LCR); | ||
| 372 | } | ||
| 373 | |||
| 374 | static void mxser_set_must_fifo_value(struct mxser_port *info) | ||
| 375 | { | ||
| 376 | u8 oldlcr; | ||
| 377 | u8 efr; | ||
| 378 | |||
| 379 | oldlcr = inb(info->ioaddr + UART_LCR); | ||
| 380 | outb(MOXA_MUST_ENTER_ENCHANCE, info->ioaddr + UART_LCR); | ||
| 381 | |||
| 382 | efr = inb(info->ioaddr + MOXA_MUST_EFR_REGISTER); | ||
| 383 | efr &= ~MOXA_MUST_EFR_BANK_MASK; | ||
| 384 | efr |= MOXA_MUST_EFR_BANK1; | ||
| 385 | |||
| 386 | outb(efr, info->ioaddr + MOXA_MUST_EFR_REGISTER); | ||
| 387 | outb((u8)info->rx_high_water, info->ioaddr + MOXA_MUST_RBRTH_REGISTER); | ||
| 388 | outb((u8)info->rx_trigger, info->ioaddr + MOXA_MUST_RBRTI_REGISTER); | ||
| 389 | outb((u8)info->rx_low_water, info->ioaddr + MOXA_MUST_RBRTL_REGISTER); | ||
| 390 | outb(oldlcr, info->ioaddr + UART_LCR); | ||
| 391 | } | ||
| 392 | |||
| 393 | static void mxser_set_must_enum_value(unsigned long baseio, u8 value) | ||
| 394 | { | ||
| 395 | u8 oldlcr; | ||
| 396 | u8 efr; | ||
| 397 | |||
| 398 | oldlcr = inb(baseio + UART_LCR); | ||
| 399 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 400 | |||
| 401 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 402 | efr &= ~MOXA_MUST_EFR_BANK_MASK; | ||
| 403 | efr |= MOXA_MUST_EFR_BANK2; | ||
| 404 | |||
| 405 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 406 | outb(value, baseio + MOXA_MUST_ENUM_REGISTER); | ||
| 407 | outb(oldlcr, baseio + UART_LCR); | ||
| 408 | } | ||
| 409 | |||
| 410 | static void mxser_get_must_hardware_id(unsigned long baseio, u8 *pId) | ||
| 411 | { | ||
| 412 | u8 oldlcr; | ||
| 413 | u8 efr; | ||
| 414 | |||
| 415 | oldlcr = inb(baseio + UART_LCR); | ||
| 416 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 417 | |||
| 418 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 419 | efr &= ~MOXA_MUST_EFR_BANK_MASK; | ||
| 420 | efr |= MOXA_MUST_EFR_BANK2; | ||
| 421 | |||
| 422 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 423 | *pId = inb(baseio + MOXA_MUST_HWID_REGISTER); | ||
| 424 | outb(oldlcr, baseio + UART_LCR); | ||
| 425 | } | ||
| 426 | |||
| 427 | static void SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(unsigned long baseio) | ||
| 428 | { | ||
| 429 | u8 oldlcr; | ||
| 430 | u8 efr; | ||
| 431 | |||
| 432 | oldlcr = inb(baseio + UART_LCR); | ||
| 433 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 434 | |||
| 435 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 436 | efr &= ~MOXA_MUST_EFR_SF_MASK; | ||
| 437 | |||
| 438 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 439 | outb(oldlcr, baseio + UART_LCR); | ||
| 440 | } | ||
| 441 | |||
| 442 | static void mxser_enable_must_tx_software_flow_control(unsigned long baseio) | ||
| 443 | { | ||
| 444 | u8 oldlcr; | ||
| 445 | u8 efr; | ||
| 446 | |||
| 447 | oldlcr = inb(baseio + UART_LCR); | ||
| 448 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 449 | |||
| 450 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 451 | efr &= ~MOXA_MUST_EFR_SF_TX_MASK; | ||
| 452 | efr |= MOXA_MUST_EFR_SF_TX1; | ||
| 453 | |||
| 454 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 455 | outb(oldlcr, baseio + UART_LCR); | ||
| 456 | } | ||
| 457 | |||
| 458 | static void mxser_disable_must_tx_software_flow_control(unsigned long baseio) | ||
| 459 | { | ||
| 460 | u8 oldlcr; | ||
| 461 | u8 efr; | ||
| 462 | |||
| 463 | oldlcr = inb(baseio + UART_LCR); | ||
| 464 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 465 | |||
| 466 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 467 | efr &= ~MOXA_MUST_EFR_SF_TX_MASK; | ||
| 468 | |||
| 469 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 470 | outb(oldlcr, baseio + UART_LCR); | ||
| 471 | } | ||
| 472 | |||
| 473 | static void mxser_enable_must_rx_software_flow_control(unsigned long baseio) | ||
| 474 | { | ||
| 475 | u8 oldlcr; | ||
| 476 | u8 efr; | ||
| 477 | |||
| 478 | oldlcr = inb(baseio + UART_LCR); | ||
| 479 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 480 | |||
| 481 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 482 | efr &= ~MOXA_MUST_EFR_SF_RX_MASK; | ||
| 483 | efr |= MOXA_MUST_EFR_SF_RX1; | ||
| 484 | |||
| 485 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 486 | outb(oldlcr, baseio + UART_LCR); | ||
| 487 | } | ||
| 488 | |||
| 489 | static void mxser_disable_must_rx_software_flow_control(unsigned long baseio) | ||
| 490 | { | ||
| 491 | u8 oldlcr; | ||
| 492 | u8 efr; | ||
| 493 | |||
| 494 | oldlcr = inb(baseio + UART_LCR); | ||
| 495 | outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR); | ||
| 496 | |||
| 497 | efr = inb(baseio + MOXA_MUST_EFR_REGISTER); | ||
| 498 | efr &= ~MOXA_MUST_EFR_SF_RX_MASK; | ||
| 499 | |||
| 500 | outb(efr, baseio + MOXA_MUST_EFR_REGISTER); | ||
| 501 | outb(oldlcr, baseio + UART_LCR); | ||
| 502 | } | ||
| 503 | |||
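All of the mxser_*_must_* helpers added above share one frame: save LCR, write MOXA_MUST_ENTER_ENCHANCE to LCR so the enhanced registers appear, pick a bank (or flow-control bits) in the EFR, optionally write one banked register, and restore LCR. The driver keeps them open-coded; a factored version is sketched below only to make the pattern visible, it is not part of the patch, and the register macros are the ones used above.

    /* Illustration of the common frame: expose the MUST enhanced
     * registers, select an EFR bank, write one banked register,
     * restore LCR. */
    static void must_write_banked(unsigned long baseio, u8 bank,
                                  unsigned int reg, u8 value)
    {
            u8 oldlcr = inb(baseio + UART_LCR);
            u8 efr;

            outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);

            efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
            efr &= ~MOXA_MUST_EFR_BANK_MASK;
            efr |= bank;
            outb(efr, baseio + MOXA_MUST_EFR_REGISTER);

            outb(value, baseio + reg);

            outb(oldlcr, baseio + UART_LCR);
    }

    /* e.g. mxser_set_must_xon1_value(io, c) is equivalent to
     *     must_write_banked(io, MOXA_MUST_EFR_BANK0,
     *                       MOXA_MUST_XON1_REGISTER, c);                */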
| 310 | #ifdef CONFIG_PCI | 504 | #ifdef CONFIG_PCI |
| 311 | static int __devinit CheckIsMoxaMust(unsigned long io) | 505 | static int __devinit CheckIsMoxaMust(unsigned long io) |
| 312 | { | 506 | { |
| @@ -314,16 +508,16 @@ static int __devinit CheckIsMoxaMust(unsigned long io) | |||
| 314 | int i; | 508 | int i; |
| 315 | 509 | ||
| 316 | outb(0, io + UART_LCR); | 510 | outb(0, io + UART_LCR); |
| 317 | DISABLE_MOXA_MUST_ENCHANCE_MODE(io); | 511 | mxser_disable_must_enchance_mode(io); |
| 318 | oldmcr = inb(io + UART_MCR); | 512 | oldmcr = inb(io + UART_MCR); |
| 319 | outb(0, io + UART_MCR); | 513 | outb(0, io + UART_MCR); |
| 320 | SET_MOXA_MUST_XON1_VALUE(io, 0x11); | 514 | mxser_set_must_xon1_value(io, 0x11); |
| 321 | if ((hwid = inb(io + UART_MCR)) != 0) { | 515 | if ((hwid = inb(io + UART_MCR)) != 0) { |
| 322 | outb(oldmcr, io + UART_MCR); | 516 | outb(oldmcr, io + UART_MCR); |
| 323 | return MOXA_OTHER_UART; | 517 | return MOXA_OTHER_UART; |
| 324 | } | 518 | } |
| 325 | 519 | ||
| 326 | GET_MOXA_MUST_HARDWARE_ID(io, &hwid); | 520 | mxser_get_must_hardware_id(io, &hwid); |
| 327 | for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */ | 521 | for (i = 1; i < UART_INFO_NUM; i++) { /* 0 = OTHER_UART */ |
| 328 | if (hwid == Gpci_uart_info[i].type) | 522 | if (hwid == Gpci_uart_info[i].type) |
| 329 | return (int)hwid; | 523 | return (int)hwid; |
| @@ -494,10 +688,10 @@ static int mxser_set_baud(struct mxser_port *info, long newspd) | |||
| 494 | } else | 688 | } else |
| 495 | quot /= newspd; | 689 | quot /= newspd; |
| 496 | 690 | ||
| 497 | SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, quot); | 691 | mxser_set_must_enum_value(info->ioaddr, quot); |
| 498 | } else | 692 | } else |
| 499 | #endif | 693 | #endif |
| 500 | SET_MOXA_MUST_ENUM_VALUE(info->ioaddr, 0); | 694 | mxser_set_must_enum_value(info->ioaddr, 0); |
| 501 | 695 | ||
| 502 | return 0; | 696 | return 0; |
| 503 | } | 697 | } |
| @@ -553,14 +747,14 @@ static int mxser_change_speed(struct mxser_port *info, | |||
| 553 | if (info->board->chip_flag) { | 747 | if (info->board->chip_flag) { |
| 554 | fcr = UART_FCR_ENABLE_FIFO; | 748 | fcr = UART_FCR_ENABLE_FIFO; |
| 555 | fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; | 749 | fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; |
| 556 | SET_MOXA_MUST_FIFO_VALUE(info); | 750 | mxser_set_must_fifo_value(info); |
| 557 | } else | 751 | } else |
| 558 | fcr = 0; | 752 | fcr = 0; |
| 559 | } else { | 753 | } else { |
| 560 | fcr = UART_FCR_ENABLE_FIFO; | 754 | fcr = UART_FCR_ENABLE_FIFO; |
| 561 | if (info->board->chip_flag) { | 755 | if (info->board->chip_flag) { |
| 562 | fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; | 756 | fcr |= MOXA_MUST_FCR_GDA_MODE_ENABLE; |
| 563 | SET_MOXA_MUST_FIFO_VALUE(info); | 757 | mxser_set_must_fifo_value(info); |
| 564 | } else { | 758 | } else { |
| 565 | switch (info->rx_trigger) { | 759 | switch (info->rx_trigger) { |
| 566 | case 1: | 760 | case 1: |
| @@ -657,17 +851,21 @@ static int mxser_change_speed(struct mxser_port *info, | |||
| 657 | } | 851 | } |
| 658 | } | 852 | } |
| 659 | if (info->board->chip_flag) { | 853 | if (info->board->chip_flag) { |
| 660 | SET_MOXA_MUST_XON1_VALUE(info->ioaddr, START_CHAR(info->tty)); | 854 | mxser_set_must_xon1_value(info->ioaddr, START_CHAR(info->tty)); |
| 661 | SET_MOXA_MUST_XOFF1_VALUE(info->ioaddr, STOP_CHAR(info->tty)); | 855 | mxser_set_must_xoff1_value(info->ioaddr, STOP_CHAR(info->tty)); |
| 662 | if (I_IXON(info->tty)) { | 856 | if (I_IXON(info->tty)) { |
| 663 | ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); | 857 | mxser_enable_must_rx_software_flow_control( |
| 858 | info->ioaddr); | ||
| 664 | } else { | 859 | } else { |
| 665 | DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); | 860 | mxser_disable_must_rx_software_flow_control( |
| 861 | info->ioaddr); | ||
| 666 | } | 862 | } |
| 667 | if (I_IXOFF(info->tty)) { | 863 | if (I_IXOFF(info->tty)) { |
| 668 | ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr); | 864 | mxser_enable_must_tx_software_flow_control( |
| 865 | info->ioaddr); | ||
| 669 | } else { | 866 | } else { |
| 670 | DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(info->ioaddr); | 867 | mxser_disable_must_tx_software_flow_control( |
| 868 | info->ioaddr); | ||
| 671 | } | 869 | } |
| 672 | } | 870 | } |
| 673 | 871 | ||
| @@ -927,6 +1125,27 @@ static int mxser_open(struct tty_struct *tty, struct file *filp) | |||
| 927 | return 0; | 1125 | return 0; |
| 928 | } | 1126 | } |
| 929 | 1127 | ||
| 1128 | static void mxser_flush_buffer(struct tty_struct *tty) | ||
| 1129 | { | ||
| 1130 | struct mxser_port *info = tty->driver_data; | ||
| 1131 | char fcr; | ||
| 1132 | unsigned long flags; | ||
| 1133 | |||
| 1134 | |||
| 1135 | spin_lock_irqsave(&info->slock, flags); | ||
| 1136 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | ||
| 1137 | |||
| 1138 | fcr = inb(info->ioaddr + UART_FCR); | ||
| 1139 | outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), | ||
| 1140 | info->ioaddr + UART_FCR); | ||
| 1141 | outb(fcr, info->ioaddr + UART_FCR); | ||
| 1142 | |||
| 1143 | spin_unlock_irqrestore(&info->slock, flags); | ||
| 1144 | |||
| 1145 | tty_wakeup(tty); | ||
| 1146 | } | ||
| 1147 | |||
| 1148 | |||
| 930 | /* | 1149 | /* |
| 931 | * This routine is called when the serial port gets closed. First, we | 1150 | * This routine is called when the serial port gets closed. First, we |
| 932 | * wait for the last remaining data to be sent. Then, we unlink its | 1151 | * wait for the last remaining data to be sent. Then, we unlink its |
| @@ -1013,9 +1232,7 @@ static void mxser_close(struct tty_struct *tty, struct file *filp) | |||
| 1013 | } | 1232 | } |
| 1014 | mxser_shutdown(info); | 1233 | mxser_shutdown(info); |
| 1015 | 1234 | ||
| 1016 | if (tty->driver->flush_buffer) | 1235 | mxser_flush_buffer(tty); |
| 1017 | tty->driver->flush_buffer(tty); | ||
| 1018 | |||
| 1019 | tty_ldisc_flush(tty); | 1236 | tty_ldisc_flush(tty); |
| 1020 | 1237 | ||
| 1021 | tty->closing = 0; | 1238 | tty->closing = 0; |
| @@ -1072,16 +1289,16 @@ static int mxser_write(struct tty_struct *tty, const unsigned char *buf, int cou | |||
| 1072 | return total; | 1289 | return total; |
| 1073 | } | 1290 | } |
| 1074 | 1291 | ||
| 1075 | static void mxser_put_char(struct tty_struct *tty, unsigned char ch) | 1292 | static int mxser_put_char(struct tty_struct *tty, unsigned char ch) |
| 1076 | { | 1293 | { |
| 1077 | struct mxser_port *info = tty->driver_data; | 1294 | struct mxser_port *info = tty->driver_data; |
| 1078 | unsigned long flags; | 1295 | unsigned long flags; |
| 1079 | 1296 | ||
| 1080 | if (!info->xmit_buf) | 1297 | if (!info->xmit_buf) |
| 1081 | return; | 1298 | return 0; |
| 1082 | 1299 | ||
| 1083 | if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) | 1300 | if (info->xmit_cnt >= SERIAL_XMIT_SIZE - 1) |
| 1084 | return; | 1301 | return 0; |
| 1085 | 1302 | ||
| 1086 | spin_lock_irqsave(&info->slock, flags); | 1303 | spin_lock_irqsave(&info->slock, flags); |
| 1087 | info->xmit_buf[info->xmit_head++] = ch; | 1304 | info->xmit_buf[info->xmit_head++] = ch; |
| @@ -1099,6 +1316,7 @@ static void mxser_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1099 | spin_unlock_irqrestore(&info->slock, flags); | 1316 | spin_unlock_irqrestore(&info->slock, flags); |
| 1100 | } | 1317 | } |
| 1101 | } | 1318 | } |
| 1319 | return 1; | ||
| 1102 | } | 1320 | } |
| 1103 | 1321 | ||
| 1104 | 1322 | ||
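mxser_put_char() now returns an int: 0 when the byte could not be queued (no transmit buffer, or the buffer is full) and 1 when it was, matching the put_char operation that reports success in this kernel series. A hypothetical in-driver caller, not part of the patch, showing how the return value can be used:

    /* Hypothetical helper: fall back to the normal write path when
     * put_char reports the byte was not queued. */
    static void mxser_send_one(struct tty_struct *tty, unsigned char ch)
    {
            if (!mxser_put_char(tty, ch))
                    mxser_write(tty, &ch, 1);
    }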
| @@ -1142,26 +1360,6 @@ static int mxser_chars_in_buffer(struct tty_struct *tty) | |||
| 1142 | return info->xmit_cnt; | 1360 | return info->xmit_cnt; |
| 1143 | } | 1361 | } |
| 1144 | 1362 | ||
| 1145 | static void mxser_flush_buffer(struct tty_struct *tty) | ||
| 1146 | { | ||
| 1147 | struct mxser_port *info = tty->driver_data; | ||
| 1148 | char fcr; | ||
| 1149 | unsigned long flags; | ||
| 1150 | |||
| 1151 | |||
| 1152 | spin_lock_irqsave(&info->slock, flags); | ||
| 1153 | info->xmit_cnt = info->xmit_head = info->xmit_tail = 0; | ||
| 1154 | |||
| 1155 | fcr = inb(info->ioaddr + UART_FCR); | ||
| 1156 | outb((fcr | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), | ||
| 1157 | info->ioaddr + UART_FCR); | ||
| 1158 | outb(fcr, info->ioaddr + UART_FCR); | ||
| 1159 | |||
| 1160 | spin_unlock_irqrestore(&info->slock, flags); | ||
| 1161 | |||
| 1162 | tty_wakeup(tty); | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | /* | 1363 | /* |
| 1166 | * ------------------------------------------------------------ | 1364 | * ------------------------------------------------------------ |
| 1167 | * friends of mxser_ioctl() | 1365 | * friends of mxser_ioctl() |
| @@ -1460,6 +1658,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) | |||
| 1460 | struct mxser_port *port; | 1658 | struct mxser_port *port; |
| 1461 | int result, status; | 1659 | int result, status; |
| 1462 | unsigned int i, j; | 1660 | unsigned int i, j; |
| 1661 | int ret = 0; | ||
| 1463 | 1662 | ||
| 1464 | switch (cmd) { | 1663 | switch (cmd) { |
| 1465 | case MOXA_GET_MAJOR: | 1664 | case MOXA_GET_MAJOR: |
| @@ -1467,18 +1666,21 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) | |||
| 1467 | 1666 | ||
| 1468 | case MOXA_CHKPORTENABLE: | 1667 | case MOXA_CHKPORTENABLE: |
| 1469 | result = 0; | 1668 | result = 0; |
| 1470 | 1669 | lock_kernel(); | |
| 1471 | for (i = 0; i < MXSER_BOARDS; i++) | 1670 | for (i = 0; i < MXSER_BOARDS; i++) |
| 1472 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) | 1671 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) |
| 1473 | if (mxser_boards[i].ports[j].ioaddr) | 1672 | if (mxser_boards[i].ports[j].ioaddr) |
| 1474 | result |= (1 << i); | 1673 | result |= (1 << i); |
| 1475 | 1674 | unlock_kernel(); | |
| 1476 | return put_user(result, (unsigned long __user *)argp); | 1675 | return put_user(result, (unsigned long __user *)argp); |
| 1477 | case MOXA_GETDATACOUNT: | 1676 | case MOXA_GETDATACOUNT: |
| 1677 | lock_kernel(); | ||
| 1478 | if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log))) | 1678 | if (copy_to_user(argp, &mxvar_log, sizeof(mxvar_log))) |
| 1479 | return -EFAULT; | 1679 | ret = -EFAULT; |
| 1480 | return 0; | 1680 | unlock_kernel(); |
| 1681 | return ret; | ||
| 1481 | case MOXA_GETMSTATUS: | 1682 | case MOXA_GETMSTATUS: |
| 1683 | lock_kernel(); | ||
| 1482 | for (i = 0; i < MXSER_BOARDS; i++) | 1684 | for (i = 0; i < MXSER_BOARDS; i++) |
| 1483 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { | 1685 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { |
| 1484 | port = &mxser_boards[i].ports[j]; | 1686 | port = &mxser_boards[i].ports[j]; |
| @@ -1515,6 +1717,7 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) | |||
| 1515 | else | 1717 | else |
| 1516 | GMStatus[i].cts = 0; | 1718 | GMStatus[i].cts = 0; |
| 1517 | } | 1719 | } |
| 1720 | unlock_kernel(); | ||
| 1518 | if (copy_to_user(argp, GMStatus, | 1721 | if (copy_to_user(argp, GMStatus, |
| 1519 | sizeof(struct mxser_mstatus) * MXSER_PORTS)) | 1722 | sizeof(struct mxser_mstatus) * MXSER_PORTS)) |
| 1520 | return -EFAULT; | 1723 | return -EFAULT; |
| @@ -1524,7 +1727,8 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) | |||
| 1524 | unsigned long opmode; | 1727 | unsigned long opmode; |
| 1525 | unsigned cflag, iflag; | 1728 | unsigned cflag, iflag; |
| 1526 | 1729 | ||
| 1527 | for (i = 0; i < MXSER_BOARDS; i++) | 1730 | lock_kernel(); |
| 1731 | for (i = 0; i < MXSER_BOARDS; i++) { | ||
| 1528 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { | 1732 | for (j = 0; j < MXSER_PORTS_PER_BOARD; j++) { |
| 1529 | port = &mxser_boards[i].ports[j]; | 1733 | port = &mxser_boards[i].ports[j]; |
| 1530 | if (!port->ioaddr) | 1734 | if (!port->ioaddr) |
| @@ -1589,13 +1793,14 @@ static int mxser_ioctl_special(unsigned int cmd, void __user *argp) | |||
| 1589 | mon_data_ext.iftype[i] = opmode; | 1793 | mon_data_ext.iftype[i] = opmode; |
| 1590 | 1794 | ||
| 1591 | } | 1795 | } |
| 1592 | if (copy_to_user(argp, &mon_data_ext, | 1796 | } |
| 1593 | sizeof(mon_data_ext))) | 1797 | unlock_kernel(); |
| 1594 | return -EFAULT; | 1798 | if (copy_to_user(argp, &mon_data_ext, |
| 1595 | 1799 | sizeof(mon_data_ext))) | |
| 1596 | return 0; | 1800 | return -EFAULT; |
| 1597 | 1801 | return 0; | |
| 1598 | } default: | 1802 | } |
| 1803 | default: | ||
| 1599 | return -ENOIOCTLCMD; | 1804 | return -ENOIOCTLCMD; |
| 1600 | } | 1805 | } |
| 1601 | return 0; | 1806 | return 0; |
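The lock_kernel()/unlock_kernel() pairs added through these ioctl hunks push the big kernel lock down into the individual command handlers, holding it only around the shared-state walks and dropping it before copy_to_user()/put_user() where possible. The shape of that pattern, with a made-up helper name:

    /* BKL push-down sketch (helper name invented): snapshot the shared
     * counters under lock_kernel(), copy to userspace without it.
     * Needs <linux/smp_lock.h>, <linux/uaccess.h>. */
    static int copy_mon_snapshot(struct mxser_port *info,
                                 struct mxser_mon __user *argp)
    {
            struct mxser_mon snapshot;

            lock_kernel();
            snapshot = info->mon_data;
            unlock_kernel();

            return copy_to_user(argp, &snapshot, sizeof(snapshot)) ?
                    -EFAULT : 0;
    }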
| @@ -1651,16 +1856,20 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1651 | opmode != RS422_MODE && | 1856 | opmode != RS422_MODE && |
| 1652 | opmode != RS485_4WIRE_MODE) | 1857 | opmode != RS485_4WIRE_MODE) |
| 1653 | return -EFAULT; | 1858 | return -EFAULT; |
| 1859 | lock_kernel(); | ||
| 1654 | mask = ModeMask[p]; | 1860 | mask = ModeMask[p]; |
| 1655 | shiftbit = p * 2; | 1861 | shiftbit = p * 2; |
| 1656 | val = inb(info->opmode_ioaddr); | 1862 | val = inb(info->opmode_ioaddr); |
| 1657 | val &= mask; | 1863 | val &= mask; |
| 1658 | val |= (opmode << shiftbit); | 1864 | val |= (opmode << shiftbit); |
| 1659 | outb(val, info->opmode_ioaddr); | 1865 | outb(val, info->opmode_ioaddr); |
| 1866 | unlock_kernel(); | ||
| 1660 | } else { | 1867 | } else { |
| 1868 | lock_kernel(); | ||
| 1661 | shiftbit = p * 2; | 1869 | shiftbit = p * 2; |
| 1662 | opmode = inb(info->opmode_ioaddr) >> shiftbit; | 1870 | opmode = inb(info->opmode_ioaddr) >> shiftbit; |
| 1663 | opmode &= OP_MODE_MASK; | 1871 | opmode &= OP_MODE_MASK; |
| 1872 | unlock_kernel(); | ||
| 1664 | if (put_user(opmode, (int __user *)argp)) | 1873 | if (put_user(opmode, (int __user *)argp)) |
| 1665 | return -EFAULT; | 1874 | return -EFAULT; |
| 1666 | } | 1875 | } |
| @@ -1687,19 +1896,18 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1687 | tty_wait_until_sent(tty, 0); | 1896 | tty_wait_until_sent(tty, 0); |
| 1688 | mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4); | 1897 | mxser_send_break(info, arg ? arg * (HZ / 10) : HZ / 4); |
| 1689 | return 0; | 1898 | return 0; |
| 1690 | case TIOCGSOFTCAR: | ||
| 1691 | return put_user(!!C_CLOCAL(tty), (unsigned long __user *)argp); | ||
| 1692 | case TIOCSSOFTCAR: | ||
| 1693 | if (get_user(arg, (unsigned long __user *)argp)) | ||
| 1694 | return -EFAULT; | ||
| 1695 | tty->termios->c_cflag = ((tty->termios->c_cflag & ~CLOCAL) | (arg ? CLOCAL : 0)); | ||
| 1696 | return 0; | ||
| 1697 | case TIOCGSERIAL: | 1899 | case TIOCGSERIAL: |
| 1698 | return mxser_get_serial_info(info, argp); | 1900 | lock_kernel(); |
| 1901 | retval = mxser_get_serial_info(info, argp); | ||
| 1902 | unlock_kernel(); | ||
| 1903 | return retval; | ||
| 1699 | case TIOCSSERIAL: | 1904 | case TIOCSSERIAL: |
| 1700 | return mxser_set_serial_info(info, argp); | 1905 | lock_kernel(); |
| 1906 | retval = mxser_set_serial_info(info, argp); | ||
| 1907 | unlock_kernel(); | ||
| 1908 | return retval; | ||
| 1701 | case TIOCSERGETLSR: /* Get line status register */ | 1909 | case TIOCSERGETLSR: /* Get line status register */ |
| 1702 | return mxser_get_lsr_info(info, argp); | 1910 | return mxser_get_lsr_info(info, argp); |
| 1703 | /* | 1911 | /* |
| 1704 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change | 1912 | * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change |
| 1705 | * - mask passed in arg for lines of interest | 1913 | * - mask passed in arg for lines of interest |
| @@ -1746,24 +1954,27 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1746 | case MOXA_HighSpeedOn: | 1954 | case MOXA_HighSpeedOn: |
| 1747 | return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); | 1955 | return put_user(info->baud_base != 115200 ? 1 : 0, (int __user *)argp); |
| 1748 | case MOXA_SDS_RSTICOUNTER: | 1956 | case MOXA_SDS_RSTICOUNTER: |
| 1957 | lock_kernel(); | ||
| 1749 | info->mon_data.rxcnt = 0; | 1958 | info->mon_data.rxcnt = 0; |
| 1750 | info->mon_data.txcnt = 0; | 1959 | info->mon_data.txcnt = 0; |
| 1960 | unlock_kernel(); | ||
| 1751 | return 0; | 1961 | return 0; |
| 1752 | 1962 | ||
| 1753 | case MOXA_ASPP_OQUEUE:{ | 1963 | case MOXA_ASPP_OQUEUE:{ |
| 1754 | int len, lsr; | 1964 | int len, lsr; |
| 1755 | 1965 | ||
| 1966 | lock_kernel(); | ||
| 1756 | len = mxser_chars_in_buffer(tty); | 1967 | len = mxser_chars_in_buffer(tty); |
| 1757 | |||
| 1758 | lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT; | 1968 | lsr = inb(info->ioaddr + UART_LSR) & UART_LSR_TEMT; |
| 1759 | |||
| 1760 | len += (lsr ? 0 : 1); | 1969 | len += (lsr ? 0 : 1); |
| 1970 | unlock_kernel(); | ||
| 1761 | 1971 | ||
| 1762 | return put_user(len, (int __user *)argp); | 1972 | return put_user(len, (int __user *)argp); |
| 1763 | } | 1973 | } |
| 1764 | case MOXA_ASPP_MON: { | 1974 | case MOXA_ASPP_MON: { |
| 1765 | int mcr, status; | 1975 | int mcr, status; |
| 1766 | 1976 | ||
| 1977 | lock_kernel(); | ||
| 1767 | status = mxser_get_msr(info->ioaddr, 1, tty->index); | 1978 | status = mxser_get_msr(info->ioaddr, 1, tty->index); |
| 1768 | mxser_check_modem_status(info, status); | 1979 | mxser_check_modem_status(info, status); |
| 1769 | 1980 | ||
| @@ -1782,7 +1993,7 @@ static int mxser_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1782 | info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD; | 1993 | info->mon_data.hold_reason |= NPPI_NOTIFY_CTSHOLD; |
| 1783 | else | 1994 | else |
| 1784 | info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD; | 1995 | info->mon_data.hold_reason &= ~NPPI_NOTIFY_CTSHOLD; |
| 1785 | 1996 | unlock_kernel(); | |
| 1786 | if (copy_to_user(argp, &info->mon_data, | 1997 | if (copy_to_user(argp, &info->mon_data, |
| 1787 | sizeof(struct mxser_mon))) | 1998 | sizeof(struct mxser_mon))) |
| 1788 | return -EFAULT; | 1999 | return -EFAULT; |
| @@ -1925,7 +2136,8 @@ static void mxser_set_termios(struct tty_struct *tty, struct ktermios *old_termi | |||
| 1925 | 2136 | ||
| 1926 | if (info->board->chip_flag) { | 2137 | if (info->board->chip_flag) { |
| 1927 | spin_lock_irqsave(&info->slock, flags); | 2138 | spin_lock_irqsave(&info->slock, flags); |
| 1928 | DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(info->ioaddr); | 2139 | mxser_disable_must_rx_software_flow_control( |
| 2140 | info->ioaddr); | ||
| 1929 | spin_unlock_irqrestore(&info->slock, flags); | 2141 | spin_unlock_irqrestore(&info->slock, flags); |
| 1930 | } | 2142 | } |
| 1931 | 2143 | ||
| @@ -1979,6 +2191,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1979 | timeout, char_time); | 2191 | timeout, char_time); |
| 1980 | printk("jiff=%lu...", jiffies); | 2192 | printk("jiff=%lu...", jiffies); |
| 1981 | #endif | 2193 | #endif |
| 2194 | lock_kernel(); | ||
| 1982 | while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) { | 2195 | while (!((lsr = inb(info->ioaddr + UART_LSR)) & UART_LSR_TEMT)) { |
| 1983 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT | 2196 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT |
| 1984 | printk("lsr = %d (jiff=%lu)...", lsr, jiffies); | 2197 | printk("lsr = %d (jiff=%lu)...", lsr, jiffies); |
| @@ -1990,6 +2203,7 @@ static void mxser_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1990 | break; | 2203 | break; |
| 1991 | } | 2204 | } |
| 1992 | set_current_state(TASK_RUNNING); | 2205 | set_current_state(TASK_RUNNING); |
| 2206 | unlock_kernel(); | ||
| 1993 | 2207 | ||
| 1994 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT | 2208 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT |
| 1995 | printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); | 2209 | printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); |
| @@ -2342,7 +2556,7 @@ static int __devinit mxser_initbrd(struct mxser_board *brd, | |||
| 2342 | 2556 | ||
| 2343 | /* Enhance mode enabled here */ | 2557 | /* Enhance mode enabled here */ |
| 2344 | if (brd->chip_flag != MOXA_OTHER_UART) | 2558 | if (brd->chip_flag != MOXA_OTHER_UART) |
| 2345 | ENABLE_MOXA_MUST_ENCHANCE_MODE(info->ioaddr); | 2559 | mxser_enable_must_enchance_mode(info->ioaddr); |
| 2346 | 2560 | ||
| 2347 | info->flags = ASYNC_SHARE_IRQ; | 2561 | info->flags = ASYNC_SHARE_IRQ; |
| 2348 | info->type = brd->uart_type; | 2562 | info->type = brd->uart_type; |
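The mxser.c hunks above push the big kernel lock down from the tty core into the driver itself: lock_kernel()/unlock_kernel() now bracket only the sections that touch shared driver state (the monitor counters, the opmode register, the serial_info copy), and every early return inside a locked region drops the lock first. A minimal sketch of that pattern follows; my_port, my_stats and my_ioctl_get_stats are hypothetical names for illustration, not symbols from the patch:

    #include <linux/smp_lock.h>     /* lock_kernel() / unlock_kernel() */
    #include <linux/uaccess.h>

    /* Sketch only: take the BKL around the shared-state access, drop it
     * before any return path, then copy the snapshot to user space. */
    static int my_ioctl_get_stats(struct my_port *port, void __user *argp)
    {
            struct my_stats snap;

            lock_kernel();          /* serialise against other BKL users */
            snap = port->stats;     /* shared, otherwise-unlocked state  */
            unlock_kernel();        /* never return with the BKL held    */

            if (copy_to_user(argp, &snap, sizeof(snap)))
                    return -EFAULT;
            return 0;
    }

Copying to user space after the unlock keeps a potentially faulting access out of the locked region, which is the same ordering the ioctl hunks above converge on.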
diff --git a/drivers/char/mxser.h b/drivers/char/mxser.h index 844171115954..41878a69203d 100644 --- a/drivers/char/mxser.h +++ b/drivers/char/mxser.h | |||
| @@ -147,141 +147,4 @@ | |||
| 147 | /* Rx software flow control mask */ | 147 | /* Rx software flow control mask */ |
| 148 | #define MOXA_MUST_EFR_SF_RX_MASK 0x03 | 148 | #define MOXA_MUST_EFR_SF_RX_MASK 0x03 |
| 149 | 149 | ||
| 150 | #define ENABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \ | ||
| 151 | u8 __oldlcr, __efr; \ | ||
| 152 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 153 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 154 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 155 | __efr |= MOXA_MUST_EFR_EFRB_ENABLE; \ | ||
| 156 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 157 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 158 | } while (0) | ||
| 159 | |||
| 160 | #define DISABLE_MOXA_MUST_ENCHANCE_MODE(baseio) do { \ | ||
| 161 | u8 __oldlcr, __efr; \ | ||
| 162 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 163 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 164 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 165 | __efr &= ~MOXA_MUST_EFR_EFRB_ENABLE; \ | ||
| 166 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 167 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 168 | } while (0) | ||
| 169 | |||
| 170 | #define SET_MOXA_MUST_XON1_VALUE(baseio, Value) do { \ | ||
| 171 | u8 __oldlcr, __efr; \ | ||
| 172 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 173 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 174 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 175 | __efr &= ~MOXA_MUST_EFR_BANK_MASK; \ | ||
| 176 | __efr |= MOXA_MUST_EFR_BANK0; \ | ||
| 177 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 178 | outb((u8)(Value), (baseio)+MOXA_MUST_XON1_REGISTER); \ | ||
| 179 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 180 | } while (0) | ||
| 181 | |||
| 182 | #define SET_MOXA_MUST_XOFF1_VALUE(baseio, Value) do { \ | ||
| 183 | u8 __oldlcr, __efr; \ | ||
| 184 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 185 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 186 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 187 | __efr &= ~MOXA_MUST_EFR_BANK_MASK; \ | ||
| 188 | __efr |= MOXA_MUST_EFR_BANK0; \ | ||
| 189 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 190 | outb((u8)(Value), (baseio)+MOXA_MUST_XOFF1_REGISTER); \ | ||
| 191 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 192 | } while (0) | ||
| 193 | |||
| 194 | #define SET_MOXA_MUST_FIFO_VALUE(info) do { \ | ||
| 195 | u8 __oldlcr, __efr; \ | ||
| 196 | __oldlcr = inb((info)->ioaddr+UART_LCR); \ | ||
| 197 | outb(MOXA_MUST_ENTER_ENCHANCE, (info)->ioaddr+UART_LCR);\ | ||
| 198 | __efr = inb((info)->ioaddr+MOXA_MUST_EFR_REGISTER); \ | ||
| 199 | __efr &= ~MOXA_MUST_EFR_BANK_MASK; \ | ||
| 200 | __efr |= MOXA_MUST_EFR_BANK1; \ | ||
| 201 | outb(__efr, (info)->ioaddr+MOXA_MUST_EFR_REGISTER); \ | ||
| 202 | outb((u8)((info)->rx_high_water), (info)->ioaddr+ \ | ||
| 203 | MOXA_MUST_RBRTH_REGISTER); \ | ||
| 204 | outb((u8)((info)->rx_trigger), (info)->ioaddr+ \ | ||
| 205 | MOXA_MUST_RBRTI_REGISTER); \ | ||
| 206 | outb((u8)((info)->rx_low_water), (info)->ioaddr+ \ | ||
| 207 | MOXA_MUST_RBRTL_REGISTER); \ | ||
| 208 | outb(__oldlcr, (info)->ioaddr+UART_LCR); \ | ||
| 209 | } while (0) | ||
| 210 | |||
| 211 | #define SET_MOXA_MUST_ENUM_VALUE(baseio, Value) do { \ | ||
| 212 | u8 __oldlcr, __efr; \ | ||
| 213 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 214 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 215 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 216 | __efr &= ~MOXA_MUST_EFR_BANK_MASK; \ | ||
| 217 | __efr |= MOXA_MUST_EFR_BANK2; \ | ||
| 218 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 219 | outb((u8)(Value), (baseio)+MOXA_MUST_ENUM_REGISTER); \ | ||
| 220 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 221 | } while (0) | ||
| 222 | |||
| 223 | #define GET_MOXA_MUST_HARDWARE_ID(baseio, pId) do { \ | ||
| 224 | u8 __oldlcr, __efr; \ | ||
| 225 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 226 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 227 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 228 | __efr &= ~MOXA_MUST_EFR_BANK_MASK; \ | ||
| 229 | __efr |= MOXA_MUST_EFR_BANK2; \ | ||
| 230 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 231 | *pId = inb((baseio)+MOXA_MUST_HWID_REGISTER); \ | ||
| 232 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 233 | } while (0) | ||
| 234 | |||
| 235 | #define SET_MOXA_MUST_NO_SOFTWARE_FLOW_CONTROL(baseio) do { \ | ||
| 236 | u8 __oldlcr, __efr; \ | ||
| 237 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 238 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 239 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 240 | __efr &= ~MOXA_MUST_EFR_SF_MASK; \ | ||
| 241 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 242 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 243 | } while (0) | ||
| 244 | |||
| 245 | #define ENABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \ | ||
| 246 | u8 __oldlcr, __efr; \ | ||
| 247 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 248 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 249 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 250 | __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \ | ||
| 251 | __efr |= MOXA_MUST_EFR_SF_TX1; \ | ||
| 252 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 253 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 254 | } while (0) | ||
| 255 | |||
| 256 | #define DISABLE_MOXA_MUST_TX_SOFTWARE_FLOW_CONTROL(baseio) do { \ | ||
| 257 | u8 __oldlcr, __efr; \ | ||
| 258 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 259 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 260 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 261 | __efr &= ~MOXA_MUST_EFR_SF_TX_MASK; \ | ||
| 262 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 263 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 264 | } while (0) | ||
| 265 | |||
| 266 | #define ENABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \ | ||
| 267 | u8 __oldlcr, __efr; \ | ||
| 268 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 269 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 270 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 271 | __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \ | ||
| 272 | __efr |= MOXA_MUST_EFR_SF_RX1; \ | ||
| 273 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 274 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 275 | } while (0) | ||
| 276 | |||
| 277 | #define DISABLE_MOXA_MUST_RX_SOFTWARE_FLOW_CONTROL(baseio) do { \ | ||
| 278 | u8 __oldlcr, __efr; \ | ||
| 279 | __oldlcr = inb((baseio)+UART_LCR); \ | ||
| 280 | outb(MOXA_MUST_ENTER_ENCHANCE, (baseio)+UART_LCR); \ | ||
| 281 | __efr = inb((baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 282 | __efr &= ~MOXA_MUST_EFR_SF_RX_MASK; \ | ||
| 283 | outb(__efr, (baseio)+MOXA_MUST_EFR_REGISTER); \ | ||
| 284 | outb(__oldlcr, (baseio)+UART_LCR); \ | ||
| 285 | } while (0) | ||
| 286 | |||
| 287 | #endif | 150 | #endif |
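mxser.h loses all of its register-banging do { } while (0) macros; the driver now provides equivalent C functions (mxser_enable_must_enchance_mode(), mxser_disable_must_rx_software_flow_control() and friends, as seen in the mxser.c hunks). Judging from the removed ENABLE_MOXA_MUST_ENCHANCE_MODE body, the replacement presumably looks roughly like the sketch below; the exact function body in the patch is not shown here, and the spelling "enchance" is inherited from the original register names:

    static void mxser_enable_must_enchance_mode(unsigned long baseio)
    {
            u8 oldlcr, efr;

            /* Save LCR, switch the UART into the enhanced-register bank. */
            oldlcr = inb(baseio + UART_LCR);
            outb(MOXA_MUST_ENTER_ENCHANCE, baseio + UART_LCR);

            /* Set the EFR enable bit, then restore the original LCR. */
            efr = inb(baseio + MOXA_MUST_EFR_REGISTER);
            efr |= MOXA_MUST_EFR_EFRB_ENABLE;
            outb(efr, baseio + MOXA_MUST_EFR_REGISTER);

            outb(oldlcr, baseio + UART_LCR);
    }

Real functions are type-checked, appear in backtraces and can be instrumented, which is the usual motivation for this kind of macro-to-function conversion.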
diff --git a/drivers/char/n_hdlc.c b/drivers/char/n_hdlc.c index 06803ed5568c..a35bfd7ee80e 100644 --- a/drivers/char/n_hdlc.c +++ b/drivers/char/n_hdlc.c | |||
| @@ -342,12 +342,10 @@ static int n_hdlc_tty_open (struct tty_struct *tty) | |||
| 342 | #endif | 342 | #endif |
| 343 | 343 | ||
| 344 | /* Flush any pending characters in the driver and discipline. */ | 344 | /* Flush any pending characters in the driver and discipline. */ |
| 345 | |||
| 346 | if (tty->ldisc.flush_buffer) | 345 | if (tty->ldisc.flush_buffer) |
| 347 | tty->ldisc.flush_buffer (tty); | 346 | tty->ldisc.flush_buffer(tty); |
| 348 | 347 | ||
| 349 | if (tty->driver->flush_buffer) | 348 | tty_driver_flush_buffer(tty); |
| 350 | tty->driver->flush_buffer (tty); | ||
| 351 | 349 | ||
| 352 | if (debuglevel >= DEBUG_LEVEL_INFO) | 350 | if (debuglevel >= DEBUG_LEVEL_INFO) |
| 353 | printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__); | 351 | printk("%s(%d)n_hdlc_tty_open() success\n",__FILE__,__LINE__); |
| @@ -399,7 +397,7 @@ static void n_hdlc_send_frames(struct n_hdlc *n_hdlc, struct tty_struct *tty) | |||
| 399 | 397 | ||
| 400 | /* Send the next block of data to device */ | 398 | /* Send the next block of data to device */ |
| 401 | tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); | 399 | tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); |
| 402 | actual = tty->driver->write(tty, tbuf->buf, tbuf->count); | 400 | actual = tty->ops->write(tty, tbuf->buf, tbuf->count); |
| 403 | 401 | ||
| 404 | /* rollback was possible and has been done */ | 402 | /* rollback was possible and has been done */ |
| 405 | if (actual == -ERESTARTSYS) { | 403 | if (actual == -ERESTARTSYS) { |
| @@ -578,26 +576,36 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, | |||
| 578 | return -EFAULT; | 576 | return -EFAULT; |
| 579 | } | 577 | } |
| 580 | 578 | ||
| 579 | lock_kernel(); | ||
| 580 | |||
| 581 | for (;;) { | 581 | for (;;) { |
| 582 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) | 582 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { |
| 583 | unlock_kernel(); | ||
| 583 | return -EIO; | 584 | return -EIO; |
| 585 | } | ||
| 584 | 586 | ||
| 585 | n_hdlc = tty2n_hdlc (tty); | 587 | n_hdlc = tty2n_hdlc (tty); |
| 586 | if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || | 588 | if (!n_hdlc || n_hdlc->magic != HDLC_MAGIC || |
| 587 | tty != n_hdlc->tty) | 589 | tty != n_hdlc->tty) { |
| 590 | unlock_kernel(); | ||
| 588 | return 0; | 591 | return 0; |
| 592 | } | ||
| 589 | 593 | ||
| 590 | rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list); | 594 | rbuf = n_hdlc_buf_get(&n_hdlc->rx_buf_list); |
| 591 | if (rbuf) | 595 | if (rbuf) |
| 592 | break; | 596 | break; |
| 593 | 597 | ||
| 594 | /* no data */ | 598 | /* no data */ |
| 595 | if (file->f_flags & O_NONBLOCK) | 599 | if (file->f_flags & O_NONBLOCK) { |
| 600 | unlock_kernel(); | ||
| 596 | return -EAGAIN; | 601 | return -EAGAIN; |
| 602 | } | ||
| 597 | 603 | ||
| 598 | interruptible_sleep_on (&tty->read_wait); | 604 | interruptible_sleep_on (&tty->read_wait); |
| 599 | if (signal_pending(current)) | 605 | if (signal_pending(current)) { |
| 606 | unlock_kernel(); | ||
| 600 | return -EINTR; | 607 | return -EINTR; |
| 608 | } | ||
| 601 | } | 609 | } |
| 602 | 610 | ||
| 603 | if (rbuf->count > nr) | 611 | if (rbuf->count > nr) |
| @@ -618,7 +626,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file, | |||
| 618 | kfree(rbuf); | 626 | kfree(rbuf); |
| 619 | else | 627 | else |
| 620 | n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf); | 628 | n_hdlc_buf_put(&n_hdlc->rx_free_buf_list,rbuf); |
| 621 | 629 | unlock_kernel(); | |
| 622 | return ret; | 630 | return ret; |
| 623 | 631 | ||
| 624 | } /* end of n_hdlc_tty_read() */ | 632 | } /* end of n_hdlc_tty_read() */ |
| @@ -661,6 +669,8 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, | |||
| 661 | count = maxframe; | 669 | count = maxframe; |
| 662 | } | 670 | } |
| 663 | 671 | ||
| 672 | lock_kernel(); | ||
| 673 | |||
| 664 | add_wait_queue(&tty->write_wait, &wait); | 674 | add_wait_queue(&tty->write_wait, &wait); |
| 665 | set_current_state(TASK_INTERRUPTIBLE); | 675 | set_current_state(TASK_INTERRUPTIBLE); |
| 666 | 676 | ||
| @@ -695,7 +705,7 @@ static ssize_t n_hdlc_tty_write(struct tty_struct *tty, struct file *file, | |||
| 695 | n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf); | 705 | n_hdlc_buf_put(&n_hdlc->tx_buf_list,tbuf); |
| 696 | n_hdlc_send_frames(n_hdlc,tty); | 706 | n_hdlc_send_frames(n_hdlc,tty); |
| 697 | } | 707 | } |
| 698 | 708 | unlock_kernel(); | |
| 699 | return error; | 709 | return error; |
| 700 | 710 | ||
| 701 | } /* end of n_hdlc_tty_write() */ | 711 | } /* end of n_hdlc_tty_write() */ |
| @@ -740,8 +750,7 @@ static int n_hdlc_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 740 | 750 | ||
| 741 | case TIOCOUTQ: | 751 | case TIOCOUTQ: |
| 742 | /* get the pending tx byte count in the driver */ | 752 | /* get the pending tx byte count in the driver */ |
| 743 | count = tty->driver->chars_in_buffer ? | 753 | count = tty_chars_in_buffer(tty); |
| 744 | tty->driver->chars_in_buffer(tty) : 0; | ||
| 745 | /* add size of next output frame in queue */ | 754 | /* add size of next output frame in queue */ |
| 746 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); | 755 | spin_lock_irqsave(&n_hdlc->tx_buf_list.spinlock,flags); |
| 747 | if (n_hdlc->tx_buf_list.head) | 756 | if (n_hdlc->tx_buf_list.head) |
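n_hdlc stops reaching into tty->driver for flush_buffer, chars_in_buffer and write, and instead uses the tty core accessors (tty_driver_flush_buffer(), tty_chars_in_buffer()) plus the new tty->ops vector. The point of the accessors is to centralise the "is this method implemented?" check; their bodies are presumably along these lines (a sketch, not copied from this patch):

    /* Sketch of the accessor pattern used above. */
    int tty_chars_in_buffer(struct tty_struct *tty)
    {
            if (tty->ops->chars_in_buffer)
                    return tty->ops->chars_in_buffer(tty);
            return 0;               /* driver keeps no output queue */
    }

    void tty_driver_flush_buffer(struct tty_struct *tty)
    {
            if (tty->ops->flush_buffer)
                    tty->ops->flush_buffer(tty);
    }

With the check in one place, call sites such as the TIOCOUTQ hunk above shrink from a ternary on tty->driver->chars_in_buffer to a single call.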
diff --git a/drivers/char/n_r3964.c b/drivers/char/n_r3964.c index 6b918b80f73e..902169062332 100644 --- a/drivers/char/n_r3964.c +++ b/drivers/char/n_r3964.c | |||
| @@ -376,8 +376,9 @@ static void put_char(struct r3964_info *pInfo, unsigned char ch) | |||
| 376 | if (tty == NULL) | 376 | if (tty == NULL) |
| 377 | return; | 377 | return; |
| 378 | 378 | ||
| 379 | if (tty->driver->put_char) { | 379 | /* FIXME: put_char should not be called from an IRQ */ |
| 380 | tty->driver->put_char(tty, ch); | 380 | if (tty->ops->put_char) { |
| 381 | tty->ops->put_char(tty, ch); | ||
| 381 | } | 382 | } |
| 382 | pInfo->bcc ^= ch; | 383 | pInfo->bcc ^= ch; |
| 383 | } | 384 | } |
| @@ -386,12 +387,9 @@ static void flush(struct r3964_info *pInfo) | |||
| 386 | { | 387 | { |
| 387 | struct tty_struct *tty = pInfo->tty; | 388 | struct tty_struct *tty = pInfo->tty; |
| 388 | 389 | ||
| 389 | if (tty == NULL) | 390 | if (tty == NULL || tty->ops->flush_chars == NULL) |
| 390 | return; | 391 | return; |
| 391 | 392 | tty->ops->flush_chars(tty); | |
| 392 | if (tty->driver->flush_chars) { | ||
| 393 | tty->driver->flush_chars(tty); | ||
| 394 | } | ||
| 395 | } | 393 | } |
| 396 | 394 | ||
| 397 | static void trigger_transmit(struct r3964_info *pInfo) | 395 | static void trigger_transmit(struct r3964_info *pInfo) |
| @@ -449,12 +447,11 @@ static void transmit_block(struct r3964_info *pInfo) | |||
| 449 | struct r3964_block_header *pBlock = pInfo->tx_first; | 447 | struct r3964_block_header *pBlock = pInfo->tx_first; |
| 450 | int room = 0; | 448 | int room = 0; |
| 451 | 449 | ||
| 452 | if ((tty == NULL) || (pBlock == NULL)) { | 450 | if (tty == NULL || pBlock == NULL) { |
| 453 | return; | 451 | return; |
| 454 | } | 452 | } |
| 455 | 453 | ||
| 456 | if (tty->driver->write_room) | 454 | room = tty_write_room(tty); |
| 457 | room = tty->driver->write_room(tty); | ||
| 458 | 455 | ||
| 459 | TRACE_PS("transmit_block %p, room %d, length %d", | 456 | TRACE_PS("transmit_block %p, room %d, length %d", |
| 460 | pBlock, room, pBlock->length); | 457 | pBlock, room, pBlock->length); |
| @@ -1075,12 +1072,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, | |||
| 1075 | 1072 | ||
| 1076 | TRACE_L("read()"); | 1073 | TRACE_L("read()"); |
| 1077 | 1074 | ||
| 1075 | lock_kernel(); | ||
| 1076 | |||
| 1078 | pClient = findClient(pInfo, task_pid(current)); | 1077 | pClient = findClient(pInfo, task_pid(current)); |
| 1079 | if (pClient) { | 1078 | if (pClient) { |
| 1080 | pMsg = remove_msg(pInfo, pClient); | 1079 | pMsg = remove_msg(pInfo, pClient); |
| 1081 | if (pMsg == NULL) { | 1080 | if (pMsg == NULL) { |
| 1082 | /* no messages available. */ | 1081 | /* no messages available. */ |
| 1083 | if (file->f_flags & O_NONBLOCK) { | 1082 | if (file->f_flags & O_NONBLOCK) { |
| 1083 | unlock_kernel(); | ||
| 1084 | return -EAGAIN; | 1084 | return -EAGAIN; |
| 1085 | } | 1085 | } |
| 1086 | /* block until there is a message: */ | 1086 | /* block until there is a message: */ |
| @@ -1090,8 +1090,10 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, | |||
| 1090 | 1090 | ||
| 1091 | /* If we still haven't got a message, we must have been signalled */ | 1091 | /* If we still haven't got a message, we must have been signalled */ |
| 1092 | 1092 | ||
| 1093 | if (!pMsg) | 1093 | if (!pMsg) { |
| 1094 | unlock_kernel(); | ||
| 1094 | return -EINTR; | 1095 | return -EINTR; |
| 1096 | } | ||
| 1095 | 1097 | ||
| 1096 | /* deliver msg to client process: */ | 1098 | /* deliver msg to client process: */ |
| 1097 | theMsg.msg_id = pMsg->msg_id; | 1099 | theMsg.msg_id = pMsg->msg_id; |
| @@ -1102,12 +1104,15 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file, | |||
| 1102 | kfree(pMsg); | 1104 | kfree(pMsg); |
| 1103 | TRACE_M("r3964_read - msg kfree %p", pMsg); | 1105 | TRACE_M("r3964_read - msg kfree %p", pMsg); |
| 1104 | 1106 | ||
| 1105 | if (copy_to_user(buf, &theMsg, count)) | 1107 | if (copy_to_user(buf, &theMsg, count)) { |
| 1108 | unlock_kernel(); | ||
| 1106 | return -EFAULT; | 1109 | return -EFAULT; |
| 1110 | } | ||
| 1107 | 1111 | ||
| 1108 | TRACE_PS("read - return %d", count); | 1112 | TRACE_PS("read - return %d", count); |
| 1109 | return count; | 1113 | return count; |
| 1110 | } | 1114 | } |
| 1115 | unlock_kernel(); | ||
| 1111 | return -EPERM; | 1116 | return -EPERM; |
| 1112 | } | 1117 | } |
| 1113 | 1118 | ||
| @@ -1156,6 +1161,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file, | |||
| 1156 | pHeader->locks = 0; | 1161 | pHeader->locks = 0; |
| 1157 | pHeader->owner = NULL; | 1162 | pHeader->owner = NULL; |
| 1158 | 1163 | ||
| 1164 | lock_kernel(); | ||
| 1165 | |||
| 1159 | pClient = findClient(pInfo, task_pid(current)); | 1166 | pClient = findClient(pInfo, task_pid(current)); |
| 1160 | if (pClient) { | 1167 | if (pClient) { |
| 1161 | pHeader->owner = pClient; | 1168 | pHeader->owner = pClient; |
| @@ -1173,6 +1180,8 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file, | |||
| 1173 | add_tx_queue(pInfo, pHeader); | 1180 | add_tx_queue(pInfo, pHeader); |
| 1174 | trigger_transmit(pInfo); | 1181 | trigger_transmit(pInfo); |
| 1175 | 1182 | ||
| 1183 | unlock_kernel(); | ||
| 1184 | |||
| 1176 | return 0; | 1185 | return 0; |
| 1177 | } | 1186 | } |
| 1178 | 1187 | ||
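As in n_hdlc, the r3964 read and write paths are now wrapped in lock_kernel()/unlock_kernel(), and every early return inside the locked region (O_NONBLOCK, -EINTR, -EFAULT) has to drop the lock explicitly, which is easy to get wrong as the code grows. A common alternative shape, shown here only as a sketch with hypothetical helpers (data_ready(), copy_out()), is a single exit label:

    static ssize_t my_read(struct file *file, char __user *buf, size_t count)
    {
            ssize_t ret;

            lock_kernel();
            if ((file->f_flags & O_NONBLOCK) && !data_ready()) {
                    ret = -EAGAIN;
                    goto out;       /* one unlock covers every error path */
            }
            ret = copy_out(buf, count);
    out:
            unlock_kernel();
            return ret;
    }

The patch keeps the per-return unlocks instead, which stays closer to the original control flow.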
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 0c09409fa45d..19105ec203f7 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
| @@ -147,10 +147,8 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty) | |||
| 147 | 147 | ||
| 148 | static void check_unthrottle(struct tty_struct *tty) | 148 | static void check_unthrottle(struct tty_struct *tty) |
| 149 | { | 149 | { |
| 150 | if (tty->count && | 150 | if (tty->count) |
| 151 | test_and_clear_bit(TTY_THROTTLED, &tty->flags) && | 151 | tty_unthrottle(tty); |
| 152 | tty->driver->unthrottle) | ||
| 153 | tty->driver->unthrottle(tty); | ||
| 154 | } | 152 | } |
| 155 | 153 | ||
| 156 | /** | 154 | /** |
| @@ -183,22 +181,24 @@ static void reset_buffer_flags(struct tty_struct *tty) | |||
| 183 | * at hangup) or when the N_TTY line discipline internally has to | 181 | * at hangup) or when the N_TTY line discipline internally has to |
| 184 | * clean the pending queue (for example some signals). | 182 | * clean the pending queue (for example some signals). |
| 185 | * | 183 | * |
| 186 | * FIXME: tty->ctrl_status is not spinlocked and relies on | 184 | * Locking: ctrl_lock |
| 187 | * lock_kernel() still. | ||
| 188 | */ | 185 | */ |
| 189 | 186 | ||
| 190 | static void n_tty_flush_buffer(struct tty_struct *tty) | 187 | static void n_tty_flush_buffer(struct tty_struct *tty) |
| 191 | { | 188 | { |
| 189 | unsigned long flags; | ||
| 192 | /* clear everything and unthrottle the driver */ | 190 | /* clear everything and unthrottle the driver */ |
| 193 | reset_buffer_flags(tty); | 191 | reset_buffer_flags(tty); |
| 194 | 192 | ||
| 195 | if (!tty->link) | 193 | if (!tty->link) |
| 196 | return; | 194 | return; |
| 197 | 195 | ||
| 196 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 198 | if (tty->link->packet) { | 197 | if (tty->link->packet) { |
| 199 | tty->ctrl_status |= TIOCPKT_FLUSHREAD; | 198 | tty->ctrl_status |= TIOCPKT_FLUSHREAD; |
| 200 | wake_up_interruptible(&tty->link->read_wait); | 199 | wake_up_interruptible(&tty->link->read_wait); |
| 201 | } | 200 | } |
| 201 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 202 | } | 202 | } |
| 203 | 203 | ||
| 204 | /** | 204 | /** |
| @@ -264,17 +264,18 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty) | |||
| 264 | * relevant in the world today. If you ever need them, add them here. | 264 | * relevant in the world today. If you ever need them, add them here. |
| 265 | * | 265 | * |
| 266 | * Called from both the receive and transmit sides and can be called | 266 | * Called from both the receive and transmit sides and can be called |
| 267 | * re-entrantly. Relies on lock_kernel() still. | 267 | * re-entrantly. Relies on lock_kernel() for tty->column state. |
| 268 | */ | 268 | */ |
| 269 | 269 | ||
| 270 | static int opost(unsigned char c, struct tty_struct *tty) | 270 | static int opost(unsigned char c, struct tty_struct *tty) |
| 271 | { | 271 | { |
| 272 | int space, spaces; | 272 | int space, spaces; |
| 273 | 273 | ||
| 274 | space = tty->driver->write_room(tty); | 274 | space = tty_write_room(tty); |
| 275 | if (!space) | 275 | if (!space) |
| 276 | return -1; | 276 | return -1; |
| 277 | 277 | ||
| 278 | lock_kernel(); | ||
| 278 | if (O_OPOST(tty)) { | 279 | if (O_OPOST(tty)) { |
| 279 | switch (c) { | 280 | switch (c) { |
| 280 | case '\n': | 281 | case '\n': |
| @@ -283,7 +284,7 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
| 283 | if (O_ONLCR(tty)) { | 284 | if (O_ONLCR(tty)) { |
| 284 | if (space < 2) | 285 | if (space < 2) |
| 285 | return -1; | 286 | return -1; |
| 286 | tty->driver->put_char(tty, '\r'); | 287 | tty_put_char(tty, '\r'); |
| 287 | tty->column = 0; | 288 | tty->column = 0; |
| 288 | } | 289 | } |
| 289 | tty->canon_column = tty->column; | 290 | tty->canon_column = tty->column; |
| @@ -305,7 +306,7 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
| 305 | if (space < spaces) | 306 | if (space < spaces) |
| 306 | return -1; | 307 | return -1; |
| 307 | tty->column += spaces; | 308 | tty->column += spaces; |
| 308 | tty->driver->write(tty, " ", spaces); | 309 | tty->ops->write(tty, " ", spaces); |
| 309 | return 0; | 310 | return 0; |
| 310 | } | 311 | } |
| 311 | tty->column += spaces; | 312 | tty->column += spaces; |
| @@ -322,7 +323,8 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
| 322 | break; | 323 | break; |
| 323 | } | 324 | } |
| 324 | } | 325 | } |
| 325 | tty->driver->put_char(tty, c); | 326 | tty_put_char(tty, c); |
| 327 | unlock_kernel(); | ||
| 326 | return 0; | 328 | return 0; |
| 327 | } | 329 | } |
| 328 | 330 | ||
| @@ -337,7 +339,8 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
| 337 | * the simple cases normally found and helps to generate blocks of | 339 | * the simple cases normally found and helps to generate blocks of |
| 338 | * symbols for the console driver and thus improve performance. | 340 | * symbols for the console driver and thus improve performance. |
| 339 | * | 341 | * |
| 340 | * Called from write_chan under the tty layer write lock. | 342 | * Called from write_chan under the tty layer write lock. Relies |
| 343 | * on lock_kernel for the tty->column state. | ||
| 341 | */ | 344 | */ |
| 342 | 345 | ||
| 343 | static ssize_t opost_block(struct tty_struct *tty, | 346 | static ssize_t opost_block(struct tty_struct *tty, |
| @@ -347,12 +350,13 @@ static ssize_t opost_block(struct tty_struct *tty, | |||
| 347 | int i; | 350 | int i; |
| 348 | const unsigned char *cp; | 351 | const unsigned char *cp; |
| 349 | 352 | ||
| 350 | space = tty->driver->write_room(tty); | 353 | space = tty_write_room(tty); |
| 351 | if (!space) | 354 | if (!space) |
| 352 | return 0; | 355 | return 0; |
| 353 | if (nr > space) | 356 | if (nr > space) |
| 354 | nr = space; | 357 | nr = space; |
| 355 | 358 | ||
| 359 | lock_kernel(); | ||
| 356 | for (i = 0, cp = buf; i < nr; i++, cp++) { | 360 | for (i = 0, cp = buf; i < nr; i++, cp++) { |
| 357 | switch (*cp) { | 361 | switch (*cp) { |
| 358 | case '\n': | 362 | case '\n': |
| @@ -384,27 +388,15 @@ static ssize_t opost_block(struct tty_struct *tty, | |||
| 384 | } | 388 | } |
| 385 | } | 389 | } |
| 386 | break_out: | 390 | break_out: |
| 387 | if (tty->driver->flush_chars) | 391 | if (tty->ops->flush_chars) |
| 388 | tty->driver->flush_chars(tty); | 392 | tty->ops->flush_chars(tty); |
| 389 | i = tty->driver->write(tty, buf, i); | 393 | i = tty->ops->write(tty, buf, i); |
| 394 | unlock_kernel(); | ||
| 390 | return i; | 395 | return i; |
| 391 | } | 396 | } |
| 392 | 397 | ||
| 393 | 398 | ||
| 394 | /** | 399 | /** |
| 395 | * put_char - write character to driver | ||
| 396 | * @c: character (or part of unicode symbol) | ||
| 397 | * @tty: terminal device | ||
| 398 | * | ||
| 399 | * Queue a byte to the driver layer for output | ||
| 400 | */ | ||
| 401 | |||
| 402 | static inline void put_char(unsigned char c, struct tty_struct *tty) | ||
| 403 | { | ||
| 404 | tty->driver->put_char(tty, c); | ||
| 405 | } | ||
| 406 | |||
| 407 | /** | ||
| 408 | * echo_char - echo characters | 400 | * echo_char - echo characters |
| 409 | * @c: unicode byte to echo | 401 | * @c: unicode byte to echo |
| 410 | * @tty: terminal device | 402 | * @tty: terminal device |
| @@ -416,8 +408,8 @@ static inline void put_char(unsigned char c, struct tty_struct *tty) | |||
| 416 | static void echo_char(unsigned char c, struct tty_struct *tty) | 408 | static void echo_char(unsigned char c, struct tty_struct *tty) |
| 417 | { | 409 | { |
| 418 | if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') { | 410 | if (L_ECHOCTL(tty) && iscntrl(c) && c != '\t') { |
| 419 | put_char('^', tty); | 411 | tty_put_char(tty, '^'); |
| 420 | put_char(c ^ 0100, tty); | 412 | tty_put_char(tty, c ^ 0100); |
| 421 | tty->column += 2; | 413 | tty->column += 2; |
| 422 | } else | 414 | } else |
| 423 | opost(c, tty); | 415 | opost(c, tty); |
| @@ -426,7 +418,7 @@ static void echo_char(unsigned char c, struct tty_struct *tty) | |||
| 426 | static inline void finish_erasing(struct tty_struct *tty) | 418 | static inline void finish_erasing(struct tty_struct *tty) |
| 427 | { | 419 | { |
| 428 | if (tty->erasing) { | 420 | if (tty->erasing) { |
| 429 | put_char('/', tty); | 421 | tty_put_char(tty, '/'); |
| 430 | tty->column++; | 422 | tty->column++; |
| 431 | tty->erasing = 0; | 423 | tty->erasing = 0; |
| 432 | } | 424 | } |
| @@ -510,7 +502,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
| 510 | if (L_ECHO(tty)) { | 502 | if (L_ECHO(tty)) { |
| 511 | if (L_ECHOPRT(tty)) { | 503 | if (L_ECHOPRT(tty)) { |
| 512 | if (!tty->erasing) { | 504 | if (!tty->erasing) { |
| 513 | put_char('\\', tty); | 505 | tty_put_char(tty, '\\'); |
| 514 | tty->column++; | 506 | tty->column++; |
| 515 | tty->erasing = 1; | 507 | tty->erasing = 1; |
| 516 | } | 508 | } |
| @@ -518,7 +510,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
| 518 | echo_char(c, tty); | 510 | echo_char(c, tty); |
| 519 | while (--cnt > 0) { | 511 | while (--cnt > 0) { |
| 520 | head = (head+1) & (N_TTY_BUF_SIZE-1); | 512 | head = (head+1) & (N_TTY_BUF_SIZE-1); |
| 521 | put_char(tty->read_buf[head], tty); | 513 | tty_put_char(tty, tty->read_buf[head]); |
| 522 | } | 514 | } |
| 523 | } else if (kill_type == ERASE && !L_ECHOE(tty)) { | 515 | } else if (kill_type == ERASE && !L_ECHOE(tty)) { |
| 524 | echo_char(ERASE_CHAR(tty), tty); | 516 | echo_char(ERASE_CHAR(tty), tty); |
| @@ -546,22 +538,22 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
| 546 | /* Now backup to that column. */ | 538 | /* Now backup to that column. */ |
| 547 | while (tty->column > col) { | 539 | while (tty->column > col) { |
| 548 | /* Can't use opost here. */ | 540 | /* Can't use opost here. */ |
| 549 | put_char('\b', tty); | 541 | tty_put_char(tty, '\b'); |
| 550 | if (tty->column > 0) | 542 | if (tty->column > 0) |
| 551 | tty->column--; | 543 | tty->column--; |
| 552 | } | 544 | } |
| 553 | } else { | 545 | } else { |
| 554 | if (iscntrl(c) && L_ECHOCTL(tty)) { | 546 | if (iscntrl(c) && L_ECHOCTL(tty)) { |
| 555 | put_char('\b', tty); | 547 | tty_put_char(tty, '\b'); |
| 556 | put_char(' ', tty); | 548 | tty_put_char(tty, ' '); |
| 557 | put_char('\b', tty); | 549 | tty_put_char(tty, '\b'); |
| 558 | if (tty->column > 0) | 550 | if (tty->column > 0) |
| 559 | tty->column--; | 551 | tty->column--; |
| 560 | } | 552 | } |
| 561 | if (!iscntrl(c) || L_ECHOCTL(tty)) { | 553 | if (!iscntrl(c) || L_ECHOCTL(tty)) { |
| 562 | put_char('\b', tty); | 554 | tty_put_char(tty, '\b'); |
| 563 | put_char(' ', tty); | 555 | tty_put_char(tty, ' '); |
| 564 | put_char('\b', tty); | 556 | tty_put_char(tty, '\b'); |
| 565 | if (tty->column > 0) | 557 | if (tty->column > 0) |
| 566 | tty->column--; | 558 | tty->column--; |
| 567 | } | 559 | } |
| @@ -592,8 +584,7 @@ static inline void isig(int sig, struct tty_struct *tty, int flush) | |||
| 592 | kill_pgrp(tty->pgrp, sig, 1); | 584 | kill_pgrp(tty->pgrp, sig, 1); |
| 593 | if (flush || !L_NOFLSH(tty)) { | 585 | if (flush || !L_NOFLSH(tty)) { |
| 594 | n_tty_flush_buffer(tty); | 586 | n_tty_flush_buffer(tty); |
| 595 | if (tty->driver->flush_buffer) | 587 | tty_driver_flush_buffer(tty); |
| 596 | tty->driver->flush_buffer(tty); | ||
| 597 | } | 588 | } |
| 598 | } | 589 | } |
| 599 | 590 | ||
| @@ -701,7 +692,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
| 701 | 692 | ||
| 702 | if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && | 693 | if (tty->stopped && !tty->flow_stopped && I_IXON(tty) && |
| 703 | ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) || | 694 | ((I_IXANY(tty) && c != START_CHAR(tty) && c != STOP_CHAR(tty)) || |
| 704 | c == INTR_CHAR(tty) || c == QUIT_CHAR(tty))) | 695 | c == INTR_CHAR(tty) || c == QUIT_CHAR(tty) || c == SUSP_CHAR(tty))) |
| 705 | start_tty(tty); | 696 | start_tty(tty); |
| 706 | 697 | ||
| 707 | if (tty->closing) { | 698 | if (tty->closing) { |
| @@ -725,7 +716,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
| 725 | tty->lnext = 0; | 716 | tty->lnext = 0; |
| 726 | if (L_ECHO(tty)) { | 717 | if (L_ECHO(tty)) { |
| 727 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { | 718 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { |
| 728 | put_char('\a', tty); /* beep if no space */ | 719 | tty_put_char(tty, '\a'); /* beep if no space */ |
| 729 | return; | 720 | return; |
| 730 | } | 721 | } |
| 731 | /* Record the column of first canon char. */ | 722 | /* Record the column of first canon char. */ |
| @@ -739,13 +730,6 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
| 739 | return; | 730 | return; |
| 740 | } | 731 | } |
| 741 | 732 | ||
| 742 | if (c == '\r') { | ||
| 743 | if (I_IGNCR(tty)) | ||
| 744 | return; | ||
| 745 | if (I_ICRNL(tty)) | ||
| 746 | c = '\n'; | ||
| 747 | } else if (c == '\n' && I_INLCR(tty)) | ||
| 748 | c = '\r'; | ||
| 749 | if (I_IXON(tty)) { | 733 | if (I_IXON(tty)) { |
| 750 | if (c == START_CHAR(tty)) { | 734 | if (c == START_CHAR(tty)) { |
| 751 | start_tty(tty); | 735 | start_tty(tty); |
| @@ -756,6 +740,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
| 756 | return; | 740 | return; |
| 757 | } | 741 | } |
| 758 | } | 742 | } |
| 743 | |||
| 759 | if (L_ISIG(tty)) { | 744 | if (L_ISIG(tty)) { |
| 760 | int signal; | 745 | int signal; |
| 761 | signal = SIGINT; | 746 | signal = SIGINT; |
| @@ -775,8 +760,7 @@ send_signal: | |||
| 775 | */ | 760 | */ |
| 776 | if (!L_NOFLSH(tty)) { | 761 | if (!L_NOFLSH(tty)) { |
| 777 | n_tty_flush_buffer(tty); | 762 | n_tty_flush_buffer(tty); |
| 778 | if (tty->driver->flush_buffer) | 763 | tty_driver_flush_buffer(tty); |
| 779 | tty->driver->flush_buffer(tty); | ||
| 780 | } | 764 | } |
| 781 | if (L_ECHO(tty)) | 765 | if (L_ECHO(tty)) |
| 782 | echo_char(c, tty); | 766 | echo_char(c, tty); |
| @@ -785,6 +769,15 @@ send_signal: | |||
| 785 | return; | 769 | return; |
| 786 | } | 770 | } |
| 787 | } | 771 | } |
| 772 | |||
| 773 | if (c == '\r') { | ||
| 774 | if (I_IGNCR(tty)) | ||
| 775 | return; | ||
| 776 | if (I_ICRNL(tty)) | ||
| 777 | c = '\n'; | ||
| 778 | } else if (c == '\n' && I_INLCR(tty)) | ||
| 779 | c = '\r'; | ||
| 780 | |||
| 788 | if (tty->icanon) { | 781 | if (tty->icanon) { |
| 789 | if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || | 782 | if (c == ERASE_CHAR(tty) || c == KILL_CHAR(tty) || |
| 790 | (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { | 783 | (c == WERASE_CHAR(tty) && L_IEXTEN(tty))) { |
| @@ -796,8 +789,8 @@ send_signal: | |||
| 796 | if (L_ECHO(tty)) { | 789 | if (L_ECHO(tty)) { |
| 797 | finish_erasing(tty); | 790 | finish_erasing(tty); |
| 798 | if (L_ECHOCTL(tty)) { | 791 | if (L_ECHOCTL(tty)) { |
| 799 | put_char('^', tty); | 792 | tty_put_char(tty, '^'); |
| 800 | put_char('\b', tty); | 793 | tty_put_char(tty, '\b'); |
| 801 | } | 794 | } |
| 802 | } | 795 | } |
| 803 | return; | 796 | return; |
| @@ -818,7 +811,7 @@ send_signal: | |||
| 818 | if (c == '\n') { | 811 | if (c == '\n') { |
| 819 | if (L_ECHO(tty) || L_ECHONL(tty)) { | 812 | if (L_ECHO(tty) || L_ECHONL(tty)) { |
| 820 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) | 813 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) |
| 821 | put_char('\a', tty); | 814 | tty_put_char(tty, '\a'); |
| 822 | opost('\n', tty); | 815 | opost('\n', tty); |
| 823 | } | 816 | } |
| 824 | goto handle_newline; | 817 | goto handle_newline; |
| @@ -836,7 +829,7 @@ send_signal: | |||
| 836 | */ | 829 | */ |
| 837 | if (L_ECHO(tty)) { | 830 | if (L_ECHO(tty)) { |
| 838 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) | 831 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) |
| 839 | put_char('\a', tty); | 832 | tty_put_char(tty, '\a'); |
| 840 | /* Record the column of first canon char. */ | 833 | /* Record the column of first canon char. */ |
| 841 | if (tty->canon_head == tty->read_head) | 834 | if (tty->canon_head == tty->read_head) |
| 842 | tty->canon_column = tty->column; | 835 | tty->canon_column = tty->column; |
| @@ -866,7 +859,7 @@ handle_newline: | |||
| 866 | finish_erasing(tty); | 859 | finish_erasing(tty); |
| 867 | if (L_ECHO(tty)) { | 860 | if (L_ECHO(tty)) { |
| 868 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { | 861 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { |
| 869 | put_char('\a', tty); /* beep if no space */ | 862 | tty_put_char(tty, '\a'); /* beep if no space */ |
| 870 | return; | 863 | return; |
| 871 | } | 864 | } |
| 872 | if (c == '\n') | 865 | if (c == '\n') |
| @@ -970,8 +963,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
| 970 | break; | 963 | break; |
| 971 | } | 964 | } |
| 972 | } | 965 | } |
| 973 | if (tty->driver->flush_chars) | 966 | if (tty->ops->flush_chars) |
| 974 | tty->driver->flush_chars(tty); | 967 | tty->ops->flush_chars(tty); |
| 975 | } | 968 | } |
| 976 | 969 | ||
| 977 | n_tty_set_room(tty); | 970 | n_tty_set_room(tty); |
| @@ -987,12 +980,8 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
| 987 | * mode. We don't want to throttle the driver if we're in | 980 | * mode. We don't want to throttle the driver if we're in |
| 988 | * canonical mode and don't have a newline yet! | 981 | * canonical mode and don't have a newline yet! |
| 989 | */ | 982 | */ |
| 990 | if (tty->receive_room < TTY_THRESHOLD_THROTTLE) { | 983 | if (tty->receive_room < TTY_THRESHOLD_THROTTLE) |
| 991 | /* check TTY_THROTTLED first so it indicates our state */ | 984 | tty_throttle(tty); |
| 992 | if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) && | ||
| 993 | tty->driver->throttle) | ||
| 994 | tty->driver->throttle(tty); | ||
| 995 | } | ||
| 996 | } | 985 | } |
| 997 | 986 | ||
| 998 | int is_ignored(int sig) | 987 | int is_ignored(int sig) |
| @@ -1076,6 +1065,9 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) | |||
| 1076 | tty->real_raw = 0; | 1065 | tty->real_raw = 0; |
| 1077 | } | 1066 | } |
| 1078 | n_tty_set_room(tty); | 1067 | n_tty_set_room(tty); |
| 1068 | /* The termios change make the tty ready for I/O */ | ||
| 1069 | wake_up_interruptible(&tty->write_wait); | ||
| 1070 | wake_up_interruptible(&tty->read_wait); | ||
| 1079 | } | 1071 | } |
| 1080 | 1072 | ||
| 1081 | /** | 1073 | /** |
| @@ -1194,6 +1186,11 @@ extern ssize_t redirected_tty_write(struct file *, const char __user *, | |||
| 1194 | * Perform job control management checks on this file/tty descriptor | 1186 | * Perform job control management checks on this file/tty descriptor |
| 1195 | * and if appropriate send any needed signals and return a negative | 1187 | * and if appropriate send any needed signals and return a negative |
| 1196 | * error code if action should be taken. | 1188 | * error code if action should be taken. |
| 1189 | * | ||
| 1190 | * FIXME: | ||
| 1191 | * Locking: None - redirected write test is safe, testing | ||
| 1192 | * current->signal should possibly lock current->sighand | ||
| 1193 | * pgrp locking ? | ||
| 1197 | */ | 1194 | */ |
| 1198 | 1195 | ||
| 1199 | static int job_control(struct tty_struct *tty, struct file *file) | 1196 | static int job_control(struct tty_struct *tty, struct file *file) |
| @@ -1246,6 +1243,7 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file, | |||
| 1246 | ssize_t size; | 1243 | ssize_t size; |
| 1247 | long timeout; | 1244 | long timeout; |
| 1248 | unsigned long flags; | 1245 | unsigned long flags; |
| 1246 | int packet; | ||
| 1249 | 1247 | ||
| 1250 | do_it_again: | 1248 | do_it_again: |
| 1251 | 1249 | ||
| @@ -1289,16 +1287,19 @@ do_it_again: | |||
| 1289 | if (mutex_lock_interruptible(&tty->atomic_read_lock)) | 1287 | if (mutex_lock_interruptible(&tty->atomic_read_lock)) |
| 1290 | return -ERESTARTSYS; | 1288 | return -ERESTARTSYS; |
| 1291 | } | 1289 | } |
| 1290 | packet = tty->packet; | ||
| 1292 | 1291 | ||
| 1293 | add_wait_queue(&tty->read_wait, &wait); | 1292 | add_wait_queue(&tty->read_wait, &wait); |
| 1294 | while (nr) { | 1293 | while (nr) { |
| 1295 | /* First test for status change. */ | 1294 | /* First test for status change. */ |
| 1296 | if (tty->packet && tty->link->ctrl_status) { | 1295 | if (packet && tty->link->ctrl_status) { |
| 1297 | unsigned char cs; | 1296 | unsigned char cs; |
| 1298 | if (b != buf) | 1297 | if (b != buf) |
| 1299 | break; | 1298 | break; |
| 1299 | spin_lock_irqsave(&tty->link->ctrl_lock, flags); | ||
| 1300 | cs = tty->link->ctrl_status; | 1300 | cs = tty->link->ctrl_status; |
| 1301 | tty->link->ctrl_status = 0; | 1301 | tty->link->ctrl_status = 0; |
| 1302 | spin_unlock_irqrestore(&tty->link->ctrl_lock, flags); | ||
| 1302 | if (tty_put_user(tty, cs, b++)) { | 1303 | if (tty_put_user(tty, cs, b++)) { |
| 1303 | retval = -EFAULT; | 1304 | retval = -EFAULT; |
| 1304 | b--; | 1305 | b--; |
| @@ -1333,6 +1334,7 @@ do_it_again: | |||
| 1333 | retval = -ERESTARTSYS; | 1334 | retval = -ERESTARTSYS; |
| 1334 | break; | 1335 | break; |
| 1335 | } | 1336 | } |
| 1337 | /* FIXME: does n_tty_set_room need locking ? */ | ||
| 1336 | n_tty_set_room(tty); | 1338 | n_tty_set_room(tty); |
| 1337 | timeout = schedule_timeout(timeout); | 1339 | timeout = schedule_timeout(timeout); |
| 1338 | continue; | 1340 | continue; |
| @@ -1340,7 +1342,7 @@ do_it_again: | |||
| 1340 | __set_current_state(TASK_RUNNING); | 1342 | __set_current_state(TASK_RUNNING); |
| 1341 | 1343 | ||
| 1342 | /* Deal with packet mode. */ | 1344 | /* Deal with packet mode. */ |
| 1343 | if (tty->packet && b == buf) { | 1345 | if (packet && b == buf) { |
| 1344 | if (tty_put_user(tty, TIOCPKT_DATA, b++)) { | 1346 | if (tty_put_user(tty, TIOCPKT_DATA, b++)) { |
| 1345 | retval = -EFAULT; | 1347 | retval = -EFAULT; |
| 1346 | b--; | 1348 | b--; |
| @@ -1388,6 +1390,8 @@ do_it_again: | |||
| 1388 | break; | 1390 | break; |
| 1389 | } else { | 1391 | } else { |
| 1390 | int uncopied; | 1392 | int uncopied; |
| 1393 | /* The copy function takes the read lock and handles | ||
| 1394 | locking internally for this case */ | ||
| 1391 | uncopied = copy_from_read_buf(tty, &b, &nr); | 1395 | uncopied = copy_from_read_buf(tty, &b, &nr); |
| 1392 | uncopied += copy_from_read_buf(tty, &b, &nr); | 1396 | uncopied += copy_from_read_buf(tty, &b, &nr); |
| 1393 | if (uncopied) { | 1397 | if (uncopied) { |
| @@ -1429,7 +1433,6 @@ do_it_again: | |||
| 1429 | goto do_it_again; | 1433 | goto do_it_again; |
| 1430 | 1434 | ||
| 1431 | n_tty_set_room(tty); | 1435 | n_tty_set_room(tty); |
| 1432 | |||
| 1433 | return retval; | 1436 | return retval; |
| 1434 | } | 1437 | } |
| 1435 | 1438 | ||
| @@ -1492,11 +1495,11 @@ static ssize_t write_chan(struct tty_struct *tty, struct file *file, | |||
| 1492 | break; | 1495 | break; |
| 1493 | b++; nr--; | 1496 | b++; nr--; |
| 1494 | } | 1497 | } |
| 1495 | if (tty->driver->flush_chars) | 1498 | if (tty->ops->flush_chars) |
| 1496 | tty->driver->flush_chars(tty); | 1499 | tty->ops->flush_chars(tty); |
| 1497 | } else { | 1500 | } else { |
| 1498 | while (nr > 0) { | 1501 | while (nr > 0) { |
| 1499 | c = tty->driver->write(tty, b, nr); | 1502 | c = tty->ops->write(tty, b, nr); |
| 1500 | if (c < 0) { | 1503 | if (c < 0) { |
| 1501 | retval = c; | 1504 | retval = c; |
| 1502 | goto break_out; | 1505 | goto break_out; |
| @@ -1533,11 +1536,6 @@ break_out: | |||
| 1533 | * | 1536 | * |
| 1534 | * This code must be sure never to sleep through a hangup. | 1537 | * This code must be sure never to sleep through a hangup. |
| 1535 | * Called without the kernel lock held - fine | 1538 | * Called without the kernel lock held - fine |
| 1536 | * | ||
| 1537 | * FIXME: if someone changes the VMIN or discipline settings for the | ||
| 1538 | * terminal while another process is in poll() the poll does not | ||
| 1539 | * recompute the new limits. Possibly set_termios should issue | ||
| 1540 | * a read wakeup to fix this bug. | ||
| 1541 | */ | 1539 | */ |
| 1542 | 1540 | ||
| 1543 | static unsigned int normal_poll(struct tty_struct *tty, struct file *file, | 1541 | static unsigned int normal_poll(struct tty_struct *tty, struct file *file, |
| @@ -1561,9 +1559,9 @@ static unsigned int normal_poll(struct tty_struct *tty, struct file *file, | |||
| 1561 | else | 1559 | else |
| 1562 | tty->minimum_to_wake = 1; | 1560 | tty->minimum_to_wake = 1; |
| 1563 | } | 1561 | } |
| 1564 | if (!tty_is_writelocked(tty) && | 1562 | if (tty->ops->write && !tty_is_writelocked(tty) && |
| 1565 | tty->driver->chars_in_buffer(tty) < WAKEUP_CHARS && | 1563 | tty_chars_in_buffer(tty) < WAKEUP_CHARS && |
| 1566 | tty->driver->write_room(tty) > 0) | 1564 | tty_write_room(tty) > 0) |
| 1567 | mask |= POLLOUT | POLLWRNORM; | 1565 | mask |= POLLOUT | POLLWRNORM; |
| 1568 | return mask; | 1566 | return mask; |
| 1569 | } | 1567 | } |
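Two threads run through the n_tty.c hunks: the private put_char() wrapper is dropped in favour of the core tty_put_char() helper, and tty->ctrl_status (the packet-mode status byte shared between the two sides of a pty) is now protected by tty->ctrl_lock rather than by the big kernel lock. The resulting producer/consumer shape is the ordinary spinlock one; a sketch with hypothetical function names (post_status(), take_status()):

    #include <linux/spinlock.h>

    /* Writer: set a TIOCPKT_* bit and wake the reader (sketch). */
    static void post_status(struct tty_struct *tty, unsigned char bit)
    {
            unsigned long flags;

            spin_lock_irqsave(&tty->ctrl_lock, flags);
            tty->ctrl_status |= bit;
            spin_unlock_irqrestore(&tty->ctrl_lock, flags);
            wake_up_interruptible(&tty->link->read_wait);
    }

    /* Reader: fetch-and-clear atomically, as read_chan() now does. */
    static unsigned char take_status(struct tty_struct *tty)
    {
            unsigned long flags;
            unsigned char cs;

            spin_lock_irqsave(&tty->ctrl_lock, flags);
            cs = tty->ctrl_status;
            tty->ctrl_status = 0;
            spin_unlock_irqrestore(&tty->ctrl_lock, flags);
            return cs;
    }

Whether the wake-up sits inside or outside the lock is a judgement call; the flush hunk above keeps it inside, which is also fine since wake_up_interruptible() does not sleep.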
diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c index 6a6843a0a674..66a0f931c66c 100644 --- a/drivers/char/nozomi.c +++ b/drivers/char/nozomi.c | |||
| @@ -73,7 +73,7 @@ do { \ | |||
| 73 | char tmp[P_BUF_SIZE]; \ | 73 | char tmp[P_BUF_SIZE]; \ |
| 74 | snprintf(tmp, sizeof(tmp), ##args); \ | 74 | snprintf(tmp, sizeof(tmp), ##args); \ |
| 75 | printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ | 75 | printk(_err_flag_ "[%d] %s(): %s\n", __LINE__, \ |
| 76 | __FUNCTION__, tmp); \ | 76 | __func__, tmp); \ |
| 77 | } while (0) | 77 | } while (0) |
| 78 | 78 | ||
| 79 | #define DBG1(args...) D_(0x01, ##args) | 79 | #define DBG1(args...) D_(0x01, ##args) |
| @@ -1407,7 +1407,7 @@ static int __devinit nozomi_card_init(struct pci_dev *pdev, | |||
| 1407 | /* Find out what card type it is */ | 1407 | /* Find out what card type it is */ |
| 1408 | nozomi_get_card_type(dc); | 1408 | nozomi_get_card_type(dc); |
| 1409 | 1409 | ||
| 1410 | dc->base_addr = ioremap(start, dc->card_type); | 1410 | dc->base_addr = ioremap_nocache(start, dc->card_type); |
| 1411 | if (!dc->base_addr) { | 1411 | if (!dc->base_addr) { |
| 1412 | dev_err(&pdev->dev, "Unable to map card MMIO\n"); | 1412 | dev_err(&pdev->dev, "Unable to map card MMIO\n"); |
| 1413 | ret = -ENODEV; | 1413 | ret = -ENODEV; |
| @@ -1724,6 +1724,8 @@ static int ntty_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 1724 | const struct ctrl_dl *ctrl_dl = &port->ctrl_dl; | 1724 | const struct ctrl_dl *ctrl_dl = &port->ctrl_dl; |
| 1725 | const struct ctrl_ul *ctrl_ul = &port->ctrl_ul; | 1725 | const struct ctrl_ul *ctrl_ul = &port->ctrl_ul; |
| 1726 | 1726 | ||
| 1727 | /* Note: these could change under us but it is not clear this | ||
| 1728 | matters if so */ | ||
| 1727 | return (ctrl_ul->RTS ? TIOCM_RTS : 0) | | 1729 | return (ctrl_ul->RTS ? TIOCM_RTS : 0) | |
| 1728 | (ctrl_ul->DTR ? TIOCM_DTR : 0) | | 1730 | (ctrl_ul->DTR ? TIOCM_DTR : 0) | |
| 1729 | (ctrl_dl->DCD ? TIOCM_CAR : 0) | | 1731 | (ctrl_dl->DCD ? TIOCM_CAR : 0) | |
| @@ -1849,16 +1851,6 @@ static void ntty_throttle(struct tty_struct *tty) | |||
| 1849 | spin_unlock_irqrestore(&dc->spin_mutex, flags); | 1851 | spin_unlock_irqrestore(&dc->spin_mutex, flags); |
| 1850 | } | 1852 | } |
| 1851 | 1853 | ||
| 1852 | /* just to discard single character writes */ | ||
| 1853 | static void ntty_put_char(struct tty_struct *tty, unsigned char c) | ||
| 1854 | { | ||
| 1855 | /* | ||
| 1856 | * card does not react correct when we write single chars | ||
| 1857 | * to the card, so we discard them | ||
| 1858 | */ | ||
| 1859 | DBG2("PUT CHAR Function: %c", c); | ||
| 1860 | } | ||
| 1861 | |||
| 1862 | /* Returns number of chars in buffer, called by tty layer */ | 1854 | /* Returns number of chars in buffer, called by tty layer */ |
| 1863 | static s32 ntty_chars_in_buffer(struct tty_struct *tty) | 1855 | static s32 ntty_chars_in_buffer(struct tty_struct *tty) |
| 1864 | { | 1856 | { |
| @@ -1892,7 +1884,6 @@ static const struct tty_operations tty_ops = { | |||
| 1892 | .unthrottle = ntty_unthrottle, | 1884 | .unthrottle = ntty_unthrottle, |
| 1893 | .throttle = ntty_throttle, | 1885 | .throttle = ntty_throttle, |
| 1894 | .chars_in_buffer = ntty_chars_in_buffer, | 1886 | .chars_in_buffer = ntty_chars_in_buffer, |
| 1895 | .put_char = ntty_put_char, | ||
| 1896 | .tiocmget = ntty_tiocmget, | 1887 | .tiocmget = ntty_tiocmget, |
| 1897 | .tiocmset = ntty_tiocmset, | 1888 | .tiocmset = ntty_tiocmset, |
| 1898 | }; | 1889 | }; |
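nozomi drops its ntty_put_char() stub rather than keep a callback that silently discards data (the hardware apparently misbehaves on single-character writes), and it switches the register window to ioremap_nocache() so MMIO accesses are not cached. Leaving .put_char NULL should be safe because the core tty_put_char() helper presumably falls back to a one-byte write, roughly:

    /* Assumed behaviour of the tty core helper, shown as a sketch. */
    int tty_put_char(struct tty_struct *tty, unsigned char ch)
    {
            if (tty->ops->put_char)
                    return tty->ops->put_char(tty, ch);
            return tty->ops->write(tty, &ch, 1);
    }

Single characters therefore still reach the card, just through the normal buffered write path.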
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 454d7324ba40..4a933d413423 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
| @@ -53,7 +53,7 @@ module_param(pc_debug, int, 0600); | |||
| 53 | #define DEBUGP(n, rdr, x, args...) do { \ | 53 | #define DEBUGP(n, rdr, x, args...) do { \ |
| 54 | if (pc_debug >= (n)) \ | 54 | if (pc_debug >= (n)) \ |
| 55 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ | 55 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ |
| 56 | __FUNCTION__ , ## args); \ | 56 | __func__ , ## args); \ |
| 57 | } while (0) | 57 | } while (0) |
| 58 | #else | 58 | #else |
| 59 | #define DEBUGP(n, rdr, x, args...) | 59 | #define DEBUGP(n, rdr, x, args...) |
diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index 5f291bf739a6..035084c07329 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c | |||
| @@ -47,7 +47,7 @@ module_param(pc_debug, int, 0600); | |||
| 47 | #define DEBUGP(n, rdr, x, args...) do { \ | 47 | #define DEBUGP(n, rdr, x, args...) do { \ |
| 48 | if (pc_debug >= (n)) \ | 48 | if (pc_debug >= (n)) \ |
| 49 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ | 49 | dev_printk(KERN_DEBUG, reader_to_dev(rdr), "%s:" x, \ |
| 50 | __FUNCTION__ , ##args); \ | 50 | __func__ , ##args); \ |
| 51 | } while (0) | 51 | } while (0) |
| 52 | #else | 52 | #else |
| 53 | #define DEBUGP(n, rdr, x, args...) | 53 | #define DEBUGP(n, rdr, x, args...) |
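The cm4000_cs and cm4040_cs hunks are a mechanical rename from the GCC-specific __FUNCTION__ to the C99-standard __func__; both yield the name of the enclosing function, so the debug output is unchanged. A trivial standalone example:

    #include <stdio.h>

    static void probe(void)
    {
            /* __func__ is predefined by C99; __FUNCTION__ is a GCC alias. */
            printf("%s: called\n", __func__);   /* prints "probe: called" */
    }

    int main(void)
    {
            probe();
            return 0;
    }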
diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c index 583356426dfb..1dd0e992c83d 100644 --- a/drivers/char/pcmcia/synclink_cs.c +++ b/drivers/char/pcmcia/synclink_cs.c | |||
| @@ -503,20 +503,9 @@ static void* mgslpc_get_text_ptr(void) | |||
| 503 | * The wrappers maintain line discipline references | 503 | * The wrappers maintain line discipline references |
| 504 | * while calling into the line discipline. | 504 | * while calling into the line discipline. |
| 505 | * | 505 | * |
| 506 | * ldisc_flush_buffer - flush line discipline receive buffers | ||
| 507 | * ldisc_receive_buf - pass receive data to line discipline | 506 | * ldisc_receive_buf - pass receive data to line discipline |
| 508 | */ | 507 | */ |
| 509 | 508 | ||
| 510 | static void ldisc_flush_buffer(struct tty_struct *tty) | ||
| 511 | { | ||
| 512 | struct tty_ldisc *ld = tty_ldisc_ref(tty); | ||
| 513 | if (ld) { | ||
| 514 | if (ld->flush_buffer) | ||
| 515 | ld->flush_buffer(tty); | ||
| 516 | tty_ldisc_deref(ld); | ||
| 517 | } | ||
| 518 | } | ||
| 519 | |||
| 520 | static void ldisc_receive_buf(struct tty_struct *tty, | 509 | static void ldisc_receive_buf(struct tty_struct *tty, |
| 521 | const __u8 *data, char *flags, int count) | 510 | const __u8 *data, char *flags, int count) |
| 522 | { | 511 | { |
| @@ -1556,7 +1545,7 @@ static void mgslpc_change_params(MGSLPC_INFO *info) | |||
| 1556 | 1545 | ||
| 1557 | /* Add a character to the transmit buffer | 1546 | /* Add a character to the transmit buffer |
| 1558 | */ | 1547 | */ |
| 1559 | static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch) | 1548 | static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch) |
| 1560 | { | 1549 | { |
| 1561 | MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; | 1550 | MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; |
| 1562 | unsigned long flags; | 1551 | unsigned long flags; |
| @@ -1567,10 +1556,10 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1567 | } | 1556 | } |
| 1568 | 1557 | ||
| 1569 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) | 1558 | if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) |
| 1570 | return; | 1559 | return 0; |
| 1571 | 1560 | ||
| 1572 | if (!info->tx_buf) | 1561 | if (!info->tx_buf) |
| 1573 | return; | 1562 | return 0; |
| 1574 | 1563 | ||
| 1575 | spin_lock_irqsave(&info->lock,flags); | 1564 | spin_lock_irqsave(&info->lock,flags); |
| 1576 | 1565 | ||
| @@ -1583,6 +1572,7 @@ static void mgslpc_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1583 | } | 1572 | } |
| 1584 | 1573 | ||
| 1585 | spin_unlock_irqrestore(&info->lock,flags); | 1574 | spin_unlock_irqrestore(&info->lock,flags); |
| 1575 | return 1; | ||
| 1586 | } | 1576 | } |
| 1587 | 1577 | ||
| 1588 | /* Enable transmitter so remaining characters in the | 1578 | /* Enable transmitter so remaining characters in the |
| @@ -2467,10 +2457,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp) | |||
| 2467 | if (info->flags & ASYNC_INITIALIZED) | 2457 | if (info->flags & ASYNC_INITIALIZED) |
| 2468 | mgslpc_wait_until_sent(tty, info->timeout); | 2458 | mgslpc_wait_until_sent(tty, info->timeout); |
| 2469 | 2459 | ||
| 2470 | if (tty->driver->flush_buffer) | 2460 | mgslpc_flush_buffer(tty); |
| 2471 | tty->driver->flush_buffer(tty); | ||
| 2472 | 2461 | ||
| 2473 | ldisc_flush_buffer(tty); | 2462 | tty_ldisc_flush(tty); |
| 2474 | 2463 | ||
| 2475 | shutdown(info); | 2464 | shutdown(info); |
| 2476 | 2465 | ||
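The synclink_cs hunks above do two things: they drop the driver-private ldisc_flush_buffer() wrapper, since the core tty_ldisc_flush() helper already performs the same take-reference, flush_buffer, drop-reference sequence that the removed code open-coded, and they convert mgslpc_put_char() to the int-returning tty_operations convention (return 0 when the byte cannot be buffered, non-zero when it was accepted). A minimal sketch of that put_char convention follows; the struct and field names are hypothetical, not the driver's own, and this illustrates the calling convention rather than the actual mgslpc code.

#include <linux/tty.h>
#include <linux/spinlock.h>

struct example_port {                   /* hypothetical per-port state */
	spinlock_t lock;
	unsigned char *tx_buf;
	unsigned int tx_head, tx_count, tx_size;
};

static int example_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct example_port *port = tty->driver_data;
	unsigned long flags;
	int queued = 0;

	if (!port || !port->tx_buf)
		return 0;               /* nothing could be queued */

	spin_lock_irqsave(&port->lock, flags);
	if (port->tx_count < port->tx_size) {
		port->tx_buf[port->tx_head++ % port->tx_size] = ch;
		port->tx_count++;
		queued = 1;             /* one byte accepted */
	}
	spin_unlock_irqrestore(&port->lock, flags);

	return queued;
}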
diff --git a/drivers/char/pty.c b/drivers/char/pty.c index 706ff34728f1..0a05c038ae6f 100644 --- a/drivers/char/pty.c +++ b/drivers/char/pty.c | |||
| @@ -181,6 +181,7 @@ static int pty_set_lock(struct tty_struct *tty, int __user * arg) | |||
| 181 | static void pty_flush_buffer(struct tty_struct *tty) | 181 | static void pty_flush_buffer(struct tty_struct *tty) |
| 182 | { | 182 | { |
| 183 | struct tty_struct *to = tty->link; | 183 | struct tty_struct *to = tty->link; |
| 184 | unsigned long flags; | ||
| 184 | 185 | ||
| 185 | if (!to) | 186 | if (!to) |
| 186 | return; | 187 | return; |
| @@ -189,8 +190,10 @@ static void pty_flush_buffer(struct tty_struct *tty) | |||
| 189 | to->ldisc.flush_buffer(to); | 190 | to->ldisc.flush_buffer(to); |
| 190 | 191 | ||
| 191 | if (to->packet) { | 192 | if (to->packet) { |
| 193 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 192 | tty->ctrl_status |= TIOCPKT_FLUSHWRITE; | 194 | tty->ctrl_status |= TIOCPKT_FLUSHWRITE; |
| 193 | wake_up_interruptible(&to->read_wait); | 195 | wake_up_interruptible(&to->read_wait); |
| 196 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 194 | } | 197 | } |
| 195 | } | 198 | } |
| 196 | 199 | ||
| @@ -251,6 +254,18 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file, | |||
| 251 | static int legacy_count = CONFIG_LEGACY_PTY_COUNT; | 254 | static int legacy_count = CONFIG_LEGACY_PTY_COUNT; |
| 252 | module_param(legacy_count, int, 0); | 255 | module_param(legacy_count, int, 0); |
| 253 | 256 | ||
| 257 | static const struct tty_operations pty_ops_bsd = { | ||
| 258 | .open = pty_open, | ||
| 259 | .close = pty_close, | ||
| 260 | .write = pty_write, | ||
| 261 | .write_room = pty_write_room, | ||
| 262 | .flush_buffer = pty_flush_buffer, | ||
| 263 | .chars_in_buffer = pty_chars_in_buffer, | ||
| 264 | .unthrottle = pty_unthrottle, | ||
| 265 | .set_termios = pty_set_termios, | ||
| 266 | .ioctl = pty_bsd_ioctl, | ||
| 267 | }; | ||
| 268 | |||
| 254 | static void __init legacy_pty_init(void) | 269 | static void __init legacy_pty_init(void) |
| 255 | { | 270 | { |
| 256 | if (legacy_count <= 0) | 271 | if (legacy_count <= 0) |
| @@ -281,7 +296,6 @@ static void __init legacy_pty_init(void) | |||
| 281 | pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; | 296 | pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; |
| 282 | pty_driver->other = pty_slave_driver; | 297 | pty_driver->other = pty_slave_driver; |
| 283 | tty_set_operations(pty_driver, &pty_ops); | 298 | tty_set_operations(pty_driver, &pty_ops); |
| 284 | pty_driver->ioctl = pty_bsd_ioctl; | ||
| 285 | 299 | ||
| 286 | pty_slave_driver->owner = THIS_MODULE; | 300 | pty_slave_driver->owner = THIS_MODULE; |
| 287 | pty_slave_driver->driver_name = "pty_slave"; | 301 | pty_slave_driver->driver_name = "pty_slave"; |
| @@ -374,6 +388,19 @@ static int pty_unix98_ioctl(struct tty_struct *tty, struct file *file, | |||
| 374 | return -ENOIOCTLCMD; | 388 | return -ENOIOCTLCMD; |
| 375 | } | 389 | } |
| 376 | 390 | ||
| 391 | static const struct tty_operations pty_unix98_ops = { | ||
| 392 | .open = pty_open, | ||
| 393 | .close = pty_close, | ||
| 394 | .write = pty_write, | ||
| 395 | .write_room = pty_write_room, | ||
| 396 | .flush_buffer = pty_flush_buffer, | ||
| 397 | .chars_in_buffer = pty_chars_in_buffer, | ||
| 398 | .unthrottle = pty_unthrottle, | ||
| 399 | .set_termios = pty_set_termios, | ||
| 400 | .ioctl = pty_unix98_ioctl | ||
| 401 | }; | ||
| 402 | |||
| 403 | |||
| 377 | static void __init unix98_pty_init(void) | 404 | static void __init unix98_pty_init(void) |
| 378 | { | 405 | { |
| 379 | ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX); | 406 | ptm_driver = alloc_tty_driver(NR_UNIX98_PTY_MAX); |
| @@ -400,8 +427,7 @@ static void __init unix98_pty_init(void) | |||
| 400 | ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | | 427 | ptm_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | |
| 401 | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM; | 428 | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM; |
| 402 | ptm_driver->other = pts_driver; | 429 | ptm_driver->other = pts_driver; |
| 403 | tty_set_operations(ptm_driver, &pty_ops); | 430 | tty_set_operations(ptm_driver, &pty_unix98_ops); |
| 404 | ptm_driver->ioctl = pty_unix98_ioctl; | ||
| 405 | 431 | ||
| 406 | pts_driver->owner = THIS_MODULE; | 432 | pts_driver->owner = THIS_MODULE; |
| 407 | pts_driver->driver_name = "pty_slave"; | 433 | pts_driver->driver_name = "pty_slave"; |
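The pty hunks replace the old pattern of installing the shared pty_ops table and then patching pty_driver->ioctl afterwards with two dedicated const tty_operations tables (pty_ops_bsd and pty_unix98_ops), so each flavour hands its complete method table to tty_set_operations() in one go; the flush path also takes ctrl_lock around the TIOCPKT_FLUSHWRITE update so packet-mode status bits are not modified concurrently. A sketch of that registration pattern under assumed names (example_ops, example_ioctl and the init function below are illustrative, not part of the pty driver):

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>

static int example_ioctl(struct tty_struct *tty, struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	return -ENOIOCTLCMD;    /* defer to the generic tty ioctl handling */
}

/* Flavour-specific entry points live in the const table itself... */
static const struct tty_operations example_ops = {
	.ioctl = example_ioctl,
};

static int __init example_pty_init(void)
{
	struct tty_driver *drv = alloc_tty_driver(4);

	if (!drv)
		return -ENOMEM;

	/* ...so a single call installs the whole set; no per-field fixups. */
	tty_set_operations(drv, &example_ops);

	/* A real driver would still fill in name, major/minor, init_termios
	 * and flags before registering; omitted here for brevity. */
	return tty_register_driver(drv);
}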
diff --git a/drivers/char/rio/cirrus.h b/drivers/char/rio/cirrus.h index f4f837f86829..a03a538a3efb 100644 --- a/drivers/char/rio/cirrus.h +++ b/drivers/char/rio/cirrus.h | |||
| @@ -43,83 +43,83 @@ | |||
| 43 | /* Bit fields for particular registers shared with driver */ | 43 | /* Bit fields for particular registers shared with driver */ |
| 44 | 44 | ||
| 45 | /* COR1 - driver and RTA */ | 45 | /* COR1 - driver and RTA */ |
| 46 | #define COR1_ODD 0x80 /* Odd parity */ | 46 | #define RIOC_COR1_ODD 0x80 /* Odd parity */ |
| 47 | #define COR1_EVEN 0x00 /* Even parity */ | 47 | #define RIOC_COR1_EVEN 0x00 /* Even parity */ |
| 48 | #define COR1_NOP 0x00 /* No parity */ | 48 | #define RIOC_COR1_NOP 0x00 /* No parity */ |
| 49 | #define COR1_FORCE 0x20 /* Force parity */ | 49 | #define RIOC_COR1_FORCE 0x20 /* Force parity */ |
| 50 | #define COR1_NORMAL 0x40 /* With parity */ | 50 | #define RIOC_COR1_NORMAL 0x40 /* With parity */ |
| 51 | #define COR1_1STOP 0x00 /* 1 stop bit */ | 51 | #define RIOC_COR1_1STOP 0x00 /* 1 stop bit */ |
| 52 | #define COR1_15STOP 0x04 /* 1.5 stop bits */ | 52 | #define RIOC_COR1_15STOP 0x04 /* 1.5 stop bits */ |
| 53 | #define COR1_2STOP 0x08 /* 2 stop bits */ | 53 | #define RIOC_COR1_2STOP 0x08 /* 2 stop bits */ |
| 54 | #define COR1_5BITS 0x00 /* 5 data bits */ | 54 | #define RIOC_COR1_5BITS 0x00 /* 5 data bits */ |
| 55 | #define COR1_6BITS 0x01 /* 6 data bits */ | 55 | #define RIOC_COR1_6BITS 0x01 /* 6 data bits */ |
| 56 | #define COR1_7BITS 0x02 /* 7 data bits */ | 56 | #define RIOC_COR1_7BITS 0x02 /* 7 data bits */ |
| 57 | #define COR1_8BITS 0x03 /* 8 data bits */ | 57 | #define RIOC_COR1_8BITS 0x03 /* 8 data bits */ |
| 58 | 58 | ||
| 59 | #define COR1_HOST 0xef /* Safe host bits */ | 59 | #define RIOC_COR1_HOST 0xef /* Safe host bits */ |
| 60 | 60 | ||
| 61 | /* RTA only */ | 61 | /* RTA only */ |
| 62 | #define COR1_CINPCK 0x00 /* Check parity of received characters */ | 62 | #define RIOC_COR1_CINPCK 0x00 /* Check parity of received characters */ |
| 63 | #define COR1_CNINPCK 0x10 /* Don't check parity */ | 63 | #define RIOC_COR1_CNINPCK 0x10 /* Don't check parity */ |
| 64 | 64 | ||
| 65 | /* COR2 bits for both RTA and driver use */ | 65 | /* COR2 bits for both RTA and driver use */ |
| 66 | #define COR2_IXANY 0x80 /* IXANY - any character is XON */ | 66 | #define RIOC_COR2_IXANY 0x80 /* IXANY - any character is XON */ |
| 67 | #define COR2_IXON 0x40 /* IXON - enable tx soft flowcontrol */ | 67 | #define RIOC_COR2_IXON 0x40 /* IXON - enable tx soft flowcontrol */ |
| 68 | #define COR2_RTSFLOW 0x02 /* Enable tx hardware flow control */ | 68 | #define RIOC_COR2_RTSFLOW 0x02 /* Enable tx hardware flow control */ |
| 69 | 69 | ||
| 70 | /* Additional driver bits */ | 70 | /* Additional driver bits */ |
| 71 | #define COR2_HUPCL 0x20 /* Hang up on close */ | 71 | #define RIOC_COR2_HUPCL 0x20 /* Hang up on close */ |
| 72 | #define COR2_CTSFLOW 0x04 /* Enable rx hardware flow control */ | 72 | #define RIOC_COR2_CTSFLOW 0x04 /* Enable rx hardware flow control */ |
| 73 | #define COR2_IXOFF 0x01 /* Enable rx software flow control */ | 73 | #define RIOC_COR2_IXOFF 0x01 /* Enable rx software flow control */ |
| 74 | #define COR2_DTRFLOW 0x08 /* Enable tx hardware flow control */ | 74 | #define RIOC_COR2_DTRFLOW 0x08 /* Enable tx hardware flow control */ |
| 75 | 75 | ||
| 76 | /* RTA use only */ | 76 | /* RTA use only */ |
| 77 | #define COR2_ETC 0x20 /* Embedded transmit options */ | 77 | #define RIOC_COR2_ETC 0x20 /* Embedded transmit options */ |
| 78 | #define COR2_LOCAL 0x10 /* Local loopback mode */ | 78 | #define RIOC_COR2_LOCAL 0x10 /* Local loopback mode */ |
| 79 | #define COR2_REMOTE 0x08 /* Remote loopback mode */ | 79 | #define RIOC_COR2_REMOTE 0x08 /* Remote loopback mode */ |
| 80 | #define COR2_HOST 0xc2 /* Safe host bits */ | 80 | #define RIOC_COR2_HOST 0xc2 /* Safe host bits */ |
| 81 | 81 | ||
| 82 | /* COR3 - RTA use only */ | 82 | /* COR3 - RTA use only */ |
| 83 | #define COR3_SCDRNG 0x80 /* Enable special char detect for range */ | 83 | #define RIOC_COR3_SCDRNG 0x80 /* Enable special char detect for range */ |
| 84 | #define COR3_SCD34 0x40 /* Special character detect for SCHR's 3 + 4 */ | 84 | #define RIOC_COR3_SCD34 0x40 /* Special character detect for SCHR's 3 + 4 */ |
| 85 | #define COR3_FCT 0x20 /* Flow control transparency */ | 85 | #define RIOC_COR3_FCT 0x20 /* Flow control transparency */ |
| 86 | #define COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */ | 86 | #define RIOC_COR3_SCD12 0x10 /* Special character detect for SCHR's 1 + 2 */ |
| 87 | #define COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */ | 87 | #define RIOC_COR3_FIFO12 0x0c /* 12 chars for receive FIFO threshold */ |
| 88 | #define COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */ | 88 | #define RIOC_COR3_FIFO10 0x0a /* 10 chars for receive FIFO threshold */ |
| 89 | #define COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */ | 89 | #define RIOC_COR3_FIFO8 0x08 /* 8 chars for receive FIFO threshold */ |
| 90 | #define COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */ | 90 | #define RIOC_COR3_FIFO6 0x06 /* 6 chars for receive FIFO threshold */ |
| 91 | 91 | ||
| 92 | #define COR3_THRESHOLD COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */ | 92 | #define RIOC_COR3_THRESHOLD RIOC_COR3_FIFO8 /* MUST BE LESS THAN MCOR_THRESHOLD */ |
| 93 | 93 | ||
| 94 | #define COR3_DEFAULT (COR3_FCT | COR3_THRESHOLD) | 94 | #define RIOC_COR3_DEFAULT (RIOC_COR3_FCT | RIOC_COR3_THRESHOLD) |
| 95 | /* Default bits for COR3 */ | 95 | /* Default bits for COR3 */ |
| 96 | 96 | ||
| 97 | /* COR4 driver and RTA use */ | 97 | /* COR4 driver and RTA use */ |
| 98 | #define COR4_IGNCR 0x80 /* Throw away CR's on input */ | 98 | #define RIOC_COR4_IGNCR 0x80 /* Throw away CR's on input */ |
| 99 | #define COR4_ICRNL 0x40 /* Map CR -> NL on input */ | 99 | #define RIOC_COR4_ICRNL 0x40 /* Map CR -> NL on input */ |
| 100 | #define COR4_INLCR 0x20 /* Map NL -> CR on input */ | 100 | #define RIOC_COR4_INLCR 0x20 /* Map NL -> CR on input */ |
| 101 | #define COR4_IGNBRK 0x10 /* Ignore Break */ | 101 | #define RIOC_COR4_IGNBRK 0x10 /* Ignore Break */ |
| 102 | #define COR4_NBRKINT 0x08 /* No interrupt on break (-BRKINT) */ | 102 | #define RIOC_COR4_NBRKINT 0x08 /* No interrupt on break (-BRKINT) */ |
| 103 | #define COR4_RAISEMOD 0x01 /* Raise modem output lines on non-zero baud */ | 103 | #define RIOC_COR4_RAISEMOD 0x01 /* Raise modem output lines on non-zero baud */ |
| 104 | 104 | ||
| 105 | 105 | ||
| 106 | /* COR4 driver only */ | 106 | /* COR4 driver only */ |
| 107 | #define COR4_IGNPAR 0x04 /* IGNPAR (ignore characters with errors) */ | 107 | #define RIOC_COR4_IGNPAR 0x04 /* IGNPAR (ignore characters with errors) */ |
| 108 | #define COR4_PARMRK 0x02 /* PARMRK */ | 108 | #define RIOC_COR4_PARMRK 0x02 /* PARMRK */ |
| 109 | 109 | ||
| 110 | #define COR4_HOST 0xf8 /* Safe host bits */ | 110 | #define RIOC_COR4_HOST 0xf8 /* Safe host bits */ |
| 111 | 111 | ||
| 112 | /* COR4 RTA only */ | 112 | /* COR4 RTA only */ |
| 113 | #define COR4_CIGNPAR 0x02 /* Thrown away bad characters */ | 113 | #define RIOC_COR4_CIGNPAR 0x02 /* Thrown away bad characters */ |
| 114 | #define COR4_CPARMRK 0x04 /* PARMRK characters */ | 114 | #define RIOC_COR4_CPARMRK 0x04 /* PARMRK characters */ |
| 115 | #define COR4_CNPARMRK 0x03 /* Don't PARMRK */ | 115 | #define RIOC_COR4_CNPARMRK 0x03 /* Don't PARMRK */ |
| 116 | 116 | ||
| 117 | /* COR5 driver and RTA use */ | 117 | /* COR5 driver and RTA use */ |
| 118 | #define COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */ | 118 | #define RIOC_COR5_ISTRIP 0x80 /* Strip input chars to 7 bits */ |
| 119 | #define COR5_LNE 0x40 /* Enable LNEXT processing */ | 119 | #define RIOC_COR5_LNE 0x40 /* Enable LNEXT processing */ |
| 120 | #define COR5_CMOE 0x20 /* Match good and errored characters */ | 120 | #define RIOC_COR5_CMOE 0x20 /* Match good and errored characters */ |
| 121 | #define COR5_ONLCR 0x02 /* NL -> CR NL on output */ | 121 | #define RIOC_COR5_ONLCR 0x02 /* NL -> CR NL on output */ |
| 122 | #define COR5_OCRNL 0x01 /* CR -> NL on output */ | 122 | #define RIOC_COR5_OCRNL 0x01 /* CR -> NL on output */ |
| 123 | 123 | ||
| 124 | /* | 124 | /* |
| 125 | ** Spare bits - these are not used in the CIRRUS registers, so we use | 125 | ** Spare bits - these are not used in the CIRRUS registers, so we use |
| @@ -128,86 +128,86 @@ | |||
| 128 | /* | 128 | /* |
| 129 | ** tstop and tbusy indication | 129 | ** tstop and tbusy indication |
| 130 | */ | 130 | */ |
| 131 | #define COR5_TSTATE_ON 0x08 /* Turn on monitoring of tbusy and tstop */ | 131 | #define RIOC_COR5_TSTATE_ON 0x08 /* Turn on monitoring of tbusy and tstop */ |
| 132 | #define COR5_TSTATE_OFF 0x04 /* Turn off monitoring of tbusy and tstop */ | 132 | #define RIOC_COR5_TSTATE_OFF 0x04 /* Turn off monitoring of tbusy and tstop */ |
| 133 | /* | 133 | /* |
| 134 | ** TAB3 | 134 | ** TAB3 |
| 135 | */ | 135 | */ |
| 136 | #define COR5_TAB3 0x10 /* TAB3 mode */ | 136 | #define RIOC_COR5_TAB3 0x10 /* TAB3 mode */ |
| 137 | 137 | ||
| 138 | #define COR5_HOST 0xc3 /* Safe host bits */ | 138 | #define RIOC_COR5_HOST 0xc3 /* Safe host bits */ |
| 139 | 139 | ||
| 140 | /* CCSR */ | 140 | /* CCSR */ |
| 141 | #define CCSR_TXFLOFF 0x04 /* Tx is xoffed */ | 141 | #define RIOC_CCSR_TXFLOFF 0x04 /* Tx is xoffed */ |
| 142 | 142 | ||
| 143 | /* MSVR1 */ | 143 | /* MSVR1 */ |
| 144 | /* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the | 144 | /* NB. DTR / CD swapped from Cirrus spec as the pins are also reversed on the |
| 145 | RTA. This is because otherwise DCD would get lost on the 1 parallel / 3 | 145 | RTA. This is because otherwise DCD would get lost on the 1 parallel / 3 |
| 146 | serial option. | 146 | serial option. |
| 147 | */ | 147 | */ |
| 148 | #define MSVR1_CD 0x80 /* CD (DSR on Cirrus) */ | 148 | #define RIOC_MSVR1_CD 0x80 /* CD (DSR on Cirrus) */ |
| 149 | #define MSVR1_RTS 0x40 /* RTS (CTS on Cirrus) */ | 149 | #define RIOC_MSVR1_RTS 0x40 /* RTS (CTS on Cirrus) */ |
| 150 | #define MSVR1_RI 0x20 /* RI */ | 150 | #define RIOC_MSVR1_RI 0x20 /* RI */ |
| 151 | #define MSVR1_DTR 0x10 /* DTR (CD on Cirrus) */ | 151 | #define RIOC_MSVR1_DTR 0x10 /* DTR (CD on Cirrus) */ |
| 152 | #define MSVR1_CTS 0x01 /* CTS output pin (RTS on Cirrus) */ | 152 | #define RIOC_MSVR1_CTS 0x01 /* CTS output pin (RTS on Cirrus) */ |
| 153 | /* Next two used to indicate state of tbusy and tstop to driver */ | 153 | /* Next two used to indicate state of tbusy and tstop to driver */ |
| 154 | #define MSVR1_TSTOP 0x08 /* Set if port flow controlled */ | 154 | #define RIOC_MSVR1_TSTOP 0x08 /* Set if port flow controlled */ |
| 155 | #define MSVR1_TEMPTY 0x04 /* Set if port tx buffer empty */ | 155 | #define RIOC_MSVR1_TEMPTY 0x04 /* Set if port tx buffer empty */ |
| 156 | 156 | ||
| 157 | #define MSVR1_HOST 0xf3 /* The bits the host wants */ | 157 | #define RIOC_MSVR1_HOST 0xf3 /* The bits the host wants */ |
| 158 | 158 | ||
| 159 | /* Defines for the subscripts of a CONFIG packet */ | 159 | /* Defines for the subscripts of a CONFIG packet */ |
| 160 | #define CONFIG_COR1 1 /* Option register 1 */ | 160 | #define RIOC_CONFIG_COR1 1 /* Option register 1 */ |
| 161 | #define CONFIG_COR2 2 /* Option register 2 */ | 161 | #define RIOC_CONFIG_COR2 2 /* Option register 2 */ |
| 162 | #define CONFIG_COR4 3 /* Option register 4 */ | 162 | #define RIOC_CONFIG_COR4 3 /* Option register 4 */ |
| 163 | #define CONFIG_COR5 4 /* Option register 5 */ | 163 | #define RIOC_CONFIG_COR5 4 /* Option register 5 */ |
| 164 | #define CONFIG_TXXON 5 /* Tx XON character */ | 164 | #define RIOC_CONFIG_TXXON 5 /* Tx XON character */ |
| 165 | #define CONFIG_TXXOFF 6 /* Tx XOFF character */ | 165 | #define RIOC_CONFIG_TXXOFF 6 /* Tx XOFF character */ |
| 166 | #define CONFIG_RXXON 7 /* Rx XON character */ | 166 | #define RIOC_CONFIG_RXXON 7 /* Rx XON character */ |
| 167 | #define CONFIG_RXXOFF 8 /* Rx XOFF character */ | 167 | #define RIOC_CONFIG_RXXOFF 8 /* Rx XOFF character */ |
| 168 | #define CONFIG_LNEXT 9 /* LNEXT character */ | 168 | #define RIOC_CONFIG_LNEXT 9 /* LNEXT character */ |
| 169 | #define CONFIG_TXBAUD 10 /* Tx baud rate */ | 169 | #define RIOC_CONFIG_TXBAUD 10 /* Tx baud rate */ |
| 170 | #define CONFIG_RXBAUD 11 /* Rx baud rate */ | 170 | #define RIOC_CONFIG_RXBAUD 11 /* Rx baud rate */ |
| 171 | 171 | ||
| 172 | #define PRE_EMPTIVE 0x80 /* Pre-emptive bit in command field */ | 172 | #define RIOC_PRE_EMPTIVE 0x80 /* Pre-emptive bit in command field */ |
| 173 | 173 | ||
| 174 | /* Packet types going from Host to remote - with the exception of OPEN, MOPEN, | 174 | /* Packet types going from Host to remote - with the exception of OPEN, MOPEN, |
| 175 | CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not | 175 | CONFIG, SBREAK and MEMDUMP the remaining bytes of the data array will not |
| 176 | be used | 176 | be used |
| 177 | */ | 177 | */ |
| 178 | #define OPEN 0x00 /* Open a port */ | 178 | #define RIOC_OPEN 0x00 /* Open a port */ |
| 179 | #define CONFIG 0x01 /* Configure a port */ | 179 | #define RIOC_CONFIG 0x01 /* Configure a port */ |
| 180 | #define MOPEN 0x02 /* Modem open (block for DCD) */ | 180 | #define RIOC_MOPEN 0x02 /* Modem open (block for DCD) */ |
| 181 | #define CLOSE 0x03 /* Close a port */ | 181 | #define RIOC_CLOSE 0x03 /* Close a port */ |
| 182 | #define WFLUSH (0x04 | PRE_EMPTIVE) /* Write flush */ | 182 | #define RIOC_WFLUSH (0x04 | RIOC_PRE_EMPTIVE) /* Write flush */ |
| 183 | #define RFLUSH (0x05 | PRE_EMPTIVE) /* Read flush */ | 183 | #define RIOC_RFLUSH (0x05 | RIOC_PRE_EMPTIVE) /* Read flush */ |
| 184 | #define RESUME (0x06 | PRE_EMPTIVE) /* Resume if xoffed */ | 184 | #define RIOC_RESUME (0x06 | RIOC_PRE_EMPTIVE) /* Resume if xoffed */ |
| 185 | #define SBREAK 0x07 /* Start break */ | 185 | #define RIOC_SBREAK 0x07 /* Start break */ |
| 186 | #define EBREAK 0x08 /* End break */ | 186 | #define RIOC_EBREAK 0x08 /* End break */ |
| 187 | #define SUSPEND (0x09 | PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */ | 187 | #define RIOC_SUSPEND (0x09 | RIOC_PRE_EMPTIVE) /* Susp op (behave as tho xoffed) */ |
| 188 | #define FCLOSE (0x0a | PRE_EMPTIVE) /* Force close */ | 188 | #define RIOC_FCLOSE (0x0a | RIOC_PRE_EMPTIVE) /* Force close */ |
| 189 | #define XPRINT 0x0b /* Xprint packet */ | 189 | #define RIOC_XPRINT 0x0b /* Xprint packet */ |
| 190 | #define MBIS (0x0c | PRE_EMPTIVE) /* Set modem lines */ | 190 | #define RIOC_MBIS (0x0c | RIOC_PRE_EMPTIVE) /* Set modem lines */ |
| 191 | #define MBIC (0x0d | PRE_EMPTIVE) /* Clear modem lines */ | 191 | #define RIOC_MBIC (0x0d | RIOC_PRE_EMPTIVE) /* Clear modem lines */ |
| 192 | #define MSET (0x0e | PRE_EMPTIVE) /* Set modem lines */ | 192 | #define RIOC_MSET (0x0e | RIOC_PRE_EMPTIVE) /* Set modem lines */ |
| 193 | #define PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */ | 193 | #define RIOC_PCLOSE 0x0f /* Pseudo close - Leaves rx/tx enabled */ |
| 194 | #define MGET (0x10 | PRE_EMPTIVE) /* Force update of modem status */ | 194 | #define RIOC_MGET (0x10 | RIOC_PRE_EMPTIVE) /* Force update of modem status */ |
| 195 | #define MEMDUMP (0x11 | PRE_EMPTIVE) /* Send back mem from addr supplied */ | 195 | #define RIOC_MEMDUMP (0x11 | RIOC_PRE_EMPTIVE) /* Send back mem from addr supplied */ |
| 196 | #define READ_REGISTER (0x12 | PRE_EMPTIVE) /* Read CD1400 register (debug) */ | 196 | #define RIOC_READ_REGISTER (0x12 | RIOC_PRE_EMPTIVE) /* Read CD1400 register (debug) */ |
| 197 | 197 | ||
| 198 | /* "Command" packets going from remote to host COMPLETE and MODEM_STATUS | 198 | /* "Command" packets going from remote to host COMPLETE and MODEM_STATUS |
| 199 | use data[4] / data[3] to indicate current state and modem status respectively | 199 | use data[4] / data[3] to indicate current state and modem status respectively |
| 200 | */ | 200 | */ |
| 201 | 201 | ||
| 202 | #define COMPLETE (0x20 | PRE_EMPTIVE) | 202 | #define RIOC_COMPLETE (0x20 | RIOC_PRE_EMPTIVE) |
| 203 | /* Command complete */ | 203 | /* Command complete */ |
| 204 | #define BREAK_RECEIVED (0x21 | PRE_EMPTIVE) | 204 | #define RIOC_BREAK_RECEIVED (0x21 | RIOC_PRE_EMPTIVE) |
| 205 | /* Break received */ | 205 | /* Break received */ |
| 206 | #define MODEM_STATUS (0x22 | PRE_EMPTIVE) | 206 | #define RIOC_MODEM_STATUS (0x22 | RIOC_PRE_EMPTIVE) |
| 207 | /* Change in modem status */ | 207 | /* Change in modem status */ |
| 208 | 208 | ||
| 209 | /* "Command" packet that could go either way - handshake wake-up */ | 209 | /* "Command" packet that could go either way - handshake wake-up */ |
| 210 | #define HANDSHAKE (0x23 | PRE_EMPTIVE) | 210 | #define RIOC_HANDSHAKE (0x23 | RIOC_PRE_EMPTIVE) |
| 211 | /* Wake-up to HOST / RTA */ | 211 | /* Wake-up to HOST / RTA */ |
| 212 | 212 | ||
| 213 | #endif | 213 | #endif |
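The cirrus.h change is a pure namespace cleanup: every register-bit and packet-command macro gains a RIOC_ prefix (COR1_ODD becomes RIOC_COR1_ODD, CONFIG becomes RIOC_CONFIG, and so on), so these very generic names (OPEN, CLOSE, CONFIG, COMPLETE) no longer risk colliding with identically named macros elsewhere; the rio_linux.c, riocmd.c, rioctrl.c, riointr.c, rioparam.c and riotty.c hunks below update the call sites to match. A standalone sketch, reusing values copied from the hunk above, of how the renamed COR1 bits compose into a line configuration; illustrative only:

#include <stdio.h>

/* Values copied from the renamed header above. */
#define RIOC_COR1_NOP    0x00   /* no parity */
#define RIOC_COR1_1STOP  0x00   /* 1 stop bit */
#define RIOC_COR1_2STOP  0x08   /* 2 stop bits */
#define RIOC_COR1_8BITS  0x03   /* 8 data bits */

int main(void)
{
	unsigned char cor1_8n1 = RIOC_COR1_NOP | RIOC_COR1_1STOP | RIOC_COR1_8BITS;
	unsigned char cor1_8n2 = RIOC_COR1_NOP | RIOC_COR1_2STOP | RIOC_COR1_8BITS;

	printf("COR1 for 8N1 = 0x%02x, for 8N2 = 0x%02x\n", cor1_8n1, cor1_8n2);
	return 0;
}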
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c index 0ce96670f979..412777cd1e68 100644 --- a/drivers/char/rio/rio_linux.c +++ b/drivers/char/rio/rio_linux.c | |||
| @@ -344,7 +344,7 @@ int rio_minor(struct tty_struct *tty) | |||
| 344 | 344 | ||
| 345 | static int rio_set_real_termios(void *ptr) | 345 | static int rio_set_real_termios(void *ptr) |
| 346 | { | 346 | { |
| 347 | return RIOParam((struct Port *) ptr, CONFIG, 1, 1); | 347 | return RIOParam((struct Port *) ptr, RIOC_CONFIG, 1, 1); |
| 348 | } | 348 | } |
| 349 | 349 | ||
| 350 | 350 | ||
| @@ -487,7 +487,7 @@ static int rio_get_CD(void *ptr) | |||
| 487 | int rv; | 487 | int rv; |
| 488 | 488 | ||
| 489 | func_enter(); | 489 | func_enter(); |
| 490 | rv = (PortP->ModemState & MSVR1_CD) != 0; | 490 | rv = (PortP->ModemState & RIOC_MSVR1_CD) != 0; |
| 491 | 491 | ||
| 492 | rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv); | 492 | rio_dprintk(RIO_DEBUG_INIT, "Getting CD status: %d\n", rv); |
| 493 | 493 | ||
| @@ -607,7 +607,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd | |||
| 607 | rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n"); | 607 | rio_dprintk(RIO_DEBUG_TTY, "BREAK on deleted RTA\n"); |
| 608 | rc = -EIO; | 608 | rc = -EIO; |
| 609 | } else { | 609 | } else { |
| 610 | if (RIOShortCommand(p, PortP, SBREAK, 2, 250) == RIO_FAIL) { | 610 | if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, 250) == |
| 611 | RIO_FAIL) { | ||
| 611 | rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); | 612 | rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); |
| 612 | rc = -EIO; | 613 | rc = -EIO; |
| 613 | } | 614 | } |
| @@ -622,7 +623,8 @@ static int rio_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd | |||
| 622 | l = arg ? arg * 100 : 250; | 623 | l = arg ? arg * 100 : 250; |
| 623 | if (l > 255) | 624 | if (l > 255) |
| 624 | l = 255; | 625 | l = 255; |
| 625 | if (RIOShortCommand(p, PortP, SBREAK, 2, arg ? arg * 100 : 250) == RIO_FAIL) { | 626 | if (RIOShortCommand(p, PortP, RIOC_SBREAK, 2, |
| 627 | arg ? arg * 100 : 250) == RIO_FAIL) { | ||
| 626 | rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); | 628 | rio_dprintk(RIO_DEBUG_INTR, "SBREAK RIOShortCommand failed\n"); |
| 627 | rc = -EIO; | 629 | rc = -EIO; |
| 628 | } | 630 | } |
diff --git a/drivers/char/rio/rio_linux.h b/drivers/char/rio/rio_linux.h index dc3f005614a3..7f26cd7c815e 100644 --- a/drivers/char/rio/rio_linux.h +++ b/drivers/char/rio/rio_linux.h | |||
| @@ -186,9 +186,9 @@ static inline void *rio_memcpy_fromio(void *dest, void __iomem *source, int n) | |||
| 186 | 186 | ||
| 187 | #ifdef DEBUG | 187 | #ifdef DEBUG |
| 188 | #define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0) | 188 | #define rio_dprintk(f, str...) do { if (rio_debug & f) printk (str);} while (0) |
| 189 | #define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __FUNCTION__) | 189 | #define func_enter() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s\n", __func__) |
| 190 | #define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __FUNCTION__) | 190 | #define func_exit() rio_dprintk (RIO_DEBUG_FLOW, "rio: exit %s\n", __func__) |
| 191 | #define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__FUNCTION__, port->line) | 191 | #define func_enter2() rio_dprintk (RIO_DEBUG_FLOW, "rio: enter %s (port %d)\n",__func__, port->line) |
| 192 | #else | 192 | #else |
| 193 | #define rio_dprintk(f, str...) /* nothing */ | 193 | #define rio_dprintk(f, str...) /* nothing */ |
| 194 | #define func_enter() | 194 | #define func_enter() |
diff --git a/drivers/char/rio/riocmd.c b/drivers/char/rio/riocmd.c index bf36959fc121..7b96e0814887 100644 --- a/drivers/char/rio/riocmd.c +++ b/drivers/char/rio/riocmd.c | |||
| @@ -417,7 +417,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc | |||
| 417 | PortP = p->RIOPortp[SysPort]; | 417 | PortP = p->RIOPortp[SysPort]; |
| 418 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 418 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 419 | switch (readb(&PktCmdP->Command)) { | 419 | switch (readb(&PktCmdP->Command)) { |
| 420 | case BREAK_RECEIVED: | 420 | case RIOC_BREAK_RECEIVED: |
| 421 | rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n"); | 421 | rio_dprintk(RIO_DEBUG_CMD, "Received a break!\n"); |
| 422 | /* If the current line disc. is not multi-threading and | 422 | /* If the current line disc. is not multi-threading and |
| 423 | the current processor is not the default, reset rup_intr | 423 | the current processor is not the default, reset rup_intr |
| @@ -428,16 +428,16 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc | |||
| 428 | gs_got_break(&PortP->gs); | 428 | gs_got_break(&PortP->gs); |
| 429 | break; | 429 | break; |
| 430 | 430 | ||
| 431 | case COMPLETE: | 431 | case RIOC_COMPLETE: |
| 432 | rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts); | 432 | rio_dprintk(RIO_DEBUG_CMD, "Command complete on phb %d host %Zd\n", readb(&PktCmdP->PhbNum), HostP - p->RIOHosts); |
| 433 | subCommand = 1; | 433 | subCommand = 1; |
| 434 | switch (readb(&PktCmdP->SubCommand)) { | 434 | switch (readb(&PktCmdP->SubCommand)) { |
| 435 | case MEMDUMP: | 435 | case RIOC_MEMDUMP: |
| 436 | rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr)); | 436 | rio_dprintk(RIO_DEBUG_CMD, "Memory dump cmd (0x%x) from addr 0x%x\n", readb(&PktCmdP->SubCommand), readw(&PktCmdP->SubAddr)); |
| 437 | break; | 437 | break; |
| 438 | case READ_REGISTER: | 438 | case RIOC_READ_REGISTER: |
| 439 | rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr)); | 439 | rio_dprintk(RIO_DEBUG_CMD, "Read register (0x%x)\n", readw(&PktCmdP->SubAddr)); |
| 440 | p->CdRegister = (readb(&PktCmdP->ModemStatus) & MSVR1_HOST); | 440 | p->CdRegister = (readb(&PktCmdP->ModemStatus) & RIOC_MSVR1_HOST); |
| 441 | break; | 441 | break; |
| 442 | default: | 442 | default: |
| 443 | subCommand = 0; | 443 | subCommand = 0; |
| @@ -456,14 +456,15 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc | |||
| 456 | rio_dprintk(RIO_DEBUG_CMD, "No change\n"); | 456 | rio_dprintk(RIO_DEBUG_CMD, "No change\n"); |
| 457 | 457 | ||
| 458 | /* FALLTHROUGH */ | 458 | /* FALLTHROUGH */ |
| 459 | case MODEM_STATUS: | 459 | case RIOC_MODEM_STATUS: |
| 460 | /* | 460 | /* |
| 461 | ** Knock out the tbusy and tstop bits, as these are not relevant | 461 | ** Knock out the tbusy and tstop bits, as these are not relevant |
| 462 | ** to the check for modem status change (they're just there because | 462 | ** to the check for modem status change (they're just there because |
| 463 | ** it's a convenient place to put them!). | 463 | ** it's a convenient place to put them!). |
| 464 | */ | 464 | */ |
| 465 | ReportedModemStatus = readb(&PktCmdP->ModemStatus); | 465 | ReportedModemStatus = readb(&PktCmdP->ModemStatus); |
| 466 | if ((PortP->ModemState & MSVR1_HOST) == (ReportedModemStatus & MSVR1_HOST)) { | 466 | if ((PortP->ModemState & RIOC_MSVR1_HOST) == |
| 467 | (ReportedModemStatus & RIOC_MSVR1_HOST)) { | ||
| 467 | rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState); | 468 | rio_dprintk(RIO_DEBUG_CMD, "Modem status unchanged 0x%x\n", PortP->ModemState); |
| 468 | /* | 469 | /* |
| 469 | ** Update ModemState just in case tbusy or tstop states have | 470 | ** Update ModemState just in case tbusy or tstop states have |
| @@ -497,7 +498,7 @@ static int RIOCommandRup(struct rio_info *p, uint Rup, struct Host *HostP, struc | |||
| 497 | /* | 498 | /* |
| 498 | ** Is there a carrier? | 499 | ** Is there a carrier? |
| 499 | */ | 500 | */ |
| 500 | if (PortP->ModemState & MSVR1_CD) { | 501 | if (PortP->ModemState & RIOC_MSVR1_CD) { |
| 501 | /* | 502 | /* |
| 502 | ** Has carrier just appeared? | 503 | ** Has carrier just appeared? |
| 503 | */ | 504 | */ |
| @@ -691,7 +692,7 @@ void RIOPollHostCommands(struct rio_info *p, struct Host *HostP) | |||
| 691 | */ | 692 | */ |
| 692 | rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags); | 693 | rio_spin_unlock_irqrestore(&UnixRupP->RupLock, flags); |
| 693 | FreeMe = RIOCommandRup(p, Rup, HostP, PacketP); | 694 | FreeMe = RIOCommandRup(p, Rup, HostP, PacketP); |
| 694 | if (readb(&PacketP->data[5]) == MEMDUMP) { | 695 | if (readb(&PacketP->data[5]) == RIOC_MEMDUMP) { |
| 695 | rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6]))); | 696 | rio_dprintk(RIO_DEBUG_CMD, "Memdump from 0x%x complete\n", readw(&(PacketP->data[6]))); |
| 696 | rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32); | 697 | rio_memcpy_fromio(p->RIOMemDump, &(PacketP->data[8]), 32); |
| 697 | } | 698 | } |
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c index d8eb2bcbe015..d65ceb9a434a 100644 --- a/drivers/char/rio/rioctrl.c +++ b/drivers/char/rio/rioctrl.c | |||
| @@ -422,7 +422,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su | |||
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 424 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 425 | if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RESUME) == RIO_FAIL) { | 425 | if (RIOPreemptiveCmd(p, (p->RIOPortp[port]), RIOC_RESUME) == |
| 426 | RIO_FAIL) { | ||
| 426 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n"); | 427 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_RESUME failed\n"); |
| 427 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); | 428 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); |
| 428 | return -EBUSY; | 429 | return -EBUSY; |
| @@ -636,7 +637,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su | |||
| 636 | return -ENXIO; | 637 | return -ENXIO; |
| 637 | } | 638 | } |
| 638 | PortP = (p->RIOPortp[PortTty.port]); | 639 | PortP = (p->RIOPortp[PortTty.port]); |
| 639 | RIOParam(PortP, CONFIG, PortP->State & RIO_MODEM, OK_TO_SLEEP); | 640 | RIOParam(PortP, RIOC_CONFIG, PortP->State & RIO_MODEM, |
| 641 | OK_TO_SLEEP); | ||
| 640 | return retval; | 642 | return retval; |
| 641 | 643 | ||
| 642 | case RIO_SET_PORT_PARAMS: | 644 | case RIO_SET_PORT_PARAMS: |
| @@ -1247,7 +1249,7 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su | |||
| 1247 | 1249 | ||
| 1248 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 1250 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 1249 | 1251 | ||
| 1250 | if (RIOPreemptiveCmd(p, PortP, MEMDUMP) == RIO_FAIL) { | 1252 | if (RIOPreemptiveCmd(p, PortP, RIOC_MEMDUMP) == RIO_FAIL) { |
| 1251 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n"); | 1253 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_MEM_DUMP failed\n"); |
| 1252 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); | 1254 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); |
| 1253 | return -EBUSY; | 1255 | return -EBUSY; |
| @@ -1313,7 +1315,8 @@ int riocontrol(struct rio_info *p, dev_t dev, int cmd, unsigned long arg, int su | |||
| 1313 | 1315 | ||
| 1314 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 1316 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 1315 | 1317 | ||
| 1316 | if (RIOPreemptiveCmd(p, PortP, READ_REGISTER) == RIO_FAIL) { | 1318 | if (RIOPreemptiveCmd(p, PortP, RIOC_READ_REGISTER) == |
| 1319 | RIO_FAIL) { | ||
| 1317 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n"); | 1320 | rio_dprintk(RIO_DEBUG_CTRL, "RIO_READ_REGISTER failed\n"); |
| 1318 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); | 1321 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); |
| 1319 | return -EBUSY; | 1322 | return -EBUSY; |
| @@ -1434,50 +1437,50 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
| 1434 | PktCmdP->PhbNum = port; | 1437 | PktCmdP->PhbNum = port; |
| 1435 | 1438 | ||
| 1436 | switch (Cmd) { | 1439 | switch (Cmd) { |
| 1437 | case MEMDUMP: | 1440 | case RIOC_MEMDUMP: |
| 1438 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " | 1441 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " |
| 1439 | "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); | 1442 | "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); |
| 1440 | PktCmdP->SubCommand = MEMDUMP; | 1443 | PktCmdP->SubCommand = RIOC_MEMDUMP; |
| 1441 | PktCmdP->SubAddr = SubCmd.Addr; | 1444 | PktCmdP->SubAddr = SubCmd.Addr; |
| 1442 | break; | 1445 | break; |
| 1443 | case FCLOSE: | 1446 | case RIOC_FCLOSE: |
| 1444 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", | 1447 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", |
| 1445 | CmdBlkP); | 1448 | CmdBlkP); |
| 1446 | break; | 1449 | break; |
| 1447 | case READ_REGISTER: | 1450 | case RIOC_READ_REGISTER: |
| 1448 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " | 1451 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " |
| 1449 | "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); | 1452 | "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); |
| 1450 | PktCmdP->SubCommand = READ_REGISTER; | 1453 | PktCmdP->SubCommand = RIOC_READ_REGISTER; |
| 1451 | PktCmdP->SubAddr = SubCmd.Addr; | 1454 | PktCmdP->SubAddr = SubCmd.Addr; |
| 1452 | break; | 1455 | break; |
| 1453 | case RESUME: | 1456 | case RIOC_RESUME: |
| 1454 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", | 1457 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", |
| 1455 | CmdBlkP); | 1458 | CmdBlkP); |
| 1456 | break; | 1459 | break; |
| 1457 | case RFLUSH: | 1460 | case RIOC_RFLUSH: |
| 1458 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", | 1461 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", |
| 1459 | CmdBlkP); | 1462 | CmdBlkP); |
| 1460 | CmdBlkP->PostFuncP = RIORFlushEnable; | 1463 | CmdBlkP->PostFuncP = RIORFlushEnable; |
| 1461 | break; | 1464 | break; |
| 1462 | case SUSPEND: | 1465 | case RIOC_SUSPEND: |
| 1463 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", | 1466 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", |
| 1464 | CmdBlkP); | 1467 | CmdBlkP); |
| 1465 | break; | 1468 | break; |
| 1466 | 1469 | ||
| 1467 | case MGET: | 1470 | case RIOC_MGET: |
| 1468 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", | 1471 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", |
| 1469 | CmdBlkP); | 1472 | CmdBlkP); |
| 1470 | break; | 1473 | break; |
| 1471 | 1474 | ||
| 1472 | case MSET: | 1475 | case RIOC_MSET: |
| 1473 | case MBIC: | 1476 | case RIOC_MBIC: |
| 1474 | case MBIS: | 1477 | case RIOC_MBIS: |
| 1475 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; | 1478 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; |
| 1476 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " | 1479 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " |
| 1477 | "blk %p\n", CmdBlkP); | 1480 | "blk %p\n", CmdBlkP); |
| 1478 | break; | 1481 | break; |
| 1479 | 1482 | ||
| 1480 | case WFLUSH: | 1483 | case RIOC_WFLUSH: |
| 1481 | /* | 1484 | /* |
| 1482 | ** If we have queued up the maximum number of Write flushes | 1485 | ** If we have queued up the maximum number of Write flushes |
| 1483 | ** allowed then we should not bother sending any more to the | 1486 | ** allowed then we should not bother sending any more to the |
diff --git a/drivers/char/rio/riointr.c b/drivers/char/rio/riointr.c index 4734e26e1ccd..ea21686c69a4 100644 --- a/drivers/char/rio/riointr.c +++ b/drivers/char/rio/riointr.c | |||
| @@ -401,9 +401,8 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP) | |||
| 401 | PortP->InUse = NOT_INUSE; | 401 | PortP->InUse = NOT_INUSE; |
| 402 | 402 | ||
| 403 | rio_spin_unlock(&PortP->portSem); | 403 | rio_spin_unlock(&PortP->portSem); |
| 404 | if (RIOParam(PortP, OPEN, ((PortP->Cor2Copy & (COR2_RTSFLOW | COR2_CTSFLOW)) == (COR2_RTSFLOW | COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL) { | 404 | if (RIOParam(PortP, RIOC_OPEN, ((PortP->Cor2Copy & (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) == (RIOC_COR2_RTSFLOW | RIOC_COR2_CTSFLOW)) ? 1 : 0, DONT_SLEEP) == RIO_FAIL) |
| 405 | continue; /* with next port */ | 405 | continue; /* with next port */ |
| 406 | } | ||
| 407 | rio_spin_lock(&PortP->portSem); | 406 | rio_spin_lock(&PortP->portSem); |
| 408 | PortP->MagicFlags &= ~MAGIC_REBOOT; | 407 | PortP->MagicFlags &= ~MAGIC_REBOOT; |
| 409 | } | 408 | } |
| @@ -429,7 +428,7 @@ void RIOServiceHost(struct rio_info *p, struct Host *HostP) | |||
| 429 | */ | 428 | */ |
| 430 | PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0]; | 429 | PktCmdP = (struct PktCmd __iomem *) &PacketP->data[0]; |
| 431 | 430 | ||
| 432 | writeb(WFLUSH, &PktCmdP->Command); | 431 | writeb(RIOC_WFLUSH, &PktCmdP->Command); |
| 433 | 432 | ||
| 434 | p = PortP->HostPort % (u16) PORTS_PER_RTA; | 433 | p = PortP->HostPort % (u16) PORTS_PER_RTA; |
| 435 | 434 | ||
diff --git a/drivers/char/rio/rioparam.c b/drivers/char/rio/rioparam.c index da276ed57b3f..4810b845cc21 100644 --- a/drivers/char/rio/rioparam.c +++ b/drivers/char/rio/rioparam.c | |||
| @@ -177,7 +177,7 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 177 | } | 177 | } |
| 178 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 178 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 179 | 179 | ||
| 180 | if (cmd == OPEN) { | 180 | if (cmd == RIOC_OPEN) { |
| 181 | /* | 181 | /* |
| 182 | ** If the port is set to store or lock the parameters, and it is | 182 | ** If the port is set to store or lock the parameters, and it is |
| 183 | ** paramed with OPEN, we want to restore the saved port termio, but | 183 | ** paramed with OPEN, we want to restore the saved port termio, but |
| @@ -241,50 +241,50 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 241 | case CS5: | 241 | case CS5: |
| 242 | { | 242 | { |
| 243 | rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n"); | 243 | rio_dprintk(RIO_DEBUG_PARAM, "5 bit data\n"); |
| 244 | Cor1 |= COR1_5BITS; | 244 | Cor1 |= RIOC_COR1_5BITS; |
| 245 | break; | 245 | break; |
| 246 | } | 246 | } |
| 247 | case CS6: | 247 | case CS6: |
| 248 | { | 248 | { |
| 249 | rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n"); | 249 | rio_dprintk(RIO_DEBUG_PARAM, "6 bit data\n"); |
| 250 | Cor1 |= COR1_6BITS; | 250 | Cor1 |= RIOC_COR1_6BITS; |
| 251 | break; | 251 | break; |
| 252 | } | 252 | } |
| 253 | case CS7: | 253 | case CS7: |
| 254 | { | 254 | { |
| 255 | rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n"); | 255 | rio_dprintk(RIO_DEBUG_PARAM, "7 bit data\n"); |
| 256 | Cor1 |= COR1_7BITS; | 256 | Cor1 |= RIOC_COR1_7BITS; |
| 257 | break; | 257 | break; |
| 258 | } | 258 | } |
| 259 | case CS8: | 259 | case CS8: |
| 260 | { | 260 | { |
| 261 | rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n"); | 261 | rio_dprintk(RIO_DEBUG_PARAM, "8 bit data\n"); |
| 262 | Cor1 |= COR1_8BITS; | 262 | Cor1 |= RIOC_COR1_8BITS; |
| 263 | break; | 263 | break; |
| 264 | } | 264 | } |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | if (TtyP->termios->c_cflag & CSTOPB) { | 267 | if (TtyP->termios->c_cflag & CSTOPB) { |
| 268 | rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n"); | 268 | rio_dprintk(RIO_DEBUG_PARAM, "2 stop bits\n"); |
| 269 | Cor1 |= COR1_2STOP; | 269 | Cor1 |= RIOC_COR1_2STOP; |
| 270 | } else { | 270 | } else { |
| 271 | rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n"); | 271 | rio_dprintk(RIO_DEBUG_PARAM, "1 stop bit\n"); |
| 272 | Cor1 |= COR1_1STOP; | 272 | Cor1 |= RIOC_COR1_1STOP; |
| 273 | } | 273 | } |
| 274 | 274 | ||
| 275 | if (TtyP->termios->c_cflag & PARENB) { | 275 | if (TtyP->termios->c_cflag & PARENB) { |
| 276 | rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n"); | 276 | rio_dprintk(RIO_DEBUG_PARAM, "Enable parity\n"); |
| 277 | Cor1 |= COR1_NORMAL; | 277 | Cor1 |= RIOC_COR1_NORMAL; |
| 278 | } else { | 278 | } else { |
| 279 | rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n"); | 279 | rio_dprintk(RIO_DEBUG_PARAM, "Disable parity\n"); |
| 280 | Cor1 |= COR1_NOP; | 280 | Cor1 |= RIOC_COR1_NOP; |
| 281 | } | 281 | } |
| 282 | if (TtyP->termios->c_cflag & PARODD) { | 282 | if (TtyP->termios->c_cflag & PARODD) { |
| 283 | rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n"); | 283 | rio_dprintk(RIO_DEBUG_PARAM, "Odd parity\n"); |
| 284 | Cor1 |= COR1_ODD; | 284 | Cor1 |= RIOC_COR1_ODD; |
| 285 | } else { | 285 | } else { |
| 286 | rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n"); | 286 | rio_dprintk(RIO_DEBUG_PARAM, "Even parity\n"); |
| 287 | Cor1 |= COR1_EVEN; | 287 | Cor1 |= RIOC_COR1_EVEN; |
| 288 | } | 288 | } |
| 289 | 289 | ||
| 290 | /* | 290 | /* |
| @@ -292,11 +292,11 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 292 | */ | 292 | */ |
| 293 | if (TtyP->termios->c_iflag & IXON) { | 293 | if (TtyP->termios->c_iflag & IXON) { |
| 294 | rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n"); | 294 | rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop output control\n"); |
| 295 | Cor2 |= COR2_IXON; | 295 | Cor2 |= RIOC_COR2_IXON; |
| 296 | } else { | 296 | } else { |
| 297 | if (PortP->Config & RIO_IXON) { | 297 | if (PortP->Config & RIO_IXON) { |
| 298 | rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n"); | 298 | rio_dprintk(RIO_DEBUG_PARAM, "Force enable start/stop output control\n"); |
| 299 | Cor2 |= COR2_IXON; | 299 | Cor2 |= RIOC_COR2_IXON; |
| 300 | } else | 300 | } else |
| 301 | rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n"); | 301 | rio_dprintk(RIO_DEBUG_PARAM, "IXON has been disabled.\n"); |
| 302 | } | 302 | } |
| @@ -304,29 +304,29 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 304 | if (TtyP->termios->c_iflag & IXANY) { | 304 | if (TtyP->termios->c_iflag & IXANY) { |
| 305 | if (PortP->Config & RIO_IXANY) { | 305 | if (PortP->Config & RIO_IXANY) { |
| 306 | rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n"); | 306 | rio_dprintk(RIO_DEBUG_PARAM, "Enable any key to restart output\n"); |
| 307 | Cor2 |= COR2_IXANY; | 307 | Cor2 |= RIOC_COR2_IXANY; |
| 308 | } else | 308 | } else |
| 309 | rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n"); | 309 | rio_dprintk(RIO_DEBUG_PARAM, "IXANY has been disabled due to sanity reasons.\n"); |
| 310 | } | 310 | } |
| 311 | 311 | ||
| 312 | if (TtyP->termios->c_iflag & IXOFF) { | 312 | if (TtyP->termios->c_iflag & IXOFF) { |
| 313 | rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n"); | 313 | rio_dprintk(RIO_DEBUG_PARAM, "Enable start/stop input control 2\n"); |
| 314 | Cor2 |= COR2_IXOFF; | 314 | Cor2 |= RIOC_COR2_IXOFF; |
| 315 | } | 315 | } |
| 316 | 316 | ||
| 317 | if (TtyP->termios->c_cflag & HUPCL) { | 317 | if (TtyP->termios->c_cflag & HUPCL) { |
| 318 | rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n"); | 318 | rio_dprintk(RIO_DEBUG_PARAM, "Hangup on last close\n"); |
| 319 | Cor2 |= COR2_HUPCL; | 319 | Cor2 |= RIOC_COR2_HUPCL; |
| 320 | } | 320 | } |
| 321 | 321 | ||
| 322 | if (C_CRTSCTS(TtyP)) { | 322 | if (C_CRTSCTS(TtyP)) { |
| 323 | rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n"); | 323 | rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control enabled\n"); |
| 324 | Cor2 |= COR2_CTSFLOW; | 324 | Cor2 |= RIOC_COR2_CTSFLOW; |
| 325 | Cor2 |= COR2_RTSFLOW; | 325 | Cor2 |= RIOC_COR2_RTSFLOW; |
| 326 | } else { | 326 | } else { |
| 327 | rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n"); | 327 | rio_dprintk(RIO_DEBUG_PARAM, "Rx hardware flow control disabled\n"); |
| 328 | Cor2 &= ~COR2_CTSFLOW; | 328 | Cor2 &= ~RIOC_COR2_CTSFLOW; |
| 329 | Cor2 &= ~COR2_RTSFLOW; | 329 | Cor2 &= ~RIOC_COR2_RTSFLOW; |
| 330 | } | 330 | } |
| 331 | 331 | ||
| 332 | 332 | ||
| @@ -341,36 +341,36 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 341 | */ | 341 | */ |
| 342 | if (TtyP->termios->c_iflag & IGNBRK) { | 342 | if (TtyP->termios->c_iflag & IGNBRK) { |
| 343 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n"); | 343 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore break condition\n"); |
| 344 | Cor4 |= COR4_IGNBRK; | 344 | Cor4 |= RIOC_COR4_IGNBRK; |
| 345 | } | 345 | } |
| 346 | if (!(TtyP->termios->c_iflag & BRKINT)) { | 346 | if (!(TtyP->termios->c_iflag & BRKINT)) { |
| 347 | rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n"); | 347 | rio_dprintk(RIO_DEBUG_PARAM, "Break generates NULL condition\n"); |
| 348 | Cor4 |= COR4_NBRKINT; | 348 | Cor4 |= RIOC_COR4_NBRKINT; |
| 349 | } else { | 349 | } else { |
| 350 | rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n"); | 350 | rio_dprintk(RIO_DEBUG_PARAM, "Interrupt on break condition\n"); |
| 351 | } | 351 | } |
| 352 | 352 | ||
| 353 | if (TtyP->termios->c_iflag & INLCR) { | 353 | if (TtyP->termios->c_iflag & INLCR) { |
| 354 | rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n"); | 354 | rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage return on input\n"); |
| 355 | Cor4 |= COR4_INLCR; | 355 | Cor4 |= RIOC_COR4_INLCR; |
| 356 | } | 356 | } |
| 357 | 357 | ||
| 358 | if (TtyP->termios->c_iflag & IGNCR) { | 358 | if (TtyP->termios->c_iflag & IGNCR) { |
| 359 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n"); | 359 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore carriage return on input\n"); |
| 360 | Cor4 |= COR4_IGNCR; | 360 | Cor4 |= RIOC_COR4_IGNCR; |
| 361 | } | 361 | } |
| 362 | 362 | ||
| 363 | if (TtyP->termios->c_iflag & ICRNL) { | 363 | if (TtyP->termios->c_iflag & ICRNL) { |
| 364 | rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n"); | 364 | rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on input\n"); |
| 365 | Cor4 |= COR4_ICRNL; | 365 | Cor4 |= RIOC_COR4_ICRNL; |
| 366 | } | 366 | } |
| 367 | if (TtyP->termios->c_iflag & IGNPAR) { | 367 | if (TtyP->termios->c_iflag & IGNPAR) { |
| 368 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n"); | 368 | rio_dprintk(RIO_DEBUG_PARAM, "Ignore characters with parity errors\n"); |
| 369 | Cor4 |= COR4_IGNPAR; | 369 | Cor4 |= RIOC_COR4_IGNPAR; |
| 370 | } | 370 | } |
| 371 | if (TtyP->termios->c_iflag & PARMRK) { | 371 | if (TtyP->termios->c_iflag & PARMRK) { |
| 372 | rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n"); | 372 | rio_dprintk(RIO_DEBUG_PARAM, "Mark parity errors\n"); |
| 373 | Cor4 |= COR4_PARMRK; | 373 | Cor4 |= RIOC_COR4_PARMRK; |
| 374 | } | 374 | } |
| 375 | 375 | ||
| 376 | /* | 376 | /* |
| @@ -378,22 +378,22 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 378 | ** on reception of a config packet. | 378 | ** on reception of a config packet. |
| 379 | ** The download code handles the zero baud condition. | 379 | ** The download code handles the zero baud condition. |
| 380 | */ | 380 | */ |
| 381 | Cor4 |= COR4_RAISEMOD; | 381 | Cor4 |= RIOC_COR4_RAISEMOD; |
| 382 | 382 | ||
| 383 | /* | 383 | /* |
| 384 | ** COR 5 | 384 | ** COR 5 |
| 385 | */ | 385 | */ |
| 386 | 386 | ||
| 387 | Cor5 = COR5_CMOE; | 387 | Cor5 = RIOC_COR5_CMOE; |
| 388 | 388 | ||
| 389 | /* | 389 | /* |
| 390 | ** Set to monitor tbusy/tstop (or not). | 390 | ** Set to monitor tbusy/tstop (or not). |
| 391 | */ | 391 | */ |
| 392 | 392 | ||
| 393 | if (PortP->MonitorTstate) | 393 | if (PortP->MonitorTstate) |
| 394 | Cor5 |= COR5_TSTATE_ON; | 394 | Cor5 |= RIOC_COR5_TSTATE_ON; |
| 395 | else | 395 | else |
| 396 | Cor5 |= COR5_TSTATE_OFF; | 396 | Cor5 |= RIOC_COR5_TSTATE_OFF; |
| 397 | 397 | ||
| 398 | /* | 398 | /* |
| 399 | ** Could set LNE here if you wanted LNext processing. SVR4 will use it. | 399 | ** Could set LNE here if you wanted LNext processing. SVR4 will use it. |
| @@ -401,24 +401,24 @@ int RIOParam(struct Port *PortP, int cmd, int Modem, int SleepFlag) | |||
| 401 | if (TtyP->termios->c_iflag & ISTRIP) { | 401 | if (TtyP->termios->c_iflag & ISTRIP) { |
| 402 | rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n"); | 402 | rio_dprintk(RIO_DEBUG_PARAM, "Strip input characters\n"); |
| 403 | if (!(PortP->State & RIO_TRIAD_MODE)) { | 403 | if (!(PortP->State & RIO_TRIAD_MODE)) { |
| 404 | Cor5 |= COR5_ISTRIP; | 404 | Cor5 |= RIOC_COR5_ISTRIP; |
| 405 | } | 405 | } |
| 406 | } | 406 | } |
| 407 | 407 | ||
| 408 | if (TtyP->termios->c_oflag & ONLCR) { | 408 | if (TtyP->termios->c_oflag & ONLCR) { |
| 409 | rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n"); | 409 | rio_dprintk(RIO_DEBUG_PARAM, "Map newline to carriage-return, newline on output\n"); |
| 410 | if (PortP->CookMode == COOK_MEDIUM) | 410 | if (PortP->CookMode == COOK_MEDIUM) |
| 411 | Cor5 |= COR5_ONLCR; | 411 | Cor5 |= RIOC_COR5_ONLCR; |
| 412 | } | 412 | } |
| 413 | if (TtyP->termios->c_oflag & OCRNL) { | 413 | if (TtyP->termios->c_oflag & OCRNL) { |
| 414 | rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n"); | 414 | rio_dprintk(RIO_DEBUG_PARAM, "Map carriage return to newline on output\n"); |
| 415 | if (PortP->CookMode == COOK_MEDIUM) | 415 | if (PortP->CookMode == COOK_MEDIUM) |
| 416 | Cor5 |= COR5_OCRNL; | 416 | Cor5 |= RIOC_COR5_OCRNL; |
| 417 | } | 417 | } |
| 418 | if ((TtyP->termios->c_oflag & TABDLY) == TAB3) { | 418 | if ((TtyP->termios->c_oflag & TABDLY) == TAB3) { |
| 419 | rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n"); | 419 | rio_dprintk(RIO_DEBUG_PARAM, "Tab delay 3 set\n"); |
| 420 | if (PortP->CookMode == COOK_MEDIUM) | 420 | if (PortP->CookMode == COOK_MEDIUM) |
| 421 | Cor5 |= COR5_TAB3; | 421 | Cor5 |= RIOC_COR5_TAB3; |
| 422 | } | 422 | } |
| 423 | 423 | ||
| 424 | /* | 424 | /* |
diff --git a/drivers/char/rio/riotty.c b/drivers/char/rio/riotty.c index 1cb8580a161d..c99354843be1 100644 --- a/drivers/char/rio/riotty.c +++ b/drivers/char/rio/riotty.c | |||
| @@ -211,7 +211,7 @@ int riotopen(struct tty_struct *tty, struct file *filp) | |||
| 211 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n"); | 211 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for RIO_CLOSING to go away\n"); |
| 212 | if (repeat_this-- <= 0) { | 212 | if (repeat_this-- <= 0) { |
| 213 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); | 213 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); |
| 214 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 214 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 215 | retval = -EINTR; | 215 | retval = -EINTR; |
| 216 | goto bombout; | 216 | goto bombout; |
| 217 | } | 217 | } |
| @@ -264,7 +264,7 @@ int riotopen(struct tty_struct *tty, struct file *filp) | |||
| 264 | here. If I read the docs correctly the "open" | 264 | here. If I read the docs correctly the "open" |
| 265 | command piggybacks the parameters immediately. | 265 | command piggybacks the parameters immediately. |
| 266 | -- REW */ | 266 | -- REW */ |
| 267 | RIOParam(PortP, OPEN, 1, OK_TO_SLEEP); /* Open the port */ | 267 | RIOParam(PortP, RIOC_OPEN, 1, OK_TO_SLEEP); /* Open the port */ |
| 268 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 268 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 269 | 269 | ||
| 270 | /* | 270 | /* |
| @@ -275,7 +275,7 @@ int riotopen(struct tty_struct *tty, struct file *filp) | |||
| 275 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); | 275 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); |
| 276 | if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { | 276 | if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { |
| 277 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n"); | 277 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for open to finish broken by signal\n"); |
| 278 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 278 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 279 | func_exit(); | 279 | func_exit(); |
| 280 | return -EINTR; | 280 | return -EINTR; |
| 281 | } | 281 | } |
| @@ -297,7 +297,8 @@ int riotopen(struct tty_struct *tty, struct file *filp) | |||
| 297 | ** insert test for carrier here. -- ??? | 297 | ** insert test for carrier here. -- ??? |
| 298 | ** I already see that test here. What's the deal? -- REW | 298 | ** I already see that test here. What's the deal? -- REW |
| 299 | */ | 299 | */ |
| 300 | if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || (PortP->ModemState & MSVR1_CD)) { | 300 | if ((PortP->gs.tty->termios->c_cflag & CLOCAL) || |
| 301 | (PortP->ModemState & RIOC_MSVR1_CD)) { | ||
| 301 | rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort); | 302 | rio_dprintk(RIO_DEBUG_TTY, "open(%d) Modem carr on\n", SysPort); |
| 302 | /* | 303 | /* |
| 303 | tp->tm.c_state |= CARR_ON; | 304 | tp->tm.c_state |= CARR_ON; |
| @@ -325,7 +326,7 @@ int riotopen(struct tty_struct *tty, struct file *filp) | |||
| 325 | ** I think it's OK. -- REW | 326 | ** I think it's OK. -- REW |
| 326 | */ | 327 | */ |
| 327 | rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort); | 328 | rio_dprintk(RIO_DEBUG_TTY, "open(%d) sleeping for carr broken by signal\n", SysPort); |
| 328 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 329 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 329 | /* | 330 | /* |
| 330 | tp->tm.c_state &= ~WOPEN; | 331 | tp->tm.c_state &= ~WOPEN; |
| 331 | */ | 332 | */ |
| @@ -416,7 +417,7 @@ int riotclose(void *ptr) | |||
| 416 | */ | 417 | */ |
| 417 | PortP->State &= ~RIO_MOPEN; | 418 | PortP->State &= ~RIO_MOPEN; |
| 418 | PortP->State &= ~RIO_CARR_ON; | 419 | PortP->State &= ~RIO_CARR_ON; |
| 419 | PortP->ModemState &= ~MSVR1_CD; | 420 | PortP->ModemState &= ~RIOC_MSVR1_CD; |
| 420 | /* | 421 | /* |
| 421 | ** If the device was open as both a Modem and a tty line | 422 | ** If the device was open as both a Modem and a tty line |
| 422 | ** then we need to wimp out here, as the port has not really | 423 | ** then we need to wimp out here, as the port has not really |
| @@ -453,7 +454,7 @@ int riotclose(void *ptr) | |||
| 453 | if (repeat_this-- <= 0) { | 454 | if (repeat_this-- <= 0) { |
| 454 | rv = -EINTR; | 455 | rv = -EINTR; |
| 455 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); | 456 | rio_dprintk(RIO_DEBUG_TTY, "Waiting for not idle closed broken by signal\n"); |
| 456 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 457 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 457 | goto close_end; | 458 | goto close_end; |
| 458 | } | 459 | } |
| 459 | rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n"); | 460 | rio_dprintk(RIO_DEBUG_TTY, "Calling timeout to flush in closing\n"); |
| @@ -492,8 +493,8 @@ int riotclose(void *ptr) | |||
| 492 | /* Can't call RIOShortCommand with the port locked. */ | 493 | /* Can't call RIOShortCommand with the port locked. */ |
| 493 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); | 494 | rio_spin_unlock_irqrestore(&PortP->portSem, flags); |
| 494 | 495 | ||
| 495 | if (RIOShortCommand(p, PortP, CLOSE, 1, 0) == RIO_FAIL) { | 496 | if (RIOShortCommand(p, PortP, RIOC_CLOSE, 1, 0) == RIO_FAIL) { |
| 496 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 497 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 497 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 498 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 498 | goto close_end; | 499 | goto close_end; |
| 499 | } | 500 | } |
| @@ -503,7 +504,7 @@ int riotclose(void *ptr) | |||
| 503 | try--; | 504 | try--; |
| 504 | if (time_after(jiffies, end_time)) { | 505 | if (time_after(jiffies, end_time)) { |
| 505 | rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n"); | 506 | rio_dprintk(RIO_DEBUG_TTY, "Run out of tries - force the bugger shut!\n"); |
| 506 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 507 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 507 | break; | 508 | break; |
| 508 | } | 509 | } |
| 509 | rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN); | 510 | rio_dprintk(RIO_DEBUG_TTY, "Close: PortState:ISOPEN is %d\n", PortP->PortState & PORT_ISOPEN); |
| @@ -515,14 +516,14 @@ int riotclose(void *ptr) | |||
| 515 | } | 516 | } |
| 516 | if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { | 517 | if (RIODelay(PortP, HUNDRED_MS) == RIO_FAIL) { |
| 517 | rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n"); | 518 | rio_dprintk(RIO_DEBUG_TTY, "RTA EINTR in delay \n"); |
| 518 | RIOPreemptiveCmd(p, PortP, FCLOSE); | 519 | RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); |
| 519 | break; | 520 | break; |
| 520 | } | 521 | } |
| 521 | } | 522 | } |
| 522 | rio_spin_lock_irqsave(&PortP->portSem, flags); | 523 | rio_spin_lock_irqsave(&PortP->portSem, flags); |
| 523 | rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try); | 524 | rio_dprintk(RIO_DEBUG_TTY, "Close: try was %d on completion\n", try); |
| 524 | 525 | ||
| 525 | /* RIOPreemptiveCmd(p, PortP, FCLOSE); */ | 526 | /* RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE); */ |
| 526 | 527 | ||
| 527 | /* | 528 | /* |
| 528 | ** 15.10.1998 ARG - ESIL 0761 part fix | 529 | ** 15.10.1998 ARG - ESIL 0761 part fix |
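The rio hunks above apply one mechanical change: bare firmware-command and modem-status macros (FCLOSE, CLOSE, MSVR1_CD) are renamed to RIOC_-prefixed equivalents so they no longer collide with other kernel identifiers. A minimal C sketch of that pattern follows; the numeric values are placeholders for illustration only, not the real definitions from the rio headers, which are outside this diff.

	/* Hypothetical values -- illustration only; the real definitions live in the rio headers. */
	#define RIOC_FCLOSE	0x21	/* was FCLOSE: force a port closed */
	#define RIOC_CLOSE	0x22	/* was CLOSE: orderly close command */
	#define RIOC_MSVR1_CD	0x80	/* was MSVR1_CD: carrier-detect bit in ModemState */

	/* Call sites then use the prefixed names, as in the hunks above: */
	RIOPreemptiveCmd(p, PortP, RIOC_FCLOSE);
	if (PortP->ModemState & RIOC_MSVR1_CD)
		rio_dprintk(RIO_DEBUG_TTY, "carrier present\n");
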
diff --git a/drivers/char/riscom8.c b/drivers/char/riscom8.c index 3f9d0a9ac36d..f073c710ab8d 100644 --- a/drivers/char/riscom8.c +++ b/drivers/char/riscom8.c | |||
| @@ -4,9 +4,9 @@ | |||
| 4 | * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) | 4 | * Copyright (C) 1994-1996 Dmitry Gorodchanin (pgmdsg@ibi.com) |
| 5 | * | 5 | * |
| 6 | * This code is loosely based on the Linux serial driver, written by | 6 | * This code is loosely based on the Linux serial driver, written by |
| 7 | * Linus Torvalds, Theodore T'so and others. The RISCom/8 card | 7 | * Linus Torvalds, Theodore T'so and others. The RISCom/8 card |
| 8 | * programming info was obtained from various drivers for other OSes | 8 | * programming info was obtained from various drivers for other OSes |
| 9 | * (FreeBSD, ISC, etc), but no source code from those drivers were | 9 | * (FreeBSD, ISC, etc), but no source code from those drivers were |
| 10 | * directly included in this driver. | 10 | * directly included in this driver. |
| 11 | * | 11 | * |
| 12 | * | 12 | * |
| @@ -33,7 +33,7 @@ | |||
| 33 | 33 | ||
| 34 | #include <linux/module.h> | 34 | #include <linux/module.h> |
| 35 | 35 | ||
| 36 | #include <asm/io.h> | 36 | #include <linux/io.h> |
| 37 | #include <linux/kernel.h> | 37 | #include <linux/kernel.h> |
| 38 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
| 39 | #include <linux/ioport.h> | 39 | #include <linux/ioport.h> |
| @@ -49,7 +49,7 @@ | |||
| 49 | #include <linux/tty_flip.h> | 49 | #include <linux/tty_flip.h> |
| 50 | #include <linux/spinlock.h> | 50 | #include <linux/spinlock.h> |
| 51 | 51 | ||
| 52 | #include <asm/uaccess.h> | 52 | #include <linux/uaccess.h> |
| 53 | 53 | ||
| 54 | #include "riscom8.h" | 54 | #include "riscom8.h" |
| 55 | #include "riscom8_reg.h" | 55 | #include "riscom8_reg.h" |
| @@ -57,15 +57,15 @@ | |||
| 57 | /* Am I paranoid or not ? ;-) */ | 57 | /* Am I paranoid or not ? ;-) */ |
| 58 | #define RISCOM_PARANOIA_CHECK | 58 | #define RISCOM_PARANOIA_CHECK |
| 59 | 59 | ||
| 60 | /* | 60 | /* |
| 61 | * Crazy InteliCom/8 boards sometimes has swapped CTS & DSR signals. | 61 | * Crazy InteliCom/8 boards sometimes have swapped CTS & DSR signals. |
| 62 | * You can slightly speed up things by #undefing the following option, | 62 | * You can slightly speed up things by #undefing the following option, |
| 63 | * if you are REALLY sure that your board is correct one. | 63 | * if you are REALLY sure that your board is correct one. |
| 64 | */ | 64 | */ |
| 65 | 65 | ||
| 66 | #define RISCOM_BRAIN_DAMAGED_CTS | 66 | #define RISCOM_BRAIN_DAMAGED_CTS |
| 67 | 67 | ||
| 68 | /* | 68 | /* |
| 69 | * The following defines are mostly for testing purposes. But if you need | 69 | * The following defines are mostly for testing purposes. But if you need |
| 70 | * some nice reporting in your syslog, you can define them also. | 70 | * some nice reporting in your syslog, you can define them also. |
| 71 | */ | 71 | */ |
| @@ -112,7 +112,7 @@ static unsigned short rc_ioport[] = { | |||
| 112 | #define RC_NIOPORT ARRAY_SIZE(rc_ioport) | 112 | #define RC_NIOPORT ARRAY_SIZE(rc_ioport) |
| 113 | 113 | ||
| 114 | 114 | ||
| 115 | static inline int rc_paranoia_check(struct riscom_port const * port, | 115 | static int rc_paranoia_check(struct riscom_port const *port, |
| 116 | char *name, const char *routine) | 116 | char *name, const char *routine) |
| 117 | { | 117 | { |
| 118 | #ifdef RISCOM_PARANOIA_CHECK | 118 | #ifdef RISCOM_PARANOIA_CHECK |
| @@ -134,52 +134,53 @@ static inline int rc_paranoia_check(struct riscom_port const * port, | |||
| 134 | } | 134 | } |
| 135 | 135 | ||
| 136 | /* | 136 | /* |
| 137 | * | 137 | * |
| 138 | * Service functions for RISCom/8 driver. | 138 | * Service functions for RISCom/8 driver. |
| 139 | * | 139 | * |
| 140 | */ | 140 | */ |
| 141 | 141 | ||
| 142 | /* Get board number from pointer */ | 142 | /* Get board number from pointer */ |
| 143 | static inline int board_No (struct riscom_board const * bp) | 143 | static inline int board_No(struct riscom_board const *bp) |
| 144 | { | 144 | { |
| 145 | return bp - rc_board; | 145 | return bp - rc_board; |
| 146 | } | 146 | } |
| 147 | 147 | ||
| 148 | /* Get port number from pointer */ | 148 | /* Get port number from pointer */ |
| 149 | static inline int port_No (struct riscom_port const * port) | 149 | static inline int port_No(struct riscom_port const *port) |
| 150 | { | 150 | { |
| 151 | return RC_PORT(port - rc_port); | 151 | return RC_PORT(port - rc_port); |
| 152 | } | 152 | } |
| 153 | 153 | ||
| 154 | /* Get pointer to board from pointer to port */ | 154 | /* Get pointer to board from pointer to port */ |
| 155 | static inline struct riscom_board * port_Board(struct riscom_port const * port) | 155 | static inline struct riscom_board *port_Board(struct riscom_port const *port) |
| 156 | { | 156 | { |
| 157 | return &rc_board[RC_BOARD(port - rc_port)]; | 157 | return &rc_board[RC_BOARD(port - rc_port)]; |
| 158 | } | 158 | } |
| 159 | 159 | ||
| 160 | /* Input Byte from CL CD180 register */ | 160 | /* Input Byte from CL CD180 register */ |
| 161 | static inline unsigned char rc_in(struct riscom_board const * bp, unsigned short reg) | 161 | static inline unsigned char rc_in(struct riscom_board const *bp, |
| 162 | unsigned short reg) | ||
| 162 | { | 163 | { |
| 163 | return inb(bp->base + RC_TO_ISA(reg)); | 164 | return inb(bp->base + RC_TO_ISA(reg)); |
| 164 | } | 165 | } |
| 165 | 166 | ||
| 166 | /* Output Byte to CL CD180 register */ | 167 | /* Output Byte to CL CD180 register */ |
| 167 | static inline void rc_out(struct riscom_board const * bp, unsigned short reg, | 168 | static inline void rc_out(struct riscom_board const *bp, unsigned short reg, |
| 168 | unsigned char val) | 169 | unsigned char val) |
| 169 | { | 170 | { |
| 170 | outb(val, bp->base + RC_TO_ISA(reg)); | 171 | outb(val, bp->base + RC_TO_ISA(reg)); |
| 171 | } | 172 | } |
| 172 | 173 | ||
| 173 | /* Wait for Channel Command Register ready */ | 174 | /* Wait for Channel Command Register ready */ |
| 174 | static inline void rc_wait_CCR(struct riscom_board const * bp) | 175 | static void rc_wait_CCR(struct riscom_board const *bp) |
| 175 | { | 176 | { |
| 176 | unsigned long delay; | 177 | unsigned long delay; |
| 177 | 178 | ||
| 178 | /* FIXME: need something more descriptive then 100000 :) */ | 179 | /* FIXME: need something more descriptive then 100000 :) */ |
| 179 | for (delay = 100000; delay; delay--) | 180 | for (delay = 100000; delay; delay--) |
| 180 | if (!rc_in(bp, CD180_CCR)) | 181 | if (!rc_in(bp, CD180_CCR)) |
| 181 | return; | 182 | return; |
| 182 | 183 | ||
| 183 | printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp)); | 184 | printk(KERN_INFO "rc%d: Timeout waiting for CCR.\n", board_No(bp)); |
| 184 | } | 185 | } |
| 185 | 186 | ||
| @@ -187,11 +188,11 @@ static inline void rc_wait_CCR(struct riscom_board const * bp) | |||
| 187 | * RISCom/8 probe functions. | 188 | * RISCom/8 probe functions. |
| 188 | */ | 189 | */ |
| 189 | 190 | ||
| 190 | static inline int rc_request_io_range(struct riscom_board * const bp) | 191 | static int rc_request_io_range(struct riscom_board * const bp) |
| 191 | { | 192 | { |
| 192 | int i; | 193 | int i; |
| 193 | 194 | ||
| 194 | for (i = 0; i < RC_NIOPORT; i++) | 195 | for (i = 0; i < RC_NIOPORT; i++) |
| 195 | if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1, | 196 | if (!request_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1, |
| 196 | "RISCom/8")) { | 197 | "RISCom/8")) { |
| 197 | goto out_release; | 198 | goto out_release; |
| @@ -200,42 +201,42 @@ static inline int rc_request_io_range(struct riscom_board * const bp) | |||
| 200 | out_release: | 201 | out_release: |
| 201 | printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n", | 202 | printk(KERN_INFO "rc%d: Skipping probe at 0x%03x. IO address in use.\n", |
| 202 | board_No(bp), bp->base); | 203 | board_No(bp), bp->base); |
| 203 | while(--i >= 0) | 204 | while (--i >= 0) |
| 204 | release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); | 205 | release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); |
| 205 | return 1; | 206 | return 1; |
| 206 | } | 207 | } |
| 207 | 208 | ||
| 208 | static inline void rc_release_io_range(struct riscom_board * const bp) | 209 | static void rc_release_io_range(struct riscom_board * const bp) |
| 209 | { | 210 | { |
| 210 | int i; | 211 | int i; |
| 211 | 212 | ||
| 212 | for (i = 0; i < RC_NIOPORT; i++) | 213 | for (i = 0; i < RC_NIOPORT; i++) |
| 213 | release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); | 214 | release_region(RC_TO_ISA(rc_ioport[i]) + bp->base, 1); |
| 214 | } | 215 | } |
| 215 | 216 | ||
| 216 | /* Reset and setup CD180 chip */ | 217 | /* Reset and setup CD180 chip */ |
| 217 | static void __init rc_init_CD180(struct riscom_board const * bp) | 218 | static void __init rc_init_CD180(struct riscom_board const *bp) |
| 218 | { | 219 | { |
| 219 | unsigned long flags; | 220 | unsigned long flags; |
| 220 | 221 | ||
| 221 | spin_lock_irqsave(&riscom_lock, flags); | 222 | spin_lock_irqsave(&riscom_lock, flags); |
| 222 | 223 | ||
| 223 | rc_out(bp, RC_CTOUT, 0); /* Clear timeout */ | 224 | rc_out(bp, RC_CTOUT, 0); /* Clear timeout */ |
| 224 | rc_wait_CCR(bp); /* Wait for CCR ready */ | 225 | rc_wait_CCR(bp); /* Wait for CCR ready */ |
| 225 | rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */ | 226 | rc_out(bp, CD180_CCR, CCR_HARDRESET); /* Reset CD180 chip */ |
| 226 | spin_unlock_irqrestore(&riscom_lock, flags); | 227 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 227 | msleep(50); /* Delay 0.05 sec */ | 228 | msleep(50); /* Delay 0.05 sec */ |
| 228 | spin_lock_irqsave(&riscom_lock, flags); | 229 | spin_lock_irqsave(&riscom_lock, flags); |
| 229 | rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */ | 230 | rc_out(bp, CD180_GIVR, RC_ID); /* Set ID for this chip */ |
| 230 | rc_out(bp, CD180_GICR, 0); /* Clear all bits */ | 231 | rc_out(bp, CD180_GICR, 0); /* Clear all bits */ |
| 231 | rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */ | 232 | rc_out(bp, CD180_PILR1, RC_ACK_MINT); /* Prio for modem intr */ |
| 232 | rc_out(bp, CD180_PILR2, RC_ACK_TINT); /* Prio for transmitter intr */ | 233 | rc_out(bp, CD180_PILR2, RC_ACK_TINT); /* Prio for tx intr */ |
| 233 | rc_out(bp, CD180_PILR3, RC_ACK_RINT); /* Prio for receiver intr */ | 234 | rc_out(bp, CD180_PILR3, RC_ACK_RINT); /* Prio for rx intr */ |
| 234 | 235 | ||
| 235 | /* Setting up prescaler. We need 4 ticks per 1 ms */ | 236 | /* Setting up prescaler. We need 4 ticks per 1 ms */ |
| 236 | rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8); | 237 | rc_out(bp, CD180_PPRH, (RC_OSCFREQ/(1000000/RISCOM_TPS)) >> 8); |
| 237 | rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff); | 238 | rc_out(bp, CD180_PPRL, (RC_OSCFREQ/(1000000/RISCOM_TPS)) & 0xff); |
| 238 | 239 | ||
| 239 | spin_unlock_irqrestore(&riscom_lock, flags); | 240 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 240 | } | 241 | } |
| 241 | 242 | ||
| @@ -245,12 +246,12 @@ static int __init rc_probe(struct riscom_board *bp) | |||
| 245 | unsigned char val1, val2; | 246 | unsigned char val1, val2; |
| 246 | int irqs = 0; | 247 | int irqs = 0; |
| 247 | int retries; | 248 | int retries; |
| 248 | 249 | ||
| 249 | bp->irq = 0; | 250 | bp->irq = 0; |
| 250 | 251 | ||
| 251 | if (rc_request_io_range(bp)) | 252 | if (rc_request_io_range(bp)) |
| 252 | return 1; | 253 | return 1; |
| 253 | 254 | ||
| 254 | /* Are the I/O ports here ? */ | 255 | /* Are the I/O ports here ? */ |
| 255 | rc_out(bp, CD180_PPRL, 0x5a); | 256 | rc_out(bp, CD180_PPRL, 0x5a); |
| 256 | outb(0xff, 0x80); | 257 | outb(0xff, 0x80); |
| @@ -258,34 +259,34 @@ static int __init rc_probe(struct riscom_board *bp) | |||
| 258 | rc_out(bp, CD180_PPRL, 0xa5); | 259 | rc_out(bp, CD180_PPRL, 0xa5); |
| 259 | outb(0x00, 0x80); | 260 | outb(0x00, 0x80); |
| 260 | val2 = rc_in(bp, CD180_PPRL); | 261 | val2 = rc_in(bp, CD180_PPRL); |
| 261 | 262 | ||
| 262 | if ((val1 != 0x5a) || (val2 != 0xa5)) { | 263 | if ((val1 != 0x5a) || (val2 != 0xa5)) { |
| 263 | printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n", | 264 | printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not found.\n", |
| 264 | board_No(bp), bp->base); | 265 | board_No(bp), bp->base); |
| 265 | goto out_release; | 266 | goto out_release; |
| 266 | } | 267 | } |
| 267 | 268 | ||
| 268 | /* It's time to find IRQ for this board */ | 269 | /* It's time to find IRQ for this board */ |
| 269 | for (retries = 0; retries < 5 && irqs <= 0; retries++) { | 270 | for (retries = 0; retries < 5 && irqs <= 0; retries++) { |
| 270 | irqs = probe_irq_on(); | 271 | irqs = probe_irq_on(); |
| 271 | rc_init_CD180(bp); /* Reset CD180 chip */ | 272 | rc_init_CD180(bp); /* Reset CD180 chip */ |
| 272 | rc_out(bp, CD180_CAR, 2); /* Select port 2 */ | 273 | rc_out(bp, CD180_CAR, 2); /* Select port 2 */ |
| 273 | rc_wait_CCR(bp); | 274 | rc_wait_CCR(bp); |
| 274 | rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */ | 275 | rc_out(bp, CD180_CCR, CCR_TXEN); /* Enable transmitter */ |
| 275 | rc_out(bp, CD180_IER, IER_TXRDY); /* Enable tx empty intr */ | 276 | rc_out(bp, CD180_IER, IER_TXRDY);/* Enable tx empty intr */ |
| 276 | msleep(50); | 277 | msleep(50); |
| 277 | irqs = probe_irq_off(irqs); | 278 | irqs = probe_irq_off(irqs); |
| 278 | val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */ | 279 | val1 = rc_in(bp, RC_BSR); /* Get Board Status reg */ |
| 279 | val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */ | 280 | val2 = rc_in(bp, RC_ACK_TINT); /* ACK interrupt */ |
| 280 | rc_init_CD180(bp); /* Reset CD180 again */ | 281 | rc_init_CD180(bp); /* Reset CD180 again */ |
| 281 | 282 | ||
| 282 | if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX))) { | 283 | if ((val1 & RC_BSR_TINT) || (val2 != (RC_ID | GIVR_IT_TX))) { |
| 283 | printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not " | 284 | printk(KERN_ERR "rc%d: RISCom/8 Board at 0x%03x not " |
| 284 | "found.\n", board_No(bp), bp->base); | 285 | "found.\n", board_No(bp), bp->base); |
| 285 | goto out_release; | 286 | goto out_release; |
| 286 | } | 287 | } |
| 287 | } | 288 | } |
| 288 | 289 | ||
| 289 | if (irqs <= 0) { | 290 | if (irqs <= 0) { |
| 290 | printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board " | 291 | printk(KERN_ERR "rc%d: Can't find IRQ for RISCom/8 board " |
| 291 | "at 0x%03x.\n", board_No(bp), bp->base); | 292 | "at 0x%03x.\n", board_No(bp), bp->base); |
| @@ -293,113 +294,112 @@ static int __init rc_probe(struct riscom_board *bp) | |||
| 293 | } | 294 | } |
| 294 | bp->irq = irqs; | 295 | bp->irq = irqs; |
| 295 | bp->flags |= RC_BOARD_PRESENT; | 296 | bp->flags |= RC_BOARD_PRESENT; |
| 296 | 297 | ||
| 297 | printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at " | 298 | printk(KERN_INFO "rc%d: RISCom/8 Rev. %c board detected at " |
| 298 | "0x%03x, IRQ %d.\n", | 299 | "0x%03x, IRQ %d.\n", |
| 299 | board_No(bp), | 300 | board_No(bp), |
| 300 | (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A', /* Board revision */ | 301 | (rc_in(bp, CD180_GFRCR) & 0x0f) + 'A', /* Board revision */ |
| 301 | bp->base, bp->irq); | 302 | bp->base, bp->irq); |
| 302 | 303 | ||
| 303 | return 0; | 304 | return 0; |
| 304 | out_release: | 305 | out_release: |
| 305 | rc_release_io_range(bp); | 306 | rc_release_io_range(bp); |
| 306 | return 1; | 307 | return 1; |
| 307 | } | 308 | } |
| 308 | 309 | ||
| 309 | /* | 310 | /* |
| 310 | * | 311 | * |
| 311 | * Interrupt processing routines. | 312 | * Interrupt processing routines. |
| 312 | * | 313 | * |
| 313 | */ | 314 | */ |
| 314 | 315 | ||
| 315 | static inline struct riscom_port * rc_get_port(struct riscom_board const * bp, | 316 | static struct riscom_port *rc_get_port(struct riscom_board const *bp, |
| 316 | unsigned char const * what) | 317 | unsigned char const *what) |
| 317 | { | 318 | { |
| 318 | unsigned char channel; | 319 | unsigned char channel; |
| 319 | struct riscom_port * port; | 320 | struct riscom_port *port; |
| 320 | 321 | ||
| 321 | channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF; | 322 | channel = rc_in(bp, CD180_GICR) >> GICR_CHAN_OFF; |
| 322 | if (channel < CD180_NCH) { | 323 | if (channel < CD180_NCH) { |
| 323 | port = &rc_port[board_No(bp) * RC_NPORT + channel]; | 324 | port = &rc_port[board_No(bp) * RC_NPORT + channel]; |
| 324 | if (port->flags & ASYNC_INITIALIZED) { | 325 | if (port->flags & ASYNC_INITIALIZED) |
| 325 | return port; | 326 | return port; |
| 326 | } | ||
| 327 | } | 327 | } |
| 328 | printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n", | 328 | printk(KERN_ERR "rc%d: %s interrupt from invalid port %d\n", |
| 329 | board_No(bp), what, channel); | 329 | board_No(bp), what, channel); |
| 330 | return NULL; | 330 | return NULL; |
| 331 | } | 331 | } |
| 332 | 332 | ||
| 333 | static inline void rc_receive_exc(struct riscom_board const * bp) | 333 | static void rc_receive_exc(struct riscom_board const *bp) |
| 334 | { | 334 | { |
| 335 | struct riscom_port *port; | 335 | struct riscom_port *port; |
| 336 | struct tty_struct *tty; | 336 | struct tty_struct *tty; |
| 337 | unsigned char status; | 337 | unsigned char status; |
| 338 | unsigned char ch, flag; | 338 | unsigned char ch, flag; |
| 339 | 339 | ||
| 340 | if (!(port = rc_get_port(bp, "Receive"))) | 340 | port = rc_get_port(bp, "Receive"); |
| 341 | if (port == NULL) | ||
| 341 | return; | 342 | return; |
| 342 | 343 | ||
| 343 | tty = port->tty; | 344 | tty = port->tty; |
| 344 | 345 | ||
| 345 | #ifdef RC_REPORT_OVERRUN | 346 | #ifdef RC_REPORT_OVERRUN |
| 346 | status = rc_in(bp, CD180_RCSR); | 347 | status = rc_in(bp, CD180_RCSR); |
| 347 | if (status & RCSR_OE) | 348 | if (status & RCSR_OE) |
| 348 | port->overrun++; | 349 | port->overrun++; |
| 349 | status &= port->mark_mask; | 350 | status &= port->mark_mask; |
| 350 | #else | 351 | #else |
| 351 | status = rc_in(bp, CD180_RCSR) & port->mark_mask; | 352 | status = rc_in(bp, CD180_RCSR) & port->mark_mask; |
| 352 | #endif | 353 | #endif |
| 353 | ch = rc_in(bp, CD180_RDR); | 354 | ch = rc_in(bp, CD180_RDR); |
| 354 | if (!status) { | 355 | if (!status) |
| 355 | return; | 356 | return; |
| 356 | } | ||
| 357 | if (status & RCSR_TOUT) { | 357 | if (status & RCSR_TOUT) { |
| 358 | printk(KERN_WARNING "rc%d: port %d: Receiver timeout. " | 358 | printk(KERN_WARNING "rc%d: port %d: Receiver timeout. " |
| 359 | "Hardware problems ?\n", | 359 | "Hardware problems ?\n", |
| 360 | board_No(bp), port_No(port)); | 360 | board_No(bp), port_No(port)); |
| 361 | return; | 361 | return; |
| 362 | 362 | ||
| 363 | } else if (status & RCSR_BREAK) { | 363 | } else if (status & RCSR_BREAK) { |
| 364 | printk(KERN_INFO "rc%d: port %d: Handling break...\n", | 364 | printk(KERN_INFO "rc%d: port %d: Handling break...\n", |
| 365 | board_No(bp), port_No(port)); | 365 | board_No(bp), port_No(port)); |
| 366 | flag = TTY_BREAK; | 366 | flag = TTY_BREAK; |
| 367 | if (port->flags & ASYNC_SAK) | 367 | if (port->flags & ASYNC_SAK) |
| 368 | do_SAK(tty); | 368 | do_SAK(tty); |
| 369 | 369 | ||
| 370 | } else if (status & RCSR_PE) | 370 | } else if (status & RCSR_PE) |
| 371 | flag = TTY_PARITY; | 371 | flag = TTY_PARITY; |
| 372 | 372 | ||
| 373 | else if (status & RCSR_FE) | 373 | else if (status & RCSR_FE) |
| 374 | flag = TTY_FRAME; | 374 | flag = TTY_FRAME; |
| 375 | 375 | ||
| 376 | else if (status & RCSR_OE) | 376 | else if (status & RCSR_OE) |
| 377 | flag = TTY_OVERRUN; | 377 | flag = TTY_OVERRUN; |
| 378 | |||
| 379 | else | 378 | else |
| 380 | flag = TTY_NORMAL; | 379 | flag = TTY_NORMAL; |
| 381 | 380 | ||
| 382 | tty_insert_flip_char(tty, ch, flag); | 381 | tty_insert_flip_char(tty, ch, flag); |
| 383 | tty_flip_buffer_push(tty); | 382 | tty_flip_buffer_push(tty); |
| 384 | } | 383 | } |
| 385 | 384 | ||
| 386 | static inline void rc_receive(struct riscom_board const * bp) | 385 | static void rc_receive(struct riscom_board const *bp) |
| 387 | { | 386 | { |
| 388 | struct riscom_port *port; | 387 | struct riscom_port *port; |
| 389 | struct tty_struct *tty; | 388 | struct tty_struct *tty; |
| 390 | unsigned char count; | 389 | unsigned char count; |
| 391 | 390 | ||
| 392 | if (!(port = rc_get_port(bp, "Receive"))) | 391 | port = rc_get_port(bp, "Receive"); |
| 392 | if (port == NULL) | ||
| 393 | return; | 393 | return; |
| 394 | 394 | ||
| 395 | tty = port->tty; | 395 | tty = port->tty; |
| 396 | 396 | ||
| 397 | count = rc_in(bp, CD180_RDCR); | 397 | count = rc_in(bp, CD180_RDCR); |
| 398 | 398 | ||
| 399 | #ifdef RC_REPORT_FIFO | 399 | #ifdef RC_REPORT_FIFO |
| 400 | port->hits[count > 8 ? 9 : count]++; | 400 | port->hits[count > 8 ? 9 : count]++; |
| 401 | #endif | 401 | #endif |
| 402 | 402 | ||
| 403 | while (count--) { | 403 | while (count--) { |
| 404 | if (tty_buffer_request_room(tty, 1) == 0) { | 404 | if (tty_buffer_request_room(tty, 1) == 0) { |
| 405 | printk(KERN_WARNING "rc%d: port %d: Working around " | 405 | printk(KERN_WARNING "rc%d: port %d: Working around " |
| @@ -412,26 +412,26 @@ static inline void rc_receive(struct riscom_board const * bp) | |||
| 412 | tty_flip_buffer_push(tty); | 412 | tty_flip_buffer_push(tty); |
| 413 | } | 413 | } |
| 414 | 414 | ||
| 415 | static inline void rc_transmit(struct riscom_board const * bp) | 415 | static void rc_transmit(struct riscom_board const *bp) |
| 416 | { | 416 | { |
| 417 | struct riscom_port *port; | 417 | struct riscom_port *port; |
| 418 | struct tty_struct *tty; | 418 | struct tty_struct *tty; |
| 419 | unsigned char count; | 419 | unsigned char count; |
| 420 | 420 | ||
| 421 | 421 | port = rc_get_port(bp, "Transmit"); | |
| 422 | if (!(port = rc_get_port(bp, "Transmit"))) | 422 | if (port == NULL) |
| 423 | return; | 423 | return; |
| 424 | 424 | ||
| 425 | tty = port->tty; | 425 | tty = port->tty; |
| 426 | 426 | ||
| 427 | if (port->IER & IER_TXEMPTY) { | 427 | if (port->IER & IER_TXEMPTY) { |
| 428 | /* FIFO drained */ | 428 | /* FIFO drained */ |
| 429 | rc_out(bp, CD180_CAR, port_No(port)); | 429 | rc_out(bp, CD180_CAR, port_No(port)); |
| 430 | port->IER &= ~IER_TXEMPTY; | 430 | port->IER &= ~IER_TXEMPTY; |
| 431 | rc_out(bp, CD180_IER, port->IER); | 431 | rc_out(bp, CD180_IER, port->IER); |
| 432 | return; | 432 | return; |
| 433 | } | 433 | } |
| 434 | 434 | ||
| 435 | if ((port->xmit_cnt <= 0 && !port->break_length) | 435 | if ((port->xmit_cnt <= 0 && !port->break_length) |
| 436 | || tty->stopped || tty->hw_stopped) { | 436 | || tty->stopped || tty->hw_stopped) { |
| 437 | rc_out(bp, CD180_CAR, port_No(port)); | 437 | rc_out(bp, CD180_CAR, port_No(port)); |
| @@ -439,7 +439,7 @@ static inline void rc_transmit(struct riscom_board const * bp) | |||
| 439 | rc_out(bp, CD180_IER, port->IER); | 439 | rc_out(bp, CD180_IER, port->IER); |
| 440 | return; | 440 | return; |
| 441 | } | 441 | } |
| 442 | 442 | ||
| 443 | if (port->break_length) { | 443 | if (port->break_length) { |
| 444 | if (port->break_length > 0) { | 444 | if (port->break_length > 0) { |
| 445 | if (port->COR2 & COR2_ETC) { | 445 | if (port->COR2 & COR2_ETC) { |
| @@ -451,7 +451,8 @@ static inline void rc_transmit(struct riscom_board const * bp) | |||
| 451 | rc_out(bp, CD180_TDR, CD180_C_ESC); | 451 | rc_out(bp, CD180_TDR, CD180_C_ESC); |
| 452 | rc_out(bp, CD180_TDR, CD180_C_DELAY); | 452 | rc_out(bp, CD180_TDR, CD180_C_DELAY); |
| 453 | rc_out(bp, CD180_TDR, count); | 453 | rc_out(bp, CD180_TDR, count); |
| 454 | if (!(port->break_length -= count)) | 454 | port->break_length -= count; |
| 455 | if (port->break_length == 0) | ||
| 455 | port->break_length--; | 456 | port->break_length--; |
| 456 | } else { | 457 | } else { |
| 457 | rc_out(bp, CD180_TDR, CD180_C_ESC); | 458 | rc_out(bp, CD180_TDR, CD180_C_ESC); |
| @@ -463,7 +464,7 @@ static inline void rc_transmit(struct riscom_board const * bp) | |||
| 463 | } | 464 | } |
| 464 | return; | 465 | return; |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | count = CD180_NFIFO; | 468 | count = CD180_NFIFO; |
| 468 | do { | 469 | do { |
| 469 | rc_out(bp, CD180_TDR, port->xmit_buf[port->xmit_tail++]); | 470 | rc_out(bp, CD180_TDR, port->xmit_buf[port->xmit_tail++]); |
| @@ -471,7 +472,7 @@ static inline void rc_transmit(struct riscom_board const * bp) | |||
| 471 | if (--port->xmit_cnt <= 0) | 472 | if (--port->xmit_cnt <= 0) |
| 472 | break; | 473 | break; |
| 473 | } while (--count > 0); | 474 | } while (--count > 0); |
| 474 | 475 | ||
| 475 | if (port->xmit_cnt <= 0) { | 476 | if (port->xmit_cnt <= 0) { |
| 476 | rc_out(bp, CD180_CAR, port_No(port)); | 477 | rc_out(bp, CD180_CAR, port_No(port)); |
| 477 | port->IER &= ~IER_TXRDY; | 478 | port->IER &= ~IER_TXRDY; |
| @@ -481,25 +482,26 @@ static inline void rc_transmit(struct riscom_board const * bp) | |||
| 481 | tty_wakeup(tty); | 482 | tty_wakeup(tty); |
| 482 | } | 483 | } |
| 483 | 484 | ||
| 484 | static inline void rc_check_modem(struct riscom_board const * bp) | 485 | static void rc_check_modem(struct riscom_board const *bp) |
| 485 | { | 486 | { |
| 486 | struct riscom_port *port; | 487 | struct riscom_port *port; |
| 487 | struct tty_struct *tty; | 488 | struct tty_struct *tty; |
| 488 | unsigned char mcr; | 489 | unsigned char mcr; |
| 489 | 490 | ||
| 490 | if (!(port = rc_get_port(bp, "Modem"))) | 491 | port = rc_get_port(bp, "Modem"); |
| 492 | if (port == NULL) | ||
| 491 | return; | 493 | return; |
| 492 | 494 | ||
| 493 | tty = port->tty; | 495 | tty = port->tty; |
| 494 | 496 | ||
| 495 | mcr = rc_in(bp, CD180_MCR); | 497 | mcr = rc_in(bp, CD180_MCR); |
| 496 | if (mcr & MCR_CDCHG) { | 498 | if (mcr & MCR_CDCHG) { |
| 497 | if (rc_in(bp, CD180_MSVR) & MSVR_CD) | 499 | if (rc_in(bp, CD180_MSVR) & MSVR_CD) |
| 498 | wake_up_interruptible(&port->open_wait); | 500 | wake_up_interruptible(&port->open_wait); |
| 499 | else | 501 | else |
| 500 | tty_hangup(tty); | 502 | tty_hangup(tty); |
| 501 | } | 503 | } |
| 502 | 504 | ||
| 503 | #ifdef RISCOM_BRAIN_DAMAGED_CTS | 505 | #ifdef RISCOM_BRAIN_DAMAGED_CTS |
| 504 | if (mcr & MCR_CTSCHG) { | 506 | if (mcr & MCR_CTSCHG) { |
| 505 | if (rc_in(bp, CD180_MSVR) & MSVR_CTS) { | 507 | if (rc_in(bp, CD180_MSVR) & MSVR_CTS) { |
| @@ -526,13 +528,13 @@ static inline void rc_check_modem(struct riscom_board const * bp) | |||
| 526 | rc_out(bp, CD180_IER, port->IER); | 528 | rc_out(bp, CD180_IER, port->IER); |
| 527 | } | 529 | } |
| 528 | #endif /* RISCOM_BRAIN_DAMAGED_CTS */ | 530 | #endif /* RISCOM_BRAIN_DAMAGED_CTS */ |
| 529 | 531 | ||
| 530 | /* Clear change bits */ | 532 | /* Clear change bits */ |
| 531 | rc_out(bp, CD180_MCR, 0); | 533 | rc_out(bp, CD180_MCR, 0); |
| 532 | } | 534 | } |
| 533 | 535 | ||
| 534 | /* The main interrupt processing routine */ | 536 | /* The main interrupt processing routine */ |
| 535 | static irqreturn_t rc_interrupt(int dummy, void * dev_id) | 537 | static irqreturn_t rc_interrupt(int dummy, void *dev_id) |
| 536 | { | 538 | { |
| 537 | unsigned char status; | 539 | unsigned char status; |
| 538 | unsigned char ack; | 540 | unsigned char ack; |
| @@ -547,13 +549,11 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id) | |||
| 547 | (RC_BSR_TOUT | RC_BSR_TINT | | 549 | (RC_BSR_TOUT | RC_BSR_TINT | |
| 548 | RC_BSR_MINT | RC_BSR_RINT))) { | 550 | RC_BSR_MINT | RC_BSR_RINT))) { |
| 549 | handled = 1; | 551 | handled = 1; |
| 550 | if (status & RC_BSR_TOUT) | 552 | if (status & RC_BSR_TOUT) |
| 551 | printk(KERN_WARNING "rc%d: Got timeout. Hardware " | 553 | printk(KERN_WARNING "rc%d: Got timeout. Hardware " |
| 552 | "error?\n", board_No(bp)); | 554 | "error?\n", board_No(bp)); |
| 553 | |||
| 554 | else if (status & RC_BSR_RINT) { | 555 | else if (status & RC_BSR_RINT) { |
| 555 | ack = rc_in(bp, RC_ACK_RINT); | 556 | ack = rc_in(bp, RC_ACK_RINT); |
| 556 | |||
| 557 | if (ack == (RC_ID | GIVR_IT_RCV)) | 557 | if (ack == (RC_ID | GIVR_IT_RCV)) |
| 558 | rc_receive(bp); | 558 | rc_receive(bp); |
| 559 | else if (ack == (RC_ID | GIVR_IT_REXC)) | 559 | else if (ack == (RC_ID | GIVR_IT_REXC)) |
| @@ -562,29 +562,23 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id) | |||
| 562 | printk(KERN_WARNING "rc%d: Bad receive ack " | 562 | printk(KERN_WARNING "rc%d: Bad receive ack " |
| 563 | "0x%02x.\n", | 563 | "0x%02x.\n", |
| 564 | board_No(bp), ack); | 564 | board_No(bp), ack); |
| 565 | |||
| 566 | } else if (status & RC_BSR_TINT) { | 565 | } else if (status & RC_BSR_TINT) { |
| 567 | ack = rc_in(bp, RC_ACK_TINT); | 566 | ack = rc_in(bp, RC_ACK_TINT); |
| 568 | |||
| 569 | if (ack == (RC_ID | GIVR_IT_TX)) | 567 | if (ack == (RC_ID | GIVR_IT_TX)) |
| 570 | rc_transmit(bp); | 568 | rc_transmit(bp); |
| 571 | else | 569 | else |
| 572 | printk(KERN_WARNING "rc%d: Bad transmit ack " | 570 | printk(KERN_WARNING "rc%d: Bad transmit ack " |
| 573 | "0x%02x.\n", | 571 | "0x%02x.\n", |
| 574 | board_No(bp), ack); | 572 | board_No(bp), ack); |
| 575 | |||
| 576 | } else /* if (status & RC_BSR_MINT) */ { | 573 | } else /* if (status & RC_BSR_MINT) */ { |
| 577 | ack = rc_in(bp, RC_ACK_MINT); | 574 | ack = rc_in(bp, RC_ACK_MINT); |
| 578 | 575 | if (ack == (RC_ID | GIVR_IT_MODEM)) | |
| 579 | if (ack == (RC_ID | GIVR_IT_MODEM)) | ||
| 580 | rc_check_modem(bp); | 576 | rc_check_modem(bp); |
| 581 | else | 577 | else |
| 582 | printk(KERN_WARNING "rc%d: Bad modem ack " | 578 | printk(KERN_WARNING "rc%d: Bad modem ack " |
| 583 | "0x%02x.\n", | 579 | "0x%02x.\n", |
| 584 | board_No(bp), ack); | 580 | board_No(bp), ack); |
| 585 | 581 | } | |
| 586 | } | ||
| 587 | |||
| 588 | rc_out(bp, CD180_EOIR, 0); /* Mark end of interrupt */ | 582 | rc_out(bp, CD180_EOIR, 0); /* Mark end of interrupt */ |
| 589 | rc_out(bp, RC_CTOUT, 0); /* Clear timeout flag */ | 583 | rc_out(bp, RC_CTOUT, 0); /* Clear timeout flag */ |
| 590 | } | 584 | } |
| @@ -596,24 +590,24 @@ static irqreturn_t rc_interrupt(int dummy, void * dev_id) | |||
| 596 | */ | 590 | */ |
| 597 | 591 | ||
| 598 | /* Called with disabled interrupts */ | 592 | /* Called with disabled interrupts */ |
| 599 | static int rc_setup_board(struct riscom_board * bp) | 593 | static int rc_setup_board(struct riscom_board *bp) |
| 600 | { | 594 | { |
| 601 | int error; | 595 | int error; |
| 602 | 596 | ||
| 603 | if (bp->flags & RC_BOARD_ACTIVE) | 597 | if (bp->flags & RC_BOARD_ACTIVE) |
| 604 | return 0; | 598 | return 0; |
| 605 | 599 | ||
| 606 | error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED, | 600 | error = request_irq(bp->irq, rc_interrupt, IRQF_DISABLED, |
| 607 | "RISCom/8", bp); | 601 | "RISCom/8", bp); |
| 608 | if (error) | 602 | if (error) |
| 609 | return error; | 603 | return error; |
| 610 | 604 | ||
| 611 | rc_out(bp, RC_CTOUT, 0); /* Just in case */ | 605 | rc_out(bp, RC_CTOUT, 0); /* Just in case */ |
| 612 | bp->DTR = ~0; | 606 | bp->DTR = ~0; |
| 613 | rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ | 607 | rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ |
| 614 | 608 | ||
| 615 | bp->flags |= RC_BOARD_ACTIVE; | 609 | bp->flags |= RC_BOARD_ACTIVE; |
| 616 | 610 | ||
| 617 | return 0; | 611 | return 0; |
| 618 | } | 612 | } |
| 619 | 613 | ||
| @@ -622,40 +616,40 @@ static void rc_shutdown_board(struct riscom_board *bp) | |||
| 622 | { | 616 | { |
| 623 | if (!(bp->flags & RC_BOARD_ACTIVE)) | 617 | if (!(bp->flags & RC_BOARD_ACTIVE)) |
| 624 | return; | 618 | return; |
| 625 | 619 | ||
| 626 | bp->flags &= ~RC_BOARD_ACTIVE; | 620 | bp->flags &= ~RC_BOARD_ACTIVE; |
| 627 | 621 | ||
| 628 | free_irq(bp->irq, NULL); | 622 | free_irq(bp->irq, NULL); |
| 629 | 623 | ||
| 630 | bp->DTR = ~0; | 624 | bp->DTR = ~0; |
| 631 | rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ | 625 | rc_out(bp, RC_DTR, bp->DTR); /* Drop DTR on all ports */ |
| 632 | 626 | ||
| 633 | } | 627 | } |
| 634 | 628 | ||
| 635 | /* | 629 | /* |
| 636 | * Setting up port characteristics. | 630 | * Setting up port characteristics. |
| 637 | * Must be called with disabled interrupts | 631 | * Must be called with disabled interrupts |
| 638 | */ | 632 | */ |
| 639 | static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) | 633 | static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) |
| 640 | { | 634 | { |
| 641 | struct tty_struct *tty; | 635 | struct tty_struct *tty = port->tty; |
| 642 | unsigned long baud; | 636 | unsigned long baud; |
| 643 | long tmp; | 637 | long tmp; |
| 644 | unsigned char cor1 = 0, cor3 = 0; | 638 | unsigned char cor1 = 0, cor3 = 0; |
| 645 | unsigned char mcor1 = 0, mcor2 = 0; | 639 | unsigned char mcor1 = 0, mcor2 = 0; |
| 646 | 640 | ||
| 647 | if (!(tty = port->tty) || !tty->termios) | 641 | if (tty == NULL || tty->termios == NULL) |
| 648 | return; | 642 | return; |
| 649 | 643 | ||
| 650 | port->IER = 0; | 644 | port->IER = 0; |
| 651 | port->COR2 = 0; | 645 | port->COR2 = 0; |
| 652 | port->MSVR = MSVR_RTS; | 646 | port->MSVR = MSVR_RTS; |
| 653 | 647 | ||
| 654 | baud = tty_get_baud_rate(tty); | 648 | baud = tty_get_baud_rate(tty); |
| 655 | 649 | ||
| 656 | /* Select port on the board */ | 650 | /* Select port on the board */ |
| 657 | rc_out(bp, CD180_CAR, port_No(port)); | 651 | rc_out(bp, CD180_CAR, port_No(port)); |
| 658 | 652 | ||
| 659 | if (!baud) { | 653 | if (!baud) { |
| 660 | /* Drop DTR & exit */ | 654 | /* Drop DTR & exit */ |
| 661 | bp->DTR |= (1u << port_No(port)); | 655 | bp->DTR |= (1u << port_No(port)); |
| @@ -666,69 +660,68 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) | |||
| 666 | bp->DTR &= ~(1u << port_No(port)); | 660 | bp->DTR &= ~(1u << port_No(port)); |
| 667 | rc_out(bp, RC_DTR, bp->DTR); | 661 | rc_out(bp, RC_DTR, bp->DTR); |
| 668 | } | 662 | } |
| 669 | 663 | ||
| 670 | /* | 664 | /* |
| 671 | * Now we must calculate some speed depended things | 665 | * Now we must calculate some speed depended things |
| 672 | */ | 666 | */ |
| 673 | 667 | ||
| 674 | /* Set baud rate for port */ | 668 | /* Set baud rate for port */ |
| 675 | tmp = (((RC_OSCFREQ + baud/2) / baud + | 669 | tmp = (((RC_OSCFREQ + baud/2) / baud + |
| 676 | CD180_TPC/2) / CD180_TPC); | 670 | CD180_TPC/2) / CD180_TPC); |
| 677 | 671 | ||
| 678 | rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff); | 672 | rc_out(bp, CD180_RBPRH, (tmp >> 8) & 0xff); |
| 679 | rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff); | 673 | rc_out(bp, CD180_TBPRH, (tmp >> 8) & 0xff); |
| 680 | rc_out(bp, CD180_RBPRL, tmp & 0xff); | 674 | rc_out(bp, CD180_RBPRL, tmp & 0xff); |
| 681 | rc_out(bp, CD180_TBPRL, tmp & 0xff); | 675 | rc_out(bp, CD180_TBPRL, tmp & 0xff); |
| 682 | 676 | ||
| 683 | baud = (baud + 5) / 10; /* Estimated CPS */ | 677 | baud = (baud + 5) / 10; /* Estimated CPS */ |
| 684 | 678 | ||
| 685 | /* Two timer ticks seems enough to wakeup something like SLIP driver */ | 679 | /* Two timer ticks seems enough to wakeup something like SLIP driver */ |
| 686 | tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO; | 680 | tmp = ((baud + HZ/2) / HZ) * 2 - CD180_NFIFO; |
| 687 | port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? | 681 | port->wakeup_chars = (tmp < 0) ? 0 : ((tmp >= SERIAL_XMIT_SIZE) ? |
| 688 | SERIAL_XMIT_SIZE - 1 : tmp); | 682 | SERIAL_XMIT_SIZE - 1 : tmp); |
| 689 | 683 | ||
| 690 | /* Receiver timeout will be transmission time for 1.5 chars */ | 684 | /* Receiver timeout will be transmission time for 1.5 chars */ |
| 691 | tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud; | 685 | tmp = (RISCOM_TPS + RISCOM_TPS/2 + baud/2) / baud; |
| 692 | tmp = (tmp > 0xff) ? 0xff : tmp; | 686 | tmp = (tmp > 0xff) ? 0xff : tmp; |
| 693 | rc_out(bp, CD180_RTPR, tmp); | 687 | rc_out(bp, CD180_RTPR, tmp); |
| 694 | 688 | ||
| 695 | switch (C_CSIZE(tty)) { | 689 | switch (C_CSIZE(tty)) { |
| 696 | case CS5: | 690 | case CS5: |
| 697 | cor1 |= COR1_5BITS; | 691 | cor1 |= COR1_5BITS; |
| 698 | break; | 692 | break; |
| 699 | case CS6: | 693 | case CS6: |
| 700 | cor1 |= COR1_6BITS; | 694 | cor1 |= COR1_6BITS; |
| 701 | break; | 695 | break; |
| 702 | case CS7: | 696 | case CS7: |
| 703 | cor1 |= COR1_7BITS; | 697 | cor1 |= COR1_7BITS; |
| 704 | break; | 698 | break; |
| 705 | case CS8: | 699 | case CS8: |
| 706 | cor1 |= COR1_8BITS; | 700 | cor1 |= COR1_8BITS; |
| 707 | break; | 701 | break; |
| 708 | } | 702 | } |
| 709 | 703 | if (C_CSTOPB(tty)) | |
| 710 | if (C_CSTOPB(tty)) | ||
| 711 | cor1 |= COR1_2SB; | 704 | cor1 |= COR1_2SB; |
| 712 | 705 | ||
| 713 | cor1 |= COR1_IGNORE; | 706 | cor1 |= COR1_IGNORE; |
| 714 | if (C_PARENB(tty)) { | 707 | if (C_PARENB(tty)) { |
| 715 | cor1 |= COR1_NORMPAR; | 708 | cor1 |= COR1_NORMPAR; |
| 716 | if (C_PARODD(tty)) | 709 | if (C_PARODD(tty)) |
| 717 | cor1 |= COR1_ODDP; | 710 | cor1 |= COR1_ODDP; |
| 718 | if (I_INPCK(tty)) | 711 | if (I_INPCK(tty)) |
| 719 | cor1 &= ~COR1_IGNORE; | 712 | cor1 &= ~COR1_IGNORE; |
| 720 | } | 713 | } |
| 721 | /* Set marking of some errors */ | 714 | /* Set marking of some errors */ |
| 722 | port->mark_mask = RCSR_OE | RCSR_TOUT; | 715 | port->mark_mask = RCSR_OE | RCSR_TOUT; |
| 723 | if (I_INPCK(tty)) | 716 | if (I_INPCK(tty)) |
| 724 | port->mark_mask |= RCSR_FE | RCSR_PE; | 717 | port->mark_mask |= RCSR_FE | RCSR_PE; |
| 725 | if (I_BRKINT(tty) || I_PARMRK(tty)) | 718 | if (I_BRKINT(tty) || I_PARMRK(tty)) |
| 726 | port->mark_mask |= RCSR_BREAK; | 719 | port->mark_mask |= RCSR_BREAK; |
| 727 | if (I_IGNPAR(tty)) | 720 | if (I_IGNPAR(tty)) |
| 728 | port->mark_mask &= ~(RCSR_FE | RCSR_PE); | 721 | port->mark_mask &= ~(RCSR_FE | RCSR_PE); |
| 729 | if (I_IGNBRK(tty)) { | 722 | if (I_IGNBRK(tty)) { |
| 730 | port->mark_mask &= ~RCSR_BREAK; | 723 | port->mark_mask &= ~RCSR_BREAK; |
| 731 | if (I_IGNPAR(tty)) | 724 | if (I_IGNPAR(tty)) |
| 732 | /* Real raw mode. Ignore all */ | 725 | /* Real raw mode. Ignore all */ |
| 733 | port->mark_mask &= ~RCSR_OE; | 726 | port->mark_mask &= ~RCSR_OE; |
| 734 | } | 727 | } |
| @@ -738,7 +731,8 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) | |||
| 738 | port->IER |= IER_DSR | IER_CTS; | 731 | port->IER |= IER_DSR | IER_CTS; |
| 739 | mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; | 732 | mcor1 |= MCOR1_DSRZD | MCOR1_CTSZD; |
| 740 | mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; | 733 | mcor2 |= MCOR2_DSROD | MCOR2_CTSOD; |
| 741 | tty->hw_stopped = !(rc_in(bp, CD180_MSVR) & (MSVR_CTS|MSVR_DSR)); | 734 | tty->hw_stopped = !(rc_in(bp, CD180_MSVR) & |
| 735 | (MSVR_CTS|MSVR_DSR)); | ||
| 742 | #else | 736 | #else |
| 743 | port->COR2 |= COR2_CTSAE; | 737 | port->COR2 |= COR2_CTSAE; |
| 744 | #endif | 738 | #endif |
| @@ -761,13 +755,13 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) | |||
| 761 | mcor1 |= MCOR1_CDZD; | 755 | mcor1 |= MCOR1_CDZD; |
| 762 | mcor2 |= MCOR2_CDOD; | 756 | mcor2 |= MCOR2_CDOD; |
| 763 | } | 757 | } |
| 764 | 758 | ||
| 765 | if (C_CREAD(tty)) | 759 | if (C_CREAD(tty)) |
| 766 | /* Enable receiver */ | 760 | /* Enable receiver */ |
| 767 | port->IER |= IER_RXD; | 761 | port->IER |= IER_RXD; |
| 768 | 762 | ||
| 769 | /* Set input FIFO size (1-8 bytes) */ | 763 | /* Set input FIFO size (1-8 bytes) */ |
| 770 | cor3 |= RISCOM_RXFIFO; | 764 | cor3 |= RISCOM_RXFIFO; |
| 771 | /* Setting up CD180 channel registers */ | 765 | /* Setting up CD180 channel registers */ |
| 772 | rc_out(bp, CD180_COR1, cor1); | 766 | rc_out(bp, CD180_COR1, cor1); |
| 773 | rc_out(bp, CD180_COR2, port->COR2); | 767 | rc_out(bp, CD180_COR2, port->COR2); |
| @@ -791,36 +785,30 @@ static void rc_change_speed(struct riscom_board *bp, struct riscom_port *port) | |||
| 791 | static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port) | 785 | static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port) |
| 792 | { | 786 | { |
| 793 | unsigned long flags; | 787 | unsigned long flags; |
| 794 | 788 | ||
| 795 | if (port->flags & ASYNC_INITIALIZED) | 789 | if (port->flags & ASYNC_INITIALIZED) |
| 796 | return 0; | 790 | return 0; |
| 797 | 791 | ||
| 798 | if (!port->xmit_buf) { | 792 | if (!port->xmit_buf) { |
| 799 | /* We may sleep in get_zeroed_page() */ | 793 | /* We may sleep in get_zeroed_page() */ |
| 800 | unsigned long tmp; | 794 | unsigned long tmp = get_zeroed_page(GFP_KERNEL); |
| 801 | 795 | if (tmp == 0) | |
| 802 | if (!(tmp = get_zeroed_page(GFP_KERNEL))) | ||
| 803 | return -ENOMEM; | 796 | return -ENOMEM; |
| 804 | 797 | if (port->xmit_buf) | |
| 805 | if (port->xmit_buf) { | ||
| 806 | free_page(tmp); | 798 | free_page(tmp); |
| 807 | return -ERESTARTSYS; | 799 | else |
| 808 | } | 800 | port->xmit_buf = (unsigned char *) tmp; |
| 809 | port->xmit_buf = (unsigned char *) tmp; | ||
| 810 | } | 801 | } |
| 811 | |||
| 812 | spin_lock_irqsave(&riscom_lock, flags); | 802 | spin_lock_irqsave(&riscom_lock, flags); |
| 813 | 803 | ||
| 814 | if (port->tty) | 804 | if (port->tty) |
| 815 | clear_bit(TTY_IO_ERROR, &port->tty->flags); | 805 | clear_bit(TTY_IO_ERROR, &port->tty->flags); |
| 816 | 806 | if (port->count == 1) | |
| 817 | if (port->count == 1) | ||
| 818 | bp->count++; | 807 | bp->count++; |
| 819 | |||
| 820 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | 808 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; |
| 821 | rc_change_speed(bp, port); | 809 | rc_change_speed(bp, port); |
| 822 | port->flags |= ASYNC_INITIALIZED; | 810 | port->flags |= ASYNC_INITIALIZED; |
| 823 | 811 | ||
| 824 | spin_unlock_irqrestore(&riscom_lock, flags); | 812 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 825 | return 0; | 813 | return 0; |
| 826 | } | 814 | } |
| @@ -829,38 +817,39 @@ static int rc_setup_port(struct riscom_board *bp, struct riscom_port *port) | |||
| 829 | static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port) | 817 | static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port) |
| 830 | { | 818 | { |
| 831 | struct tty_struct *tty; | 819 | struct tty_struct *tty; |
| 832 | 820 | ||
| 833 | if (!(port->flags & ASYNC_INITIALIZED)) | 821 | if (!(port->flags & ASYNC_INITIALIZED)) |
| 834 | return; | 822 | return; |
| 835 | 823 | ||
| 836 | #ifdef RC_REPORT_OVERRUN | 824 | #ifdef RC_REPORT_OVERRUN |
| 837 | printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n", | 825 | printk(KERN_INFO "rc%d: port %d: Total %ld overruns were detected.\n", |
| 838 | board_No(bp), port_No(port), port->overrun); | 826 | board_No(bp), port_No(port), port->overrun); |
| 839 | #endif | 827 | #endif |
| 840 | #ifdef RC_REPORT_FIFO | 828 | #ifdef RC_REPORT_FIFO |
| 841 | { | 829 | { |
| 842 | int i; | 830 | int i; |
| 843 | 831 | ||
| 844 | printk(KERN_INFO "rc%d: port %d: FIFO hits [ ", | 832 | printk(KERN_INFO "rc%d: port %d: FIFO hits [ ", |
| 845 | board_No(bp), port_No(port)); | 833 | board_No(bp), port_No(port)); |
| 846 | for (i = 0; i < 10; i++) { | 834 | for (i = 0; i < 10; i++) |
| 847 | printk("%ld ", port->hits[i]); | 835 | printk("%ld ", port->hits[i]); |
| 848 | } | ||
| 849 | printk("].\n"); | 836 | printk("].\n"); |
| 850 | } | 837 | } |
| 851 | #endif | 838 | #endif |
| 852 | if (port->xmit_buf) { | 839 | if (port->xmit_buf) { |
| 853 | free_page((unsigned long) port->xmit_buf); | 840 | free_page((unsigned long) port->xmit_buf); |
| 854 | port->xmit_buf = NULL; | 841 | port->xmit_buf = NULL; |
| 855 | } | 842 | } |
| 856 | 843 | ||
| 857 | if (!(tty = port->tty) || C_HUPCL(tty)) { | 844 | tty = port->tty; |
| 845 | |||
| 846 | if (tty == NULL || C_HUPCL(tty)) { | ||
| 858 | /* Drop DTR */ | 847 | /* Drop DTR */ |
| 859 | bp->DTR |= (1u << port_No(port)); | 848 | bp->DTR |= (1u << port_No(port)); |
| 860 | rc_out(bp, RC_DTR, bp->DTR); | 849 | rc_out(bp, RC_DTR, bp->DTR); |
| 861 | } | 850 | } |
| 862 | 851 | ||
| 863 | /* Select port */ | 852 | /* Select port */ |
| 864 | rc_out(bp, CD180_CAR, port_No(port)); | 853 | rc_out(bp, CD180_CAR, port_No(port)); |
| 865 | /* Reset port */ | 854 | /* Reset port */ |
| 866 | rc_wait_CCR(bp); | 855 | rc_wait_CCR(bp); |
| @@ -868,28 +857,26 @@ static void rc_shutdown_port(struct riscom_board *bp, struct riscom_port *port) | |||
| 868 | /* Disable all interrupts from this port */ | 857 | /* Disable all interrupts from this port */ |
| 869 | port->IER = 0; | 858 | port->IER = 0; |
| 870 | rc_out(bp, CD180_IER, port->IER); | 859 | rc_out(bp, CD180_IER, port->IER); |
| 871 | 860 | ||
| 872 | if (tty) | 861 | if (tty) |
| 873 | set_bit(TTY_IO_ERROR, &tty->flags); | 862 | set_bit(TTY_IO_ERROR, &tty->flags); |
| 874 | port->flags &= ~ASYNC_INITIALIZED; | 863 | port->flags &= ~ASYNC_INITIALIZED; |
| 875 | 864 | ||
| 876 | if (--bp->count < 0) { | 865 | if (--bp->count < 0) { |
| 877 | printk(KERN_INFO "rc%d: rc_shutdown_port: " | 866 | printk(KERN_INFO "rc%d: rc_shutdown_port: " |
| 878 | "bad board count: %d\n", | 867 | "bad board count: %d\n", |
| 879 | board_No(bp), bp->count); | 868 | board_No(bp), bp->count); |
| 880 | bp->count = 0; | 869 | bp->count = 0; |
| 881 | } | 870 | } |
| 882 | |||
| 883 | /* | 871 | /* |
| 884 | * If this is the last opened port on the board | 872 | * If this is the last opened port on the board |
| 885 | * shutdown whole board | 873 | * shutdown whole board |
| 886 | */ | 874 | */ |
| 887 | if (!bp->count) | 875 | if (!bp->count) |
| 888 | rc_shutdown_board(bp); | 876 | rc_shutdown_board(bp); |
| 889 | } | 877 | } |
| 890 | 878 | ||
| 891 | 879 | static int block_til_ready(struct tty_struct *tty, struct file *filp, | |
| 892 | static int block_til_ready(struct tty_struct *tty, struct file * filp, | ||
| 893 | struct riscom_port *port) | 880 | struct riscom_port *port) |
| 894 | { | 881 | { |
| 895 | DECLARE_WAITQUEUE(wait, current); | 882 | DECLARE_WAITQUEUE(wait, current); |
| @@ -921,7 +908,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 921 | return 0; | 908 | return 0; |
| 922 | } | 909 | } |
| 923 | 910 | ||
| 924 | if (C_CLOCAL(tty)) | 911 | if (C_CLOCAL(tty)) |
| 925 | do_clocal = 1; | 912 | do_clocal = 1; |
| 926 | 913 | ||
| 927 | /* | 914 | /* |
| @@ -959,7 +946,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 959 | if (port->flags & ASYNC_HUP_NOTIFY) | 946 | if (port->flags & ASYNC_HUP_NOTIFY) |
| 960 | retval = -EAGAIN; | 947 | retval = -EAGAIN; |
| 961 | else | 948 | else |
| 962 | retval = -ERESTARTSYS; | 949 | retval = -ERESTARTSYS; |
| 963 | break; | 950 | break; |
| 964 | } | 951 | } |
| 965 | if (!(port->flags & ASYNC_CLOSING) && | 952 | if (!(port->flags & ASYNC_CLOSING) && |
| @@ -978,50 +965,63 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp, | |||
| 978 | port->blocked_open--; | 965 | port->blocked_open--; |
| 979 | if (retval) | 966 | if (retval) |
| 980 | return retval; | 967 | return retval; |
| 981 | 968 | ||
| 982 | port->flags |= ASYNC_NORMAL_ACTIVE; | 969 | port->flags |= ASYNC_NORMAL_ACTIVE; |
| 983 | return 0; | 970 | return 0; |
| 984 | } | 971 | } |
| 985 | 972 | ||
| 986 | static int rc_open(struct tty_struct * tty, struct file * filp) | 973 | static int rc_open(struct tty_struct *tty, struct file *filp) |
| 987 | { | 974 | { |
| 988 | int board; | 975 | int board; |
| 989 | int error; | 976 | int error; |
| 990 | struct riscom_port * port; | 977 | struct riscom_port *port; |
| 991 | struct riscom_board * bp; | 978 | struct riscom_board *bp; |
| 992 | 979 | ||
| 993 | board = RC_BOARD(tty->index); | 980 | board = RC_BOARD(tty->index); |
| 994 | if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT)) | 981 | if (board >= RC_NBOARD || !(rc_board[board].flags & RC_BOARD_PRESENT)) |
| 995 | return -ENODEV; | 982 | return -ENODEV; |
| 996 | 983 | ||
| 997 | bp = &rc_board[board]; | 984 | bp = &rc_board[board]; |
| 998 | port = rc_port + board * RC_NPORT + RC_PORT(tty->index); | 985 | port = rc_port + board * RC_NPORT + RC_PORT(tty->index); |
| 999 | if (rc_paranoia_check(port, tty->name, "rc_open")) | 986 | if (rc_paranoia_check(port, tty->name, "rc_open")) |
| 1000 | return -ENODEV; | 987 | return -ENODEV; |
| 1001 | 988 | ||
| 1002 | if ((error = rc_setup_board(bp))) | 989 | error = rc_setup_board(bp); |
| 990 | if (error) | ||
| 1003 | return error; | 991 | return error; |
| 1004 | 992 | ||
| 1005 | port->count++; | 993 | port->count++; |
| 1006 | tty->driver_data = port; | 994 | tty->driver_data = port; |
| 1007 | port->tty = tty; | 995 | port->tty = tty; |
| 1008 | 996 | ||
| 1009 | if ((error = rc_setup_port(bp, port))) | 997 | error = rc_setup_port(bp, port); |
| 1010 | return error; | 998 | if (error == 0) |
| 1011 | 999 | error = block_til_ready(tty, filp, port); | |
| 1012 | if ((error = block_til_ready(tty, filp, port))) | 1000 | return error; |
| 1013 | return error; | ||
| 1014 | |||
| 1015 | return 0; | ||
| 1016 | } | 1001 | } |
| 1017 | 1002 | ||
| 1018 | static void rc_close(struct tty_struct * tty, struct file * filp) | 1003 | static void rc_flush_buffer(struct tty_struct *tty) |
| 1004 | { | ||
| 1005 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | ||
| 1006 | unsigned long flags; | ||
| 1007 | |||
| 1008 | if (rc_paranoia_check(port, tty->name, "rc_flush_buffer")) | ||
| 1009 | return; | ||
| 1010 | |||
| 1011 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1012 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1013 | spin_unlock_irqrestore(&riscom_lock, flags); | ||
| 1014 | |||
| 1015 | tty_wakeup(tty); | ||
| 1016 | } | ||
| 1017 | |||
| 1018 | static void rc_close(struct tty_struct *tty, struct file *filp) | ||
| 1019 | { | 1019 | { |
| 1020 | struct riscom_port *port = (struct riscom_port *) tty->driver_data; | 1020 | struct riscom_port *port = (struct riscom_port *) tty->driver_data; |
| 1021 | struct riscom_board *bp; | 1021 | struct riscom_board *bp; |
| 1022 | unsigned long flags; | 1022 | unsigned long flags; |
| 1023 | unsigned long timeout; | 1023 | unsigned long timeout; |
| 1024 | 1024 | ||
| 1025 | if (!port || rc_paranoia_check(port, tty->name, "close")) | 1025 | if (!port || rc_paranoia_check(port, tty->name, "close")) |
| 1026 | return; | 1026 | return; |
| 1027 | 1027 | ||
| @@ -1029,7 +1029,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp) | |||
| 1029 | 1029 | ||
| 1030 | if (tty_hung_up_p(filp)) | 1030 | if (tty_hung_up_p(filp)) |
| 1031 | goto out; | 1031 | goto out; |
| 1032 | 1032 | ||
| 1033 | bp = port_Board(port); | 1033 | bp = port_Board(port); |
| 1034 | if ((tty->count == 1) && (port->count != 1)) { | 1034 | if ((tty->count == 1) && (port->count != 1)) { |
| 1035 | printk(KERN_INFO "rc%d: rc_close: bad port count;" | 1035 | printk(KERN_INFO "rc%d: rc_close: bad port count;" |
| @@ -1047,7 +1047,7 @@ static void rc_close(struct tty_struct * tty, struct file * filp) | |||
| 1047 | goto out; | 1047 | goto out; |
| 1048 | port->flags |= ASYNC_CLOSING; | 1048 | port->flags |= ASYNC_CLOSING; |
| 1049 | /* | 1049 | /* |
| 1050 | * Now we wait for the transmit buffer to clear; and we notify | 1050 | * Now we wait for the transmit buffer to clear; and we notify |
| 1051 | * the line discipline to only process XON/XOFF characters. | 1051 | * the line discipline to only process XON/XOFF characters. |
| 1052 | */ | 1052 | */ |
| 1053 | tty->closing = 1; | 1053 | tty->closing = 1; |
| @@ -1070,24 +1070,22 @@ static void rc_close(struct tty_struct * tty, struct file * filp) | |||
| 1070 | * has completely drained; this is especially | 1070 | * has completely drained; this is especially |
| 1071 | * important if there is a transmit FIFO! | 1071 | * important if there is a transmit FIFO! |
| 1072 | */ | 1072 | */ |
| 1073 | timeout = jiffies+HZ; | 1073 | timeout = jiffies + HZ; |
| 1074 | while(port->IER & IER_TXEMPTY) { | 1074 | while (port->IER & IER_TXEMPTY) { |
| 1075 | msleep_interruptible(jiffies_to_msecs(port->timeout)); | 1075 | msleep_interruptible(jiffies_to_msecs(port->timeout)); |
| 1076 | if (time_after(jiffies, timeout)) | 1076 | if (time_after(jiffies, timeout)) |
| 1077 | break; | 1077 | break; |
| 1078 | } | 1078 | } |
| 1079 | } | 1079 | } |
| 1080 | rc_shutdown_port(bp, port); | 1080 | rc_shutdown_port(bp, port); |
| 1081 | if (tty->driver->flush_buffer) | 1081 | rc_flush_buffer(tty); |
| 1082 | tty->driver->flush_buffer(tty); | ||
| 1083 | tty_ldisc_flush(tty); | 1082 | tty_ldisc_flush(tty); |
| 1084 | 1083 | ||
| 1085 | tty->closing = 0; | 1084 | tty->closing = 0; |
| 1086 | port->tty = NULL; | 1085 | port->tty = NULL; |
| 1087 | if (port->blocked_open) { | 1086 | if (port->blocked_open) { |
| 1088 | if (port->close_delay) { | 1087 | if (port->close_delay) |
| 1089 | msleep_interruptible(jiffies_to_msecs(port->close_delay)); | 1088 | msleep_interruptible(jiffies_to_msecs(port->close_delay)); |
| 1090 | } | ||
| 1091 | wake_up_interruptible(&port->open_wait); | 1089 | wake_up_interruptible(&port->open_wait); |
| 1092 | } | 1090 | } |
| 1093 | port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); | 1091 | port->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); |
| @@ -1097,17 +1095,17 @@ out: | |||
| 1097 | spin_unlock_irqrestore(&riscom_lock, flags); | 1095 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1098 | } | 1096 | } |
| 1099 | 1097 | ||
| 1100 | static int rc_write(struct tty_struct * tty, | 1098 | static int rc_write(struct tty_struct *tty, |
| 1101 | const unsigned char *buf, int count) | 1099 | const unsigned char *buf, int count) |
| 1102 | { | 1100 | { |
| 1103 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1101 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1104 | struct riscom_board *bp; | 1102 | struct riscom_board *bp; |
| 1105 | int c, total = 0; | 1103 | int c, total = 0; |
| 1106 | unsigned long flags; | 1104 | unsigned long flags; |
| 1107 | 1105 | ||
| 1108 | if (rc_paranoia_check(port, tty->name, "rc_write")) | 1106 | if (rc_paranoia_check(port, tty->name, "rc_write")) |
| 1109 | return 0; | 1107 | return 0; |
| 1110 | 1108 | ||
| 1111 | bp = port_Board(port); | 1109 | bp = port_Board(port); |
| 1112 | 1110 | ||
| 1113 | if (!tty || !port->xmit_buf) | 1111 | if (!tty || !port->xmit_buf) |
| @@ -1144,38 +1142,41 @@ static int rc_write(struct tty_struct * tty, | |||
| 1144 | return total; | 1142 | return total; |
| 1145 | } | 1143 | } |
| 1146 | 1144 | ||
| 1147 | static void rc_put_char(struct tty_struct * tty, unsigned char ch) | 1145 | static int rc_put_char(struct tty_struct *tty, unsigned char ch) |
| 1148 | { | 1146 | { |
| 1149 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1147 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1150 | unsigned long flags; | 1148 | unsigned long flags; |
| 1149 | int ret = 0; | ||
| 1151 | 1150 | ||
| 1152 | if (rc_paranoia_check(port, tty->name, "rc_put_char")) | 1151 | if (rc_paranoia_check(port, tty->name, "rc_put_char")) |
| 1153 | return; | 1152 | return 0; |
| 1154 | 1153 | ||
| 1155 | if (!tty || !port->xmit_buf) | 1154 | if (!tty || !port->xmit_buf) |
| 1156 | return; | 1155 | return 0; |
| 1157 | 1156 | ||
| 1158 | spin_lock_irqsave(&riscom_lock, flags); | 1157 | spin_lock_irqsave(&riscom_lock, flags); |
| 1159 | 1158 | ||
| 1160 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) | 1159 | if (port->xmit_cnt >= SERIAL_XMIT_SIZE - 1) |
| 1161 | goto out; | 1160 | goto out; |
| 1162 | 1161 | ||
| 1163 | port->xmit_buf[port->xmit_head++] = ch; | 1162 | port->xmit_buf[port->xmit_head++] = ch; |
| 1164 | port->xmit_head &= SERIAL_XMIT_SIZE - 1; | 1163 | port->xmit_head &= SERIAL_XMIT_SIZE - 1; |
| 1165 | port->xmit_cnt++; | 1164 | port->xmit_cnt++; |
| 1165 | ret = 1; | ||
| 1166 | 1166 | ||
| 1167 | out: | 1167 | out: |
| 1168 | spin_unlock_irqrestore(&riscom_lock, flags); | 1168 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1169 | return ret; | ||
| 1169 | } | 1170 | } |
| 1170 | 1171 | ||
| 1171 | static void rc_flush_chars(struct tty_struct * tty) | 1172 | static void rc_flush_chars(struct tty_struct *tty) |
| 1172 | { | 1173 | { |
| 1173 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1174 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1174 | unsigned long flags; | 1175 | unsigned long flags; |
| 1175 | 1176 | ||
| 1176 | if (rc_paranoia_check(port, tty->name, "rc_flush_chars")) | 1177 | if (rc_paranoia_check(port, tty->name, "rc_flush_chars")) |
| 1177 | return; | 1178 | return; |
| 1178 | 1179 | ||
| 1179 | if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || | 1180 | if (port->xmit_cnt <= 0 || tty->stopped || tty->hw_stopped || |
| 1180 | !port->xmit_buf) | 1181 | !port->xmit_buf) |
| 1181 | return; | 1182 | return; |
| @@ -1189,11 +1190,11 @@ static void rc_flush_chars(struct tty_struct * tty) | |||
| 1189 | spin_unlock_irqrestore(&riscom_lock, flags); | 1190 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1190 | } | 1191 | } |
| 1191 | 1192 | ||
| 1192 | static int rc_write_room(struct tty_struct * tty) | 1193 | static int rc_write_room(struct tty_struct *tty) |
| 1193 | { | 1194 | { |
| 1194 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1195 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1195 | int ret; | 1196 | int ret; |
| 1196 | 1197 | ||
| 1197 | if (rc_paranoia_check(port, tty->name, "rc_write_room")) | 1198 | if (rc_paranoia_check(port, tty->name, "rc_write_room")) |
| 1198 | return 0; | 1199 | return 0; |
| 1199 | 1200 | ||
| @@ -1206,39 +1207,22 @@ static int rc_write_room(struct tty_struct * tty) | |||
| 1206 | static int rc_chars_in_buffer(struct tty_struct *tty) | 1207 | static int rc_chars_in_buffer(struct tty_struct *tty) |
| 1207 | { | 1208 | { |
| 1208 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1209 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1209 | 1210 | ||
| 1210 | if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer")) | 1211 | if (rc_paranoia_check(port, tty->name, "rc_chars_in_buffer")) |
| 1211 | return 0; | 1212 | return 0; |
| 1212 | |||
| 1213 | return port->xmit_cnt; | ||
| 1214 | } | ||
| 1215 | |||
| 1216 | static void rc_flush_buffer(struct tty_struct *tty) | ||
| 1217 | { | ||
| 1218 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | ||
| 1219 | unsigned long flags; | ||
| 1220 | |||
| 1221 | if (rc_paranoia_check(port, tty->name, "rc_flush_buffer")) | ||
| 1222 | return; | ||
| 1223 | |||
| 1224 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1225 | |||
| 1226 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1227 | 1213 | ||
| 1228 | spin_unlock_irqrestore(&riscom_lock, flags); | 1214 | return port->xmit_cnt; |
| 1229 | |||
| 1230 | tty_wakeup(tty); | ||
| 1231 | } | 1215 | } |
| 1232 | 1216 | ||
| 1233 | static int rc_tiocmget(struct tty_struct *tty, struct file *file) | 1217 | static int rc_tiocmget(struct tty_struct *tty, struct file *file) |
| 1234 | { | 1218 | { |
| 1235 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1219 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1236 | struct riscom_board * bp; | 1220 | struct riscom_board *bp; |
| 1237 | unsigned char status; | 1221 | unsigned char status; |
| 1238 | unsigned int result; | 1222 | unsigned int result; |
| 1239 | unsigned long flags; | 1223 | unsigned long flags; |
| 1240 | 1224 | ||
| 1241 | if (rc_paranoia_check(port, tty->name, __FUNCTION__)) | 1225 | if (rc_paranoia_check(port, tty->name, __func__)) |
| 1242 | return -ENODEV; | 1226 | return -ENODEV; |
| 1243 | 1227 | ||
| 1244 | bp = port_Board(port); | 1228 | bp = port_Board(port); |
| @@ -1266,7 +1250,7 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1266 | unsigned long flags; | 1250 | unsigned long flags; |
| 1267 | struct riscom_board *bp; | 1251 | struct riscom_board *bp; |
| 1268 | 1252 | ||
| 1269 | if (rc_paranoia_check(port, tty->name, __FUNCTION__)) | 1253 | if (rc_paranoia_check(port, tty->name, __func__)) |
| 1270 | return -ENODEV; | 1254 | return -ENODEV; |
| 1271 | 1255 | ||
| 1272 | bp = port_Board(port); | 1256 | bp = port_Board(port); |
| @@ -1292,11 +1276,11 @@ static int rc_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1292 | return 0; | 1276 | return 0; |
| 1293 | } | 1277 | } |
| 1294 | 1278 | ||
| 1295 | static inline void rc_send_break(struct riscom_port * port, unsigned long length) | 1279 | static void rc_send_break(struct riscom_port *port, unsigned long length) |
| 1296 | { | 1280 | { |
| 1297 | struct riscom_board *bp = port_Board(port); | 1281 | struct riscom_board *bp = port_Board(port); |
| 1298 | unsigned long flags; | 1282 | unsigned long flags; |
| 1299 | 1283 | ||
| 1300 | spin_lock_irqsave(&riscom_lock, flags); | 1284 | spin_lock_irqsave(&riscom_lock, flags); |
| 1301 | 1285 | ||
| 1302 | port->break_length = RISCOM_TPS / HZ * length; | 1286 | port->break_length = RISCOM_TPS / HZ * length; |
| @@ -1312,17 +1296,17 @@ static inline void rc_send_break(struct riscom_port * port, unsigned long length | |||
| 1312 | spin_unlock_irqrestore(&riscom_lock, flags); | 1296 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1313 | } | 1297 | } |
| 1314 | 1298 | ||
| 1315 | static inline int rc_set_serial_info(struct riscom_port * port, | 1299 | static int rc_set_serial_info(struct riscom_port *port, |
| 1316 | struct serial_struct __user * newinfo) | 1300 | struct serial_struct __user *newinfo) |
| 1317 | { | 1301 | { |
| 1318 | struct serial_struct tmp; | 1302 | struct serial_struct tmp; |
| 1319 | struct riscom_board *bp = port_Board(port); | 1303 | struct riscom_board *bp = port_Board(port); |
| 1320 | int change_speed; | 1304 | int change_speed; |
| 1321 | 1305 | ||
| 1322 | if (copy_from_user(&tmp, newinfo, sizeof(tmp))) | 1306 | if (copy_from_user(&tmp, newinfo, sizeof(tmp))) |
| 1323 | return -EFAULT; | 1307 | return -EFAULT; |
| 1324 | 1308 | ||
| 1325 | #if 0 | 1309 | #if 0 |
| 1326 | if ((tmp.irq != bp->irq) || | 1310 | if ((tmp.irq != bp->irq) || |
| 1327 | (tmp.port != bp->base) || | 1311 | (tmp.port != bp->base) || |
| 1328 | (tmp.type != PORT_CIRRUS) || | 1312 | (tmp.type != PORT_CIRRUS) || |
| @@ -1331,16 +1315,16 @@ static inline int rc_set_serial_info(struct riscom_port * port, | |||
| 1331 | (tmp.xmit_fifo_size != CD180_NFIFO) || | 1315 | (tmp.xmit_fifo_size != CD180_NFIFO) || |
| 1332 | (tmp.flags & ~RISCOM_LEGAL_FLAGS)) | 1316 | (tmp.flags & ~RISCOM_LEGAL_FLAGS)) |
| 1333 | return -EINVAL; | 1317 | return -EINVAL; |
| 1334 | #endif | 1318 | #endif |
| 1335 | 1319 | ||
| 1336 | change_speed = ((port->flags & ASYNC_SPD_MASK) != | 1320 | change_speed = ((port->flags & ASYNC_SPD_MASK) != |
| 1337 | (tmp.flags & ASYNC_SPD_MASK)); | 1321 | (tmp.flags & ASYNC_SPD_MASK)); |
| 1338 | 1322 | ||
| 1339 | if (!capable(CAP_SYS_ADMIN)) { | 1323 | if (!capable(CAP_SYS_ADMIN)) { |
| 1340 | if ((tmp.close_delay != port->close_delay) || | 1324 | if ((tmp.close_delay != port->close_delay) || |
| 1341 | (tmp.closing_wait != port->closing_wait) || | 1325 | (tmp.closing_wait != port->closing_wait) || |
| 1342 | ((tmp.flags & ~ASYNC_USR_MASK) != | 1326 | ((tmp.flags & ~ASYNC_USR_MASK) != |
| 1343 | (port->flags & ~ASYNC_USR_MASK))) | 1327 | (port->flags & ~ASYNC_USR_MASK))) |
| 1344 | return -EPERM; | 1328 | return -EPERM; |
| 1345 | port->flags = ((port->flags & ~ASYNC_USR_MASK) | | 1329 | port->flags = ((port->flags & ~ASYNC_USR_MASK) | |
| 1346 | (tmp.flags & ASYNC_USR_MASK)); | 1330 | (tmp.flags & ASYNC_USR_MASK)); |
| @@ -1360,12 +1344,12 @@ static inline int rc_set_serial_info(struct riscom_port * port, | |||
| 1360 | return 0; | 1344 | return 0; |
| 1361 | } | 1345 | } |
| 1362 | 1346 | ||
| 1363 | static inline int rc_get_serial_info(struct riscom_port * port, | 1347 | static int rc_get_serial_info(struct riscom_port *port, |
| 1364 | struct serial_struct __user *retinfo) | 1348 | struct serial_struct __user *retinfo) |
| 1365 | { | 1349 | { |
| 1366 | struct serial_struct tmp; | 1350 | struct serial_struct tmp; |
| 1367 | struct riscom_board *bp = port_Board(port); | 1351 | struct riscom_board *bp = port_Board(port); |
| 1368 | 1352 | ||
| 1369 | memset(&tmp, 0, sizeof(tmp)); | 1353 | memset(&tmp, 0, sizeof(tmp)); |
| 1370 | tmp.type = PORT_CIRRUS; | 1354 | tmp.type = PORT_CIRRUS; |
| 1371 | tmp.line = port - rc_port; | 1355 | tmp.line = port - rc_port; |
| @@ -1379,19 +1363,18 @@ static inline int rc_get_serial_info(struct riscom_port * port, | |||
| 1379 | return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 1363 | return copy_to_user(retinfo, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
| 1380 | } | 1364 | } |
| 1381 | 1365 | ||
| 1382 | static int rc_ioctl(struct tty_struct * tty, struct file * filp, | 1366 | static int rc_ioctl(struct tty_struct *tty, struct file *filp, |
| 1383 | unsigned int cmd, unsigned long arg) | 1367 | unsigned int cmd, unsigned long arg) |
| 1384 | |||
| 1385 | { | 1368 | { |
| 1386 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1369 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1387 | void __user *argp = (void __user *)arg; | 1370 | void __user *argp = (void __user *)arg; |
| 1388 | int retval; | 1371 | int retval = 0; |
| 1389 | 1372 | ||
| 1390 | if (rc_paranoia_check(port, tty->name, "rc_ioctl")) | 1373 | if (rc_paranoia_check(port, tty->name, "rc_ioctl")) |
| 1391 | return -ENODEV; | 1374 | return -ENODEV; |
| 1392 | 1375 | ||
| 1393 | switch (cmd) { | 1376 | switch (cmd) { |
| 1394 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | 1377 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
| 1395 | retval = tty_check_change(tty); | 1378 | retval = tty_check_change(tty); |
| 1396 | if (retval) | 1379 | if (retval) |
| 1397 | return retval; | 1380 | return retval; |
| @@ -1399,45 +1382,40 @@ static int rc_ioctl(struct tty_struct * tty, struct file * filp, | |||
| 1399 | if (!arg) | 1382 | if (!arg) |
| 1400 | rc_send_break(port, HZ/4); /* 1/4 second */ | 1383 | rc_send_break(port, HZ/4); /* 1/4 second */ |
| 1401 | break; | 1384 | break; |
| 1402 | case TCSBRKP: /* support for POSIX tcsendbreak() */ | 1385 | case TCSBRKP: /* support for POSIX tcsendbreak() */ |
| 1403 | retval = tty_check_change(tty); | 1386 | retval = tty_check_change(tty); |
| 1404 | if (retval) | 1387 | if (retval) |
| 1405 | return retval; | 1388 | return retval; |
| 1406 | tty_wait_until_sent(tty, 0); | 1389 | tty_wait_until_sent(tty, 0); |
| 1407 | rc_send_break(port, arg ? arg*(HZ/10) : HZ/4); | 1390 | rc_send_break(port, arg ? arg*(HZ/10) : HZ/4); |
| 1408 | break; | 1391 | break; |
| 1409 | case TIOCGSOFTCAR: | 1392 | case TIOCGSERIAL: |
| 1410 | return put_user(C_CLOCAL(tty) ? 1 : 0, (unsigned __user *)argp); | 1393 | lock_kernel(); |
| 1411 | case TIOCSSOFTCAR: | 1394 | retval = rc_get_serial_info(port, argp); |
| 1412 | if (get_user(arg,(unsigned __user *) argp)) | 1395 | unlock_kernel(); |
| 1413 | return -EFAULT; | ||
| 1414 | tty->termios->c_cflag = | ||
| 1415 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1416 | (arg ? CLOCAL : 0)); | ||
| 1417 | break; | 1396 | break; |
| 1418 | case TIOCGSERIAL: | 1397 | case TIOCSSERIAL: |
| 1419 | return rc_get_serial_info(port, argp); | 1398 | lock_kernel(); |
| 1420 | case TIOCSSERIAL: | 1399 | retval = rc_set_serial_info(port, argp); |
| 1421 | return rc_set_serial_info(port, argp); | 1400 | unlock_kernel(); |
| 1422 | default: | 1401 | break; |
| 1423 | return -ENOIOCTLCMD; | 1402 | default: |
| 1403 | retval = -ENOIOCTLCMD; | ||
| 1424 | } | 1404 | } |
| 1425 | return 0; | 1405 | return retval; |
| 1426 | } | 1406 | } |
| 1427 | 1407 | ||
| 1428 | static void rc_throttle(struct tty_struct * tty) | 1408 | static void rc_throttle(struct tty_struct *tty) |
| 1429 | { | 1409 | { |
| 1430 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1410 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1431 | struct riscom_board *bp; | 1411 | struct riscom_board *bp; |
| 1432 | unsigned long flags; | 1412 | unsigned long flags; |
| 1433 | 1413 | ||
| 1434 | if (rc_paranoia_check(port, tty->name, "rc_throttle")) | 1414 | if (rc_paranoia_check(port, tty->name, "rc_throttle")) |
| 1435 | return; | 1415 | return; |
| 1436 | |||
| 1437 | bp = port_Board(port); | 1416 | bp = port_Board(port); |
| 1438 | 1417 | ||
| 1439 | spin_lock_irqsave(&riscom_lock, flags); | 1418 | spin_lock_irqsave(&riscom_lock, flags); |
| 1440 | |||
| 1441 | port->MSVR &= ~MSVR_RTS; | 1419 | port->MSVR &= ~MSVR_RTS; |
| 1442 | rc_out(bp, CD180_CAR, port_No(port)); | 1420 | rc_out(bp, CD180_CAR, port_No(port)); |
| 1443 | if (I_IXOFF(tty)) { | 1421 | if (I_IXOFF(tty)) { |
| @@ -1446,23 +1424,20 @@ static void rc_throttle(struct tty_struct * tty) | |||
| 1446 | rc_wait_CCR(bp); | 1424 | rc_wait_CCR(bp); |
| 1447 | } | 1425 | } |
| 1448 | rc_out(bp, CD180_MSVR, port->MSVR); | 1426 | rc_out(bp, CD180_MSVR, port->MSVR); |
| 1449 | |||
| 1450 | spin_unlock_irqrestore(&riscom_lock, flags); | 1427 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1451 | } | 1428 | } |
| 1452 | 1429 | ||
| 1453 | static void rc_unthrottle(struct tty_struct * tty) | 1430 | static void rc_unthrottle(struct tty_struct *tty) |
| 1454 | { | 1431 | { |
| 1455 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1432 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1456 | struct riscom_board *bp; | 1433 | struct riscom_board *bp; |
| 1457 | unsigned long flags; | 1434 | unsigned long flags; |
| 1458 | 1435 | ||
| 1459 | if (rc_paranoia_check(port, tty->name, "rc_unthrottle")) | 1436 | if (rc_paranoia_check(port, tty->name, "rc_unthrottle")) |
| 1460 | return; | 1437 | return; |
| 1461 | |||
| 1462 | bp = port_Board(port); | 1438 | bp = port_Board(port); |
| 1463 | |||
| 1464 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1465 | 1439 | ||
| 1440 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1466 | port->MSVR |= MSVR_RTS; | 1441 | port->MSVR |= MSVR_RTS; |
| 1467 | rc_out(bp, CD180_CAR, port_No(port)); | 1442 | rc_out(bp, CD180_CAR, port_No(port)); |
| 1468 | if (I_IXOFF(tty)) { | 1443 | if (I_IXOFF(tty)) { |
| @@ -1471,62 +1446,58 @@ static void rc_unthrottle(struct tty_struct * tty) | |||
| 1471 | rc_wait_CCR(bp); | 1446 | rc_wait_CCR(bp); |
| 1472 | } | 1447 | } |
| 1473 | rc_out(bp, CD180_MSVR, port->MSVR); | 1448 | rc_out(bp, CD180_MSVR, port->MSVR); |
| 1474 | |||
| 1475 | spin_unlock_irqrestore(&riscom_lock, flags); | 1449 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1476 | } | 1450 | } |
| 1477 | 1451 | ||
| 1478 | static void rc_stop(struct tty_struct * tty) | 1452 | static void rc_stop(struct tty_struct *tty) |
| 1479 | { | 1453 | { |
| 1480 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1454 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1481 | struct riscom_board *bp; | 1455 | struct riscom_board *bp; |
| 1482 | unsigned long flags; | 1456 | unsigned long flags; |
| 1483 | 1457 | ||
| 1484 | if (rc_paranoia_check(port, tty->name, "rc_stop")) | 1458 | if (rc_paranoia_check(port, tty->name, "rc_stop")) |
| 1485 | return; | 1459 | return; |
| 1486 | 1460 | ||
| 1487 | bp = port_Board(port); | 1461 | bp = port_Board(port); |
| 1488 | |||
| 1489 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1490 | 1462 | ||
| 1463 | spin_lock_irqsave(&riscom_lock, flags); | ||
| 1491 | port->IER &= ~IER_TXRDY; | 1464 | port->IER &= ~IER_TXRDY; |
| 1492 | rc_out(bp, CD180_CAR, port_No(port)); | 1465 | rc_out(bp, CD180_CAR, port_No(port)); |
| 1493 | rc_out(bp, CD180_IER, port->IER); | 1466 | rc_out(bp, CD180_IER, port->IER); |
| 1494 | |||
| 1495 | spin_unlock_irqrestore(&riscom_lock, flags); | 1467 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1496 | } | 1468 | } |
| 1497 | 1469 | ||
| 1498 | static void rc_start(struct tty_struct * tty) | 1470 | static void rc_start(struct tty_struct *tty) |
| 1499 | { | 1471 | { |
| 1500 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1472 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1501 | struct riscom_board *bp; | 1473 | struct riscom_board *bp; |
| 1502 | unsigned long flags; | 1474 | unsigned long flags; |
| 1503 | 1475 | ||
| 1504 | if (rc_paranoia_check(port, tty->name, "rc_start")) | 1476 | if (rc_paranoia_check(port, tty->name, "rc_start")) |
| 1505 | return; | 1477 | return; |
| 1506 | 1478 | ||
| 1507 | bp = port_Board(port); | 1479 | bp = port_Board(port); |
| 1508 | 1480 | ||
| 1509 | spin_lock_irqsave(&riscom_lock, flags); | 1481 | spin_lock_irqsave(&riscom_lock, flags); |
| 1510 | 1482 | ||
| 1511 | if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { | 1483 | if (port->xmit_cnt && port->xmit_buf && !(port->IER & IER_TXRDY)) { |
| 1512 | port->IER |= IER_TXRDY; | 1484 | port->IER |= IER_TXRDY; |
| 1513 | rc_out(bp, CD180_CAR, port_No(port)); | 1485 | rc_out(bp, CD180_CAR, port_No(port)); |
| 1514 | rc_out(bp, CD180_IER, port->IER); | 1486 | rc_out(bp, CD180_IER, port->IER); |
| 1515 | } | 1487 | } |
| 1516 | |||
| 1517 | spin_unlock_irqrestore(&riscom_lock, flags); | 1488 | spin_unlock_irqrestore(&riscom_lock, flags); |
| 1518 | } | 1489 | } |
| 1519 | 1490 | ||
| 1520 | static void rc_hangup(struct tty_struct * tty) | 1491 | static void rc_hangup(struct tty_struct *tty) |
| 1521 | { | 1492 | { |
| 1522 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1493 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1523 | struct riscom_board *bp; | 1494 | struct riscom_board *bp; |
| 1524 | 1495 | ||
| 1525 | if (rc_paranoia_check(port, tty->name, "rc_hangup")) | 1496 | if (rc_paranoia_check(port, tty->name, "rc_hangup")) |
| 1526 | return; | 1497 | return; |
| 1527 | 1498 | ||
| 1528 | bp = port_Board(port); | 1499 | bp = port_Board(port); |
| 1529 | 1500 | ||
| 1530 | rc_shutdown_port(bp, port); | 1501 | rc_shutdown_port(bp, port); |
| 1531 | port->count = 0; | 1502 | port->count = 0; |
| 1532 | port->flags &= ~ASYNC_NORMAL_ACTIVE; | 1503 | port->flags &= ~ASYNC_NORMAL_ACTIVE; |
| @@ -1534,17 +1505,14 @@ static void rc_hangup(struct tty_struct * tty) | |||
| 1534 | wake_up_interruptible(&port->open_wait); | 1505 | wake_up_interruptible(&port->open_wait); |
| 1535 | } | 1506 | } |
| 1536 | 1507 | ||
| 1537 | static void rc_set_termios(struct tty_struct * tty, struct ktermios * old_termios) | 1508 | static void rc_set_termios(struct tty_struct *tty, |
| 1509 | struct ktermios *old_termios) | ||
| 1538 | { | 1510 | { |
| 1539 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; | 1511 | struct riscom_port *port = (struct riscom_port *)tty->driver_data; |
| 1540 | unsigned long flags; | 1512 | unsigned long flags; |
| 1541 | 1513 | ||
| 1542 | if (rc_paranoia_check(port, tty->name, "rc_set_termios")) | 1514 | if (rc_paranoia_check(port, tty->name, "rc_set_termios")) |
| 1543 | return; | 1515 | return; |
| 1544 | |||
| 1545 | if (tty->termios->c_cflag == old_termios->c_cflag && | ||
| 1546 | tty->termios->c_iflag == old_termios->c_iflag) | ||
| 1547 | return; | ||
| 1548 | 1516 | ||
| 1549 | spin_lock_irqsave(&riscom_lock, flags); | 1517 | spin_lock_irqsave(&riscom_lock, flags); |
| 1550 | rc_change_speed(port_Board(port), port); | 1518 | rc_change_speed(port_Board(port), port); |
| @@ -1583,9 +1551,9 @@ static int __init rc_init_drivers(void) | |||
| 1583 | int i; | 1551 | int i; |
| 1584 | 1552 | ||
| 1585 | riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT); | 1553 | riscom_driver = alloc_tty_driver(RC_NBOARD * RC_NPORT); |
| 1586 | if (!riscom_driver) | 1554 | if (!riscom_driver) |
| 1587 | return -ENOMEM; | 1555 | return -ENOMEM; |
| 1588 | 1556 | ||
| 1589 | riscom_driver->owner = THIS_MODULE; | 1557 | riscom_driver->owner = THIS_MODULE; |
| 1590 | riscom_driver->name = "ttyL"; | 1558 | riscom_driver->name = "ttyL"; |
| 1591 | riscom_driver->major = RISCOM8_NORMAL_MAJOR; | 1559 | riscom_driver->major = RISCOM8_NORMAL_MAJOR; |
| @@ -1598,23 +1566,21 @@ static int __init rc_init_drivers(void) | |||
| 1598 | riscom_driver->init_termios.c_ospeed = 9600; | 1566 | riscom_driver->init_termios.c_ospeed = 9600; |
| 1599 | riscom_driver->flags = TTY_DRIVER_REAL_RAW; | 1567 | riscom_driver->flags = TTY_DRIVER_REAL_RAW; |
| 1600 | tty_set_operations(riscom_driver, &riscom_ops); | 1568 | tty_set_operations(riscom_driver, &riscom_ops); |
| 1601 | if ((error = tty_register_driver(riscom_driver))) { | 1569 | error = tty_register_driver(riscom_driver); |
| 1570 | if (error != 0) { | ||
| 1602 | put_tty_driver(riscom_driver); | 1571 | put_tty_driver(riscom_driver); |
| 1603 | printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, " | 1572 | printk(KERN_ERR "rc: Couldn't register RISCom/8 driver, " |
| 1604 | "error = %d\n", | 1573 | "error = %d\n", error); |
| 1605 | error); | ||
| 1606 | return 1; | 1574 | return 1; |
| 1607 | } | 1575 | } |
| 1608 | |||
| 1609 | memset(rc_port, 0, sizeof(rc_port)); | 1576 | memset(rc_port, 0, sizeof(rc_port)); |
| 1610 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { | 1577 | for (i = 0; i < RC_NPORT * RC_NBOARD; i++) { |
| 1611 | rc_port[i].magic = RISCOM8_MAGIC; | 1578 | rc_port[i].magic = RISCOM8_MAGIC; |
| 1612 | rc_port[i].close_delay = 50 * HZ/100; | 1579 | rc_port[i].close_delay = 50 * HZ / 100; |
| 1613 | rc_port[i].closing_wait = 3000 * HZ/100; | 1580 | rc_port[i].closing_wait = 3000 * HZ / 100; |
| 1614 | init_waitqueue_head(&rc_port[i].open_wait); | 1581 | init_waitqueue_head(&rc_port[i].open_wait); |
| 1615 | init_waitqueue_head(&rc_port[i].close_wait); | 1582 | init_waitqueue_head(&rc_port[i].close_wait); |
| 1616 | } | 1583 | } |
| 1617 | |||
| 1618 | return 0; | 1584 | return 0; |
| 1619 | } | 1585 | } |
| 1620 | 1586 | ||
| @@ -1627,13 +1593,13 @@ static void rc_release_drivers(void) | |||
| 1627 | #ifndef MODULE | 1593 | #ifndef MODULE |
| 1628 | /* | 1594 | /* |
| 1629 | * Called at boot time. | 1595 | * Called at boot time. |
| 1630 | * | 1596 | * |
| 1631 | * You can specify IO base for up to RC_NBOARD cards, | 1597 | * You can specify IO base for up to RC_NBOARD cards, |
| 1632 | * using line "riscom8=0xiobase1,0xiobase2,.." at LILO prompt. | 1598 | * using line "riscom8=0xiobase1,0xiobase2,.." at LILO prompt. |
| 1633 | * Note that there will be no probing at default | 1599 | * Note that there will be no probing at default |
| 1634 | * addresses in this case. | 1600 | * addresses in this case. |
| 1635 | * | 1601 | * |
| 1636 | */ | 1602 | */ |
| 1637 | static int __init riscom8_setup(char *str) | 1603 | static int __init riscom8_setup(char *str) |
| 1638 | { | 1604 | { |
| 1639 | int ints[RC_NBOARD]; | 1605 | int ints[RC_NBOARD]; |
| @@ -1644,7 +1610,7 @@ static int __init riscom8_setup(char *str) | |||
| 1644 | for (i = 0; i < RC_NBOARD; i++) { | 1610 | for (i = 0; i < RC_NBOARD; i++) { |
| 1645 | if (i < ints[0]) | 1611 | if (i < ints[0]) |
| 1646 | rc_board[i].base = ints[i+1]; | 1612 | rc_board[i].base = ints[i+1]; |
| 1647 | else | 1613 | else |
| 1648 | rc_board[i].base = 0; | 1614 | rc_board[i].base = 0; |
| 1649 | } | 1615 | } |
| 1650 | return 1; | 1616 | return 1; |
| @@ -1659,8 +1625,8 @@ static char banner[] __initdata = | |||
| 1659 | static char no_boards_msg[] __initdata = | 1625 | static char no_boards_msg[] __initdata = |
| 1660 | KERN_INFO "rc: No RISCom/8 boards detected.\n"; | 1626 | KERN_INFO "rc: No RISCom/8 boards detected.\n"; |
| 1661 | 1627 | ||
| 1662 | /* | 1628 | /* |
| 1663 | * This routine must be called by kernel at boot time | 1629 | * This routine must be called by kernel at boot time |
| 1664 | */ | 1630 | */ |
| 1665 | static int __init riscom8_init(void) | 1631 | static int __init riscom8_init(void) |
| 1666 | { | 1632 | { |
| @@ -1669,13 +1635,12 @@ static int __init riscom8_init(void) | |||
| 1669 | 1635 | ||
| 1670 | printk(banner); | 1636 | printk(banner); |
| 1671 | 1637 | ||
| 1672 | if (rc_init_drivers()) | 1638 | if (rc_init_drivers()) |
| 1673 | return -EIO; | 1639 | return -EIO; |
| 1674 | 1640 | ||
| 1675 | for (i = 0; i < RC_NBOARD; i++) | 1641 | for (i = 0; i < RC_NBOARD; i++) |
| 1676 | if (rc_board[i].base && !rc_probe(&rc_board[i])) | 1642 | if (rc_board[i].base && !rc_probe(&rc_board[i])) |
| 1677 | found++; | 1643 | found++; |
| 1678 | |||
| 1679 | if (!found) { | 1644 | if (!found) { |
| 1680 | rc_release_drivers(); | 1645 | rc_release_drivers(); |
| 1681 | printk(no_boards_msg); | 1646 | printk(no_boards_msg); |
| @@ -1702,13 +1667,13 @@ MODULE_LICENSE("GPL"); | |||
| 1702 | * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameter. | 1667 | * by specifying "iobase=0xXXX iobase1=0xXXX ..." as insmod parameter. |
| 1703 | * | 1668 | * |
| 1704 | */ | 1669 | */ |
| 1705 | static int __init riscom8_init_module (void) | 1670 | static int __init riscom8_init_module(void) |
| 1706 | { | 1671 | { |
| 1707 | #ifdef MODULE | 1672 | #ifdef MODULE |
| 1708 | int i; | 1673 | int i; |
| 1709 | 1674 | ||
| 1710 | if (iobase || iobase1 || iobase2 || iobase3) { | 1675 | if (iobase || iobase1 || iobase2 || iobase3) { |
| 1711 | for(i = 0; i < RC_NBOARD; i++) | 1676 | for (i = 0; i < RC_NBOARD; i++) |
| 1712 | rc_board[i].base = 0; | 1677 | rc_board[i].base = 0; |
| 1713 | } | 1678 | } |
| 1714 | 1679 | ||
| @@ -1724,18 +1689,17 @@ static int __init riscom8_init_module (void) | |||
| 1724 | 1689 | ||
| 1725 | return riscom8_init(); | 1690 | return riscom8_init(); |
| 1726 | } | 1691 | } |
| 1727 | 1692 | ||
| 1728 | static void __exit riscom8_exit_module (void) | 1693 | static void __exit riscom8_exit_module(void) |
| 1729 | { | 1694 | { |
| 1730 | int i; | 1695 | int i; |
| 1731 | 1696 | ||
| 1732 | rc_release_drivers(); | 1697 | rc_release_drivers(); |
| 1733 | for (i = 0; i < RC_NBOARD; i++) | 1698 | for (i = 0; i < RC_NBOARD; i++) |
| 1734 | if (rc_board[i].flags & RC_BOARD_PRESENT) | 1699 | if (rc_board[i].flags & RC_BOARD_PRESENT) |
| 1735 | rc_release_io_range(&rc_board[i]); | 1700 | rc_release_io_range(&rc_board[i]); |
| 1736 | 1701 | ||
| 1737 | } | 1702 | } |
| 1738 | 1703 | ||
| 1739 | module_init(riscom8_init_module); | 1704 | module_init(riscom8_init_module); |
| 1740 | module_exit(riscom8_exit_module); | 1705 | module_exit(riscom8_exit_module); |
| 1741 | |||
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index f585bc8579e9..743dc80a9325 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c | |||
| @@ -449,7 +449,8 @@ static void rp_do_transmit(struct r_port *info) | |||
| 449 | while (1) { | 449 | while (1) { |
| 450 | if (tty->stopped || tty->hw_stopped) | 450 | if (tty->stopped || tty->hw_stopped) |
| 451 | break; | 451 | break; |
| 452 | c = min(info->xmit_fifo_room, min(info->xmit_cnt, XMIT_BUF_SIZE - info->xmit_tail)); | 452 | c = min(info->xmit_fifo_room, info->xmit_cnt); |
| 453 | c = min(c, XMIT_BUF_SIZE - info->xmit_tail); | ||
| 453 | if (c <= 0 || info->xmit_fifo_room <= 0) | 454 | if (c <= 0 || info->xmit_fifo_room <= 0) |
| 454 | break; | 455 | break; |
| 455 | sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); | 456 | sOutStrW(sGetTxRxDataIO(cp), (unsigned short *) (info->xmit_buf + info->xmit_tail), c / 2); |
| @@ -1433,29 +1434,38 @@ static int rp_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1433 | { | 1434 | { |
| 1434 | struct r_port *info = (struct r_port *) tty->driver_data; | 1435 | struct r_port *info = (struct r_port *) tty->driver_data; |
| 1435 | void __user *argp = (void __user *)arg; | 1436 | void __user *argp = (void __user *)arg; |
| 1437 | int ret = 0; | ||
| 1436 | 1438 | ||
| 1437 | if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl")) | 1439 | if (cmd != RCKP_GET_PORTS && rocket_paranoia_check(info, "rp_ioctl")) |
| 1438 | return -ENXIO; | 1440 | return -ENXIO; |
| 1439 | 1441 | ||
| 1442 | lock_kernel(); | ||
| 1443 | |||
| 1440 | switch (cmd) { | 1444 | switch (cmd) { |
| 1441 | case RCKP_GET_STRUCT: | 1445 | case RCKP_GET_STRUCT: |
| 1442 | if (copy_to_user(argp, info, sizeof (struct r_port))) | 1446 | if (copy_to_user(argp, info, sizeof (struct r_port))) |
| 1443 | return -EFAULT; | 1447 | ret = -EFAULT; |
| 1444 | return 0; | 1448 | break; |
| 1445 | case RCKP_GET_CONFIG: | 1449 | case RCKP_GET_CONFIG: |
| 1446 | return get_config(info, argp); | 1450 | ret = get_config(info, argp); |
| 1451 | break; | ||
| 1447 | case RCKP_SET_CONFIG: | 1452 | case RCKP_SET_CONFIG: |
| 1448 | return set_config(info, argp); | 1453 | ret = set_config(info, argp); |
| 1454 | break; | ||
| 1449 | case RCKP_GET_PORTS: | 1455 | case RCKP_GET_PORTS: |
| 1450 | return get_ports(info, argp); | 1456 | ret = get_ports(info, argp); |
| 1457 | break; | ||
| 1451 | case RCKP_RESET_RM2: | 1458 | case RCKP_RESET_RM2: |
| 1452 | return reset_rm2(info, argp); | 1459 | ret = reset_rm2(info, argp); |
| 1460 | break; | ||
| 1453 | case RCKP_GET_VERSION: | 1461 | case RCKP_GET_VERSION: |
| 1454 | return get_version(info, argp); | 1462 | ret = get_version(info, argp); |
| 1463 | break; | ||
| 1455 | default: | 1464 | default: |
| 1456 | return -ENOIOCTLCMD; | 1465 | ret = -ENOIOCTLCMD; |
| 1457 | } | 1466 | } |
| 1458 | return 0; | 1467 | unlock_kernel(); |
| 1468 | return ret; | ||
| 1459 | } | 1469 | } |
| 1460 | 1470 | ||
| 1461 | static void rp_send_xchar(struct tty_struct *tty, char ch) | 1471 | static void rp_send_xchar(struct tty_struct *tty, char ch) |
| @@ -1575,6 +1585,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1575 | jiffies); | 1585 | jiffies); |
| 1576 | printk(KERN_INFO "cps=%d...\n", info->cps); | 1586 | printk(KERN_INFO "cps=%d...\n", info->cps); |
| 1577 | #endif | 1587 | #endif |
| 1588 | lock_kernel(); | ||
| 1578 | while (1) { | 1589 | while (1) { |
| 1579 | txcnt = sGetTxCnt(cp); | 1590 | txcnt = sGetTxCnt(cp); |
| 1580 | if (!txcnt) { | 1591 | if (!txcnt) { |
| @@ -1602,6 +1613,7 @@ static void rp_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1602 | break; | 1613 | break; |
| 1603 | } | 1614 | } |
| 1604 | __set_current_state(TASK_RUNNING); | 1615 | __set_current_state(TASK_RUNNING); |
| 1616 | unlock_kernel(); | ||
| 1605 | #ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT | 1617 | #ifdef ROCKET_DEBUG_WAIT_UNTIL_SENT |
| 1606 | printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies); | 1618 | printk(KERN_INFO "txcnt = %d (jiff=%lu)...done\n", txcnt, jiffies); |
| 1607 | #endif | 1619 | #endif |
| @@ -1651,14 +1663,14 @@ static void rp_hangup(struct tty_struct *tty) | |||
| 1651 | * writing routines will write directly to transmit FIFO. | 1663 | * writing routines will write directly to transmit FIFO. |
| 1652 | * Write buffer and counters protected by spinlocks | 1664 | * Write buffer and counters protected by spinlocks |
| 1653 | */ | 1665 | */ |
| 1654 | static void rp_put_char(struct tty_struct *tty, unsigned char ch) | 1666 | static int rp_put_char(struct tty_struct *tty, unsigned char ch) |
| 1655 | { | 1667 | { |
| 1656 | struct r_port *info = (struct r_port *) tty->driver_data; | 1668 | struct r_port *info = (struct r_port *) tty->driver_data; |
| 1657 | CHANNEL_t *cp; | 1669 | CHANNEL_t *cp; |
| 1658 | unsigned long flags; | 1670 | unsigned long flags; |
| 1659 | 1671 | ||
| 1660 | if (rocket_paranoia_check(info, "rp_put_char")) | 1672 | if (rocket_paranoia_check(info, "rp_put_char")) |
| 1661 | return; | 1673 | return 0; |
| 1662 | 1674 | ||
| 1663 | /* | 1675 | /* |
| 1664 | * Grab the port write mutex, locking out other processes that try to | 1676 | * Grab the port write mutex, locking out other processes that try to |
| @@ -1687,6 +1699,7 @@ static void rp_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1687 | } | 1699 | } |
| 1688 | spin_unlock_irqrestore(&info->slock, flags); | 1700 | spin_unlock_irqrestore(&info->slock, flags); |
| 1689 | mutex_unlock(&info->write_mtx); | 1701 | mutex_unlock(&info->write_mtx); |
| 1702 | return 1; | ||
| 1690 | } | 1703 | } |
| 1691 | 1704 | ||
| 1692 | /* | 1705 | /* |
| @@ -1749,10 +1762,10 @@ static int rp_write(struct tty_struct *tty, | |||
| 1749 | 1762 | ||
| 1750 | /* Write remaining data into the port's xmit_buf */ | 1763 | /* Write remaining data into the port's xmit_buf */ |
| 1751 | while (1) { | 1764 | while (1) { |
| 1752 | if (!info->tty) /* Seemingly obligatory check... */ | 1765 | if (!info->tty) /* Seemingly obligatory check... */ |
| 1753 | goto end; | 1766 | goto end; |
| 1754 | 1767 | c = min(count, XMIT_BUF_SIZE - info->xmit_cnt - 1); | |
| 1755 | c = min(count, min(XMIT_BUF_SIZE - info->xmit_cnt - 1, XMIT_BUF_SIZE - info->xmit_head)); | 1768 | c = min(c, XMIT_BUF_SIZE - info->xmit_head); |
| 1756 | if (c <= 0) | 1769 | if (c <= 0) |
| 1757 | break; | 1770 | break; |
| 1758 | 1771 | ||
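The rocket.c hunks split the nested min() calls in rp_do_transmit() and rp_write() into two separate assignments. The tiny program below demonstrates that this is a pure readability change, not a behavioural one: taking the minimum in two steps gives the same result. The MIN macro merely stands in for the kernel's type-checked min(); the numbers are arbitrary.

#include <stdio.h>
#include <assert.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	int fifo_room = 37, xmit_cnt = 120, to_end_of_buf = 64;

	/* old style: one nested expression */
	int c_old = MIN(fifo_room, MIN(xmit_cnt, to_end_of_buf));

	/* new style: running minimum, one bound per line */
	int c_new = MIN(fifo_room, xmit_cnt);
	c_new = MIN(c_new, to_end_of_buf);

	assert(c_old == c_new);
	printf("both give %d\n", c_new);
	return 0;
}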
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c index df8cd0ca97eb..fd2db07a50fc 100644 --- a/drivers/char/serial167.c +++ b/drivers/char/serial167.c | |||
| @@ -1060,7 +1060,7 @@ static void config_setup(struct cyclades_port *info) | |||
| 1060 | 1060 | ||
| 1061 | } /* config_setup */ | 1061 | } /* config_setup */ |
| 1062 | 1062 | ||
| 1063 | static void cy_put_char(struct tty_struct *tty, unsigned char ch) | 1063 | static int cy_put_char(struct tty_struct *tty, unsigned char ch) |
| 1064 | { | 1064 | { |
| 1065 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; | 1065 | struct cyclades_port *info = (struct cyclades_port *)tty->driver_data; |
| 1066 | unsigned long flags; | 1066 | unsigned long flags; |
| @@ -1070,7 +1070,7 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1070 | #endif | 1070 | #endif |
| 1071 | 1071 | ||
| 1072 | if (serial_paranoia_check(info, tty->name, "cy_put_char")) | 1072 | if (serial_paranoia_check(info, tty->name, "cy_put_char")) |
| 1073 | return; | 1073 | return 0; |
| 1074 | 1074 | ||
| 1075 | if (!info->xmit_buf) | 1075 | if (!info->xmit_buf) |
| 1076 | return; | 1076 | return; |
| @@ -1078,13 +1078,14 @@ static void cy_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1078 | local_irq_save(flags); | 1078 | local_irq_save(flags); |
| 1079 | if (info->xmit_cnt >= PAGE_SIZE - 1) { | 1079 | if (info->xmit_cnt >= PAGE_SIZE - 1) { |
| 1080 | local_irq_restore(flags); | 1080 | local_irq_restore(flags); |
| 1081 | return; | 1081 | return 0; |
| 1082 | } | 1082 | } |
| 1083 | 1083 | ||
| 1084 | info->xmit_buf[info->xmit_head++] = ch; | 1084 | info->xmit_buf[info->xmit_head++] = ch; |
| 1085 | info->xmit_head &= PAGE_SIZE - 1; | 1085 | info->xmit_head &= PAGE_SIZE - 1; |
| 1086 | info->xmit_cnt++; | 1086 | info->xmit_cnt++; |
| 1087 | local_irq_restore(flags); | 1087 | local_irq_restore(flags); |
| 1088 | return 1; | ||
| 1088 | } /* cy_put_char */ | 1089 | } /* cy_put_char */ |
| 1089 | 1090 | ||
| 1090 | static void cy_flush_chars(struct tty_struct *tty) | 1091 | static void cy_flush_chars(struct tty_struct *tty) |
| @@ -1539,6 +1540,8 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1539 | printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */ | 1540 | printk("cy_ioctl %s, cmd = %x arg = %lx\n", tty->name, cmd, arg); /* */ |
| 1540 | #endif | 1541 | #endif |
| 1541 | 1542 | ||
| 1543 | lock_kernel(); | ||
| 1544 | |||
| 1542 | switch (cmd) { | 1545 | switch (cmd) { |
| 1543 | case CYGETMON: | 1546 | case CYGETMON: |
| 1544 | ret_val = get_mon_info(info, argp); | 1547 | ret_val = get_mon_info(info, argp); |
| @@ -1584,18 +1587,6 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1584 | break; | 1587 | break; |
| 1585 | 1588 | ||
| 1586 | /* The following commands are incompletely implemented!!! */ | 1589 | /* The following commands are incompletely implemented!!! */ |
| 1587 | case TIOCGSOFTCAR: | ||
| 1588 | ret_val = | ||
| 1589 | put_user(C_CLOCAL(tty) ? 1 : 0, | ||
| 1590 | (unsigned long __user *)argp); | ||
| 1591 | break; | ||
| 1592 | case TIOCSSOFTCAR: | ||
| 1593 | ret_val = get_user(val, (unsigned long __user *)argp); | ||
| 1594 | if (ret_val) | ||
| 1595 | break; | ||
| 1596 | tty->termios->c_cflag = | ||
| 1597 | ((tty->termios->c_cflag & ~CLOCAL) | (val ? CLOCAL : 0)); | ||
| 1598 | break; | ||
| 1599 | case TIOCGSERIAL: | 1590 | case TIOCGSERIAL: |
| 1600 | ret_val = get_serial_info(info, argp); | 1591 | ret_val = get_serial_info(info, argp); |
| 1601 | break; | 1592 | break; |
| @@ -1605,6 +1596,7 @@ cy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1605 | default: | 1596 | default: |
| 1606 | ret_val = -ENOIOCTLCMD; | 1597 | ret_val = -ENOIOCTLCMD; |
| 1607 | } | 1598 | } |
| 1599 | unlock_kernel(); | ||
| 1608 | 1600 | ||
| 1609 | #ifdef SERIAL_DEBUG_OTHER | 1601 | #ifdef SERIAL_DEBUG_OTHER |
| 1610 | printk("cy_ioctl done\n"); | 1602 | printk("cy_ioctl done\n"); |
| @@ -1683,8 +1675,7 @@ static void cy_close(struct tty_struct *tty, struct file *filp) | |||
| 1683 | if (info->flags & ASYNC_INITIALIZED) | 1675 | if (info->flags & ASYNC_INITIALIZED) |
| 1684 | tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ | 1676 | tty_wait_until_sent(tty, 3000); /* 30 seconds timeout */ |
| 1685 | shutdown(info); | 1677 | shutdown(info); |
| 1686 | if (tty->driver->flush_buffer) | 1678 | cy_flush_buffer(tty); |
| 1687 | tty->driver->flush_buffer(tty); | ||
| 1688 | tty_ldisc_flush(tty); | 1679 | tty_ldisc_flush(tty); |
| 1689 | info->tty = NULL; | 1680 | info->tty = NULL; |
| 1690 | if (info->blocked_open) { | 1681 | if (info->blocked_open) { |
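Besides the int-returning cy_put_char() and the lock_kernel() bracketing of cy_ioctl(), the serial167 hunks drop the TIOCGSOFTCAR/TIOCSSOFTCAR cases from the driver. What those cases did was entirely generic -- report or toggle the CLOCAL bit in the termios flags, never touching the Cyclades hardware -- which is what allows the handling to live once in common tty code instead of being repeated in every driver. The user-space sketch below, using the POSIX termios definitions, shows what the deleted code amounted to; "flags" stands in for tty->termios->c_cflag and the helper names are invented.

#include <stdio.h>
#include <termios.h>

static int get_softcar(tcflag_t flags)
{
	return (flags & CLOCAL) ? 1 : 0;
}

static tcflag_t set_softcar(tcflag_t flags, int on)
{
	return (flags & ~CLOCAL) | (on ? CLOCAL : 0);
}

int main(void)
{
	tcflag_t cflag = CS8 | CREAD;

	printf("soft carrier initially: %d\n", get_softcar(cflag));
	cflag = set_softcar(cflag, 1);
	printf("after set            : %d\n", get_softcar(cflag));
	return 0;
}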
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c index b9c1dba6bd01..8fe099a41065 100644 --- a/drivers/char/snsc.c +++ b/drivers/char/snsc.c | |||
| @@ -80,7 +80,7 @@ scdrv_open(struct inode *inode, struct file *file) | |||
| 80 | sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); | 80 | sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); |
| 81 | if (sd == NULL) { | 81 | if (sd == NULL) { |
| 82 | printk("%s: couldn't allocate subchannel data\n", | 82 | printk("%s: couldn't allocate subchannel data\n", |
| 83 | __FUNCTION__); | 83 | __func__); |
| 84 | return -ENOMEM; | 84 | return -ENOMEM; |
| 85 | } | 85 | } |
| 86 | 86 | ||
| @@ -90,7 +90,7 @@ scdrv_open(struct inode *inode, struct file *file) | |||
| 90 | 90 | ||
| 91 | if (sd->sd_subch < 0) { | 91 | if (sd->sd_subch < 0) { |
| 92 | kfree(sd); | 92 | kfree(sd); |
| 93 | printk("%s: couldn't allocate subchannel\n", __FUNCTION__); | 93 | printk("%s: couldn't allocate subchannel\n", __func__); |
| 94 | return -EBUSY; | 94 | return -EBUSY; |
| 95 | } | 95 | } |
| 96 | 96 | ||
| @@ -110,7 +110,7 @@ scdrv_open(struct inode *inode, struct file *file) | |||
| 110 | if (rv) { | 110 | if (rv) { |
| 111 | ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); | 111 | ia64_sn_irtr_close(sd->sd_nasid, sd->sd_subch); |
| 112 | kfree(sd); | 112 | kfree(sd); |
| 113 | printk("%s: irq request failed (%d)\n", __FUNCTION__, rv); | 113 | printk("%s: irq request failed (%d)\n", __func__, rv); |
| 114 | return -EBUSY; | 114 | return -EBUSY; |
| 115 | } | 115 | } |
| 116 | 116 | ||
| @@ -215,7 +215,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) | |||
| 215 | */ | 215 | */ |
| 216 | if (count < len) { | 216 | if (count < len) { |
| 217 | pr_debug("%s: only accepting %d of %d bytes\n", | 217 | pr_debug("%s: only accepting %d of %d bytes\n", |
| 218 | __FUNCTION__, (int) count, len); | 218 | __func__, (int) count, len); |
| 219 | } | 219 | } |
| 220 | len = min((int) count, len); | 220 | len = min((int) count, len); |
| 221 | if (copy_to_user(buf, sd->sd_rb, len)) | 221 | if (copy_to_user(buf, sd->sd_rb, len)) |
| @@ -384,7 +384,7 @@ scdrv_init(void) | |||
| 384 | if (alloc_chrdev_region(&first_dev, 0, num_cnodes, | 384 | if (alloc_chrdev_region(&first_dev, 0, num_cnodes, |
| 385 | SYSCTL_BASENAME) < 0) { | 385 | SYSCTL_BASENAME) < 0) { |
| 386 | printk("%s: failed to register SN system controller device\n", | 386 | printk("%s: failed to register SN system controller device\n", |
| 387 | __FUNCTION__); | 387 | __func__); |
| 388 | return -ENODEV; | 388 | return -ENODEV; |
| 389 | } | 389 | } |
| 390 | snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME); | 390 | snsc_class = class_create(THIS_MODULE, SYSCTL_BASENAME); |
| @@ -403,7 +403,7 @@ scdrv_init(void) | |||
| 403 | GFP_KERNEL); | 403 | GFP_KERNEL); |
| 404 | if (!scd) { | 404 | if (!scd) { |
| 405 | printk("%s: failed to allocate device info" | 405 | printk("%s: failed to allocate device info" |
| 406 | "for %s/%s\n", __FUNCTION__, | 406 | "for %s/%s\n", __func__, |
| 407 | SYSCTL_BASENAME, devname); | 407 | SYSCTL_BASENAME, devname); |
| 408 | continue; | 408 | continue; |
| 409 | } | 409 | } |
| @@ -412,7 +412,7 @@ scdrv_init(void) | |||
| 412 | scd->scd_nasid = cnodeid_to_nasid(cnode); | 412 | scd->scd_nasid = cnodeid_to_nasid(cnode); |
| 413 | if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) { | 413 | if (!(salbuf = kmalloc(SCDRV_BUFSZ, GFP_KERNEL))) { |
| 414 | printk("%s: failed to allocate driver buffer" | 414 | printk("%s: failed to allocate driver buffer" |
| 415 | "(%s%s)\n", __FUNCTION__, | 415 | "(%s%s)\n", __func__, |
| 416 | SYSCTL_BASENAME, devname); | 416 | SYSCTL_BASENAME, devname); |
| 417 | kfree(scd); | 417 | kfree(scd); |
| 418 | continue; | 418 | continue; |
| @@ -424,7 +424,7 @@ scdrv_init(void) | |||
| 424 | ("%s: failed to initialize SAL for" | 424 | ("%s: failed to initialize SAL for" |
| 425 | " system controller communication" | 425 | " system controller communication" |
| 426 | " (%s/%s): outdated PROM?\n", | 426 | " (%s/%s): outdated PROM?\n", |
| 427 | __FUNCTION__, SYSCTL_BASENAME, devname); | 427 | __func__, SYSCTL_BASENAME, devname); |
| 428 | kfree(scd); | 428 | kfree(scd); |
| 429 | kfree(salbuf); | 429 | kfree(salbuf); |
| 430 | continue; | 430 | continue; |
| @@ -435,7 +435,7 @@ scdrv_init(void) | |||
| 435 | if (cdev_add(&scd->scd_cdev, dev, 1)) { | 435 | if (cdev_add(&scd->scd_cdev, dev, 1)) { |
| 436 | printk("%s: failed to register system" | 436 | printk("%s: failed to register system" |
| 437 | " controller device (%s%s)\n", | 437 | " controller device (%s%s)\n", |
| 438 | __FUNCTION__, SYSCTL_BASENAME, devname); | 438 | __func__, SYSCTL_BASENAME, devname); |
| 439 | kfree(scd); | 439 | kfree(scd); |
| 440 | kfree(salbuf); | 440 | kfree(salbuf); |
| 441 | continue; | 441 | continue; |
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 31a7765eaf73..53b3d44f8c06 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c | |||
| @@ -271,7 +271,7 @@ scdrv_event_init(struct sysctl_data_s *scd) | |||
| 271 | event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); | 271 | event_sd = kzalloc(sizeof (struct subch_data_s), GFP_KERNEL); |
| 272 | if (event_sd == NULL) { | 272 | if (event_sd == NULL) { |
| 273 | printk(KERN_WARNING "%s: couldn't allocate subchannel info" | 273 | printk(KERN_WARNING "%s: couldn't allocate subchannel info" |
| 274 | " for event monitoring\n", __FUNCTION__); | 274 | " for event monitoring\n", __func__); |
| 275 | return; | 275 | return; |
| 276 | } | 276 | } |
| 277 | 277 | ||
| @@ -285,7 +285,7 @@ scdrv_event_init(struct sysctl_data_s *scd) | |||
| 285 | if (event_sd->sd_subch < 0) { | 285 | if (event_sd->sd_subch < 0) { |
| 286 | kfree(event_sd); | 286 | kfree(event_sd); |
| 287 | printk(KERN_WARNING "%s: couldn't open event subchannel\n", | 287 | printk(KERN_WARNING "%s: couldn't open event subchannel\n", |
| 288 | __FUNCTION__); | 288 | __func__); |
| 289 | return; | 289 | return; |
| 290 | } | 290 | } |
| 291 | 291 | ||
| @@ -295,7 +295,7 @@ scdrv_event_init(struct sysctl_data_s *scd) | |||
| 295 | "system controller events", event_sd); | 295 | "system controller events", event_sd); |
| 296 | if (rv) { | 296 | if (rv) { |
| 297 | printk(KERN_WARNING "%s: irq request failed (%d)\n", | 297 | printk(KERN_WARNING "%s: irq request failed (%d)\n", |
| 298 | __FUNCTION__, rv); | 298 | __func__, rv); |
| 299 | ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch); | 299 | ia64_sn_irtr_close(event_sd->sd_nasid, event_sd->sd_subch); |
| 300 | kfree(event_sd); | 300 | kfree(event_sd); |
| 301 | return; | 301 | return; |
diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c index c03ad164c39a..58533de59027 100644 --- a/drivers/char/sonypi.c +++ b/drivers/char/sonypi.c | |||
| @@ -506,7 +506,7 @@ static struct sonypi_device { | |||
| 506 | while (--n && (command)) \ | 506 | while (--n && (command)) \ |
| 507 | udelay(1); \ | 507 | udelay(1); \ |
| 508 | if (!n && (verbose || !quiet)) \ | 508 | if (!n && (verbose || !quiet)) \ |
| 509 | printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __FUNCTION__, __LINE__); \ | 509 | printk(KERN_WARNING "sonypi command failed at %s : %s (line %d)\n", __FILE__, __func__, __LINE__); \ |
| 510 | } | 510 | } |
| 511 | 511 | ||
| 512 | #ifdef CONFIG_ACPI | 512 | #ifdef CONFIG_ACPI |
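The snsc, snsc_event, and sonypi hunks above only replace the old GCC-specific __FUNCTION__ with __func__, the predefined identifier standardised by C99. Both expand to the name of the enclosing function; __func__ is simply the portable spelling. A minimal stand-alone example (the function names are made up):

#include <stdio.h>

static void report(const char *who, int err)
{
	fprintf(stderr, "%s: request failed (%d)\n", who, err);
}

static void probe_subchannel(void)
{
	/* __func__ evaluates to "probe_subchannel" here */
	report(__func__, -16);
}

int main(void)
{
	probe_subchannel();
	return 0;
}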
diff --git a/drivers/char/specialix.c b/drivers/char/specialix.c index 4b5b5b78acb4..2ee4d9893757 100644 --- a/drivers/char/specialix.c +++ b/drivers/char/specialix.c | |||
| @@ -131,8 +131,8 @@ static int sx_rxfifo = SPECIALIX_RXFIFO; | |||
| 131 | #define SX_DEBUG_FIFO 0x0800 | 131 | #define SX_DEBUG_FIFO 0x0800 |
| 132 | 132 | ||
| 133 | 133 | ||
| 134 | #define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__FUNCTION__) | 134 | #define func_enter() dprintk (SX_DEBUG_FLOW, "io8: enter %s\n",__func__) |
| 135 | #define func_exit() dprintk (SX_DEBUG_FLOW, "io8: exit %s\n", __FUNCTION__) | 135 | #define func_exit() dprintk (SX_DEBUG_FLOW, "io8: exit %s\n", __func__) |
| 136 | 136 | ||
| 137 | #define jiffies_from_ms(a) ((((a) * HZ)/1000)+1) | 137 | #define jiffies_from_ms(a) ((((a) * HZ)/1000)+1) |
| 138 | 138 | ||
| @@ -874,7 +874,7 @@ static irqreturn_t sx_interrupt(int dummy, void *dev_id) | |||
| 874 | 874 | ||
| 875 | spin_lock_irqsave(&bp->lock, flags); | 875 | spin_lock_irqsave(&bp->lock, flags); |
| 876 | 876 | ||
| 877 | dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __FUNCTION__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1); | 877 | dprintk (SX_DEBUG_FLOW, "enter %s port %d room: %ld\n", __func__, port_No(sx_get_port(bp, "INT")), SERIAL_XMIT_SIZE - sx_get_port(bp, "ITN")->xmit_cnt - 1); |
| 878 | if (!(bp->flags & SX_BOARD_ACTIVE)) { | 878 | if (!(bp->flags & SX_BOARD_ACTIVE)) { |
| 879 | dprintk (SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n", bp->irq); | 879 | dprintk (SX_DEBUG_IRQ, "sx: False interrupt. irq %d.\n", bp->irq); |
| 880 | spin_unlock_irqrestore(&bp->lock, flags); | 880 | spin_unlock_irqrestore(&bp->lock, flags); |
| @@ -1504,6 +1504,27 @@ static int sx_open(struct tty_struct * tty, struct file * filp) | |||
| 1504 | return 0; | 1504 | return 0; |
| 1505 | } | 1505 | } |
| 1506 | 1506 | ||
| 1507 | static void sx_flush_buffer(struct tty_struct *tty) | ||
| 1508 | { | ||
| 1509 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | ||
| 1510 | unsigned long flags; | ||
| 1511 | struct specialix_board * bp; | ||
| 1512 | |||
| 1513 | func_enter(); | ||
| 1514 | |||
| 1515 | if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) { | ||
| 1516 | func_exit(); | ||
| 1517 | return; | ||
| 1518 | } | ||
| 1519 | |||
| 1520 | bp = port_Board(port); | ||
| 1521 | spin_lock_irqsave(&port->lock, flags); | ||
| 1522 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1523 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 1524 | tty_wakeup(tty); | ||
| 1525 | |||
| 1526 | func_exit(); | ||
| 1527 | } | ||
| 1507 | 1528 | ||
| 1508 | static void sx_close(struct tty_struct * tty, struct file * filp) | 1529 | static void sx_close(struct tty_struct * tty, struct file * filp) |
| 1509 | { | 1530 | { |
| @@ -1597,8 +1618,7 @@ static void sx_close(struct tty_struct * tty, struct file * filp) | |||
| 1597 | } | 1618 | } |
| 1598 | 1619 | ||
| 1599 | sx_shutdown_port(bp, port); | 1620 | sx_shutdown_port(bp, port); |
| 1600 | if (tty->driver->flush_buffer) | 1621 | sx_flush_buffer(tty); |
| 1601 | tty->driver->flush_buffer(tty); | ||
| 1602 | tty_ldisc_flush(tty); | 1622 | tty_ldisc_flush(tty); |
| 1603 | spin_lock_irqsave(&port->lock, flags); | 1623 | spin_lock_irqsave(&port->lock, flags); |
| 1604 | tty->closing = 0; | 1624 | tty->closing = 0; |
| @@ -1670,7 +1690,7 @@ static int sx_write(struct tty_struct * tty, | |||
| 1670 | } | 1690 | } |
| 1671 | 1691 | ||
| 1672 | 1692 | ||
| 1673 | static void sx_put_char(struct tty_struct * tty, unsigned char ch) | 1693 | static int sx_put_char(struct tty_struct * tty, unsigned char ch) |
| 1674 | { | 1694 | { |
| 1675 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | 1695 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; |
| 1676 | unsigned long flags; | 1696 | unsigned long flags; |
| @@ -1680,12 +1700,12 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch) | |||
| 1680 | 1700 | ||
| 1681 | if (sx_paranoia_check(port, tty->name, "sx_put_char")) { | 1701 | if (sx_paranoia_check(port, tty->name, "sx_put_char")) { |
| 1682 | func_exit(); | 1702 | func_exit(); |
| 1683 | return; | 1703 | return 0; |
| 1684 | } | 1704 | } |
| 1685 | dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf); | 1705 | dprintk (SX_DEBUG_TX, "check tty: %p %p\n", tty, port->xmit_buf); |
| 1686 | if (!port->xmit_buf) { | 1706 | if (!port->xmit_buf) { |
| 1687 | func_exit(); | 1707 | func_exit(); |
| 1688 | return; | 1708 | return 0; |
| 1689 | } | 1709 | } |
| 1690 | bp = port_Board(port); | 1710 | bp = port_Board(port); |
| 1691 | spin_lock_irqsave(&port->lock, flags); | 1711 | spin_lock_irqsave(&port->lock, flags); |
| @@ -1695,7 +1715,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch) | |||
| 1695 | spin_unlock_irqrestore(&port->lock, flags); | 1715 | spin_unlock_irqrestore(&port->lock, flags); |
| 1696 | dprintk (SX_DEBUG_TX, "Exit size\n"); | 1716 | dprintk (SX_DEBUG_TX, "Exit size\n"); |
| 1697 | func_exit(); | 1717 | func_exit(); |
| 1698 | return; | 1718 | return 0; |
| 1699 | } | 1719 | } |
| 1700 | dprintk (SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf); | 1720 | dprintk (SX_DEBUG_TX, "Handle xmit: %p %p\n", port, port->xmit_buf); |
| 1701 | port->xmit_buf[port->xmit_head++] = ch; | 1721 | port->xmit_buf[port->xmit_head++] = ch; |
| @@ -1704,6 +1724,7 @@ static void sx_put_char(struct tty_struct * tty, unsigned char ch) | |||
| 1704 | spin_unlock_irqrestore(&port->lock, flags); | 1724 | spin_unlock_irqrestore(&port->lock, flags); |
| 1705 | 1725 | ||
| 1706 | func_exit(); | 1726 | func_exit(); |
| 1727 | return 1; | ||
| 1707 | } | 1728 | } |
| 1708 | 1729 | ||
| 1709 | 1730 | ||
| @@ -1770,28 +1791,6 @@ static int sx_chars_in_buffer(struct tty_struct *tty) | |||
| 1770 | } | 1791 | } |
| 1771 | 1792 | ||
| 1772 | 1793 | ||
| 1773 | static void sx_flush_buffer(struct tty_struct *tty) | ||
| 1774 | { | ||
| 1775 | struct specialix_port *port = (struct specialix_port *)tty->driver_data; | ||
| 1776 | unsigned long flags; | ||
| 1777 | struct specialix_board * bp; | ||
| 1778 | |||
| 1779 | func_enter(); | ||
| 1780 | |||
| 1781 | if (sx_paranoia_check(port, tty->name, "sx_flush_buffer")) { | ||
| 1782 | func_exit(); | ||
| 1783 | return; | ||
| 1784 | } | ||
| 1785 | |||
| 1786 | bp = port_Board(port); | ||
| 1787 | spin_lock_irqsave(&port->lock, flags); | ||
| 1788 | port->xmit_cnt = port->xmit_head = port->xmit_tail = 0; | ||
| 1789 | spin_unlock_irqrestore(&port->lock, flags); | ||
| 1790 | tty_wakeup(tty); | ||
| 1791 | |||
| 1792 | func_exit(); | ||
| 1793 | } | ||
| 1794 | |||
| 1795 | 1794 | ||
| 1796 | static int sx_tiocmget(struct tty_struct *tty, struct file *file) | 1795 | static int sx_tiocmget(struct tty_struct *tty, struct file *file) |
| 1797 | { | 1796 | { |
| @@ -1803,7 +1802,7 @@ static int sx_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 1803 | 1802 | ||
| 1804 | func_enter(); | 1803 | func_enter(); |
| 1805 | 1804 | ||
| 1806 | if (sx_paranoia_check(port, tty->name, __FUNCTION__)) { | 1805 | if (sx_paranoia_check(port, tty->name, __func__)) { |
| 1807 | func_exit(); | 1806 | func_exit(); |
| 1808 | return -ENODEV; | 1807 | return -ENODEV; |
| 1809 | } | 1808 | } |
| @@ -1845,7 +1844,7 @@ static int sx_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1845 | 1844 | ||
| 1846 | func_enter(); | 1845 | func_enter(); |
| 1847 | 1846 | ||
| 1848 | if (sx_paranoia_check(port, tty->name, __FUNCTION__)) { | 1847 | if (sx_paranoia_check(port, tty->name, __func__)) { |
| 1849 | func_exit(); | 1848 | func_exit(); |
| 1850 | return -ENODEV; | 1849 | return -ENODEV; |
| 1851 | } | 1850 | } |
| @@ -1922,29 +1921,13 @@ static inline int sx_set_serial_info(struct specialix_port * port, | |||
| 1922 | int change_speed; | 1921 | int change_speed; |
| 1923 | 1922 | ||
| 1924 | func_enter(); | 1923 | func_enter(); |
| 1925 | /* | 1924 | |
| 1926 | if (!access_ok(VERIFY_READ, (void *) newinfo, sizeof(tmp))) { | ||
| 1927 | func_exit(); | ||
| 1928 | return -EFAULT; | ||
| 1929 | } | ||
| 1930 | */ | ||
| 1931 | if (copy_from_user(&tmp, newinfo, sizeof(tmp))) { | 1925 | if (copy_from_user(&tmp, newinfo, sizeof(tmp))) { |
| 1932 | func_enter(); | 1926 | func_enter(); |
| 1933 | return -EFAULT; | 1927 | return -EFAULT; |
| 1934 | } | 1928 | } |
| 1935 | 1929 | ||
| 1936 | #if 0 | 1930 | lock_kernel(); |
| 1937 | if ((tmp.irq != bp->irq) || | ||
| 1938 | (tmp.port != bp->base) || | ||
| 1939 | (tmp.type != PORT_CIRRUS) || | ||
| 1940 | (tmp.baud_base != (SX_OSCFREQ + CD186x_TPC/2) / CD186x_TPC) || | ||
| 1941 | (tmp.custom_divisor != 0) || | ||
| 1942 | (tmp.xmit_fifo_size != CD186x_NFIFO) || | ||
| 1943 | (tmp.flags & ~SPECIALIX_LEGAL_FLAGS)) { | ||
| 1944 | func_exit(); | ||
| 1945 | return -EINVAL; | ||
| 1946 | } | ||
| 1947 | #endif | ||
| 1948 | 1931 | ||
| 1949 | change_speed = ((port->flags & ASYNC_SPD_MASK) != | 1932 | change_speed = ((port->flags & ASYNC_SPD_MASK) != |
| 1950 | (tmp.flags & ASYNC_SPD_MASK)); | 1933 | (tmp.flags & ASYNC_SPD_MASK)); |
| @@ -1956,6 +1939,7 @@ static inline int sx_set_serial_info(struct specialix_port * port, | |||
| 1956 | ((tmp.flags & ~ASYNC_USR_MASK) != | 1939 | ((tmp.flags & ~ASYNC_USR_MASK) != |
| 1957 | (port->flags & ~ASYNC_USR_MASK))) { | 1940 | (port->flags & ~ASYNC_USR_MASK))) { |
| 1958 | func_exit(); | 1941 | func_exit(); |
| 1942 | unlock_kernel(); | ||
| 1959 | return -EPERM; | 1943 | return -EPERM; |
| 1960 | } | 1944 | } |
| 1961 | port->flags = ((port->flags & ~ASYNC_USR_MASK) | | 1945 | port->flags = ((port->flags & ~ASYNC_USR_MASK) | |
| @@ -1972,6 +1956,7 @@ static inline int sx_set_serial_info(struct specialix_port * port, | |||
| 1972 | sx_change_speed(bp, port); | 1956 | sx_change_speed(bp, port); |
| 1973 | } | 1957 | } |
| 1974 | func_exit(); | 1958 | func_exit(); |
| 1959 | unlock_kernel(); | ||
| 1975 | return 0; | 1960 | return 0; |
| 1976 | } | 1961 | } |
| 1977 | 1962 | ||
| @@ -1984,12 +1969,8 @@ static inline int sx_get_serial_info(struct specialix_port * port, | |||
| 1984 | 1969 | ||
| 1985 | func_enter(); | 1970 | func_enter(); |
| 1986 | 1971 | ||
| 1987 | /* | ||
| 1988 | if (!access_ok(VERIFY_WRITE, (void *) retinfo, sizeof(tmp))) | ||
| 1989 | return -EFAULT; | ||
| 1990 | */ | ||
| 1991 | |||
| 1992 | memset(&tmp, 0, sizeof(tmp)); | 1972 | memset(&tmp, 0, sizeof(tmp)); |
| 1973 | lock_kernel(); | ||
| 1993 | tmp.type = PORT_CIRRUS; | 1974 | tmp.type = PORT_CIRRUS; |
| 1994 | tmp.line = port - sx_port; | 1975 | tmp.line = port - sx_port; |
| 1995 | tmp.port = bp->base; | 1976 | tmp.port = bp->base; |
| @@ -2000,6 +1981,7 @@ static inline int sx_get_serial_info(struct specialix_port * port, | |||
| 2000 | tmp.closing_wait = port->closing_wait * HZ/100; | 1981 | tmp.closing_wait = port->closing_wait * HZ/100; |
| 2001 | tmp.custom_divisor = port->custom_divisor; | 1982 | tmp.custom_divisor = port->custom_divisor; |
| 2002 | tmp.xmit_fifo_size = CD186x_NFIFO; | 1983 | tmp.xmit_fifo_size = CD186x_NFIFO; |
| 1984 | unlock_kernel(); | ||
| 2003 | if (copy_to_user(retinfo, &tmp, sizeof(tmp))) { | 1985 | if (copy_to_user(retinfo, &tmp, sizeof(tmp))) { |
| 2004 | func_exit(); | 1986 | func_exit(); |
| 2005 | return -EFAULT; | 1987 | return -EFAULT; |
| @@ -2045,23 +2027,6 @@ static int sx_ioctl(struct tty_struct * tty, struct file * filp, | |||
| 2045 | sx_send_break(port, arg ? arg*(HZ/10) : HZ/4); | 2027 | sx_send_break(port, arg ? arg*(HZ/10) : HZ/4); |
| 2046 | func_exit(); | 2028 | func_exit(); |
| 2047 | return 0; | 2029 | return 0; |
| 2048 | case TIOCGSOFTCAR: | ||
| 2049 | if (put_user(C_CLOCAL(tty)?1:0, (unsigned long __user *)argp)) { | ||
| 2050 | func_exit(); | ||
| 2051 | return -EFAULT; | ||
| 2052 | } | ||
| 2053 | func_exit(); | ||
| 2054 | return 0; | ||
| 2055 | case TIOCSSOFTCAR: | ||
| 2056 | if (get_user(arg, (unsigned long __user *) argp)) { | ||
| 2057 | func_exit(); | ||
| 2058 | return -EFAULT; | ||
| 2059 | } | ||
| 2060 | tty->termios->c_cflag = | ||
| 2061 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 2062 | (arg ? CLOCAL : 0)); | ||
| 2063 | func_exit(); | ||
| 2064 | return 0; | ||
| 2065 | case TIOCGSERIAL: | 2030 | case TIOCGSERIAL: |
| 2066 | func_exit(); | 2031 | func_exit(); |
| 2067 | return sx_get_serial_info(port, argp); | 2032 | return sx_get_serial_info(port, argp); |
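The specialix hunks above show the pattern this series applies driver by driver: commented-out access_ok() checks and the #if 0 sanity block are deleted, the TIOCGSOFTCAR/TIOCSSOFTCAR cases disappear from the driver ioctl (presumably because the generic tty layer is expected to cover them now), and lock_kernel()/unlock_kernel() move down into sx_set_serial_info()/sx_get_serial_info(). A minimal sketch of that locking shape, using an illustrative my_port structure rather than the real specialix types:

#include <linux/serial.h>
#include <linux/smp_lock.h>
#include <linux/uaccess.h>

/* Hypothetical port structure; only the field the sketch touches. */
struct my_port {
	int flags;
};

static int my_set_serial_info(struct my_port *port,
			      struct serial_struct __user *newinfo)
{
	struct serial_struct tmp;

	/* The user copy happens before the lock is taken. */
	if (copy_from_user(&tmp, newinfo, sizeof(tmp)))
		return -EFAULT;

	lock_kernel();			/* protects port->flags */
	if (!capable(CAP_SYS_ADMIN) &&
	    (tmp.flags & ~ASYNC_USR_MASK) != (port->flags & ~ASYNC_USR_MASK)) {
		unlock_kernel();	/* every exit path drops the BKL */
		return -EPERM;
	}
	port->flags = (port->flags & ~ASYNC_USR_MASK) |
		      (tmp.flags & ASYNC_USR_MASK);
	unlock_kernel();
	return 0;
}

The user-space copy stays outside the lock and each early return pairs with unlock_kernel(); the pre-existing func_enter() where a func_exit() would be expected in the -EFAULT branch is left untouched by the patch.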
diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c index 874aaa08e956..d17be10c5d21 100644 --- a/drivers/char/stallion.c +++ b/drivers/char/stallion.c | |||
| @@ -875,6 +875,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout) | |||
| 875 | timeout = HZ; | 875 | timeout = HZ; |
| 876 | tend = jiffies + timeout; | 876 | tend = jiffies + timeout; |
| 877 | 877 | ||
| 878 | lock_kernel(); | ||
| 878 | while (stl_datastate(portp)) { | 879 | while (stl_datastate(portp)) { |
| 879 | if (signal_pending(current)) | 880 | if (signal_pending(current)) |
| 880 | break; | 881 | break; |
| @@ -882,6 +883,7 @@ static void stl_waituntilsent(struct tty_struct *tty, int timeout) | |||
| 882 | if (time_after_eq(jiffies, tend)) | 883 | if (time_after_eq(jiffies, tend)) |
| 883 | break; | 884 | break; |
| 884 | } | 885 | } |
| 886 | unlock_kernel(); | ||
| 885 | } | 887 | } |
| 886 | 888 | ||
| 887 | /*****************************************************************************/ | 889 | /*****************************************************************************/ |
| @@ -1273,18 +1275,9 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd | |||
| 1273 | 1275 | ||
| 1274 | rc = 0; | 1276 | rc = 0; |
| 1275 | 1277 | ||
| 1278 | lock_kernel(); | ||
| 1279 | |||
| 1276 | switch (cmd) { | 1280 | switch (cmd) { |
| 1277 | case TIOCGSOFTCAR: | ||
| 1278 | rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0), | ||
| 1279 | (unsigned __user *) argp); | ||
| 1280 | break; | ||
| 1281 | case TIOCSSOFTCAR: | ||
| 1282 | if (get_user(ival, (unsigned int __user *) arg)) | ||
| 1283 | return -EFAULT; | ||
| 1284 | tty->termios->c_cflag = | ||
| 1285 | (tty->termios->c_cflag & ~CLOCAL) | | ||
| 1286 | (ival ? CLOCAL : 0); | ||
| 1287 | break; | ||
| 1288 | case TIOCGSERIAL: | 1281 | case TIOCGSERIAL: |
| 1289 | rc = stl_getserial(portp, argp); | 1282 | rc = stl_getserial(portp, argp); |
| 1290 | break; | 1283 | break; |
| @@ -1308,7 +1301,7 @@ static int stl_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd | |||
| 1308 | rc = -ENOIOCTLCMD; | 1301 | rc = -ENOIOCTLCMD; |
| 1309 | break; | 1302 | break; |
| 1310 | } | 1303 | } |
| 1311 | 1304 | unlock_kernel(); | |
| 1312 | return rc; | 1305 | return rc; |
| 1313 | } | 1306 | } |
| 1314 | 1307 | ||
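stallion.c gets the same treatment in two places: stl_waituntilsent() takes the BKL around its polling loop, and stl_ioctl() brackets its whole switch with lock_kernel()/unlock_kernel() while losing the soft-carrier cases. A hedged sketch of the wait-loop form, with my_datastate() and my_port standing in for the driver's own helpers and the delay call assumed rather than shown in the hunk:

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>

static void my_waituntilsent(struct my_port *portp, unsigned long tend)
{
	lock_kernel();
	while (my_datastate(portp)) {		/* transmit data still queued? */
		if (signal_pending(current))
			break;
		msleep_interruptible(20);	/* assumed polling interval */
		if (time_after_eq(jiffies, tend))
			break;
	}
	unlock_kernel();
}

Sleeping here is fine because the BKL is dropped and reacquired across schedule(), which is what makes these push-downs largely mechanical.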
diff --git a/drivers/char/sx.c b/drivers/char/sx.c index a6e1c9ba1217..f39f6fd89350 100644 --- a/drivers/char/sx.c +++ b/drivers/char/sx.c | |||
| @@ -384,11 +384,11 @@ static struct real_driver sx_real_driver = { | |||
| 384 | #define sx_dprintk(f, str...) /* nothing */ | 384 | #define sx_dprintk(f, str...) /* nothing */ |
| 385 | #endif | 385 | #endif |
| 386 | 386 | ||
| 387 | #define func_enter() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__FUNCTION__) | 387 | #define func_enter() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s\n",__func__) |
| 388 | #define func_exit() sx_dprintk(SX_DEBUG_FLOW, "sx: exit %s\n",__FUNCTION__) | 388 | #define func_exit() sx_dprintk(SX_DEBUG_FLOW, "sx: exit %s\n",__func__) |
| 389 | 389 | ||
| 390 | #define func_enter2() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \ | 390 | #define func_enter2() sx_dprintk(SX_DEBUG_FLOW, "sx: enter %s (port %d)\n", \ |
| 391 | __FUNCTION__, port->line) | 391 | __func__, port->line) |
| 392 | 392 | ||
| 393 | /* | 393 | /* |
| 394 | * Firmware loader driver specific routines | 394 | * Firmware loader driver specific routines |
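The sx.c debug macros switch from gcc's __FUNCTION__ extension to the standard C99 __func__ identifier; the output is identical, but __func__ is the form the kernel has been converging on. Roughly, with pr_debug() standing in for the driver's sx_dprintk():

/* __func__ is a predefined identifier, not a macro, so it expands to the
 * enclosing function name in any C99 compiler. */
#define func_enter() pr_debug("sx: enter %s\n", __func__)
#define func_exit()  pr_debug("sx: exit %s\n", __func__)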
| @@ -1574,7 +1574,7 @@ static void sx_close(void *ptr) | |||
| 1574 | sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n", | 1574 | sx_dprintk(SX_DEBUG_CLOSE, "WARNING port count:%d\n", |
| 1575 | port->gs.count); | 1575 | port->gs.count); |
| 1576 | /*printk("%s SETTING port count to zero: %p count: %d\n", | 1576 | /*printk("%s SETTING port count to zero: %p count: %d\n", |
| 1577 | __FUNCTION__, port, port->gs.count); | 1577 | __func__, port, port->gs.count); |
| 1578 | port->gs.count = 0;*/ | 1578 | port->gs.count = 0;*/ |
| 1579 | } | 1579 | } |
| 1580 | 1580 | ||
| @@ -1844,6 +1844,7 @@ static void sx_break(struct tty_struct *tty, int flag) | |||
| 1844 | int rv; | 1844 | int rv; |
| 1845 | 1845 | ||
| 1846 | func_enter(); | 1846 | func_enter(); |
| 1847 | lock_kernel(); | ||
| 1847 | 1848 | ||
| 1848 | if (flag) | 1849 | if (flag) |
| 1849 | rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK); | 1850 | rv = sx_send_command(port, HS_START, -1, HS_IDLE_BREAK); |
| @@ -1852,7 +1853,7 @@ static void sx_break(struct tty_struct *tty, int flag) | |||
| 1852 | if (rv != 1) | 1853 | if (rv != 1) |
| 1853 | printk(KERN_ERR "sx: couldn't send break (%x).\n", | 1854 | printk(KERN_ERR "sx: couldn't send break (%x).\n", |
| 1854 | read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat))); | 1855 | read_sx_byte(port->board, CHAN_OFFSET(port, hi_hstat))); |
| 1855 | 1856 | unlock_kernel(); | |
| 1856 | func_exit(); | 1857 | func_exit(); |
| 1857 | } | 1858 | } |
| 1858 | 1859 | ||
| @@ -1888,23 +1889,12 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp, | |||
| 1888 | int rc; | 1889 | int rc; |
| 1889 | struct sx_port *port = tty->driver_data; | 1890 | struct sx_port *port = tty->driver_data; |
| 1890 | void __user *argp = (void __user *)arg; | 1891 | void __user *argp = (void __user *)arg; |
| 1891 | int ival; | ||
| 1892 | 1892 | ||
| 1893 | /* func_enter2(); */ | 1893 | /* func_enter2(); */ |
| 1894 | 1894 | ||
| 1895 | rc = 0; | 1895 | rc = 0; |
| 1896 | lock_kernel(); | ||
| 1896 | switch (cmd) { | 1897 | switch (cmd) { |
| 1897 | case TIOCGSOFTCAR: | ||
| 1898 | rc = put_user(((tty->termios->c_cflag & CLOCAL) ? 1 : 0), | ||
| 1899 | (unsigned __user *)argp); | ||
| 1900 | break; | ||
| 1901 | case TIOCSSOFTCAR: | ||
| 1902 | if ((rc = get_user(ival, (unsigned __user *)argp)) == 0) { | ||
| 1903 | tty->termios->c_cflag = | ||
| 1904 | (tty->termios->c_cflag & ~CLOCAL) | | ||
| 1905 | (ival ? CLOCAL : 0); | ||
| 1906 | } | ||
| 1907 | break; | ||
| 1908 | case TIOCGSERIAL: | 1898 | case TIOCGSERIAL: |
| 1909 | rc = gs_getserial(&port->gs, argp); | 1899 | rc = gs_getserial(&port->gs, argp); |
| 1910 | break; | 1900 | break; |
| @@ -1915,6 +1905,7 @@ static int sx_ioctl(struct tty_struct *tty, struct file *filp, | |||
| 1915 | rc = -ENOIOCTLCMD; | 1905 | rc = -ENOIOCTLCMD; |
| 1916 | break; | 1906 | break; |
| 1917 | } | 1907 | } |
| 1908 | unlock_kernel(); | ||
| 1918 | 1909 | ||
| 1919 | /* func_exit(); */ | 1910 | /* func_exit(); */ |
| 1920 | return rc; | 1911 | return rc; |
| @@ -2549,7 +2540,7 @@ static int __devinit sx_eisa_probe(struct device *dev) | |||
| 2549 | goto err_flag; | 2540 | goto err_flag; |
| 2550 | } | 2541 | } |
| 2551 | board->base2 = | 2542 | board->base2 = |
| 2552 | board->base = ioremap(board->hw_base, SI2_EISA_WINDOW_LEN); | 2543 | board->base = ioremap_nocache(board->hw_base, SI2_EISA_WINDOW_LEN); |
| 2553 | if (!board->base) { | 2544 | if (!board->base) { |
| 2554 | dev_err(dev, "can't remap memory\n"); | 2545 | dev_err(dev, "can't remap memory\n"); |
| 2555 | goto err_reg; | 2546 | goto err_reg; |
| @@ -2626,7 +2617,7 @@ static void __devinit fix_sx_pci(struct pci_dev *pdev, struct sx_board *board) | |||
| 2626 | 2617 | ||
| 2627 | pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase); | 2618 | pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &hwbase); |
| 2628 | hwbase &= PCI_BASE_ADDRESS_MEM_MASK; | 2619 | hwbase &= PCI_BASE_ADDRESS_MEM_MASK; |
| 2629 | rebase = ioremap(hwbase, 0x80); | 2620 | rebase = ioremap_nocache(hwbase, 0x80); |
| 2630 | t = readl(rebase + CNTRL_REG_OFFSET); | 2621 | t = readl(rebase + CNTRL_REG_OFFSET); |
| 2631 | if (t != CNTRL_REG_GOODVALUE) { | 2622 | if (t != CNTRL_REG_GOODVALUE) { |
| 2632 | printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> " | 2623 | printk(KERN_DEBUG "sx: performing cntrl reg fix: %08x -> " |
| @@ -2770,7 +2761,7 @@ static int __init sx_init(void) | |||
| 2770 | if (!request_region(board->hw_base, board->hw_len, "sx")) | 2761 | if (!request_region(board->hw_base, board->hw_len, "sx")) |
| 2771 | continue; | 2762 | continue; |
| 2772 | board->base2 = | 2763 | board->base2 = |
| 2773 | board->base = ioremap(board->hw_base, board->hw_len); | 2764 | board->base = ioremap_nocache(board->hw_base, board->hw_len); |
| 2774 | if (!board->base) | 2765 | if (!board->base) |
| 2775 | goto err_sx_reg; | 2766 | goto err_sx_reg; |
| 2776 | board->flags &= ~SX_BOARD_TYPE; | 2767 | board->flags &= ~SX_BOARD_TYPE; |
| @@ -2794,7 +2785,7 @@ err_sx_reg: | |||
| 2794 | if (!request_region(board->hw_base, board->hw_len, "sx")) | 2785 | if (!request_region(board->hw_base, board->hw_len, "sx")) |
| 2795 | continue; | 2786 | continue; |
| 2796 | board->base2 = | 2787 | board->base2 = |
| 2797 | board->base = ioremap(board->hw_base, board->hw_len); | 2788 | board->base = ioremap_nocache(board->hw_base, board->hw_len); |
| 2798 | if (!board->base) | 2789 | if (!board->base) |
| 2799 | goto err_si_reg; | 2790 | goto err_si_reg; |
| 2800 | board->flags &= ~SX_BOARD_TYPE; | 2791 | board->flags &= ~SX_BOARD_TYPE; |
| @@ -2817,7 +2808,7 @@ err_si_reg: | |||
| 2817 | if (!request_region(board->hw_base, board->hw_len, "sx")) | 2808 | if (!request_region(board->hw_base, board->hw_len, "sx")) |
| 2818 | continue; | 2809 | continue; |
| 2819 | board->base2 = | 2810 | board->base2 = |
| 2820 | board->base = ioremap(board->hw_base, board->hw_len); | 2811 | board->base = ioremap_nocache(board->hw_base, board->hw_len); |
| 2821 | if (!board->base) | 2812 | if (!board->base) |
| 2822 | goto err_si1_reg; | 2813 | goto err_si1_reg; |
| 2823 | board->flags &= ~SX_BOARD_TYPE; | 2814 | board->flags &= ~SX_BOARD_TYPE; |
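The remaining sx.c hunks add the BKL to sx_break() and sx_ioctl(), drop the soft-carrier cases, and convert every board-window mapping from ioremap() to ioremap_nocache(), making the uncached intent explicit for register-backed memory. A hedged sketch of the mapping pattern; my_map_window() and the request_mem_region() pairing are illustrative choices, not the driver's exact resource handling:

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *my_map_window(unsigned long hw_base,
				   unsigned long hw_len)
{
	void __iomem *base;

	if (!request_mem_region(hw_base, hw_len, "sx"))
		return NULL;

	base = ioremap_nocache(hw_base, hw_len);	/* uncached MMIO */
	if (!base)
		release_mem_region(hw_base, hw_len);
	return base;
}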
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c index fadab1d9510f..513b7c2f3e26 100644 --- a/drivers/char/synclink.c +++ b/drivers/char/synclink.c | |||
| @@ -2026,10 +2026,11 @@ static void mgsl_change_params(struct mgsl_struct *info) | |||
| 2026 | * | 2026 | * |
| 2027 | * Return Value: None | 2027 | * Return Value: None |
| 2028 | */ | 2028 | */ |
| 2029 | static void mgsl_put_char(struct tty_struct *tty, unsigned char ch) | 2029 | static int mgsl_put_char(struct tty_struct *tty, unsigned char ch) |
| 2030 | { | 2030 | { |
| 2031 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; | 2031 | struct mgsl_struct *info = (struct mgsl_struct *)tty->driver_data; |
| 2032 | unsigned long flags; | 2032 | unsigned long flags; |
| 2033 | int ret; | ||
| 2033 | 2034 | ||
| 2034 | if ( debug_level >= DEBUG_LEVEL_INFO ) { | 2035 | if ( debug_level >= DEBUG_LEVEL_INFO ) { |
| 2035 | printk( "%s(%d):mgsl_put_char(%d) on %s\n", | 2036 | printk( "%s(%d):mgsl_put_char(%d) on %s\n", |
| @@ -2037,23 +2038,23 @@ static void mgsl_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 2037 | } | 2038 | } |
| 2038 | 2039 | ||
| 2039 | if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) | 2040 | if (mgsl_paranoia_check(info, tty->name, "mgsl_put_char")) |
| 2040 | return; | 2041 | return 0; |
| 2041 | 2042 | ||
| 2042 | if (!tty || !info->xmit_buf) | 2043 | if (!tty || !info->xmit_buf) |
| 2043 | return; | 2044 | return 0; |
| 2044 | 2045 | ||
| 2045 | spin_lock_irqsave(&info->irq_spinlock,flags); | 2046 | spin_lock_irqsave(&info->irq_spinlock,flags); |
| 2046 | 2047 | ||
| 2047 | if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) { | 2048 | if ( (info->params.mode == MGSL_MODE_ASYNC ) || !info->tx_active ) { |
| 2048 | |||
| 2049 | if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { | 2049 | if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) { |
| 2050 | info->xmit_buf[info->xmit_head++] = ch; | 2050 | info->xmit_buf[info->xmit_head++] = ch; |
| 2051 | info->xmit_head &= SERIAL_XMIT_SIZE-1; | 2051 | info->xmit_head &= SERIAL_XMIT_SIZE-1; |
| 2052 | info->xmit_cnt++; | 2052 | info->xmit_cnt++; |
| 2053 | ret = 1; | ||
| 2053 | } | 2054 | } |
| 2054 | } | 2055 | } |
| 2055 | |||
| 2056 | spin_unlock_irqrestore(&info->irq_spinlock,flags); | 2056 | spin_unlock_irqrestore(&info->irq_spinlock,flags); |
| 2057 | return ret; | ||
| 2057 | 2058 | ||
| 2058 | } /* end of mgsl_put_char() */ | 2059 | } /* end of mgsl_put_char() */ |
| 2059 | 2060 | ||
| @@ -2942,6 +2943,7 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2942 | unsigned int cmd, unsigned long arg) | 2943 | unsigned int cmd, unsigned long arg) |
| 2943 | { | 2944 | { |
| 2944 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; | 2945 | struct mgsl_struct * info = (struct mgsl_struct *)tty->driver_data; |
| 2946 | int ret; | ||
| 2945 | 2947 | ||
| 2946 | if (debug_level >= DEBUG_LEVEL_INFO) | 2948 | if (debug_level >= DEBUG_LEVEL_INFO) |
| 2947 | printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, | 2949 | printk("%s(%d):mgsl_ioctl %s cmd=%08X\n", __FILE__,__LINE__, |
| @@ -2956,7 +2958,10 @@ static int mgsl_ioctl(struct tty_struct *tty, struct file * file, | |||
| 2956 | return -EIO; | 2958 | return -EIO; |
| 2957 | } | 2959 | } |
| 2958 | 2960 | ||
| 2959 | return mgsl_ioctl_common(info, cmd, arg); | 2961 | lock_kernel(); |
| 2962 | ret = mgsl_ioctl_common(info, cmd, arg); | ||
| 2963 | unlock_kernel(); | ||
| 2964 | return ret; | ||
| 2960 | } | 2965 | } |
| 2961 | 2966 | ||
| 2962 | static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) | 2967 | static int mgsl_ioctl_common(struct mgsl_struct *info, unsigned int cmd, unsigned long arg) |
| @@ -3153,8 +3158,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp) | |||
| 3153 | if (info->flags & ASYNC_INITIALIZED) | 3158 | if (info->flags & ASYNC_INITIALIZED) |
| 3154 | mgsl_wait_until_sent(tty, info->timeout); | 3159 | mgsl_wait_until_sent(tty, info->timeout); |
| 3155 | 3160 | ||
| 3156 | if (tty->driver->flush_buffer) | 3161 | mgsl_flush_buffer(tty); |
| 3157 | tty->driver->flush_buffer(tty); | ||
| 3158 | 3162 | ||
| 3159 | tty_ldisc_flush(tty); | 3163 | tty_ldisc_flush(tty); |
| 3160 | 3164 | ||
| @@ -3217,7 +3221,8 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 3217 | * interval should also be less than the timeout. | 3221 | * interval should also be less than the timeout. |
| 3218 | * Note: use tight timings here to satisfy the NIST-PCTS. | 3222 | * Note: use tight timings here to satisfy the NIST-PCTS. |
| 3219 | */ | 3223 | */ |
| 3220 | 3224 | ||
| 3225 | lock_kernel(); | ||
| 3221 | if ( info->params.data_rate ) { | 3226 | if ( info->params.data_rate ) { |
| 3222 | char_time = info->timeout/(32 * 5); | 3227 | char_time = info->timeout/(32 * 5); |
| 3223 | if (!char_time) | 3228 | if (!char_time) |
| @@ -3247,6 +3252,7 @@ static void mgsl_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 3247 | break; | 3252 | break; |
| 3248 | } | 3253 | } |
| 3249 | } | 3254 | } |
| 3255 | unlock_kernel(); | ||
| 3250 | 3256 | ||
| 3251 | exit: | 3257 | exit: |
| 3252 | if (debug_level >= DEBUG_LEVEL_INFO) | 3258 | if (debug_level >= DEBUG_LEVEL_INFO) |
| @@ -4144,7 +4150,8 @@ static int mgsl_claim_resources(struct mgsl_struct *info) | |||
| 4144 | } | 4150 | } |
| 4145 | info->lcr_mem_requested = true; | 4151 | info->lcr_mem_requested = true; |
| 4146 | 4152 | ||
| 4147 | info->memory_base = ioremap(info->phys_memory_base,0x40000); | 4153 | info->memory_base = ioremap_nocache(info->phys_memory_base, |
| 4154 | 0x40000); | ||
| 4148 | if (!info->memory_base) { | 4155 | if (!info->memory_base) { |
| 4149 | printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", | 4156 | printk( "%s(%d):Cant map shared memory on device %s MemAddr=%08X\n", |
| 4150 | __FILE__,__LINE__,info->device_name, info->phys_memory_base ); | 4157 | __FILE__,__LINE__,info->device_name, info->phys_memory_base ); |
| @@ -4157,12 +4164,14 @@ static int mgsl_claim_resources(struct mgsl_struct *info) | |||
| 4157 | goto errout; | 4164 | goto errout; |
| 4158 | } | 4165 | } |
| 4159 | 4166 | ||
| 4160 | info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE) + info->lcr_offset; | 4167 | info->lcr_base = ioremap_nocache(info->phys_lcr_base, |
| 4168 | PAGE_SIZE); | ||
| 4161 | if (!info->lcr_base) { | 4169 | if (!info->lcr_base) { |
| 4162 | printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", | 4170 | printk( "%s(%d):Cant map LCR memory on device %s MemAddr=%08X\n", |
| 4163 | __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); | 4171 | __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); |
| 4164 | goto errout; | 4172 | goto errout; |
| 4165 | } | 4173 | } |
| 4174 | info->lcr_base += info->lcr_offset; | ||
| 4166 | 4175 | ||
| 4167 | } else { | 4176 | } else { |
| 4168 | /* claim DMA channel */ | 4177 | /* claim DMA channel */ |
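synclink.c follows the tty-core direction of the series: mgsl_put_char() now returns int so callers can tell whether the byte was queued, the close path calls mgsl_flush_buffer() directly instead of poking tty->driver->flush_buffer, mgsl_ioctl() and the wait loop take the BKL, and the LCR window is mapped with ioremap_nocache() before the offset is applied. One thing worth noting: the hunk appears to leave ret without an initialiser on the paths that queue nothing, so the sketch below starts from ret = 0.

#include <linux/serial.h>		/* SERIAL_XMIT_SIZE */
#include <linux/spinlock.h>
#include <linux/tty.h>

/* Hedged sketch of the new put_char contract: 1 if queued, 0 if dropped.
 * my_info and its xmit_* fields are illustrative, not the mgsl types. */
static int my_put_char(struct tty_struct *tty, unsigned char ch)
{
	struct my_info *info = tty->driver_data;
	unsigned long flags;
	int ret = 0;				/* assume "dropped" */

	spin_lock_irqsave(&info->lock, flags);
	if (info->xmit_cnt < SERIAL_XMIT_SIZE - 1) {
		info->xmit_buf[info->xmit_head++] = ch;
		info->xmit_head &= SERIAL_XMIT_SIZE - 1;
		info->xmit_cnt++;
		ret = 1;			/* queued */
	}
	spin_unlock_irqrestore(&info->lock, flags);
	return ret;
}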
diff --git a/drivers/char/synclink_gt.c b/drivers/char/synclink_gt.c index f3d8d72e5ea4..2001b0e52dc6 100644 --- a/drivers/char/synclink_gt.c +++ b/drivers/char/synclink_gt.c | |||
| @@ -151,7 +151,7 @@ static void hangup(struct tty_struct *tty); | |||
| 151 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); | 151 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); |
| 152 | 152 | ||
| 153 | static int write(struct tty_struct *tty, const unsigned char *buf, int count); | 153 | static int write(struct tty_struct *tty, const unsigned char *buf, int count); |
| 154 | static void put_char(struct tty_struct *tty, unsigned char ch); | 154 | static int put_char(struct tty_struct *tty, unsigned char ch); |
| 155 | static void send_xchar(struct tty_struct *tty, char ch); | 155 | static void send_xchar(struct tty_struct *tty, char ch); |
| 156 | static void wait_until_sent(struct tty_struct *tty, int timeout); | 156 | static void wait_until_sent(struct tty_struct *tty, int timeout); |
| 157 | static int write_room(struct tty_struct *tty); | 157 | static int write_room(struct tty_struct *tty); |
| @@ -771,8 +771,7 @@ static void close(struct tty_struct *tty, struct file *filp) | |||
| 771 | 771 | ||
| 772 | if (info->flags & ASYNC_INITIALIZED) | 772 | if (info->flags & ASYNC_INITIALIZED) |
| 773 | wait_until_sent(tty, info->timeout); | 773 | wait_until_sent(tty, info->timeout); |
| 774 | if (tty->driver->flush_buffer) | 774 | flush_buffer(tty); |
| 775 | tty->driver->flush_buffer(tty); | ||
| 776 | tty_ldisc_flush(tty); | 775 | tty_ldisc_flush(tty); |
| 777 | 776 | ||
| 778 | shutdown(info); | 777 | shutdown(info); |
| @@ -913,20 +912,24 @@ cleanup: | |||
| 913 | return ret; | 912 | return ret; |
| 914 | } | 913 | } |
| 915 | 914 | ||
| 916 | static void put_char(struct tty_struct *tty, unsigned char ch) | 915 | static int put_char(struct tty_struct *tty, unsigned char ch) |
| 917 | { | 916 | { |
| 918 | struct slgt_info *info = tty->driver_data; | 917 | struct slgt_info *info = tty->driver_data; |
| 919 | unsigned long flags; | 918 | unsigned long flags; |
| 919 | int ret; | ||
| 920 | 920 | ||
| 921 | if (sanity_check(info, tty->name, "put_char")) | 921 | if (sanity_check(info, tty->name, "put_char")) |
| 922 | return; | 922 | return 0; |
| 923 | DBGINFO(("%s put_char(%d)\n", info->device_name, ch)); | 923 | DBGINFO(("%s put_char(%d)\n", info->device_name, ch)); |
| 924 | if (!info->tx_buf) | 924 | if (!info->tx_buf) |
| 925 | return; | 925 | return 0; |
| 926 | spin_lock_irqsave(&info->lock,flags); | 926 | spin_lock_irqsave(&info->lock,flags); |
| 927 | if (!info->tx_active && (info->tx_count < info->max_frame_size)) | 927 | if (!info->tx_active && (info->tx_count < info->max_frame_size)) { |
| 928 | info->tx_buf[info->tx_count++] = ch; | 928 | info->tx_buf[info->tx_count++] = ch; |
| 929 | ret = 1; | ||
| 930 | } | ||
| 929 | spin_unlock_irqrestore(&info->lock,flags); | 931 | spin_unlock_irqrestore(&info->lock,flags); |
| 932 | return ret; | ||
| 930 | } | 933 | } |
| 931 | 934 | ||
| 932 | static void send_xchar(struct tty_struct *tty, char ch) | 935 | static void send_xchar(struct tty_struct *tty, char ch) |
| @@ -967,6 +970,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 967 | * Note: use tight timings here to satisfy the NIST-PCTS. | 970 | * Note: use tight timings here to satisfy the NIST-PCTS. |
| 968 | */ | 971 | */ |
| 969 | 972 | ||
| 973 | lock_kernel(); | ||
| 974 | |||
| 970 | if (info->params.data_rate) { | 975 | if (info->params.data_rate) { |
| 971 | char_time = info->timeout/(32 * 5); | 976 | char_time = info->timeout/(32 * 5); |
| 972 | if (!char_time) | 977 | if (!char_time) |
| @@ -984,6 +989,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 984 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) | 989 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) |
| 985 | break; | 990 | break; |
| 986 | } | 991 | } |
| 992 | unlock_kernel(); | ||
| 987 | 993 | ||
| 988 | exit: | 994 | exit: |
| 989 | DBGINFO(("%s wait_until_sent exit\n", info->device_name)); | 995 | DBGINFO(("%s wait_until_sent exit\n", info->device_name)); |
| @@ -1097,6 +1103,7 @@ static int ioctl(struct tty_struct *tty, struct file *file, | |||
| 1097 | struct serial_icounter_struct __user *p_cuser; /* user space */ | 1103 | struct serial_icounter_struct __user *p_cuser; /* user space */ |
| 1098 | unsigned long flags; | 1104 | unsigned long flags; |
| 1099 | void __user *argp = (void __user *)arg; | 1105 | void __user *argp = (void __user *)arg; |
| 1106 | int ret; | ||
| 1100 | 1107 | ||
| 1101 | if (sanity_check(info, tty->name, "ioctl")) | 1108 | if (sanity_check(info, tty->name, "ioctl")) |
| 1102 | return -ENODEV; | 1109 | return -ENODEV; |
| @@ -1108,37 +1115,54 @@ static int ioctl(struct tty_struct *tty, struct file *file, | |||
| 1108 | return -EIO; | 1115 | return -EIO; |
| 1109 | } | 1116 | } |
| 1110 | 1117 | ||
| 1118 | lock_kernel(); | ||
| 1119 | |||
| 1111 | switch (cmd) { | 1120 | switch (cmd) { |
| 1112 | case MGSL_IOCGPARAMS: | 1121 | case MGSL_IOCGPARAMS: |
| 1113 | return get_params(info, argp); | 1122 | ret = get_params(info, argp); |
| 1123 | break; | ||
| 1114 | case MGSL_IOCSPARAMS: | 1124 | case MGSL_IOCSPARAMS: |
| 1115 | return set_params(info, argp); | 1125 | ret = set_params(info, argp); |
| 1126 | break; | ||
| 1116 | case MGSL_IOCGTXIDLE: | 1127 | case MGSL_IOCGTXIDLE: |
| 1117 | return get_txidle(info, argp); | 1128 | ret = get_txidle(info, argp); |
| 1129 | break; | ||
| 1118 | case MGSL_IOCSTXIDLE: | 1130 | case MGSL_IOCSTXIDLE: |
| 1119 | return set_txidle(info, (int)arg); | 1131 | ret = set_txidle(info, (int)arg); |
| 1132 | break; | ||
| 1120 | case MGSL_IOCTXENABLE: | 1133 | case MGSL_IOCTXENABLE: |
| 1121 | return tx_enable(info, (int)arg); | 1134 | ret = tx_enable(info, (int)arg); |
| 1135 | break; | ||
| 1122 | case MGSL_IOCRXENABLE: | 1136 | case MGSL_IOCRXENABLE: |
| 1123 | return rx_enable(info, (int)arg); | 1137 | ret = rx_enable(info, (int)arg); |
| 1138 | break; | ||
| 1124 | case MGSL_IOCTXABORT: | 1139 | case MGSL_IOCTXABORT: |
| 1125 | return tx_abort(info); | 1140 | ret = tx_abort(info); |
| 1141 | break; | ||
| 1126 | case MGSL_IOCGSTATS: | 1142 | case MGSL_IOCGSTATS: |
| 1127 | return get_stats(info, argp); | 1143 | ret = get_stats(info, argp); |
| 1144 | break; | ||
| 1128 | case MGSL_IOCWAITEVENT: | 1145 | case MGSL_IOCWAITEVENT: |
| 1129 | return wait_mgsl_event(info, argp); | 1146 | ret = wait_mgsl_event(info, argp); |
| 1147 | break; | ||
| 1130 | case TIOCMIWAIT: | 1148 | case TIOCMIWAIT: |
| 1131 | return modem_input_wait(info,(int)arg); | 1149 | ret = modem_input_wait(info,(int)arg); |
| 1150 | break; | ||
| 1132 | case MGSL_IOCGIF: | 1151 | case MGSL_IOCGIF: |
| 1133 | return get_interface(info, argp); | 1152 | ret = get_interface(info, argp); |
| 1153 | break; | ||
| 1134 | case MGSL_IOCSIF: | 1154 | case MGSL_IOCSIF: |
| 1135 | return set_interface(info,(int)arg); | 1155 | ret = set_interface(info,(int)arg); |
| 1156 | break; | ||
| 1136 | case MGSL_IOCSGPIO: | 1157 | case MGSL_IOCSGPIO: |
| 1137 | return set_gpio(info, argp); | 1158 | ret = set_gpio(info, argp); |
| 1159 | break; | ||
| 1138 | case MGSL_IOCGGPIO: | 1160 | case MGSL_IOCGGPIO: |
| 1139 | return get_gpio(info, argp); | 1161 | ret = get_gpio(info, argp); |
| 1162 | break; | ||
| 1140 | case MGSL_IOCWAITGPIO: | 1163 | case MGSL_IOCWAITGPIO: |
| 1141 | return wait_gpio(info, argp); | 1164 | ret = wait_gpio(info, argp); |
| 1165 | break; | ||
| 1142 | case TIOCGICOUNT: | 1166 | case TIOCGICOUNT: |
| 1143 | spin_lock_irqsave(&info->lock,flags); | 1167 | spin_lock_irqsave(&info->lock,flags); |
| 1144 | cnow = info->icount; | 1168 | cnow = info->icount; |
| @@ -1155,12 +1179,14 @@ static int ioctl(struct tty_struct *tty, struct file *file, | |||
| 1155 | put_user(cnow.parity, &p_cuser->parity) || | 1179 | put_user(cnow.parity, &p_cuser->parity) || |
| 1156 | put_user(cnow.brk, &p_cuser->brk) || | 1180 | put_user(cnow.brk, &p_cuser->brk) || |
| 1157 | put_user(cnow.buf_overrun, &p_cuser->buf_overrun)) | 1181 | put_user(cnow.buf_overrun, &p_cuser->buf_overrun)) |
| 1158 | return -EFAULT; | 1182 | ret = -EFAULT; |
| 1159 | return 0; | 1183 | ret = 0; |
| 1184 | break; | ||
| 1160 | default: | 1185 | default: |
| 1161 | return -ENOIOCTLCMD; | 1186 | ret = -ENOIOCTLCMD; |
| 1162 | } | 1187 | } |
| 1163 | return 0; | 1188 | unlock_kernel(); |
| 1189 | return ret; | ||
| 1164 | } | 1190 | } |
| 1165 | 1191 | ||
| 1166 | /* | 1192 | /* |
| @@ -3324,7 +3350,7 @@ static int claim_resources(struct slgt_info *info) | |||
| 3324 | else | 3350 | else |
| 3325 | info->reg_addr_requested = true; | 3351 | info->reg_addr_requested = true; |
| 3326 | 3352 | ||
| 3327 | info->reg_addr = ioremap(info->phys_reg_addr, SLGT_REG_SIZE); | 3353 | info->reg_addr = ioremap_nocache(info->phys_reg_addr, SLGT_REG_SIZE); |
| 3328 | if (!info->reg_addr) { | 3354 | if (!info->reg_addr) { |
| 3329 | DBGERR(("%s cant map device registers, addr=%08X\n", | 3355 | DBGERR(("%s cant map device registers, addr=%08X\n", |
| 3330 | info->device_name, info->phys_reg_addr)); | 3356 | info->device_name, info->phys_reg_addr)); |
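synclink_gt makes the same put_char and flush_buffer changes, then converts its ioctl switch from early returns to a single ret/break/unlock_kernel() exit so the BKL is taken exactly once. The TIOCGICOUNT branch above shows how easy it is to lose an error path in that kind of rewrite: ret = -EFAULT is immediately overwritten by ret = 0. A hedged sketch of the intended shape, with my_info standing in for slgt_info and copy_icount_to_user() as a hypothetical helper wrapping the long put_user() chain:

#include <linux/smp_lock.h>
#include <linux/tty.h>
#include <linux/uaccess.h>

static int my_ioctl(struct tty_struct *tty, struct file *file,
		    unsigned int cmd, unsigned long arg)
{
	struct my_info *info = tty->driver_data;
	void __user *argp = (void __user *)arg;
	int ret;

	lock_kernel();
	switch (cmd) {
	case MGSL_IOCGPARAMS:
		ret = get_params(info, argp);
		break;
	case TIOCGICOUNT:
		/* keep the -EFAULT result instead of overwriting it */
		ret = copy_icount_to_user(info, argp) ? -EFAULT : 0;
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}
	unlock_kernel();
	return ret;
}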
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c index e98c3e6f8216..bec54866e0bb 100644 --- a/drivers/char/synclinkmp.c +++ b/drivers/char/synclinkmp.c | |||
| @@ -519,7 +519,7 @@ static void hangup(struct tty_struct *tty); | |||
| 519 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); | 519 | static void set_termios(struct tty_struct *tty, struct ktermios *old_termios); |
| 520 | 520 | ||
| 521 | static int write(struct tty_struct *tty, const unsigned char *buf, int count); | 521 | static int write(struct tty_struct *tty, const unsigned char *buf, int count); |
| 522 | static void put_char(struct tty_struct *tty, unsigned char ch); | 522 | static int put_char(struct tty_struct *tty, unsigned char ch); |
| 523 | static void send_xchar(struct tty_struct *tty, char ch); | 523 | static void send_xchar(struct tty_struct *tty, char ch); |
| 524 | static void wait_until_sent(struct tty_struct *tty, int timeout); | 524 | static void wait_until_sent(struct tty_struct *tty, int timeout); |
| 525 | static int write_room(struct tty_struct *tty); | 525 | static int write_room(struct tty_struct *tty); |
| @@ -862,8 +862,7 @@ static void close(struct tty_struct *tty, struct file *filp) | |||
| 862 | if (info->flags & ASYNC_INITIALIZED) | 862 | if (info->flags & ASYNC_INITIALIZED) |
| 863 | wait_until_sent(tty, info->timeout); | 863 | wait_until_sent(tty, info->timeout); |
| 864 | 864 | ||
| 865 | if (tty->driver->flush_buffer) | 865 | flush_buffer(tty); |
| 866 | tty->driver->flush_buffer(tty); | ||
| 867 | 866 | ||
| 868 | tty_ldisc_flush(tty); | 867 | tty_ldisc_flush(tty); |
| 869 | 868 | ||
| @@ -1046,10 +1045,11 @@ cleanup: | |||
| 1046 | 1045 | ||
| 1047 | /* Add a character to the transmit buffer. | 1046 | /* Add a character to the transmit buffer. |
| 1048 | */ | 1047 | */ |
| 1049 | static void put_char(struct tty_struct *tty, unsigned char ch) | 1048 | static int put_char(struct tty_struct *tty, unsigned char ch) |
| 1050 | { | 1049 | { |
| 1051 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1050 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; |
| 1052 | unsigned long flags; | 1051 | unsigned long flags; |
| 1052 | int ret = 0; | ||
| 1053 | 1053 | ||
| 1054 | if ( debug_level >= DEBUG_LEVEL_INFO ) { | 1054 | if ( debug_level >= DEBUG_LEVEL_INFO ) { |
| 1055 | printk( "%s(%d):%s put_char(%d)\n", | 1055 | printk( "%s(%d):%s put_char(%d)\n", |
| @@ -1057,10 +1057,10 @@ static void put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1057 | } | 1057 | } |
| 1058 | 1058 | ||
| 1059 | if (sanity_check(info, tty->name, "put_char")) | 1059 | if (sanity_check(info, tty->name, "put_char")) |
| 1060 | return; | 1060 | return 0; |
| 1061 | 1061 | ||
| 1062 | if (!info->tx_buf) | 1062 | if (!info->tx_buf) |
| 1063 | return; | 1063 | return 0; |
| 1064 | 1064 | ||
| 1065 | spin_lock_irqsave(&info->lock,flags); | 1065 | spin_lock_irqsave(&info->lock,flags); |
| 1066 | 1066 | ||
| @@ -1072,10 +1072,12 @@ static void put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1072 | if (info->tx_put >= info->max_frame_size) | 1072 | if (info->tx_put >= info->max_frame_size) |
| 1073 | info->tx_put -= info->max_frame_size; | 1073 | info->tx_put -= info->max_frame_size; |
| 1074 | info->tx_count++; | 1074 | info->tx_count++; |
| 1075 | ret = 1; | ||
| 1075 | } | 1076 | } |
| 1076 | } | 1077 | } |
| 1077 | 1078 | ||
| 1078 | spin_unlock_irqrestore(&info->lock,flags); | 1079 | spin_unlock_irqrestore(&info->lock,flags); |
| 1080 | return ret; | ||
| 1079 | } | 1081 | } |
| 1080 | 1082 | ||
| 1081 | /* Send a high-priority XON/XOFF character | 1083 | /* Send a high-priority XON/XOFF character |
| @@ -1119,6 +1121,8 @@ static void wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1119 | if (sanity_check(info, tty->name, "wait_until_sent")) | 1121 | if (sanity_check(info, tty->name, "wait_until_sent")) |
| 1120 | return; | 1122 | return; |
| 1121 | 1123 | ||
| 1124 | lock_kernel(); | ||
| 1125 | |||
| 1122 | if (!(info->flags & ASYNC_INITIALIZED)) | 1126 | if (!(info->flags & ASYNC_INITIALIZED)) |
| 1123 | goto exit; | 1127 | goto exit; |
| 1124 | 1128 | ||
| @@ -1161,6 +1165,7 @@ static void wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1161 | } | 1165 | } |
| 1162 | 1166 | ||
| 1163 | exit: | 1167 | exit: |
| 1168 | unlock_kernel(); | ||
| 1164 | if (debug_level >= DEBUG_LEVEL_INFO) | 1169 | if (debug_level >= DEBUG_LEVEL_INFO) |
| 1165 | printk("%s(%d):%s wait_until_sent() exit\n", | 1170 | printk("%s(%d):%s wait_until_sent() exit\n", |
| 1166 | __FILE__,__LINE__, info->device_name ); | 1171 | __FILE__,__LINE__, info->device_name ); |
| @@ -1176,6 +1181,7 @@ static int write_room(struct tty_struct *tty) | |||
| 1176 | if (sanity_check(info, tty->name, "write_room")) | 1181 | if (sanity_check(info, tty->name, "write_room")) |
| 1177 | return 0; | 1182 | return 0; |
| 1178 | 1183 | ||
| 1184 | lock_kernel(); | ||
| 1179 | if (info->params.mode == MGSL_MODE_HDLC) { | 1185 | if (info->params.mode == MGSL_MODE_HDLC) { |
| 1180 | ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE; | 1186 | ret = (info->tx_active) ? 0 : HDLC_MAX_FRAME_SIZE; |
| 1181 | } else { | 1187 | } else { |
| @@ -1183,6 +1189,7 @@ static int write_room(struct tty_struct *tty) | |||
| 1183 | if (ret < 0) | 1189 | if (ret < 0) |
| 1184 | ret = 0; | 1190 | ret = 0; |
| 1185 | } | 1191 | } |
| 1192 | unlock_kernel(); | ||
| 1186 | 1193 | ||
| 1187 | if (debug_level >= DEBUG_LEVEL_INFO) | 1194 | if (debug_level >= DEBUG_LEVEL_INFO) |
| 1188 | printk("%s(%d):%s write_room()=%d\n", | 1195 | printk("%s(%d):%s write_room()=%d\n", |
| @@ -1303,7 +1310,7 @@ static void tx_release(struct tty_struct *tty) | |||
| 1303 | * | 1310 | * |
| 1304 | * Return Value: 0 if success, otherwise error code | 1311 | * Return Value: 0 if success, otherwise error code |
| 1305 | */ | 1312 | */ |
| 1306 | static int ioctl(struct tty_struct *tty, struct file *file, | 1313 | static int do_ioctl(struct tty_struct *tty, struct file *file, |
| 1307 | unsigned int cmd, unsigned long arg) | 1314 | unsigned int cmd, unsigned long arg) |
| 1308 | { | 1315 | { |
| 1309 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; | 1316 | SLMP_INFO *info = (SLMP_INFO *)tty->driver_data; |
| @@ -1393,6 +1400,16 @@ static int ioctl(struct tty_struct *tty, struct file *file, | |||
| 1393 | return 0; | 1400 | return 0; |
| 1394 | } | 1401 | } |
| 1395 | 1402 | ||
| 1403 | static int ioctl(struct tty_struct *tty, struct file *file, | ||
| 1404 | unsigned int cmd, unsigned long arg) | ||
| 1405 | { | ||
| 1406 | int ret; | ||
| 1407 | lock_kernel(); | ||
| 1408 | ret = do_ioctl(tty, file, cmd, arg); | ||
| 1409 | unlock_kernel(); | ||
| 1410 | return ret; | ||
| 1411 | } | ||
| 1412 | |||
| 1396 | /* | 1413 | /* |
| 1397 | * /proc fs routines.... | 1414 | * /proc fs routines.... |
| 1398 | */ | 1415 | */ |
| @@ -3626,7 +3643,8 @@ static int claim_resources(SLMP_INFO *info) | |||
| 3626 | else | 3643 | else |
| 3627 | info->sca_statctrl_requested = true; | 3644 | info->sca_statctrl_requested = true; |
| 3628 | 3645 | ||
| 3629 | info->memory_base = ioremap(info->phys_memory_base,SCA_MEM_SIZE); | 3646 | info->memory_base = ioremap_nocache(info->phys_memory_base, |
| 3647 | SCA_MEM_SIZE); | ||
| 3630 | if (!info->memory_base) { | 3648 | if (!info->memory_base) { |
| 3631 | printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n", | 3649 | printk( "%s(%d):%s Cant map shared memory, MemAddr=%08X\n", |
| 3632 | __FILE__,__LINE__,info->device_name, info->phys_memory_base ); | 3650 | __FILE__,__LINE__,info->device_name, info->phys_memory_base ); |
| @@ -3634,7 +3652,7 @@ static int claim_resources(SLMP_INFO *info) | |||
| 3634 | goto errout; | 3652 | goto errout; |
| 3635 | } | 3653 | } |
| 3636 | 3654 | ||
| 3637 | info->lcr_base = ioremap(info->phys_lcr_base,PAGE_SIZE); | 3655 | info->lcr_base = ioremap_nocache(info->phys_lcr_base, PAGE_SIZE); |
| 3638 | if (!info->lcr_base) { | 3656 | if (!info->lcr_base) { |
| 3639 | printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n", | 3657 | printk( "%s(%d):%s Cant map LCR memory, MemAddr=%08X\n", |
| 3640 | __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); | 3658 | __FILE__,__LINE__,info->device_name, info->phys_lcr_base ); |
| @@ -3643,7 +3661,7 @@ static int claim_resources(SLMP_INFO *info) | |||
| 3643 | } | 3661 | } |
| 3644 | info->lcr_base += info->lcr_offset; | 3662 | info->lcr_base += info->lcr_offset; |
| 3645 | 3663 | ||
| 3646 | info->sca_base = ioremap(info->phys_sca_base,PAGE_SIZE); | 3664 | info->sca_base = ioremap_nocache(info->phys_sca_base, PAGE_SIZE); |
| 3647 | if (!info->sca_base) { | 3665 | if (!info->sca_base) { |
| 3648 | printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n", | 3666 | printk( "%s(%d):%s Cant map SCA memory, MemAddr=%08X\n", |
| 3649 | __FILE__,__LINE__,info->device_name, info->phys_sca_base ); | 3667 | __FILE__,__LINE__,info->device_name, info->phys_sca_base ); |
| @@ -3652,7 +3670,8 @@ static int claim_resources(SLMP_INFO *info) | |||
| 3652 | } | 3670 | } |
| 3653 | info->sca_base += info->sca_offset; | 3671 | info->sca_base += info->sca_offset; |
| 3654 | 3672 | ||
| 3655 | info->statctrl_base = ioremap(info->phys_statctrl_base,PAGE_SIZE); | 3673 | info->statctrl_base = ioremap_nocache(info->phys_statctrl_base, |
| 3674 | PAGE_SIZE); | ||
| 3656 | if (!info->statctrl_base) { | 3675 | if (!info->statctrl_base) { |
| 3657 | printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n", | 3676 | printk( "%s(%d):%s Cant map SCA Status/Control memory, MemAddr=%08X\n", |
| 3658 | __FILE__,__LINE__,info->device_name, info->phys_statctrl_base ); | 3677 | __FILE__,__LINE__,info->device_name, info->phys_statctrl_base ); |
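synclinkmp takes a lighter route to the same goal: the existing handler is renamed do_ioctl() and kept with its early returns, and a thin ioctl() shim takes and drops the BKL around the call. That avoids the per-case rewrite synclink_gt needed, at the cost of keeping the whole body under the lock. The wrapper, roughly, with do_my_ioctl() standing in for the renamed driver function:

#include <linux/smp_lock.h>
#include <linux/tty.h>

static int my_ioctl(struct tty_struct *tty, struct file *file,
		    unsigned int cmd, unsigned long arg)
{
	int ret;

	lock_kernel();
	ret = do_my_ioctl(tty, file, cmd, arg);
	unlock_kernel();
	return ret;
}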
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 2fa6856706ab..1d298c2cf930 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
| @@ -91,7 +91,6 @@ | |||
| 91 | #include <linux/module.h> | 91 | #include <linux/module.h> |
| 92 | #include <linux/smp_lock.h> | 92 | #include <linux/smp_lock.h> |
| 93 | #include <linux/device.h> | 93 | #include <linux/device.h> |
| 94 | #include <linux/idr.h> | ||
| 95 | #include <linux/wait.h> | 94 | #include <linux/wait.h> |
| 96 | #include <linux/bitops.h> | 95 | #include <linux/bitops.h> |
| 97 | #include <linux/delay.h> | 96 | #include <linux/delay.h> |
| @@ -137,9 +136,6 @@ EXPORT_SYMBOL(tty_mutex); | |||
| 137 | 136 | ||
| 138 | #ifdef CONFIG_UNIX98_PTYS | 137 | #ifdef CONFIG_UNIX98_PTYS |
| 139 | extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ | 138 | extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ |
| 140 | extern int pty_limit; /* Config limit on Unix98 ptys */ | ||
| 141 | static DEFINE_IDR(allocated_ptys); | ||
| 142 | static DEFINE_MUTEX(allocated_ptys_lock); | ||
| 143 | static int ptmx_open(struct inode *, struct file *); | 139 | static int ptmx_open(struct inode *, struct file *); |
| 144 | #endif | 140 | #endif |
| 145 | 141 | ||
| @@ -152,8 +148,7 @@ ssize_t redirected_tty_write(struct file *, const char __user *, | |||
| 152 | static unsigned int tty_poll(struct file *, poll_table *); | 148 | static unsigned int tty_poll(struct file *, poll_table *); |
| 153 | static int tty_open(struct inode *, struct file *); | 149 | static int tty_open(struct inode *, struct file *); |
| 154 | static int tty_release(struct inode *, struct file *); | 150 | static int tty_release(struct inode *, struct file *); |
| 155 | int tty_ioctl(struct inode *inode, struct file *file, | 151 | long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
| 156 | unsigned int cmd, unsigned long arg); | ||
| 157 | #ifdef CONFIG_COMPAT | 152 | #ifdef CONFIG_COMPAT |
| 158 | static long tty_compat_ioctl(struct file *file, unsigned int cmd, | 153 | static long tty_compat_ioctl(struct file *file, unsigned int cmd, |
| 159 | unsigned long arg); | 154 | unsigned long arg); |
| @@ -1109,8 +1104,8 @@ restart: | |||
| 1109 | a reference to the old ldisc. If we ended up flipping back | 1104 | a reference to the old ldisc. If we ended up flipping back |
| 1110 | to the existing ldisc we have two references to it */ | 1105 | to the existing ldisc we have two references to it */ |
| 1111 | 1106 | ||
| 1112 | if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc) | 1107 | if (tty->ldisc.num != o_ldisc.num && tty->ops->set_ldisc) |
| 1113 | tty->driver->set_ldisc(tty); | 1108 | tty->ops->set_ldisc(tty); |
| 1114 | 1109 | ||
| 1115 | tty_ldisc_put(o_ldisc.num); | 1110 | tty_ldisc_put(o_ldisc.num); |
| 1116 | 1111 | ||
| @@ -1182,9 +1177,8 @@ struct tty_driver *tty_find_polling_driver(char *name, int *line) | |||
| 1182 | if (*str == '\0') | 1177 | if (*str == '\0') |
| 1183 | str = NULL; | 1178 | str = NULL; |
| 1184 | 1179 | ||
| 1185 | if (tty_line >= 0 && tty_line <= p->num && p->poll_init && | 1180 | if (tty_line >= 0 && tty_line <= p->num && p->ops && |
| 1186 | !p->poll_init(p, tty_line, str)) { | 1181 | p->ops->poll_init && !p->ops->poll_init(p, tty_line, str)) { |
| 1187 | |||
| 1188 | res = p; | 1182 | res = p; |
| 1189 | *line = tty_line; | 1183 | *line = tty_line; |
| 1190 | break; | 1184 | break; |
| @@ -1205,26 +1199,37 @@ EXPORT_SYMBOL_GPL(tty_find_polling_driver); | |||
| 1205 | * not in the foreground, send a SIGTTOU. If the signal is blocked or | 1199 | * not in the foreground, send a SIGTTOU. If the signal is blocked or |
| 1206 | * ignored, go ahead and perform the operation. (POSIX 7.2) | 1200 | * ignored, go ahead and perform the operation. (POSIX 7.2) |
| 1207 | * | 1201 | * |
| 1208 | * Locking: none | 1202 | * Locking: ctrl_lock |
| 1209 | */ | 1203 | */ |
| 1210 | 1204 | ||
| 1211 | int tty_check_change(struct tty_struct *tty) | 1205 | int tty_check_change(struct tty_struct *tty) |
| 1212 | { | 1206 | { |
| 1207 | unsigned long flags; | ||
| 1208 | int ret = 0; | ||
| 1209 | |||
| 1213 | if (current->signal->tty != tty) | 1210 | if (current->signal->tty != tty) |
| 1214 | return 0; | 1211 | return 0; |
| 1212 | |||
| 1213 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1214 | |||
| 1215 | if (!tty->pgrp) { | 1215 | if (!tty->pgrp) { |
| 1216 | printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n"); | 1216 | printk(KERN_WARNING "tty_check_change: tty->pgrp == NULL!\n"); |
| 1217 | return 0; | 1217 | goto out; |
| 1218 | } | 1218 | } |
| 1219 | if (task_pgrp(current) == tty->pgrp) | 1219 | if (task_pgrp(current) == tty->pgrp) |
| 1220 | return 0; | 1220 | goto out; |
| 1221 | if (is_ignored(SIGTTOU)) | 1221 | if (is_ignored(SIGTTOU)) |
| 1222 | return 0; | 1222 | goto out; |
| 1223 | if (is_current_pgrp_orphaned()) | 1223 | if (is_current_pgrp_orphaned()) { |
| 1224 | return -EIO; | 1224 | ret = -EIO; |
| 1225 | goto out; | ||
| 1226 | } | ||
| 1225 | kill_pgrp(task_pgrp(current), SIGTTOU, 1); | 1227 | kill_pgrp(task_pgrp(current), SIGTTOU, 1); |
| 1226 | set_thread_flag(TIF_SIGPENDING); | 1228 | set_thread_flag(TIF_SIGPENDING); |
| 1227 | return -ERESTARTSYS; | 1229 | ret = -ERESTARTSYS; |
| 1230 | out: | ||
| 1231 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1232 | return ret; | ||
| 1228 | } | 1233 | } |
| 1229 | 1234 | ||
| 1230 | EXPORT_SYMBOL(tty_check_change); | 1235 | EXPORT_SYMBOL(tty_check_change); |
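In tty_io.c the first structural change is that tty_check_change() documents and uses tty->ctrl_lock: the pgrp checks run under the spinlock and the early returns collapse into one out: label so the lock is always released. Reduced to its control flow (the SIGTTOU-ignored check and the NULL-pgrp warning are elided here), the shape is:

#include <linux/sched.h>
#include <linux/tty.h>

static int my_check_change(struct tty_struct *tty)
{
	unsigned long flags;
	int ret = 0;

	if (current->signal->tty != tty)
		return 0;			/* not our controlling tty */

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	if (!tty->pgrp || task_pgrp(current) == tty->pgrp)
		goto out;			/* allowed */
	if (is_current_pgrp_orphaned()) {
		ret = -EIO;
		goto out;
	}
	kill_pgrp(task_pgrp(current), SIGTTOU, 1);
	set_thread_flag(TIF_SIGPENDING);
	ret = -ERESTARTSYS;
out:
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);
	return ret;
}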
| @@ -1247,8 +1252,8 @@ static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait) | |||
| 1247 | return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; | 1252 | return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; |
| 1248 | } | 1253 | } |
| 1249 | 1254 | ||
| 1250 | static int hung_up_tty_ioctl(struct inode *inode, struct file *file, | 1255 | static long hung_up_tty_ioctl(struct file *file, unsigned int cmd, |
| 1251 | unsigned int cmd, unsigned long arg) | 1256 | unsigned long arg) |
| 1252 | { | 1257 | { |
| 1253 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; | 1258 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; |
| 1254 | } | 1259 | } |
| @@ -1264,7 +1269,7 @@ static const struct file_operations tty_fops = { | |||
| 1264 | .read = tty_read, | 1269 | .read = tty_read, |
| 1265 | .write = tty_write, | 1270 | .write = tty_write, |
| 1266 | .poll = tty_poll, | 1271 | .poll = tty_poll, |
| 1267 | .ioctl = tty_ioctl, | 1272 | .unlocked_ioctl = tty_ioctl, |
| 1268 | .compat_ioctl = tty_compat_ioctl, | 1273 | .compat_ioctl = tty_compat_ioctl, |
| 1269 | .open = tty_open, | 1274 | .open = tty_open, |
| 1270 | .release = tty_release, | 1275 | .release = tty_release, |
| @@ -1277,7 +1282,7 @@ static const struct file_operations ptmx_fops = { | |||
| 1277 | .read = tty_read, | 1282 | .read = tty_read, |
| 1278 | .write = tty_write, | 1283 | .write = tty_write, |
| 1279 | .poll = tty_poll, | 1284 | .poll = tty_poll, |
| 1280 | .ioctl = tty_ioctl, | 1285 | .unlocked_ioctl = tty_ioctl, |
| 1281 | .compat_ioctl = tty_compat_ioctl, | 1286 | .compat_ioctl = tty_compat_ioctl, |
| 1282 | .open = ptmx_open, | 1287 | .open = ptmx_open, |
| 1283 | .release = tty_release, | 1288 | .release = tty_release, |
| @@ -1290,7 +1295,7 @@ static const struct file_operations console_fops = { | |||
| 1290 | .read = tty_read, | 1295 | .read = tty_read, |
| 1291 | .write = redirected_tty_write, | 1296 | .write = redirected_tty_write, |
| 1292 | .poll = tty_poll, | 1297 | .poll = tty_poll, |
| 1293 | .ioctl = tty_ioctl, | 1298 | .unlocked_ioctl = tty_ioctl, |
| 1294 | .compat_ioctl = tty_compat_ioctl, | 1299 | .compat_ioctl = tty_compat_ioctl, |
| 1295 | .open = tty_open, | 1300 | .open = tty_open, |
| 1296 | .release = tty_release, | 1301 | .release = tty_release, |
| @@ -1302,7 +1307,7 @@ static const struct file_operations hung_up_tty_fops = { | |||
| 1302 | .read = hung_up_tty_read, | 1307 | .read = hung_up_tty_read, |
| 1303 | .write = hung_up_tty_write, | 1308 | .write = hung_up_tty_write, |
| 1304 | .poll = hung_up_tty_poll, | 1309 | .poll = hung_up_tty_poll, |
| 1305 | .ioctl = hung_up_tty_ioctl, | 1310 | .unlocked_ioctl = hung_up_tty_ioctl, |
| 1306 | .compat_ioctl = hung_up_tty_compat_ioctl, | 1311 | .compat_ioctl = hung_up_tty_compat_ioctl, |
| 1307 | .release = tty_release, | 1312 | .release = tty_release, |
| 1308 | }; | 1313 | }; |
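tty_ioctl itself is re-declared from the old inode/file .ioctl prototype to the .unlocked_ioctl form returning long, and all four file_operations tables (regular tty, ptmx, console, hung-up) switch to .unlocked_ioctl while keeping .compat_ioctl as before. The signature pair in isolation, as a sketch with the other members elided:

static long my_hung_up_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	/* Same behaviour as before: only TIOCSPGRP is distinguished. */
	return cmd == TIOCSPGRP ? -ENOTTY : -EIO;
}

static const struct file_operations my_tty_fops = {
	.unlocked_ioctl	= tty_ioctl,		/* was .ioctl */
	.compat_ioctl	= tty_compat_ioctl,
	/* read/write/poll/open/release members elided in this sketch */
};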
| @@ -1404,6 +1409,7 @@ static void do_tty_hangup(struct work_struct *work) | |||
| 1404 | struct task_struct *p; | 1409 | struct task_struct *p; |
| 1405 | struct tty_ldisc *ld; | 1410 | struct tty_ldisc *ld; |
| 1406 | int closecount = 0, n; | 1411 | int closecount = 0, n; |
| 1412 | unsigned long flags; | ||
| 1407 | 1413 | ||
| 1408 | if (!tty) | 1414 | if (!tty) |
| 1409 | return; | 1415 | return; |
| @@ -1441,8 +1447,7 @@ static void do_tty_hangup(struct work_struct *work) | |||
| 1441 | /* We may have no line discipline at this point */ | 1447 | /* We may have no line discipline at this point */ |
| 1442 | if (ld->flush_buffer) | 1448 | if (ld->flush_buffer) |
| 1443 | ld->flush_buffer(tty); | 1449 | ld->flush_buffer(tty); |
| 1444 | if (tty->driver->flush_buffer) | 1450 | tty_driver_flush_buffer(tty); |
| 1445 | tty->driver->flush_buffer(tty); | ||
| 1446 | if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) && | 1451 | if ((test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) && |
| 1447 | ld->write_wakeup) | 1452 | ld->write_wakeup) |
| 1448 | ld->write_wakeup(tty); | 1453 | ld->write_wakeup(tty); |
| @@ -1480,19 +1485,24 @@ static void do_tty_hangup(struct work_struct *work) | |||
| 1480 | __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p); | 1485 | __group_send_sig_info(SIGHUP, SEND_SIG_PRIV, p); |
| 1481 | __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p); | 1486 | __group_send_sig_info(SIGCONT, SEND_SIG_PRIV, p); |
| 1482 | put_pid(p->signal->tty_old_pgrp); /* A noop */ | 1487 | put_pid(p->signal->tty_old_pgrp); /* A noop */ |
| 1488 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1483 | if (tty->pgrp) | 1489 | if (tty->pgrp) |
| 1484 | p->signal->tty_old_pgrp = get_pid(tty->pgrp); | 1490 | p->signal->tty_old_pgrp = get_pid(tty->pgrp); |
| 1491 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1485 | spin_unlock_irq(&p->sighand->siglock); | 1492 | spin_unlock_irq(&p->sighand->siglock); |
| 1486 | } while_each_pid_task(tty->session, PIDTYPE_SID, p); | 1493 | } while_each_pid_task(tty->session, PIDTYPE_SID, p); |
| 1487 | } | 1494 | } |
| 1488 | read_unlock(&tasklist_lock); | 1495 | read_unlock(&tasklist_lock); |
| 1489 | 1496 | ||
| 1497 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1490 | tty->flags = 0; | 1498 | tty->flags = 0; |
| 1491 | put_pid(tty->session); | 1499 | put_pid(tty->session); |
| 1492 | put_pid(tty->pgrp); | 1500 | put_pid(tty->pgrp); |
| 1493 | tty->session = NULL; | 1501 | tty->session = NULL; |
| 1494 | tty->pgrp = NULL; | 1502 | tty->pgrp = NULL; |
| 1495 | tty->ctrl_status = 0; | 1503 | tty->ctrl_status = 0; |
| 1504 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1505 | |||
| 1496 | /* | 1506 | /* |
| 1497 | * If one of the devices matches a console pointer, we | 1507 | * If one of the devices matches a console pointer, we |
| 1498 | * cannot just call hangup() because that will cause | 1508 | * cannot just call hangup() because that will cause |
| @@ -1500,11 +1510,11 @@ static void do_tty_hangup(struct work_struct *work) | |||
| 1500 | * So we just call close() the right number of times. | 1510 | * So we just call close() the right number of times. |
| 1501 | */ | 1511 | */ |
| 1502 | if (cons_filp) { | 1512 | if (cons_filp) { |
| 1503 | if (tty->driver->close) | 1513 | if (tty->ops->close) |
| 1504 | for (n = 0; n < closecount; n++) | 1514 | for (n = 0; n < closecount; n++) |
| 1505 | tty->driver->close(tty, cons_filp); | 1515 | tty->ops->close(tty, cons_filp); |
| 1506 | } else if (tty->driver->hangup) | 1516 | } else if (tty->ops->hangup) |
| 1507 | (tty->driver->hangup)(tty); | 1517 | (tty->ops->hangup)(tty); |
| 1508 | /* | 1518 | /* |
| 1509 | * We don't want to have driver/ldisc interactions beyond | 1519 | * We don't want to have driver/ldisc interactions beyond |
| 1510 | * the ones we did here. The driver layer expects no | 1520 | * the ones we did here. The driver layer expects no |
| @@ -1626,16 +1636,17 @@ void disassociate_ctty(int on_exit) | |||
| 1626 | struct tty_struct *tty; | 1636 | struct tty_struct *tty; |
| 1627 | struct pid *tty_pgrp = NULL; | 1637 | struct pid *tty_pgrp = NULL; |
| 1628 | 1638 | ||
| 1629 | lock_kernel(); | ||
| 1630 | 1639 | ||
| 1631 | mutex_lock(&tty_mutex); | 1640 | mutex_lock(&tty_mutex); |
| 1632 | tty = get_current_tty(); | 1641 | tty = get_current_tty(); |
| 1633 | if (tty) { | 1642 | if (tty) { |
| 1634 | tty_pgrp = get_pid(tty->pgrp); | 1643 | tty_pgrp = get_pid(tty->pgrp); |
| 1635 | mutex_unlock(&tty_mutex); | 1644 | mutex_unlock(&tty_mutex); |
| 1645 | lock_kernel(); | ||
| 1636 | /* XXX: here we race, there is nothing protecting tty */ | 1646 | /* XXX: here we race, there is nothing protecting tty */ |
| 1637 | if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) | 1647 | if (on_exit && tty->driver->type != TTY_DRIVER_TYPE_PTY) |
| 1638 | tty_vhangup(tty); | 1648 | tty_vhangup(tty); |
| 1649 | unlock_kernel(); | ||
| 1639 | } else if (on_exit) { | 1650 | } else if (on_exit) { |
| 1640 | struct pid *old_pgrp; | 1651 | struct pid *old_pgrp; |
| 1641 | spin_lock_irq(¤t->sighand->siglock); | 1652 | spin_lock_irq(¤t->sighand->siglock); |
| @@ -1648,7 +1659,6 @@ void disassociate_ctty(int on_exit) | |||
| 1648 | put_pid(old_pgrp); | 1659 | put_pid(old_pgrp); |
| 1649 | } | 1660 | } |
| 1650 | mutex_unlock(&tty_mutex); | 1661 | mutex_unlock(&tty_mutex); |
| 1651 | unlock_kernel(); | ||
| 1652 | return; | 1662 | return; |
| 1653 | } | 1663 | } |
| 1654 | if (tty_pgrp) { | 1664 | if (tty_pgrp) { |
| @@ -1667,10 +1677,13 @@ void disassociate_ctty(int on_exit) | |||
| 1667 | /* It is possible that do_tty_hangup has free'd this tty */ | 1677 | /* It is possible that do_tty_hangup has free'd this tty */ |
| 1668 | tty = get_current_tty(); | 1678 | tty = get_current_tty(); |
| 1669 | if (tty) { | 1679 | if (tty) { |
| 1680 | unsigned long flags; | ||
| 1681 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1670 | put_pid(tty->session); | 1682 | put_pid(tty->session); |
| 1671 | put_pid(tty->pgrp); | 1683 | put_pid(tty->pgrp); |
| 1672 | tty->session = NULL; | 1684 | tty->session = NULL; |
| 1673 | tty->pgrp = NULL; | 1685 | tty->pgrp = NULL; |
| 1686 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1674 | } else { | 1687 | } else { |
| 1675 | #ifdef TTY_DEBUG_HANGUP | 1688 | #ifdef TTY_DEBUG_HANGUP |
| 1676 | printk(KERN_DEBUG "error attempted to write to tty [0x%p]" | 1689 | printk(KERN_DEBUG "error attempted to write to tty [0x%p]" |
| @@ -1683,7 +1696,6 @@ void disassociate_ctty(int on_exit) | |||
| 1683 | read_lock(&tasklist_lock); | 1696 | read_lock(&tasklist_lock); |
| 1684 | session_clear_tty(task_session(current)); | 1697 | session_clear_tty(task_session(current)); |
| 1685 | read_unlock(&tasklist_lock); | 1698 | read_unlock(&tasklist_lock); |
| 1686 | unlock_kernel(); | ||
| 1687 | } | 1699 | } |
| 1688 | 1700 | ||
| 1689 | /** | 1701 | /** |
| @@ -1693,8 +1705,10 @@ void disassociate_ctty(int on_exit) | |||
| 1693 | void no_tty(void) | 1705 | void no_tty(void) |
| 1694 | { | 1706 | { |
| 1695 | struct task_struct *tsk = current; | 1707 | struct task_struct *tsk = current; |
| 1708 | lock_kernel(); | ||
| 1696 | if (tsk->signal->leader) | 1709 | if (tsk->signal->leader) |
| 1697 | disassociate_ctty(0); | 1710 | disassociate_ctty(0); |
| 1711 | unlock_kernel(); | ||
| 1698 | proc_clear_tty(tsk); | 1712 | proc_clear_tty(tsk); |
| 1699 | } | 1713 | } |
| 1700 | 1714 | ||
| @@ -1714,21 +1728,26 @@ void no_tty(void) | |||
| 1714 | * but not always. | 1728 | * but not always. |
| 1715 | * | 1729 | * |
| 1716 | * Locking: | 1730 | * Locking: |
| 1717 | * Broken. Relies on BKL which is unsafe here. | 1731 | * Uses the tty control lock internally |
| 1718 | */ | 1732 | */ |
| 1719 | 1733 | ||
| 1720 | void stop_tty(struct tty_struct *tty) | 1734 | void stop_tty(struct tty_struct *tty) |
| 1721 | { | 1735 | { |
| 1722 | if (tty->stopped) | 1736 | unsigned long flags; |
| 1737 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1738 | if (tty->stopped) { | ||
| 1739 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1723 | return; | 1740 | return; |
| 1741 | } | ||
| 1724 | tty->stopped = 1; | 1742 | tty->stopped = 1; |
| 1725 | if (tty->link && tty->link->packet) { | 1743 | if (tty->link && tty->link->packet) { |
| 1726 | tty->ctrl_status &= ~TIOCPKT_START; | 1744 | tty->ctrl_status &= ~TIOCPKT_START; |
| 1727 | tty->ctrl_status |= TIOCPKT_STOP; | 1745 | tty->ctrl_status |= TIOCPKT_STOP; |
| 1728 | wake_up_interruptible(&tty->link->read_wait); | 1746 | wake_up_interruptible(&tty->link->read_wait); |
| 1729 | } | 1747 | } |
| 1730 | if (tty->driver->stop) | 1748 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
| 1731 | (tty->driver->stop)(tty); | 1749 | if (tty->ops->stop) |
| 1750 | (tty->ops->stop)(tty); | ||
| 1732 | } | 1751 | } |
| 1733 | 1752 | ||
| 1734 | EXPORT_SYMBOL(stop_tty); | 1753 | EXPORT_SYMBOL(stop_tty); |
| @@ -1743,21 +1762,26 @@ EXPORT_SYMBOL(stop_tty); | |||
| 1743 | * driver start method is invoked and the line discipline woken. | 1762 | * driver start method is invoked and the line discipline woken. |
| 1744 | * | 1763 | * |
| 1745 | * Locking: | 1764 | * Locking: |
| 1746 | * Broken. Relies on BKL which is unsafe here. | 1765 | * ctrl_lock |
| 1747 | */ | 1766 | */ |
| 1748 | 1767 | ||
| 1749 | void start_tty(struct tty_struct *tty) | 1768 | void start_tty(struct tty_struct *tty) |
| 1750 | { | 1769 | { |
| 1751 | if (!tty->stopped || tty->flow_stopped) | 1770 | unsigned long flags; |
| 1771 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 1772 | if (!tty->stopped || tty->flow_stopped) { | ||
| 1773 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 1752 | return; | 1774 | return; |
| 1775 | } | ||
| 1753 | tty->stopped = 0; | 1776 | tty->stopped = 0; |
| 1754 | if (tty->link && tty->link->packet) { | 1777 | if (tty->link && tty->link->packet) { |
| 1755 | tty->ctrl_status &= ~TIOCPKT_STOP; | 1778 | tty->ctrl_status &= ~TIOCPKT_STOP; |
| 1756 | tty->ctrl_status |= TIOCPKT_START; | 1779 | tty->ctrl_status |= TIOCPKT_START; |
| 1757 | wake_up_interruptible(&tty->link->read_wait); | 1780 | wake_up_interruptible(&tty->link->read_wait); |
| 1758 | } | 1781 | } |
| 1759 | if (tty->driver->start) | 1782 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); |
| 1760 | (tty->driver->start)(tty); | 1783 | if (tty->ops->start) |
| 1784 | (tty->ops->start)(tty); | ||
| 1761 | /* If we have a running line discipline it may need kicking */ | 1785 | /* If we have a running line discipline it may need kicking */ |
| 1762 | tty_wakeup(tty); | 1786 | tty_wakeup(tty); |
| 1763 | } | 1787 | } |
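stop_tty() and start_tty() replace their "Broken. Relies on BKL" locking notes with real ctrl_lock protection: the stopped flag and the TIOCPKT status bits are updated under the spinlock, and the driver's stop/start method, now reached through tty->ops, is called only after the lock is dropped, so a driver method that takes its own locks is never invoked under the spinlock. The ordering, in outline:

#include <linux/tty.h>

void my_stop_tty(struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&tty->ctrl_lock, flags);
	if (tty->stopped) {
		spin_unlock_irqrestore(&tty->ctrl_lock, flags);
		return;				/* nothing to do */
	}
	tty->stopped = 1;
	if (tty->link && tty->link->packet) {
		tty->ctrl_status &= ~TIOCPKT_START;
		tty->ctrl_status |= TIOCPKT_STOP;
		wake_up_interruptible(&tty->link->read_wait);
	}
	spin_unlock_irqrestore(&tty->ctrl_lock, flags);

	if (tty->ops->stop)			/* driver callback, lock-free */
		tty->ops->stop(tty);
}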
| @@ -1775,10 +1799,8 @@ EXPORT_SYMBOL(start_tty); | |||
| 1775 | * for hung up devices before calling the line discipline method. | 1799 | * for hung up devices before calling the line discipline method. |
| 1776 | * | 1800 | * |
| 1777 | * Locking: | 1801 | * Locking: |
| 1778 | * Locks the line discipline internally while needed | 1802 | * Locks the line discipline internally while needed. Multiple |
| 1779 | * For historical reasons the line discipline read method is | 1803 | * read calls may be outstanding in parallel. |
| 1780 | * invoked under the BKL. This will go away in time so do not rely on it | ||
| 1781 | * in new code. Multiple read calls may be outstanding in parallel. | ||
| 1782 | */ | 1804 | */ |
| 1783 | 1805 | ||
| 1784 | static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | 1806 | static ssize_t tty_read(struct file *file, char __user *buf, size_t count, |
| @@ -1799,13 +1821,11 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, | |||
| 1799 | /* We want to wait for the line discipline to sort out in this | 1821 | /* We want to wait for the line discipline to sort out in this |
| 1800 | situation */ | 1822 | situation */ |
| 1801 | ld = tty_ldisc_ref_wait(tty); | 1823 | ld = tty_ldisc_ref_wait(tty); |
| 1802 | lock_kernel(); | ||
| 1803 | if (ld->read) | 1824 | if (ld->read) |
| 1804 | i = (ld->read)(tty, file, buf, count); | 1825 | i = (ld->read)(tty, file, buf, count); |
| 1805 | else | 1826 | else |
| 1806 | i = -EIO; | 1827 | i = -EIO; |
| 1807 | tty_ldisc_deref(ld); | 1828 | tty_ldisc_deref(ld); |
| 1808 | unlock_kernel(); | ||
| 1809 | if (i > 0) | 1829 | if (i > 0) |
| 1810 | inode->i_atime = current_fs_time(inode->i_sb); | 1830 | inode->i_atime = current_fs_time(inode->i_sb); |
| 1811 | return i; | 1831 | return i; |
| @@ -1893,9 +1913,7 @@ static inline ssize_t do_tty_write( | |||
| 1893 | ret = -EFAULT; | 1913 | ret = -EFAULT; |
| 1894 | if (copy_from_user(tty->write_buf, buf, size)) | 1914 | if (copy_from_user(tty->write_buf, buf, size)) |
| 1895 | break; | 1915 | break; |
| 1896 | lock_kernel(); | ||
| 1897 | ret = write(tty, file, tty->write_buf, size); | 1916 | ret = write(tty, file, tty->write_buf, size); |
| 1898 | unlock_kernel(); | ||
| 1899 | if (ret <= 0) | 1917 | if (ret <= 0) |
| 1900 | break; | 1918 | break; |
| 1901 | written += ret; | 1919 | written += ret; |
| @@ -1948,10 +1966,13 @@ static ssize_t tty_write(struct file *file, const char __user *buf, | |||
| 1948 | tty = (struct tty_struct *)file->private_data; | 1966 | tty = (struct tty_struct *)file->private_data; |
| 1949 | if (tty_paranoia_check(tty, inode, "tty_write")) | 1967 | if (tty_paranoia_check(tty, inode, "tty_write")) |
| 1950 | return -EIO; | 1968 | return -EIO; |
| 1951 | if (!tty || !tty->driver->write || | 1969 | if (!tty || !tty->ops->write || |
| 1952 | (test_bit(TTY_IO_ERROR, &tty->flags))) | 1970 | (test_bit(TTY_IO_ERROR, &tty->flags))) |
| 1953 | return -EIO; | 1971 | return -EIO; |
| 1954 | 1972 | /* Short term debug to catch buggy drivers */ | |
| 1973 | if (tty->ops->write_room == NULL) | ||
| 1974 | printk(KERN_ERR "tty driver %s lacks a write_room method.\n", | ||
| 1975 | tty->driver->name); | ||
| 1955 | ld = tty_ldisc_ref_wait(tty); | 1976 | ld = tty_ldisc_ref_wait(tty); |
| 1956 | if (!ld->write) | 1977 | if (!ld->write) |
| 1957 | ret = -EIO; | 1978 | ret = -EIO; |
| @@ -2098,6 +2119,7 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
| 2098 | goto fail_no_mem; | 2119 | goto fail_no_mem; |
| 2099 | initialize_tty_struct(tty); | 2120 | initialize_tty_struct(tty); |
| 2100 | tty->driver = driver; | 2121 | tty->driver = driver; |
| 2122 | tty->ops = driver->ops; | ||
| 2101 | tty->index = idx; | 2123 | tty->index = idx; |
| 2102 | tty_line_name(driver, idx, tty->name); | 2124 | tty_line_name(driver, idx, tty->name); |
| 2103 | 2125 | ||
| @@ -2128,6 +2150,7 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
| 2128 | goto free_mem_out; | 2150 | goto free_mem_out; |
| 2129 | initialize_tty_struct(o_tty); | 2151 | initialize_tty_struct(o_tty); |
| 2130 | o_tty->driver = driver->other; | 2152 | o_tty->driver = driver->other; |
| 2153 | o_tty->ops = driver->ops; | ||
| 2131 | o_tty->index = idx; | 2154 | o_tty->index = idx; |
| 2132 | tty_line_name(driver->other, idx, o_tty->name); | 2155 | tty_line_name(driver->other, idx, o_tty->name); |
| 2133 | 2156 | ||
| @@ -2432,8 +2455,8 @@ static void release_dev(struct file *filp) | |||
| 2432 | } | 2455 | } |
| 2433 | } | 2456 | } |
| 2434 | #endif | 2457 | #endif |
| 2435 | if (tty->driver->close) | 2458 | if (tty->ops->close) |
| 2436 | tty->driver->close(tty, filp); | 2459 | tty->ops->close(tty, filp); |
| 2437 | 2460 | ||
| 2438 | /* | 2461 | /* |
| 2439 | * Sanity check: if tty->count is going to zero, there shouldn't be | 2462 | * Sanity check: if tty->count is going to zero, there shouldn't be |
| @@ -2612,15 +2635,9 @@ static void release_dev(struct file *filp) | |||
| 2612 | */ | 2635 | */ |
| 2613 | release_tty(tty, idx); | 2636 | release_tty(tty, idx); |
| 2614 | 2637 | ||
| 2615 | #ifdef CONFIG_UNIX98_PTYS | ||
| 2616 | /* Make this pty number available for reallocation */ | 2638 | /* Make this pty number available for reallocation */ |
| 2617 | if (devpts) { | 2639 | if (devpts) |
| 2618 | mutex_lock(&allocated_ptys_lock); | 2640 | devpts_kill_index(idx); |
| 2619 | idr_remove(&allocated_ptys, idx); | ||
| 2620 | mutex_unlock(&allocated_ptys_lock); | ||
| 2621 | } | ||
| 2622 | #endif | ||
| 2623 | |||
| 2624 | } | 2641 | } |
| 2625 | 2642 | ||
| 2626 | /** | 2643 | /** |
| @@ -2716,8 +2733,8 @@ got_driver: | |||
| 2716 | printk(KERN_DEBUG "opening %s...", tty->name); | 2733 | printk(KERN_DEBUG "opening %s...", tty->name); |
| 2717 | #endif | 2734 | #endif |
| 2718 | if (!retval) { | 2735 | if (!retval) { |
| 2719 | if (tty->driver->open) | 2736 | if (tty->ops->open) |
| 2720 | retval = tty->driver->open(tty, filp); | 2737 | retval = tty->ops->open(tty, filp); |
| 2721 | else | 2738 | else |
| 2722 | retval = -ENODEV; | 2739 | retval = -ENODEV; |
| 2723 | } | 2740 | } |
| @@ -2776,29 +2793,13 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 2776 | struct tty_struct *tty; | 2793 | struct tty_struct *tty; |
| 2777 | int retval; | 2794 | int retval; |
| 2778 | int index; | 2795 | int index; |
| 2779 | int idr_ret; | ||
| 2780 | 2796 | ||
| 2781 | nonseekable_open(inode, filp); | 2797 | nonseekable_open(inode, filp); |
| 2782 | 2798 | ||
| 2783 | /* find a device that is not in use. */ | 2799 | /* find a device that is not in use. */ |
| 2784 | mutex_lock(&allocated_ptys_lock); | 2800 | index = devpts_new_index(); |
| 2785 | if (!idr_pre_get(&allocated_ptys, GFP_KERNEL)) { | 2801 | if (index < 0) |
| 2786 | mutex_unlock(&allocated_ptys_lock); | 2802 | return index; |
| 2787 | return -ENOMEM; | ||
| 2788 | } | ||
| 2789 | idr_ret = idr_get_new(&allocated_ptys, NULL, &index); | ||
| 2790 | if (idr_ret < 0) { | ||
| 2791 | mutex_unlock(&allocated_ptys_lock); | ||
| 2792 | if (idr_ret == -EAGAIN) | ||
| 2793 | return -ENOMEM; | ||
| 2794 | return -EIO; | ||
| 2795 | } | ||
| 2796 | if (index >= pty_limit) { | ||
| 2797 | idr_remove(&allocated_ptys, index); | ||
| 2798 | mutex_unlock(&allocated_ptys_lock); | ||
| 2799 | return -EIO; | ||
| 2800 | } | ||
| 2801 | mutex_unlock(&allocated_ptys_lock); | ||
| 2802 | 2803 | ||
| 2803 | mutex_lock(&tty_mutex); | 2804 | mutex_lock(&tty_mutex); |
| 2804 | retval = init_dev(ptm_driver, index, &tty); | 2805 | retval = init_dev(ptm_driver, index, &tty); |
| @@ -2811,21 +2812,19 @@ static int ptmx_open(struct inode *inode, struct file *filp) | |||
| 2811 | filp->private_data = tty; | 2812 | filp->private_data = tty; |
| 2812 | file_move(filp, &tty->tty_files); | 2813 | file_move(filp, &tty->tty_files); |
| 2813 | 2814 | ||
| 2814 | retval = -ENOMEM; | 2815 | retval = devpts_pty_new(tty->link); |
| 2815 | if (devpts_pty_new(tty->link)) | 2816 | if (retval) |
| 2816 | goto out1; | 2817 | goto out1; |
| 2817 | 2818 | ||
| 2818 | check_tty_count(tty, "tty_open"); | 2819 | check_tty_count(tty, "ptmx_open"); |
| 2819 | retval = ptm_driver->open(tty, filp); | 2820 | retval = ptm_driver->ops->open(tty, filp); |
| 2820 | if (!retval) | 2821 | if (!retval) |
| 2821 | return 0; | 2822 | return 0; |
| 2822 | out1: | 2823 | out1: |
| 2823 | release_dev(filp); | 2824 | release_dev(filp); |
| 2824 | return retval; | 2825 | return retval; |
| 2825 | out: | 2826 | out: |
| 2826 | mutex_lock(&allocated_ptys_lock); | 2827 | devpts_kill_index(index); |
| 2827 | idr_remove(&allocated_ptys, index); | ||
| 2828 | mutex_unlock(&allocated_ptys_lock); | ||
| 2829 | return retval; | 2828 | return retval; |
| 2830 | } | 2829 | } |
| 2831 | #endif | 2830 | #endif |
| @@ -2882,6 +2881,7 @@ static unsigned int tty_poll(struct file *filp, poll_table *wait) | |||
| 2882 | static int tty_fasync(int fd, struct file *filp, int on) | 2881 | static int tty_fasync(int fd, struct file *filp, int on) |
| 2883 | { | 2882 | { |
| 2884 | struct tty_struct *tty; | 2883 | struct tty_struct *tty; |
| 2884 | unsigned long flags; | ||
| 2885 | int retval; | 2885 | int retval; |
| 2886 | 2886 | ||
| 2887 | tty = (struct tty_struct *)filp->private_data; | 2887 | tty = (struct tty_struct *)filp->private_data; |
| @@ -2897,6 +2897,7 @@ static int tty_fasync(int fd, struct file *filp, int on) | |||
| 2897 | struct pid *pid; | 2897 | struct pid *pid; |
| 2898 | if (!waitqueue_active(&tty->read_wait)) | 2898 | if (!waitqueue_active(&tty->read_wait)) |
| 2899 | tty->minimum_to_wake = 1; | 2899 | tty->minimum_to_wake = 1; |
| 2900 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 2900 | if (tty->pgrp) { | 2901 | if (tty->pgrp) { |
| 2901 | pid = tty->pgrp; | 2902 | pid = tty->pgrp; |
| 2902 | type = PIDTYPE_PGID; | 2903 | type = PIDTYPE_PGID; |
| @@ -2904,6 +2905,7 @@ static int tty_fasync(int fd, struct file *filp, int on) | |||
| 2904 | pid = task_pid(current); | 2905 | pid = task_pid(current); |
| 2905 | type = PIDTYPE_PID; | 2906 | type = PIDTYPE_PID; |
| 2906 | } | 2907 | } |
| 2908 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 2907 | retval = __f_setown(filp, pid, type, 0); | 2909 | retval = __f_setown(filp, pid, type, 0); |
| 2908 | if (retval) | 2910 | if (retval) |
| 2909 | return retval; | 2911 | return retval; |
| @@ -2989,6 +2991,8 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | |||
| 2989 | struct winsize __user *arg) | 2991 | struct winsize __user *arg) |
| 2990 | { | 2992 | { |
| 2991 | struct winsize tmp_ws; | 2993 | struct winsize tmp_ws; |
| 2994 | struct pid *pgrp, *rpgrp; | ||
| 2995 | unsigned long flags; | ||
| 2992 | 2996 | ||
| 2993 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) | 2997 | if (copy_from_user(&tmp_ws, arg, sizeof(*arg))) |
| 2994 | return -EFAULT; | 2998 | return -EFAULT; |
| @@ -3006,10 +3010,21 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | |||
| 3006 | } | 3010 | } |
| 3007 | } | 3011 | } |
| 3008 | #endif | 3012 | #endif |
| 3009 | if (tty->pgrp) | 3013 | /* Get the PID values and reference them so we can |
| 3010 | kill_pgrp(tty->pgrp, SIGWINCH, 1); | 3014 | avoid holding the tty ctrl lock while sending signals */ |
| 3011 | if ((real_tty->pgrp != tty->pgrp) && real_tty->pgrp) | 3015 | spin_lock_irqsave(&tty->ctrl_lock, flags); |
| 3012 | kill_pgrp(real_tty->pgrp, SIGWINCH, 1); | 3016 | pgrp = get_pid(tty->pgrp); |
| 3017 | rpgrp = get_pid(real_tty->pgrp); | ||
| 3018 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 3019 | |||
| 3020 | if (pgrp) | ||
| 3021 | kill_pgrp(pgrp, SIGWINCH, 1); | ||
| 3022 | if (rpgrp != pgrp && rpgrp) | ||
| 3023 | kill_pgrp(rpgrp, SIGWINCH, 1); | ||
| 3024 | |||
| 3025 | put_pid(pgrp); | ||
| 3026 | put_pid(rpgrp); | ||
| 3027 | |||
| 3013 | tty->winsize = tmp_ws; | 3028 | tty->winsize = tmp_ws; |
| 3014 | real_tty->winsize = tmp_ws; | 3029 | real_tty->winsize = tmp_ws; |
| 3015 | done: | 3030 | done: |
| @@ -3070,10 +3085,13 @@ static int fionbio(struct file *file, int __user *p) | |||
| 3070 | if (get_user(nonblock, p)) | 3085 | if (get_user(nonblock, p)) |
| 3071 | return -EFAULT; | 3086 | return -EFAULT; |
| 3072 | 3087 | ||
| 3088 | /* file->f_flags is still BKL protected in the fs layer - vomit */ | ||
| 3089 | lock_kernel(); | ||
| 3073 | if (nonblock) | 3090 | if (nonblock) |
| 3074 | file->f_flags |= O_NONBLOCK; | 3091 | file->f_flags |= O_NONBLOCK; |
| 3075 | else | 3092 | else |
| 3076 | file->f_flags &= ~O_NONBLOCK; | 3093 | file->f_flags &= ~O_NONBLOCK; |
| 3094 | unlock_kernel(); | ||
| 3077 | return 0; | 3095 | return 0; |
| 3078 | } | 3096 | } |
| 3079 | 3097 | ||
| @@ -3131,6 +3149,27 @@ unlock: | |||
| 3131 | } | 3149 | } |
| 3132 | 3150 | ||
| 3133 | /** | 3151 | /** |
| 3152 | * tty_get_pgrp - return a ref counted pgrp pid | ||
| 3153 | * @tty: tty to read | ||
| 3154 | * | ||
| 3155 | * Returns a refcounted instance of the pid struct for the process | ||
| 3156 | * group controlling the tty. | ||
| 3157 | */ | ||
| 3158 | |||
| 3159 | struct pid *tty_get_pgrp(struct tty_struct *tty) | ||
| 3160 | { | ||
| 3161 | unsigned long flags; | ||
| 3162 | struct pid *pgrp; | ||
| 3163 | |||
| 3164 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 3165 | pgrp = get_pid(tty->pgrp); | ||
| 3166 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 3167 | |||
| 3168 | return pgrp; | ||
| 3169 | } | ||
| 3170 | EXPORT_SYMBOL_GPL(tty_get_pgrp); | ||
| 3171 | |||
| 3172 | /** | ||
| 3134 | * tiocgpgrp - get process group | 3173 | * tiocgpgrp - get process group |
| 3135 | * @tty: tty passed by user | 3174 | * @tty: tty passed by user |
| 3136 | * @real_tty: tty side of the tty passed by the user if a pty else the tty | 3175 | * @real_tty: tty side of the tty passed by the user if a pty else the tty |
| @@ -3144,13 +3183,18 @@ unlock: | |||
| 3144 | 3183 | ||
| 3145 | static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) | 3184 | static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) |
| 3146 | { | 3185 | { |
| 3186 | struct pid *pid; | ||
| 3187 | int ret; | ||
| 3147 | /* | 3188 | /* |
| 3148 | * (tty == real_tty) is a cheap way of | 3189 | * (tty == real_tty) is a cheap way of |
| 3149 | * testing if the tty is NOT a master pty. | 3190 | * testing if the tty is NOT a master pty. |
| 3150 | */ | 3191 | */ |
| 3151 | if (tty == real_tty && current->signal->tty != real_tty) | 3192 | if (tty == real_tty && current->signal->tty != real_tty) |
| 3152 | return -ENOTTY; | 3193 | return -ENOTTY; |
| 3153 | return put_user(pid_vnr(real_tty->pgrp), p); | 3194 | pid = tty_get_pgrp(real_tty); |
| 3195 | ret = put_user(pid_vnr(pid), p); | ||
| 3196 | put_pid(pid); | ||
| 3197 | return ret; | ||
| 3154 | } | 3198 | } |
| 3155 | 3199 | ||
| 3156 | /** | 3200 | /** |
| @@ -3162,7 +3206,7 @@ static int tiocgpgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t | |||
| 3162 | * Set the process group of the tty to the session passed. Only | 3206 | * Set the process group of the tty to the session passed. Only |
| 3163 | * permitted where the tty session is our session. | 3207 | * permitted where the tty session is our session. |
| 3164 | * | 3208 | * |
| 3165 | * Locking: None | 3209 | * Locking: RCU, ctrl lock |
| 3166 | */ | 3210 | */ |
| 3167 | 3211 | ||
| 3168 | static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) | 3212 | static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t __user *p) |
| @@ -3170,6 +3214,7 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t | |||
| 3170 | struct pid *pgrp; | 3214 | struct pid *pgrp; |
| 3171 | pid_t pgrp_nr; | 3215 | pid_t pgrp_nr; |
| 3172 | int retval = tty_check_change(real_tty); | 3216 | int retval = tty_check_change(real_tty); |
| 3217 | unsigned long flags; | ||
| 3173 | 3218 | ||
| 3174 | if (retval == -EIO) | 3219 | if (retval == -EIO) |
| 3175 | return -ENOTTY; | 3220 | return -ENOTTY; |
| @@ -3192,8 +3237,10 @@ static int tiocspgrp(struct tty_struct *tty, struct tty_struct *real_tty, pid_t | |||
| 3192 | if (session_of_pgrp(pgrp) != task_session(current)) | 3237 | if (session_of_pgrp(pgrp) != task_session(current)) |
| 3193 | goto out_unlock; | 3238 | goto out_unlock; |
| 3194 | retval = 0; | 3239 | retval = 0; |
| 3240 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 3195 | put_pid(real_tty->pgrp); | 3241 | put_pid(real_tty->pgrp); |
| 3196 | real_tty->pgrp = get_pid(pgrp); | 3242 | real_tty->pgrp = get_pid(pgrp); |
| 3243 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 3197 | out_unlock: | 3244 | out_unlock: |
| 3198 | rcu_read_unlock(); | 3245 | rcu_read_unlock(); |
| 3199 | return retval; | 3246 | return retval; |
| @@ -3237,10 +3284,16 @@ static int tiocgsid(struct tty_struct *tty, struct tty_struct *real_tty, pid_t _ | |||
| 3237 | static int tiocsetd(struct tty_struct *tty, int __user *p) | 3284 | static int tiocsetd(struct tty_struct *tty, int __user *p) |
| 3238 | { | 3285 | { |
| 3239 | int ldisc; | 3286 | int ldisc; |
| 3287 | int ret; | ||
| 3240 | 3288 | ||
| 3241 | if (get_user(ldisc, p)) | 3289 | if (get_user(ldisc, p)) |
| 3242 | return -EFAULT; | 3290 | return -EFAULT; |
| 3243 | return tty_set_ldisc(tty, ldisc); | 3291 | |
| 3292 | lock_kernel(); | ||
| 3293 | ret = tty_set_ldisc(tty, ldisc); | ||
| 3294 | unlock_kernel(); | ||
| 3295 | |||
| 3296 | return ret; | ||
| 3244 | } | 3297 | } |
| 3245 | 3298 | ||
| 3246 | /** | 3299 | /** |
| @@ -3260,18 +3313,18 @@ static int send_break(struct tty_struct *tty, unsigned int duration) | |||
| 3260 | { | 3313 | { |
| 3261 | if (tty_write_lock(tty, 0) < 0) | 3314 | if (tty_write_lock(tty, 0) < 0) |
| 3262 | return -EINTR; | 3315 | return -EINTR; |
| 3263 | tty->driver->break_ctl(tty, -1); | 3316 | tty->ops->break_ctl(tty, -1); |
| 3264 | if (!signal_pending(current)) | 3317 | if (!signal_pending(current)) |
| 3265 | msleep_interruptible(duration); | 3318 | msleep_interruptible(duration); |
| 3266 | tty->driver->break_ctl(tty, 0); | 3319 | tty->ops->break_ctl(tty, 0); |
| 3267 | tty_write_unlock(tty); | 3320 | tty_write_unlock(tty); |
| 3268 | if (signal_pending(current)) | 3321 | if (signal_pending(current)) |
| 3269 | return -EINTR; | 3322 | return -EINTR; |
| 3270 | return 0; | 3323 | return 0; |
| 3271 | } | 3324 | } |
| 3272 | 3325 | ||
| 3273 | /** | 3326 | /** |
| 3274 | * tiocmget - get modem status | 3327 | * tty_tiocmget - get modem status |
| 3275 | * @tty: tty device | 3328 | * @tty: tty device |
| 3276 | * @file: user file pointer | 3329 | * @file: user file pointer |
| 3277 | * @p: pointer to result | 3330 | * @p: pointer to result |
| @@ -3286,8 +3339,8 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p | |||
| 3286 | { | 3339 | { |
| 3287 | int retval = -EINVAL; | 3340 | int retval = -EINVAL; |
| 3288 | 3341 | ||
| 3289 | if (tty->driver->tiocmget) { | 3342 | if (tty->ops->tiocmget) { |
| 3290 | retval = tty->driver->tiocmget(tty, file); | 3343 | retval = tty->ops->tiocmget(tty, file); |
| 3291 | 3344 | ||
| 3292 | if (retval >= 0) | 3345 | if (retval >= 0) |
| 3293 | retval = put_user(retval, p); | 3346 | retval = put_user(retval, p); |
| @@ -3296,7 +3349,7 @@ static int tty_tiocmget(struct tty_struct *tty, struct file *file, int __user *p | |||
| 3296 | } | 3349 | } |
| 3297 | 3350 | ||
| 3298 | /** | 3351 | /** |
| 3299 | * tiocmset - set modem status | 3352 | * tty_tiocmset - set modem status |
| 3300 | * @tty: tty device | 3353 | * @tty: tty device |
| 3301 | * @file: user file pointer | 3354 | * @file: user file pointer |
| 3302 | * @cmd: command - clear bits, set bits or set all | 3355 | * @cmd: command - clear bits, set bits or set all |
| @@ -3313,7 +3366,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int | |||
| 3313 | { | 3366 | { |
| 3314 | int retval = -EINVAL; | 3367 | int retval = -EINVAL; |
| 3315 | 3368 | ||
| 3316 | if (tty->driver->tiocmset) { | 3369 | if (tty->ops->tiocmset) { |
| 3317 | unsigned int set, clear, val; | 3370 | unsigned int set, clear, val; |
| 3318 | 3371 | ||
| 3319 | retval = get_user(val, p); | 3372 | retval = get_user(val, p); |
| @@ -3337,7 +3390,7 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int | |||
| 3337 | set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; | 3390 | set &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; |
| 3338 | clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; | 3391 | clear &= TIOCM_DTR|TIOCM_RTS|TIOCM_OUT1|TIOCM_OUT2|TIOCM_LOOP; |
| 3339 | 3392 | ||
| 3340 | retval = tty->driver->tiocmset(tty, file, set, clear); | 3393 | retval = tty->ops->tiocmset(tty, file, set, clear); |
| 3341 | } | 3394 | } |
| 3342 | return retval; | 3395 | return retval; |
| 3343 | } | 3396 | } |
| @@ -3345,20 +3398,18 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int | |||
| 3345 | /* | 3398 | /* |
| 3346 | * Split this up, as gcc can choke on it otherwise.. | 3399 | * Split this up, as gcc can choke on it otherwise.. |
| 3347 | */ | 3400 | */ |
| 3348 | int tty_ioctl(struct inode *inode, struct file *file, | 3401 | long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg) |
| 3349 | unsigned int cmd, unsigned long arg) | ||
| 3350 | { | 3402 | { |
| 3351 | struct tty_struct *tty, *real_tty; | 3403 | struct tty_struct *tty, *real_tty; |
| 3352 | void __user *p = (void __user *)arg; | 3404 | void __user *p = (void __user *)arg; |
| 3353 | int retval; | 3405 | int retval; |
| 3354 | struct tty_ldisc *ld; | 3406 | struct tty_ldisc *ld; |
| 3407 | struct inode *inode = file->f_dentry->d_inode; | ||
| 3355 | 3408 | ||
| 3356 | tty = (struct tty_struct *)file->private_data; | 3409 | tty = (struct tty_struct *)file->private_data; |
| 3357 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) | 3410 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) |
| 3358 | return -EINVAL; | 3411 | return -EINVAL; |
| 3359 | 3412 | ||
| 3360 | /* CHECKME: is this safe as one end closes ? */ | ||
| 3361 | |||
| 3362 | real_tty = tty; | 3413 | real_tty = tty; |
| 3363 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 3414 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
| 3364 | tty->driver->subtype == PTY_TYPE_MASTER) | 3415 | tty->driver->subtype == PTY_TYPE_MASTER) |
| @@ -3367,21 +3418,28 @@ int tty_ioctl(struct inode *inode, struct file *file, | |||
| 3367 | /* | 3418 | /* |
| 3368 | * Break handling by driver | 3419 | * Break handling by driver |
| 3369 | */ | 3420 | */ |
| 3370 | if (!tty->driver->break_ctl) { | 3421 | |
| 3422 | retval = -EINVAL; | ||
| 3423 | |||
| 3424 | if (!tty->ops->break_ctl) { | ||
| 3371 | switch (cmd) { | 3425 | switch (cmd) { |
| 3372 | case TIOCSBRK: | 3426 | case TIOCSBRK: |
| 3373 | case TIOCCBRK: | 3427 | case TIOCCBRK: |
| 3374 | if (tty->driver->ioctl) | 3428 | if (tty->ops->ioctl) |
| 3375 | return tty->driver->ioctl(tty, file, cmd, arg); | 3429 | retval = tty->ops->ioctl(tty, file, cmd, arg); |
| 3376 | return -EINVAL; | 3430 | if (retval != -EINVAL && retval != -ENOIOCTLCMD) |
| 3431 | printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name); | ||
| 3432 | return retval; | ||
| 3377 | 3433 | ||
| 3378 | /* These two ioctls always return success, even if */ | 3434 | /* These two ioctls always return success, even if */ |
| 3379 | /* the driver doesn't support them. */ | 3435 | /* the driver doesn't support them. */ |
| 3380 | case TCSBRK: | 3436 | case TCSBRK: |
| 3381 | case TCSBRKP: | 3437 | case TCSBRKP: |
| 3382 | if (!tty->driver->ioctl) | 3438 | if (!tty->ops->ioctl) |
| 3383 | return 0; | 3439 | return 0; |
| 3384 | retval = tty->driver->ioctl(tty, file, cmd, arg); | 3440 | retval = tty->ops->ioctl(tty, file, cmd, arg); |
| 3441 | if (retval != -EINVAL && retval != -ENOIOCTLCMD) | ||
| 3442 | printk(KERN_WARNING "tty: driver %s needs updating to use break_ctl\n", tty->driver->name); | ||
| 3385 | if (retval == -ENOIOCTLCMD) | 3443 | if (retval == -ENOIOCTLCMD) |
| 3386 | retval = 0; | 3444 | retval = 0; |
| 3387 | return retval; | 3445 | return retval; |
| @@ -3439,7 +3497,6 @@ int tty_ioctl(struct inode *inode, struct file *file, | |||
| 3439 | case TIOCGSID: | 3497 | case TIOCGSID: |
| 3440 | return tiocgsid(tty, real_tty, p); | 3498 | return tiocgsid(tty, real_tty, p); |
| 3441 | case TIOCGETD: | 3499 | case TIOCGETD: |
| 3442 | /* FIXME: check this is ok */ | ||
| 3443 | return put_user(tty->ldisc.num, (int __user *)p); | 3500 | return put_user(tty->ldisc.num, (int __user *)p); |
| 3444 | case TIOCSETD: | 3501 | case TIOCSETD: |
| 3445 | return tiocsetd(tty, p); | 3502 | return tiocsetd(tty, p); |
| @@ -3451,11 +3508,13 @@ int tty_ioctl(struct inode *inode, struct file *file, | |||
| 3451 | * Break handling | 3508 | * Break handling |
| 3452 | */ | 3509 | */ |
| 3453 | case TIOCSBRK: /* Turn break on, unconditionally */ | 3510 | case TIOCSBRK: /* Turn break on, unconditionally */ |
| 3454 | tty->driver->break_ctl(tty, -1); | 3511 | if (tty->ops->break_ctl) |
| 3512 | tty->ops->break_ctl(tty, -1); | ||
| 3455 | return 0; | 3513 | return 0; |
| 3456 | 3514 | ||
| 3457 | case TIOCCBRK: /* Turn break off, unconditionally */ | 3515 | case TIOCCBRK: /* Turn break off, unconditionally */ |
| 3458 | tty->driver->break_ctl(tty, 0); | 3516 | if (tty->ops->break_ctl) |
| 3517 | tty->ops->break_ctl(tty, 0); | ||
| 3459 | return 0; | 3518 | return 0; |
| 3460 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | 3519 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
| 3461 | /* non-zero arg means wait for all output data | 3520 | /* non-zero arg means wait for all output data |
| @@ -3484,8 +3543,8 @@ int tty_ioctl(struct inode *inode, struct file *file, | |||
| 3484 | } | 3543 | } |
| 3485 | break; | 3544 | break; |
| 3486 | } | 3545 | } |
| 3487 | if (tty->driver->ioctl) { | 3546 | if (tty->ops->ioctl) { |
| 3488 | retval = (tty->driver->ioctl)(tty, file, cmd, arg); | 3547 | retval = (tty->ops->ioctl)(tty, file, cmd, arg); |
| 3489 | if (retval != -ENOIOCTLCMD) | 3548 | if (retval != -ENOIOCTLCMD) |
| 3490 | return retval; | 3549 | return retval; |
| 3491 | } | 3550 | } |
| @@ -3512,8 +3571,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd, | |||
| 3512 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) | 3571 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) |
| 3513 | return -EINVAL; | 3572 | return -EINVAL; |
| 3514 | 3573 | ||
| 3515 | if (tty->driver->compat_ioctl) { | 3574 | if (tty->ops->compat_ioctl) { |
| 3516 | retval = (tty->driver->compat_ioctl)(tty, file, cmd, arg); | 3575 | retval = (tty->ops->compat_ioctl)(tty, file, cmd, arg); |
| 3517 | if (retval != -ENOIOCTLCMD) | 3576 | if (retval != -ENOIOCTLCMD) |
| 3518 | return retval; | 3577 | return retval; |
| 3519 | } | 3578 | } |
| @@ -3563,8 +3622,7 @@ void __do_SAK(struct tty_struct *tty) | |||
| 3563 | 3622 | ||
| 3564 | tty_ldisc_flush(tty); | 3623 | tty_ldisc_flush(tty); |
| 3565 | 3624 | ||
| 3566 | if (tty->driver->flush_buffer) | 3625 | tty_driver_flush_buffer(tty); |
| 3567 | tty->driver->flush_buffer(tty); | ||
| 3568 | 3626 | ||
| 3569 | read_lock(&tasklist_lock); | 3627 | read_lock(&tasklist_lock); |
| 3570 | /* Kill the entire session */ | 3628 | /* Kill the entire session */ |
| @@ -3770,19 +3828,32 @@ static void initialize_tty_struct(struct tty_struct *tty) | |||
| 3770 | mutex_init(&tty->atomic_read_lock); | 3828 | mutex_init(&tty->atomic_read_lock); |
| 3771 | mutex_init(&tty->atomic_write_lock); | 3829 | mutex_init(&tty->atomic_write_lock); |
| 3772 | spin_lock_init(&tty->read_lock); | 3830 | spin_lock_init(&tty->read_lock); |
| 3831 | spin_lock_init(&tty->ctrl_lock); | ||
| 3773 | INIT_LIST_HEAD(&tty->tty_files); | 3832 | INIT_LIST_HEAD(&tty->tty_files); |
| 3774 | INIT_WORK(&tty->SAK_work, do_SAK_work); | 3833 | INIT_WORK(&tty->SAK_work, do_SAK_work); |
| 3775 | } | 3834 | } |
| 3776 | 3835 | ||
| 3777 | /* | 3836 | /** |
| 3778 | * The default put_char routine if the driver did not define one. | 3837 | * tty_put_char - write one character to a tty |
| 3838 | * @tty: tty | ||
| 3839 | * @ch: character | ||
| 3840 | * | ||
| 3841 | * Write one byte to the tty using the provided put_char method | ||
| 3842 | * if present. Returns the number of characters successfully output. | ||
| 3843 | * | ||
| 3844 | * Note: the specific put_char operation in the driver layer may go | ||
| 3845 | * away soon. Don't call it directly, use this method | ||
| 3779 | */ | 3846 | */ |
| 3780 | 3847 | ||
| 3781 | static void tty_default_put_char(struct tty_struct *tty, unsigned char ch) | 3848 | int tty_put_char(struct tty_struct *tty, unsigned char ch) |
| 3782 | { | 3849 | { |
| 3783 | tty->driver->write(tty, &ch, 1); | 3850 | if (tty->ops->put_char) |
| 3851 | return tty->ops->put_char(tty, ch); | ||
| 3852 | return tty->ops->write(tty, &ch, 1); | ||
| 3784 | } | 3853 | } |
| 3785 | 3854 | ||
| 3855 | EXPORT_SYMBOL_GPL(tty_put_char); | ||
| 3856 | |||
| 3786 | static struct class *tty_class; | 3857 | static struct class *tty_class; |
| 3787 | 3858 | ||
| 3788 | /** | 3859 | /** |
| @@ -3865,37 +3936,8 @@ void put_tty_driver(struct tty_driver *driver) | |||
| 3865 | void tty_set_operations(struct tty_driver *driver, | 3936 | void tty_set_operations(struct tty_driver *driver, |
| 3866 | const struct tty_operations *op) | 3937 | const struct tty_operations *op) |
| 3867 | { | 3938 | { |
| 3868 | driver->open = op->open; | 3939 | driver->ops = op; |
| 3869 | driver->close = op->close; | 3940 | }; |
| 3870 | driver->write = op->write; | ||
| 3871 | driver->put_char = op->put_char; | ||
| 3872 | driver->flush_chars = op->flush_chars; | ||
| 3873 | driver->write_room = op->write_room; | ||
| 3874 | driver->chars_in_buffer = op->chars_in_buffer; | ||
| 3875 | driver->ioctl = op->ioctl; | ||
| 3876 | driver->compat_ioctl = op->compat_ioctl; | ||
| 3877 | driver->set_termios = op->set_termios; | ||
| 3878 | driver->throttle = op->throttle; | ||
| 3879 | driver->unthrottle = op->unthrottle; | ||
| 3880 | driver->stop = op->stop; | ||
| 3881 | driver->start = op->start; | ||
| 3882 | driver->hangup = op->hangup; | ||
| 3883 | driver->break_ctl = op->break_ctl; | ||
| 3884 | driver->flush_buffer = op->flush_buffer; | ||
| 3885 | driver->set_ldisc = op->set_ldisc; | ||
| 3886 | driver->wait_until_sent = op->wait_until_sent; | ||
| 3887 | driver->send_xchar = op->send_xchar; | ||
| 3888 | driver->read_proc = op->read_proc; | ||
| 3889 | driver->write_proc = op->write_proc; | ||
| 3890 | driver->tiocmget = op->tiocmget; | ||
| 3891 | driver->tiocmset = op->tiocmset; | ||
| 3892 | #ifdef CONFIG_CONSOLE_POLL | ||
| 3893 | driver->poll_init = op->poll_init; | ||
| 3894 | driver->poll_get_char = op->poll_get_char; | ||
| 3895 | driver->poll_put_char = op->poll_put_char; | ||
| 3896 | #endif | ||
| 3897 | } | ||
| 3898 | |||
| 3899 | 3941 | ||
| 3900 | EXPORT_SYMBOL(alloc_tty_driver); | 3942 | EXPORT_SYMBOL(alloc_tty_driver); |
| 3901 | EXPORT_SYMBOL(put_tty_driver); | 3943 | EXPORT_SYMBOL(put_tty_driver); |
| @@ -3958,9 +4000,6 @@ int tty_register_driver(struct tty_driver *driver) | |||
| 3958 | return error; | 4000 | return error; |
| 3959 | } | 4001 | } |
| 3960 | 4002 | ||
| 3961 | if (!driver->put_char) | ||
| 3962 | driver->put_char = tty_default_put_char; | ||
| 3963 | |||
| 3964 | mutex_lock(&tty_mutex); | 4003 | mutex_lock(&tty_mutex); |
| 3965 | list_add(&driver->tty_drivers, &tty_drivers); | 4004 | list_add(&driver->tty_drivers, &tty_drivers); |
| 3966 | mutex_unlock(&tty_mutex); | 4005 | mutex_unlock(&tty_mutex); |
| @@ -4036,14 +4075,19 @@ void proc_clear_tty(struct task_struct *p) | |||
| 4036 | } | 4075 | } |
| 4037 | EXPORT_SYMBOL(proc_clear_tty); | 4076 | EXPORT_SYMBOL(proc_clear_tty); |
| 4038 | 4077 | ||
| 4078 | /* Called under the sighand lock */ | ||
| 4079 | |||
| 4039 | static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) | 4080 | static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty) |
| 4040 | { | 4081 | { |
| 4041 | if (tty) { | 4082 | if (tty) { |
| 4042 | /* We should not have a session or pgrp to here but.... */ | 4083 | unsigned long flags; |
| 4084 | /* We should not have a session or pgrp to put here but.... */ | ||
| 4085 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 4043 | put_pid(tty->session); | 4086 | put_pid(tty->session); |
| 4044 | put_pid(tty->pgrp); | 4087 | put_pid(tty->pgrp); |
| 4045 | tty->session = get_pid(task_session(tsk)); | ||
| 4046 | tty->pgrp = get_pid(task_pgrp(tsk)); | 4088 | tty->pgrp = get_pid(task_pgrp(tsk)); |
| 4089 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 4090 | tty->session = get_pid(task_session(tsk)); | ||
| 4047 | } | 4091 | } |
| 4048 | put_pid(tsk->signal->tty_old_pgrp); | 4092 | put_pid(tsk->signal->tty_old_pgrp); |
| 4049 | tsk->signal->tty = tty; | 4093 | tsk->signal->tty = tty; |
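The tty_io.c side of this series replaces the per-driver copies of each method with one shared const struct tty_operations pointer (tty->ops, taken from driver->ops) and adds tty->ctrl_lock around pgrp/session/packet state. Below is a minimal sketch of a driver written against the new scheme; the mydrv_* names and the constant sizes are illustrative and not part of the patch.

#include <linux/fs.h>
#include <linux/tty.h>

static struct tty_driver *mydrv_driver;	/* assumed to be set up with alloc_tty_driver() elsewhere */

static int mydrv_open(struct tty_struct *tty, struct file *filp)
{
	return 0;			/* nothing to set up in this sketch */
}

static void mydrv_close(struct tty_struct *tty, struct file *filp)
{
}

static int mydrv_write(struct tty_struct *tty, const unsigned char *buf, int count)
{
	return count;			/* pretend every byte was consumed */
}

static int mydrv_write_room(struct tty_struct *tty)
{
	return 4096;			/* tty_write() now warns when this method is missing */
}

static const struct tty_operations mydrv_ops = {
	.open		= mydrv_open,
	.close		= mydrv_close,
	.write		= mydrv_write,
	.write_room	= mydrv_write_room,
};

static void mydrv_install_ops(void)
{
	/* tty_set_operations() is now simply driver->ops = op */
	tty_set_operations(mydrv_driver, &mydrv_ops);
}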
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c index f95a80b2265f..b1a757a5ee27 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/char/tty_ioctl.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/bitops.h> | 22 | #include <linux/bitops.h> |
| 23 | #include <linux/mutex.h> | 23 | #include <linux/mutex.h> |
| 24 | #include <linux/smp_lock.h> | ||
| 24 | 25 | ||
| 25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
| 26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
| @@ -39,6 +40,50 @@ | |||
| 39 | #define TERMIOS_OLD 8 | 40 | #define TERMIOS_OLD 8 |
| 40 | 41 | ||
| 41 | 42 | ||
| 43 | int tty_chars_in_buffer(struct tty_struct *tty) | ||
| 44 | { | ||
| 45 | if (tty->ops->chars_in_buffer) | ||
| 46 | return tty->ops->chars_in_buffer(tty); | ||
| 47 | else | ||
| 48 | return 0; | ||
| 49 | } | ||
| 50 | |||
| 51 | EXPORT_SYMBOL(tty_chars_in_buffer); | ||
| 52 | |||
| 53 | int tty_write_room(struct tty_struct *tty) | ||
| 54 | { | ||
| 55 | if (tty->ops->write_room) | ||
| 56 | return tty->ops->write_room(tty); | ||
| 57 | return 2048; | ||
| 58 | } | ||
| 59 | |||
| 60 | EXPORT_SYMBOL(tty_write_room); | ||
| 61 | |||
| 62 | void tty_driver_flush_buffer(struct tty_struct *tty) | ||
| 63 | { | ||
| 64 | if (tty->ops->flush_buffer) | ||
| 65 | tty->ops->flush_buffer(tty); | ||
| 66 | } | ||
| 67 | |||
| 68 | EXPORT_SYMBOL(tty_driver_flush_buffer); | ||
| 69 | |||
| 70 | void tty_throttle(struct tty_struct *tty) | ||
| 71 | { | ||
| 72 | /* check TTY_THROTTLED first so it indicates our state */ | ||
| 73 | if (!test_and_set_bit(TTY_THROTTLED, &tty->flags) && | ||
| 74 | tty->ops->throttle) | ||
| 75 | tty->ops->throttle(tty); | ||
| 76 | } | ||
| 77 | EXPORT_SYMBOL(tty_throttle); | ||
| 78 | |||
| 79 | void tty_unthrottle(struct tty_struct *tty) | ||
| 80 | { | ||
| 81 | if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) && | ||
| 82 | tty->ops->unthrottle) | ||
| 83 | tty->ops->unthrottle(tty); | ||
| 84 | } | ||
| 85 | EXPORT_SYMBOL(tty_unthrottle); | ||
| 86 | |||
| 42 | /** | 87 | /** |
| 43 | * tty_wait_until_sent - wait for I/O to finish | 88 | * tty_wait_until_sent - wait for I/O to finish |
| 44 | * @tty: tty we are waiting for | 89 | * @tty: tty we are waiting for |
| @@ -57,15 +102,13 @@ void tty_wait_until_sent(struct tty_struct *tty, long timeout) | |||
| 57 | 102 | ||
| 58 | printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf)); | 103 | printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf)); |
| 59 | #endif | 104 | #endif |
| 60 | if (!tty->driver->chars_in_buffer) | ||
| 61 | return; | ||
| 62 | if (!timeout) | 105 | if (!timeout) |
| 63 | timeout = MAX_SCHEDULE_TIMEOUT; | 106 | timeout = MAX_SCHEDULE_TIMEOUT; |
| 64 | if (wait_event_interruptible_timeout(tty->write_wait, | 107 | if (wait_event_interruptible_timeout(tty->write_wait, |
| 65 | !tty->driver->chars_in_buffer(tty), timeout) < 0) | 108 | !tty_chars_in_buffer(tty), timeout) >= 0) { |
| 66 | return; | 109 | if (tty->ops->wait_until_sent) |
| 67 | if (tty->driver->wait_until_sent) | 110 | tty->ops->wait_until_sent(tty, timeout); |
| 68 | tty->driver->wait_until_sent(tty, timeout); | 111 | } |
| 69 | } | 112 | } |
| 70 | EXPORT_SYMBOL(tty_wait_until_sent); | 113 | EXPORT_SYMBOL(tty_wait_until_sent); |
| 71 | 114 | ||
| @@ -393,8 +436,9 @@ EXPORT_SYMBOL(tty_termios_hw_change); | |||
| 393 | static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) | 436 | static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) |
| 394 | { | 437 | { |
| 395 | int canon_change; | 438 | int canon_change; |
| 396 | struct ktermios old_termios = *tty->termios; | 439 | struct ktermios old_termios; |
| 397 | struct tty_ldisc *ld; | 440 | struct tty_ldisc *ld; |
| 441 | unsigned long flags; | ||
| 398 | 442 | ||
| 399 | /* | 443 | /* |
| 400 | * Perform the actual termios internal changes under lock. | 444 | * Perform the actual termios internal changes under lock. |
| @@ -404,7 +448,7 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) | |||
| 404 | /* FIXME: we need to decide on some locking/ordering semantics | 448 | /* FIXME: we need to decide on some locking/ordering semantics |
| 405 | for the set_termios notification eventually */ | 449 | for the set_termios notification eventually */ |
| 406 | mutex_lock(&tty->termios_mutex); | 450 | mutex_lock(&tty->termios_mutex); |
| 407 | 451 | old_termios = *tty->termios; | |
| 408 | *tty->termios = *new_termios; | 452 | *tty->termios = *new_termios; |
| 409 | unset_locked_termios(tty->termios, &old_termios, tty->termios_locked); | 453 | unset_locked_termios(tty->termios, &old_termios, tty->termios_locked); |
| 410 | canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON; | 454 | canon_change = (old_termios.c_lflag ^ tty->termios->c_lflag) & ICANON; |
| @@ -429,17 +473,19 @@ static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) | |||
| 429 | STOP_CHAR(tty) == '\023' && | 473 | STOP_CHAR(tty) == '\023' && |
| 430 | START_CHAR(tty) == '\021'); | 474 | START_CHAR(tty) == '\021'); |
| 431 | if (old_flow != new_flow) { | 475 | if (old_flow != new_flow) { |
| 476 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 432 | tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP); | 477 | tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP); |
| 433 | if (new_flow) | 478 | if (new_flow) |
| 434 | tty->ctrl_status |= TIOCPKT_DOSTOP; | 479 | tty->ctrl_status |= TIOCPKT_DOSTOP; |
| 435 | else | 480 | else |
| 436 | tty->ctrl_status |= TIOCPKT_NOSTOP; | 481 | tty->ctrl_status |= TIOCPKT_NOSTOP; |
| 482 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 437 | wake_up_interruptible(&tty->link->read_wait); | 483 | wake_up_interruptible(&tty->link->read_wait); |
| 438 | } | 484 | } |
| 439 | } | 485 | } |
| 440 | 486 | ||
| 441 | if (tty->driver->set_termios) | 487 | if (tty->ops->set_termios) |
| 442 | (*tty->driver->set_termios)(tty, &old_termios); | 488 | (*tty->ops->set_termios)(tty, &old_termios); |
| 443 | else | 489 | else |
| 444 | tty_termios_copy_hw(tty->termios, &old_termios); | 490 | tty_termios_copy_hw(tty->termios, &old_termios); |
| 445 | 491 | ||
| @@ -474,7 +520,9 @@ static int set_termios(struct tty_struct *tty, void __user *arg, int opt) | |||
| 474 | if (retval) | 520 | if (retval) |
| 475 | return retval; | 521 | return retval; |
| 476 | 522 | ||
| 523 | mutex_lock(&tty->termios_mutex); | ||
| 477 | memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios)); | 524 | memcpy(&tmp_termios, tty->termios, sizeof(struct ktermios)); |
| 525 | mutex_unlock(&tty->termios_mutex); | ||
| 478 | 526 | ||
| 479 | if (opt & TERMIOS_TERMIO) { | 527 | if (opt & TERMIOS_TERMIO) { |
| 480 | if (user_termio_to_kernel_termios(&tmp_termios, | 528 | if (user_termio_to_kernel_termios(&tmp_termios, |
| @@ -660,12 +708,14 @@ static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars) | |||
| 660 | { | 708 | { |
| 661 | struct tchars tmp; | 709 | struct tchars tmp; |
| 662 | 710 | ||
| 711 | mutex_lock(&tty->termios_mutex); | ||
| 663 | tmp.t_intrc = tty->termios->c_cc[VINTR]; | 712 | tmp.t_intrc = tty->termios->c_cc[VINTR]; |
| 664 | tmp.t_quitc = tty->termios->c_cc[VQUIT]; | 713 | tmp.t_quitc = tty->termios->c_cc[VQUIT]; |
| 665 | tmp.t_startc = tty->termios->c_cc[VSTART]; | 714 | tmp.t_startc = tty->termios->c_cc[VSTART]; |
| 666 | tmp.t_stopc = tty->termios->c_cc[VSTOP]; | 715 | tmp.t_stopc = tty->termios->c_cc[VSTOP]; |
| 667 | tmp.t_eofc = tty->termios->c_cc[VEOF]; | 716 | tmp.t_eofc = tty->termios->c_cc[VEOF]; |
| 668 | tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */ | 717 | tmp.t_brkc = tty->termios->c_cc[VEOL2]; /* what is brkc anyway? */ |
| 718 | mutex_unlock(&tty->termios_mutex); | ||
| 669 | return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 719 | return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
| 670 | } | 720 | } |
| 671 | 721 | ||
| @@ -675,12 +725,14 @@ static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars) | |||
| 675 | 725 | ||
| 676 | if (copy_from_user(&tmp, tchars, sizeof(tmp))) | 726 | if (copy_from_user(&tmp, tchars, sizeof(tmp))) |
| 677 | return -EFAULT; | 727 | return -EFAULT; |
| 728 | mutex_lock(&tty->termios_mutex); | ||
| 678 | tty->termios->c_cc[VINTR] = tmp.t_intrc; | 729 | tty->termios->c_cc[VINTR] = tmp.t_intrc; |
| 679 | tty->termios->c_cc[VQUIT] = tmp.t_quitc; | 730 | tty->termios->c_cc[VQUIT] = tmp.t_quitc; |
| 680 | tty->termios->c_cc[VSTART] = tmp.t_startc; | 731 | tty->termios->c_cc[VSTART] = tmp.t_startc; |
| 681 | tty->termios->c_cc[VSTOP] = tmp.t_stopc; | 732 | tty->termios->c_cc[VSTOP] = tmp.t_stopc; |
| 682 | tty->termios->c_cc[VEOF] = tmp.t_eofc; | 733 | tty->termios->c_cc[VEOF] = tmp.t_eofc; |
| 683 | tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */ | 734 | tty->termios->c_cc[VEOL2] = tmp.t_brkc; /* what is brkc anyway? */ |
| 735 | mutex_unlock(&tty->termios_mutex); | ||
| 684 | return 0; | 736 | return 0; |
| 685 | } | 737 | } |
| 686 | #endif | 738 | #endif |
| @@ -690,6 +742,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) | |||
| 690 | { | 742 | { |
| 691 | struct ltchars tmp; | 743 | struct ltchars tmp; |
| 692 | 744 | ||
| 745 | mutex_lock(&tty->termios_mutex); | ||
| 693 | tmp.t_suspc = tty->termios->c_cc[VSUSP]; | 746 | tmp.t_suspc = tty->termios->c_cc[VSUSP]; |
| 694 | /* what is dsuspc anyway? */ | 747 | /* what is dsuspc anyway? */ |
| 695 | tmp.t_dsuspc = tty->termios->c_cc[VSUSP]; | 748 | tmp.t_dsuspc = tty->termios->c_cc[VSUSP]; |
| @@ -698,6 +751,7 @@ static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) | |||
| 698 | tmp.t_flushc = tty->termios->c_cc[VEOL2]; | 751 | tmp.t_flushc = tty->termios->c_cc[VEOL2]; |
| 699 | tmp.t_werasc = tty->termios->c_cc[VWERASE]; | 752 | tmp.t_werasc = tty->termios->c_cc[VWERASE]; |
| 700 | tmp.t_lnextc = tty->termios->c_cc[VLNEXT]; | 753 | tmp.t_lnextc = tty->termios->c_cc[VLNEXT]; |
| 754 | mutex_unlock(&tty->termios_mutex); | ||
| 701 | return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 755 | return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
| 702 | } | 756 | } |
| 703 | 757 | ||
| @@ -708,6 +762,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) | |||
| 708 | if (copy_from_user(&tmp, ltchars, sizeof(tmp))) | 762 | if (copy_from_user(&tmp, ltchars, sizeof(tmp))) |
| 709 | return -EFAULT; | 763 | return -EFAULT; |
| 710 | 764 | ||
| 765 | mutex_lock(&tty->termios_mutex); | ||
| 711 | tty->termios->c_cc[VSUSP] = tmp.t_suspc; | 766 | tty->termios->c_cc[VSUSP] = tmp.t_suspc; |
| 712 | /* what is dsuspc anyway? */ | 767 | /* what is dsuspc anyway? */ |
| 713 | tty->termios->c_cc[VEOL2] = tmp.t_dsuspc; | 768 | tty->termios->c_cc[VEOL2] = tmp.t_dsuspc; |
| @@ -716,6 +771,7 @@ static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) | |||
| 716 | tty->termios->c_cc[VEOL2] = tmp.t_flushc; | 771 | tty->termios->c_cc[VEOL2] = tmp.t_flushc; |
| 717 | tty->termios->c_cc[VWERASE] = tmp.t_werasc; | 772 | tty->termios->c_cc[VWERASE] = tmp.t_werasc; |
| 718 | tty->termios->c_cc[VLNEXT] = tmp.t_lnextc; | 773 | tty->termios->c_cc[VLNEXT] = tmp.t_lnextc; |
| 774 | mutex_unlock(&tty->termios_mutex); | ||
| 719 | return 0; | 775 | return 0; |
| 720 | } | 776 | } |
| 721 | #endif | 777 | #endif |
| @@ -732,8 +788,8 @@ static int send_prio_char(struct tty_struct *tty, char ch) | |||
| 732 | { | 788 | { |
| 733 | int was_stopped = tty->stopped; | 789 | int was_stopped = tty->stopped; |
| 734 | 790 | ||
| 735 | if (tty->driver->send_xchar) { | 791 | if (tty->ops->send_xchar) { |
| 736 | tty->driver->send_xchar(tty, ch); | 792 | tty->ops->send_xchar(tty, ch); |
| 737 | return 0; | 793 | return 0; |
| 738 | } | 794 | } |
| 739 | 795 | ||
| @@ -742,7 +798,7 @@ static int send_prio_char(struct tty_struct *tty, char ch) | |||
| 742 | 798 | ||
| 743 | if (was_stopped) | 799 | if (was_stopped) |
| 744 | start_tty(tty); | 800 | start_tty(tty); |
| 745 | tty->driver->write(tty, &ch, 1); | 801 | tty->ops->write(tty, &ch, 1); |
| 746 | if (was_stopped) | 802 | if (was_stopped) |
| 747 | stop_tty(tty); | 803 | stop_tty(tty); |
| 748 | tty_write_unlock(tty); | 804 | tty_write_unlock(tty); |
| @@ -750,6 +806,33 @@ static int send_prio_char(struct tty_struct *tty, char ch) | |||
| 750 | } | 806 | } |
| 751 | 807 | ||
| 752 | /** | 808 | /** |
| 809 | * tty_change_softcar - carrier change ioctl helper | ||
| 810 | * @tty: tty to update | ||
| 811 | * @arg: enable/disable CLOCAL | ||
| 812 | * | ||
| 813 | * Perform a change to the CLOCAL state and call into the driver | ||
| 814 | * layer to make it visible. All done with the termios mutex | ||
| 815 | */ | ||
| 816 | |||
| 817 | static int tty_change_softcar(struct tty_struct *tty, int arg) | ||
| 818 | { | ||
| 819 | int ret = 0; | ||
| 820 | int bit = arg ? CLOCAL : 0; | ||
| 821 | struct ktermios old; | ||
| 822 | |||
| 823 | mutex_lock(&tty->termios_mutex); | ||
| 824 | old = *tty->termios; | ||
| 825 | tty->termios->c_cflag &= ~CLOCAL; | ||
| 826 | tty->termios->c_cflag |= bit; | ||
| 827 | if (tty->ops->set_termios) | ||
| 828 | tty->ops->set_termios(tty, &old); | ||
| 829 | if ((tty->termios->c_cflag & CLOCAL) != bit) | ||
| 830 | ret = -EINVAL; | ||
| 831 | mutex_unlock(&tty->termios_mutex); | ||
| 832 | return ret; | ||
| 833 | } | ||
| 834 | |||
| 835 | /** | ||
| 753 | * tty_mode_ioctl - mode related ioctls | 836 | * tty_mode_ioctl - mode related ioctls |
| 754 | * @tty: tty for the ioctl | 837 | * @tty: tty for the ioctl |
| 755 | * @file: file pointer for the tty | 838 | * @file: file pointer for the tty |
| @@ -859,12 +942,7 @@ int tty_mode_ioctl(struct tty_struct *tty, struct file *file, | |||
| 859 | case TIOCSSOFTCAR: | 942 | case TIOCSSOFTCAR: |
| 860 | if (get_user(arg, (unsigned int __user *) arg)) | 943 | if (get_user(arg, (unsigned int __user *) arg)) |
| 861 | return -EFAULT; | 944 | return -EFAULT; |
| 862 | mutex_lock(&tty->termios_mutex); | 945 | return tty_change_softcar(tty, arg); |
| 863 | tty->termios->c_cflag = | ||
| 864 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 865 | (arg ? CLOCAL : 0)); | ||
| 866 | mutex_unlock(&tty->termios_mutex); | ||
| 867 | return 0; | ||
| 868 | default: | 946 | default: |
| 869 | return -ENOIOCTLCMD; | 947 | return -ENOIOCTLCMD; |
| 870 | } | 948 | } |
| @@ -889,8 +967,7 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg) | |||
| 889 | ld->flush_buffer(tty); | 967 | ld->flush_buffer(tty); |
| 890 | /* fall through */ | 968 | /* fall through */ |
| 891 | case TCOFLUSH: | 969 | case TCOFLUSH: |
| 892 | if (tty->driver->flush_buffer) | 970 | tty_driver_flush_buffer(tty); |
| 893 | tty->driver->flush_buffer(tty); | ||
| 894 | break; | 971 | break; |
| 895 | default: | 972 | default: |
| 896 | tty_ldisc_deref(ld); | 973 | tty_ldisc_deref(ld); |
| @@ -905,6 +982,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 905 | unsigned int cmd, unsigned long arg) | 982 | unsigned int cmd, unsigned long arg) |
| 906 | { | 983 | { |
| 907 | struct tty_struct *real_tty; | 984 | struct tty_struct *real_tty; |
| 985 | unsigned long flags; | ||
| 908 | int retval; | 986 | int retval; |
| 909 | 987 | ||
| 910 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 988 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
| @@ -946,9 +1024,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 946 | case TCFLSH: | 1024 | case TCFLSH: |
| 947 | return tty_perform_flush(tty, arg); | 1025 | return tty_perform_flush(tty, arg); |
| 948 | case TIOCOUTQ: | 1026 | case TIOCOUTQ: |
| 949 | return put_user(tty->driver->chars_in_buffer ? | 1027 | return put_user(tty_chars_in_buffer(tty), (int __user *) arg); |
| 950 | tty->driver->chars_in_buffer(tty) : 0, | ||
| 951 | (int __user *) arg); | ||
| 952 | case TIOCINQ: | 1028 | case TIOCINQ: |
| 953 | retval = tty->read_cnt; | 1029 | retval = tty->read_cnt; |
| 954 | if (L_ICANON(tty)) | 1030 | if (L_ICANON(tty)) |
| @@ -963,6 +1039,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 963 | return -ENOTTY; | 1039 | return -ENOTTY; |
| 964 | if (get_user(pktmode, (int __user *) arg)) | 1040 | if (get_user(pktmode, (int __user *) arg)) |
| 965 | return -EFAULT; | 1041 | return -EFAULT; |
| 1042 | spin_lock_irqsave(&tty->ctrl_lock, flags); | ||
| 966 | if (pktmode) { | 1043 | if (pktmode) { |
| 967 | if (!tty->packet) { | 1044 | if (!tty->packet) { |
| 968 | tty->packet = 1; | 1045 | tty->packet = 1; |
| @@ -970,6 +1047,7 @@ int n_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 970 | } | 1047 | } |
| 971 | } else | 1048 | } else |
| 972 | tty->packet = 0; | 1049 | tty->packet = 0; |
| 1050 | spin_unlock_irqrestore(&tty->ctrl_lock, flags); | ||
| 973 | return 0; | 1051 | return 0; |
| 974 | } | 1052 | } |
| 975 | default: | 1053 | default: |
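tty_ioctl.c now exports small wrappers (tty_chars_in_buffer, tty_write_room, tty_driver_flush_buffer, tty_throttle, tty_unthrottle) so callers no longer test the driver method pointers by hand. A rough usage sketch follows; only the tty_* helpers come from the patch, the example caller and its policy are illustrative.

#include <linux/tty.h>

static void example_drain_or_flush(struct tty_struct *tty)
{
	int pending = tty_chars_in_buffer(tty);	/* 0 when the driver has no chars_in_buffer */
	int room = tty_write_room(tty);		/* defaults to 2048 when write_room is absent */

	if (pending > room)
		tty_driver_flush_buffer(tty);	/* quietly does nothing without flush_buffer */

	tty_throttle(tty);	/* sets TTY_THROTTLED first, then calls ->throttle if present */
	tty_unthrottle(tty);	/* clears the bit, then calls ->unthrottle if present */
}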
diff --git a/drivers/char/viocons.c b/drivers/char/viocons.c index 8de6b95aeb84..3d3e1c2b310f 100644 --- a/drivers/char/viocons.c +++ b/drivers/char/viocons.c | |||
| @@ -628,13 +628,13 @@ static int viotty_write(struct tty_struct *tty, const unsigned char *buf, | |||
| 628 | /* | 628 | /* |
| 629 | * TTY put_char method | 629 | * TTY put_char method |
| 630 | */ | 630 | */ |
| 631 | static void viotty_put_char(struct tty_struct *tty, unsigned char ch) | 631 | static int viotty_put_char(struct tty_struct *tty, unsigned char ch) |
| 632 | { | 632 | { |
| 633 | struct port_info *pi; | 633 | struct port_info *pi; |
| 634 | 634 | ||
| 635 | pi = get_port_data(tty); | 635 | pi = get_port_data(tty); |
| 636 | if (pi == NULL) | 636 | if (pi == NULL) |
| 637 | return; | 637 | return 0; |
| 638 | 638 | ||
| 639 | /* This will append '\r' as well if the char is '\n' */ | 639 | /* This will append '\r' as well if the char is '\n' */ |
| 640 | if (viochar_is_console(pi)) | 640 | if (viochar_is_console(pi)) |
| @@ -642,6 +642,7 @@ static void viotty_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 642 | 642 | ||
| 643 | if (viopath_isactive(pi->lp)) | 643 | if (viopath_isactive(pi->lp)) |
| 644 | internal_write(pi, &ch, 1); | 644 | internal_write(pi, &ch, 1); |
| 645 | return 1; | ||
| 645 | } | 646 | } |
| 646 | 647 | ||
| 647 | /* | 648 | /* |
| @@ -704,8 +705,11 @@ static int viotty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 704 | case KDSKBLED: | 705 | case KDSKBLED: |
| 705 | return 0; | 706 | return 0; |
| 706 | } | 707 | } |
| 707 | 708 | /* FIXME: WTF is this being called for ??? */ | |
| 708 | return n_tty_ioctl(tty, file, cmd, arg); | 709 | lock_kernel(); |
| 710 | ret = n_tty_ioctl(tty, file, cmd, arg); | ||
| 711 | unlock_kernel(); | ||
| 712 | return ret; | ||
| 709 | } | 713 | } |
| 710 | 714 | ||
| 711 | /* | 715 | /* |
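The viocons change above illustrates the new put_char contract: drivers now return how many characters they accepted rather than void, and tty_put_char() falls back to ->write() when a driver provides no put_char method. A hedged driver-side sketch, where queue_one_byte() is a hypothetical stand-in for whatever buffering a real driver uses:

#include <linux/tty.h>

static int queue_one_byte(void *port, unsigned char ch);	/* hypothetical driver helper */

static int mydrv_put_char(struct tty_struct *tty, unsigned char ch)
{
	/* New contract: return 1 if the byte was queued, 0 if the buffer was full */
	return queue_one_byte(tty->driver_data, ch) ? 1 : 0;
}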
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 1c2660477135..e458b08139af 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
| @@ -909,15 +909,21 @@ int vc_resize(struct vc_data *vc, unsigned int cols, unsigned int lines) | |||
| 909 | 909 | ||
| 910 | if (vc->vc_tty) { | 910 | if (vc->vc_tty) { |
| 911 | struct winsize ws, *cws = &vc->vc_tty->winsize; | 911 | struct winsize ws, *cws = &vc->vc_tty->winsize; |
| 912 | unsigned long flags; | ||
| 912 | 913 | ||
| 913 | memset(&ws, 0, sizeof(ws)); | 914 | memset(&ws, 0, sizeof(ws)); |
| 914 | ws.ws_row = vc->vc_rows; | 915 | ws.ws_row = vc->vc_rows; |
| 915 | ws.ws_col = vc->vc_cols; | 916 | ws.ws_col = vc->vc_cols; |
| 916 | ws.ws_ypixel = vc->vc_scan_lines; | 917 | ws.ws_ypixel = vc->vc_scan_lines; |
| 918 | |||
| 919 | mutex_lock(&vc->vc_tty->termios_mutex); | ||
| 920 | spin_lock_irqsave(&vc->vc_tty->ctrl_lock, flags); | ||
| 917 | if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col) && | 921 | if ((ws.ws_row != cws->ws_row || ws.ws_col != cws->ws_col) && |
| 918 | vc->vc_tty->pgrp) | 922 | vc->vc_tty->pgrp) |
| 919 | kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1); | 923 | kill_pgrp(vc->vc_tty->pgrp, SIGWINCH, 1); |
| 924 | spin_unlock_irqrestore(&vc->vc_tty->ctrl_lock, flags); | ||
| 920 | *cws = ws; | 925 | *cws = ws; |
| 926 | mutex_unlock(&vc->vc_tty->termios_mutex); | ||
| 921 | } | 927 | } |
| 922 | 928 | ||
| 923 | if (CON_IS_VISIBLE(vc)) | 929 | if (CON_IS_VISIBLE(vc)) |
| @@ -2541,6 +2547,9 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) | |||
| 2541 | if (get_user(type, p)) | 2547 | if (get_user(type, p)) |
| 2542 | return -EFAULT; | 2548 | return -EFAULT; |
| 2543 | ret = 0; | 2549 | ret = 0; |
| 2550 | |||
| 2551 | lock_kernel(); | ||
| 2552 | |||
| 2544 | switch (type) | 2553 | switch (type) |
| 2545 | { | 2554 | { |
| 2546 | case TIOCL_SETSEL: | 2555 | case TIOCL_SETSEL: |
| @@ -2560,7 +2569,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) | |||
| 2560 | ret = sel_loadlut(p); | 2569 | ret = sel_loadlut(p); |
| 2561 | break; | 2570 | break; |
| 2562 | case TIOCL_GETSHIFTSTATE: | 2571 | case TIOCL_GETSHIFTSTATE: |
| 2563 | 2572 | ||
| 2564 | /* | 2573 | /* |
| 2565 | * Make it possible to react to Shift+Mousebutton. | 2574 | * Make it possible to react to Shift+Mousebutton. |
| 2566 | * Note that 'shift_state' is an undocumented | 2575 | * Note that 'shift_state' is an undocumented |
| @@ -2615,6 +2624,7 @@ int tioclinux(struct tty_struct *tty, unsigned long arg) | |||
| 2615 | ret = -EINVAL; | 2624 | ret = -EINVAL; |
| 2616 | break; | 2625 | break; |
| 2617 | } | 2626 | } |
| 2627 | unlock_kernel(); | ||
| 2618 | return ret; | 2628 | return ret; |
| 2619 | } | 2629 | } |
| 2620 | 2630 | ||
| @@ -2632,11 +2642,11 @@ static int con_write(struct tty_struct *tty, const unsigned char *buf, int count | |||
| 2632 | return retval; | 2642 | return retval; |
| 2633 | } | 2643 | } |
| 2634 | 2644 | ||
| 2635 | static void con_put_char(struct tty_struct *tty, unsigned char ch) | 2645 | static int con_put_char(struct tty_struct *tty, unsigned char ch) |
| 2636 | { | 2646 | { |
| 2637 | if (in_interrupt()) | 2647 | if (in_interrupt()) |
| 2638 | return; /* n_r3964 calls put_char() from interrupt context */ | 2648 | return 0; /* n_r3964 calls put_char() from interrupt context */ |
| 2639 | do_con_write(tty, &ch, 1); | 2649 | return do_con_write(tty, &ch, 1); |
| 2640 | } | 2650 | } |
| 2641 | 2651 | ||
| 2642 | static int con_write_room(struct tty_struct *tty) | 2652 | static int con_write_room(struct tty_struct *tty) |
| @@ -3829,7 +3839,7 @@ static int con_font_get(struct vc_data *vc, struct console_font_op *op) | |||
| 3829 | goto out; | 3839 | goto out; |
| 3830 | 3840 | ||
| 3831 | c = (font.width+7)/8 * 32 * font.charcount; | 3841 | c = (font.width+7)/8 * 32 * font.charcount; |
| 3832 | 3842 | ||
| 3833 | if (op->data && font.charcount > op->charcount) | 3843 | if (op->data && font.charcount > op->charcount) |
| 3834 | rc = -ENOSPC; | 3844 | rc = -ENOSPC; |
| 3835 | if (!(op->flags & KD_FONT_FLAG_OLD)) { | 3845 | if (!(op->flags & KD_FONT_FLAG_OLD)) { |
| @@ -3994,6 +4004,7 @@ u16 screen_glyph(struct vc_data *vc, int offset) | |||
| 3994 | c |= 0x100; | 4004 | c |= 0x100; |
| 3995 | return c; | 4005 | return c; |
| 3996 | } | 4006 | } |
| 4007 | EXPORT_SYMBOL_GPL(screen_glyph); | ||
| 3997 | 4008 | ||
| 3998 | /* used by vcs - note the word offset */ | 4009 | /* used by vcs - note the word offset */ |
| 3999 | unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed) | 4010 | unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed) |
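vc_resize() above takes ctrl_lock by hand around the SIGWINCH delivery; code outside the console layer can use the tty_get_pgrp() helper added earlier in this series for the same job. A small sketch of that pattern, with an illustrative caller:

#include <linux/sched.h>
#include <linux/tty.h>

static void example_notify_winch(struct tty_struct *tty)
{
	struct pid *pgrp = tty_get_pgrp(tty);	/* takes ctrl_lock, returns a counted reference */

	if (pgrp)
		kill_pgrp(pgrp, SIGWINCH, 1);
	put_pid(pgrp);				/* put_pid(NULL) is a harmless no-op */
}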
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c index e6f89e8b9258..3211afd9d57e 100644 --- a/drivers/char/vt_ioctl.c +++ b/drivers/char/vt_ioctl.c | |||
| @@ -373,11 +373,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 373 | unsigned char ucval; | 373 | unsigned char ucval; |
| 374 | void __user *up = (void __user *)arg; | 374 | void __user *up = (void __user *)arg; |
| 375 | int i, perm; | 375 | int i, perm; |
| 376 | 376 | int ret = 0; | |
| 377 | |||
| 377 | console = vc->vc_num; | 378 | console = vc->vc_num; |
| 378 | 379 | ||
| 379 | if (!vc_cons_allocated(console)) /* impossible? */ | 380 | lock_kernel(); |
| 380 | return -ENOIOCTLCMD; | 381 | |
| 382 | if (!vc_cons_allocated(console)) { /* impossible? */ | ||
| 383 | ret = -ENOIOCTLCMD; | ||
| 384 | goto out; | ||
| 385 | } | ||
| 386 | |||
| 381 | 387 | ||
| 382 | /* | 388 | /* |
| 383 | * To have permissions to do most of the vt ioctls, we either have | 389 | * To have permissions to do most of the vt ioctls, we either have |
| @@ -391,15 +397,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 391 | switch (cmd) { | 397 | switch (cmd) { |
| 392 | case KIOCSOUND: | 398 | case KIOCSOUND: |
| 393 | if (!perm) | 399 | if (!perm) |
| 394 | return -EPERM; | 400 | goto eperm; |
| 395 | if (arg) | 401 | if (arg) |
| 396 | arg = CLOCK_TICK_RATE / arg; | 402 | arg = CLOCK_TICK_RATE / arg; |
| 397 | kd_mksound(arg, 0); | 403 | kd_mksound(arg, 0); |
| 398 | return 0; | 404 | break; |
| 399 | 405 | ||
| 400 | case KDMKTONE: | 406 | case KDMKTONE: |
| 401 | if (!perm) | 407 | if (!perm) |
| 402 | return -EPERM; | 408 | goto eperm; |
| 403 | { | 409 | { |
| 404 | unsigned int ticks, count; | 410 | unsigned int ticks, count; |
| 405 | 411 | ||
| @@ -412,7 +418,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 412 | if (count) | 418 | if (count) |
| 413 | count = CLOCK_TICK_RATE / count; | 419 | count = CLOCK_TICK_RATE / count; |
| 414 | kd_mksound(count, ticks); | 420 | kd_mksound(count, ticks); |
| 415 | return 0; | 421 | break; |
| 416 | } | 422 | } |
| 417 | 423 | ||
| 418 | case KDGKBTYPE: | 424 | case KDGKBTYPE: |
| @@ -435,14 +441,18 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 435 | * KDADDIO and KDDELIO may be able to add ports beyond what | 441 | * KDADDIO and KDDELIO may be able to add ports beyond what |
| 436 | * we reject here, but to be safe... | 442 | * we reject here, but to be safe... |
| 437 | */ | 443 | */ |
| 438 | if (arg < GPFIRST || arg > GPLAST) | 444 | if (arg < GPFIRST || arg > GPLAST) { |
| 439 | return -EINVAL; | 445 | ret = -EINVAL; |
| 440 | return sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0; | 446 | break; |
| 447 | } | ||
| 448 | ret = sys_ioperm(arg, 1, (cmd == KDADDIO)) ? -ENXIO : 0; | ||
| 449 | break; | ||
| 441 | 450 | ||
| 442 | case KDENABIO: | 451 | case KDENABIO: |
| 443 | case KDDISABIO: | 452 | case KDDISABIO: |
| 444 | return sys_ioperm(GPFIRST, GPNUM, | 453 | ret = sys_ioperm(GPFIRST, GPNUM, |
| 445 | (cmd == KDENABIO)) ? -ENXIO : 0; | 454 | (cmd == KDENABIO)) ? -ENXIO : 0; |
| 455 | break; | ||
| 446 | #endif | 456 | #endif |
| 447 | 457 | ||
| 448 | /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */ | 458 | /* Linux m68k/i386 interface for setting the keyboard delay/repeat rate */ |
| @@ -450,19 +460,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 450 | case KDKBDREP: | 460 | case KDKBDREP: |
| 451 | { | 461 | { |
| 452 | struct kbd_repeat kbrep; | 462 | struct kbd_repeat kbrep; |
| 453 | int err; | ||
| 454 | 463 | ||
| 455 | if (!capable(CAP_SYS_TTY_CONFIG)) | 464 | if (!capable(CAP_SYS_TTY_CONFIG)) |
| 456 | return -EPERM; | 465 | goto eperm; |
| 457 | 466 | ||
| 458 | if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) | 467 | if (copy_from_user(&kbrep, up, sizeof(struct kbd_repeat))) { |
| 459 | return -EFAULT; | 468 | ret = -EFAULT; |
| 460 | err = kbd_rate(&kbrep); | 469 | break; |
| 461 | if (err) | 470 | } |
| 462 | return err; | 471 | ret = kbd_rate(&kbrep); |
| 472 | if (ret) | ||
| 473 | break; | ||
| 463 | if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat))) | 474 | if (copy_to_user(up, &kbrep, sizeof(struct kbd_repeat))) |
| 464 | return -EFAULT; | 475 | ret = -EFAULT; |
| 465 | return 0; | 476 | break; |
| 466 | } | 477 | } |
| 467 | 478 | ||
| 468 | case KDSETMODE: | 479 | case KDSETMODE: |
| @@ -475,7 +486,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 475 | * need to restore their engine state. --BenH | 486 | * need to restore their engine state. --BenH |
| 476 | */ | 487 | */ |
| 477 | if (!perm) | 488 | if (!perm) |
| 478 | return -EPERM; | 489 | goto eperm; |
| 479 | switch (arg) { | 490 | switch (arg) { |
| 480 | case KD_GRAPHICS: | 491 | case KD_GRAPHICS: |
| 481 | break; | 492 | break; |
| @@ -485,13 +496,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 485 | case KD_TEXT: | 496 | case KD_TEXT: |
| 486 | break; | 497 | break; |
| 487 | default: | 498 | default: |
| 488 | return -EINVAL; | 499 | ret = -EINVAL; |
| 500 | goto out; | ||
| 489 | } | 501 | } |
| 490 | if (vc->vc_mode == (unsigned char) arg) | 502 | if (vc->vc_mode == (unsigned char) arg) |
| 491 | return 0; | 503 | break; |
| 492 | vc->vc_mode = (unsigned char) arg; | 504 | vc->vc_mode = (unsigned char) arg; |
| 493 | if (console != fg_console) | 505 | if (console != fg_console) |
| 494 | return 0; | 506 | break; |
| 495 | /* | 507 | /* |
| 496 | * explicitly blank/unblank the screen if switching modes | 508 | * explicitly blank/unblank the screen if switching modes |
| 497 | */ | 509 | */ |
| @@ -501,7 +513,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 501 | else | 513 | else |
| 502 | do_blank_screen(1); | 514 | do_blank_screen(1); |
| 503 | release_console_sem(); | 515 | release_console_sem(); |
| 504 | return 0; | 516 | break; |
| 505 | 517 | ||
| 506 | case KDGETMODE: | 518 | case KDGETMODE: |
| 507 | ucval = vc->vc_mode; | 519 | ucval = vc->vc_mode; |
| @@ -513,11 +525,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 513 | * these work like a combination of mmap and KDENABIO. | 525 | * these work like a combination of mmap and KDENABIO. |
| 514 | * this could be easily finished. | 526 | * this could be easily finished. |
| 515 | */ | 527 | */ |
| 516 | return -EINVAL; | 528 | ret = -EINVAL; |
| 529 | break; | ||
| 517 | 530 | ||
| 518 | case KDSKBMODE: | 531 | case KDSKBMODE: |
| 519 | if (!perm) | 532 | if (!perm) |
| 520 | return -EPERM; | 533 | goto eperm; |
| 521 | switch(arg) { | 534 | switch(arg) { |
| 522 | case K_RAW: | 535 | case K_RAW: |
| 523 | kbd->kbdmode = VC_RAW; | 536 | kbd->kbdmode = VC_RAW; |
| @@ -534,10 +547,11 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 534 | compute_shiftstate(); | 547 | compute_shiftstate(); |
| 535 | break; | 548 | break; |
| 536 | default: | 549 | default: |
| 537 | return -EINVAL; | 550 | ret = -EINVAL; |
| 551 | goto out; | ||
| 538 | } | 552 | } |
| 539 | tty_ldisc_flush(tty); | 553 | tty_ldisc_flush(tty); |
| 540 | return 0; | 554 | break; |
| 541 | 555 | ||
| 542 | case KDGKBMODE: | 556 | case KDGKBMODE: |
| 543 | ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW : | 557 | ucval = ((kbd->kbdmode == VC_RAW) ? K_RAW : |
| @@ -557,28 +571,32 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 557 | set_vc_kbd_mode(kbd, VC_META); | 571 | set_vc_kbd_mode(kbd, VC_META); |
| 558 | break; | 572 | break; |
| 559 | default: | 573 | default: |
| 560 | return -EINVAL; | 574 | ret = -EINVAL; |
| 561 | } | 575 | } |
| 562 | return 0; | 576 | break; |
| 563 | 577 | ||
| 564 | case KDGKBMETA: | 578 | case KDGKBMETA: |
| 565 | ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT); | 579 | ucval = (vc_kbd_mode(kbd, VC_META) ? K_ESCPREFIX : K_METABIT); |
| 566 | setint: | 580 | setint: |
| 567 | return put_user(ucval, (int __user *)arg); | 581 | ret = put_user(ucval, (int __user *)arg); |
| 582 | break; | ||
| 568 | 583 | ||
| 569 | case KDGETKEYCODE: | 584 | case KDGETKEYCODE: |
| 570 | case KDSETKEYCODE: | 585 | case KDSETKEYCODE: |
| 571 | if(!capable(CAP_SYS_TTY_CONFIG)) | 586 | if(!capable(CAP_SYS_TTY_CONFIG)) |
| 572 | perm=0; | 587 | perm = 0; |
| 573 | return do_kbkeycode_ioctl(cmd, up, perm); | 588 | ret = do_kbkeycode_ioctl(cmd, up, perm); |
| 589 | break; | ||
| 574 | 590 | ||
| 575 | case KDGKBENT: | 591 | case KDGKBENT: |
| 576 | case KDSKBENT: | 592 | case KDSKBENT: |
| 577 | return do_kdsk_ioctl(cmd, up, perm, kbd); | 593 | ret = do_kdsk_ioctl(cmd, up, perm, kbd); |
| 594 | break; | ||
| 578 | 595 | ||
| 579 | case KDGKBSENT: | 596 | case KDGKBSENT: |
| 580 | case KDSKBSENT: | 597 | case KDSKBSENT: |
| 581 | return do_kdgkb_ioctl(cmd, up, perm); | 598 | ret = do_kdgkb_ioctl(cmd, up, perm); |
| 599 | break; | ||
| 582 | 600 | ||
| 583 | case KDGKBDIACR: | 601 | case KDGKBDIACR: |
| 584 | { | 602 | { |
| @@ -586,26 +604,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 586 | struct kbdiacr diacr; | 604 | struct kbdiacr diacr; |
| 587 | int i; | 605 | int i; |
| 588 | 606 | ||
| 589 | if (put_user(accent_table_size, &a->kb_cnt)) | 607 | if (put_user(accent_table_size, &a->kb_cnt)) { |
| 590 | return -EFAULT; | 608 | ret = -EFAULT; |
| 609 | break; | ||
| 610 | } | ||
| 591 | for (i = 0; i < accent_table_size; i++) { | 611 | for (i = 0; i < accent_table_size; i++) { |
| 592 | diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr); | 612 | diacr.diacr = conv_uni_to_8bit(accent_table[i].diacr); |
| 593 | diacr.base = conv_uni_to_8bit(accent_table[i].base); | 613 | diacr.base = conv_uni_to_8bit(accent_table[i].base); |
| 594 | diacr.result = conv_uni_to_8bit(accent_table[i].result); | 614 | diacr.result = conv_uni_to_8bit(accent_table[i].result); |
| 595 | if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) | 615 | if (copy_to_user(a->kbdiacr + i, &diacr, sizeof(struct kbdiacr))) { |
| 596 | return -EFAULT; | 616 | ret = -EFAULT; |
| 617 | break; | ||
| 618 | } | ||
| 597 | } | 619 | } |
| 598 | return 0; | 620 | break; |
| 599 | } | 621 | } |
| 600 | case KDGKBDIACRUC: | 622 | case KDGKBDIACRUC: |
| 601 | { | 623 | { |
| 602 | struct kbdiacrsuc __user *a = up; | 624 | struct kbdiacrsuc __user *a = up; |
| 603 | 625 | ||
| 604 | if (put_user(accent_table_size, &a->kb_cnt)) | 626 | if (put_user(accent_table_size, &a->kb_cnt)) |
| 605 | return -EFAULT; | 627 | ret = -EFAULT; |
| 606 | if (copy_to_user(a->kbdiacruc, accent_table, accent_table_size*sizeof(struct kbdiacruc))) | 628 | else if (copy_to_user(a->kbdiacruc, accent_table, |
| 607 | return -EFAULT; | 629 | accent_table_size*sizeof(struct kbdiacruc))) |
| 608 | return 0; | 630 | ret = -EFAULT; |
| 631 | break; | ||
| 609 | } | 632 | } |
| 610 | 633 | ||
| 611 | case KDSKBDIACR: | 634 | case KDSKBDIACR: |
| @@ -616,20 +639,26 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 616 | int i; | 639 | int i; |
| 617 | 640 | ||
| 618 | if (!perm) | 641 | if (!perm) |
| 619 | return -EPERM; | 642 | goto eperm; |
| 620 | if (get_user(ct,&a->kb_cnt)) | 643 | if (get_user(ct,&a->kb_cnt)) { |
| 621 | return -EFAULT; | 644 | ret = -EFAULT; |
| 622 | if (ct >= MAX_DIACR) | 645 | break; |
| 623 | return -EINVAL; | 646 | } |
| 647 | if (ct >= MAX_DIACR) { | ||
| 648 | ret = -EINVAL; | ||
| 649 | break; | ||
| 650 | } | ||
| 624 | accent_table_size = ct; | 651 | accent_table_size = ct; |
| 625 | for (i = 0; i < ct; i++) { | 652 | for (i = 0; i < ct; i++) { |
| 626 | if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) | 653 | if (copy_from_user(&diacr, a->kbdiacr + i, sizeof(struct kbdiacr))) { |
| 627 | return -EFAULT; | 654 | ret = -EFAULT; |
| 655 | break; | ||
| 656 | } | ||
| 628 | accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr); | 657 | accent_table[i].diacr = conv_8bit_to_uni(diacr.diacr); |
| 629 | accent_table[i].base = conv_8bit_to_uni(diacr.base); | 658 | accent_table[i].base = conv_8bit_to_uni(diacr.base); |
| 630 | accent_table[i].result = conv_8bit_to_uni(diacr.result); | 659 | accent_table[i].result = conv_8bit_to_uni(diacr.result); |
| 631 | } | 660 | } |
| 632 | return 0; | 661 | break; |
| 633 | } | 662 | } |
| 634 | 663 | ||
| 635 | case KDSKBDIACRUC: | 664 | case KDSKBDIACRUC: |
| @@ -638,15 +667,19 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 638 | unsigned int ct; | 667 | unsigned int ct; |
| 639 | 668 | ||
| 640 | if (!perm) | 669 | if (!perm) |
| 641 | return -EPERM; | 670 | goto eperm; |
| 642 | if (get_user(ct,&a->kb_cnt)) | 671 | if (get_user(ct,&a->kb_cnt)) { |
| 643 | return -EFAULT; | 672 | ret = -EFAULT; |
| 644 | if (ct >= MAX_DIACR) | 673 | break; |
| 645 | return -EINVAL; | 674 | } |
| 675 | if (ct >= MAX_DIACR) { | ||
| 676 | ret = -EINVAL; | ||
| 677 | break; | ||
| 678 | } | ||
| 646 | accent_table_size = ct; | 679 | accent_table_size = ct; |
| 647 | if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc))) | 680 | if (copy_from_user(accent_table, a->kbdiacruc, ct*sizeof(struct kbdiacruc))) |
| 648 | return -EFAULT; | 681 | ret = -EFAULT; |
| 649 | return 0; | 682 | break; |
| 650 | } | 683 | } |
| 651 | 684 | ||
| 652 | /* the ioctls below read/set the flags usually shown in the leds */ | 685 | /* the ioctls below read/set the flags usually shown in the leds */ |
| @@ -657,26 +690,29 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 657 | 690 | ||
| 658 | case KDSKBLED: | 691 | case KDSKBLED: |
| 659 | if (!perm) | 692 | if (!perm) |
| 660 | return -EPERM; | 693 | goto eperm; |
| 661 | if (arg & ~0x77) | 694 | if (arg & ~0x77) { |
| 662 | return -EINVAL; | 695 | ret = -EINVAL; |
| 696 | break; | ||
| 697 | } | ||
| 663 | kbd->ledflagstate = (arg & 7); | 698 | kbd->ledflagstate = (arg & 7); |
| 664 | kbd->default_ledflagstate = ((arg >> 4) & 7); | 699 | kbd->default_ledflagstate = ((arg >> 4) & 7); |
| 665 | set_leds(); | 700 | set_leds(); |
| 666 | return 0; | 701 | break; |
| 667 | 702 | ||
| 668 | /* the ioctls below only set the lights, not the functions */ | 703 | /* the ioctls below only set the lights, not the functions */ |
| 669 | /* for those, see KDGKBLED and KDSKBLED above */ | 704 | /* for those, see KDGKBLED and KDSKBLED above */ |
| 670 | case KDGETLED: | 705 | case KDGETLED: |
| 671 | ucval = getledstate(); | 706 | ucval = getledstate(); |
| 672 | setchar: | 707 | setchar: |
| 673 | return put_user(ucval, (char __user *)arg); | 708 | ret = put_user(ucval, (char __user *)arg); |
| 709 | break; | ||
| 674 | 710 | ||
| 675 | case KDSETLED: | 711 | case KDSETLED: |
| 676 | if (!perm) | 712 | if (!perm) |
| 677 | return -EPERM; | 713 | goto eperm; |
| 678 | setledstate(kbd, arg); | 714 | setledstate(kbd, arg); |
| 679 | return 0; | 715 | break; |
| 680 | 716 | ||
| 681 | /* | 717 | /* |
| 682 | * A process can indicate its willingness to accept signals | 718 | * A process can indicate its willingness to accept signals |
| @@ -688,16 +724,17 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 688 | case KDSIGACCEPT: | 724 | case KDSIGACCEPT: |
| 689 | { | 725 | { |
| 690 | if (!perm || !capable(CAP_KILL)) | 726 | if (!perm || !capable(CAP_KILL)) |
| 691 | return -EPERM; | 727 | goto eperm; |
| 692 | if (!valid_signal(arg) || arg < 1 || arg == SIGKILL) | 728 | if (!valid_signal(arg) || arg < 1 || arg == SIGKILL) |
| 693 | return -EINVAL; | 729 | ret = -EINVAL; |
| 694 | 730 | else { | |
| 695 | spin_lock_irq(&vt_spawn_con.lock); | 731 | spin_lock_irq(&vt_spawn_con.lock); |
| 696 | put_pid(vt_spawn_con.pid); | 732 | put_pid(vt_spawn_con.pid); |
| 697 | vt_spawn_con.pid = get_pid(task_pid(current)); | 733 | vt_spawn_con.pid = get_pid(task_pid(current)); |
| 698 | vt_spawn_con.sig = arg; | 734 | vt_spawn_con.sig = arg; |
| 699 | spin_unlock_irq(&vt_spawn_con.lock); | 735 | spin_unlock_irq(&vt_spawn_con.lock); |
| 700 | return 0; | 736 | } |
| 737 | break; | ||
| 701 | } | 738 | } |
| 702 | 739 | ||
| 703 | case VT_SETMODE: | 740 | case VT_SETMODE: |
| @@ -705,11 +742,15 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 705 | struct vt_mode tmp; | 742 | struct vt_mode tmp; |
| 706 | 743 | ||
| 707 | if (!perm) | 744 | if (!perm) |
| 708 | return -EPERM; | 745 | goto eperm; |
| 709 | if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) | 746 | if (copy_from_user(&tmp, up, sizeof(struct vt_mode))) { |
| 710 | return -EFAULT; | 747 | ret = -EFAULT; |
| 711 | if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) | 748 | goto out; |
| 712 | return -EINVAL; | 749 | } |
| 750 | if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) { | ||
| 751 | ret = -EINVAL; | ||
| 752 | goto out; | ||
| 753 | } | ||
| 713 | acquire_console_sem(); | 754 | acquire_console_sem(); |
| 714 | vc->vt_mode = tmp; | 755 | vc->vt_mode = tmp; |
| 715 | /* the frsig is ignored, so we set it to 0 */ | 756 | /* the frsig is ignored, so we set it to 0 */ |
| @@ -719,7 +760,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 719 | /* no switch is required -- saw@shade.msu.ru */ | 760 | /* no switch is required -- saw@shade.msu.ru */ |
| 720 | vc->vt_newvt = -1; | 761 | vc->vt_newvt = -1; |
| 721 | release_console_sem(); | 762 | release_console_sem(); |
| 722 | return 0; | 763 | break; |
| 723 | } | 764 | } |
| 724 | 765 | ||
| 725 | case VT_GETMODE: | 766 | case VT_GETMODE: |
| @@ -732,7 +773,9 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 732 | release_console_sem(); | 773 | release_console_sem(); |
| 733 | 774 | ||
| 734 | rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); | 775 | rc = copy_to_user(up, &tmp, sizeof(struct vt_mode)); |
| 735 | return rc ? -EFAULT : 0; | 776 | if (rc) |
| 777 | ret = -EFAULT; | ||
| 778 | break; | ||
| 736 | } | 779 | } |
| 737 | 780 | ||
| 738 | /* | 781 | /* |
| @@ -746,12 +789,16 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 746 | unsigned short state, mask; | 789 | unsigned short state, mask; |
| 747 | 790 | ||
| 748 | if (put_user(fg_console + 1, &vtstat->v_active)) | 791 | if (put_user(fg_console + 1, &vtstat->v_active)) |
| 749 | return -EFAULT; | 792 | ret = -EFAULT; |
| 750 | state = 1; /* /dev/tty0 is always open */ | 793 | else { |
| 751 | for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; ++i, mask <<= 1) | 794 | state = 1; /* /dev/tty0 is always open */ |
| 752 | if (VT_IS_IN_USE(i)) | 795 | for (i = 0, mask = 2; i < MAX_NR_CONSOLES && mask; |
| 753 | state |= mask; | 796 | ++i, mask <<= 1) |
| 754 | return put_user(state, &vtstat->v_state); | 797 | if (VT_IS_IN_USE(i)) |
| 798 | state |= mask; | ||
| 799 | ret = put_user(state, &vtstat->v_state); | ||
| 800 | } | ||
| 801 | break; | ||
| 755 | } | 802 | } |
| 756 | 803 | ||
| 757 | /* | 804 | /* |
| @@ -771,27 +818,31 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 771 | */ | 818 | */ |
| 772 | case VT_ACTIVATE: | 819 | case VT_ACTIVATE: |
| 773 | if (!perm) | 820 | if (!perm) |
| 774 | return -EPERM; | 821 | goto eperm; |
| 775 | if (arg == 0 || arg > MAX_NR_CONSOLES) | 822 | if (arg == 0 || arg > MAX_NR_CONSOLES) |
| 776 | return -ENXIO; | 823 | ret = -ENXIO; |
| 777 | arg--; | 824 | else { |
| 778 | acquire_console_sem(); | 825 | arg--; |
| 779 | i = vc_allocate(arg); | 826 | acquire_console_sem(); |
| 780 | release_console_sem(); | 827 | ret = vc_allocate(arg); |
| 781 | if (i) | 828 | release_console_sem(); |
| 782 | return i; | 829 | if (ret) |
| 783 | set_console(arg); | 830 | break; |
| 784 | return 0; | 831 | set_console(arg); |
| 832 | } | ||
| 833 | break; | ||
| 785 | 834 | ||
| 786 | /* | 835 | /* |
| 787 | * wait until the specified VT has been activated | 836 | * wait until the specified VT has been activated |
| 788 | */ | 837 | */ |
| 789 | case VT_WAITACTIVE: | 838 | case VT_WAITACTIVE: |
| 790 | if (!perm) | 839 | if (!perm) |
| 791 | return -EPERM; | 840 | goto eperm; |
| 792 | if (arg == 0 || arg > MAX_NR_CONSOLES) | 841 | if (arg == 0 || arg > MAX_NR_CONSOLES) |
| 793 | return -ENXIO; | 842 | ret = -ENXIO; |
| 794 | return vt_waitactive(arg-1); | 843 | else |
| 844 | ret = vt_waitactive(arg - 1); | ||
| 845 | break; | ||
| 795 | 846 | ||
| 796 | /* | 847 | /* |
| 797 | * If a vt is under process control, the kernel will not switch to it | 848 | * If a vt is under process control, the kernel will not switch to it |
| @@ -805,10 +856,12 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 805 | */ | 856 | */ |
| 806 | case VT_RELDISP: | 857 | case VT_RELDISP: |
| 807 | if (!perm) | 858 | if (!perm) |
| 808 | return -EPERM; | 859 | goto eperm; |
| 809 | if (vc->vt_mode.mode != VT_PROCESS) | ||
| 810 | return -EINVAL; | ||
| 811 | 860 | ||
| 861 | if (vc->vt_mode.mode != VT_PROCESS) { | ||
| 862 | ret = -EINVAL; | ||
| 863 | break; | ||
| 864 | } | ||
| 812 | /* | 865 | /* |
| 813 | * Switching-from response | 866 | * Switching-from response |
| 814 | */ | 867 | */ |
| @@ -829,10 +882,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 829 | int newvt; | 882 | int newvt; |
| 830 | newvt = vc->vt_newvt; | 883 | newvt = vc->vt_newvt; |
| 831 | vc->vt_newvt = -1; | 884 | vc->vt_newvt = -1; |
| 832 | i = vc_allocate(newvt); | 885 | ret = vc_allocate(newvt); |
| 833 | if (i) { | 886 | if (ret) { |
| 834 | release_console_sem(); | 887 | release_console_sem(); |
| 835 | return i; | 888 | break; |
| 836 | } | 889 | } |
| 837 | /* | 890 | /* |
| 838 | * When we actually do the console switch, | 891 | * When we actually do the console switch, |
| @@ -841,31 +894,27 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 841 | */ | 894 | */ |
| 842 | complete_change_console(vc_cons[newvt].d); | 895 | complete_change_console(vc_cons[newvt].d); |
| 843 | } | 896 | } |
| 844 | } | 897 | } else { |
| 845 | 898 | /* | |
| 846 | /* | 899 | * Switched-to response |
| 847 | * Switched-to response | 900 | */ |
| 848 | */ | ||
| 849 | else | ||
| 850 | { | ||
| 851 | /* | 901 | /* |
| 852 | * If it's just an ACK, ignore it | 902 | * If it's just an ACK, ignore it |
| 853 | */ | 903 | */ |
| 854 | if (arg != VT_ACKACQ) { | 904 | if (arg != VT_ACKACQ) |
| 855 | release_console_sem(); | 905 | ret = -EINVAL; |
| 856 | return -EINVAL; | ||
| 857 | } | ||
| 858 | } | 906 | } |
| 859 | release_console_sem(); | 907 | release_console_sem(); |
| 860 | 908 | break; | |
| 861 | return 0; | ||
| 862 | 909 | ||
| 863 | /* | 910 | /* |
| 864 | * Disallocate memory associated to VT (but leave VT1) | 911 | * Disallocate memory associated to VT (but leave VT1) |
| 865 | */ | 912 | */ |
| 866 | case VT_DISALLOCATE: | 913 | case VT_DISALLOCATE: |
| 867 | if (arg > MAX_NR_CONSOLES) | 914 | if (arg > MAX_NR_CONSOLES) { |
| 868 | return -ENXIO; | 915 | ret = -ENXIO; |
| 916 | break; | ||
| 917 | } | ||
| 869 | if (arg == 0) { | 918 | if (arg == 0) { |
| 870 | /* deallocate all unused consoles, but leave 0 */ | 919 | /* deallocate all unused consoles, but leave 0 */ |
| 871 | acquire_console_sem(); | 920 | acquire_console_sem(); |
| @@ -877,14 +926,14 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 877 | /* deallocate a single console, if possible */ | 926 | /* deallocate a single console, if possible */ |
| 878 | arg--; | 927 | arg--; |
| 879 | if (VT_BUSY(arg)) | 928 | if (VT_BUSY(arg)) |
| 880 | return -EBUSY; | 929 | ret = -EBUSY; |
| 881 | if (arg) { /* leave 0 */ | 930 | else if (arg) { /* leave 0 */ |
| 882 | acquire_console_sem(); | 931 | acquire_console_sem(); |
| 883 | vc_deallocate(arg); | 932 | vc_deallocate(arg); |
| 884 | release_console_sem(); | 933 | release_console_sem(); |
| 885 | } | 934 | } |
| 886 | } | 935 | } |
| 887 | return 0; | 936 | break; |
| 888 | 937 | ||
| 889 | case VT_RESIZE: | 938 | case VT_RESIZE: |
| 890 | { | 939 | { |
| @@ -893,21 +942,21 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 893 | 942 | ||
| 894 | ushort ll,cc; | 943 | ushort ll,cc; |
| 895 | if (!perm) | 944 | if (!perm) |
| 896 | return -EPERM; | 945 | goto eperm; |
| 897 | if (get_user(ll, &vtsizes->v_rows) || | 946 | if (get_user(ll, &vtsizes->v_rows) || |
| 898 | get_user(cc, &vtsizes->v_cols)) | 947 | get_user(cc, &vtsizes->v_cols)) |
| 899 | return -EFAULT; | 948 | ret = -EFAULT; |
| 900 | 949 | else { | |
| 901 | for (i = 0; i < MAX_NR_CONSOLES; i++) { | 950 | for (i = 0; i < MAX_NR_CONSOLES; i++) { |
| 902 | vc = vc_cons[i].d; | 951 | vc = vc_cons[i].d; |
| 903 | 952 | ||
| 904 | if (vc) { | 953 | if (vc) { |
| 905 | vc->vc_resize_user = 1; | 954 | vc->vc_resize_user = 1; |
| 906 | vc_lock_resize(vc_cons[i].d, cc, ll); | 955 | vc_lock_resize(vc_cons[i].d, cc, ll); |
| 956 | } | ||
| 907 | } | 957 | } |
| 908 | } | 958 | } |
| 909 | 959 | break; | |
| 910 | return 0; | ||
| 911 | } | 960 | } |
| 912 | 961 | ||
| 913 | case VT_RESIZEX: | 962 | case VT_RESIZEX: |
| @@ -915,10 +964,13 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 915 | struct vt_consize __user *vtconsize = up; | 964 | struct vt_consize __user *vtconsize = up; |
| 916 | ushort ll,cc,vlin,clin,vcol,ccol; | 965 | ushort ll,cc,vlin,clin,vcol,ccol; |
| 917 | if (!perm) | 966 | if (!perm) |
| 918 | return -EPERM; | 967 | goto eperm; |
| 919 | if (!access_ok(VERIFY_READ, vtconsize, | 968 | if (!access_ok(VERIFY_READ, vtconsize, |
| 920 | sizeof(struct vt_consize))) | 969 | sizeof(struct vt_consize))) { |
| 921 | return -EFAULT; | 970 | ret = -EFAULT; |
| 971 | break; | ||
| 972 | } | ||
| 973 | /* FIXME: Should check the copies properly */ | ||
| 922 | __get_user(ll, &vtconsize->v_rows); | 974 | __get_user(ll, &vtconsize->v_rows); |
| 923 | __get_user(cc, &vtconsize->v_cols); | 975 | __get_user(cc, &vtconsize->v_cols); |
| 924 | __get_user(vlin, &vtconsize->v_vlin); | 976 | __get_user(vlin, &vtconsize->v_vlin); |
| @@ -928,21 +980,28 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 928 | vlin = vlin ? vlin : vc->vc_scan_lines; | 980 | vlin = vlin ? vlin : vc->vc_scan_lines; |
| 929 | if (clin) { | 981 | if (clin) { |
| 930 | if (ll) { | 982 | if (ll) { |
| 931 | if (ll != vlin/clin) | 983 | if (ll != vlin/clin) { |
| 932 | return -EINVAL; /* Parameters don't add up */ | 984 | /* Parameters don't add up */ |
| 985 | ret = -EINVAL; | ||
| 986 | break; | ||
| 987 | } | ||
| 933 | } else | 988 | } else |
| 934 | ll = vlin/clin; | 989 | ll = vlin/clin; |
| 935 | } | 990 | } |
| 936 | if (vcol && ccol) { | 991 | if (vcol && ccol) { |
| 937 | if (cc) { | 992 | if (cc) { |
| 938 | if (cc != vcol/ccol) | 993 | if (cc != vcol/ccol) { |
| 939 | return -EINVAL; | 994 | ret = -EINVAL; |
| 995 | break; | ||
| 996 | } | ||
| 940 | } else | 997 | } else |
| 941 | cc = vcol/ccol; | 998 | cc = vcol/ccol; |
| 942 | } | 999 | } |
| 943 | 1000 | ||
| 944 | if (clin > 32) | 1001 | if (clin > 32) { |
| 945 | return -EINVAL; | 1002 | ret = -EINVAL; |
| 1003 | break; | ||
| 1004 | } | ||
| 946 | 1005 | ||
| 947 | for (i = 0; i < MAX_NR_CONSOLES; i++) { | 1006 | for (i = 0; i < MAX_NR_CONSOLES; i++) { |
| 948 | if (!vc_cons[i].d) | 1007 | if (!vc_cons[i].d) |
| @@ -956,19 +1015,20 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 956 | vc_resize(vc_cons[i].d, cc, ll); | 1015 | vc_resize(vc_cons[i].d, cc, ll); |
| 957 | release_console_sem(); | 1016 | release_console_sem(); |
| 958 | } | 1017 | } |
| 959 | return 0; | 1018 | break; |
| 960 | } | 1019 | } |
| 961 | 1020 | ||
| 962 | case PIO_FONT: { | 1021 | case PIO_FONT: { |
| 963 | if (!perm) | 1022 | if (!perm) |
| 964 | return -EPERM; | 1023 | goto eperm; |
| 965 | op.op = KD_FONT_OP_SET; | 1024 | op.op = KD_FONT_OP_SET; |
| 966 | op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ | 1025 | op.flags = KD_FONT_FLAG_OLD | KD_FONT_FLAG_DONT_RECALC; /* Compatibility */ |
| 967 | op.width = 8; | 1026 | op.width = 8; |
| 968 | op.height = 0; | 1027 | op.height = 0; |
| 969 | op.charcount = 256; | 1028 | op.charcount = 256; |
| 970 | op.data = up; | 1029 | op.data = up; |
| 971 | return con_font_op(vc_cons[fg_console].d, &op); | 1030 | ret = con_font_op(vc_cons[fg_console].d, &op); |
| 1031 | break; | ||
| 972 | } | 1032 | } |
| 973 | 1033 | ||
| 974 | case GIO_FONT: { | 1034 | case GIO_FONT: { |
| @@ -978,100 +1038,124 @@ int vt_ioctl(struct tty_struct *tty, struct file * file, | |||
| 978 | op.height = 32; | 1038 | op.height = 32; |
| 979 | op.charcount = 256; | 1039 | op.charcount = 256; |
| 980 | op.data = up; | 1040 | op.data = up; |
| 981 | return con_font_op(vc_cons[fg_console].d, &op); | 1041 | ret = con_font_op(vc_cons[fg_console].d, &op); |
| 1042 | break; | ||
| 982 | } | 1043 | } |
| 983 | 1044 | ||
| 984 | case PIO_CMAP: | 1045 | case PIO_CMAP: |
| 985 | if (!perm) | 1046 | if (!perm) |
| 986 | return -EPERM; | 1047 | ret = -EPERM; |
| 987 | return con_set_cmap(up); | 1048 | else |
| 1049 | ret = con_set_cmap(up); | ||
| 1050 | break; | ||
| 988 | 1051 | ||
| 989 | case GIO_CMAP: | 1052 | case GIO_CMAP: |
| 990 | return con_get_cmap(up); | 1053 | ret = con_get_cmap(up); |
| 1054 | break; | ||
| 991 | 1055 | ||
| 992 | case PIO_FONTX: | 1056 | case PIO_FONTX: |
| 993 | case GIO_FONTX: | 1057 | case GIO_FONTX: |
| 994 | return do_fontx_ioctl(cmd, up, perm, &op); | 1058 | ret = do_fontx_ioctl(cmd, up, perm, &op); |
| 1059 | break; | ||
| 995 | 1060 | ||
| 996 | case PIO_FONTRESET: | 1061 | case PIO_FONTRESET: |
| 997 | { | 1062 | { |
| 998 | if (!perm) | 1063 | if (!perm) |
| 999 | return -EPERM; | 1064 | goto eperm; |
| 1000 | 1065 | ||
| 1001 | #ifdef BROKEN_GRAPHICS_PROGRAMS | 1066 | #ifdef BROKEN_GRAPHICS_PROGRAMS |
| 1002 | /* With BROKEN_GRAPHICS_PROGRAMS defined, the default | 1067 | /* With BROKEN_GRAPHICS_PROGRAMS defined, the default |
| 1003 | font is not saved. */ | 1068 | font is not saved. */ |
| 1004 | return -ENOSYS; | 1069 | ret = -ENOSYS; |
| 1070 | break; | ||
| 1005 | #else | 1071 | #else |
| 1006 | { | 1072 | { |
| 1007 | op.op = KD_FONT_OP_SET_DEFAULT; | 1073 | op.op = KD_FONT_OP_SET_DEFAULT; |
| 1008 | op.data = NULL; | 1074 | op.data = NULL; |
| 1009 | i = con_font_op(vc_cons[fg_console].d, &op); | 1075 | ret = con_font_op(vc_cons[fg_console].d, &op); |
| 1010 | if (i) | 1076 | if (ret) |
| 1011 | return i; | 1077 | break; |
| 1012 | con_set_default_unimap(vc_cons[fg_console].d); | 1078 | con_set_default_unimap(vc_cons[fg_console].d); |
| 1013 | return 0; | 1079 | break; |
| 1014 | } | 1080 | } |
| 1015 | #endif | 1081 | #endif |
| 1016 | } | 1082 | } |
| 1017 | 1083 | ||
| 1018 | case KDFONTOP: { | 1084 | case KDFONTOP: { |
| 1019 | if (copy_from_user(&op, up, sizeof(op))) | 1085 | if (copy_from_user(&op, up, sizeof(op))) { |
| 1020 | return -EFAULT; | 1086 | ret = -EFAULT; |
| 1087 | break; | ||
| 1088 | } | ||
| 1021 | if (!perm && op.op != KD_FONT_OP_GET) | 1089 | if (!perm && op.op != KD_FONT_OP_GET) |
| 1022 | return -EPERM; | 1090 | goto eperm; |
| 1023 | i = con_font_op(vc, &op); | 1091 | ret = con_font_op(vc, &op); |
| 1024 | if (i) return i; | 1092 | if (ret) |
| 1093 | break; | ||
| 1025 | if (copy_to_user(up, &op, sizeof(op))) | 1094 | if (copy_to_user(up, &op, sizeof(op))) |
| 1026 | return -EFAULT; | 1095 | ret = -EFAULT; |
| 1027 | return 0; | 1096 | break; |
| 1028 | } | 1097 | } |
| 1029 | 1098 | ||
| 1030 | case PIO_SCRNMAP: | 1099 | case PIO_SCRNMAP: |
| 1031 | if (!perm) | 1100 | if (!perm) |
| 1032 | return -EPERM; | 1101 | ret = -EPERM; |
| 1033 | return con_set_trans_old(up); | 1102 | else |
| 1103 | ret = con_set_trans_old(up); | ||
| 1104 | break; | ||
| 1034 | 1105 | ||
| 1035 | case GIO_SCRNMAP: | 1106 | case GIO_SCRNMAP: |
| 1036 | return con_get_trans_old(up); | 1107 | ret = con_get_trans_old(up); |
| 1108 | break; | ||
| 1037 | 1109 | ||
| 1038 | case PIO_UNISCRNMAP: | 1110 | case PIO_UNISCRNMAP: |
| 1039 | if (!perm) | 1111 | if (!perm) |
| 1040 | return -EPERM; | 1112 | ret = -EPERM; |
| 1041 | return con_set_trans_new(up); | 1113 | else |
| 1114 | ret = con_set_trans_new(up); | ||
| 1115 | break; | ||
| 1042 | 1116 | ||
| 1043 | case GIO_UNISCRNMAP: | 1117 | case GIO_UNISCRNMAP: |
| 1044 | return con_get_trans_new(up); | 1118 | ret = con_get_trans_new(up); |
| 1119 | break; | ||
| 1045 | 1120 | ||
| 1046 | case PIO_UNIMAPCLR: | 1121 | case PIO_UNIMAPCLR: |
| 1047 | { struct unimapinit ui; | 1122 | { struct unimapinit ui; |
| 1048 | if (!perm) | 1123 | if (!perm) |
| 1049 | return -EPERM; | 1124 | goto eperm; |
| 1050 | i = copy_from_user(&ui, up, sizeof(struct unimapinit)); | 1125 | ret = copy_from_user(&ui, up, sizeof(struct unimapinit)); |
| 1051 | if (i) return -EFAULT; | 1126 | if (!ret) |
| 1052 | con_clear_unimap(vc, &ui); | 1127 | con_clear_unimap(vc, &ui); |
| 1053 | return 0; | 1128 | break; |
| 1054 | } | 1129 | } |
| 1055 | 1130 | ||
| 1056 | case PIO_UNIMAP: | 1131 | case PIO_UNIMAP: |
| 1057 | case GIO_UNIMAP: | 1132 | case GIO_UNIMAP: |
| 1058 | return do_unimap_ioctl(cmd, up, perm, vc); | 1133 | ret = do_unimap_ioctl(cmd, up, perm, vc); |
| 1134 | break; | ||
| 1059 | 1135 | ||
| 1060 | case VT_LOCKSWITCH: | 1136 | case VT_LOCKSWITCH: |
| 1061 | if (!capable(CAP_SYS_TTY_CONFIG)) | 1137 | if (!capable(CAP_SYS_TTY_CONFIG)) |
| 1062 | return -EPERM; | 1138 | goto eperm; |
| 1063 | vt_dont_switch = 1; | 1139 | vt_dont_switch = 1; |
| 1064 | return 0; | 1140 | break; |
| 1065 | case VT_UNLOCKSWITCH: | 1141 | case VT_UNLOCKSWITCH: |
| 1066 | if (!capable(CAP_SYS_TTY_CONFIG)) | 1142 | if (!capable(CAP_SYS_TTY_CONFIG)) |
| 1067 | return -EPERM; | 1143 | goto eperm; |
| 1068 | vt_dont_switch = 0; | 1144 | vt_dont_switch = 0; |
| 1069 | return 0; | 1145 | break; |
| 1070 | case VT_GETHIFONTMASK: | 1146 | case VT_GETHIFONTMASK: |
| 1071 | return put_user(vc->vc_hi_font_mask, (unsigned short __user *)arg); | 1147 | ret = put_user(vc->vc_hi_font_mask, |
| 1148 | (unsigned short __user *)arg); | ||
| 1149 | break; | ||
| 1072 | default: | 1150 | default: |
| 1073 | return -ENOIOCTLCMD; | 1151 | ret = -ENOIOCTLCMD; |
| 1074 | } | 1152 | } |
| 1153 | out: | ||
| 1154 | unlock_kernel(); | ||
| 1155 | return ret; | ||
| 1156 | eperm: | ||
| 1157 | ret = -EPERM; | ||
| 1158 | goto out; | ||
| 1075 | } | 1159 | } |
| 1076 | 1160 | ||
| 1077 | /* | 1161 | /* |
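The vt_ioctl() rework above replaces dozens of early returns with a single exit path under the big kernel lock: ret collects the status, goto eperm funnels permission failures, and the out label drops the BKL exactly once. A condensed sketch of that control-flow pattern (illustrative only; lock_kernel()/unlock_kernel() come from <linux/smp_lock.h>, and the command number is an assumption):

	static int example_ioctl(unsigned int cmd, unsigned long arg, int perm)
	{
		int ret = 0;

		lock_kernel();
		switch (cmd) {
		case 0x4B2F:			/* assumed command number */
			if (!perm)
				goto eperm;	/* one shared -EPERM exit */
			/* ... do the work, setting ret on failure ... */
			break;
		default:
			ret = -ENOIOCTLCMD;
		}
	out:
		unlock_kernel();
		return ret;
	eperm:
		ret = -EPERM;
		goto out;
	}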
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 24c62b848bf9..7f138c6195ff 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
| @@ -382,7 +382,7 @@ fail: | |||
| 382 | spin_unlock_irqrestore(&gpio_lock, flags); | 382 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 383 | if (status) | 383 | if (status) |
| 384 | pr_debug("%s: gpio-%d status %d\n", | 384 | pr_debug("%s: gpio-%d status %d\n", |
| 385 | __FUNCTION__, gpio, status); | 385 | __func__, gpio, status); |
| 386 | return status; | 386 | return status; |
| 387 | } | 387 | } |
| 388 | EXPORT_SYMBOL_GPL(gpio_direction_input); | 388 | EXPORT_SYMBOL_GPL(gpio_direction_input); |
| @@ -420,7 +420,7 @@ fail: | |||
| 420 | spin_unlock_irqrestore(&gpio_lock, flags); | 420 | spin_unlock_irqrestore(&gpio_lock, flags); |
| 421 | if (status) | 421 | if (status) |
| 422 | pr_debug("%s: gpio-%d status %d\n", | 422 | pr_debug("%s: gpio-%d status %d\n", |
| 423 | __FUNCTION__, gpio, status); | 423 | __func__, gpio, status); |
| 424 | return status; | 424 | return status; |
| 425 | } | 425 | } |
| 426 | EXPORT_SYMBOL_GPL(gpio_direction_output); | 426 | EXPORT_SYMBOL_GPL(gpio_direction_output); |
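The gpiolib change (and the ads7828 one below) is purely a spelling fix: __FUNCTION__ is a GCC extension, while __func__ is the C99 predefined identifier with the same value. A minimal illustration, assuming the usual pr_debug() from <linux/kernel.h>:

	static void example_report(int gpio, int status)
	{
		/* __func__ expands to "example_report" without relying on a GCC extension */
		pr_debug("%s: gpio-%d status %d\n", __func__, gpio, status);
	}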
diff --git a/drivers/hwmon/ads7828.c b/drivers/hwmon/ads7828.c index ed71a8bc70dc..5c8b6e0ff47c 100644 --- a/drivers/hwmon/ads7828.c +++ b/drivers/hwmon/ads7828.c | |||
| @@ -224,7 +224,7 @@ static int ads7828_detect(struct i2c_adapter *adapter, int address, int kind) | |||
| 224 | if (in_data & 0xF000) { | 224 | if (in_data & 0xF000) { |
| 225 | printk(KERN_DEBUG | 225 | printk(KERN_DEBUG |
| 226 | "%s : Doesn't look like an ads7828 device\n", | 226 | "%s : Doesn't look like an ads7828 device\n", |
| 227 | __FUNCTION__); | 227 | __func__); |
| 228 | goto exit_free; | 228 | goto exit_free; |
| 229 | } | 229 | } |
| 230 | } | 230 | } |
diff --git a/drivers/input/serio/serport.c b/drivers/input/serio/serport.c index e1a3a79ab3f9..7ff71ba7b7c9 100644 --- a/drivers/input/serio/serport.c +++ b/drivers/input/serio/serport.c | |||
| @@ -46,7 +46,7 @@ struct serport { | |||
| 46 | static int serport_serio_write(struct serio *serio, unsigned char data) | 46 | static int serport_serio_write(struct serio *serio, unsigned char data) |
| 47 | { | 47 | { |
| 48 | struct serport *serport = serio->port_data; | 48 | struct serport *serport = serio->port_data; |
| 49 | return -(serport->tty->driver->write(serport->tty, &data, 1) != 1); | 49 | return -(serport->tty->ops->write(serport->tty, &data, 1) != 1); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | static int serport_serio_open(struct serio *serio) | 52 | static int serport_serio_open(struct serio *serio) |
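serport is one of several drivers in this series converted from calling tty methods through tty->driver to calling through tty->ops, the const struct tty_operations table. A hedged sketch of the guarded call pattern (the helper name is an assumption):

	static int example_send_byte(struct tty_struct *tty, unsigned char data)
	{
		if (!tty->ops->write)			/* not every driver implements write */
			return -EOPNOTSUPP;
		/* ->write() returns the number of bytes it accepted */
		return tty->ops->write(tty, &data, 1) == 1 ? 0 : -EIO;
	}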
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 24c6b7ca62be..6ca0bb949ad3 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c | |||
| @@ -1111,11 +1111,12 @@ static int capinc_tty_write(struct tty_struct * tty, | |||
| 1111 | return count; | 1111 | return count; |
| 1112 | } | 1112 | } |
| 1113 | 1113 | ||
| 1114 | static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) | 1114 | static int capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) |
| 1115 | { | 1115 | { |
| 1116 | struct capiminor *mp = (struct capiminor *)tty->driver_data; | 1116 | struct capiminor *mp = (struct capiminor *)tty->driver_data; |
| 1117 | struct sk_buff *skb; | 1117 | struct sk_buff *skb; |
| 1118 | unsigned long flags; | 1118 | unsigned long flags; |
| 1119 | int ret = 1; | ||
| 1119 | 1120 | ||
| 1120 | #ifdef _DEBUG_TTYFUNCS | 1121 | #ifdef _DEBUG_TTYFUNCS |
| 1121 | printk(KERN_DEBUG "capinc_put_char(%u)\n", ch); | 1122 | printk(KERN_DEBUG "capinc_put_char(%u)\n", ch); |
| @@ -1125,7 +1126,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1125 | #ifdef _DEBUG_TTYFUNCS | 1126 | #ifdef _DEBUG_TTYFUNCS |
| 1126 | printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n"); | 1127 | printk(KERN_DEBUG "capinc_tty_put_char: mp or mp->ncci NULL\n"); |
| 1127 | #endif | 1128 | #endif |
| 1128 | return; | 1129 | return 0; |
| 1129 | } | 1130 | } |
| 1130 | 1131 | ||
| 1131 | spin_lock_irqsave(&workaround_lock, flags); | 1132 | spin_lock_irqsave(&workaround_lock, flags); |
| @@ -1134,7 +1135,7 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1134 | if (skb_tailroom(skb) > 0) { | 1135 | if (skb_tailroom(skb) > 0) { |
| 1135 | *(skb_put(skb, 1)) = ch; | 1136 | *(skb_put(skb, 1)) = ch; |
| 1136 | spin_unlock_irqrestore(&workaround_lock, flags); | 1137 | spin_unlock_irqrestore(&workaround_lock, flags); |
| 1137 | return; | 1138 | return 1; |
| 1138 | } | 1139 | } |
| 1139 | mp->ttyskb = NULL; | 1140 | mp->ttyskb = NULL; |
| 1140 | skb_queue_tail(&mp->outqueue, skb); | 1141 | skb_queue_tail(&mp->outqueue, skb); |
| @@ -1148,8 +1149,10 @@ static void capinc_tty_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1148 | mp->ttyskb = skb; | 1149 | mp->ttyskb = skb; |
| 1149 | } else { | 1150 | } else { |
| 1150 | printk(KERN_ERR "capinc_put_char: char %u lost\n", ch); | 1151 | printk(KERN_ERR "capinc_put_char: char %u lost\n", ch); |
| 1152 | ret = 0; | ||
| 1151 | } | 1153 | } |
| 1152 | spin_unlock_irqrestore(&workaround_lock, flags); | 1154 | spin_unlock_irqrestore(&workaround_lock, flags); |
| 1155 | return ret; | ||
| 1153 | } | 1156 | } |
| 1154 | 1157 | ||
| 1155 | static void capinc_tty_flush_chars(struct tty_struct *tty) | 1158 | static void capinc_tty_flush_chars(struct tty_struct *tty) |
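With capinc_tty_put_char() now returning 1 when the character was queued and 0 when it had to be dropped, callers can account for lost bytes. A hedged caller-side sketch; example_putc() stands in for whatever dispatches to the driver's put_char operation:

	static int example_write_string(struct tty_struct *tty,
					const unsigned char *s, int len)
	{
		int i, sent = 0;

		for (i = 0; i < len; i++)
			sent += example_putc(tty, s[i]);	/* each call reports 1 or 0 */
		return sent;					/* characters actually queued */
	}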
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c index fceeb1d57682..45d1ee93cd39 100644 --- a/drivers/isdn/gigaset/ser-gigaset.c +++ b/drivers/isdn/gigaset/ser-gigaset.c | |||
| @@ -68,10 +68,10 @@ static int write_modem(struct cardstate *cs) | |||
| 68 | struct tty_struct *tty = cs->hw.ser->tty; | 68 | struct tty_struct *tty = cs->hw.ser->tty; |
| 69 | struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ | 69 | struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ |
| 70 | struct sk_buff *skb = bcs->tx_skb; | 70 | struct sk_buff *skb = bcs->tx_skb; |
| 71 | int sent; | 71 | int sent = -EOPNOTSUPP; |
| 72 | 72 | ||
| 73 | if (!tty || !tty->driver || !skb) | 73 | if (!tty || !tty->driver || !skb) |
| 74 | return -EFAULT; | 74 | return -EINVAL; |
| 75 | 75 | ||
| 76 | if (!skb->len) { | 76 | if (!skb->len) { |
| 77 | dev_kfree_skb_any(skb); | 77 | dev_kfree_skb_any(skb); |
| @@ -80,7 +80,8 @@ static int write_modem(struct cardstate *cs) | |||
| 80 | } | 80 | } |
| 81 | 81 | ||
| 82 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 82 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
| 83 | sent = tty->driver->write(tty, skb->data, skb->len); | 83 | if (tty->ops->write) |
| 84 | sent = tty->ops->write(tty, skb->data, skb->len); | ||
| 84 | gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent); | 85 | gig_dbg(DEBUG_OUTPUT, "write_modem: sent %d", sent); |
| 85 | if (sent < 0) { | 86 | if (sent < 0) { |
| 86 | /* error */ | 87 | /* error */ |
| @@ -120,7 +121,7 @@ static int send_cb(struct cardstate *cs) | |||
| 120 | 121 | ||
| 121 | if (cb->len) { | 122 | if (cb->len) { |
| 122 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 123 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
| 123 | sent = tty->driver->write(tty, cb->buf + cb->offset, cb->len); | 124 | sent = tty->ops->write(tty, cb->buf + cb->offset, cb->len); |
| 124 | if (sent < 0) { | 125 | if (sent < 0) { |
| 125 | /* error */ | 126 | /* error */ |
| 126 | gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent); | 127 | gig_dbg(DEBUG_OUTPUT, "send_cb: write error %d", sent); |
| @@ -440,14 +441,14 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsi | |||
| 440 | struct tty_struct *tty = cs->hw.ser->tty; | 441 | struct tty_struct *tty = cs->hw.ser->tty; |
| 441 | unsigned int set, clear; | 442 | unsigned int set, clear; |
| 442 | 443 | ||
| 443 | if (!tty || !tty->driver || !tty->driver->tiocmset) | 444 | if (!tty || !tty->driver || !tty->ops->tiocmset) |
| 444 | return -EFAULT; | 445 | return -EINVAL; |
| 445 | set = new_state & ~old_state; | 446 | set = new_state & ~old_state; |
| 446 | clear = old_state & ~new_state; | 447 | clear = old_state & ~new_state; |
| 447 | if (!set && !clear) | 448 | if (!set && !clear) |
| 448 | return 0; | 449 | return 0; |
| 449 | gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear); | 450 | gig_dbg(DEBUG_IF, "tiocmset set %x clear %x", set, clear); |
| 450 | return tty->driver->tiocmset(tty, NULL, set, clear); | 451 | return tty->ops->tiocmset(tty, NULL, set, clear); |
| 451 | } | 452 | } |
| 452 | 453 | ||
| 453 | static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) | 454 | static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) |
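ser-gigaset gains checks before calling through tty->ops and now returns -EINVAL (a usage error) rather than -EFAULT (a bad-pointer error) when the tty is missing. A hedged sketch of the guarded modem-control call, mirroring the tiocmset signature used above:

	static int example_set_rts(struct tty_struct *tty, int on)
	{
		unsigned int set = on ? TIOCM_RTS : 0;
		unsigned int clear = on ? 0 : TIOCM_RTS;

		if (!tty || !tty->ops->tiocmset)
			return -EINVAL;		/* no modem control available */
		return tty->ops->tiocmset(tty, NULL, set, clear);
	}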
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 8af0df1d5b8c..1a2222cbb805 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
| @@ -1352,12 +1352,14 @@ isdn_tty_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 1352 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1352 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| 1353 | return -EIO; | 1353 | return -EIO; |
| 1354 | 1354 | ||
| 1355 | lock_kernel(); | ||
| 1355 | #ifdef ISDN_DEBUG_MODEM_IOCTL | 1356 | #ifdef ISDN_DEBUG_MODEM_IOCTL |
| 1356 | printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line); | 1357 | printk(KERN_DEBUG "ttyI%d ioctl TIOCMGET\n", info->line); |
| 1357 | #endif | 1358 | #endif |
| 1358 | 1359 | ||
| 1359 | control = info->mcr; | 1360 | control = info->mcr; |
| 1360 | status = info->msr; | 1361 | status = info->msr; |
| 1362 | unlock_kernel(); | ||
| 1361 | return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) | 1363 | return ((control & UART_MCR_RTS) ? TIOCM_RTS : 0) |
| 1362 | | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) | 1364 | | ((control & UART_MCR_DTR) ? TIOCM_DTR : 0) |
| 1363 | | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) | 1365 | | ((status & UART_MSR_DCD) ? TIOCM_CAR : 0) |
| @@ -1381,6 +1383,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1381 | printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear); | 1383 | printk(KERN_DEBUG "ttyI%d ioctl TIOCMxxx: %x %x\n", info->line, set, clear); |
| 1382 | #endif | 1384 | #endif |
| 1383 | 1385 | ||
| 1386 | lock_kernel(); | ||
| 1384 | if (set & TIOCM_RTS) | 1387 | if (set & TIOCM_RTS) |
| 1385 | info->mcr |= UART_MCR_RTS; | 1388 | info->mcr |= UART_MCR_RTS; |
| 1386 | if (set & TIOCM_DTR) { | 1389 | if (set & TIOCM_DTR) { |
| @@ -1402,6 +1405,7 @@ isdn_tty_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1402 | isdn_tty_modem_hup(info, 1); | 1405 | isdn_tty_modem_hup(info, 1); |
| 1403 | } | 1406 | } |
| 1404 | } | 1407 | } |
| 1408 | unlock_kernel(); | ||
| 1405 | return 0; | 1409 | return 0; |
| 1406 | } | 1410 | } |
| 1407 | 1411 | ||
| @@ -1435,21 +1439,6 @@ isdn_tty_ioctl(struct tty_struct *tty, struct file *file, | |||
| 1435 | return retval; | 1439 | return retval; |
| 1436 | tty_wait_until_sent(tty, 0); | 1440 | tty_wait_until_sent(tty, 0); |
| 1437 | return 0; | 1441 | return 0; |
| 1438 | case TIOCGSOFTCAR: | ||
| 1439 | #ifdef ISDN_DEBUG_MODEM_IOCTL | ||
| 1440 | printk(KERN_DEBUG "ttyI%d ioctl TIOCGSOFTCAR\n", info->line); | ||
| 1441 | #endif | ||
| 1442 | return put_user(C_CLOCAL(tty) ? 1 : 0, (ulong __user *) arg); | ||
| 1443 | case TIOCSSOFTCAR: | ||
| 1444 | #ifdef ISDN_DEBUG_MODEM_IOCTL | ||
| 1445 | printk(KERN_DEBUG "ttyI%d ioctl TIOCSSOFTCAR\n", info->line); | ||
| 1446 | #endif | ||
| 1447 | if (get_user(arg, (ulong __user *) arg)) | ||
| 1448 | return -EFAULT; | ||
| 1449 | tty->termios->c_cflag = | ||
| 1450 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1451 | (arg ? CLOCAL : 0)); | ||
| 1452 | return 0; | ||
| 1453 | case TIOCSERGETLSR: /* Get line status register */ | 1442 | case TIOCSERGETLSR: /* Get line status register */ |
| 1454 | #ifdef ISDN_DEBUG_MODEM_IOCTL | 1443 | #ifdef ISDN_DEBUG_MODEM_IOCTL |
| 1455 | printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line); | 1444 | printk(KERN_DEBUG "ttyI%d ioctl TIOCSERGETLSR\n", info->line); |
| @@ -1472,13 +1461,14 @@ isdn_tty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 1472 | if (!old_termios) | 1461 | if (!old_termios) |
| 1473 | isdn_tty_change_speed(info); | 1462 | isdn_tty_change_speed(info); |
| 1474 | else { | 1463 | else { |
| 1475 | if (tty->termios->c_cflag == old_termios->c_cflag) | 1464 | if (tty->termios->c_cflag == old_termios->c_cflag && |
| 1465 | tty->termios->c_ispeed == old_termios->c_ispeed && | ||
| 1466 | tty->termios->c_ospeed == old_termios->c_ospeed) | ||
| 1476 | return; | 1467 | return; |
| 1477 | isdn_tty_change_speed(info); | 1468 | isdn_tty_change_speed(info); |
| 1478 | if ((old_termios->c_cflag & CRTSCTS) && | 1469 | if ((old_termios->c_cflag & CRTSCTS) && |
| 1479 | !(tty->termios->c_cflag & CRTSCTS)) { | 1470 | !(tty->termios->c_cflag & CRTSCTS)) |
| 1480 | tty->hw_stopped = 0; | 1471 | tty->hw_stopped = 0; |
| 1481 | } | ||
| 1482 | } | 1472 | } |
| 1483 | } | 1473 | } |
| 1484 | 1474 | ||
| @@ -1718,9 +1708,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp) | |||
| 1718 | } | 1708 | } |
| 1719 | dev->modempoll--; | 1709 | dev->modempoll--; |
| 1720 | isdn_tty_shutdown(info); | 1710 | isdn_tty_shutdown(info); |
| 1721 | 1711 | isdn_tty_flush_buffer(tty); | |
| 1722 | if (tty->driver->flush_buffer) | ||
| 1723 | tty->driver->flush_buffer(tty); | ||
| 1724 | tty_ldisc_flush(tty); | 1712 | tty_ldisc_flush(tty); |
| 1725 | info->tty = NULL; | 1713 | info->tty = NULL; |
| 1726 | info->ncarrier = 0; | 1714 | info->ncarrier = 0; |
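In isdn_tty the TIOCGSOFTCAR/TIOCSSOFTCAR cases disappear because the tty core now handles them, and the set_termios() short-circuit must compare the speed fields too, since the baud rate is no longer encoded solely in c_cflag. A hedged sketch of that comparison:

	static int example_termios_unchanged(const struct ktermios *new,
					     const struct ktermios *old)
	{
		return new->c_cflag  == old->c_cflag &&
		       new->c_ispeed == old->c_ispeed &&
		       new->c_ospeed == old->c_ospeed;
	}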
diff --git a/drivers/md/md.c b/drivers/md/md.c index bb3e4b1cb773..83eb78b00137 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -276,6 +276,7 @@ static mddev_t * mddev_find(dev_t unit) | |||
| 276 | init_waitqueue_head(&new->sb_wait); | 276 | init_waitqueue_head(&new->sb_wait); |
| 277 | new->reshape_position = MaxSector; | 277 | new->reshape_position = MaxSector; |
| 278 | new->resync_max = MaxSector; | 278 | new->resync_max = MaxSector; |
| 279 | new->level = LEVEL_NONE; | ||
| 279 | 280 | ||
| 280 | new->queue = blk_alloc_queue(GFP_KERNEL); | 281 | new->queue = blk_alloc_queue(GFP_KERNEL); |
| 281 | if (!new->queue) { | 282 | if (!new->queue) { |
| @@ -1369,6 +1370,11 @@ static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) | |||
| 1369 | MD_BUG(); | 1370 | MD_BUG(); |
| 1370 | return -EINVAL; | 1371 | return -EINVAL; |
| 1371 | } | 1372 | } |
| 1373 | |||
| 1374 | /* prevent duplicates */ | ||
| 1375 | if (find_rdev(mddev, rdev->bdev->bd_dev)) | ||
| 1376 | return -EEXIST; | ||
| 1377 | |||
| 1372 | /* make sure rdev->size exceeds mddev->size */ | 1378 | /* make sure rdev->size exceeds mddev->size */ |
| 1373 | if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { | 1379 | if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { |
| 1374 | if (mddev->pers) { | 1380 | if (mddev->pers) { |
| @@ -1652,6 +1658,8 @@ static void md_update_sb(mddev_t * mddev, int force_change) | |||
| 1652 | int sync_req; | 1658 | int sync_req; |
| 1653 | int nospares = 0; | 1659 | int nospares = 0; |
| 1654 | 1660 | ||
| 1661 | if (mddev->external) | ||
| 1662 | return; | ||
| 1655 | repeat: | 1663 | repeat: |
| 1656 | spin_lock_irq(&mddev->write_lock); | 1664 | spin_lock_irq(&mddev->write_lock); |
| 1657 | 1665 | ||
| @@ -1820,6 +1828,10 @@ state_show(mdk_rdev_t *rdev, char *page) | |||
| 1820 | len += sprintf(page+len, "%swrite_mostly",sep); | 1828 | len += sprintf(page+len, "%swrite_mostly",sep); |
| 1821 | sep = ","; | 1829 | sep = ","; |
| 1822 | } | 1830 | } |
| 1831 | if (test_bit(Blocked, &rdev->flags)) { | ||
| 1832 | len += sprintf(page+len, "%sblocked", sep); | ||
| 1833 | sep = ","; | ||
| 1834 | } | ||
| 1823 | if (!test_bit(Faulty, &rdev->flags) && | 1835 | if (!test_bit(Faulty, &rdev->flags) && |
| 1824 | !test_bit(In_sync, &rdev->flags)) { | 1836 | !test_bit(In_sync, &rdev->flags)) { |
| 1825 | len += sprintf(page+len, "%sspare", sep); | 1837 | len += sprintf(page+len, "%sspare", sep); |
| @@ -1836,6 +1848,8 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
| 1836 | * remove - disconnects the device | 1848 | * remove - disconnects the device |
| 1837 | * writemostly - sets write_mostly | 1849 | * writemostly - sets write_mostly |
| 1838 | * -writemostly - clears write_mostly | 1850 | * -writemostly - clears write_mostly |
| 1851 | * blocked - sets the Blocked flag | ||
| 1852 | * -blocked - clears the Blocked flag | ||
| 1839 | */ | 1853 | */ |
| 1840 | int err = -EINVAL; | 1854 | int err = -EINVAL; |
| 1841 | if (cmd_match(buf, "faulty") && rdev->mddev->pers) { | 1855 | if (cmd_match(buf, "faulty") && rdev->mddev->pers) { |
| @@ -1858,6 +1872,16 @@ state_store(mdk_rdev_t *rdev, const char *buf, size_t len) | |||
| 1858 | } else if (cmd_match(buf, "-writemostly")) { | 1872 | } else if (cmd_match(buf, "-writemostly")) { |
| 1859 | clear_bit(WriteMostly, &rdev->flags); | 1873 | clear_bit(WriteMostly, &rdev->flags); |
| 1860 | err = 0; | 1874 | err = 0; |
| 1875 | } else if (cmd_match(buf, "blocked")) { | ||
| 1876 | set_bit(Blocked, &rdev->flags); | ||
| 1877 | err = 0; | ||
| 1878 | } else if (cmd_match(buf, "-blocked")) { | ||
| 1879 | clear_bit(Blocked, &rdev->flags); | ||
| 1880 | wake_up(&rdev->blocked_wait); | ||
| 1881 | set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); | ||
| 1882 | md_wakeup_thread(rdev->mddev->thread); | ||
| 1883 | |||
| 1884 | err = 0; | ||
| 1861 | } | 1885 | } |
| 1862 | return err ? err : len; | 1886 | return err ? err : len; |
| 1863 | } | 1887 | } |
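The md hunks above add a per-rdev Blocked flag exposed through the existing sysfs state attribute: writing "blocked" sets it, and "-blocked" clears it and wakes the recovery thread. A user-space sketch, assuming a hypothetical array md0 with member sda1 (adjust the path for a real array):

	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/block/md0/md/dev-sda1/state", "w");

		if (!f) {
			perror("open state");
			return 1;
		}
		if (fputs("-blocked", f) == EOF)	/* writing "blocked" would set the flag instead */
			perror("write state");
		fclose(f);
		return 0;
	}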
| @@ -2097,7 +2121,7 @@ rdev_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 2097 | rv = -EBUSY; | 2121 | rv = -EBUSY; |
| 2098 | else | 2122 | else |
| 2099 | rv = entry->store(rdev, page, length); | 2123 | rv = entry->store(rdev, page, length); |
| 2100 | mddev_unlock(rdev->mddev); | 2124 | mddev_unlock(mddev); |
| 2101 | } | 2125 | } |
| 2102 | return rv; | 2126 | return rv; |
| 2103 | } | 2127 | } |
| @@ -2186,7 +2210,9 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi | |||
| 2186 | goto abort_free; | 2210 | goto abort_free; |
| 2187 | } | 2211 | } |
| 2188 | } | 2212 | } |
| 2213 | |||
| 2189 | INIT_LIST_HEAD(&rdev->same_set); | 2214 | INIT_LIST_HEAD(&rdev->same_set); |
| 2215 | init_waitqueue_head(&rdev->blocked_wait); | ||
| 2190 | 2216 | ||
| 2191 | return rdev; | 2217 | return rdev; |
| 2192 | 2218 | ||
| @@ -2457,7 +2483,6 @@ resync_start_show(mddev_t *mddev, char *page) | |||
| 2457 | static ssize_t | 2483 | static ssize_t |
| 2458 | resync_start_store(mddev_t *mddev, const char *buf, size_t len) | 2484 | resync_start_store(mddev_t *mddev, const char *buf, size_t len) |
| 2459 | { | 2485 | { |
| 2460 | /* can only set chunk_size if array is not yet active */ | ||
| 2461 | char *e; | 2486 | char *e; |
| 2462 | unsigned long long n = simple_strtoull(buf, &e, 10); | 2487 | unsigned long long n = simple_strtoull(buf, &e, 10); |
| 2463 | 2488 | ||
| @@ -2591,15 +2616,20 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 2591 | err = do_md_stop(mddev, 1); | 2616 | err = do_md_stop(mddev, 1); |
| 2592 | else { | 2617 | else { |
| 2593 | mddev->ro = 1; | 2618 | mddev->ro = 1; |
| 2619 | set_disk_ro(mddev->gendisk, 1); | ||
| 2594 | err = do_md_run(mddev); | 2620 | err = do_md_run(mddev); |
| 2595 | } | 2621 | } |
| 2596 | break; | 2622 | break; |
| 2597 | case read_auto: | 2623 | case read_auto: |
| 2598 | /* stopping an active array */ | ||
| 2599 | if (mddev->pers) { | 2624 | if (mddev->pers) { |
| 2600 | err = do_md_stop(mddev, 1); | 2625 | if (mddev->ro != 1) |
| 2601 | if (err == 0) | 2626 | err = do_md_stop(mddev, 1); |
| 2602 | mddev->ro = 2; /* FIXME mark devices writable */ | 2627 | else |
| 2628 | err = restart_array(mddev); | ||
| 2629 | if (err == 0) { | ||
| 2630 | mddev->ro = 2; | ||
| 2631 | set_disk_ro(mddev->gendisk, 0); | ||
| 2632 | } | ||
| 2603 | } else { | 2633 | } else { |
| 2604 | mddev->ro = 2; | 2634 | mddev->ro = 2; |
| 2605 | err = do_md_run(mddev); | 2635 | err = do_md_run(mddev); |
| @@ -2612,6 +2642,8 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 2612 | if (atomic_read(&mddev->writes_pending) == 0) { | 2642 | if (atomic_read(&mddev->writes_pending) == 0) { |
| 2613 | if (mddev->in_sync == 0) { | 2643 | if (mddev->in_sync == 0) { |
| 2614 | mddev->in_sync = 1; | 2644 | mddev->in_sync = 1; |
| 2645 | if (mddev->safemode == 1) | ||
| 2646 | mddev->safemode = 0; | ||
| 2615 | if (mddev->persistent) | 2647 | if (mddev->persistent) |
| 2616 | set_bit(MD_CHANGE_CLEAN, | 2648 | set_bit(MD_CHANGE_CLEAN, |
| 2617 | &mddev->flags); | 2649 | &mddev->flags); |
| @@ -2635,6 +2667,7 @@ array_state_store(mddev_t *mddev, const char *buf, size_t len) | |||
| 2635 | err = 0; | 2667 | err = 0; |
| 2636 | } else { | 2668 | } else { |
| 2637 | mddev->ro = 0; | 2669 | mddev->ro = 0; |
| 2670 | set_disk_ro(mddev->gendisk, 0); | ||
| 2638 | err = do_md_run(mddev); | 2671 | err = do_md_run(mddev); |
| 2639 | } | 2672 | } |
| 2640 | break; | 2673 | break; |
| @@ -3712,6 +3745,30 @@ static int do_md_stop(mddev_t * mddev, int mode) | |||
| 3712 | mddev->reshape_position = MaxSector; | 3745 | mddev->reshape_position = MaxSector; |
| 3713 | mddev->external = 0; | 3746 | mddev->external = 0; |
| 3714 | mddev->persistent = 0; | 3747 | mddev->persistent = 0; |
| 3748 | mddev->level = LEVEL_NONE; | ||
| 3749 | mddev->clevel[0] = 0; | ||
| 3750 | mddev->flags = 0; | ||
| 3751 | mddev->ro = 0; | ||
| 3752 | mddev->metadata_type[0] = 0; | ||
| 3753 | mddev->chunk_size = 0; | ||
| 3754 | mddev->ctime = mddev->utime = 0; | ||
| 3755 | mddev->layout = 0; | ||
| 3756 | mddev->max_disks = 0; | ||
| 3757 | mddev->events = 0; | ||
| 3758 | mddev->delta_disks = 0; | ||
| 3759 | mddev->new_level = LEVEL_NONE; | ||
| 3760 | mddev->new_layout = 0; | ||
| 3761 | mddev->new_chunk = 0; | ||
| 3762 | mddev->curr_resync = 0; | ||
| 3763 | mddev->resync_mismatches = 0; | ||
| 3764 | mddev->suspend_lo = mddev->suspend_hi = 0; | ||
| 3765 | mddev->sync_speed_min = mddev->sync_speed_max = 0; | ||
| 3766 | mddev->recovery = 0; | ||
| 3767 | mddev->in_sync = 0; | ||
| 3768 | mddev->changed = 0; | ||
| 3769 | mddev->degraded = 0; | ||
| 3770 | mddev->barriers_work = 0; | ||
| 3771 | mddev->safemode = 0; | ||
| 3715 | 3772 | ||
| 3716 | } else if (mddev->pers) | 3773 | } else if (mddev->pers) |
| 3717 | printk(KERN_INFO "md: %s switched to read-only mode.\n", | 3774 | printk(KERN_INFO "md: %s switched to read-only mode.\n", |
| @@ -4919,6 +4976,9 @@ void md_error(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 4919 | 4976 | ||
| 4920 | if (!rdev || test_bit(Faulty, &rdev->flags)) | 4977 | if (!rdev || test_bit(Faulty, &rdev->flags)) |
| 4921 | return; | 4978 | return; |
| 4979 | |||
| 4980 | if (mddev->external) | ||
| 4981 | set_bit(Blocked, &rdev->flags); | ||
| 4922 | /* | 4982 | /* |
| 4923 | dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", | 4983 | dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", |
| 4924 | mdname(mddev), | 4984 | mdname(mddev), |
| @@ -5365,6 +5425,8 @@ void md_write_start(mddev_t *mddev, struct bio *bi) | |||
| 5365 | md_wakeup_thread(mddev->sync_thread); | 5425 | md_wakeup_thread(mddev->sync_thread); |
| 5366 | } | 5426 | } |
| 5367 | atomic_inc(&mddev->writes_pending); | 5427 | atomic_inc(&mddev->writes_pending); |
| 5428 | if (mddev->safemode == 1) | ||
| 5429 | mddev->safemode = 0; | ||
| 5368 | if (mddev->in_sync) { | 5430 | if (mddev->in_sync) { |
| 5369 | spin_lock_irq(&mddev->write_lock); | 5431 | spin_lock_irq(&mddev->write_lock); |
| 5370 | if (mddev->in_sync) { | 5432 | if (mddev->in_sync) { |
| @@ -5719,7 +5781,7 @@ static int remove_and_add_spares(mddev_t *mddev) | |||
| 5719 | 5781 | ||
| 5720 | rdev_for_each(rdev, rtmp, mddev) | 5782 | rdev_for_each(rdev, rtmp, mddev) |
| 5721 | if (rdev->raid_disk >= 0 && | 5783 | if (rdev->raid_disk >= 0 && |
| 5722 | !mddev->external && | 5784 | !test_bit(Blocked, &rdev->flags) && |
| 5723 | (test_bit(Faulty, &rdev->flags) || | 5785 | (test_bit(Faulty, &rdev->flags) || |
| 5724 | ! test_bit(In_sync, &rdev->flags)) && | 5786 | ! test_bit(In_sync, &rdev->flags)) && |
| 5725 | atomic_read(&rdev->nr_pending)==0) { | 5787 | atomic_read(&rdev->nr_pending)==0) { |
| @@ -5789,7 +5851,7 @@ void md_check_recovery(mddev_t *mddev) | |||
| 5789 | return; | 5851 | return; |
| 5790 | 5852 | ||
| 5791 | if (signal_pending(current)) { | 5853 | if (signal_pending(current)) { |
| 5792 | if (mddev->pers->sync_request) { | 5854 | if (mddev->pers->sync_request && !mddev->external) { |
| 5793 | printk(KERN_INFO "md: %s in immediate safe mode\n", | 5855 | printk(KERN_INFO "md: %s in immediate safe mode\n", |
| 5794 | mdname(mddev)); | 5856 | mdname(mddev)); |
| 5795 | mddev->safemode = 2; | 5857 | mddev->safemode = 2; |
| @@ -5801,7 +5863,7 @@ void md_check_recovery(mddev_t *mddev) | |||
| 5801 | (mddev->flags && !mddev->external) || | 5863 | (mddev->flags && !mddev->external) || |
| 5802 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || | 5864 | test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || |
| 5803 | test_bit(MD_RECOVERY_DONE, &mddev->recovery) || | 5865 | test_bit(MD_RECOVERY_DONE, &mddev->recovery) || |
| 5804 | (mddev->safemode == 1) || | 5866 | (mddev->external == 0 && mddev->safemode == 1) || |
| 5805 | (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) | 5867 | (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) |
| 5806 | && !mddev->in_sync && mddev->recovery_cp == MaxSector) | 5868 | && !mddev->in_sync && mddev->recovery_cp == MaxSector) |
| 5807 | )) | 5869 | )) |
| @@ -5810,16 +5872,20 @@ void md_check_recovery(mddev_t *mddev) | |||
| 5810 | if (mddev_trylock(mddev)) { | 5872 | if (mddev_trylock(mddev)) { |
| 5811 | int spares = 0; | 5873 | int spares = 0; |
| 5812 | 5874 | ||
| 5813 | spin_lock_irq(&mddev->write_lock); | 5875 | if (!mddev->external) { |
| 5814 | if (mddev->safemode && !atomic_read(&mddev->writes_pending) && | 5876 | spin_lock_irq(&mddev->write_lock); |
| 5815 | !mddev->in_sync && mddev->recovery_cp == MaxSector) { | 5877 | if (mddev->safemode && |
| 5816 | mddev->in_sync = 1; | 5878 | !atomic_read(&mddev->writes_pending) && |
| 5817 | if (mddev->persistent) | 5879 | !mddev->in_sync && |
| 5818 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | 5880 | mddev->recovery_cp == MaxSector) { |
| 5881 | mddev->in_sync = 1; | ||
| 5882 | if (mddev->persistent) | ||
| 5883 | set_bit(MD_CHANGE_CLEAN, &mddev->flags); | ||
| 5884 | } | ||
| 5885 | if (mddev->safemode == 1) | ||
| 5886 | mddev->safemode = 0; | ||
| 5887 | spin_unlock_irq(&mddev->write_lock); | ||
| 5819 | } | 5888 | } |
| 5820 | if (mddev->safemode == 1) | ||
| 5821 | mddev->safemode = 0; | ||
| 5822 | spin_unlock_irq(&mddev->write_lock); | ||
| 5823 | 5889 | ||
| 5824 | if (mddev->flags) | 5890 | if (mddev->flags) |
| 5825 | md_update_sb(mddev, 0); | 5891 | md_update_sb(mddev, 0); |
| @@ -5914,6 +5980,16 @@ void md_check_recovery(mddev_t *mddev) | |||
| 5914 | } | 5980 | } |
| 5915 | } | 5981 | } |
| 5916 | 5982 | ||
| 5983 | void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) | ||
| 5984 | { | ||
| 5985 | sysfs_notify(&rdev->kobj, NULL, "state"); | ||
| 5986 | wait_event_timeout(rdev->blocked_wait, | ||
| 5987 | !test_bit(Blocked, &rdev->flags), | ||
| 5988 | msecs_to_jiffies(5000)); | ||
| 5989 | rdev_dec_pending(rdev, mddev); | ||
| 5990 | } | ||
| 5991 | EXPORT_SYMBOL(md_wait_for_blocked_rdev); | ||
| 5992 | |||
| 5917 | static int md_notify_reboot(struct notifier_block *this, | 5993 | static int md_notify_reboot(struct notifier_block *this, |
| 5918 | unsigned long code, void *x) | 5994 | unsigned long code, void *x) |
| 5919 | { | 5995 | { |
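Reading aid (editor's sketch, not additional driver code): the md.c hunk above introduces the Blocked rdev flag and exports md_wait_for_blocked_rdev(); the raid1 and raid10 hunks that follow both apply the same write-path pattern. Condensed from the raid1 change below, with its identifiers:

 retry_write:
	blocked_rdev = NULL;
	rcu_read_lock();
	for (i = 0; i < disks; i++) {
		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
			atomic_inc(&rdev->nr_pending);	/* pin it across the wait */
			blocked_rdev = rdev;
			break;
		}
		/* ... normal target selection as before ... */
	}
	rcu_read_unlock();

	if (unlikely(blocked_rdev)) {
		/* drop the references taken so far, release the barrier so
		 * in-flight I/O can drain, sleep until the Blocked bit is
		 * cleared (md_wait_for_blocked_rdev gives up after 5 s and
		 * drops the pinned reference), then redo the selection */
		allow_barrier(conf);
		md_wait_for_blocked_rdev(blocked_rdev, mddev);
		wait_barrier(conf);
		goto retry_write;
	}

raid10 differs only in how it walks r10_bio->devs[] and drops the per-copy references before retrying.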
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 9fd473a6dbf5..6778b7cb39bd 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -773,7 +773,6 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 773 | r1bio_t *r1_bio; | 773 | r1bio_t *r1_bio; |
| 774 | struct bio *read_bio; | 774 | struct bio *read_bio; |
| 775 | int i, targets = 0, disks; | 775 | int i, targets = 0, disks; |
| 776 | mdk_rdev_t *rdev; | ||
| 777 | struct bitmap *bitmap = mddev->bitmap; | 776 | struct bitmap *bitmap = mddev->bitmap; |
| 778 | unsigned long flags; | 777 | unsigned long flags; |
| 779 | struct bio_list bl; | 778 | struct bio_list bl; |
| @@ -781,6 +780,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 781 | const int rw = bio_data_dir(bio); | 780 | const int rw = bio_data_dir(bio); |
| 782 | const int do_sync = bio_sync(bio); | 781 | const int do_sync = bio_sync(bio); |
| 783 | int do_barriers; | 782 | int do_barriers; |
| 783 | mdk_rdev_t *blocked_rdev; | ||
| 784 | 784 | ||
| 785 | /* | 785 | /* |
| 786 | * Register the new request and wait if the reconstruction | 786 | * Register the new request and wait if the reconstruction |
| @@ -862,10 +862,17 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 862 | first = 0; | 862 | first = 0; |
| 863 | } | 863 | } |
| 864 | #endif | 864 | #endif |
| 865 | retry_write: | ||
| 866 | blocked_rdev = NULL; | ||
| 865 | rcu_read_lock(); | 867 | rcu_read_lock(); |
| 866 | for (i = 0; i < disks; i++) { | 868 | for (i = 0; i < disks; i++) { |
| 867 | if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL && | 869 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); |
| 868 | !test_bit(Faulty, &rdev->flags)) { | 870 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
| 871 | atomic_inc(&rdev->nr_pending); | ||
| 872 | blocked_rdev = rdev; | ||
| 873 | break; | ||
| 874 | } | ||
| 875 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | ||
| 869 | atomic_inc(&rdev->nr_pending); | 876 | atomic_inc(&rdev->nr_pending); |
| 870 | if (test_bit(Faulty, &rdev->flags)) { | 877 | if (test_bit(Faulty, &rdev->flags)) { |
| 871 | rdev_dec_pending(rdev, mddev); | 878 | rdev_dec_pending(rdev, mddev); |
| @@ -878,6 +885,20 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 878 | } | 885 | } |
| 879 | rcu_read_unlock(); | 886 | rcu_read_unlock(); |
| 880 | 887 | ||
| 888 | if (unlikely(blocked_rdev)) { | ||
| 889 | /* Wait for this device to become unblocked */ | ||
| 890 | int j; | ||
| 891 | |||
| 892 | for (j = 0; j < i; j++) | ||
| 893 | if (r1_bio->bios[j]) | ||
| 894 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); | ||
| 895 | |||
| 896 | allow_barrier(conf); | ||
| 897 | md_wait_for_blocked_rdev(blocked_rdev, mddev); | ||
| 898 | wait_barrier(conf); | ||
| 899 | goto retry_write; | ||
| 900 | } | ||
| 901 | |||
| 881 | BUG_ON(targets == 0); /* we never fail the last device */ | 902 | BUG_ON(targets == 0); /* we never fail the last device */ |
| 882 | 903 | ||
| 883 | if (targets < conf->raid_disks) { | 904 | if (targets < conf->raid_disks) { |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 1e96aa3ff513..5938fa962922 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -790,6 +790,7 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 790 | const int do_sync = bio_sync(bio); | 790 | const int do_sync = bio_sync(bio); |
| 791 | struct bio_list bl; | 791 | struct bio_list bl; |
| 792 | unsigned long flags; | 792 | unsigned long flags; |
| 793 | mdk_rdev_t *blocked_rdev; | ||
| 793 | 794 | ||
| 794 | if (unlikely(bio_barrier(bio))) { | 795 | if (unlikely(bio_barrier(bio))) { |
| 795 | bio_endio(bio, -EOPNOTSUPP); | 796 | bio_endio(bio, -EOPNOTSUPP); |
| @@ -879,17 +880,23 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 879 | /* | 880 | /* |
| 880 | * WRITE: | 881 | * WRITE: |
| 881 | */ | 882 | */ |
| 882 | /* first select target devices under spinlock and | 883 | /* first select target devices under rcu_lock and |
| 883 | * inc refcount on their rdev. Record them by setting | 884 | * inc refcount on their rdev. Record them by setting |
| 884 | * bios[x] to bio | 885 | * bios[x] to bio |
| 885 | */ | 886 | */ |
| 886 | raid10_find_phys(conf, r10_bio); | 887 | raid10_find_phys(conf, r10_bio); |
| 888 | retry_write: | ||
| 889 | blocked_rdev = 0; | ||
| 887 | rcu_read_lock(); | 890 | rcu_read_lock(); |
| 888 | for (i = 0; i < conf->copies; i++) { | 891 | for (i = 0; i < conf->copies; i++) { |
| 889 | int d = r10_bio->devs[i].devnum; | 892 | int d = r10_bio->devs[i].devnum; |
| 890 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); | 893 | mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[d].rdev); |
| 891 | if (rdev && | 894 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { |
| 892 | !test_bit(Faulty, &rdev->flags)) { | 895 | atomic_inc(&rdev->nr_pending); |
| 896 | blocked_rdev = rdev; | ||
| 897 | break; | ||
| 898 | } | ||
| 899 | if (rdev && !test_bit(Faulty, &rdev->flags)) { | ||
| 893 | atomic_inc(&rdev->nr_pending); | 900 | atomic_inc(&rdev->nr_pending); |
| 894 | r10_bio->devs[i].bio = bio; | 901 | r10_bio->devs[i].bio = bio; |
| 895 | } else { | 902 | } else { |
| @@ -899,6 +906,22 @@ static int make_request(struct request_queue *q, struct bio * bio) | |||
| 899 | } | 906 | } |
| 900 | rcu_read_unlock(); | 907 | rcu_read_unlock(); |
| 901 | 908 | ||
| 909 | if (unlikely(blocked_rdev)) { | ||
| 910 | /* Have to wait for this device to get unblocked, then retry */ | ||
| 911 | int j; | ||
| 912 | int d; | ||
| 913 | |||
| 914 | for (j = 0; j < i; j++) | ||
| 915 | if (r10_bio->devs[j].bio) { | ||
| 916 | d = r10_bio->devs[j].devnum; | ||
| 917 | rdev_dec_pending(conf->mirrors[d].rdev, mddev); | ||
| 918 | } | ||
| 919 | allow_barrier(conf); | ||
| 920 | md_wait_for_blocked_rdev(blocked_rdev, mddev); | ||
| 921 | wait_barrier(conf); | ||
| 922 | goto retry_write; | ||
| 923 | } | ||
| 924 | |||
| 902 | atomic_set(&r10_bio->remaining, 0); | 925 | atomic_set(&r10_bio->remaining, 0); |
| 903 | 926 | ||
| 904 | bio_list_init(&bl); | 927 | bio_list_init(&bl); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 968dacaced6d..087eee0cb809 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -2607,6 +2607,7 @@ static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, | |||
| 2607 | } | 2607 | } |
| 2608 | } | 2608 | } |
| 2609 | 2609 | ||
| 2610 | |||
| 2610 | /* | 2611 | /* |
| 2611 | * handle_stripe - do things to a stripe. | 2612 | * handle_stripe - do things to a stripe. |
| 2612 | * | 2613 | * |
| @@ -2632,6 +2633,7 @@ static void handle_stripe5(struct stripe_head *sh) | |||
| 2632 | struct stripe_head_state s; | 2633 | struct stripe_head_state s; |
| 2633 | struct r5dev *dev; | 2634 | struct r5dev *dev; |
| 2634 | unsigned long pending = 0; | 2635 | unsigned long pending = 0; |
| 2636 | mdk_rdev_t *blocked_rdev = NULL; | ||
| 2635 | 2637 | ||
| 2636 | memset(&s, 0, sizeof(s)); | 2638 | memset(&s, 0, sizeof(s)); |
| 2637 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " | 2639 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " |
| @@ -2691,6 +2693,11 @@ static void handle_stripe5(struct stripe_head *sh) | |||
| 2691 | if (dev->written) | 2693 | if (dev->written) |
| 2692 | s.written++; | 2694 | s.written++; |
| 2693 | rdev = rcu_dereference(conf->disks[i].rdev); | 2695 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 2696 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | ||
| 2697 | blocked_rdev = rdev; | ||
| 2698 | atomic_inc(&rdev->nr_pending); | ||
| 2699 | break; | ||
| 2700 | } | ||
| 2694 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { | 2701 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
| 2695 | /* The ReadError flag will just be confusing now */ | 2702 | /* The ReadError flag will just be confusing now */ |
| 2696 | clear_bit(R5_ReadError, &dev->flags); | 2703 | clear_bit(R5_ReadError, &dev->flags); |
| @@ -2705,6 +2712,11 @@ static void handle_stripe5(struct stripe_head *sh) | |||
| 2705 | } | 2712 | } |
| 2706 | rcu_read_unlock(); | 2713 | rcu_read_unlock(); |
| 2707 | 2714 | ||
| 2715 | if (unlikely(blocked_rdev)) { | ||
| 2716 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 2717 | goto unlock; | ||
| 2718 | } | ||
| 2719 | |||
| 2708 | if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) | 2720 | if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) |
| 2709 | sh->ops.count++; | 2721 | sh->ops.count++; |
| 2710 | 2722 | ||
| @@ -2894,8 +2906,13 @@ static void handle_stripe5(struct stripe_head *sh) | |||
| 2894 | if (sh->ops.count) | 2906 | if (sh->ops.count) |
| 2895 | pending = get_stripe_work(sh); | 2907 | pending = get_stripe_work(sh); |
| 2896 | 2908 | ||
| 2909 | unlock: | ||
| 2897 | spin_unlock(&sh->lock); | 2910 | spin_unlock(&sh->lock); |
| 2898 | 2911 | ||
| 2912 | /* wait for this device to become unblocked */ | ||
| 2913 | if (unlikely(blocked_rdev)) | ||
| 2914 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); | ||
| 2915 | |||
| 2899 | if (pending) | 2916 | if (pending) |
| 2900 | raid5_run_ops(sh, pending); | 2917 | raid5_run_ops(sh, pending); |
| 2901 | 2918 | ||
| @@ -2912,6 +2929,7 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
| 2912 | struct stripe_head_state s; | 2929 | struct stripe_head_state s; |
| 2913 | struct r6_state r6s; | 2930 | struct r6_state r6s; |
| 2914 | struct r5dev *dev, *pdev, *qdev; | 2931 | struct r5dev *dev, *pdev, *qdev; |
| 2932 | mdk_rdev_t *blocked_rdev = NULL; | ||
| 2915 | 2933 | ||
| 2916 | r6s.qd_idx = raid6_next_disk(pd_idx, disks); | 2934 | r6s.qd_idx = raid6_next_disk(pd_idx, disks); |
| 2917 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " | 2935 | pr_debug("handling stripe %llu, state=%#lx cnt=%d, " |
| @@ -2975,6 +2993,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
| 2975 | if (dev->written) | 2993 | if (dev->written) |
| 2976 | s.written++; | 2994 | s.written++; |
| 2977 | rdev = rcu_dereference(conf->disks[i].rdev); | 2995 | rdev = rcu_dereference(conf->disks[i].rdev); |
| 2996 | if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { | ||
| 2997 | blocked_rdev = rdev; | ||
| 2998 | atomic_inc(&rdev->nr_pending); | ||
| 2999 | break; | ||
| 3000 | } | ||
| 2978 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { | 3001 | if (!rdev || !test_bit(In_sync, &rdev->flags)) { |
| 2979 | /* The ReadError flag will just be confusing now */ | 3002 | /* The ReadError flag will just be confusing now */ |
| 2980 | clear_bit(R5_ReadError, &dev->flags); | 3003 | clear_bit(R5_ReadError, &dev->flags); |
| @@ -2989,6 +3012,11 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
| 2989 | set_bit(R5_Insync, &dev->flags); | 3012 | set_bit(R5_Insync, &dev->flags); |
| 2990 | } | 3013 | } |
| 2991 | rcu_read_unlock(); | 3014 | rcu_read_unlock(); |
| 3015 | |||
| 3016 | if (unlikely(blocked_rdev)) { | ||
| 3017 | set_bit(STRIPE_HANDLE, &sh->state); | ||
| 3018 | goto unlock; | ||
| 3019 | } | ||
| 2992 | pr_debug("locked=%d uptodate=%d to_read=%d" | 3020 | pr_debug("locked=%d uptodate=%d to_read=%d" |
| 2993 | " to_write=%d failed=%d failed_num=%d,%d\n", | 3021 | " to_write=%d failed=%d failed_num=%d,%d\n", |
| 2994 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, | 3022 | s.locked, s.uptodate, s.to_read, s.to_write, s.failed, |
| @@ -3094,8 +3122,13 @@ static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) | |||
| 3094 | !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) | 3122 | !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) |
| 3095 | handle_stripe_expansion(conf, sh, &r6s); | 3123 | handle_stripe_expansion(conf, sh, &r6s); |
| 3096 | 3124 | ||
| 3125 | unlock: | ||
| 3097 | spin_unlock(&sh->lock); | 3126 | spin_unlock(&sh->lock); |
| 3098 | 3127 | ||
| 3128 | /* wait for this device to become unblocked */ | ||
| 3129 | if (unlikely(blocked_rdev)) | ||
| 3130 | md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); | ||
| 3131 | |||
| 3099 | return_io(return_bi); | 3132 | return_io(return_bi); |
| 3100 | 3133 | ||
| 3101 | for (i=disks; i-- ;) { | 3134 | for (i=disks; i-- ;) { |
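Editor's sketch of the raid5 variant (condensed from the handle_stripe5/handle_stripe6 hunks above, not additional driver code): the per-device scan runs under sh->lock, a spinlock, and md_wait_for_blocked_rdev() sleeps in wait_event_timeout(), so the stripe handlers only record the blocked device, mark the stripe for re-handling, and defer the wait until the lock is dropped:

	spin_lock(&sh->lock);
	/* ... per-device scan ... */
	if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
		blocked_rdev = rdev;
		atomic_inc(&rdev->nr_pending);
		break;
	}
	/* ... */
	if (unlikely(blocked_rdev)) {
		set_bit(STRIPE_HANDLE, &sh->state);	/* revisit this stripe later */
		goto unlock;
	}
	/* ... normal stripe handling ... */
 unlock:
	spin_unlock(&sh->lock);
	/* only now is it safe to sleep */
	if (unlikely(blocked_rdev))
		md_wait_for_blocked_rdev(blocked_rdev, conf->mddev);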
diff --git a/drivers/media/common/Makefile b/drivers/media/common/Makefile index 8e7448230643..351b98b9b302 100644 --- a/drivers/media/common/Makefile +++ b/drivers/media/common/Makefile | |||
| @@ -2,6 +2,7 @@ saa7146-objs := saa7146_i2c.o saa7146_core.o | |||
| 2 | saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o | 2 | saa7146_vv-objs := saa7146_fops.o saa7146_video.o saa7146_hlp.o saa7146_vbi.o |
| 3 | ir-common-objs := ir-functions.o ir-keymaps.o | 3 | ir-common-objs := ir-functions.o ir-keymaps.o |
| 4 | 4 | ||
| 5 | obj-y += tuners/ | ||
| 5 | obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o | 6 | obj-$(CONFIG_VIDEO_SAA7146) += saa7146.o |
| 6 | obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o | 7 | obj-$(CONFIG_VIDEO_SAA7146_VV) += saa7146_vv.o |
| 7 | obj-$(CONFIG_VIDEO_IR) += ir-common.o | 8 | obj-$(CONFIG_VIDEO_IR) += ir-common.o |
diff --git a/drivers/media/video/bt8xx/bttvp.h b/drivers/media/video/bt8xx/bttvp.h index 03816b73f847..27da7b423275 100644 --- a/drivers/media/video/bt8xx/bttvp.h +++ b/drivers/media/video/bt8xx/bttvp.h | |||
| @@ -81,8 +81,6 @@ | |||
| 81 | /* Limits scaled width, which must be a multiple of 4. */ | 81 | /* Limits scaled width, which must be a multiple of 4. */ |
| 82 | #define MAX_HACTIVE (0x3FF & -4) | 82 | #define MAX_HACTIVE (0x3FF & -4) |
| 83 | 83 | ||
| 84 | #define clamp(x, low, high) min (max (low, x), high) | ||
| 85 | |||
| 86 | #define BTTV_NORMS (\ | 84 | #define BTTV_NORMS (\ |
| 87 | V4L2_STD_PAL | V4L2_STD_PAL_N | \ | 85 | V4L2_STD_PAL | V4L2_STD_PAL_N | \ |
| 88 | V4L2_STD_PAL_Nc | V4L2_STD_SECAM | \ | 86 | V4L2_STD_PAL_Nc | V4L2_STD_SECAM | \ |
diff --git a/drivers/media/video/usbvideo/vicam.c b/drivers/media/video/usbvideo/vicam.c index 64819353276a..17f542dfb366 100644 --- a/drivers/media/video/usbvideo/vicam.c +++ b/drivers/media/video/usbvideo/vicam.c | |||
| @@ -70,12 +70,6 @@ | |||
| 70 | 70 | ||
| 71 | #define VICAM_HEADER_SIZE 64 | 71 | #define VICAM_HEADER_SIZE 64 |
| 72 | 72 | ||
| 73 | #define clamp( x, l, h ) max_t( __typeof__( x ), \ | ||
| 74 | ( l ), \ | ||
| 75 | min_t( __typeof__( x ), \ | ||
| 76 | ( h ), \ | ||
| 77 | ( x ) ) ) | ||
| 78 | |||
| 79 | /* Not sure what all the bytes in these char | 73 | /* Not sure what all the bytes in these char |
| 80 | * arrays do, but they're necessary to make | 74 | * arrays do, but they're necessary to make |
| 81 | * the camera work. | 75 | * the camera work. |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index f6f2d960cadb..ef8a492766a7 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
| @@ -132,7 +132,7 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
| 132 | 132 | ||
| 133 | if (iter >= MAX_ASIC_ISR_LOOPS) | 133 | if (iter >= MAX_ASIC_ISR_LOOPS) |
| 134 | printk(KERN_ERR "%s: interrupt processing overrun\n", | 134 | printk(KERN_ERR "%s: interrupt processing overrun\n", |
| 135 | __FUNCTION__); | 135 | __func__); |
| 136 | } | 136 | } |
| 137 | 137 | ||
| 138 | static inline int asic3_irq_to_bank(struct asic3 *asic, int irq) | 138 | static inline int asic3_irq_to_bank(struct asic3 *asic, int irq) |
| @@ -409,7 +409,7 @@ int asic3_gpio_get_value(struct asic3 *asic, unsigned gpio) | |||
| 409 | return asic3_get_gpio_d(asic, Status) & mask; | 409 | return asic3_get_gpio_d(asic, Status) & mask; |
| 410 | default: | 410 | default: |
| 411 | printk(KERN_ERR "%s: invalid GPIO value 0x%x", | 411 | printk(KERN_ERR "%s: invalid GPIO value 0x%x", |
| 412 | __FUNCTION__, gpio); | 412 | __func__, gpio); |
| 413 | return -EINVAL; | 413 | return -EINVAL; |
| 414 | } | 414 | } |
| 415 | } | 415 | } |
| @@ -437,7 +437,7 @@ void asic3_gpio_set_value(struct asic3 *asic, unsigned gpio, int val) | |||
| 437 | return; | 437 | return; |
| 438 | default: | 438 | default: |
| 439 | printk(KERN_ERR "%s: invalid GPIO value 0x%x", | 439 | printk(KERN_ERR "%s: invalid GPIO value 0x%x", |
| 440 | __FUNCTION__, gpio); | 440 | __func__, gpio); |
| 441 | return; | 441 | return; |
| 442 | } | 442 | } |
| 443 | } | 443 | } |
diff --git a/drivers/mfd/sm501.c b/drivers/mfd/sm501.c index 6e655b4c6682..2fe64734d8af 100644 --- a/drivers/mfd/sm501.c +++ b/drivers/mfd/sm501.c | |||
| @@ -349,11 +349,11 @@ int sm501_unit_power(struct device *dev, unsigned int unit, unsigned int to) | |||
| 349 | mode &= 3; /* get current power mode */ | 349 | mode &= 3; /* get current power mode */ |
| 350 | 350 | ||
| 351 | if (unit >= ARRAY_SIZE(sm->unit_power)) { | 351 | if (unit >= ARRAY_SIZE(sm->unit_power)) { |
| 352 | dev_err(dev, "%s: bad unit %d\n", __FUNCTION__, unit); | 352 | dev_err(dev, "%s: bad unit %d\n", __func__, unit); |
| 353 | goto already; | 353 | goto already; |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __FUNCTION__, unit, | 356 | dev_dbg(sm->dev, "%s: unit %d, cur %d, to %d\n", __func__, unit, |
| 357 | sm->unit_power[unit], to); | 357 | sm->unit_power[unit], to); |
| 358 | 358 | ||
| 359 | if (to == 0 && sm->unit_power[unit] == 0) { | 359 | if (to == 0 && sm->unit_power[unit] == 0) { |
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 6d6286c4eeac..30a1af857c7a 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c | |||
| @@ -132,7 +132,7 @@ static int send_ack; | |||
| 132 | static int final_ack; | 132 | static int final_ack; |
| 133 | static int hw_break_val; | 133 | static int hw_break_val; |
| 134 | static int hw_break_val2; | 134 | static int hw_break_val2; |
| 135 | #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) | 135 | #if defined(CONFIG_ARM) || defined(CONFIG_MIPS) || defined(CONFIG_SPARC) |
| 136 | static int arch_needs_sstep_emulation = 1; | 136 | static int arch_needs_sstep_emulation = 1; |
| 137 | #else | 137 | #else |
| 138 | static int arch_needs_sstep_emulation; | 138 | static int arch_needs_sstep_emulation; |
diff --git a/drivers/net/3c505.c b/drivers/net/3c505.c index 9c6573419f5a..fdfb2b2cb734 100644 --- a/drivers/net/3c505.c +++ b/drivers/net/3c505.c | |||
| @@ -670,7 +670,7 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id) | |||
| 670 | memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); | 670 | memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length); |
| 671 | } | 671 | } |
| 672 | skb->protocol = eth_type_trans(skb,dev); | 672 | skb->protocol = eth_type_trans(skb,dev); |
| 673 | adapter->stats.rx_bytes += skb->len; | 673 | dev->stats.rx_bytes += skb->len; |
| 674 | netif_rx(skb); | 674 | netif_rx(skb); |
| 675 | dev->last_rx = jiffies; | 675 | dev->last_rx = jiffies; |
| 676 | } | 676 | } |
| @@ -773,12 +773,12 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id) | |||
| 773 | * received board statistics | 773 | * received board statistics |
| 774 | */ | 774 | */ |
| 775 | case CMD_NETWORK_STATISTICS_RESPONSE: | 775 | case CMD_NETWORK_STATISTICS_RESPONSE: |
| 776 | adapter->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; | 776 | dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv; |
| 777 | adapter->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; | 777 | dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit; |
| 778 | adapter->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; | 778 | dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC; |
| 779 | adapter->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; | 779 | dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align; |
| 780 | adapter->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; | 780 | dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun; |
| 781 | adapter->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; | 781 | dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res; |
| 782 | adapter->got[CMD_NETWORK_STATISTICS] = 1; | 782 | adapter->got[CMD_NETWORK_STATISTICS] = 1; |
| 783 | if (elp_debug >= 3) | 783 | if (elp_debug >= 3) |
| 784 | printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name); | 784 | printk(KERN_DEBUG "%s: interrupt - statistics response received\n", dev->name); |
| @@ -794,11 +794,11 @@ static irqreturn_t elp_interrupt(int irq, void *dev_id) | |||
| 794 | break; | 794 | break; |
| 795 | switch (adapter->irx_pcb.data.xmit_resp.c_stat) { | 795 | switch (adapter->irx_pcb.data.xmit_resp.c_stat) { |
| 796 | case 0xffff: | 796 | case 0xffff: |
| 797 | adapter->stats.tx_aborted_errors++; | 797 | dev->stats.tx_aborted_errors++; |
| 798 | printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name); | 798 | printk(KERN_INFO "%s: transmit timed out, network cable problem?\n", dev->name); |
| 799 | break; | 799 | break; |
| 800 | case 0xfffe: | 800 | case 0xfffe: |
| 801 | adapter->stats.tx_fifo_errors++; | 801 | dev->stats.tx_fifo_errors++; |
| 802 | printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name); | 802 | printk(KERN_INFO "%s: transmit timed out, FIFO underrun\n", dev->name); |
| 803 | break; | 803 | break; |
| 804 | } | 804 | } |
| @@ -986,7 +986,7 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb) | |||
| 986 | return false; | 986 | return false; |
| 987 | } | 987 | } |
| 988 | 988 | ||
| 989 | adapter->stats.tx_bytes += nlen; | 989 | dev->stats.tx_bytes += nlen; |
| 990 | 990 | ||
| 991 | /* | 991 | /* |
| 992 | * send the adapter a transmit packet command. Ignore segment and offset | 992 | * send the adapter a transmit packet command. Ignore segment and offset |
| @@ -1041,7 +1041,6 @@ static bool send_packet(struct net_device *dev, struct sk_buff *skb) | |||
| 1041 | 1041 | ||
| 1042 | static void elp_timeout(struct net_device *dev) | 1042 | static void elp_timeout(struct net_device *dev) |
| 1043 | { | 1043 | { |
| 1044 | elp_device *adapter = dev->priv; | ||
| 1045 | int stat; | 1044 | int stat; |
| 1046 | 1045 | ||
| 1047 | stat = inb_status(dev->base_addr); | 1046 | stat = inb_status(dev->base_addr); |
| @@ -1049,7 +1048,7 @@ static void elp_timeout(struct net_device *dev) | |||
| 1049 | if (elp_debug >= 1) | 1048 | if (elp_debug >= 1) |
| 1050 | printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat); | 1049 | printk(KERN_DEBUG "%s: status %#02x\n", dev->name, stat); |
| 1051 | dev->trans_start = jiffies; | 1050 | dev->trans_start = jiffies; |
| 1052 | adapter->stats.tx_dropped++; | 1051 | dev->stats.tx_dropped++; |
| 1053 | netif_wake_queue(dev); | 1052 | netif_wake_queue(dev); |
| 1054 | } | 1053 | } |
| 1055 | 1054 | ||
| @@ -1113,7 +1112,7 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev) | |||
| 1113 | /* If the device is closed, just return the latest stats we have, | 1112 | /* If the device is closed, just return the latest stats we have, |
| 1114 | - we cannot ask from the adapter without interrupts */ | 1113 | - we cannot ask from the adapter without interrupts */ |
| 1115 | if (!netif_running(dev)) | 1114 | if (!netif_running(dev)) |
| 1116 | return &adapter->stats; | 1115 | return &dev->stats; |
| 1117 | 1116 | ||
| 1118 | /* send a get statistics command to the board */ | 1117 | /* send a get statistics command to the board */ |
| 1119 | adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; | 1118 | adapter->tx_pcb.command = CMD_NETWORK_STATISTICS; |
| @@ -1126,12 +1125,12 @@ static struct net_device_stats *elp_get_stats(struct net_device *dev) | |||
| 1126 | while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); | 1125 | while (adapter->got[CMD_NETWORK_STATISTICS] == 0 && time_before(jiffies, timeout)); |
| 1127 | if (time_after_eq(jiffies, timeout)) { | 1126 | if (time_after_eq(jiffies, timeout)) { |
| 1128 | TIMEOUT_MSG(__LINE__); | 1127 | TIMEOUT_MSG(__LINE__); |
| 1129 | return &adapter->stats; | 1128 | return &dev->stats; |
| 1130 | } | 1129 | } |
| 1131 | } | 1130 | } |
| 1132 | 1131 | ||
| 1133 | /* statistics are now up to date */ | 1132 | /* statistics are now up to date */ |
| 1134 | return &adapter->stats; | 1133 | return &dev->stats; |
| 1135 | } | 1134 | } |
| 1136 | 1135 | ||
| 1137 | 1136 | ||
| @@ -1571,7 +1570,6 @@ static int __init elplus_setup(struct net_device *dev) | |||
| 1571 | dev->set_multicast_list = elp_set_mc_list; /* local */ | 1570 | dev->set_multicast_list = elp_set_mc_list; /* local */ |
| 1572 | dev->ethtool_ops = &netdev_ethtool_ops; /* local */ | 1571 | dev->ethtool_ops = &netdev_ethtool_ops; /* local */ |
| 1573 | 1572 | ||
| 1574 | memset(&(adapter->stats), 0, sizeof(struct net_device_stats)); | ||
| 1575 | dev->mem_start = dev->mem_end = 0; | 1573 | dev->mem_start = dev->mem_end = 0; |
| 1576 | 1574 | ||
| 1577 | err = register_netdev(dev); | 1575 | err = register_netdev(dev); |
diff --git a/drivers/net/3c505.h b/drivers/net/3c505.h index 1910cb1dc787..04df2a9002b6 100644 --- a/drivers/net/3c505.h +++ b/drivers/net/3c505.h | |||
| @@ -264,7 +264,6 @@ typedef struct { | |||
| 264 | pcb_struct rx_pcb; /* PCB for foreground receiving */ | 264 | pcb_struct rx_pcb; /* PCB for foreground receiving */ |
| 265 | pcb_struct itx_pcb; /* PCB for background sending */ | 265 | pcb_struct itx_pcb; /* PCB for background sending */ |
| 266 | pcb_struct irx_pcb; /* PCB for background receiving */ | 266 | pcb_struct irx_pcb; /* PCB for background receiving */ |
| 267 | struct net_device_stats stats; | ||
| 268 | 267 | ||
| 269 | void *dma_buffer; | 268 | void *dma_buffer; |
| 270 | 269 | ||
diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 54dac0696d91..e6c545fe5f58 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c | |||
| @@ -167,7 +167,6 @@ enum RxFilter { | |||
| 167 | enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; | 167 | enum el3_cardtype { EL3_ISA, EL3_PNP, EL3_MCA, EL3_EISA }; |
| 168 | 168 | ||
| 169 | struct el3_private { | 169 | struct el3_private { |
| 170 | struct net_device_stats stats; | ||
| 171 | spinlock_t lock; | 170 | spinlock_t lock; |
| 172 | /* skb send-queue */ | 171 | /* skb send-queue */ |
| 173 | int head, size; | 172 | int head, size; |
| @@ -794,7 +793,6 @@ el3_open(struct net_device *dev) | |||
| 794 | static void | 793 | static void |
| 795 | el3_tx_timeout (struct net_device *dev) | 794 | el3_tx_timeout (struct net_device *dev) |
| 796 | { | 795 | { |
| 797 | struct el3_private *lp = netdev_priv(dev); | ||
| 798 | int ioaddr = dev->base_addr; | 796 | int ioaddr = dev->base_addr; |
| 799 | 797 | ||
| 800 | /* Transmitter timeout, serious problems. */ | 798 | /* Transmitter timeout, serious problems. */ |
| @@ -802,7 +800,7 @@ el3_tx_timeout (struct net_device *dev) | |||
| 802 | "Tx FIFO room %d.\n", | 800 | "Tx FIFO room %d.\n", |
| 803 | dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), | 801 | dev->name, inb(ioaddr + TX_STATUS), inw(ioaddr + EL3_STATUS), |
| 804 | inw(ioaddr + TX_FREE)); | 802 | inw(ioaddr + TX_FREE)); |
| 805 | lp->stats.tx_errors++; | 803 | dev->stats.tx_errors++; |
| 806 | dev->trans_start = jiffies; | 804 | dev->trans_start = jiffies; |
| 807 | /* Issue TX_RESET and TX_START commands. */ | 805 | /* Issue TX_RESET and TX_START commands. */ |
| 808 | outw(TxReset, ioaddr + EL3_CMD); | 806 | outw(TxReset, ioaddr + EL3_CMD); |
| @@ -820,7 +818,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 820 | 818 | ||
| 821 | netif_stop_queue (dev); | 819 | netif_stop_queue (dev); |
| 822 | 820 | ||
| 823 | lp->stats.tx_bytes += skb->len; | 821 | dev->stats.tx_bytes += skb->len; |
| 824 | 822 | ||
| 825 | if (el3_debug > 4) { | 823 | if (el3_debug > 4) { |
| 826 | printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n", | 824 | printk(KERN_DEBUG "%s: el3_start_xmit(length = %u) called, status %4.4x.\n", |
| @@ -881,7 +879,7 @@ el3_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 881 | int i = 4; | 879 | int i = 4; |
| 882 | 880 | ||
| 883 | while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { | 881 | while (--i > 0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { |
| 884 | if (tx_status & 0x38) lp->stats.tx_aborted_errors++; | 882 | if (tx_status & 0x38) dev->stats.tx_aborted_errors++; |
| 885 | if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); | 883 | if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); |
| 886 | if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); | 884 | if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); |
| 887 | outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ | 885 | outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ |
| @@ -931,12 +929,11 @@ el3_interrupt(int irq, void *dev_id) | |||
| 931 | outw(AckIntr | RxEarly, ioaddr + EL3_CMD); | 929 | outw(AckIntr | RxEarly, ioaddr + EL3_CMD); |
| 932 | } | 930 | } |
| 933 | if (status & TxComplete) { /* Really Tx error. */ | 931 | if (status & TxComplete) { /* Really Tx error. */ |
| 934 | struct el3_private *lp = netdev_priv(dev); | ||
| 935 | short tx_status; | 932 | short tx_status; |
| 936 | int i = 4; | 933 | int i = 4; |
| 937 | 934 | ||
| 938 | while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { | 935 | while (--i>0 && (tx_status = inb(ioaddr + TX_STATUS)) > 0) { |
| 939 | if (tx_status & 0x38) lp->stats.tx_aborted_errors++; | 936 | if (tx_status & 0x38) dev->stats.tx_aborted_errors++; |
| 940 | if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); | 937 | if (tx_status & 0x30) outw(TxReset, ioaddr + EL3_CMD); |
| 941 | if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); | 938 | if (tx_status & 0x3C) outw(TxEnable, ioaddr + EL3_CMD); |
| 942 | outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ | 939 | outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */ |
| @@ -1002,7 +999,7 @@ el3_get_stats(struct net_device *dev) | |||
| 1002 | spin_lock_irqsave(&lp->lock, flags); | 999 | spin_lock_irqsave(&lp->lock, flags); |
| 1003 | update_stats(dev); | 1000 | update_stats(dev); |
| 1004 | spin_unlock_irqrestore(&lp->lock, flags); | 1001 | spin_unlock_irqrestore(&lp->lock, flags); |
| 1005 | return &lp->stats; | 1002 | return &dev->stats; |
| 1006 | } | 1003 | } |
| 1007 | 1004 | ||
| 1008 | /* Update statistics. We change to register window 6, so this should be run | 1005 | /* Update statistics. We change to register window 6, so this should be run |
| @@ -1012,7 +1009,6 @@ el3_get_stats(struct net_device *dev) | |||
| 1012 | */ | 1009 | */ |
| 1013 | static void update_stats(struct net_device *dev) | 1010 | static void update_stats(struct net_device *dev) |
| 1014 | { | 1011 | { |
| 1015 | struct el3_private *lp = netdev_priv(dev); | ||
| 1016 | int ioaddr = dev->base_addr; | 1012 | int ioaddr = dev->base_addr; |
| 1017 | 1013 | ||
| 1018 | if (el3_debug > 5) | 1014 | if (el3_debug > 5) |
| @@ -1021,13 +1017,13 @@ static void update_stats(struct net_device *dev) | |||
| 1021 | outw(StatsDisable, ioaddr + EL3_CMD); | 1017 | outw(StatsDisable, ioaddr + EL3_CMD); |
| 1022 | /* Switch to the stats window, and read everything. */ | 1018 | /* Switch to the stats window, and read everything. */ |
| 1023 | EL3WINDOW(6); | 1019 | EL3WINDOW(6); |
| 1024 | lp->stats.tx_carrier_errors += inb(ioaddr + 0); | 1020 | dev->stats.tx_carrier_errors += inb(ioaddr + 0); |
| 1025 | lp->stats.tx_heartbeat_errors += inb(ioaddr + 1); | 1021 | dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); |
| 1026 | /* Multiple collisions. */ inb(ioaddr + 2); | 1022 | /* Multiple collisions. */ inb(ioaddr + 2); |
| 1027 | lp->stats.collisions += inb(ioaddr + 3); | 1023 | dev->stats.collisions += inb(ioaddr + 3); |
| 1028 | lp->stats.tx_window_errors += inb(ioaddr + 4); | 1024 | dev->stats.tx_window_errors += inb(ioaddr + 4); |
| 1029 | lp->stats.rx_fifo_errors += inb(ioaddr + 5); | 1025 | dev->stats.rx_fifo_errors += inb(ioaddr + 5); |
| 1030 | lp->stats.tx_packets += inb(ioaddr + 6); | 1026 | dev->stats.tx_packets += inb(ioaddr + 6); |
| 1031 | /* Rx packets */ inb(ioaddr + 7); | 1027 | /* Rx packets */ inb(ioaddr + 7); |
| 1032 | /* Tx deferrals */ inb(ioaddr + 8); | 1028 | /* Tx deferrals */ inb(ioaddr + 8); |
| 1033 | inw(ioaddr + 10); /* Total Rx and Tx octets. */ | 1029 | inw(ioaddr + 10); /* Total Rx and Tx octets. */ |
| @@ -1042,7 +1038,6 @@ static void update_stats(struct net_device *dev) | |||
| 1042 | static int | 1038 | static int |
| 1043 | el3_rx(struct net_device *dev) | 1039 | el3_rx(struct net_device *dev) |
| 1044 | { | 1040 | { |
| 1045 | struct el3_private *lp = netdev_priv(dev); | ||
| 1046 | int ioaddr = dev->base_addr; | 1041 | int ioaddr = dev->base_addr; |
| 1047 | short rx_status; | 1042 | short rx_status; |
| 1048 | 1043 | ||
| @@ -1054,21 +1049,21 @@ el3_rx(struct net_device *dev) | |||
| 1054 | short error = rx_status & 0x3800; | 1049 | short error = rx_status & 0x3800; |
| 1055 | 1050 | ||
| 1056 | outw(RxDiscard, ioaddr + EL3_CMD); | 1051 | outw(RxDiscard, ioaddr + EL3_CMD); |
| 1057 | lp->stats.rx_errors++; | 1052 | dev->stats.rx_errors++; |
| 1058 | switch (error) { | 1053 | switch (error) { |
| 1059 | case 0x0000: lp->stats.rx_over_errors++; break; | 1054 | case 0x0000: dev->stats.rx_over_errors++; break; |
| 1060 | case 0x0800: lp->stats.rx_length_errors++; break; | 1055 | case 0x0800: dev->stats.rx_length_errors++; break; |
| 1061 | case 0x1000: lp->stats.rx_frame_errors++; break; | 1056 | case 0x1000: dev->stats.rx_frame_errors++; break; |
| 1062 | case 0x1800: lp->stats.rx_length_errors++; break; | 1057 | case 0x1800: dev->stats.rx_length_errors++; break; |
| 1063 | case 0x2000: lp->stats.rx_frame_errors++; break; | 1058 | case 0x2000: dev->stats.rx_frame_errors++; break; |
| 1064 | case 0x2800: lp->stats.rx_crc_errors++; break; | 1059 | case 0x2800: dev->stats.rx_crc_errors++; break; |
| 1065 | } | 1060 | } |
| 1066 | } else { | 1061 | } else { |
| 1067 | short pkt_len = rx_status & 0x7ff; | 1062 | short pkt_len = rx_status & 0x7ff; |
| 1068 | struct sk_buff *skb; | 1063 | struct sk_buff *skb; |
| 1069 | 1064 | ||
| 1070 | skb = dev_alloc_skb(pkt_len+5); | 1065 | skb = dev_alloc_skb(pkt_len+5); |
| 1071 | lp->stats.rx_bytes += pkt_len; | 1066 | dev->stats.rx_bytes += pkt_len; |
| 1072 | if (el3_debug > 4) | 1067 | if (el3_debug > 4) |
| 1073 | printk("Receiving packet size %d status %4.4x.\n", | 1068 | printk("Receiving packet size %d status %4.4x.\n", |
| 1074 | pkt_len, rx_status); | 1069 | pkt_len, rx_status); |
| @@ -1083,11 +1078,11 @@ el3_rx(struct net_device *dev) | |||
| 1083 | skb->protocol = eth_type_trans(skb,dev); | 1078 | skb->protocol = eth_type_trans(skb,dev); |
| 1084 | netif_rx(skb); | 1079 | netif_rx(skb); |
| 1085 | dev->last_rx = jiffies; | 1080 | dev->last_rx = jiffies; |
| 1086 | lp->stats.rx_packets++; | 1081 | dev->stats.rx_packets++; |
| 1087 | continue; | 1082 | continue; |
| 1088 | } | 1083 | } |
| 1089 | outw(RxDiscard, ioaddr + EL3_CMD); | 1084 | outw(RxDiscard, ioaddr + EL3_CMD); |
| 1090 | lp->stats.rx_dropped++; | 1085 | dev->stats.rx_dropped++; |
| 1091 | if (el3_debug) | 1086 | if (el3_debug) |
| 1092 | printk("%s: Couldn't allocate a sk_buff of size %d.\n", | 1087 | printk("%s: Couldn't allocate a sk_buff of size %d.\n", |
| 1093 | dev->name, pkt_len); | 1088 | dev->name, pkt_len); |
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c index 6ab84b661d70..105a8c7ca7e9 100644 --- a/drivers/net/3c515.c +++ b/drivers/net/3c515.c | |||
| @@ -310,7 +310,6 @@ struct corkscrew_private { | |||
| 310 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; | 310 | struct sk_buff *tx_skbuff[TX_RING_SIZE]; |
| 311 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ | 311 | unsigned int cur_rx, cur_tx; /* The next free ring entry */ |
| 312 | unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ | 312 | unsigned int dirty_rx, dirty_tx;/* The ring entries to be free()ed. */ |
| 313 | struct net_device_stats stats; | ||
| 314 | struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ | 313 | struct sk_buff *tx_skb; /* Packet being eaten by bus master ctrl. */ |
| 315 | struct timer_list timer; /* Media selection timer. */ | 314 | struct timer_list timer; /* Media selection timer. */ |
| 316 | int capabilities ; /* Adapter capabilities word. */ | 315 | int capabilities ; /* Adapter capabilities word. */ |
| @@ -983,8 +982,8 @@ static void corkscrew_timeout(struct net_device *dev) | |||
| 983 | break; | 982 | break; |
| 984 | outw(TxEnable, ioaddr + EL3_CMD); | 983 | outw(TxEnable, ioaddr + EL3_CMD); |
| 985 | dev->trans_start = jiffies; | 984 | dev->trans_start = jiffies; |
| 986 | vp->stats.tx_errors++; | 985 | dev->stats.tx_errors++; |
| 987 | vp->stats.tx_dropped++; | 986 | dev->stats.tx_dropped++; |
| 988 | netif_wake_queue(dev); | 987 | netif_wake_queue(dev); |
| 989 | } | 988 | } |
| 990 | 989 | ||
| @@ -1050,7 +1049,7 @@ static int corkscrew_start_xmit(struct sk_buff *skb, | |||
| 1050 | } | 1049 | } |
| 1051 | /* Put out the doubleword header... */ | 1050 | /* Put out the doubleword header... */ |
| 1052 | outl(skb->len, ioaddr + TX_FIFO); | 1051 | outl(skb->len, ioaddr + TX_FIFO); |
| 1053 | vp->stats.tx_bytes += skb->len; | 1052 | dev->stats.tx_bytes += skb->len; |
| 1054 | #ifdef VORTEX_BUS_MASTER | 1053 | #ifdef VORTEX_BUS_MASTER |
| 1055 | if (vp->bus_master) { | 1054 | if (vp->bus_master) { |
| 1056 | /* Set the bus-master controller to transfer the packet. */ | 1055 | /* Set the bus-master controller to transfer the packet. */ |
| @@ -1094,9 +1093,9 @@ static int corkscrew_start_xmit(struct sk_buff *skb, | |||
| 1094 | printk("%s: Tx error, status %2.2x.\n", | 1093 | printk("%s: Tx error, status %2.2x.\n", |
| 1095 | dev->name, tx_status); | 1094 | dev->name, tx_status); |
| 1096 | if (tx_status & 0x04) | 1095 | if (tx_status & 0x04) |
| 1097 | vp->stats.tx_fifo_errors++; | 1096 | dev->stats.tx_fifo_errors++; |
| 1098 | if (tx_status & 0x38) | 1097 | if (tx_status & 0x38) |
| 1099 | vp->stats.tx_aborted_errors++; | 1098 | dev->stats.tx_aborted_errors++; |
| 1100 | if (tx_status & 0x30) { | 1099 | if (tx_status & 0x30) { |
| 1101 | int j; | 1100 | int j; |
| 1102 | outw(TxReset, ioaddr + EL3_CMD); | 1101 | outw(TxReset, ioaddr + EL3_CMD); |
| @@ -1257,7 +1256,6 @@ static irqreturn_t corkscrew_interrupt(int irq, void *dev_id) | |||
| 1257 | 1256 | ||
| 1258 | static int corkscrew_rx(struct net_device *dev) | 1257 | static int corkscrew_rx(struct net_device *dev) |
| 1259 | { | 1258 | { |
| 1260 | struct corkscrew_private *vp = netdev_priv(dev); | ||
| 1261 | int ioaddr = dev->base_addr; | 1259 | int ioaddr = dev->base_addr; |
| 1262 | int i; | 1260 | int i; |
| 1263 | short rx_status; | 1261 | short rx_status; |
| @@ -1271,17 +1269,17 @@ static int corkscrew_rx(struct net_device *dev) | |||
| 1271 | if (corkscrew_debug > 2) | 1269 | if (corkscrew_debug > 2) |
| 1272 | printk(" Rx error: status %2.2x.\n", | 1270 | printk(" Rx error: status %2.2x.\n", |
| 1273 | rx_error); | 1271 | rx_error); |
| 1274 | vp->stats.rx_errors++; | 1272 | dev->stats.rx_errors++; |
| 1275 | if (rx_error & 0x01) | 1273 | if (rx_error & 0x01) |
| 1276 | vp->stats.rx_over_errors++; | 1274 | dev->stats.rx_over_errors++; |
| 1277 | if (rx_error & 0x02) | 1275 | if (rx_error & 0x02) |
| 1278 | vp->stats.rx_length_errors++; | 1276 | dev->stats.rx_length_errors++; |
| 1279 | if (rx_error & 0x04) | 1277 | if (rx_error & 0x04) |
| 1280 | vp->stats.rx_frame_errors++; | 1278 | dev->stats.rx_frame_errors++; |
| 1281 | if (rx_error & 0x08) | 1279 | if (rx_error & 0x08) |
| 1282 | vp->stats.rx_crc_errors++; | 1280 | dev->stats.rx_crc_errors++; |
| 1283 | if (rx_error & 0x10) | 1281 | if (rx_error & 0x10) |
| 1284 | vp->stats.rx_length_errors++; | 1282 | dev->stats.rx_length_errors++; |
| 1285 | } else { | 1283 | } else { |
| 1286 | /* The packet length: up to 4.5K!. */ | 1284 | /* The packet length: up to 4.5K!. */ |
| 1287 | short pkt_len = rx_status & 0x1fff; | 1285 | short pkt_len = rx_status & 0x1fff; |
| @@ -1301,8 +1299,8 @@ static int corkscrew_rx(struct net_device *dev) | |||
| 1301 | skb->protocol = eth_type_trans(skb, dev); | 1299 | skb->protocol = eth_type_trans(skb, dev); |
| 1302 | netif_rx(skb); | 1300 | netif_rx(skb); |
| 1303 | dev->last_rx = jiffies; | 1301 | dev->last_rx = jiffies; |
| 1304 | vp->stats.rx_packets++; | 1302 | dev->stats.rx_packets++; |
| 1305 | vp->stats.rx_bytes += pkt_len; | 1303 | dev->stats.rx_bytes += pkt_len; |
| 1306 | /* Wait a limited time to go to next packet. */ | 1304 | /* Wait a limited time to go to next packet. */ |
| 1307 | for (i = 200; i >= 0; i--) | 1305 | for (i = 200; i >= 0; i--) |
| 1308 | if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) | 1306 | if (! (inw(ioaddr + EL3_STATUS) & CmdInProgress)) |
| @@ -1312,7 +1310,7 @@ static int corkscrew_rx(struct net_device *dev) | |||
| 1312 | printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); | 1310 | printk("%s: Couldn't allocate a sk_buff of size %d.\n", dev->name, pkt_len); |
| 1313 | } | 1311 | } |
| 1314 | outw(RxDiscard, ioaddr + EL3_CMD); | 1312 | outw(RxDiscard, ioaddr + EL3_CMD); |
| 1315 | vp->stats.rx_dropped++; | 1313 | dev->stats.rx_dropped++; |
| 1316 | /* Wait a limited time to skip this packet. */ | 1314 | /* Wait a limited time to skip this packet. */ |
| 1317 | for (i = 200; i >= 0; i--) | 1315 | for (i = 200; i >= 0; i--) |
| 1318 | if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) | 1316 | if (!(inw(ioaddr + EL3_STATUS) & CmdInProgress)) |
| @@ -1337,23 +1335,23 @@ static int boomerang_rx(struct net_device *dev) | |||
| 1337 | if (corkscrew_debug > 2) | 1335 | if (corkscrew_debug > 2) |
| 1338 | printk(" Rx error: status %2.2x.\n", | 1336 | printk(" Rx error: status %2.2x.\n", |
| 1339 | rx_error); | 1337 | rx_error); |
| 1340 | vp->stats.rx_errors++; | 1338 | dev->stats.rx_errors++; |
| 1341 | if (rx_error & 0x01) | 1339 | if (rx_error & 0x01) |
| 1342 | vp->stats.rx_over_errors++; | 1340 | dev->stats.rx_over_errors++; |
| 1343 | if (rx_error & 0x02) | 1341 | if (rx_error & 0x02) |
| 1344 | vp->stats.rx_length_errors++; | 1342 | dev->stats.rx_length_errors++; |
| 1345 | if (rx_error & 0x04) | 1343 | if (rx_error & 0x04) |
| 1346 | vp->stats.rx_frame_errors++; | 1344 | dev->stats.rx_frame_errors++; |
| 1347 | if (rx_error & 0x08) | 1345 | if (rx_error & 0x08) |
| 1348 | vp->stats.rx_crc_errors++; | 1346 | dev->stats.rx_crc_errors++; |
| 1349 | if (rx_error & 0x10) | 1347 | if (rx_error & 0x10) |
| 1350 | vp->stats.rx_length_errors++; | 1348 | dev->stats.rx_length_errors++; |
| 1351 | } else { | 1349 | } else { |
| 1352 | /* The packet length: up to 4.5K!. */ | 1350 | /* The packet length: up to 4.5K!. */ |
| 1353 | short pkt_len = rx_status & 0x1fff; | 1351 | short pkt_len = rx_status & 0x1fff; |
| 1354 | struct sk_buff *skb; | 1352 | struct sk_buff *skb; |
| 1355 | 1353 | ||
| 1356 | vp->stats.rx_bytes += pkt_len; | 1354 | dev->stats.rx_bytes += pkt_len; |
| 1357 | if (corkscrew_debug > 4) | 1355 | if (corkscrew_debug > 4) |
| 1358 | printk("Receiving packet size %d status %4.4x.\n", | 1356 | printk("Receiving packet size %d status %4.4x.\n", |
| 1359 | pkt_len, rx_status); | 1357 | pkt_len, rx_status); |
| @@ -1388,7 +1386,7 @@ static int boomerang_rx(struct net_device *dev) | |||
| 1388 | skb->protocol = eth_type_trans(skb, dev); | 1386 | skb->protocol = eth_type_trans(skb, dev); |
| 1389 | netif_rx(skb); | 1387 | netif_rx(skb); |
| 1390 | dev->last_rx = jiffies; | 1388 | dev->last_rx = jiffies; |
| 1391 | vp->stats.rx_packets++; | 1389 | dev->stats.rx_packets++; |
| 1392 | } | 1390 | } |
| 1393 | entry = (++vp->cur_rx) % RX_RING_SIZE; | 1391 | entry = (++vp->cur_rx) % RX_RING_SIZE; |
| 1394 | } | 1392 | } |
| @@ -1475,7 +1473,7 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) | |||
| 1475 | update_stats(dev->base_addr, dev); | 1473 | update_stats(dev->base_addr, dev); |
| 1476 | spin_unlock_irqrestore(&vp->lock, flags); | 1474 | spin_unlock_irqrestore(&vp->lock, flags); |
| 1477 | } | 1475 | } |
| 1478 | return &vp->stats; | 1476 | return &dev->stats; |
| 1479 | } | 1477 | } |
| 1480 | 1478 | ||
| 1481 | /* Update statistics. | 1479 | /* Update statistics. |
| @@ -1487,19 +1485,17 @@ static struct net_device_stats *corkscrew_get_stats(struct net_device *dev) | |||
| 1487 | */ | 1485 | */ |
| 1488 | static void update_stats(int ioaddr, struct net_device *dev) | 1486 | static void update_stats(int ioaddr, struct net_device *dev) |
| 1489 | { | 1487 | { |
| 1490 | struct corkscrew_private *vp = netdev_priv(dev); | ||
| 1491 | |||
| 1492 | /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ | 1488 | /* Unlike the 3c5x9 we need not turn off stats updates while reading. */ |
| 1493 | /* Switch to the stats window, and read everything. */ | 1489 | /* Switch to the stats window, and read everything. */ |
| 1494 | EL3WINDOW(6); | 1490 | EL3WINDOW(6); |
| 1495 | vp->stats.tx_carrier_errors += inb(ioaddr + 0); | 1491 | dev->stats.tx_carrier_errors += inb(ioaddr + 0); |
| 1496 | vp->stats.tx_heartbeat_errors += inb(ioaddr + 1); | 1492 | dev->stats.tx_heartbeat_errors += inb(ioaddr + 1); |
| 1497 | /* Multiple collisions. */ inb(ioaddr + 2); | 1493 | /* Multiple collisions. */ inb(ioaddr + 2); |
| 1498 | vp->stats.collisions += inb(ioaddr + 3); | 1494 | dev->stats.collisions += inb(ioaddr + 3); |
| 1499 | vp->stats.tx_window_errors += inb(ioaddr + 4); | 1495 | dev->stats.tx_window_errors += inb(ioaddr + 4); |
| 1500 | vp->stats.rx_fifo_errors += inb(ioaddr + 5); | 1496 | dev->stats.rx_fifo_errors += inb(ioaddr + 5); |
| 1501 | vp->stats.tx_packets += inb(ioaddr + 6); | 1497 | dev->stats.tx_packets += inb(ioaddr + 6); |
| 1502 | vp->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; | 1498 | dev->stats.tx_packets += (inb(ioaddr + 9) & 0x30) << 4; |
| 1503 | /* Rx packets */ inb(ioaddr + 7); | 1499 | /* Rx packets */ inb(ioaddr + 7); |
| 1504 | /* Must read to clear */ | 1500 | /* Must read to clear */ |
| 1505 | /* Tx deferrals */ inb(ioaddr + 8); | 1501 | /* Tx deferrals */ inb(ioaddr + 8); |
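The 3c505, 3c509 and 3c515 hunks above all make the same conversion: the private struct net_device_stats copy is dropped in favour of the counters already embedded in struct net_device. A minimal sketch of the pattern, using a made-up "foo" driver purely for illustration:

	/* before: each driver kept its own copy in its private struct */
	struct foo_private {
		struct net_device_stats stats;		/* field removed */
		/* ... */
	};
	lp->stats.tx_errors++;

	/* after: bump the counters built into struct net_device */
	dev->stats.tx_errors++;

	static struct net_device_stats *foo_get_stats(struct net_device *dev)
	{
		return &dev->stats;			/* no private copy to return */
	}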
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index f90a86ba7e2f..af46341827f2 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
| @@ -2593,6 +2593,7 @@ config BNX2X | |||
| 2593 | To compile this driver as a module, choose M here: the module | 2593 | To compile this driver as a module, choose M here: the module |
| 2594 | will be called bnx2x. This is recommended. | 2594 | will be called bnx2x. This is recommended. |
| 2595 | 2595 | ||
| 2596 | source "drivers/net/sfc/Kconfig" | ||
| 2596 | 2597 | ||
| 2597 | endif # NETDEV_10000 | 2598 | endif # NETDEV_10000 |
| 2598 | 2599 | ||
diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 2f1f3f2739fd..dcbfe8421154 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile | |||
| @@ -253,3 +253,5 @@ obj-$(CONFIG_FS_ENET) += fs_enet/ | |||
| 253 | obj-$(CONFIG_NETXEN_NIC) += netxen/ | 253 | obj-$(CONFIG_NETXEN_NIC) += netxen/ |
| 254 | obj-$(CONFIG_NIU) += niu.o | 254 | obj-$(CONFIG_NIU) += niu.o |
| 255 | obj-$(CONFIG_VIRTIO_NET) += virtio_net.o | 255 | obj-$(CONFIG_VIRTIO_NET) += virtio_net.o |
| 256 | obj-$(CONFIG_SFC) += sfc/ | ||
| 257 | |||
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig index f9cc2b621fe2..8eda6eeb43b7 100644 --- a/drivers/net/arm/Kconfig +++ b/drivers/net/arm/Kconfig | |||
| @@ -47,3 +47,11 @@ config EP93XX_ETH | |||
| 47 | help | 47 | help |
| 48 | This is a driver for the ethernet hardware included in EP93xx CPUs. | 48 | This is a driver for the ethernet hardware included in EP93xx CPUs. |
| 49 | Say Y if you are building a kernel for EP93xx based devices. | 49 | Say Y if you are building a kernel for EP93xx based devices. |
| 50 | |||
| 51 | config IXP4XX_ETH | ||
| 52 | tristate "Intel IXP4xx Ethernet support" | ||
| 53 | depends on ARM && ARCH_IXP4XX && IXP4XX_NPE && IXP4XX_QMGR | ||
| 54 | select MII | ||
| 55 | help | ||
| 56 | Say Y here if you want to use built-in Ethernet ports | ||
| 57 | on IXP4xx processor. | ||
diff --git a/drivers/net/arm/Makefile b/drivers/net/arm/Makefile index a4c868278e11..7c812ac2b6a5 100644 --- a/drivers/net/arm/Makefile +++ b/drivers/net/arm/Makefile | |||
| @@ -9,3 +9,4 @@ obj-$(CONFIG_ARM_ETHER3) += ether3.o | |||
| 9 | obj-$(CONFIG_ARM_ETHER1) += ether1.o | 9 | obj-$(CONFIG_ARM_ETHER1) += ether1.o |
| 10 | obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o | 10 | obj-$(CONFIG_ARM_AT91_ETHER) += at91_ether.o |
| 11 | obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o | 11 | obj-$(CONFIG_EP93XX_ETH) += ep93xx_eth.o |
| 12 | obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o | ||
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c new file mode 100644 index 000000000000..c617b64c288e --- /dev/null +++ b/drivers/net/arm/ixp4xx_eth.c | |||
| @@ -0,0 +1,1265 @@ | |||
| 1 | /* | ||
| 2 | * Intel IXP4xx Ethernet driver for Linux | ||
| 3 | * | ||
| 4 | * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of version 2 of the GNU General Public License | ||
| 8 | * as published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | * Ethernet port config (0x00 is not present on IXP42X): | ||
| 11 | * | ||
| 12 | * logical port 0x00 0x10 0x20 | ||
| 13 | * NPE 0 (NPE-A) 1 (NPE-B) 2 (NPE-C) | ||
| 14 | * physical PortId 2 0 1 | ||
| 15 | * TX queue 23 24 25 | ||
| 16 | * RX-free queue 26 27 28 | ||
| 17 | * TX-done queue is always 31, per-port RX and TX-ready queues are configurable | ||
| 18 | * | ||
| 19 | * | ||
| 20 | * Queue entries: | ||
| 21 | * bits 0 -> 1 - NPE ID (RX and TX-done) | ||
| 22 | * bits 0 -> 2 - priority (TX, per 802.1D) | ||
| 23 | * bits 3 -> 4 - port ID (user-set?) | ||
| 24 | * bits 5 -> 31 - physical descriptor address | ||
| 25 | */ | ||
| 26 | |||
| 27 | #include <linux/delay.h> | ||
| 28 | #include <linux/dma-mapping.h> | ||
| 29 | #include <linux/dmapool.h> | ||
| 30 | #include <linux/etherdevice.h> | ||
| 31 | #include <linux/io.h> | ||
| 32 | #include <linux/kernel.h> | ||
| 33 | #include <linux/mii.h> | ||
| 34 | #include <linux/platform_device.h> | ||
| 35 | #include <asm/arch/npe.h> | ||
| 36 | #include <asm/arch/qmgr.h> | ||
| 37 | |||
| 38 | #define DEBUG_QUEUES 0 | ||
| 39 | #define DEBUG_DESC 0 | ||
| 40 | #define DEBUG_RX 0 | ||
| 41 | #define DEBUG_TX 0 | ||
| 42 | #define DEBUG_PKT_BYTES 0 | ||
| 43 | #define DEBUG_MDIO 0 | ||
| 44 | #define DEBUG_CLOSE 0 | ||
| 45 | |||
| 46 | #define DRV_NAME "ixp4xx_eth" | ||
| 47 | |||
| 48 | #define MAX_NPES 3 | ||
| 49 | |||
| 50 | #define RX_DESCS 64 /* also length of all RX queues */ | ||
| 51 | #define TX_DESCS 16 /* also length of all TX queues */ | ||
| 52 | #define TXDONE_QUEUE_LEN 64 /* dwords */ | ||
| 53 | |||
| 54 | #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS)) | ||
| 55 | #define REGS_SIZE 0x1000 | ||
| 56 | #define MAX_MRU 1536 /* 0x600 */ | ||
| 57 | #define RX_BUFF_SIZE ALIGN((NET_IP_ALIGN) + MAX_MRU, 4) | ||
| 58 | |||
| 59 | #define NAPI_WEIGHT 16 | ||
| 60 | #define MDIO_INTERVAL (3 * HZ) | ||
| 61 | #define MAX_MDIO_RETRIES 100 /* microseconds, typically 30 cycles */ | ||
| 62 | #define MAX_MII_RESET_RETRIES 100 /* mdio_read() cycles, typically 4 */ | ||
| 63 | #define MAX_CLOSE_WAIT 1000 /* microseconds, typically 2-3 cycles */ | ||
| 64 | |||
| 65 | #define NPE_ID(port_id) ((port_id) >> 4) | ||
| 66 | #define PHYSICAL_ID(port_id) ((NPE_ID(port_id) + 2) % 3) | ||
| 67 | #define TX_QUEUE(port_id) (NPE_ID(port_id) + 23) | ||
| 68 | #define RXFREE_QUEUE(port_id) (NPE_ID(port_id) + 26) | ||
| 69 | #define TXDONE_QUEUE 31 | ||
| 70 | |||
| 71 | /* TX Control Registers */ | ||
| 72 | #define TX_CNTRL0_TX_EN 0x01 | ||
| 73 | #define TX_CNTRL0_HALFDUPLEX 0x02 | ||
| 74 | #define TX_CNTRL0_RETRY 0x04 | ||
| 75 | #define TX_CNTRL0_PAD_EN 0x08 | ||
| 76 | #define TX_CNTRL0_APPEND_FCS 0x10 | ||
| 77 | #define TX_CNTRL0_2DEFER 0x20 | ||
| 78 | #define TX_CNTRL0_RMII 0x40 /* reduced MII */ | ||
| 79 | #define TX_CNTRL1_RETRIES 0x0F /* 4 bits */ | ||
| 80 | |||
| 81 | /* RX Control Registers */ | ||
| 82 | #define RX_CNTRL0_RX_EN 0x01 | ||
| 83 | #define RX_CNTRL0_PADSTRIP_EN 0x02 | ||
| 84 | #define RX_CNTRL0_SEND_FCS 0x04 | ||
| 85 | #define RX_CNTRL0_PAUSE_EN 0x08 | ||
| 86 | #define RX_CNTRL0_LOOP_EN 0x10 | ||
| 87 | #define RX_CNTRL0_ADDR_FLTR_EN 0x20 | ||
| 88 | #define RX_CNTRL0_RX_RUNT_EN 0x40 | ||
| 89 | #define RX_CNTRL0_BCAST_DIS 0x80 | ||
| 90 | #define RX_CNTRL1_DEFER_EN 0x01 | ||
| 91 | |||
| 92 | /* Core Control Register */ | ||
| 93 | #define CORE_RESET 0x01 | ||
| 94 | #define CORE_RX_FIFO_FLUSH 0x02 | ||
| 95 | #define CORE_TX_FIFO_FLUSH 0x04 | ||
| 96 | #define CORE_SEND_JAM 0x08 | ||
| 97 | #define CORE_MDC_EN 0x10 /* MDIO using NPE-B ETH-0 only */ | ||
| 98 | |||
| 99 | #define DEFAULT_TX_CNTRL0 (TX_CNTRL0_TX_EN | TX_CNTRL0_RETRY | \ | ||
| 100 | TX_CNTRL0_PAD_EN | TX_CNTRL0_APPEND_FCS | \ | ||
| 101 | TX_CNTRL0_2DEFER) | ||
| 102 | #define DEFAULT_RX_CNTRL0 RX_CNTRL0_RX_EN | ||
| 103 | #define DEFAULT_CORE_CNTRL CORE_MDC_EN | ||
| 104 | |||
| 105 | |||
| 106 | /* NPE message codes */ | ||
| 107 | #define NPE_GETSTATUS 0x00 | ||
| 108 | #define NPE_EDB_SETPORTADDRESS 0x01 | ||
| 109 | #define NPE_EDB_GETMACADDRESSDATABASE 0x02 | ||
| 110 | #define NPE_EDB_SETMACADDRESSSDATABASE 0x03 | ||
| 111 | #define NPE_GETSTATS 0x04 | ||
| 112 | #define NPE_RESETSTATS 0x05 | ||
| 113 | #define NPE_SETMAXFRAMELENGTHS 0x06 | ||
| 114 | #define NPE_VLAN_SETRXTAGMODE 0x07 | ||
| 115 | #define NPE_VLAN_SETDEFAULTRXVID 0x08 | ||
| 116 | #define NPE_VLAN_SETPORTVLANTABLEENTRY 0x09 | ||
| 117 | #define NPE_VLAN_SETPORTVLANTABLERANGE 0x0A | ||
| 118 | #define NPE_VLAN_SETRXQOSENTRY 0x0B | ||
| 119 | #define NPE_VLAN_SETPORTIDEXTRACTIONMODE 0x0C | ||
| 120 | #define NPE_STP_SETBLOCKINGSTATE 0x0D | ||
| 121 | #define NPE_FW_SETFIREWALLMODE 0x0E | ||
| 122 | #define NPE_PC_SETFRAMECONTROLDURATIONID 0x0F | ||
| 123 | #define NPE_PC_SETAPMACTABLE 0x11 | ||
| 124 | #define NPE_SETLOOPBACK_MODE 0x12 | ||
| 125 | #define NPE_PC_SETBSSIDTABLE 0x13 | ||
| 126 | #define NPE_ADDRESS_FILTER_CONFIG 0x14 | ||
| 127 | #define NPE_APPENDFCSCONFIG 0x15 | ||
| 128 | #define NPE_NOTIFY_MAC_RECOVERY_DONE 0x16 | ||
| 129 | #define NPE_MAC_RECOVERY_START 0x17 | ||
| 130 | |||
| 131 | |||
| 132 | #ifdef __ARMEB__ | ||
| 133 | typedef struct sk_buff buffer_t; | ||
| 134 | #define free_buffer dev_kfree_skb | ||
| 135 | #define free_buffer_irq dev_kfree_skb_irq | ||
| 136 | #else | ||
| 137 | typedef void buffer_t; | ||
| 138 | #define free_buffer kfree | ||
| 139 | #define free_buffer_irq kfree | ||
| 140 | #endif | ||
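An editorial note on the block above, inferred from the RX and TX paths later in the file rather than stated in the original source:

/*
 * On big-endian ARM (__ARMEB__) the NPE shares ordinary sk_buffs with
 * the network stack, so buffer_t is struct sk_buff.  On little-endian
 * builds the driver keeps plain kmalloc()ed bounce buffers instead and
 * copies frames to/from sk_buffs with memcpy_swab32(), swapping each
 * 32-bit word - the NPE side apparently expects big-endian word order.
 */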
| 141 | |||
| 142 | struct eth_regs { | ||
| 143 | u32 tx_control[2], __res1[2]; /* 000 */ | ||
| 144 | u32 rx_control[2], __res2[2]; /* 010 */ | ||
| 145 | u32 random_seed, __res3[3]; /* 020 */ | ||
| 146 | u32 partial_empty_threshold, __res4; /* 030 */ | ||
| 147 | u32 partial_full_threshold, __res5; /* 038 */ | ||
| 148 | u32 tx_start_bytes, __res6[3]; /* 040 */ | ||
| 149 | u32 tx_deferral, rx_deferral, __res7[2];/* 050 */ | ||
| 150 | u32 tx_2part_deferral[2], __res8[2]; /* 060 */ | ||
| 151 | u32 slot_time, __res9[3]; /* 070 */ | ||
| 152 | u32 mdio_command[4]; /* 080 */ | ||
| 153 | u32 mdio_status[4]; /* 090 */ | ||
| 154 | u32 mcast_mask[6], __res10[2]; /* 0A0 */ | ||
| 155 | u32 mcast_addr[6], __res11[2]; /* 0C0 */ | ||
| 156 | u32 int_clock_threshold, __res12[3]; /* 0E0 */ | ||
| 157 | u32 hw_addr[6], __res13[61]; /* 0F0 */ | ||
| 158 | u32 core_control; /* 1FC */ | ||
| 159 | }; | ||
| 160 | |||
| 161 | struct port { | ||
| 162 | struct resource *mem_res; | ||
| 163 | struct eth_regs __iomem *regs; | ||
| 164 | struct npe *npe; | ||
| 165 | struct net_device *netdev; | ||
| 166 | struct napi_struct napi; | ||
| 167 | struct net_device_stats stat; | ||
| 168 | struct mii_if_info mii; | ||
| 169 | struct delayed_work mdio_thread; | ||
| 170 | struct eth_plat_info *plat; | ||
| 171 | buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; | ||
| 172 | struct desc *desc_tab; /* coherent */ | ||
| 173 | u32 desc_tab_phys; | ||
| 174 | int id; /* logical port ID */ | ||
| 175 | u16 mii_bmcr; | ||
| 176 | }; | ||
| 177 | |||
| 178 | /* NPE message structure */ | ||
| 179 | struct msg { | ||
| 180 | #ifdef __ARMEB__ | ||
| 181 | u8 cmd, eth_id, byte2, byte3; | ||
| 182 | u8 byte4, byte5, byte6, byte7; | ||
| 183 | #else | ||
| 184 | u8 byte3, byte2, eth_id, cmd; | ||
| 185 | u8 byte7, byte6, byte5, byte4; | ||
| 186 | #endif | ||
| 187 | }; | ||
| 188 | |||
| 189 | /* Ethernet packet descriptor */ | ||
| 190 | struct desc { | ||
| 191 | u32 next; /* pointer to next buffer, unused */ | ||
| 192 | |||
| 193 | #ifdef __ARMEB__ | ||
| 194 | u16 buf_len; /* buffer length */ | ||
| 195 | u16 pkt_len; /* packet length */ | ||
| 196 | u32 data; /* pointer to data buffer in RAM */ | ||
| 197 | u8 dest_id; | ||
| 198 | u8 src_id; | ||
| 199 | u16 flags; | ||
| 200 | u8 qos; | ||
| 201 | u8 padlen; | ||
| 202 | u16 vlan_tci; | ||
| 203 | #else | ||
| 204 | u16 pkt_len; /* packet length */ | ||
| 205 | u16 buf_len; /* buffer length */ | ||
| 206 | u32 data; /* pointer to data buffer in RAM */ | ||
| 207 | u16 flags; | ||
| 208 | u8 src_id; | ||
| 209 | u8 dest_id; | ||
| 210 | u16 vlan_tci; | ||
| 211 | u8 padlen; | ||
| 212 | u8 qos; | ||
| 213 | #endif | ||
| 214 | |||
| 215 | #ifdef __ARMEB__ | ||
| 216 | u8 dst_mac_0, dst_mac_1, dst_mac_2, dst_mac_3; | ||
| 217 | u8 dst_mac_4, dst_mac_5, src_mac_0, src_mac_1; | ||
| 218 | u8 src_mac_2, src_mac_3, src_mac_4, src_mac_5; | ||
| 219 | #else | ||
| 220 | u8 dst_mac_3, dst_mac_2, dst_mac_1, dst_mac_0; | ||
| 221 | u8 src_mac_1, src_mac_0, dst_mac_5, dst_mac_4; | ||
| 222 | u8 src_mac_5, src_mac_4, src_mac_3, src_mac_2; | ||
| 223 | #endif | ||
| 224 | }; | ||
| 225 | |||
| 226 | |||
| 227 | #define rx_desc_phys(port, n) ((port)->desc_tab_phys + \ | ||
| 228 | (n) * sizeof(struct desc)) | ||
| 229 | #define rx_desc_ptr(port, n) (&(port)->desc_tab[n]) | ||
| 230 | |||
| 231 | #define tx_desc_phys(port, n) ((port)->desc_tab_phys + \ | ||
| 232 | ((n) + RX_DESCS) * sizeof(struct desc)) | ||
| 233 | #define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS]) | ||
| 234 | |||
| 235 | #ifndef __ARMEB__ | ||
| 236 | static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt) | ||
| 237 | { | ||
| 238 | int i; | ||
| 239 | for (i = 0; i < cnt; i++) | ||
| 240 | dest[i] = swab32(src[i]); | ||
| 241 | } | ||
| 242 | #endif | ||
| 243 | |||
| 244 | static spinlock_t mdio_lock; | ||
| 245 | static struct eth_regs __iomem *mdio_regs; /* mdio command and status only */ | ||
| 246 | static int ports_open; | ||
| 247 | static struct port *npe_port_tab[MAX_NPES]; | ||
| 248 | static struct dma_pool *dma_pool; | ||
| 249 | |||
| 250 | |||
| 251 | static u16 mdio_cmd(struct net_device *dev, int phy_id, int location, | ||
| 252 | int write, u16 cmd) | ||
| 253 | { | ||
| 254 | int cycles = 0; | ||
| 255 | |||
| 256 | if (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80) { | ||
| 257 | printk(KERN_ERR "%s: MII not ready to transmit\n", dev->name); | ||
| 258 | return 0; | ||
| 259 | } | ||
| 260 | |||
| 261 | if (write) { | ||
| 262 | __raw_writel(cmd & 0xFF, &mdio_regs->mdio_command[0]); | ||
| 263 | __raw_writel(cmd >> 8, &mdio_regs->mdio_command[1]); | ||
| 264 | } | ||
| 265 | __raw_writel(((phy_id << 5) | location) & 0xFF, | ||
| 266 | &mdio_regs->mdio_command[2]); | ||
| 267 | __raw_writel((phy_id >> 3) | (write << 2) | 0x80 /* GO */, | ||
| 268 | &mdio_regs->mdio_command[3]); | ||
| 269 | |||
| 270 | while ((cycles < MAX_MDIO_RETRIES) && | ||
| 271 | (__raw_readl(&mdio_regs->mdio_command[3]) & 0x80)) { | ||
| 272 | udelay(1); | ||
| 273 | cycles++; | ||
| 274 | } | ||
| 275 | |||
| 276 | if (cycles == MAX_MDIO_RETRIES) { | ||
| 277 | printk(KERN_ERR "%s: MII write failed\n", dev->name); | ||
| 278 | return 0; | ||
| 279 | } | ||
| 280 | |||
| 281 | #if DEBUG_MDIO | ||
| 282 | printk(KERN_DEBUG "%s: mdio_cmd() took %i cycles\n", dev->name, | ||
| 283 | cycles); | ||
| 284 | #endif | ||
| 285 | |||
| 286 | if (write) | ||
| 287 | return 0; | ||
| 288 | |||
| 289 | if (__raw_readl(&mdio_regs->mdio_status[3]) & 0x80) { | ||
| 290 | printk(KERN_ERR "%s: MII read failed\n", dev->name); | ||
| 291 | return 0; | ||
| 292 | } | ||
| 293 | |||
| 294 | return (__raw_readl(&mdio_regs->mdio_status[0]) & 0xFF) | | ||
| 295 | (__raw_readl(&mdio_regs->mdio_status[1]) << 8); | ||
| 296 | } | ||
| 297 | |||
| 298 | static int mdio_read(struct net_device *dev, int phy_id, int location) | ||
| 299 | { | ||
| 300 | unsigned long flags; | ||
| 301 | u16 val; | ||
| 302 | |||
| 303 | spin_lock_irqsave(&mdio_lock, flags); | ||
| 304 | val = mdio_cmd(dev, phy_id, location, 0, 0); | ||
| 305 | spin_unlock_irqrestore(&mdio_lock, flags); | ||
| 306 | return val; | ||
| 307 | } | ||
| 308 | |||
| 309 | static void mdio_write(struct net_device *dev, int phy_id, int location, | ||
| 310 | int val) | ||
| 311 | { | ||
| 312 | unsigned long flags; | ||
| 313 | |||
| 314 | spin_lock_irqsave(&mdio_lock, flags); | ||
| 315 | mdio_cmd(dev, phy_id, location, 1, val); | ||
| 316 | spin_unlock_irqrestore(&mdio_lock, flags); | ||
| 317 | } | ||
| 318 | |||
| 319 | static void phy_reset(struct net_device *dev, int phy_id) | ||
| 320 | { | ||
| 321 | struct port *port = netdev_priv(dev); | ||
| 322 | int cycles = 0; | ||
| 323 | |||
| 324 | mdio_write(dev, phy_id, MII_BMCR, port->mii_bmcr | BMCR_RESET); | ||
| 325 | |||
| 326 | while (cycles < MAX_MII_RESET_RETRIES) { | ||
| 327 | if (!(mdio_read(dev, phy_id, MII_BMCR) & BMCR_RESET)) { | ||
| 328 | #if DEBUG_MDIO | ||
| 329 | printk(KERN_DEBUG "%s: phy_reset() took %i cycles\n", | ||
| 330 | dev->name, cycles); | ||
| 331 | #endif | ||
| 332 | return; | ||
| 333 | } | ||
| 334 | udelay(1); | ||
| 335 | cycles++; | ||
| 336 | } | ||
| 337 | |||
| 338 | printk(KERN_ERR "%s: MII reset failed\n", dev->name); | ||
| 339 | } | ||
| 340 | |||
| 341 | static void eth_set_duplex(struct port *port) | ||
| 342 | { | ||
| 343 | if (port->mii.full_duplex) | ||
| 344 | __raw_writel(DEFAULT_TX_CNTRL0 & ~TX_CNTRL0_HALFDUPLEX, | ||
| 345 | &port->regs->tx_control[0]); | ||
| 346 | else | ||
| 347 | __raw_writel(DEFAULT_TX_CNTRL0 | TX_CNTRL0_HALFDUPLEX, | ||
| 348 | &port->regs->tx_control[0]); | ||
| 349 | } | ||
| 350 | |||
| 351 | |||
| 352 | static void phy_check_media(struct port *port, int init) | ||
| 353 | { | ||
| 354 | if (mii_check_media(&port->mii, 1, init)) | ||
| 355 | eth_set_duplex(port); | ||
| 356 | if (port->mii.force_media) { /* mii_check_media() can't handle forced media */ | ||
| 357 | struct net_device *dev = port->netdev; | ||
| 358 | int cur_link = mii_link_ok(&port->mii); | ||
| 359 | int prev_link = netif_carrier_ok(dev); | ||
| 360 | |||
| 361 | if (!prev_link && cur_link) { | ||
| 362 | printk(KERN_INFO "%s: link up\n", dev->name); | ||
| 363 | netif_carrier_on(dev); | ||
| 364 | } else if (prev_link && !cur_link) { | ||
| 365 | printk(KERN_INFO "%s: link down\n", dev->name); | ||
| 366 | netif_carrier_off(dev); | ||
| 367 | } | ||
| 368 | } | ||
| 369 | } | ||
| 370 | |||
| 371 | |||
| 372 | static void mdio_thread(struct work_struct *work) | ||
| 373 | { | ||
| 374 | struct port *port = container_of(work, struct port, mdio_thread.work); | ||
| 375 | |||
| 376 | phy_check_media(port, 0); | ||
| 377 | schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL); | ||
| 378 | } | ||
| 379 | |||
| 380 | |||
| 381 | static inline void debug_pkt(struct net_device *dev, const char *func, | ||
| 382 | u8 *data, int len) | ||
| 383 | { | ||
| 384 | #if DEBUG_PKT_BYTES | ||
| 385 | int i; | ||
| 386 | |||
| 387 | printk(KERN_DEBUG "%s: %s(%i) ", dev->name, func, len); | ||
| 388 | for (i = 0; i < len; i++) { | ||
| 389 | if (i >= DEBUG_PKT_BYTES) | ||
| 390 | break; | ||
| 391 | printk("%s%02X", | ||
| 392 | ((i == 6) || (i == 12) || (i >= 14)) ? " " : "", | ||
| 393 | data[i]); | ||
| 394 | } | ||
| 395 | printk("\n"); | ||
| 396 | #endif | ||
| 397 | } | ||
| 398 | |||
| 399 | |||
| 400 | static inline void debug_desc(u32 phys, struct desc *desc) | ||
| 401 | { | ||
| 402 | #if DEBUG_DESC | ||
| 403 | printk(KERN_DEBUG "%X: %X %3X %3X %08X %2X < %2X %4X %X" | ||
| 404 | " %X %X %02X%02X%02X%02X%02X%02X < %02X%02X%02X%02X%02X%02X\n", | ||
| 405 | phys, desc->next, desc->buf_len, desc->pkt_len, | ||
| 406 | desc->data, desc->dest_id, desc->src_id, desc->flags, | ||
| 407 | desc->qos, desc->padlen, desc->vlan_tci, | ||
| 408 | desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, | ||
| 409 | desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, | ||
| 410 | desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, | ||
| 411 | desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); | ||
| 412 | #endif | ||
| 413 | } | ||
| 414 | |||
| 415 | static inline void debug_queue(unsigned int queue, int is_get, u32 phys) | ||
| 416 | { | ||
| 417 | #if DEBUG_QUEUES | ||
| 418 | static struct { | ||
| 419 | int queue; | ||
| 420 | char *name; | ||
| 421 | } names[] = { | ||
| 422 | { TX_QUEUE(0x10), "TX#0 " }, | ||
| 423 | { TX_QUEUE(0x20), "TX#1 " }, | ||
| 424 | { TX_QUEUE(0x00), "TX#2 " }, | ||
| 425 | { RXFREE_QUEUE(0x10), "RX-free#0 " }, | ||
| 426 | { RXFREE_QUEUE(0x20), "RX-free#1 " }, | ||
| 427 | { RXFREE_QUEUE(0x00), "RX-free#2 " }, | ||
| 428 | { TXDONE_QUEUE, "TX-done " }, | ||
| 429 | }; | ||
| 430 | int i; | ||
| 431 | |||
| 432 | for (i = 0; i < ARRAY_SIZE(names); i++) | ||
| 433 | if (names[i].queue == queue) | ||
| 434 | break; | ||
| 435 | |||
| 436 | printk(KERN_DEBUG "Queue %i %s%s %X\n", queue, | ||
| 437 | i < ARRAY_SIZE(names) ? names[i].name : "", | ||
| 438 | is_get ? "->" : "<-", phys); | ||
| 439 | #endif | ||
| 440 | } | ||
| 441 | |||
| 442 | static inline u32 queue_get_entry(unsigned int queue) | ||
| 443 | { | ||
| 444 | u32 phys = qmgr_get_entry(queue); | ||
| 445 | debug_queue(queue, 1, phys); | ||
| 446 | return phys; | ||
| 447 | } | ||
| 448 | |||
| 449 | static inline int queue_get_desc(unsigned int queue, struct port *port, | ||
| 450 | int is_tx) | ||
| 451 | { | ||
| 452 | u32 phys, tab_phys, n_desc; | ||
| 453 | struct desc *tab; | ||
| 454 | |||
| 455 | if (!(phys = queue_get_entry(queue))) | ||
| 456 | return -1; | ||
| 457 | |||
| 458 | phys &= ~0x1F; /* mask out non-address bits */ | ||
| 459 | tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0); | ||
| 460 | tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0); | ||
| 461 | n_desc = (phys - tab_phys) / sizeof(struct desc); | ||
| 462 | BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS)); | ||
| 463 | debug_desc(phys, &tab[n_desc]); | ||
| 464 | BUG_ON(tab[n_desc].next); | ||
| 465 | return n_desc; | ||
| 466 | } | ||
| 467 | |||
| 468 | static inline void queue_put_desc(unsigned int queue, u32 phys, | ||
| 469 | struct desc *desc) | ||
| 470 | { | ||
| 471 | debug_queue(queue, 0, phys); | ||
| 472 | debug_desc(phys, desc); | ||
| 473 | BUG_ON(phys & 0x1F); | ||
| 474 | qmgr_put_entry(queue, phys); | ||
| 475 | BUG_ON(qmgr_stat_overflow(queue)); | ||
| 476 | } | ||
| 477 | |||
| 478 | |||
| 479 | static inline void dma_unmap_tx(struct port *port, struct desc *desc) | ||
| 480 | { | ||
| 481 | #ifdef __ARMEB__ | ||
| 482 | dma_unmap_single(&port->netdev->dev, desc->data, | ||
| 483 | desc->buf_len, DMA_TO_DEVICE); | ||
| 484 | #else | ||
| 485 | dma_unmap_single(&port->netdev->dev, desc->data & ~3, | ||
| 486 | ALIGN((desc->data & 3) + desc->buf_len, 4), | ||
| 487 | DMA_TO_DEVICE); | ||
| 488 | #endif | ||
| 489 | } | ||
| 490 | |||
| 491 | |||
| 492 | static void eth_rx_irq(void *pdev) | ||
| 493 | { | ||
| 494 | struct net_device *dev = pdev; | ||
| 495 | struct port *port = netdev_priv(dev); | ||
| 496 | |||
| 497 | #if DEBUG_RX | ||
| 498 | printk(KERN_DEBUG "%s: eth_rx_irq\n", dev->name); | ||
| 499 | #endif | ||
| 500 | qmgr_disable_irq(port->plat->rxq); | ||
| 501 | netif_rx_schedule(dev, &port->napi); | ||
| 502 | } | ||
| 503 | |||
| 504 | static int eth_poll(struct napi_struct *napi, int budget) | ||
| 505 | { | ||
| 506 | struct port *port = container_of(napi, struct port, napi); | ||
| 507 | struct net_device *dev = port->netdev; | ||
| 508 | unsigned int rxq = port->plat->rxq, rxfreeq = RXFREE_QUEUE(port->id); | ||
| 509 | int received = 0; | ||
| 510 | |||
| 511 | #if DEBUG_RX | ||
| 512 | printk(KERN_DEBUG "%s: eth_poll\n", dev->name); | ||
| 513 | #endif | ||
| 514 | |||
| 515 | while (received < budget) { | ||
| 516 | struct sk_buff *skb; | ||
| 517 | struct desc *desc; | ||
| 518 | int n; | ||
| 519 | #ifdef __ARMEB__ | ||
| 520 | struct sk_buff *temp; | ||
| 521 | u32 phys; | ||
| 522 | #endif | ||
| 523 | |||
| 524 | if ((n = queue_get_desc(rxq, port, 0)) < 0) { | ||
| 525 | received = 0; /* No packet received */ | ||
| 526 | #if DEBUG_RX | ||
| 527 | printk(KERN_DEBUG "%s: eth_poll netif_rx_complete\n", | ||
| 528 | dev->name); | ||
| 529 | #endif | ||
| 530 | netif_rx_complete(dev, napi); | ||
| 531 | qmgr_enable_irq(rxq); | ||
| 532 | if (!qmgr_stat_empty(rxq) && | ||
| 533 | netif_rx_reschedule(dev, napi)) { | ||
| 534 | #if DEBUG_RX | ||
| 535 | printk(KERN_DEBUG "%s: eth_poll" | ||
| 536 | " netif_rx_reschedule successed\n", | ||
| 537 | dev->name); | ||
| 538 | #endif | ||
| 539 | qmgr_disable_irq(rxq); | ||
| 540 | continue; | ||
| 541 | } | ||
| 542 | #if DEBUG_RX | ||
| 543 | printk(KERN_DEBUG "%s: eth_poll all done\n", | ||
| 544 | dev->name); | ||
| 545 | #endif | ||
| 546 | return 0; /* all work done */ | ||
| 547 | } | ||
| 548 | |||
| 549 | desc = rx_desc_ptr(port, n); | ||
| 550 | |||
| 551 | #ifdef __ARMEB__ | ||
| 552 | if ((skb = netdev_alloc_skb(dev, RX_BUFF_SIZE))) { | ||
| 553 | phys = dma_map_single(&dev->dev, skb->data, | ||
| 554 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | ||
| 555 | if (dma_mapping_error(phys)) { | ||
| 556 | dev_kfree_skb(skb); | ||
| 557 | skb = NULL; | ||
| 558 | } | ||
| 559 | } | ||
| 560 | #else | ||
| 561 | skb = netdev_alloc_skb(dev, | ||
| 562 | ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); | ||
| 563 | #endif | ||
| 564 | |||
| 565 | if (!skb) { | ||
| 566 | port->stat.rx_dropped++; | ||
| 567 | /* put the desc back on RX-ready queue */ | ||
| 568 | desc->buf_len = MAX_MRU; | ||
| 569 | desc->pkt_len = 0; | ||
| 570 | queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); | ||
| 571 | continue; | ||
| 572 | } | ||
| 573 | |||
| 574 | /* process received frame */ | ||
| 575 | #ifdef __ARMEB__ | ||
| 576 | temp = skb; | ||
| 577 | skb = port->rx_buff_tab[n]; | ||
| 578 | dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, | ||
| 579 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | ||
| 580 | #else | ||
| 581 | dma_sync_single(&dev->dev, desc->data - NET_IP_ALIGN, | ||
| 582 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | ||
| 583 | memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n], | ||
| 584 | ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); | ||
| 585 | #endif | ||
| 586 | skb_reserve(skb, NET_IP_ALIGN); | ||
| 587 | skb_put(skb, desc->pkt_len); | ||
| 588 | |||
| 589 | debug_pkt(dev, "eth_poll", skb->data, skb->len); | ||
| 590 | |||
| 591 | skb->protocol = eth_type_trans(skb, dev); | ||
| 592 | dev->last_rx = jiffies; | ||
| 593 | port->stat.rx_packets++; | ||
| 594 | port->stat.rx_bytes += skb->len; | ||
| 595 | netif_receive_skb(skb); | ||
| 596 | |||
| 597 | /* put the new buffer on RX-free queue */ | ||
| 598 | #ifdef __ARMEB__ | ||
| 599 | port->rx_buff_tab[n] = temp; | ||
| 600 | desc->data = phys + NET_IP_ALIGN; | ||
| 601 | #endif | ||
| 602 | desc->buf_len = MAX_MRU; | ||
| 603 | desc->pkt_len = 0; | ||
| 604 | queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); | ||
| 605 | received++; | ||
| 606 | } | ||
| 607 | |||
| 608 | #if DEBUG_RX | ||
| 609 | printk(KERN_DEBUG "eth_poll(): end, not all work done\n"); | ||
| 610 | #endif | ||
| 611 | return received; /* not all work done */ | ||
| 612 | } | ||
| 613 | |||
| 614 | |||
| 615 | static void eth_txdone_irq(void *unused) | ||
| 616 | { | ||
| 617 | u32 phys; | ||
| 618 | |||
| 619 | #if DEBUG_TX | ||
| 620 | printk(KERN_DEBUG DRV_NAME ": eth_txdone_irq\n"); | ||
| 621 | #endif | ||
| 622 | while ((phys = queue_get_entry(TXDONE_QUEUE)) != 0) { | ||
| 623 | u32 npe_id, n_desc; | ||
| 624 | struct port *port; | ||
| 625 | struct desc *desc; | ||
| 626 | int start; | ||
| 627 | |||
| 628 | npe_id = phys & 3; | ||
| 629 | BUG_ON(npe_id >= MAX_NPES); | ||
| 630 | port = npe_port_tab[npe_id]; | ||
| 631 | BUG_ON(!port); | ||
| 632 | phys &= ~0x1F; /* mask out non-address bits */ | ||
| 633 | n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); | ||
| 634 | BUG_ON(n_desc >= TX_DESCS); | ||
| 635 | desc = tx_desc_ptr(port, n_desc); | ||
| 636 | debug_desc(phys, desc); | ||
| 637 | |||
| 638 | if (port->tx_buff_tab[n_desc]) { /* not the draining packet */ | ||
| 639 | port->stat.tx_packets++; | ||
| 640 | port->stat.tx_bytes += desc->pkt_len; | ||
| 641 | |||
| 642 | dma_unmap_tx(port, desc); | ||
| 643 | #if DEBUG_TX | ||
| 644 | printk(KERN_DEBUG "%s: eth_txdone_irq free %p\n", | ||
| 645 | port->netdev->name, port->tx_buff_tab[n_desc]); | ||
| 646 | #endif | ||
| 647 | free_buffer_irq(port->tx_buff_tab[n_desc]); | ||
| 648 | port->tx_buff_tab[n_desc] = NULL; | ||
| 649 | } | ||
| 650 | |||
| 651 | start = qmgr_stat_empty(port->plat->txreadyq); | ||
| 652 | queue_put_desc(port->plat->txreadyq, phys, desc); | ||
| 653 | if (start) { | ||
| 654 | #if DEBUG_TX | ||
| 655 | printk(KERN_DEBUG "%s: eth_txdone_irq xmit ready\n", | ||
| 656 | port->netdev->name); | ||
| 657 | #endif | ||
| 658 | netif_wake_queue(port->netdev); | ||
| 659 | } | ||
| 660 | } | ||
| 661 | } | ||
| 662 | |||
| 663 | static int eth_xmit(struct sk_buff *skb, struct net_device *dev) | ||
| 664 | { | ||
| 665 | struct port *port = netdev_priv(dev); | ||
| 666 | unsigned int txreadyq = port->plat->txreadyq; | ||
| 667 | int len, offset, bytes, n; | ||
| 668 | void *mem; | ||
| 669 | u32 phys; | ||
| 670 | struct desc *desc; | ||
| 671 | |||
| 672 | #if DEBUG_TX | ||
| 673 | printk(KERN_DEBUG "%s: eth_xmit\n", dev->name); | ||
| 674 | #endif | ||
| 675 | |||
| 676 | if (unlikely(skb->len > MAX_MRU)) { | ||
| 677 | dev_kfree_skb(skb); | ||
| 678 | port->stat.tx_errors++; | ||
| 679 | return NETDEV_TX_OK; | ||
| 680 | } | ||
| 681 | |||
| 682 | debug_pkt(dev, "eth_xmit", skb->data, skb->len); | ||
| 683 | |||
| 684 | len = skb->len; | ||
| 685 | #ifdef __ARMEB__ | ||
| 686 | offset = 0; /* no need to keep alignment */ | ||
| 687 | bytes = len; | ||
| 688 | mem = skb->data; | ||
| 689 | #else | ||
| 690 | offset = (int)skb->data & 3; /* keep 32-bit alignment */ | ||
| 691 | bytes = ALIGN(offset + len, 4); | ||
| 692 | if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { | ||
| 693 | dev_kfree_skb(skb); | ||
| 694 | port->stat.tx_dropped++; | ||
| 695 | return NETDEV_TX_OK; | ||
| 696 | } | ||
| 697 | memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); | ||
| 698 | dev_kfree_skb(skb); | ||
| 699 | #endif | ||
| 700 | |||
| 701 | phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); | ||
| 702 | if (dma_mapping_error(phys)) { | ||
| 703 | #ifdef __ARMEB__ | ||
| 704 | dev_kfree_skb(skb); | ||
| 705 | #else | ||
| 706 | kfree(mem); | ||
| 707 | #endif | ||
| 708 | port->stat.tx_dropped++; | ||
| 709 | return NETDEV_TX_OK; | ||
| 710 | } | ||
| 711 | |||
| 712 | n = queue_get_desc(txreadyq, port, 1); | ||
| 713 | BUG_ON(n < 0); | ||
| 714 | desc = tx_desc_ptr(port, n); | ||
| 715 | |||
| 716 | #ifdef __ARMEB__ | ||
| 717 | port->tx_buff_tab[n] = skb; | ||
| 718 | #else | ||
| 719 | port->tx_buff_tab[n] = mem; | ||
| 720 | #endif | ||
| 721 | desc->data = phys + offset; | ||
| 722 | desc->buf_len = desc->pkt_len = len; | ||
| 723 | |||
| 724 | /* NPE firmware pads short frames with zeros internally */ | ||
| 725 | wmb(); | ||
| 726 | queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); | ||
| 727 | dev->trans_start = jiffies; | ||
| 728 | |||
| 729 | if (qmgr_stat_empty(txreadyq)) { | ||
| 730 | #if DEBUG_TX | ||
| 731 | printk(KERN_DEBUG "%s: eth_xmit queue full\n", dev->name); | ||
| 732 | #endif | ||
| 733 | netif_stop_queue(dev); | ||
| 734 | /* we could have missed the TX-ready interrupt */ | ||
| 735 | if (!qmgr_stat_empty(txreadyq)) { | ||
| 736 | #if DEBUG_TX | ||
| 737 | printk(KERN_DEBUG "%s: eth_xmit ready again\n", | ||
| 738 | dev->name); | ||
| 739 | #endif | ||
| 740 | netif_wake_queue(dev); | ||
| 741 | } | ||
| 742 | } | ||
| 743 | |||
| 744 | #if DEBUG_TX | ||
| 745 | printk(KERN_DEBUG "%s: eth_xmit end\n", dev->name); | ||
| 746 | #endif | ||
| 747 | return NETDEV_TX_OK; | ||
| 748 | } | ||
| 749 | |||
| 750 | |||
| 751 | static struct net_device_stats *eth_stats(struct net_device *dev) | ||
| 752 | { | ||
| 753 | struct port *port = netdev_priv(dev); | ||
| 754 | return &port->stat; | ||
| 755 | } | ||
| 756 | |||
| 757 | static void eth_set_mcast_list(struct net_device *dev) | ||
| 758 | { | ||
| 759 | struct port *port = netdev_priv(dev); | ||
| 760 | struct dev_mc_list *mclist = dev->mc_list; | ||
| 761 | u8 diffs[ETH_ALEN], *addr; | ||
| 762 | int cnt = dev->mc_count, i; | ||
| 763 | |||
| 764 | if ((dev->flags & IFF_PROMISC) || !mclist || !cnt) { | ||
| 765 | __raw_writel(DEFAULT_RX_CNTRL0 & ~RX_CNTRL0_ADDR_FLTR_EN, | ||
| 766 | &port->regs->rx_control[0]); | ||
| 767 | return; | ||
| 768 | } | ||
| 769 | |||
| 770 | memset(diffs, 0, ETH_ALEN); | ||
| 771 | addr = mclist->dmi_addr; /* first MAC address */ | ||
| 772 | |||
| 773 | while (--cnt && (mclist = mclist->next)) | ||
| 774 | for (i = 0; i < ETH_ALEN; i++) | ||
| 775 | diffs[i] |= addr[i] ^ mclist->dmi_addr[i]; | ||
| 776 | |||
| 777 | for (i = 0; i < ETH_ALEN; i++) { | ||
| 778 | __raw_writel(addr[i], &port->regs->mcast_addr[i]); | ||
| 779 | __raw_writel(~diffs[i], &port->regs->mcast_mask[i]); | ||
| 780 | } | ||
| 781 | |||
| 782 | __raw_writel(DEFAULT_RX_CNTRL0 | RX_CNTRL0_ADDR_FLTR_EN, | ||
| 783 | &port->regs->rx_control[0]); | ||
| 784 | } | ||
| 785 | |||
| 786 | |||
| 787 | static int eth_ioctl(struct net_device *dev, struct ifreq *req, int cmd) | ||
| 788 | { | ||
| 789 | struct port *port = netdev_priv(dev); | ||
| 790 | unsigned int duplex_chg; | ||
| 791 | int err; | ||
| 792 | |||
| 793 | if (!netif_running(dev)) | ||
| 794 | return -EINVAL; | ||
| 795 | err = generic_mii_ioctl(&port->mii, if_mii(req), cmd, &duplex_chg); | ||
| 796 | if (duplex_chg) | ||
| 797 | eth_set_duplex(port); | ||
| 798 | return err; | ||
| 799 | } | ||
| 800 | |||
| 801 | |||
| 802 | static int request_queues(struct port *port) | ||
| 803 | { | ||
| 804 | int err; | ||
| 805 | |||
| 806 | err = qmgr_request_queue(RXFREE_QUEUE(port->id), RX_DESCS, 0, 0); | ||
| 807 | if (err) | ||
| 808 | return err; | ||
| 809 | |||
| 810 | err = qmgr_request_queue(port->plat->rxq, RX_DESCS, 0, 0); | ||
| 811 | if (err) | ||
| 812 | goto rel_rxfree; | ||
| 813 | |||
| 814 | err = qmgr_request_queue(TX_QUEUE(port->id), TX_DESCS, 0, 0); | ||
| 815 | if (err) | ||
| 816 | goto rel_rx; | ||
| 817 | |||
| 818 | err = qmgr_request_queue(port->plat->txreadyq, TX_DESCS, 0, 0); | ||
| 819 | if (err) | ||
| 820 | goto rel_tx; | ||
| 821 | |||
| 822 | /* TX-done queue handles skbs sent out by the NPEs */ | ||
| 823 | if (!ports_open) { | ||
| 824 | err = qmgr_request_queue(TXDONE_QUEUE, TXDONE_QUEUE_LEN, 0, 0); | ||
| 825 | if (err) | ||
| 826 | goto rel_txready; | ||
| 827 | } | ||
| 828 | return 0; | ||
| 829 | |||
| 830 | rel_txready: | ||
| 831 | qmgr_release_queue(port->plat->txreadyq); | ||
| 832 | rel_tx: | ||
| 833 | qmgr_release_queue(TX_QUEUE(port->id)); | ||
| 834 | rel_rx: | ||
| 835 | qmgr_release_queue(port->plat->rxq); | ||
| 836 | rel_rxfree: | ||
| 837 | qmgr_release_queue(RXFREE_QUEUE(port->id)); | ||
| 838 | printk(KERN_DEBUG "%s: unable to request hardware queues\n", | ||
| 839 | port->netdev->name); | ||
| 840 | return err; | ||
| 841 | } | ||
| 842 | |||
| 843 | static void release_queues(struct port *port) | ||
| 844 | { | ||
| 845 | qmgr_release_queue(RXFREE_QUEUE(port->id)); | ||
| 846 | qmgr_release_queue(port->plat->rxq); | ||
| 847 | qmgr_release_queue(TX_QUEUE(port->id)); | ||
| 848 | qmgr_release_queue(port->plat->txreadyq); | ||
| 849 | |||
| 850 | if (!ports_open) | ||
| 851 | qmgr_release_queue(TXDONE_QUEUE); | ||
| 852 | } | ||
| 853 | |||
| 854 | static int init_queues(struct port *port) | ||
| 855 | { | ||
| 856 | int i; | ||
| 857 | |||
| 858 | if (!ports_open) | ||
| 859 | if (!(dma_pool = dma_pool_create(DRV_NAME, NULL, | ||
| 860 | POOL_ALLOC_SIZE, 32, 0))) | ||
| 861 | return -ENOMEM; | ||
| 862 | |||
| 863 | if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL, | ||
| 864 | &port->desc_tab_phys))) | ||
| 865 | return -ENOMEM; | ||
| 866 | memset(port->desc_tab, 0, POOL_ALLOC_SIZE); | ||
| 867 | memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */ | ||
| 868 | memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab)); | ||
| 869 | |||
| 870 | /* Setup RX buffers */ | ||
| 871 | for (i = 0; i < RX_DESCS; i++) { | ||
| 872 | struct desc *desc = rx_desc_ptr(port, i); | ||
| 873 | buffer_t *buff; /* skb or kmalloc()ated memory */ | ||
| 874 | void *data; | ||
| 875 | #ifdef __ARMEB__ | ||
| 876 | if (!(buff = netdev_alloc_skb(port->netdev, RX_BUFF_SIZE))) | ||
| 877 | return -ENOMEM; | ||
| 878 | data = buff->data; | ||
| 879 | #else | ||
| 880 | if (!(buff = kmalloc(RX_BUFF_SIZE, GFP_KERNEL))) | ||
| 881 | return -ENOMEM; | ||
| 882 | data = buff; | ||
| 883 | #endif | ||
| 884 | desc->buf_len = MAX_MRU; | ||
| 885 | desc->data = dma_map_single(&port->netdev->dev, data, | ||
| 886 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | ||
| 887 | if (dma_mapping_error(desc->data)) { | ||
| 888 | free_buffer(buff); | ||
| 889 | return -EIO; | ||
| 890 | } | ||
| 891 | desc->data += NET_IP_ALIGN; | ||
| 892 | port->rx_buff_tab[i] = buff; | ||
| 893 | } | ||
| 894 | |||
| 895 | return 0; | ||
| 896 | } | ||
| 897 | |||
| 898 | static void destroy_queues(struct port *port) | ||
| 899 | { | ||
| 900 | int i; | ||
| 901 | |||
| 902 | if (port->desc_tab) { | ||
| 903 | for (i = 0; i < RX_DESCS; i++) { | ||
| 904 | struct desc *desc = rx_desc_ptr(port, i); | ||
| 905 | buffer_t *buff = port->rx_buff_tab[i]; | ||
| 906 | if (buff) { | ||
| 907 | dma_unmap_single(&port->netdev->dev, | ||
| 908 | desc->data - NET_IP_ALIGN, | ||
| 909 | RX_BUFF_SIZE, DMA_FROM_DEVICE); | ||
| 910 | free_buffer(buff); | ||
| 911 | } | ||
| 912 | } | ||
| 913 | for (i = 0; i < TX_DESCS; i++) { | ||
| 914 | struct desc *desc = tx_desc_ptr(port, i); | ||
| 915 | buffer_t *buff = port->tx_buff_tab[i]; | ||
| 916 | if (buff) { | ||
| 917 | dma_unmap_tx(port, desc); | ||
| 918 | free_buffer(buff); | ||
| 919 | } | ||
| 920 | } | ||
| 921 | dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys); | ||
| 922 | port->desc_tab = NULL; | ||
| 923 | } | ||
| 924 | |||
| 925 | if (!ports_open && dma_pool) { | ||
| 926 | dma_pool_destroy(dma_pool); | ||
| 927 | dma_pool = NULL; | ||
| 928 | } | ||
| 929 | } | ||
| 930 | |||
| 931 | static int eth_open(struct net_device *dev) | ||
| 932 | { | ||
| 933 | struct port *port = netdev_priv(dev); | ||
| 934 | struct npe *npe = port->npe; | ||
| 935 | struct msg msg; | ||
| 936 | int i, err; | ||
| 937 | |||
| 938 | if (!npe_running(npe)) { | ||
| 939 | err = npe_load_firmware(npe, npe_name(npe), &dev->dev); | ||
| 940 | if (err) | ||
| 941 | return err; | ||
| 942 | |||
| 943 | if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) { | ||
| 944 | printk(KERN_ERR "%s: %s not responding\n", dev->name, | ||
| 945 | npe_name(npe)); | ||
| 946 | return -EIO; | ||
| 947 | } | ||
| 948 | } | ||
| 949 | |||
| 950 | mdio_write(dev, port->plat->phy, MII_BMCR, port->mii_bmcr); | ||
| 951 | |||
| 952 | memset(&msg, 0, sizeof(msg)); | ||
| 953 | msg.cmd = NPE_VLAN_SETRXQOSENTRY; | ||
| 954 | msg.eth_id = port->id; | ||
| 955 | msg.byte5 = port->plat->rxq | 0x80; | ||
| 956 | msg.byte7 = port->plat->rxq << 4; | ||
| 957 | for (i = 0; i < 8; i++) { | ||
| 958 | msg.byte3 = i; | ||
| 959 | if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ")) | ||
| 960 | return -EIO; | ||
| 961 | } | ||
| 962 | |||
| 963 | msg.cmd = NPE_EDB_SETPORTADDRESS; | ||
| 964 | msg.eth_id = PHYSICAL_ID(port->id); | ||
| 965 | msg.byte2 = dev->dev_addr[0]; | ||
| 966 | msg.byte3 = dev->dev_addr[1]; | ||
| 967 | msg.byte4 = dev->dev_addr[2]; | ||
| 968 | msg.byte5 = dev->dev_addr[3]; | ||
| 969 | msg.byte6 = dev->dev_addr[4]; | ||
| 970 | msg.byte7 = dev->dev_addr[5]; | ||
| 971 | if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC")) | ||
| 972 | return -EIO; | ||
| 973 | |||
| 974 | memset(&msg, 0, sizeof(msg)); | ||
| 975 | msg.cmd = NPE_FW_SETFIREWALLMODE; | ||
| 976 | msg.eth_id = port->id; | ||
| 977 | if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE")) | ||
| 978 | return -EIO; | ||
| 979 | |||
| 980 | if ((err = request_queues(port)) != 0) | ||
| 981 | return err; | ||
| 982 | |||
| 983 | if ((err = init_queues(port)) != 0) { | ||
| 984 | destroy_queues(port); | ||
| 985 | release_queues(port); | ||
| 986 | return err; | ||
| 987 | } | ||
| 988 | |||
| 989 | for (i = 0; i < ETH_ALEN; i++) | ||
| 990 | __raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]); | ||
| 991 | __raw_writel(0x08, &port->regs->random_seed); | ||
| 992 | __raw_writel(0x12, &port->regs->partial_empty_threshold); | ||
| 993 | __raw_writel(0x30, &port->regs->partial_full_threshold); | ||
| 994 | __raw_writel(0x08, &port->regs->tx_start_bytes); | ||
| 995 | __raw_writel(0x15, &port->regs->tx_deferral); | ||
| 996 | __raw_writel(0x08, &port->regs->tx_2part_deferral[0]); | ||
| 997 | __raw_writel(0x07, &port->regs->tx_2part_deferral[1]); | ||
| 998 | __raw_writel(0x80, &port->regs->slot_time); | ||
| 999 | __raw_writel(0x01, &port->regs->int_clock_threshold); | ||
| 1000 | |||
| 1001 | /* Populate queues with buffers, no failure after this point */ | ||
| 1002 | for (i = 0; i < TX_DESCS; i++) | ||
| 1003 | queue_put_desc(port->plat->txreadyq, | ||
| 1004 | tx_desc_phys(port, i), tx_desc_ptr(port, i)); | ||
| 1005 | |||
| 1006 | for (i = 0; i < RX_DESCS; i++) | ||
| 1007 | queue_put_desc(RXFREE_QUEUE(port->id), | ||
| 1008 | rx_desc_phys(port, i), rx_desc_ptr(port, i)); | ||
| 1009 | |||
| 1010 | __raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]); | ||
| 1011 | __raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]); | ||
| 1012 | __raw_writel(0, &port->regs->rx_control[1]); | ||
| 1013 | __raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]); | ||
| 1014 | |||
| 1015 | napi_enable(&port->napi); | ||
| 1016 | phy_check_media(port, 1); | ||
| 1017 | eth_set_mcast_list(dev); | ||
| 1018 | netif_start_queue(dev); | ||
| 1019 | schedule_delayed_work(&port->mdio_thread, MDIO_INTERVAL); | ||
| 1020 | |||
| 1021 | qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY, | ||
| 1022 | eth_rx_irq, dev); | ||
| 1023 | if (!ports_open) { | ||
| 1024 | qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY, | ||
| 1025 | eth_txdone_irq, NULL); | ||
| 1026 | qmgr_enable_irq(TXDONE_QUEUE); | ||
| 1027 | } | ||
| 1028 | ports_open++; | ||
| 1029 | /* we may already have RX data; the NAPI poll will enable the IRQ */ | ||
| 1030 | netif_rx_schedule(dev, &port->napi); | ||
| 1031 | return 0; | ||
| 1032 | } | ||
| 1033 | |||
| 1034 | static int eth_close(struct net_device *dev) | ||
| 1035 | { | ||
| 1036 | struct port *port = netdev_priv(dev); | ||
| 1037 | struct msg msg; | ||
| 1038 | int buffs = RX_DESCS; /* allocated RX buffers */ | ||
| 1039 | int i; | ||
| 1040 | |||
| 1041 | ports_open--; | ||
| 1042 | qmgr_disable_irq(port->plat->rxq); | ||
| 1043 | napi_disable(&port->napi); | ||
| 1044 | netif_stop_queue(dev); | ||
| 1045 | |||
| 1046 | while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0) | ||
| 1047 | buffs--; | ||
| 1048 | |||
| 1049 | memset(&msg, 0, sizeof(msg)); | ||
| 1050 | msg.cmd = NPE_SETLOOPBACK_MODE; | ||
| 1051 | msg.eth_id = port->id; | ||
| 1052 | msg.byte3 = 1; | ||
| 1053 | if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK")) | ||
| 1054 | printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name); | ||
| 1055 | |||
| 1056 | i = 0; | ||
| 1057 | do { /* drain RX buffers */ | ||
| 1058 | while (queue_get_desc(port->plat->rxq, port, 0) >= 0) | ||
| 1059 | buffs--; | ||
| 1060 | if (!buffs) | ||
| 1061 | break; | ||
| 1062 | if (qmgr_stat_empty(TX_QUEUE(port->id))) { | ||
| 1063 | /* we have to inject a packet */ | ||
| 1064 | struct desc *desc; | ||
| 1065 | u32 phys; | ||
| 1066 | int n = queue_get_desc(port->plat->txreadyq, port, 1); | ||
| 1067 | BUG_ON(n < 0); | ||
| 1068 | desc = tx_desc_ptr(port, n); | ||
| 1069 | phys = tx_desc_phys(port, n); | ||
| 1070 | desc->buf_len = desc->pkt_len = 1; | ||
| 1071 | wmb(); | ||
| 1072 | queue_put_desc(TX_QUEUE(port->id), phys, desc); | ||
| 1073 | } | ||
| 1074 | udelay(1); | ||
| 1075 | } while (++i < MAX_CLOSE_WAIT); | ||
| 1076 | |||
| 1077 | if (buffs) | ||
| 1078 | printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)" | ||
| 1079 | " left in NPE\n", dev->name, buffs); | ||
| 1080 | #if DEBUG_CLOSE | ||
| 1081 | if (!buffs) | ||
| 1082 | printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i); | ||
| 1083 | #endif | ||
| 1084 | |||
| 1085 | buffs = TX_DESCS; | ||
| 1086 | while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0) | ||
| 1087 | buffs--; /* cancel TX */ | ||
| 1088 | |||
| 1089 | i = 0; | ||
| 1090 | do { | ||
| 1091 | while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0) | ||
| 1092 | buffs--; | ||
| 1093 | if (!buffs) | ||
| 1094 | break; | ||
| 1095 | } while (++i < MAX_CLOSE_WAIT); | ||
| 1096 | |||
| 1097 | if (buffs) | ||
| 1098 | printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) " | ||
| 1099 | "left in NPE\n", dev->name, buffs); | ||
| 1100 | #if DEBUG_CLOSE | ||
| 1101 | if (!buffs) | ||
| 1102 | printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i); | ||
| 1103 | #endif | ||
| 1104 | |||
| 1105 | msg.byte3 = 0; | ||
| 1106 | if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK")) | ||
| 1107 | printk(KERN_CRIT "%s: unable to disable loopback\n", | ||
| 1108 | dev->name); | ||
| 1109 | |||
| 1110 | port->mii_bmcr = mdio_read(dev, port->plat->phy, MII_BMCR) & | ||
| 1111 | ~(BMCR_RESET | BMCR_PDOWN); /* may have been altered */ | ||
| 1112 | mdio_write(dev, port->plat->phy, MII_BMCR, | ||
| 1113 | port->mii_bmcr | BMCR_PDOWN); | ||
| 1114 | |||
| 1115 | if (!ports_open) | ||
| 1116 | qmgr_disable_irq(TXDONE_QUEUE); | ||
| 1117 | cancel_rearming_delayed_work(&port->mdio_thread); | ||
| 1118 | destroy_queues(port); | ||
| 1119 | release_queues(port); | ||
| 1120 | return 0; | ||
| 1121 | } | ||
| 1122 | |||
| 1123 | static int __devinit eth_init_one(struct platform_device *pdev) | ||
| 1124 | { | ||
| 1125 | struct port *port; | ||
| 1126 | struct net_device *dev; | ||
| 1127 | struct eth_plat_info *plat = pdev->dev.platform_data; | ||
| 1128 | u32 regs_phys; | ||
| 1129 | int err; | ||
| 1130 | |||
| 1131 | if (!(dev = alloc_etherdev(sizeof(struct port)))) | ||
| 1132 | return -ENOMEM; | ||
| 1133 | |||
| 1134 | SET_NETDEV_DEV(dev, &pdev->dev); | ||
| 1135 | port = netdev_priv(dev); | ||
| 1136 | port->netdev = dev; | ||
| 1137 | port->id = pdev->id; | ||
| 1138 | |||
| 1139 | switch (port->id) { | ||
| 1140 | case IXP4XX_ETH_NPEA: | ||
| 1141 | port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT; | ||
| 1142 | regs_phys = IXP4XX_EthA_BASE_PHYS; | ||
| 1143 | break; | ||
| 1144 | case IXP4XX_ETH_NPEB: | ||
| 1145 | port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; | ||
| 1146 | regs_phys = IXP4XX_EthB_BASE_PHYS; | ||
| 1147 | break; | ||
| 1148 | case IXP4XX_ETH_NPEC: | ||
| 1149 | port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT; | ||
| 1150 | regs_phys = IXP4XX_EthC_BASE_PHYS; | ||
| 1151 | break; | ||
| 1152 | default: | ||
| 1153 | err = -ENOSYS; | ||
| 1154 | goto err_free; | ||
| 1155 | } | ||
| 1156 | |||
| 1157 | dev->open = eth_open; | ||
| 1158 | dev->hard_start_xmit = eth_xmit; | ||
| 1159 | dev->stop = eth_close; | ||
| 1160 | dev->get_stats = eth_stats; | ||
| 1161 | dev->do_ioctl = eth_ioctl; | ||
| 1162 | dev->set_multicast_list = eth_set_mcast_list; | ||
| 1163 | dev->tx_queue_len = 100; | ||
| 1164 | |||
| 1165 | netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT); | ||
| 1166 | |||
| 1167 | if (!(port->npe = npe_request(NPE_ID(port->id)))) { | ||
| 1168 | err = -EIO; | ||
| 1169 | goto err_free; | ||
| 1170 | } | ||
| 1171 | |||
| 1172 | if (register_netdev(dev)) { | ||
| 1173 | err = -EIO; | ||
| 1174 | goto err_npe_rel; | ||
| 1175 | } | ||
| 1176 | |||
| 1177 | port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name); | ||
| 1178 | if (!port->mem_res) { | ||
| 1179 | err = -EBUSY; | ||
| 1180 | goto err_unreg; | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | port->plat = plat; | ||
| 1184 | npe_port_tab[NPE_ID(port->id)] = port; | ||
| 1185 | memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN); | ||
| 1186 | |||
| 1187 | platform_set_drvdata(pdev, dev); | ||
| 1188 | |||
| 1189 | __raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET, | ||
| 1190 | &port->regs->core_control); | ||
| 1191 | udelay(50); | ||
| 1192 | __raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control); | ||
| 1193 | udelay(50); | ||
| 1194 | |||
| 1195 | port->mii.dev = dev; | ||
| 1196 | port->mii.mdio_read = mdio_read; | ||
| 1197 | port->mii.mdio_write = mdio_write; | ||
| 1198 | port->mii.phy_id = plat->phy; | ||
| 1199 | port->mii.phy_id_mask = 0x1F; | ||
| 1200 | port->mii.reg_num_mask = 0x1F; | ||
| 1201 | |||
| 1202 | printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy, | ||
| 1203 | npe_name(port->npe)); | ||
| 1204 | |||
| 1205 | phy_reset(dev, plat->phy); | ||
| 1206 | port->mii_bmcr = mdio_read(dev, plat->phy, MII_BMCR) & | ||
| 1207 | ~(BMCR_RESET | BMCR_PDOWN); | ||
| 1208 | mdio_write(dev, plat->phy, MII_BMCR, port->mii_bmcr | BMCR_PDOWN); | ||
| 1209 | |||
| 1210 | INIT_DELAYED_WORK(&port->mdio_thread, mdio_thread); | ||
| 1211 | return 0; | ||
| 1212 | |||
| 1213 | err_unreg: | ||
| 1214 | unregister_netdev(dev); | ||
| 1215 | err_npe_rel: | ||
| 1216 | npe_release(port->npe); | ||
| 1217 | err_free: | ||
| 1218 | free_netdev(dev); | ||
| 1219 | return err; | ||
| 1220 | } | ||
| 1221 | |||
| 1222 | static int __devexit eth_remove_one(struct platform_device *pdev) | ||
| 1223 | { | ||
| 1224 | struct net_device *dev = platform_get_drvdata(pdev); | ||
| 1225 | struct port *port = netdev_priv(dev); | ||
| 1226 | |||
| 1227 | unregister_netdev(dev); | ||
| 1228 | npe_port_tab[NPE_ID(port->id)] = NULL; | ||
| 1229 | platform_set_drvdata(pdev, NULL); | ||
| 1230 | npe_release(port->npe); | ||
| 1231 | release_resource(port->mem_res); | ||
| 1232 | free_netdev(dev); | ||
| 1233 | return 0; | ||
| 1234 | } | ||
| 1235 | |||
| 1236 | static struct platform_driver drv = { | ||
| 1237 | .driver.name = DRV_NAME, | ||
| 1238 | .probe = eth_init_one, | ||
| 1239 | .remove = eth_remove_one, | ||
| 1240 | }; | ||
| 1241 | |||
| 1242 | static int __init eth_init_module(void) | ||
| 1243 | { | ||
| 1244 | if (!(ixp4xx_read_feature_bits() & IXP4XX_FEATURE_NPEB_ETH0)) | ||
| 1245 | return -ENOSYS; | ||
| 1246 | |||
| 1247 | /* All MII PHY accesses use NPE-B Ethernet registers */ | ||
| 1248 | spin_lock_init(&mdio_lock); | ||
| 1249 | mdio_regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT; | ||
| 1250 | __raw_writel(DEFAULT_CORE_CNTRL, &mdio_regs->core_control); | ||
| 1251 | |||
| 1252 | return platform_driver_register(&drv); | ||
| 1253 | } | ||
| 1254 | |||
| 1255 | static void __exit eth_cleanup_module(void) | ||
| 1256 | { | ||
| 1257 | platform_driver_unregister(&drv); | ||
| 1258 | } | ||
| 1259 | |||
| 1260 | MODULE_AUTHOR("Krzysztof Halasa"); | ||
| 1261 | MODULE_DESCRIPTION("Intel IXP4xx Ethernet driver"); | ||
| 1262 | MODULE_LICENSE("GPL v2"); | ||
| 1263 | MODULE_ALIAS("platform:ixp4xx_eth"); | ||
| 1264 | module_init(eth_init_module); | ||
| 1265 | module_exit(eth_cleanup_module); | ||
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c index 4fec8581bfd7..89c0018132ec 100644 --- a/drivers/net/bfin_mac.c +++ b/drivers/net/bfin_mac.c | |||
| @@ -27,6 +27,7 @@ | |||
| 27 | #include <linux/phy.h> | 27 | #include <linux/phy.h> |
| 28 | #include <linux/netdevice.h> | 28 | #include <linux/netdevice.h> |
| 29 | #include <linux/etherdevice.h> | 29 | #include <linux/etherdevice.h> |
| 30 | #include <linux/ethtool.h> | ||
| 30 | #include <linux/skbuff.h> | 31 | #include <linux/skbuff.h> |
| 31 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
| 32 | 33 | ||
| @@ -42,7 +43,7 @@ | |||
| 42 | #define DRV_NAME "bfin_mac" | 43 | #define DRV_NAME "bfin_mac" |
| 43 | #define DRV_VERSION "1.1" | 44 | #define DRV_VERSION "1.1" |
| 44 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" | 45 | #define DRV_AUTHOR "Bryan Wu, Luke Yang" |
| 45 | #define DRV_DESC "Blackfin BF53[67] BF527 on-chip Ethernet MAC driver" | 46 | #define DRV_DESC "Blackfin on-chip Ethernet MAC driver" |
| 46 | 47 | ||
| 47 | MODULE_AUTHOR(DRV_AUTHOR); | 48 | MODULE_AUTHOR(DRV_AUTHOR); |
| 48 | MODULE_LICENSE("GPL"); | 49 | MODULE_LICENSE("GPL"); |
| @@ -73,8 +74,14 @@ static struct net_dma_desc_tx *current_tx_ptr; | |||
| 73 | static struct net_dma_desc_tx *tx_desc; | 74 | static struct net_dma_desc_tx *tx_desc; |
| 74 | static struct net_dma_desc_rx *rx_desc; | 75 | static struct net_dma_desc_rx *rx_desc; |
| 75 | 76 | ||
| 76 | static void bf537mac_disable(void); | 77 | #if defined(CONFIG_BFIN_MAC_RMII) |
| 77 | static void bf537mac_enable(void); | 78 | static u16 pin_req[] = P_RMII0; |
| 79 | #else | ||
| 80 | static u16 pin_req[] = P_MII0; | ||
| 81 | #endif | ||
| 82 | |||
| 83 | static void bfin_mac_disable(void); | ||
| 84 | static void bfin_mac_enable(void); | ||
| 78 | 85 | ||
| 79 | static void desc_list_free(void) | 86 | static void desc_list_free(void) |
| 80 | { | 87 | { |
| @@ -243,27 +250,6 @@ init_error: | |||
| 243 | 250 | ||
| 244 | /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ | 251 | /*---PHY CONTROL AND CONFIGURATION-----------------------------------------*/ |
| 245 | 252 | ||
| 246 | /* Set FER regs to MUX in Ethernet pins */ | ||
| 247 | static int setup_pin_mux(int action) | ||
| 248 | { | ||
| 249 | #if defined(CONFIG_BFIN_MAC_RMII) | ||
| 250 | u16 pin_req[] = P_RMII0; | ||
| 251 | #else | ||
| 252 | u16 pin_req[] = P_MII0; | ||
| 253 | #endif | ||
| 254 | |||
| 255 | if (action) { | ||
| 256 | if (peripheral_request_list(pin_req, DRV_NAME)) { | ||
| 257 | printk(KERN_ERR DRV_NAME | ||
| 258 | ": Requesting Peripherals failed\n"); | ||
| 259 | return -EFAULT; | ||
| 260 | } | ||
| 261 | } else | ||
| 262 | peripheral_free_list(pin_req); | ||
| 263 | |||
| 264 | return 0; | ||
| 265 | } | ||
| 266 | |||
| 267 | /* | 253 | /* |
| 268 | * MII operations | 254 | * MII operations |
| 269 | */ | 255 | */ |
| @@ -322,9 +308,9 @@ static int mdiobus_reset(struct mii_bus *bus) | |||
| 322 | return 0; | 308 | return 0; |
| 323 | } | 309 | } |
| 324 | 310 | ||
| 325 | static void bf537_adjust_link(struct net_device *dev) | 311 | static void bfin_mac_adjust_link(struct net_device *dev) |
| 326 | { | 312 | { |
| 327 | struct bf537mac_local *lp = netdev_priv(dev); | 313 | struct bfin_mac_local *lp = netdev_priv(dev); |
| 328 | struct phy_device *phydev = lp->phydev; | 314 | struct phy_device *phydev = lp->phydev; |
| 329 | unsigned long flags; | 315 | unsigned long flags; |
| 330 | int new_state = 0; | 316 | int new_state = 0; |
| @@ -395,7 +381,7 @@ static void bf537_adjust_link(struct net_device *dev) | |||
| 395 | 381 | ||
| 396 | static int mii_probe(struct net_device *dev) | 382 | static int mii_probe(struct net_device *dev) |
| 397 | { | 383 | { |
| 398 | struct bf537mac_local *lp = netdev_priv(dev); | 384 | struct bfin_mac_local *lp = netdev_priv(dev); |
| 399 | struct phy_device *phydev = NULL; | 385 | struct phy_device *phydev = NULL; |
| 400 | unsigned short sysctl; | 386 | unsigned short sysctl; |
| 401 | int i; | 387 | int i; |
| @@ -431,10 +417,10 @@ static int mii_probe(struct net_device *dev) | |||
| 431 | } | 417 | } |
| 432 | 418 | ||
| 433 | #if defined(CONFIG_BFIN_MAC_RMII) | 419 | #if defined(CONFIG_BFIN_MAC_RMII) |
| 434 | phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, | 420 | phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, |
| 435 | PHY_INTERFACE_MODE_RMII); | 421 | PHY_INTERFACE_MODE_RMII); |
| 436 | #else | 422 | #else |
| 437 | phydev = phy_connect(dev, phydev->dev.bus_id, &bf537_adjust_link, 0, | 423 | phydev = phy_connect(dev, phydev->dev.bus_id, &bfin_mac_adjust_link, 0, |
| 438 | PHY_INTERFACE_MODE_MII); | 424 | PHY_INTERFACE_MODE_MII); |
| 439 | #endif | 425 | #endif |
| 440 | 426 | ||
| @@ -469,6 +455,51 @@ static int mii_probe(struct net_device *dev) | |||
| 469 | return 0; | 455 | return 0; |
| 470 | } | 456 | } |
| 471 | 457 | ||
| 458 | /* | ||
| 459 | * Ethtool support | ||
| 460 | */ | ||
| 461 | |||
| 462 | static int | ||
| 463 | bfin_mac_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 464 | { | ||
| 465 | struct bfin_mac_local *lp = netdev_priv(dev); | ||
| 466 | |||
| 467 | if (lp->phydev) | ||
| 468 | return phy_ethtool_gset(lp->phydev, cmd); | ||
| 469 | |||
| 470 | return -EINVAL; | ||
| 471 | } | ||
| 472 | |||
| 473 | static int | ||
| 474 | bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd) | ||
| 475 | { | ||
| 476 | struct bfin_mac_local *lp = netdev_priv(dev); | ||
| 477 | |||
| 478 | if (!capable(CAP_NET_ADMIN)) | ||
| 479 | return -EPERM; | ||
| 480 | |||
| 481 | if (lp->phydev) | ||
| 482 | return phy_ethtool_sset(lp->phydev, cmd); | ||
| 483 | |||
| 484 | return -EINVAL; | ||
| 485 | } | ||
| 486 | |||
| 487 | static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, | ||
| 488 | struct ethtool_drvinfo *info) | ||
| 489 | { | ||
| 490 | strcpy(info->driver, DRV_NAME); | ||
| 491 | strcpy(info->version, DRV_VERSION); | ||
| 492 | strcpy(info->fw_version, "N/A"); | ||
| 493 | strcpy(info->bus_info, dev->dev.bus_id); | ||
| 494 | } | ||
| 495 | |||
| 496 | static struct ethtool_ops bfin_mac_ethtool_ops = { | ||
| 497 | .get_settings = bfin_mac_ethtool_getsettings, | ||
| 498 | .set_settings = bfin_mac_ethtool_setsettings, | ||
| 499 | .get_link = ethtool_op_get_link, | ||
| 500 | .get_drvinfo = bfin_mac_ethtool_getdrvinfo, | ||
| 501 | }; | ||
| 502 | |||
| 472 | /**************************************************************************/ | 503 | /**************************************************************************/ |
| 473 | void setup_system_regs(struct net_device *dev) | 504 | void setup_system_regs(struct net_device *dev) |
| 474 | { | 505 | { |
| @@ -511,7 +542,7 @@ static void setup_mac_addr(u8 *mac_addr) | |||
| 511 | bfin_write_EMAC_ADDRHI(addr_hi); | 542 | bfin_write_EMAC_ADDRHI(addr_hi); |
| 512 | } | 543 | } |
| 513 | 544 | ||
| 514 | static int bf537mac_set_mac_address(struct net_device *dev, void *p) | 545 | static int bfin_mac_set_mac_address(struct net_device *dev, void *p) |
| 515 | { | 546 | { |
| 516 | struct sockaddr *addr = p; | 547 | struct sockaddr *addr = p; |
| 517 | if (netif_running(dev)) | 548 | if (netif_running(dev)) |
| @@ -573,7 +604,7 @@ adjust_head: | |||
| 573 | 604 | ||
| 574 | } | 605 | } |
| 575 | 606 | ||
| 576 | static int bf537mac_hard_start_xmit(struct sk_buff *skb, | 607 | static int bfin_mac_hard_start_xmit(struct sk_buff *skb, |
| 577 | struct net_device *dev) | 608 | struct net_device *dev) |
| 578 | { | 609 | { |
| 579 | unsigned int data; | 610 | unsigned int data; |
| @@ -631,7 +662,7 @@ out: | |||
| 631 | return 0; | 662 | return 0; |
| 632 | } | 663 | } |
| 633 | 664 | ||
| 634 | static void bf537mac_rx(struct net_device *dev) | 665 | static void bfin_mac_rx(struct net_device *dev) |
| 635 | { | 666 | { |
| 636 | struct sk_buff *skb, *new_skb; | 667 | struct sk_buff *skb, *new_skb; |
| 637 | unsigned short len; | 668 | unsigned short len; |
| @@ -680,7 +711,7 @@ out: | |||
| 680 | } | 711 | } |
| 681 | 712 | ||
| 682 | /* interrupt routine to handle rx and error signal */ | 713 | /* interrupt routine to handle rx and error signal */ |
| 683 | static irqreturn_t bf537mac_interrupt(int irq, void *dev_id) | 714 | static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id) |
| 684 | { | 715 | { |
| 685 | struct net_device *dev = dev_id; | 716 | struct net_device *dev = dev_id; |
| 686 | int number = 0; | 717 | int number = 0; |
| @@ -700,21 +731,21 @@ get_one_packet: | |||
| 700 | } | 731 | } |
| 701 | 732 | ||
| 702 | real_rx: | 733 | real_rx: |
| 703 | bf537mac_rx(dev); | 734 | bfin_mac_rx(dev); |
| 704 | number++; | 735 | number++; |
| 705 | goto get_one_packet; | 736 | goto get_one_packet; |
| 706 | } | 737 | } |
| 707 | 738 | ||
| 708 | #ifdef CONFIG_NET_POLL_CONTROLLER | 739 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 709 | static void bf537mac_poll(struct net_device *dev) | 740 | static void bfin_mac_poll(struct net_device *dev) |
| 710 | { | 741 | { |
| 711 | disable_irq(IRQ_MAC_RX); | 742 | disable_irq(IRQ_MAC_RX); |
| 712 | bf537mac_interrupt(IRQ_MAC_RX, dev); | 743 | bfin_mac_interrupt(IRQ_MAC_RX, dev); |
| 713 | enable_irq(IRQ_MAC_RX); | 744 | enable_irq(IRQ_MAC_RX); |
| 714 | } | 745 | } |
| 715 | #endif /* CONFIG_NET_POLL_CONTROLLER */ | 746 | #endif /* CONFIG_NET_POLL_CONTROLLER */ |
| 716 | 747 | ||
| 717 | static void bf537mac_disable(void) | 748 | static void bfin_mac_disable(void) |
| 718 | { | 749 | { |
| 719 | unsigned int opmode; | 750 | unsigned int opmode; |
| 720 | 751 | ||
| @@ -728,7 +759,7 @@ static void bf537mac_disable(void) | |||
| 728 | /* | 759 | /* |
| 729 | * Enable Interrupts, Receive, and Transmit | 760 | * Enable Interrupts, Receive, and Transmit |
| 730 | */ | 761 | */ |
| 731 | static void bf537mac_enable(void) | 762 | static void bfin_mac_enable(void) |
| 732 | { | 763 | { |
| 733 | u32 opmode; | 764 | u32 opmode; |
| 734 | 765 | ||
| @@ -766,23 +797,23 @@ static void bf537mac_enable(void) | |||
| 766 | } | 797 | } |
| 767 | 798 | ||
| 768 | /* Our watchdog timed out. Called by the networking layer */ | 799 | /* Our watchdog timed out. Called by the networking layer */ |
| 769 | static void bf537mac_timeout(struct net_device *dev) | 800 | static void bfin_mac_timeout(struct net_device *dev) |
| 770 | { | 801 | { |
| 771 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); | 802 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); |
| 772 | 803 | ||
| 773 | bf537mac_disable(); | 804 | bfin_mac_disable(); |
| 774 | 805 | ||
| 775 | /* reset tx queue */ | 806 | /* reset tx queue */ |
| 776 | tx_list_tail = tx_list_head->next; | 807 | tx_list_tail = tx_list_head->next; |
| 777 | 808 | ||
| 778 | bf537mac_enable(); | 809 | bfin_mac_enable(); |
| 779 | 810 | ||
| 780 | /* We can accept TX packets again */ | 811 | /* We can accept TX packets again */ |
| 781 | dev->trans_start = jiffies; | 812 | dev->trans_start = jiffies; |
| 782 | netif_wake_queue(dev); | 813 | netif_wake_queue(dev); |
| 783 | } | 814 | } |
| 784 | 815 | ||
| 785 | static void bf537mac_multicast_hash(struct net_device *dev) | 816 | static void bfin_mac_multicast_hash(struct net_device *dev) |
| 786 | { | 817 | { |
| 787 | u32 emac_hashhi, emac_hashlo; | 818 | u32 emac_hashhi, emac_hashlo; |
| 788 | struct dev_mc_list *dmi = dev->mc_list; | 819 | struct dev_mc_list *dmi = dev->mc_list; |
| @@ -821,7 +852,7 @@ static void bf537mac_multicast_hash(struct net_device *dev) | |||
| 821 | * promiscuous mode (for TCPDUMP and cousins) or accept | 852 | * promiscuous mode (for TCPDUMP and cousins) or accept |
| 822 | * a select set of multicast packets | 853 | * a select set of multicast packets |
| 823 | */ | 854 | */ |
| 824 | static void bf537mac_set_multicast_list(struct net_device *dev) | 855 | static void bfin_mac_set_multicast_list(struct net_device *dev) |
| 825 | { | 856 | { |
| 826 | u32 sysctl; | 857 | u32 sysctl; |
| 827 | 858 | ||
| @@ -840,7 +871,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev) | |||
| 840 | sysctl = bfin_read_EMAC_OPMODE(); | 871 | sysctl = bfin_read_EMAC_OPMODE(); |
| 841 | sysctl |= HM; | 872 | sysctl |= HM; |
| 842 | bfin_write_EMAC_OPMODE(sysctl); | 873 | bfin_write_EMAC_OPMODE(sysctl); |
| 843 | bf537mac_multicast_hash(dev); | 874 | bfin_mac_multicast_hash(dev); |
| 844 | } else { | 875 | } else { |
| 845 | /* clear promisc or multicast mode */ | 876 | /* clear promisc or multicast mode */ |
| 846 | sysctl = bfin_read_EMAC_OPMODE(); | 877 | sysctl = bfin_read_EMAC_OPMODE(); |
| @@ -852,7 +883,7 @@ static void bf537mac_set_multicast_list(struct net_device *dev) | |||
| 852 | /* | 883 | /* |
| 853 | * this puts the device in an inactive state | 884 | * this puts the device in an inactive state |
| 854 | */ | 885 | */ |
| 855 | static void bf537mac_shutdown(struct net_device *dev) | 886 | static void bfin_mac_shutdown(struct net_device *dev) |
| 856 | { | 887 | { |
| 857 | /* Turn off the EMAC */ | 888 | /* Turn off the EMAC */ |
| 858 | bfin_write_EMAC_OPMODE(0x00000000); | 889 | bfin_write_EMAC_OPMODE(0x00000000); |
| @@ -866,9 +897,9 @@ static void bf537mac_shutdown(struct net_device *dev) | |||
| 866 | * | 897 | * |
| 867 | * Set up everything, reset the card, etc.. | 898 | * Set up everything, reset the card, etc.. |
| 868 | */ | 899 | */ |
| 869 | static int bf537mac_open(struct net_device *dev) | 900 | static int bfin_mac_open(struct net_device *dev) |
| 870 | { | 901 | { |
| 871 | struct bf537mac_local *lp = netdev_priv(dev); | 902 | struct bfin_mac_local *lp = netdev_priv(dev); |
| 872 | int retval; | 903 | int retval; |
| 873 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); | 904 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); |
| 874 | 905 | ||
| @@ -891,8 +922,8 @@ static int bf537mac_open(struct net_device *dev) | |||
| 891 | phy_start(lp->phydev); | 922 | phy_start(lp->phydev); |
| 892 | phy_write(lp->phydev, MII_BMCR, BMCR_RESET); | 923 | phy_write(lp->phydev, MII_BMCR, BMCR_RESET); |
| 893 | setup_system_regs(dev); | 924 | setup_system_regs(dev); |
| 894 | bf537mac_disable(); | 925 | bfin_mac_disable(); |
| 895 | bf537mac_enable(); | 926 | bfin_mac_enable(); |
| 896 | pr_debug("hardware init finished\n"); | 927 | pr_debug("hardware init finished\n"); |
| 897 | netif_start_queue(dev); | 928 | netif_start_queue(dev); |
| 898 | netif_carrier_on(dev); | 929 | netif_carrier_on(dev); |
| @@ -906,9 +937,9 @@ static int bf537mac_open(struct net_device *dev) | |||
| 906 | * and not talk to the outside world. Caused by | 937 | * and not talk to the outside world. Caused by |
| 907 | * an 'ifconfig ethX down' | 938 | * an 'ifconfig ethX down' |
| 908 | */ | 939 | */ |
| 909 | static int bf537mac_close(struct net_device *dev) | 940 | static int bfin_mac_close(struct net_device *dev) |
| 910 | { | 941 | { |
| 911 | struct bf537mac_local *lp = netdev_priv(dev); | 942 | struct bfin_mac_local *lp = netdev_priv(dev); |
| 912 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); | 943 | pr_debug("%s: %s\n", dev->name, __FUNCTION__); |
| 913 | 944 | ||
| 914 | netif_stop_queue(dev); | 945 | netif_stop_queue(dev); |
| @@ -918,7 +949,7 @@ static int bf537mac_close(struct net_device *dev) | |||
| 918 | phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); | 949 | phy_write(lp->phydev, MII_BMCR, BMCR_PDOWN); |
| 919 | 950 | ||
| 920 | /* clear everything */ | 951 | /* clear everything */ |
| 921 | bf537mac_shutdown(dev); | 952 | bfin_mac_shutdown(dev); |
| 922 | 953 | ||
| 923 | /* free the rx/tx buffers */ | 954 | /* free the rx/tx buffers */ |
| 924 | desc_list_free(); | 955 | desc_list_free(); |
| @@ -926,46 +957,59 @@ static int bf537mac_close(struct net_device *dev) | |||
| 926 | return 0; | 957 | return 0; |
| 927 | } | 958 | } |
| 928 | 959 | ||
| 929 | static int __init bf537mac_probe(struct net_device *dev) | 960 | static int __init bfin_mac_probe(struct platform_device *pdev) |
| 930 | { | 961 | { |
| 931 | struct bf537mac_local *lp = netdev_priv(dev); | 962 | struct net_device *ndev; |
| 932 | int retval; | 963 | struct bfin_mac_local *lp; |
| 933 | int i; | 964 | int rc, i; |
| 965 | |||
| 966 | ndev = alloc_etherdev(sizeof(struct bfin_mac_local)); | ||
| 967 | if (!ndev) { | ||
| 968 | dev_err(&pdev->dev, "Cannot allocate net device!\n"); | ||
| 969 | return -ENOMEM; | ||
| 970 | } | ||
| 971 | |||
| 972 | SET_NETDEV_DEV(ndev, &pdev->dev); | ||
| 973 | platform_set_drvdata(pdev, ndev); | ||
| 974 | lp = netdev_priv(ndev); | ||
| 934 | 975 | ||
| 935 | /* Grab the MAC address in the MAC */ | 976 | /* Grab the MAC address in the MAC */ |
| 936 | *(__le32 *) (&(dev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); | 977 | *(__le32 *) (&(ndev->dev_addr[0])) = cpu_to_le32(bfin_read_EMAC_ADDRLO()); |
| 937 | *(__le16 *) (&(dev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); | 978 | *(__le16 *) (&(ndev->dev_addr[4])) = cpu_to_le16((u16) bfin_read_EMAC_ADDRHI()); |
| 938 | 979 | ||
| 939 | /* probe mac */ | 980 | /* probe mac */ |
| 940 | /*todo: how to proble? which is revision_register */ | 981 | /*todo: how to proble? which is revision_register */ |
| 941 | bfin_write_EMAC_ADDRLO(0x12345678); | 982 | bfin_write_EMAC_ADDRLO(0x12345678); |
| 942 | if (bfin_read_EMAC_ADDRLO() != 0x12345678) { | 983 | if (bfin_read_EMAC_ADDRLO() != 0x12345678) { |
| 943 | pr_debug("can't detect bf537 mac!\n"); | 984 | dev_err(&pdev->dev, "Cannot detect Blackfin on-chip ethernet MAC controller!\n"); |
| 944 | retval = -ENODEV; | 985 | rc = -ENODEV; |
| 945 | goto err_out; | 986 | goto out_err_probe_mac; |
| 946 | } | 987 | } |
| 947 | 988 | ||
| 948 | /* set the GPIO pins to Ethernet mode */ | 989 | /* set the GPIO pins to Ethernet mode */ |
| 949 | retval = setup_pin_mux(1); | 990 | rc = peripheral_request_list(pin_req, DRV_NAME); |
| 950 | if (retval) | 991 | if (rc) { |
| 951 | return retval; | 992 | dev_err(&pdev->dev, "Requesting peripherals failed!\n"); |
| 952 | 993 | rc = -EFAULT; | |
| 953 | /*Is it valid? (Did bootloader initialize it?) */ | 994 | goto out_err_setup_pin_mux; |
| 954 | if (!is_valid_ether_addr(dev->dev_addr)) { | ||
| 955 | /* Grab the MAC from the board somehow - this is done in the | ||
| 956 | arch/blackfin/mach-bf537/boards/eth_mac.c */ | ||
| 957 | bfin_get_ether_addr(dev->dev_addr); | ||
| 958 | } | 995 | } |
| 959 | 996 | ||
| 997 | /* | ||
| 998 | * Is it valid? (Did bootloader initialize it?) | ||
| 999 | * Grab the MAC from the board somehow | ||
| 1000 | * this is done in the arch/blackfin/mach-bfxxx/boards/eth_mac.c | ||
| 1001 | */ | ||
| 1002 | if (!is_valid_ether_addr(ndev->dev_addr)) | ||
| 1003 | bfin_get_ether_addr(ndev->dev_addr); | ||
| 1004 | |||
| 960 | /* If still not valid, get a random one */ | 1005 | /* If still not valid, get a random one */ |
| 961 | if (!is_valid_ether_addr(dev->dev_addr)) { | 1006 | if (!is_valid_ether_addr(ndev->dev_addr)) |
| 962 | random_ether_addr(dev->dev_addr); | 1007 | random_ether_addr(ndev->dev_addr); |
| 963 | } | ||
| 964 | 1008 | ||
| 965 | setup_mac_addr(dev->dev_addr); | 1009 | setup_mac_addr(ndev->dev_addr); |
| 966 | 1010 | ||
| 967 | /* MDIO bus initial */ | 1011 | /* MDIO bus initial */ |
| 968 | lp->mii_bus.priv = dev; | 1012 | lp->mii_bus.priv = ndev; |
| 969 | lp->mii_bus.read = mdiobus_read; | 1013 | lp->mii_bus.read = mdiobus_read; |
| 970 | lp->mii_bus.write = mdiobus_write; | 1014 | lp->mii_bus.write = mdiobus_write; |
| 971 | lp->mii_bus.reset = mdiobus_reset; | 1015 | lp->mii_bus.reset = mdiobus_reset; |
| @@ -975,86 +1019,86 @@ static int __init bf537mac_probe(struct net_device *dev) | |||
| 975 | for (i = 0; i < PHY_MAX_ADDR; ++i) | 1019 | for (i = 0; i < PHY_MAX_ADDR; ++i) |
| 976 | lp->mii_bus.irq[i] = PHY_POLL; | 1020 | lp->mii_bus.irq[i] = PHY_POLL; |
| 977 | 1021 | ||
| 978 | mdiobus_register(&lp->mii_bus); | 1022 | rc = mdiobus_register(&lp->mii_bus); |
| 1023 | if (rc) { | ||
| 1024 | dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); | ||
| 1025 | goto out_err_mdiobus_register; | ||
| 1026 | } | ||
| 979 | 1027 | ||
| 980 | retval = mii_probe(dev); | 1028 | rc = mii_probe(ndev); |
| 981 | if (retval) | 1029 | if (rc) { |
| 982 | return retval; | 1030 | dev_err(&pdev->dev, "MII Probe failed!\n"); |
| 1031 | goto out_err_mii_probe; | ||
| 1032 | } | ||
| 983 | 1033 | ||
| 984 | /* Fill in the fields of the device structure with ethernet values. */ | 1034 | /* Fill in the fields of the device structure with ethernet values. */ |
| 985 | ether_setup(dev); | 1035 | ether_setup(ndev); |
| 986 | 1036 | ||
| 987 | dev->open = bf537mac_open; | 1037 | ndev->open = bfin_mac_open; |
| 988 | dev->stop = bf537mac_close; | 1038 | ndev->stop = bfin_mac_close; |
| 989 | dev->hard_start_xmit = bf537mac_hard_start_xmit; | 1039 | ndev->hard_start_xmit = bfin_mac_hard_start_xmit; |
| 990 | dev->set_mac_address = bf537mac_set_mac_address; | 1040 | ndev->set_mac_address = bfin_mac_set_mac_address; |
| 991 | dev->tx_timeout = bf537mac_timeout; | 1041 | ndev->tx_timeout = bfin_mac_timeout; |
| 992 | dev->set_multicast_list = bf537mac_set_multicast_list; | 1042 | ndev->set_multicast_list = bfin_mac_set_multicast_list; |
| 993 | #ifdef CONFIG_NET_POLL_CONTROLLER | 1043 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 994 | dev->poll_controller = bf537mac_poll; | 1044 | ndev->poll_controller = bfin_mac_poll; |
| 995 | #endif | 1045 | #endif |
| 1046 | ndev->ethtool_ops = &bfin_mac_ethtool_ops; | ||
| 996 | 1047 | ||
| 997 | spin_lock_init(&lp->lock); | 1048 | spin_lock_init(&lp->lock); |
| 998 | 1049 | ||
| 999 | /* now, enable interrupts */ | 1050 | /* now, enable interrupts */ |
| 1000 | /* register irq handler */ | 1051 | /* register irq handler */ |
| 1001 | if (request_irq | 1052 | rc = request_irq(IRQ_MAC_RX, bfin_mac_interrupt, |
| 1002 | (IRQ_MAC_RX, bf537mac_interrupt, IRQF_DISABLED | IRQF_SHARED, | 1053 | IRQF_DISABLED | IRQF_SHARED, "EMAC_RX", ndev); |
| 1003 | "EMAC_RX", dev)) { | 1054 | if (rc) { |
| 1004 | printk(KERN_WARNING DRV_NAME | 1055 | dev_err(&pdev->dev, "Cannot request Blackfin MAC RX IRQ!\n"); |
| 1005 | ": Unable to attach BlackFin MAC RX interrupt\n"); | 1056 | rc = -EBUSY; |
| 1006 | return -EBUSY; | 1057 | goto out_err_request_irq; |
| 1007 | } | 1058 | } |
| 1008 | 1059 | ||
| 1009 | 1060 | rc = register_netdev(ndev); | |
| 1010 | retval = register_netdev(dev); | 1061 | if (rc) { |
| 1011 | if (retval == 0) { | 1062 | dev_err(&pdev->dev, "Cannot register net device!\n"); |
| 1012 | /* now, print out the card info, in a short format.. */ | 1063 | goto out_err_reg_ndev; |
| 1013 | printk(KERN_INFO "%s: Version %s, %s\n", | ||
| 1014 | DRV_NAME, DRV_VERSION, DRV_DESC); | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | err_out: | ||
| 1018 | return retval; | ||
| 1019 | } | ||
| 1020 | |||
| 1021 | static int bfin_mac_probe(struct platform_device *pdev) | ||
| 1022 | { | ||
| 1023 | struct net_device *ndev; | ||
| 1024 | |||
| 1025 | ndev = alloc_etherdev(sizeof(struct bf537mac_local)); | ||
| 1026 | if (!ndev) { | ||
| 1027 | printk(KERN_WARNING DRV_NAME ": could not allocate device\n"); | ||
| 1028 | return -ENOMEM; | ||
| 1029 | } | 1064 | } |
| 1030 | 1065 | ||
| 1031 | SET_NETDEV_DEV(ndev, &pdev->dev); | 1066 | /* now, print out the card info, in a short format.. */ |
| 1067 | dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); | ||
| 1032 | 1068 | ||
| 1033 | platform_set_drvdata(pdev, ndev); | 1069 | return 0; |
| 1034 | 1070 | ||
| 1035 | if (bf537mac_probe(ndev) != 0) { | 1071 | out_err_reg_ndev: |
| 1036 | platform_set_drvdata(pdev, NULL); | 1072 | free_irq(IRQ_MAC_RX, ndev); |
| 1037 | free_netdev(ndev); | 1073 | out_err_request_irq: |
| 1038 | printk(KERN_WARNING DRV_NAME ": not found\n"); | 1074 | out_err_mii_probe: |
| 1039 | return -ENODEV; | 1075 | mdiobus_unregister(&lp->mii_bus); |
| 1040 | } | 1076 | out_err_mdiobus_register: |
| 1077 | peripheral_free_list(pin_req); | ||
| 1078 | out_err_setup_pin_mux: | ||
| 1079 | out_err_probe_mac: | ||
| 1080 | platform_set_drvdata(pdev, NULL); | ||
| 1081 | free_netdev(ndev); | ||
| 1041 | 1082 | ||
| 1042 | return 0; | 1083 | return rc; |
| 1043 | } | 1084 | } |
| 1044 | 1085 | ||
| 1045 | static int bfin_mac_remove(struct platform_device *pdev) | 1086 | static int bfin_mac_remove(struct platform_device *pdev) |
| 1046 | { | 1087 | { |
| 1047 | struct net_device *ndev = platform_get_drvdata(pdev); | 1088 | struct net_device *ndev = platform_get_drvdata(pdev); |
| 1089 | struct bfin_mac_local *lp = netdev_priv(ndev); | ||
| 1048 | 1090 | ||
| 1049 | platform_set_drvdata(pdev, NULL); | 1091 | platform_set_drvdata(pdev, NULL); |
| 1050 | 1092 | ||
| 1093 | mdiobus_unregister(&lp->mii_bus); | ||
| 1094 | |||
| 1051 | unregister_netdev(ndev); | 1095 | unregister_netdev(ndev); |
| 1052 | 1096 | ||
| 1053 | free_irq(IRQ_MAC_RX, ndev); | 1097 | free_irq(IRQ_MAC_RX, ndev); |
| 1054 | 1098 | ||
| 1055 | free_netdev(ndev); | 1099 | free_netdev(ndev); |
| 1056 | 1100 | ||
| 1057 | setup_pin_mux(0); | 1101 | peripheral_free_list(pin_req); |
| 1058 | 1102 | ||
| 1059 | return 0; | 1103 | return 0; |
| 1060 | } | 1104 | } |
| @@ -1065,7 +1109,7 @@ static int bfin_mac_suspend(struct platform_device *pdev, pm_message_t mesg) | |||
| 1065 | struct net_device *net_dev = platform_get_drvdata(pdev); | 1109 | struct net_device *net_dev = platform_get_drvdata(pdev); |
| 1066 | 1110 | ||
| 1067 | if (netif_running(net_dev)) | 1111 | if (netif_running(net_dev)) |
| 1068 | bf537mac_close(net_dev); | 1112 | bfin_mac_close(net_dev); |
| 1069 | 1113 | ||
| 1070 | return 0; | 1114 | return 0; |
| 1071 | } | 1115 | } |
| @@ -1075,7 +1119,7 @@ static int bfin_mac_resume(struct platform_device *pdev) | |||
| 1075 | struct net_device *net_dev = platform_get_drvdata(pdev); | 1119 | struct net_device *net_dev = platform_get_drvdata(pdev); |
| 1076 | 1120 | ||
| 1077 | if (netif_running(net_dev)) | 1121 | if (netif_running(net_dev)) |
| 1078 | bf537mac_open(net_dev); | 1122 | bfin_mac_open(net_dev); |
| 1079 | 1123 | ||
| 1080 | return 0; | 1124 | return 0; |
| 1081 | } | 1125 | } |
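
The bfin_mac.c changes above fold the old two-stage bf537mac_probe()/bfin_mac_probe() pair into a single platform probe and, more importantly, give every failure a dedicated unwind path: each goto label (out_err_reg_ndev down to out_err_probe_mac) releases exactly the resources acquired before the failing step, in reverse order. Below is a minimal, stand-alone sketch of that unwind pattern; the acquire/release helpers are placeholders standing in for alloc_etherdev(), peripheral_request_list(), mdiobus_register(), request_irq() and their counterparts, not the driver's real calls.

#include <stdio.h>

/* Placeholder resource helpers; in the driver these would be the real
 * allocation, pin-mux, MDIO and IRQ calls. */
static int acquire_a(void) { return 0; }
static void release_a(void) { }
static int acquire_b(void) { return -1; }	/* pretend this step fails */
static void release_b(void) { }
static int acquire_c(void) { return 0; }

static int probe_sketch(void)
{
	int rc;

	rc = acquire_a();
	if (rc)
		goto out_err_a;

	rc = acquire_b();
	if (rc)
		goto out_err_b;

	rc = acquire_c();
	if (rc)
		goto out_err_c;

	return 0;			/* everything held, probe succeeds */

out_err_c:
	release_b();			/* undo only what was acquired ... */
out_err_b:
	release_a();			/* ... and undo it in reverse order */
out_err_a:
	return rc;
}

int main(void)
{
	printf("probe_sketch() = %d\n", probe_sketch());
	return 0;
}

The same discipline carries over to bfin_mac_remove(), which now also unregisters the MDIO bus that probe registered.
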
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h index f774d5a36942..beff51064ff4 100644 --- a/drivers/net/bfin_mac.h +++ b/drivers/net/bfin_mac.h | |||
| @@ -49,7 +49,7 @@ struct net_dma_desc_tx { | |||
| 49 | struct status_area_tx status; | 49 | struct status_area_tx status; |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | struct bf537mac_local { | 52 | struct bfin_mac_local { |
| 53 | /* | 53 | /* |
| 54 | * these are things that the kernel wants me to keep, so users | 54 | * these are things that the kernel wants me to keep, so users |
| 55 | * can find out semi-useless statistics of how well the card is | 55 | * can find out semi-useless statistics of how well the card is |
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c index 83bda6ccde98..56f50491a453 100644 --- a/drivers/net/eepro.c +++ b/drivers/net/eepro.c | |||
| @@ -633,7 +633,7 @@ static void __init printEEPROMInfo(struct net_device *dev) | |||
| 633 | printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); | 633 | printk(KERN_DEBUG " PC: %d\n", GetBit(Word,ee_PC)); |
| 634 | printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); | 634 | printk(KERN_DEBUG " TPE/AUI: %d\n", GetBit(Word,ee_TPE_AUI)); |
| 635 | printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); | 635 | printk(KERN_DEBUG " Jabber: %d\n", GetBit(Word,ee_Jabber)); |
| 636 | printk(KERN_DEBUG " AutoPort: %d\n", GetBit(!Word,ee_Jabber)); | 636 | printk(KERN_DEBUG " AutoPort: %d\n", !GetBit(Word,ee_AutoPort)); |
| 637 | printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); | 637 | printk(KERN_DEBUG " Duplex: %d\n", GetBit(Word,ee_Duplex)); |
| 638 | } | 638 | } |
| 639 | 639 | ||
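
The single eepro.c line above fixes a misplaced negation: GetBit(!Word, ee_Jabber) inverts the whole EEPROM word before extracting a bit, which collapses the word to 0 or 1 and also reads the Jabber position instead of AutoPort. The replacement reads the AutoPort bit first and inverts the result. A tiny stand-alone check of the two expression shapes, with an assumed shift-and-mask GetBit macro and an illustrative bit position (the real definitions live in eepro.c):

#include <stdio.h>

/* Assumed shape of the driver's macro: extract one bit of a word. */
#define GetBit(x, pos)	(((x) >> (pos)) & 1)

#define ee_AutoPort	10	/* illustrative bit position, not the real one */

int main(void)
{
	unsigned int word = 1u << 3;	/* AutoPort bit clear, another bit set */

	/* Old shape: !word is 0 or 1, so the extracted bit is meaningless. */
	printf("old expression: %d\n", GetBit(!word, ee_AutoPort));
	/* Fixed shape: read the AutoPort bit, then invert the result. */
	printf("new expression: %d\n", !GetBit(word, ee_AutoPort));
	return 0;
}
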
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c index e5e6352556fa..d21b7ab64bd1 100644 --- a/drivers/net/fec_mpc52xx.c +++ b/drivers/net/fec_mpc52xx.c | |||
| @@ -491,20 +491,23 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id) | |||
| 491 | 491 | ||
| 492 | out_be32(&fec->ievent, ievent); /* clear pending events */ | 492 | out_be32(&fec->ievent, ievent); /* clear pending events */ |
| 493 | 493 | ||
| 494 | if (ievent & ~(FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { | 494 | /* on fifo error, soft-reset fec */ |
| 495 | if (ievent & ~FEC_IEVENT_TFINT) | 495 | if (ievent & (FEC_IEVENT_RFIFO_ERROR | FEC_IEVENT_XFIFO_ERROR)) { |
| 496 | dev_dbg(&dev->dev, "ievent: %08x\n", ievent); | 496 | |
| 497 | if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) | ||
| 498 | dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); | ||
| 499 | if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) | ||
| 500 | dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); | ||
| 501 | |||
| 502 | mpc52xx_fec_reset(dev); | ||
| 503 | |||
| 504 | netif_wake_queue(dev); | ||
| 497 | return IRQ_HANDLED; | 505 | return IRQ_HANDLED; |
| 498 | } | 506 | } |
| 499 | 507 | ||
| 500 | if (net_ratelimit() && (ievent & FEC_IEVENT_RFIFO_ERROR)) | 508 | if (ievent & ~FEC_IEVENT_TFINT) |
| 501 | dev_warn(&dev->dev, "FEC_IEVENT_RFIFO_ERROR\n"); | 509 | dev_dbg(&dev->dev, "ievent: %08x\n", ievent); |
| 502 | if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) | ||
| 503 | dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); | ||
| 504 | 510 | ||
| 505 | mpc52xx_fec_reset(dev); | ||
| 506 | |||
| 507 | netif_wake_queue(dev); | ||
| 508 | return IRQ_HANDLED; | 511 | return IRQ_HANDLED; |
| 509 | } | 512 | } |
| 510 | 513 | ||
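
The fec_mpc52xx interrupt handler is rearranged so that the two FIFO-error bits are tested explicitly and the soft reset (plus TX queue wake-up) happens only on that path; everything else is treated as a routine event and merely logged. A compressed sketch of the resulting control flow, with made-up bit values, printf in place of the rate-limited dev_warn()/dev_dbg() calls, and 1 standing in for IRQ_HANDLED:

#include <stdio.h>

#define IEVENT_RFIFO_ERROR	(1u << 0)	/* illustrative bit layout */
#define IEVENT_XFIFO_ERROR	(1u << 1)
#define IEVENT_TFINT		(1u << 2)

static void reset_controller(void) { puts("soft reset + wake TX queue"); }

/* Sketch of the new ISR shape: FIFO errors get a dedicated early-return
 * path that resets the controller; other events are only logged. */
static int isr_sketch(unsigned int ievent)
{
	if (ievent & (IEVENT_RFIFO_ERROR | IEVENT_XFIFO_ERROR)) {
		if (ievent & IEVENT_RFIFO_ERROR)
			puts("warn: RX FIFO error");
		if (ievent & IEVENT_XFIFO_ERROR)
			puts("warn: TX FIFO error");
		reset_controller();
		return 1;		/* IRQ_HANDLED */
	}

	if (ievent & ~IEVENT_TFINT)
		printf("debug: ievent %#x\n", ievent);

	return 1;			/* IRQ_HANDLED */
}

int main(void)
{
	isr_sketch(IEVENT_TFINT);
	isr_sketch(IEVENT_RFIFO_ERROR);
	return 0;
}
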
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c index 99a4b990939f..587afe7be689 100644 --- a/drivers/net/gianfar.c +++ b/drivers/net/gianfar.c | |||
| @@ -131,8 +131,6 @@ static void free_skb_resources(struct gfar_private *priv); | |||
| 131 | static void gfar_set_multi(struct net_device *dev); | 131 | static void gfar_set_multi(struct net_device *dev); |
| 132 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); | 132 | static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); |
| 133 | static void gfar_configure_serdes(struct net_device *dev); | 133 | static void gfar_configure_serdes(struct net_device *dev); |
| 134 | extern int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, int regnum, u16 value); | ||
| 135 | extern int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); | ||
| 136 | #ifdef CONFIG_GFAR_NAPI | 134 | #ifdef CONFIG_GFAR_NAPI |
| 137 | static int gfar_poll(struct napi_struct *napi, int budget); | 135 | static int gfar_poll(struct napi_struct *napi, int budget); |
| 138 | #endif | 136 | #endif |
| @@ -477,24 +475,30 @@ static int init_phy(struct net_device *dev) | |||
| 477 | return 0; | 475 | return 0; |
| 478 | } | 476 | } |
| 479 | 477 | ||
| 478 | /* | ||
| 479 | * Initialize TBI PHY interface for communicating with the | ||
| 480 | * SERDES lynx PHY on the chip. We communicate with this PHY | ||
| 481 | * through the MDIO bus on each controller, treating it as a | ||
| 482 | * "normal" PHY at the address found in the TBIPA register. We assume | ||
| 483 | * that the TBIPA register is valid. Either the MDIO bus code will set | ||
| 484 | * it to a value that doesn't conflict with other PHYs on the bus, or the | ||
| 485 | * value doesn't matter, as there are no other PHYs on the bus. | ||
| 486 | */ | ||
| 480 | static void gfar_configure_serdes(struct net_device *dev) | 487 | static void gfar_configure_serdes(struct net_device *dev) |
| 481 | { | 488 | { |
| 482 | struct gfar_private *priv = netdev_priv(dev); | 489 | struct gfar_private *priv = netdev_priv(dev); |
| 483 | struct gfar_mii __iomem *regs = | 490 | struct gfar_mii __iomem *regs = |
| 484 | (void __iomem *)&priv->regs->gfar_mii_regs; | 491 | (void __iomem *)&priv->regs->gfar_mii_regs; |
| 492 | int tbipa = gfar_read(&priv->regs->tbipa); | ||
| 485 | 493 | ||
| 486 | /* Initialise TBI i/f to communicate with serdes (lynx phy) */ | 494 | /* Single clk mode, mii mode off(for serdes communication) */ |
| 495 | gfar_local_mdio_write(regs, tbipa, MII_TBICON, TBICON_CLK_SELECT); | ||
| 487 | 496 | ||
| 488 | /* Single clk mode, mii mode off(for aerdes communication) */ | 497 | gfar_local_mdio_write(regs, tbipa, MII_ADVERTISE, |
| 489 | gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT); | ||
| 490 | |||
| 491 | /* Supported pause and full-duplex, no half-duplex */ | ||
| 492 | gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE, | ||
| 493 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | | 498 | ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE | |
| 494 | ADVERTISE_1000XPSE_ASYM); | 499 | ADVERTISE_1000XPSE_ASYM); |
| 495 | 500 | ||
| 496 | /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */ | 501 | gfar_local_mdio_write(regs, tbipa, MII_BMCR, BMCR_ANENABLE | |
| 497 | gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE | | ||
| 498 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); | 502 | BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000); |
| 499 | } | 503 | } |
| 500 | 504 | ||
| @@ -541,9 +545,6 @@ static void init_registers(struct net_device *dev) | |||
| 541 | 545 | ||
| 542 | /* Initialize the Minimum Frame Length Register */ | 546 | /* Initialize the Minimum Frame Length Register */ |
| 543 | gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); | 547 | gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); |
| 544 | |||
| 545 | /* Assign the TBI an address which won't conflict with the PHYs */ | ||
| 546 | gfar_write(&priv->regs->tbipa, TBIPA_VALUE); | ||
| 547 | } | 548 | } |
| 548 | 549 | ||
| 549 | 550 | ||
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h index 0d0883609469..fd487be3993e 100644 --- a/drivers/net/gianfar.h +++ b/drivers/net/gianfar.h | |||
| @@ -137,7 +137,6 @@ extern const char gfar_driver_version[]; | |||
| 137 | #define DEFAULT_RXCOUNT 0 | 137 | #define DEFAULT_RXCOUNT 0 |
| 138 | #endif /* CONFIG_GFAR_NAPI */ | 138 | #endif /* CONFIG_GFAR_NAPI */ |
| 139 | 139 | ||
| 140 | #define TBIPA_VALUE 0x1f | ||
| 141 | #define MIIMCFG_INIT_VALUE 0x00000007 | 140 | #define MIIMCFG_INIT_VALUE 0x00000007 |
| 142 | #define MIIMCFG_RESET 0x80000000 | 141 | #define MIIMCFG_RESET 0x80000000 |
| 143 | #define MIIMIND_BUSY 0x00000001 | 142 | #define MIIMIND_BUSY 0x00000001 |
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c index b8898927236a..ebcfb27a904e 100644 --- a/drivers/net/gianfar_mii.c +++ b/drivers/net/gianfar_mii.c | |||
| @@ -78,7 +78,6 @@ int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, | |||
| 78 | * same as system mdio bus, used for controlling the external PHYs, for eg. | 78 | * same as system mdio bus, used for controlling the external PHYs, for eg. |
| 79 | */ | 79 | */ |
| 80 | int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) | 80 | int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum) |
| 81 | |||
| 82 | { | 81 | { |
| 83 | u16 value; | 82 | u16 value; |
| 84 | 83 | ||
| @@ -122,7 +121,7 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) | |||
| 122 | } | 121 | } |
| 123 | 122 | ||
| 124 | /* Reset the MIIM registers, and wait for the bus to free */ | 123 | /* Reset the MIIM registers, and wait for the bus to free */ |
| 125 | int gfar_mdio_reset(struct mii_bus *bus) | 124 | static int gfar_mdio_reset(struct mii_bus *bus) |
| 126 | { | 125 | { |
| 127 | struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; | 126 | struct gfar_mii __iomem *regs = (void __iomem *)bus->priv; |
| 128 | unsigned int timeout = PHY_INIT_TIMEOUT; | 127 | unsigned int timeout = PHY_INIT_TIMEOUT; |
| @@ -152,14 +151,15 @@ int gfar_mdio_reset(struct mii_bus *bus) | |||
| 152 | } | 151 | } |
| 153 | 152 | ||
| 154 | 153 | ||
| 155 | int gfar_mdio_probe(struct device *dev) | 154 | static int gfar_mdio_probe(struct device *dev) |
| 156 | { | 155 | { |
| 157 | struct platform_device *pdev = to_platform_device(dev); | 156 | struct platform_device *pdev = to_platform_device(dev); |
| 158 | struct gianfar_mdio_data *pdata; | 157 | struct gianfar_mdio_data *pdata; |
| 159 | struct gfar_mii __iomem *regs; | 158 | struct gfar_mii __iomem *regs; |
| 159 | struct gfar __iomem *enet_regs; | ||
| 160 | struct mii_bus *new_bus; | 160 | struct mii_bus *new_bus; |
| 161 | struct resource *r; | 161 | struct resource *r; |
| 162 | int err = 0; | 162 | int i, err = 0; |
| 163 | 163 | ||
| 164 | if (NULL == dev) | 164 | if (NULL == dev) |
| 165 | return -EINVAL; | 165 | return -EINVAL; |
| @@ -199,6 +199,34 @@ int gfar_mdio_probe(struct device *dev) | |||
| 199 | new_bus->dev = dev; | 199 | new_bus->dev = dev; |
| 200 | dev_set_drvdata(dev, new_bus); | 200 | dev_set_drvdata(dev, new_bus); |
| 201 | 201 | ||
| 202 | /* | ||
| 203 | * This is mildly evil, but so is our hardware for doing this. | ||
| 204 | * Also, we have to cast back to struct gfar_mii because of | ||
| 205 | * definition weirdness done in gianfar.h. | ||
| 206 | */ | ||
| 207 | enet_regs = (struct gfar __iomem *) | ||
| 208 | ((char *)regs - offsetof(struct gfar, gfar_mii_regs)); | ||
| 209 | |||
| 210 | /* Scan the bus, looking for an empty spot for TBIPA */ | ||
| 211 | gfar_write(&enet_regs->tbipa, 0); | ||
| 212 | for (i = PHY_MAX_ADDR; i > 0; i--) { | ||
| 213 | u32 phy_id; | ||
| 214 | int r; | ||
| 215 | |||
| 216 | r = get_phy_id(new_bus, i, &phy_id); | ||
| 217 | if (r) | ||
| 218 | return r; | ||
| 219 | |||
| 220 | if (phy_id == 0xffffffff) | ||
| 221 | break; | ||
| 222 | } | ||
| 223 | |||
| 224 | /* The bus is full. We don't support using 31 PHYs, sorry */ | ||
| 225 | if (i == 0) | ||
| 226 | return -EBUSY; | ||
| 227 | |||
| 228 | gfar_write(&enet_regs->tbipa, i); | ||
| 229 | |||
| 202 | err = mdiobus_register(new_bus); | 230 | err = mdiobus_register(new_bus); |
| 203 | 231 | ||
| 204 | if (0 != err) { | 232 | if (0 != err) { |
| @@ -218,7 +246,7 @@ reg_map_fail: | |||
| 218 | } | 246 | } |
| 219 | 247 | ||
| 220 | 248 | ||
| 221 | int gfar_mdio_remove(struct device *dev) | 249 | static int gfar_mdio_remove(struct device *dev) |
| 222 | { | 250 | { |
| 223 | struct mii_bus *bus = dev_get_drvdata(dev); | 251 | struct mii_bus *bus = dev_get_drvdata(dev); |
| 224 | 252 | ||
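
The new block in gfar_mdio_probe() chooses the TBI PHY's address at run time instead of relying on the old hard-coded TBIPA_VALUE (which gianfar.c used to program in init_registers() and which the header no longer defines). It first clears TBIPA so the TBI PHY stops answering at any stale address, then walks down from the top of the MDIO address range and keeps the first address whose PHY ID reads back as all ones, meaning nothing is attached there; gfar_configure_serdes() later reads that address back out of TBIPA. A stand-alone sketch of the scan, with a stub in place of phylib's get_phy_id():

#include <stdio.h>

#define PHY_MAX_ADDR	32

/* Stub standing in for get_phy_id(): pretend PHYs sit at addresses 0 and 1,
 * and every other address reads back as all ones (no device present). */
static int read_phy_id(int addr, unsigned int *id)
{
	*id = (addr <= 1) ? 0x01410cc2 : 0xffffffff;
	return 0;
}

/* Scan downward for an address with nothing on it, as the patch does. */
static int find_free_mdio_addr(void)
{
	int i;

	for (i = PHY_MAX_ADDR - 1; i > 0; i--) {
		unsigned int id;

		if (read_phy_id(i, &id))
			return -1;	/* bus read failed */
		if (id == 0xffffffff)
			return i;	/* empty slot: usable for the TBI PHY */
	}
	return -1;			/* bus full */
}

int main(void)
{
	printf("TBI PHY address: %d\n", find_free_mdio_addr());
	return 0;
}
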
diff --git a/drivers/net/gianfar_mii.h b/drivers/net/gianfar_mii.h index b373091c7031..2af28b16a0e2 100644 --- a/drivers/net/gianfar_mii.h +++ b/drivers/net/gianfar_mii.h | |||
| @@ -41,6 +41,9 @@ struct gfar_mii { | |||
| 41 | 41 | ||
| 42 | int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); | 42 | int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum); |
| 43 | int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); | 43 | int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); |
| 44 | int gfar_local_mdio_write(struct gfar_mii __iomem *regs, int mii_id, | ||
| 45 | int regnum, u16 value); | ||
| 46 | int gfar_local_mdio_read(struct gfar_mii __iomem *regs, int mii_id, int regnum); | ||
| 44 | int __init gfar_mdio_init(void); | 47 | int __init gfar_mdio_init(void); |
| 45 | void gfar_mdio_exit(void); | 48 | void gfar_mdio_exit(void); |
| 46 | #endif /* GIANFAR_PHY_H */ | 49 | #endif /* GIANFAR_PHY_H */ |
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 1da55dd2a5a0..9d5721287d6f 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c | |||
| @@ -148,13 +148,13 @@ static void sp_xmit_on_air(unsigned long channel) | |||
| 148 | 148 | ||
| 149 | if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { | 149 | if (((sp->status1 & SIXP_DCD_MASK) == 0) && (random < sp->persistence)) { |
| 150 | sp->led_state = 0x70; | 150 | sp->led_state = 0x70; |
| 151 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 151 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 152 | sp->tx_enable = 1; | 152 | sp->tx_enable = 1; |
| 153 | actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2); | 153 | actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); |
| 154 | sp->xleft -= actual; | 154 | sp->xleft -= actual; |
| 155 | sp->xhead += actual; | 155 | sp->xhead += actual; |
| 156 | sp->led_state = 0x60; | 156 | sp->led_state = 0x60; |
| 157 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 157 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 158 | sp->status2 = 0; | 158 | sp->status2 = 0; |
| 159 | } else | 159 | } else |
| 160 | mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); | 160 | mod_timer(&sp->tx_t, jiffies + ((when + 1) * HZ) / 100); |
| @@ -220,13 +220,13 @@ static void sp_encaps(struct sixpack *sp, unsigned char *icp, int len) | |||
| 220 | */ | 220 | */ |
| 221 | if (sp->duplex == 1) { | 221 | if (sp->duplex == 1) { |
| 222 | sp->led_state = 0x70; | 222 | sp->led_state = 0x70; |
| 223 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 223 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 224 | sp->tx_enable = 1; | 224 | sp->tx_enable = 1; |
| 225 | actual = sp->tty->driver->write(sp->tty, sp->xbuff, count); | 225 | actual = sp->tty->ops->write(sp->tty, sp->xbuff, count); |
| 226 | sp->xleft = count - actual; | 226 | sp->xleft = count - actual; |
| 227 | sp->xhead = sp->xbuff + actual; | 227 | sp->xhead = sp->xbuff + actual; |
| 228 | sp->led_state = 0x60; | 228 | sp->led_state = 0x60; |
| 229 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 229 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 230 | } else { | 230 | } else { |
| 231 | sp->xleft = count; | 231 | sp->xleft = count; |
| 232 | sp->xhead = sp->xbuff; | 232 | sp->xhead = sp->xbuff; |
| @@ -444,7 +444,7 @@ static void sixpack_write_wakeup(struct tty_struct *tty) | |||
| 444 | } | 444 | } |
| 445 | 445 | ||
| 446 | if (sp->tx_enable) { | 446 | if (sp->tx_enable) { |
| 447 | actual = tty->driver->write(tty, sp->xhead, sp->xleft); | 447 | actual = tty->ops->write(tty, sp->xhead, sp->xleft); |
| 448 | sp->xleft -= actual; | 448 | sp->xleft -= actual; |
| 449 | sp->xhead += actual; | 449 | sp->xhead += actual; |
| 450 | } | 450 | } |
| @@ -491,9 +491,7 @@ static void sixpack_receive_buf(struct tty_struct *tty, | |||
| 491 | sixpack_decode(sp, buf, count1); | 491 | sixpack_decode(sp, buf, count1); |
| 492 | 492 | ||
| 493 | sp_put(sp); | 493 | sp_put(sp); |
| 494 | if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) | 494 | tty_unthrottle(tty); |
| 495 | && tty->driver->unthrottle) | ||
| 496 | tty->driver->unthrottle(tty); | ||
| 497 | } | 495 | } |
| 498 | 496 | ||
| 499 | /* | 497 | /* |
| @@ -554,8 +552,8 @@ static void resync_tnc(unsigned long channel) | |||
| 554 | /* resync the TNC */ | 552 | /* resync the TNC */ |
| 555 | 553 | ||
| 556 | sp->led_state = 0x60; | 554 | sp->led_state = 0x60; |
| 557 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 555 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 558 | sp->tty->driver->write(sp->tty, &resync_cmd, 1); | 556 | sp->tty->ops->write(sp->tty, &resync_cmd, 1); |
| 559 | 557 | ||
| 560 | 558 | ||
| 561 | /* Start resync timer again -- the TNC might be still absent */ | 559 | /* Start resync timer again -- the TNC might be still absent */ |
| @@ -573,7 +571,7 @@ static inline int tnc_init(struct sixpack *sp) | |||
| 573 | 571 | ||
| 574 | tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); | 572 | tnc_set_sync_state(sp, TNC_UNSYNC_STARTUP); |
| 575 | 573 | ||
| 576 | sp->tty->driver->write(sp->tty, &inbyte, 1); | 574 | sp->tty->ops->write(sp->tty, &inbyte, 1); |
| 577 | 575 | ||
| 578 | del_timer(&sp->resync_t); | 576 | del_timer(&sp->resync_t); |
| 579 | sp->resync_t.data = (unsigned long) sp; | 577 | sp->resync_t.data = (unsigned long) sp; |
| @@ -601,6 +599,8 @@ static int sixpack_open(struct tty_struct *tty) | |||
| 601 | 599 | ||
| 602 | if (!capable(CAP_NET_ADMIN)) | 600 | if (!capable(CAP_NET_ADMIN)) |
| 603 | return -EPERM; | 601 | return -EPERM; |
| 602 | if (tty->ops->write == NULL) | ||
| 603 | return -EOPNOTSUPP; | ||
| 604 | 604 | ||
| 605 | dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup); | 605 | dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup); |
| 606 | if (!dev) { | 606 | if (!dev) { |
| @@ -914,9 +914,9 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) | |||
| 914 | } else { /* output watchdog char if idle */ | 914 | } else { /* output watchdog char if idle */ |
| 915 | if ((sp->status2 != 0) && (sp->duplex == 1)) { | 915 | if ((sp->status2 != 0) && (sp->duplex == 1)) { |
| 916 | sp->led_state = 0x70; | 916 | sp->led_state = 0x70; |
| 917 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 917 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 918 | sp->tx_enable = 1; | 918 | sp->tx_enable = 1; |
| 919 | actual = sp->tty->driver->write(sp->tty, sp->xbuff, sp->status2); | 919 | actual = sp->tty->ops->write(sp->tty, sp->xbuff, sp->status2); |
| 920 | sp->xleft -= actual; | 920 | sp->xleft -= actual; |
| 921 | sp->xhead += actual; | 921 | sp->xhead += actual; |
| 922 | sp->led_state = 0x60; | 922 | sp->led_state = 0x60; |
| @@ -926,7 +926,7 @@ static void decode_prio_command(struct sixpack *sp, unsigned char cmd) | |||
| 926 | } | 926 | } |
| 927 | 927 | ||
| 928 | /* needed to trigger the TNC watchdog */ | 928 | /* needed to trigger the TNC watchdog */ |
| 929 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 929 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 930 | 930 | ||
| 931 | /* if the state byte has been received, the TNC is present, | 931 | /* if the state byte has been received, the TNC is present, |
| 932 | so the resync timer can be reset. */ | 932 | so the resync timer can be reset. */ |
| @@ -956,12 +956,12 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) | |||
| 956 | if ((sp->status & SIXP_RX_DCD_MASK) == | 956 | if ((sp->status & SIXP_RX_DCD_MASK) == |
| 957 | SIXP_RX_DCD_MASK) { | 957 | SIXP_RX_DCD_MASK) { |
| 958 | sp->led_state = 0x68; | 958 | sp->led_state = 0x68; |
| 959 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 959 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 960 | } | 960 | } |
| 961 | } else { | 961 | } else { |
| 962 | sp->led_state = 0x60; | 962 | sp->led_state = 0x60; |
| 963 | /* fill trailing bytes with zeroes */ | 963 | /* fill trailing bytes with zeroes */ |
| 964 | sp->tty->driver->write(sp->tty, &sp->led_state, 1); | 964 | sp->tty->ops->write(sp->tty, &sp->led_state, 1); |
| 965 | rest = sp->rx_count; | 965 | rest = sp->rx_count; |
| 966 | if (rest != 0) | 966 | if (rest != 0) |
| 967 | for (i = rest; i <= 3; i++) | 967 | for (i = rest; i <= 3; i++) |
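
This 6pack.c hunk, and the mkiss, irtty-sir, ppp_async and ppp_synctty hunks that follow, are part of the conversion of tty callbacks from tty->driver->... method pointers to the shared const tty->ops operations table, with helpers such as tty_unthrottle() and tty_driver_flush_buffer() replacing the open-coded flag tests. The line disciplines also start refusing, at open time, any tty whose driver has no write method. A schematic of the pattern, with cut-down stand-ins for struct tty_struct and struct tty_operations:

#include <stdio.h>
#include <stddef.h>
#include <errno.h>

/* Cut-down stand-ins for struct tty_operations / struct tty_struct. */
struct tty;
struct tty_operations {
	int (*write)(struct tty *tty, const unsigned char *buf, int count);
};
struct tty {
	const struct tty_operations *ops;
};

static int ldisc_open(struct tty *tty)
{
	/* New rule: bail out early if the driver cannot write at all. */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	return 0;
}

static int ldisc_send(struct tty *tty, const unsigned char *buf, int count)
{
	/* Calls go through the shared ops table, not tty->driver. */
	return tty->ops->write(tty, buf, count);
}

static int demo_write(struct tty *tty, const unsigned char *buf, int count)
{
	(void)tty; (void)buf;
	return count;			/* pretend everything was queued */
}

int main(void)
{
	const struct tty_operations ops = { .write = demo_write };
	struct tty tty = { .ops = &ops };

	if (ldisc_open(&tty) == 0)
		printf("sent %d bytes\n",
		       ldisc_send(&tty, (const unsigned char *)"test", 4));
	return 0;
}

Checking ops->write once at open time keeps every later transmit path free of NULL tests.
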
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 30c9b3b0d131..65166035aca0 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c | |||
| @@ -516,7 +516,7 @@ static void ax_encaps(struct net_device *dev, unsigned char *icp, int len) | |||
| 516 | spin_unlock_bh(&ax->buflock); | 516 | spin_unlock_bh(&ax->buflock); |
| 517 | 517 | ||
| 518 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); | 518 | set_bit(TTY_DO_WRITE_WAKEUP, &ax->tty->flags); |
| 519 | actual = ax->tty->driver->write(ax->tty, ax->xbuff, count); | 519 | actual = ax->tty->ops->write(ax->tty, ax->xbuff, count); |
| 520 | ax->stats.tx_packets++; | 520 | ax->stats.tx_packets++; |
| 521 | ax->stats.tx_bytes += actual; | 521 | ax->stats.tx_bytes += actual; |
| 522 | 522 | ||
| @@ -546,7 +546,7 @@ static int ax_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 546 | } | 546 | } |
| 547 | 547 | ||
| 548 | printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, | 548 | printk(KERN_ERR "mkiss: %s: transmit timed out, %s?\n", dev->name, |
| 549 | (ax->tty->driver->chars_in_buffer(ax->tty) || ax->xleft) ? | 549 | (ax->tty->ops->chars_in_buffer(ax->tty) || ax->xleft) ? |
| 550 | "bad line quality" : "driver error"); | 550 | "bad line quality" : "driver error"); |
| 551 | 551 | ||
| 552 | ax->xleft = 0; | 552 | ax->xleft = 0; |
| @@ -736,6 +736,8 @@ static int mkiss_open(struct tty_struct *tty) | |||
| 736 | 736 | ||
| 737 | if (!capable(CAP_NET_ADMIN)) | 737 | if (!capable(CAP_NET_ADMIN)) |
| 738 | return -EPERM; | 738 | return -EPERM; |
| 739 | if (tty->ops->write == NULL) | ||
| 740 | return -EOPNOTSUPP; | ||
| 739 | 741 | ||
| 740 | dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup); | 742 | dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup); |
| 741 | if (!dev) { | 743 | if (!dev) { |
| @@ -754,8 +756,7 @@ static int mkiss_open(struct tty_struct *tty) | |||
| 754 | tty->disc_data = ax; | 756 | tty->disc_data = ax; |
| 755 | tty->receive_room = 65535; | 757 | tty->receive_room = 65535; |
| 756 | 758 | ||
| 757 | if (tty->driver->flush_buffer) | 759 | tty_driver_flush_buffer(tty); |
| 758 | tty->driver->flush_buffer(tty); | ||
| 759 | 760 | ||
| 760 | /* Restore default settings */ | 761 | /* Restore default settings */ |
| 761 | dev->type = ARPHRD_AX25; | 762 | dev->type = ARPHRD_AX25; |
| @@ -935,9 +936,7 @@ static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
| 935 | } | 936 | } |
| 936 | 937 | ||
| 937 | mkiss_put(ax); | 938 | mkiss_put(ax); |
| 938 | if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) | 939 | tty_unthrottle(tty); |
| 939 | && tty->driver->unthrottle) | ||
| 940 | tty->driver->unthrottle(tty); | ||
| 941 | } | 940 | } |
| 942 | 941 | ||
| 943 | /* | 942 | /* |
| @@ -962,7 +961,7 @@ static void mkiss_write_wakeup(struct tty_struct *tty) | |||
| 962 | goto out; | 961 | goto out; |
| 963 | } | 962 | } |
| 964 | 963 | ||
| 965 | actual = tty->driver->write(tty, ax->xhead, ax->xleft); | 964 | actual = tty->ops->write(tty, ax->xhead, ax->xleft); |
| 966 | ax->xleft -= actual; | 965 | ax->xleft -= actual; |
| 967 | ax->xhead += actual; | 966 | ax->xhead += actual; |
| 968 | 967 | ||
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c index fc753d7f674e..e6f40b7f9041 100644 --- a/drivers/net/irda/irtty-sir.c +++ b/drivers/net/irda/irtty-sir.c | |||
| @@ -64,7 +64,7 @@ static int irtty_chars_in_buffer(struct sir_dev *dev) | |||
| 64 | IRDA_ASSERT(priv != NULL, return -1;); | 64 | IRDA_ASSERT(priv != NULL, return -1;); |
| 65 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); | 65 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); |
| 66 | 66 | ||
| 67 | return priv->tty->driver->chars_in_buffer(priv->tty); | 67 | return tty_chars_in_buffer(priv->tty); |
| 68 | } | 68 | } |
| 69 | 69 | ||
| 70 | /* Wait (sleep) until underlaying hardware finished transmission | 70 | /* Wait (sleep) until underlaying hardware finished transmission |
| @@ -93,10 +93,8 @@ static void irtty_wait_until_sent(struct sir_dev *dev) | |||
| 93 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;); | 93 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;); |
| 94 | 94 | ||
| 95 | tty = priv->tty; | 95 | tty = priv->tty; |
| 96 | if (tty->driver->wait_until_sent) { | 96 | if (tty->ops->wait_until_sent) { |
| 97 | lock_kernel(); | 97 | tty->ops->wait_until_sent(tty, msecs_to_jiffies(100)); |
| 98 | tty->driver->wait_until_sent(tty, msecs_to_jiffies(100)); | ||
| 99 | unlock_kernel(); | ||
| 100 | } | 98 | } |
| 101 | else { | 99 | else { |
| 102 | msleep(USBSERIAL_TX_DONE_DELAY); | 100 | msleep(USBSERIAL_TX_DONE_DELAY); |
| @@ -125,48 +123,14 @@ static int irtty_change_speed(struct sir_dev *dev, unsigned speed) | |||
| 125 | 123 | ||
| 126 | tty = priv->tty; | 124 | tty = priv->tty; |
| 127 | 125 | ||
| 128 | lock_kernel(); | 126 | mutex_lock(&tty->termios_mutex); |
| 129 | old_termios = *(tty->termios); | 127 | old_termios = *(tty->termios); |
| 130 | cflag = tty->termios->c_cflag; | 128 | cflag = tty->termios->c_cflag; |
| 131 | 129 | tty_encode_baud_rate(tty, speed, speed); | |
| 132 | cflag &= ~CBAUD; | 130 | if (tty->ops->set_termios) |
| 133 | 131 | tty->ops->set_termios(tty, &old_termios); | |
| 134 | IRDA_DEBUG(2, "%s(), Setting speed to %d\n", __FUNCTION__, speed); | ||
| 135 | |||
| 136 | switch (speed) { | ||
| 137 | case 1200: | ||
| 138 | cflag |= B1200; | ||
| 139 | break; | ||
| 140 | case 2400: | ||
| 141 | cflag |= B2400; | ||
| 142 | break; | ||
| 143 | case 4800: | ||
| 144 | cflag |= B4800; | ||
| 145 | break; | ||
| 146 | case 19200: | ||
| 147 | cflag |= B19200; | ||
| 148 | break; | ||
| 149 | case 38400: | ||
| 150 | cflag |= B38400; | ||
| 151 | break; | ||
| 152 | case 57600: | ||
| 153 | cflag |= B57600; | ||
| 154 | break; | ||
| 155 | case 115200: | ||
| 156 | cflag |= B115200; | ||
| 157 | break; | ||
| 158 | case 9600: | ||
| 159 | default: | ||
| 160 | cflag |= B9600; | ||
| 161 | break; | ||
| 162 | } | ||
| 163 | |||
| 164 | tty->termios->c_cflag = cflag; | ||
| 165 | if (tty->driver->set_termios) | ||
| 166 | tty->driver->set_termios(tty, &old_termios); | ||
| 167 | unlock_kernel(); | ||
| 168 | |||
| 169 | priv->io.speed = speed; | 132 | priv->io.speed = speed; |
| 133 | mutex_unlock(&tty->termios_mutex); | ||
| 170 | 134 | ||
| 171 | return 0; | 135 | return 0; |
| 172 | } | 136 | } |
| @@ -202,8 +166,8 @@ static int irtty_set_dtr_rts(struct sir_dev *dev, int dtr, int rts) | |||
| 202 | * This function is not yet defined for all tty driver, so | 166 | * This function is not yet defined for all tty driver, so |
| 203 | * let's be careful... Jean II | 167 | * let's be careful... Jean II |
| 204 | */ | 168 | */ |
| 205 | IRDA_ASSERT(priv->tty->driver->tiocmset != NULL, return -1;); | 169 | IRDA_ASSERT(priv->tty->ops->tiocmset != NULL, return -1;); |
| 206 | priv->tty->driver->tiocmset(priv->tty, NULL, set, clear); | 170 | priv->tty->ops->tiocmset(priv->tty, NULL, set, clear); |
| 207 | 171 | ||
| 208 | return 0; | 172 | return 0; |
| 209 | } | 173 | } |
| @@ -225,17 +189,13 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t | |||
| 225 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); | 189 | IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -1;); |
| 226 | 190 | ||
| 227 | tty = priv->tty; | 191 | tty = priv->tty; |
| 228 | if (!tty->driver->write) | 192 | if (!tty->ops->write) |
| 229 | return 0; | 193 | return 0; |
| 230 | tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); | 194 | tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); |
| 231 | if (tty->driver->write_room) { | 195 | writelen = tty_write_room(tty); |
| 232 | writelen = tty->driver->write_room(tty); | 196 | if (writelen > len) |
| 233 | if (writelen > len) | ||
| 234 | writelen = len; | ||
| 235 | } | ||
| 236 | else | ||
| 237 | writelen = len; | 197 | writelen = len; |
| 238 | return tty->driver->write(tty, ptr, writelen); | 198 | return tty->ops->write(tty, ptr, writelen); |
| 239 | } | 199 | } |
| 240 | 200 | ||
| 241 | /* ------------------------------------------------------- */ | 201 | /* ------------------------------------------------------- */ |
| @@ -321,7 +281,7 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop) | |||
| 321 | struct ktermios old_termios; | 281 | struct ktermios old_termios; |
| 322 | int cflag; | 282 | int cflag; |
| 323 | 283 | ||
| 324 | lock_kernel(); | 284 | mutex_lock(&tty->termios_mutex); |
| 325 | old_termios = *(tty->termios); | 285 | old_termios = *(tty->termios); |
| 326 | cflag = tty->termios->c_cflag; | 286 | cflag = tty->termios->c_cflag; |
| 327 | 287 | ||
| @@ -331,9 +291,9 @@ static inline void irtty_stop_receiver(struct tty_struct *tty, int stop) | |||
| 331 | cflag |= CREAD; | 291 | cflag |= CREAD; |
| 332 | 292 | ||
| 333 | tty->termios->c_cflag = cflag; | 293 | tty->termios->c_cflag = cflag; |
| 334 | if (tty->driver->set_termios) | 294 | if (tty->ops->set_termios) |
| 335 | tty->driver->set_termios(tty, &old_termios); | 295 | tty->ops->set_termios(tty, &old_termios); |
| 336 | unlock_kernel(); | 296 | mutex_unlock(&tty->termios_mutex); |
| 337 | } | 297 | } |
| 338 | 298 | ||
| 339 | /*****************************************************************/ | 299 | /*****************************************************************/ |
| @@ -359,8 +319,8 @@ static int irtty_start_dev(struct sir_dev *dev) | |||
| 359 | 319 | ||
| 360 | tty = priv->tty; | 320 | tty = priv->tty; |
| 361 | 321 | ||
| 362 | if (tty->driver->start) | 322 | if (tty->ops->start) |
| 363 | tty->driver->start(tty); | 323 | tty->ops->start(tty); |
| 364 | /* Make sure we can receive more data */ | 324 | /* Make sure we can receive more data */ |
| 365 | irtty_stop_receiver(tty, FALSE); | 325 | irtty_stop_receiver(tty, FALSE); |
| 366 | 326 | ||
| @@ -388,8 +348,8 @@ static int irtty_stop_dev(struct sir_dev *dev) | |||
| 388 | 348 | ||
| 389 | /* Make sure we don't receive more data */ | 349 | /* Make sure we don't receive more data */ |
| 390 | irtty_stop_receiver(tty, TRUE); | 350 | irtty_stop_receiver(tty, TRUE); |
| 391 | if (tty->driver->stop) | 351 | if (tty->ops->stop) |
| 392 | tty->driver->stop(tty); | 352 | tty->ops->stop(tty); |
| 393 | 353 | ||
| 394 | mutex_unlock(&irtty_mutex); | 354 | mutex_unlock(&irtty_mutex); |
| 395 | 355 | ||
| @@ -483,11 +443,10 @@ static int irtty_open(struct tty_struct *tty) | |||
| 483 | 443 | ||
| 484 | /* stop the underlying driver */ | 444 | /* stop the underlying driver */ |
| 485 | irtty_stop_receiver(tty, TRUE); | 445 | irtty_stop_receiver(tty, TRUE); |
| 486 | if (tty->driver->stop) | 446 | if (tty->ops->stop) |
| 487 | tty->driver->stop(tty); | 447 | tty->ops->stop(tty); |
| 488 | 448 | ||
| 489 | if (tty->driver->flush_buffer) | 449 | tty_driver_flush_buffer(tty); |
| 490 | tty->driver->flush_buffer(tty); | ||
| 491 | 450 | ||
| 492 | /* apply mtt override */ | 451 | /* apply mtt override */ |
| 493 | sir_tty_drv.qos_mtt_bits = qos_mtt_bits; | 452 | sir_tty_drv.qos_mtt_bits = qos_mtt_bits; |
| @@ -564,8 +523,8 @@ static void irtty_close(struct tty_struct *tty) | |||
| 564 | /* Stop tty */ | 523 | /* Stop tty */ |
| 565 | irtty_stop_receiver(tty, TRUE); | 524 | irtty_stop_receiver(tty, TRUE); |
| 566 | tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); | 525 | tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); |
| 567 | if (tty->driver->stop) | 526 | if (tty->ops->stop) |
| 568 | tty->driver->stop(tty); | 527 | tty->ops->stop(tty); |
| 569 | 528 | ||
| 570 | kfree(priv); | 529 | kfree(priv); |
| 571 | 530 | ||
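
irtty_change_speed() above drops the switch statement that mapped a numeric speed onto discrete Bxxxx cflag constants and instead calls tty_encode_baud_rate() under the tty's termios_mutex, so the requested rate is stored directly before set_termios is invoked. For contrast, this small userspace program does what the removed switch did by hand, using the POSIX termios API; it is an analogy for the old approach, not kernel code:

#include <termios.h>
#include <stdio.h>

/* Map a numeric rate onto a discrete Bxxxx constant, as the old switch did. */
static speed_t to_speed_const(unsigned int baud)
{
	switch (baud) {
	case 9600:   return B9600;
	case 19200:  return B19200;
	case 38400:  return B38400;
	case 57600:  return B57600;
	case 115200: return B115200;
	default:     return B9600;	/* the old code also fell back to 9600 */
	}
}

int main(void)
{
	struct termios tio = { 0 };

	cfsetispeed(&tio, to_speed_const(115200));
	cfsetospeed(&tio, to_speed_const(115200));
	printf("requested 115200, stored constant %#lx\n",
	       (unsigned long)cfgetospeed(&tio));
	return 0;
}
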
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index 3ac8529bb92c..6bf9e76b0a00 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig | |||
| @@ -48,7 +48,7 @@ config VITESSE_PHY | |||
| 48 | config SMSC_PHY | 48 | config SMSC_PHY |
| 49 | tristate "Drivers for SMSC PHYs" | 49 | tristate "Drivers for SMSC PHYs" |
| 50 | ---help--- | 50 | ---help--- |
| 51 | Currently supports the LAN83C185 PHY | 51 | Currently supports the LAN83C185, LAN8187 and LAN8700 PHYs |
| 52 | 52 | ||
| 53 | config BROADCOM_PHY | 53 | config BROADCOM_PHY |
| 54 | tristate "Drivers for Broadcom PHYs" | 54 | tristate "Drivers for Broadcom PHYs" |
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index ddf8d51832a6..ac3c01d28fdf 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c | |||
| @@ -256,7 +256,7 @@ void phy_prepare_link(struct phy_device *phydev, | |||
| 256 | /** | 256 | /** |
| 257 | * phy_connect - connect an ethernet device to a PHY device | 257 | * phy_connect - connect an ethernet device to a PHY device |
| 258 | * @dev: the network device to connect | 258 | * @dev: the network device to connect |
| 259 | * @phy_id: the PHY device to connect | 259 | * @bus_id: the id string of the PHY device to connect |
| 260 | * @handler: callback function for state change notifications | 260 | * @handler: callback function for state change notifications |
| 261 | * @flags: PHY device's dev_flags | 261 | * @flags: PHY device's dev_flags |
| 262 | * @interface: PHY device's interface | 262 | * @interface: PHY device's interface |
diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index b1d8ed40ad98..73baa7a3bb0e 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c | |||
| @@ -12,6 +12,8 @@ | |||
| 12 | * Free Software Foundation; either version 2 of the License, or (at your | 12 | * Free Software Foundation; either version 2 of the License, or (at your |
| 13 | * option) any later version. | 13 | * option) any later version. |
| 14 | * | 14 | * |
| 15 | * Support added for SMSC LAN8187 and LAN8700 by steve.glendinning@smsc.com | ||
| 16 | * | ||
| 15 | */ | 17 | */ |
| 16 | 18 | ||
| 17 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
| @@ -38,7 +40,7 @@ | |||
| 38 | (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) | 40 | (MII_LAN83C185_ISF_INT6 | MII_LAN83C185_ISF_INT4) |
| 39 | 41 | ||
| 40 | 42 | ||
| 41 | static int lan83c185_config_intr(struct phy_device *phydev) | 43 | static int smsc_phy_config_intr(struct phy_device *phydev) |
| 42 | { | 44 | { |
| 43 | int rc = phy_write (phydev, MII_LAN83C185_IM, | 45 | int rc = phy_write (phydev, MII_LAN83C185_IM, |
| 44 | ((PHY_INTERRUPT_ENABLED == phydev->interrupts) | 46 | ((PHY_INTERRUPT_ENABLED == phydev->interrupts) |
| @@ -48,16 +50,16 @@ static int lan83c185_config_intr(struct phy_device *phydev) | |||
| 48 | return rc < 0 ? rc : 0; | 50 | return rc < 0 ? rc : 0; |
| 49 | } | 51 | } |
| 50 | 52 | ||
| 51 | static int lan83c185_ack_interrupt(struct phy_device *phydev) | 53 | static int smsc_phy_ack_interrupt(struct phy_device *phydev) |
| 52 | { | 54 | { |
| 53 | int rc = phy_read (phydev, MII_LAN83C185_ISF); | 55 | int rc = phy_read (phydev, MII_LAN83C185_ISF); |
| 54 | 56 | ||
| 55 | return rc < 0 ? rc : 0; | 57 | return rc < 0 ? rc : 0; |
| 56 | } | 58 | } |
| 57 | 59 | ||
| 58 | static int lan83c185_config_init(struct phy_device *phydev) | 60 | static int smsc_phy_config_init(struct phy_device *phydev) |
| 59 | { | 61 | { |
| 60 | return lan83c185_ack_interrupt (phydev); | 62 | return smsc_phy_ack_interrupt (phydev); |
| 61 | } | 63 | } |
| 62 | 64 | ||
| 63 | 65 | ||
| @@ -73,22 +75,87 @@ static struct phy_driver lan83c185_driver = { | |||
| 73 | /* basic functions */ | 75 | /* basic functions */ |
| 74 | .config_aneg = genphy_config_aneg, | 76 | .config_aneg = genphy_config_aneg, |
| 75 | .read_status = genphy_read_status, | 77 | .read_status = genphy_read_status, |
| 76 | .config_init = lan83c185_config_init, | 78 | .config_init = smsc_phy_config_init, |
| 77 | 79 | ||
| 78 | /* IRQ related */ | 80 | /* IRQ related */ |
| 79 | .ack_interrupt = lan83c185_ack_interrupt, | 81 | .ack_interrupt = smsc_phy_ack_interrupt, |
| 80 | .config_intr = lan83c185_config_intr, | 82 | .config_intr = smsc_phy_config_intr, |
| 83 | |||
| 84 | .driver = { .owner = THIS_MODULE, } | ||
| 85 | }; | ||
| 86 | |||
| 87 | static struct phy_driver lan8187_driver = { | ||
| 88 | .phy_id = 0x0007c0b0, /* OUI=0x00800f, Model#=0x0b */ | ||
| 89 | .phy_id_mask = 0xfffffff0, | ||
| 90 | .name = "SMSC LAN8187", | ||
| 91 | |||
| 92 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | ||
| 93 | | SUPPORTED_Asym_Pause), | ||
| 94 | .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, | ||
| 95 | |||
| 96 | /* basic functions */ | ||
| 97 | .config_aneg = genphy_config_aneg, | ||
| 98 | .read_status = genphy_read_status, | ||
| 99 | .config_init = smsc_phy_config_init, | ||
| 100 | |||
| 101 | /* IRQ related */ | ||
| 102 | .ack_interrupt = smsc_phy_ack_interrupt, | ||
| 103 | .config_intr = smsc_phy_config_intr, | ||
| 104 | |||
| 105 | .driver = { .owner = THIS_MODULE, } | ||
| 106 | }; | ||
| 107 | |||
| 108 | static struct phy_driver lan8700_driver = { | ||
| 109 | .phy_id = 0x0007c0c0, /* OUI=0x00800f, Model#=0x0c */ | ||
| 110 | .phy_id_mask = 0xfffffff0, | ||
| 111 | .name = "SMSC LAN8700", | ||
| 112 | |||
| 113 | .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause | ||
| 114 | | SUPPORTED_Asym_Pause), | ||
| 115 | .flags = PHY_HAS_INTERRUPT | PHY_HAS_MAGICANEG, | ||
| 116 | |||
| 117 | /* basic functions */ | ||
| 118 | .config_aneg = genphy_config_aneg, | ||
| 119 | .read_status = genphy_read_status, | ||
| 120 | .config_init = smsc_phy_config_init, | ||
| 121 | |||
| 122 | /* IRQ related */ | ||
| 123 | .ack_interrupt = smsc_phy_ack_interrupt, | ||
| 124 | .config_intr = smsc_phy_config_intr, | ||
| 81 | 125 | ||
| 82 | .driver = { .owner = THIS_MODULE, } | 126 | .driver = { .owner = THIS_MODULE, } |
| 83 | }; | 127 | }; |
| 84 | 128 | ||
| 85 | static int __init smsc_init(void) | 129 | static int __init smsc_init(void) |
| 86 | { | 130 | { |
| 87 | return phy_driver_register (&lan83c185_driver); | 131 | int ret; |
| 132 | |||
| 133 | ret = phy_driver_register (&lan83c185_driver); | ||
| 134 | if (ret) | ||
| 135 | goto err1; | ||
| 136 | |||
| 137 | ret = phy_driver_register (&lan8187_driver); | ||
| 138 | if (ret) | ||
| 139 | goto err2; | ||
| 140 | |||
| 141 | ret = phy_driver_register (&lan8700_driver); | ||
| 142 | if (ret) | ||
| 143 | goto err3; | ||
| 144 | |||
| 145 | return 0; | ||
| 146 | |||
| 147 | err3: | ||
| 148 | phy_driver_unregister (&lan8187_driver); | ||
| 149 | err2: | ||
| 150 | phy_driver_unregister (&lan83c185_driver); | ||
| 151 | err1: | ||
| 152 | return ret; | ||
| 88 | } | 153 | } |
| 89 | 154 | ||
| 90 | static void __exit smsc_exit(void) | 155 | static void __exit smsc_exit(void) |
| 91 | { | 156 | { |
| 157 | phy_driver_unregister (&lan8700_driver); | ||
| 158 | phy_driver_unregister (&lan8187_driver); | ||
| 92 | phy_driver_unregister (&lan83c185_driver); | 159 | phy_driver_unregister (&lan83c185_driver); |
| 93 | } | 160 | } |
| 94 | 161 | ||
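
The smsc.c additions register two more phy_driver instances (LAN8187 and LAN8700) alongside the LAN83C185 and unwind the earlier registrations if a later one fails. Each entry matches devices by phy_id under phy_id_mask = 0xfffffff0, which ignores the low four revision bits of the 32-bit PHY identifier, so any silicon revision of the same part binds to the same driver. A small stand-alone check of that masked comparison (the IDs below are taken from the patch; the helper is illustrative, phylib performs the equivalent test internally):

#include <stdio.h>

/* A driver matches when the device ID and the driver's phy_id agree
 * under the driver's mask; 0xfffffff0 ignores the 4-bit revision field. */
static int phy_matches(unsigned int id, unsigned int phy_id, unsigned int mask)
{
	return (id & mask) == (phy_id & mask);
}

int main(void)
{
	unsigned int lan8700 = 0x0007c0c0, mask = 0xfffffff0;

	printf("%d\n", phy_matches(0x0007c0c1, lan8700, mask)); /* rev 1: match */
	printf("%d\n", phy_matches(0x0007c0b3, lan8700, mask)); /* LAN8187: no match */
	return 0;
}
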
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c index f023d5b67e6e..f1a52def1241 100644 --- a/drivers/net/ppp_async.c +++ b/drivers/net/ppp_async.c | |||
| @@ -158,6 +158,9 @@ ppp_asynctty_open(struct tty_struct *tty) | |||
| 158 | struct asyncppp *ap; | 158 | struct asyncppp *ap; |
| 159 | int err; | 159 | int err; |
| 160 | 160 | ||
| 161 | if (tty->ops->write == NULL) | ||
| 162 | return -EOPNOTSUPP; | ||
| 163 | |||
| 161 | err = -ENOMEM; | 164 | err = -ENOMEM; |
| 162 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); | 165 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); |
| 163 | if (!ap) | 166 | if (!ap) |
| @@ -358,9 +361,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, | |||
| 358 | if (!skb_queue_empty(&ap->rqueue)) | 361 | if (!skb_queue_empty(&ap->rqueue)) |
| 359 | tasklet_schedule(&ap->tsk); | 362 | tasklet_schedule(&ap->tsk); |
| 360 | ap_put(ap); | 363 | ap_put(ap); |
| 361 | if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) | 364 | tty_unthrottle(tty); |
| 362 | && tty->driver->unthrottle) | ||
| 363 | tty->driver->unthrottle(tty); | ||
| 364 | } | 365 | } |
| 365 | 366 | ||
| 366 | static void | 367 | static void |
| @@ -676,7 +677,7 @@ ppp_async_push(struct asyncppp *ap) | |||
| 676 | if (!tty_stuffed && ap->optr < ap->olim) { | 677 | if (!tty_stuffed && ap->optr < ap->olim) { |
| 677 | avail = ap->olim - ap->optr; | 678 | avail = ap->olim - ap->optr; |
| 678 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 679 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
| 679 | sent = tty->driver->write(tty, ap->optr, avail); | 680 | sent = tty->ops->write(tty, ap->optr, avail); |
| 680 | if (sent < 0) | 681 | if (sent < 0) |
| 681 | goto flush; /* error, e.g. loss of CD */ | 682 | goto flush; /* error, e.g. loss of CD */ |
| 682 | ap->optr += sent; | 683 | ap->optr += sent; |
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c index 0d80fa546719..b8f0369a71e7 100644 --- a/drivers/net/ppp_synctty.c +++ b/drivers/net/ppp_synctty.c | |||
| @@ -207,6 +207,9 @@ ppp_sync_open(struct tty_struct *tty) | |||
| 207 | struct syncppp *ap; | 207 | struct syncppp *ap; |
| 208 | int err; | 208 | int err; |
| 209 | 209 | ||
| 210 | if (tty->ops->write == NULL) | ||
| 211 | return -EOPNOTSUPP; | ||
| 212 | |||
| 210 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); | 213 | ap = kzalloc(sizeof(*ap), GFP_KERNEL); |
| 211 | err = -ENOMEM; | 214 | err = -ENOMEM; |
| 212 | if (!ap) | 215 | if (!ap) |
| @@ -398,9 +401,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, | |||
| 398 | if (!skb_queue_empty(&ap->rqueue)) | 401 | if (!skb_queue_empty(&ap->rqueue)) |
| 399 | tasklet_schedule(&ap->tsk); | 402 | tasklet_schedule(&ap->tsk); |
| 400 | sp_put(ap); | 403 | sp_put(ap); |
| 401 | if (test_and_clear_bit(TTY_THROTTLED, &tty->flags) | 404 | tty_unthrottle(tty); |
| 402 | && tty->driver->unthrottle) | ||
| 403 | tty->driver->unthrottle(tty); | ||
| 404 | } | 405 | } |
| 405 | 406 | ||
| 406 | static void | 407 | static void |
| @@ -653,7 +654,7 @@ ppp_sync_push(struct syncppp *ap) | |||
| 653 | tty_stuffed = 0; | 654 | tty_stuffed = 0; |
| 654 | if (!tty_stuffed && ap->tpkt) { | 655 | if (!tty_stuffed && ap->tpkt) { |
| 655 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | 656 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
| 656 | sent = tty->driver->write(tty, ap->tpkt->data, ap->tpkt->len); | 657 | sent = tty->ops->write(tty, ap->tpkt->data, ap->tpkt->len); |
| 657 | if (sent < 0) | 658 | if (sent < 0) |
| 658 | goto flush; /* error, e.g. loss of CD */ | 659 | goto flush; /* error, e.g. loss of CD */ |
| 659 | if (sent < ap->tpkt->len) { | 660 | if (sent < ap->tpkt->len) { |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 3acfeeabdee1..657242504621 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
| @@ -1617,6 +1617,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1617 | SET_NETDEV_DEV(dev, &pdev->dev); | 1617 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 1618 | tp = netdev_priv(dev); | 1618 | tp = netdev_priv(dev); |
| 1619 | tp->dev = dev; | 1619 | tp->dev = dev; |
| 1620 | tp->pci_dev = pdev; | ||
| 1620 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); | 1621 | tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT); |
| 1621 | 1622 | ||
| 1622 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 1623 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
| @@ -1705,18 +1706,18 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1705 | 1706 | ||
| 1706 | rtl8169_print_mac_version(tp); | 1707 | rtl8169_print_mac_version(tp); |
| 1707 | 1708 | ||
| 1708 | for (i = ARRAY_SIZE(rtl_chip_info) - 1; i >= 0; i--) { | 1709 | for (i = 0; i < ARRAY_SIZE(rtl_chip_info); i++) { |
| 1709 | if (tp->mac_version == rtl_chip_info[i].mac_version) | 1710 | if (tp->mac_version == rtl_chip_info[i].mac_version) |
| 1710 | break; | 1711 | break; |
| 1711 | } | 1712 | } |
| 1712 | if (i < 0) { | 1713 | if (i == ARRAY_SIZE(rtl_chip_info)) { |
| 1713 | /* Unknown chip: assume array element #0, original RTL-8169 */ | 1714 | /* Unknown chip: assume array element #0, original RTL-8169 */ |
| 1714 | if (netif_msg_probe(tp)) { | 1715 | if (netif_msg_probe(tp)) { |
| 1715 | dev_printk(KERN_DEBUG, &pdev->dev, | 1716 | dev_printk(KERN_DEBUG, &pdev->dev, |
| 1716 | "unknown chip version, assuming %s\n", | 1717 | "unknown chip version, assuming %s\n", |
| 1717 | rtl_chip_info[0].name); | 1718 | rtl_chip_info[0].name); |
| 1718 | } | 1719 | } |
| 1719 | i++; | 1720 | i = 0; |
| 1720 | } | 1721 | } |
| 1721 | tp->chipset = i; | 1722 | tp->chipset = i; |
| 1722 | 1723 | ||
| @@ -1777,7 +1778,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 1777 | #endif | 1778 | #endif |
| 1778 | 1779 | ||
| 1779 | tp->intr_mask = 0xffff; | 1780 | tp->intr_mask = 0xffff; |
| 1780 | tp->pci_dev = pdev; | ||
| 1781 | tp->mmio_addr = ioaddr; | 1781 | tp->mmio_addr = ioaddr; |
| 1782 | tp->align = cfg->align; | 1782 | tp->align = cfg->align; |
| 1783 | tp->hw_start = cfg->hw_start; | 1783 | tp->hw_start = cfg->hw_start; |
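The r8169 probe used to scan the chip table backwards and patch up an unknown chip with i++; the new code scans forwards and falls back to element 0 explicitly. A stand-alone illustration of the same lookup-with-fallback pattern (the table contents here are made up, not the driver's real rtl_chip_info[]):

#include <stdio.h>

struct chip_info { int mac_version; const char *name; };

static const struct chip_info chip_info[] = {
	{ 0x01, "RTL8169"   },	/* element 0 doubles as the fallback */
	{ 0x02, "RTL8169s"  },
	{ 0x04, "RTL8169sb" },
};

int main(void)
{
	int mac_version = 0x07;		/* pretend probe result */
	size_t i;

	for (i = 0; i < sizeof(chip_info) / sizeof(chip_info[0]); i++)
		if (chip_info[i].mac_version == mac_version)
			break;
	if (i == sizeof(chip_info) / sizeof(chip_info[0])) {
		printf("unknown chip version, assuming %s\n", chip_info[0].name);
		i = 0;			/* fall back to the first entry */
	}
	printf("chipset index %zu (%s)\n", i, chip_info[i].name);
	return 0;
}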
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c index 157fd932e951..523478ebfd69 100644 --- a/drivers/net/s2io.c +++ b/drivers/net/s2io.c | |||
| @@ -86,7 +86,7 @@ | |||
| 86 | #include "s2io.h" | 86 | #include "s2io.h" |
| 87 | #include "s2io-regs.h" | 87 | #include "s2io-regs.h" |
| 88 | 88 | ||
| 89 | #define DRV_VERSION "2.0.26.22" | 89 | #define DRV_VERSION "2.0.26.23" |
| 90 | 90 | ||
| 91 | /* S2io Driver name & version. */ | 91 | /* S2io Driver name & version. */ |
| 92 | static char s2io_driver_name[] = "Neterion"; | 92 | static char s2io_driver_name[] = "Neterion"; |
| @@ -809,6 +809,7 @@ static int init_shared_mem(struct s2io_nic *nic) | |||
| 809 | config->rx_cfg[i].num_rxd - 1; | 809 | config->rx_cfg[i].num_rxd - 1; |
| 810 | mac_control->rings[i].nic = nic; | 810 | mac_control->rings[i].nic = nic; |
| 811 | mac_control->rings[i].ring_no = i; | 811 | mac_control->rings[i].ring_no = i; |
| 812 | mac_control->rings[i].lro = lro_enable; | ||
| 812 | 813 | ||
| 813 | blk_cnt = config->rx_cfg[i].num_rxd / | 814 | blk_cnt = config->rx_cfg[i].num_rxd / |
| 814 | (rxd_count[nic->rxd_mode] + 1); | 815 | (rxd_count[nic->rxd_mode] + 1); |
| @@ -1560,113 +1561,112 @@ static int init_nic(struct s2io_nic *nic) | |||
| 1560 | writeq(val64, &bar0->tx_fifo_partition_0); | 1561 | writeq(val64, &bar0->tx_fifo_partition_0); |
| 1561 | 1562 | ||
| 1562 | /* Filling the Rx round robin registers as per the | 1563 | /* Filling the Rx round robin registers as per the |
| 1563 | * number of Rings and steering based on QoS. | 1564 | * number of Rings and steering based on QoS with |
| 1564 | */ | 1565 | * equal priority. |
| 1566 | */ | ||
| 1565 | switch (config->rx_ring_num) { | 1567 | switch (config->rx_ring_num) { |
| 1566 | case 1: | 1568 | case 1: |
| 1569 | val64 = 0x0; | ||
| 1570 | writeq(val64, &bar0->rx_w_round_robin_0); | ||
| 1571 | writeq(val64, &bar0->rx_w_round_robin_1); | ||
| 1572 | writeq(val64, &bar0->rx_w_round_robin_2); | ||
| 1573 | writeq(val64, &bar0->rx_w_round_robin_3); | ||
| 1574 | writeq(val64, &bar0->rx_w_round_robin_4); | ||
| 1575 | |||
| 1567 | val64 = 0x8080808080808080ULL; | 1576 | val64 = 0x8080808080808080ULL; |
| 1568 | writeq(val64, &bar0->rts_qos_steering); | 1577 | writeq(val64, &bar0->rts_qos_steering); |
| 1569 | break; | 1578 | break; |
| 1570 | case 2: | 1579 | case 2: |
| 1571 | val64 = 0x0000010000010000ULL; | 1580 | val64 = 0x0001000100010001ULL; |
| 1572 | writeq(val64, &bar0->rx_w_round_robin_0); | 1581 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1573 | val64 = 0x0100000100000100ULL; | ||
| 1574 | writeq(val64, &bar0->rx_w_round_robin_1); | 1582 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1575 | val64 = 0x0001000001000001ULL; | ||
| 1576 | writeq(val64, &bar0->rx_w_round_robin_2); | 1583 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1577 | val64 = 0x0000010000010000ULL; | ||
| 1578 | writeq(val64, &bar0->rx_w_round_robin_3); | 1584 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1579 | val64 = 0x0100000000000000ULL; | 1585 | val64 = 0x0001000100000000ULL; |
| 1580 | writeq(val64, &bar0->rx_w_round_robin_4); | 1586 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1581 | 1587 | ||
| 1582 | val64 = 0x8080808040404040ULL; | 1588 | val64 = 0x8080808040404040ULL; |
| 1583 | writeq(val64, &bar0->rts_qos_steering); | 1589 | writeq(val64, &bar0->rts_qos_steering); |
| 1584 | break; | 1590 | break; |
| 1585 | case 3: | 1591 | case 3: |
| 1586 | val64 = 0x0001000102000001ULL; | 1592 | val64 = 0x0001020001020001ULL; |
| 1587 | writeq(val64, &bar0->rx_w_round_robin_0); | 1593 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1588 | val64 = 0x0001020000010001ULL; | 1594 | val64 = 0x0200010200010200ULL; |
| 1589 | writeq(val64, &bar0->rx_w_round_robin_1); | 1595 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1590 | val64 = 0x0200000100010200ULL; | 1596 | val64 = 0x0102000102000102ULL; |
| 1591 | writeq(val64, &bar0->rx_w_round_robin_2); | 1597 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1592 | val64 = 0x0001000102000001ULL; | 1598 | val64 = 0x0001020001020001ULL; |
| 1593 | writeq(val64, &bar0->rx_w_round_robin_3); | 1599 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1594 | val64 = 0x0001020000000000ULL; | 1600 | val64 = 0x0200010200000000ULL; |
| 1595 | writeq(val64, &bar0->rx_w_round_robin_4); | 1601 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1596 | 1602 | ||
| 1597 | val64 = 0x8080804040402020ULL; | 1603 | val64 = 0x8080804040402020ULL; |
| 1598 | writeq(val64, &bar0->rts_qos_steering); | 1604 | writeq(val64, &bar0->rts_qos_steering); |
| 1599 | break; | 1605 | break; |
| 1600 | case 4: | 1606 | case 4: |
| 1601 | val64 = 0x0001020300010200ULL; | 1607 | val64 = 0x0001020300010203ULL; |
| 1602 | writeq(val64, &bar0->rx_w_round_robin_0); | 1608 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1603 | val64 = 0x0100000102030001ULL; | ||
| 1604 | writeq(val64, &bar0->rx_w_round_robin_1); | 1609 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1605 | val64 = 0x0200010000010203ULL; | ||
| 1606 | writeq(val64, &bar0->rx_w_round_robin_2); | 1610 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1607 | val64 = 0x0001020001000001ULL; | ||
| 1608 | writeq(val64, &bar0->rx_w_round_robin_3); | 1611 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1609 | val64 = 0x0203000100000000ULL; | 1612 | val64 = 0x0001020300000000ULL; |
| 1610 | writeq(val64, &bar0->rx_w_round_robin_4); | 1613 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1611 | 1614 | ||
| 1612 | val64 = 0x8080404020201010ULL; | 1615 | val64 = 0x8080404020201010ULL; |
| 1613 | writeq(val64, &bar0->rts_qos_steering); | 1616 | writeq(val64, &bar0->rts_qos_steering); |
| 1614 | break; | 1617 | break; |
| 1615 | case 5: | 1618 | case 5: |
| 1616 | val64 = 0x0001000203000102ULL; | 1619 | val64 = 0x0001020304000102ULL; |
| 1617 | writeq(val64, &bar0->rx_w_round_robin_0); | 1620 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1618 | val64 = 0x0001020001030004ULL; | 1621 | val64 = 0x0304000102030400ULL; |
| 1619 | writeq(val64, &bar0->rx_w_round_robin_1); | 1622 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1620 | val64 = 0x0001000203000102ULL; | 1623 | val64 = 0x0102030400010203ULL; |
| 1621 | writeq(val64, &bar0->rx_w_round_robin_2); | 1624 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1622 | val64 = 0x0001020001030004ULL; | 1625 | val64 = 0x0400010203040001ULL; |
| 1623 | writeq(val64, &bar0->rx_w_round_robin_3); | 1626 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1624 | val64 = 0x0001000000000000ULL; | 1627 | val64 = 0x0203040000000000ULL; |
| 1625 | writeq(val64, &bar0->rx_w_round_robin_4); | 1628 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1626 | 1629 | ||
| 1627 | val64 = 0x8080404020201008ULL; | 1630 | val64 = 0x8080404020201008ULL; |
| 1628 | writeq(val64, &bar0->rts_qos_steering); | 1631 | writeq(val64, &bar0->rts_qos_steering); |
| 1629 | break; | 1632 | break; |
| 1630 | case 6: | 1633 | case 6: |
| 1631 | val64 = 0x0001020304000102ULL; | 1634 | val64 = 0x0001020304050001ULL; |
| 1632 | writeq(val64, &bar0->rx_w_round_robin_0); | 1635 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1633 | val64 = 0x0304050001020001ULL; | 1636 | val64 = 0x0203040500010203ULL; |
| 1634 | writeq(val64, &bar0->rx_w_round_robin_1); | 1637 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1635 | val64 = 0x0203000100000102ULL; | 1638 | val64 = 0x0405000102030405ULL; |
| 1636 | writeq(val64, &bar0->rx_w_round_robin_2); | 1639 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1637 | val64 = 0x0304000102030405ULL; | 1640 | val64 = 0x0001020304050001ULL; |
| 1638 | writeq(val64, &bar0->rx_w_round_robin_3); | 1641 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1639 | val64 = 0x0001000200000000ULL; | 1642 | val64 = 0x0203040500000000ULL; |
| 1640 | writeq(val64, &bar0->rx_w_round_robin_4); | 1643 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1641 | 1644 | ||
| 1642 | val64 = 0x8080404020100804ULL; | 1645 | val64 = 0x8080404020100804ULL; |
| 1643 | writeq(val64, &bar0->rts_qos_steering); | 1646 | writeq(val64, &bar0->rts_qos_steering); |
| 1644 | break; | 1647 | break; |
| 1645 | case 7: | 1648 | case 7: |
| 1646 | val64 = 0x0001020001020300ULL; | 1649 | val64 = 0x0001020304050600ULL; |
| 1647 | writeq(val64, &bar0->rx_w_round_robin_0); | 1650 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1648 | val64 = 0x0102030400010203ULL; | 1651 | val64 = 0x0102030405060001ULL; |
| 1649 | writeq(val64, &bar0->rx_w_round_robin_1); | 1652 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1650 | val64 = 0x0405060001020001ULL; | 1653 | val64 = 0x0203040506000102ULL; |
| 1651 | writeq(val64, &bar0->rx_w_round_robin_2); | 1654 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1652 | val64 = 0x0304050000010200ULL; | 1655 | val64 = 0x0304050600010203ULL; |
| 1653 | writeq(val64, &bar0->rx_w_round_robin_3); | 1656 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1654 | val64 = 0x0102030000000000ULL; | 1657 | val64 = 0x0405060000000000ULL; |
| 1655 | writeq(val64, &bar0->rx_w_round_robin_4); | 1658 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1656 | 1659 | ||
| 1657 | val64 = 0x8080402010080402ULL; | 1660 | val64 = 0x8080402010080402ULL; |
| 1658 | writeq(val64, &bar0->rts_qos_steering); | 1661 | writeq(val64, &bar0->rts_qos_steering); |
| 1659 | break; | 1662 | break; |
| 1660 | case 8: | 1663 | case 8: |
| 1661 | val64 = 0x0001020300040105ULL; | 1664 | val64 = 0x0001020304050607ULL; |
| 1662 | writeq(val64, &bar0->rx_w_round_robin_0); | 1665 | writeq(val64, &bar0->rx_w_round_robin_0); |
| 1663 | val64 = 0x0200030106000204ULL; | ||
| 1664 | writeq(val64, &bar0->rx_w_round_robin_1); | 1666 | writeq(val64, &bar0->rx_w_round_robin_1); |
| 1665 | val64 = 0x0103000502010007ULL; | ||
| 1666 | writeq(val64, &bar0->rx_w_round_robin_2); | 1667 | writeq(val64, &bar0->rx_w_round_robin_2); |
| 1667 | val64 = 0x0304010002060500ULL; | ||
| 1668 | writeq(val64, &bar0->rx_w_round_robin_3); | 1668 | writeq(val64, &bar0->rx_w_round_robin_3); |
| 1669 | val64 = 0x0103020400000000ULL; | 1669 | val64 = 0x0001020300000000ULL; |
| 1670 | writeq(val64, &bar0->rx_w_round_robin_4); | 1670 | writeq(val64, &bar0->rx_w_round_robin_4); |
| 1671 | 1671 | ||
| 1672 | val64 = 0x8040201008040201ULL; | 1672 | val64 = 0x8040201008040201ULL; |
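The rewritten rx_w_round_robin_* values are no longer hand-tuned: all 36 one-byte arbitration slots simply cycle through the ring numbers, which is what gives every ring equal priority. A small userspace generator (not part of the driver) that reproduces the constants in the hunk above, for example the case 3 values:

#include <stdio.h>
#include <stdint.h>

/* Build the five 64-bit rx_w_round_robin_* values for an equal-priority
 * schedule: 36 one-byte slots cycle through ring numbers 0..rings-1,
 * the remaining four bytes of the last register stay zero. */
static void rx_round_robin(unsigned int rings, uint64_t reg[5])
{
	unsigned int slot, i;

	for (i = 0; i < 5; i++)
		reg[i] = 0;
	for (slot = 0; slot < 36; slot++)
		reg[slot / 8] |= (uint64_t)(slot % rings) << (56 - 8 * (slot % 8));
}

int main(void)
{
	uint64_t reg[5];
	unsigned int i;

	rx_round_robin(3, reg);		/* compare against the "case 3" constants */
	for (i = 0; i < 5; i++)
		printf("rx_w_round_robin_%u = 0x%016llx\n", i,
		       (unsigned long long)reg[i]);
	return 0;
}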
| @@ -2499,8 +2499,7 @@ static void stop_nic(struct s2io_nic *nic) | |||
| 2499 | 2499 | ||
| 2500 | /** | 2500 | /** |
| 2501 | * fill_rx_buffers - Allocates the Rx side skbs | 2501 | * fill_rx_buffers - Allocates the Rx side skbs |
| 2502 | * @nic: device private variable | 2502 | * @ring_info: per ring structure |
| 2503 | * @ring_no: ring number | ||
| 2504 | * Description: | 2503 | * Description: |
| 2505 | * The function allocates Rx side skbs and puts the physical | 2504 | * The function allocates Rx side skbs and puts the physical |
| 2506 | * address of these buffers into the RxD buffer pointers, so that the NIC | 2505 | * address of these buffers into the RxD buffer pointers, so that the NIC |
| @@ -2518,103 +2517,94 @@ static void stop_nic(struct s2io_nic *nic) | |||
| 2518 | * SUCCESS on success or an appropriate -ve value on failure. | 2517 | * SUCCESS on success or an appropriate -ve value on failure. |
| 2519 | */ | 2518 | */ |
| 2520 | 2519 | ||
| 2521 | static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | 2520 | static int fill_rx_buffers(struct ring_info *ring) |
| 2522 | { | 2521 | { |
| 2523 | struct net_device *dev = nic->dev; | ||
| 2524 | struct sk_buff *skb; | 2522 | struct sk_buff *skb; |
| 2525 | struct RxD_t *rxdp; | 2523 | struct RxD_t *rxdp; |
| 2526 | int off, off1, size, block_no, block_no1; | 2524 | int off, size, block_no, block_no1; |
| 2527 | u32 alloc_tab = 0; | 2525 | u32 alloc_tab = 0; |
| 2528 | u32 alloc_cnt; | 2526 | u32 alloc_cnt; |
| 2529 | struct mac_info *mac_control; | ||
| 2530 | struct config_param *config; | ||
| 2531 | u64 tmp; | 2527 | u64 tmp; |
| 2532 | struct buffAdd *ba; | 2528 | struct buffAdd *ba; |
| 2533 | struct RxD_t *first_rxdp = NULL; | 2529 | struct RxD_t *first_rxdp = NULL; |
| 2534 | u64 Buffer0_ptr = 0, Buffer1_ptr = 0; | 2530 | u64 Buffer0_ptr = 0, Buffer1_ptr = 0; |
| 2531 | int rxd_index = 0; | ||
| 2535 | struct RxD1 *rxdp1; | 2532 | struct RxD1 *rxdp1; |
| 2536 | struct RxD3 *rxdp3; | 2533 | struct RxD3 *rxdp3; |
| 2537 | struct swStat *stats = &nic->mac_control.stats_info->sw_stat; | 2534 | struct swStat *stats = &ring->nic->mac_control.stats_info->sw_stat; |
| 2538 | 2535 | ||
| 2539 | mac_control = &nic->mac_control; | 2536 | alloc_cnt = ring->pkt_cnt - ring->rx_bufs_left; |
| 2540 | config = &nic->config; | ||
| 2541 | alloc_cnt = mac_control->rings[ring_no].pkt_cnt - | ||
| 2542 | atomic_read(&nic->rx_bufs_left[ring_no]); | ||
| 2543 | 2537 | ||
| 2544 | block_no1 = mac_control->rings[ring_no].rx_curr_get_info.block_index; | 2538 | block_no1 = ring->rx_curr_get_info.block_index; |
| 2545 | off1 = mac_control->rings[ring_no].rx_curr_get_info.offset; | ||
| 2546 | while (alloc_tab < alloc_cnt) { | 2539 | while (alloc_tab < alloc_cnt) { |
| 2547 | block_no = mac_control->rings[ring_no].rx_curr_put_info. | 2540 | block_no = ring->rx_curr_put_info.block_index; |
| 2548 | block_index; | ||
| 2549 | off = mac_control->rings[ring_no].rx_curr_put_info.offset; | ||
| 2550 | 2541 | ||
| 2551 | rxdp = mac_control->rings[ring_no]. | 2542 | off = ring->rx_curr_put_info.offset; |
| 2552 | rx_blocks[block_no].rxds[off].virt_addr; | 2543 | |
| 2544 | rxdp = ring->rx_blocks[block_no].rxds[off].virt_addr; | ||
| 2545 | |||
| 2546 | rxd_index = off + 1; | ||
| 2547 | if (block_no) | ||
| 2548 | rxd_index += (block_no * ring->rxd_count); | ||
| 2553 | 2549 | ||
| 2554 | if ((block_no == block_no1) && (off == off1) && | 2550 | if ((block_no == block_no1) && |
| 2555 | (rxdp->Host_Control)) { | 2551 | (off == ring->rx_curr_get_info.offset) && |
| 2552 | (rxdp->Host_Control)) { | ||
| 2556 | DBG_PRINT(INTR_DBG, "%s: Get and Put", | 2553 | DBG_PRINT(INTR_DBG, "%s: Get and Put", |
| 2557 | dev->name); | 2554 | ring->dev->name); |
| 2558 | DBG_PRINT(INTR_DBG, " info equated\n"); | 2555 | DBG_PRINT(INTR_DBG, " info equated\n"); |
| 2559 | goto end; | 2556 | goto end; |
| 2560 | } | 2557 | } |
| 2561 | if (off && (off == rxd_count[nic->rxd_mode])) { | 2558 | if (off && (off == ring->rxd_count)) { |
| 2562 | mac_control->rings[ring_no].rx_curr_put_info. | 2559 | ring->rx_curr_put_info.block_index++; |
| 2563 | block_index++; | 2560 | if (ring->rx_curr_put_info.block_index == |
| 2564 | if (mac_control->rings[ring_no].rx_curr_put_info. | 2561 | ring->block_count) |
| 2565 | block_index == mac_control->rings[ring_no]. | 2562 | ring->rx_curr_put_info.block_index = 0; |
| 2566 | block_count) | 2563 | block_no = ring->rx_curr_put_info.block_index; |
| 2567 | mac_control->rings[ring_no].rx_curr_put_info. | 2564 | off = 0; |
| 2568 | block_index = 0; | 2565 | ring->rx_curr_put_info.offset = off; |
| 2569 | block_no = mac_control->rings[ring_no]. | 2566 | rxdp = ring->rx_blocks[block_no].block_virt_addr; |
| 2570 | rx_curr_put_info.block_index; | ||
| 2571 | if (off == rxd_count[nic->rxd_mode]) | ||
| 2572 | off = 0; | ||
| 2573 | mac_control->rings[ring_no].rx_curr_put_info. | ||
| 2574 | offset = off; | ||
| 2575 | rxdp = mac_control->rings[ring_no]. | ||
| 2576 | rx_blocks[block_no].block_virt_addr; | ||
| 2577 | DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", | 2567 | DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n", |
| 2578 | dev->name, rxdp); | 2568 | ring->dev->name, rxdp); |
| 2569 | |||
| 2579 | } | 2570 | } |
| 2580 | 2571 | ||
| 2581 | if ((rxdp->Control_1 & RXD_OWN_XENA) && | 2572 | if ((rxdp->Control_1 & RXD_OWN_XENA) && |
| 2582 | ((nic->rxd_mode == RXD_MODE_3B) && | 2573 | ((ring->rxd_mode == RXD_MODE_3B) && |
| 2583 | (rxdp->Control_2 & s2BIT(0)))) { | 2574 | (rxdp->Control_2 & s2BIT(0)))) { |
| 2584 | mac_control->rings[ring_no].rx_curr_put_info. | 2575 | ring->rx_curr_put_info.offset = off; |
| 2585 | offset = off; | ||
| 2586 | goto end; | 2576 | goto end; |
| 2587 | } | 2577 | } |
| 2588 | /* calculate size of skb based on ring mode */ | 2578 | /* calculate size of skb based on ring mode */ |
| 2589 | size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE + | 2579 | size = ring->mtu + HEADER_ETHERNET_II_802_3_SIZE + |
| 2590 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; | 2580 | HEADER_802_2_SIZE + HEADER_SNAP_SIZE; |
| 2591 | if (nic->rxd_mode == RXD_MODE_1) | 2581 | if (ring->rxd_mode == RXD_MODE_1) |
| 2592 | size += NET_IP_ALIGN; | 2582 | size += NET_IP_ALIGN; |
| 2593 | else | 2583 | else |
| 2594 | size = dev->mtu + ALIGN_SIZE + BUF0_LEN + 4; | 2584 | size = ring->mtu + ALIGN_SIZE + BUF0_LEN + 4; |
| 2595 | 2585 | ||
| 2596 | /* allocate skb */ | 2586 | /* allocate skb */ |
| 2597 | skb = dev_alloc_skb(size); | 2587 | skb = dev_alloc_skb(size); |
| 2598 | if(!skb) { | 2588 | if(!skb) { |
| 2599 | DBG_PRINT(INFO_DBG, "%s: Out of ", dev->name); | 2589 | DBG_PRINT(INFO_DBG, "%s: Out of ", ring->dev->name); |
| 2600 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); | 2590 | DBG_PRINT(INFO_DBG, "memory to allocate SKBs\n"); |
| 2601 | if (first_rxdp) { | 2591 | if (first_rxdp) { |
| 2602 | wmb(); | 2592 | wmb(); |
| 2603 | first_rxdp->Control_1 |= RXD_OWN_XENA; | 2593 | first_rxdp->Control_1 |= RXD_OWN_XENA; |
| 2604 | } | 2594 | } |
| 2605 | nic->mac_control.stats_info->sw_stat. \ | 2595 | stats->mem_alloc_fail_cnt++; |
| 2606 | mem_alloc_fail_cnt++; | 2596 | |
| 2607 | return -ENOMEM ; | 2597 | return -ENOMEM ; |
| 2608 | } | 2598 | } |
| 2609 | nic->mac_control.stats_info->sw_stat.mem_allocated | 2599 | stats->mem_allocated += skb->truesize; |
| 2610 | += skb->truesize; | 2600 | |
| 2611 | if (nic->rxd_mode == RXD_MODE_1) { | 2601 | if (ring->rxd_mode == RXD_MODE_1) { |
| 2612 | /* 1 buffer mode - normal operation mode */ | 2602 | /* 1 buffer mode - normal operation mode */ |
| 2613 | rxdp1 = (struct RxD1*)rxdp; | 2603 | rxdp1 = (struct RxD1*)rxdp; |
| 2614 | memset(rxdp, 0, sizeof(struct RxD1)); | 2604 | memset(rxdp, 0, sizeof(struct RxD1)); |
| 2615 | skb_reserve(skb, NET_IP_ALIGN); | 2605 | skb_reserve(skb, NET_IP_ALIGN); |
| 2616 | rxdp1->Buffer0_ptr = pci_map_single | 2606 | rxdp1->Buffer0_ptr = pci_map_single |
| 2617 | (nic->pdev, skb->data, size - NET_IP_ALIGN, | 2607 | (ring->pdev, skb->data, size - NET_IP_ALIGN, |
| 2618 | PCI_DMA_FROMDEVICE); | 2608 | PCI_DMA_FROMDEVICE); |
| 2619 | if( (rxdp1->Buffer0_ptr == 0) || | 2609 | if( (rxdp1->Buffer0_ptr == 0) || |
| 2620 | (rxdp1->Buffer0_ptr == | 2610 | (rxdp1->Buffer0_ptr == |
| @@ -2623,8 +2613,8 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2623 | 2613 | ||
| 2624 | rxdp->Control_2 = | 2614 | rxdp->Control_2 = |
| 2625 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); | 2615 | SET_BUFFER0_SIZE_1(size - NET_IP_ALIGN); |
| 2626 | 2616 | rxdp->Host_Control = (unsigned long) (skb); | |
| 2627 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2617 | } else if (ring->rxd_mode == RXD_MODE_3B) { |
| 2628 | /* | 2618 | /* |
| 2629 | * 2 buffer mode - | 2619 | * 2 buffer mode - |
| 2630 | * 2 buffer mode provides 128 | 2620 | * 2 buffer mode provides 128 |
| @@ -2640,7 +2630,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2640 | rxdp3->Buffer0_ptr = Buffer0_ptr; | 2630 | rxdp3->Buffer0_ptr = Buffer0_ptr; |
| 2641 | rxdp3->Buffer1_ptr = Buffer1_ptr; | 2631 | rxdp3->Buffer1_ptr = Buffer1_ptr; |
| 2642 | 2632 | ||
| 2643 | ba = &mac_control->rings[ring_no].ba[block_no][off]; | 2633 | ba = &ring->ba[block_no][off]; |
| 2644 | skb_reserve(skb, BUF0_LEN); | 2634 | skb_reserve(skb, BUF0_LEN); |
| 2645 | tmp = (u64)(unsigned long) skb->data; | 2635 | tmp = (u64)(unsigned long) skb->data; |
| 2646 | tmp += ALIGN_SIZE; | 2636 | tmp += ALIGN_SIZE; |
| @@ -2650,10 +2640,10 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2650 | 2640 | ||
| 2651 | if (!(rxdp3->Buffer0_ptr)) | 2641 | if (!(rxdp3->Buffer0_ptr)) |
| 2652 | rxdp3->Buffer0_ptr = | 2642 | rxdp3->Buffer0_ptr = |
| 2653 | pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN, | 2643 | pci_map_single(ring->pdev, ba->ba_0, |
| 2654 | PCI_DMA_FROMDEVICE); | 2644 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
| 2655 | else | 2645 | else |
| 2656 | pci_dma_sync_single_for_device(nic->pdev, | 2646 | pci_dma_sync_single_for_device(ring->pdev, |
| 2657 | (dma_addr_t) rxdp3->Buffer0_ptr, | 2647 | (dma_addr_t) rxdp3->Buffer0_ptr, |
| 2658 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2648 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
| 2659 | if( (rxdp3->Buffer0_ptr == 0) || | 2649 | if( (rxdp3->Buffer0_ptr == 0) || |
| @@ -2661,7 +2651,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2661 | goto pci_map_failed; | 2651 | goto pci_map_failed; |
| 2662 | 2652 | ||
| 2663 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); | 2653 | rxdp->Control_2 = SET_BUFFER0_SIZE_3(BUF0_LEN); |
| 2664 | if (nic->rxd_mode == RXD_MODE_3B) { | 2654 | if (ring->rxd_mode == RXD_MODE_3B) { |
| 2665 | /* Two buffer mode */ | 2655 | /* Two buffer mode */ |
| 2666 | 2656 | ||
| 2667 | /* | 2657 | /* |
| @@ -2669,39 +2659,42 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2669 | * L4 payload | 2659 | * L4 payload |
| 2670 | */ | 2660 | */ |
| 2671 | rxdp3->Buffer2_ptr = pci_map_single | 2661 | rxdp3->Buffer2_ptr = pci_map_single |
| 2672 | (nic->pdev, skb->data, dev->mtu + 4, | 2662 | (ring->pdev, skb->data, ring->mtu + 4, |
| 2673 | PCI_DMA_FROMDEVICE); | 2663 | PCI_DMA_FROMDEVICE); |
| 2674 | 2664 | ||
| 2675 | if( (rxdp3->Buffer2_ptr == 0) || | 2665 | if( (rxdp3->Buffer2_ptr == 0) || |
| 2676 | (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) | 2666 | (rxdp3->Buffer2_ptr == DMA_ERROR_CODE)) |
| 2677 | goto pci_map_failed; | 2667 | goto pci_map_failed; |
| 2678 | 2668 | ||
| 2679 | rxdp3->Buffer1_ptr = | 2669 | if (!rxdp3->Buffer1_ptr) |
| 2680 | pci_map_single(nic->pdev, | 2670 | rxdp3->Buffer1_ptr = |
| 2671 | pci_map_single(ring->pdev, | ||
| 2681 | ba->ba_1, BUF1_LEN, | 2672 | ba->ba_1, BUF1_LEN, |
| 2682 | PCI_DMA_FROMDEVICE); | 2673 | PCI_DMA_FROMDEVICE); |
| 2674 | |||
| 2683 | if( (rxdp3->Buffer1_ptr == 0) || | 2675 | if( (rxdp3->Buffer1_ptr == 0) || |
| 2684 | (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { | 2676 | (rxdp3->Buffer1_ptr == DMA_ERROR_CODE)) { |
| 2685 | pci_unmap_single | 2677 | pci_unmap_single |
| 2686 | (nic->pdev, | 2678 | (ring->pdev, |
| 2687 | (dma_addr_t)rxdp3->Buffer2_ptr, | 2679 | (dma_addr_t)(unsigned long) |
| 2688 | dev->mtu + 4, | 2680 | skb->data, |
| 2681 | ring->mtu + 4, | ||
| 2689 | PCI_DMA_FROMDEVICE); | 2682 | PCI_DMA_FROMDEVICE); |
| 2690 | goto pci_map_failed; | 2683 | goto pci_map_failed; |
| 2691 | } | 2684 | } |
| 2692 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); | 2685 | rxdp->Control_2 |= SET_BUFFER1_SIZE_3(1); |
| 2693 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 | 2686 | rxdp->Control_2 |= SET_BUFFER2_SIZE_3 |
| 2694 | (dev->mtu + 4); | 2687 | (ring->mtu + 4); |
| 2695 | } | 2688 | } |
| 2696 | rxdp->Control_2 |= s2BIT(0); | 2689 | rxdp->Control_2 |= s2BIT(0); |
| 2690 | rxdp->Host_Control = (unsigned long) (skb); | ||
| 2697 | } | 2691 | } |
| 2698 | rxdp->Host_Control = (unsigned long) (skb); | ||
| 2699 | if (alloc_tab & ((1 << rxsync_frequency) - 1)) | 2692 | if (alloc_tab & ((1 << rxsync_frequency) - 1)) |
| 2700 | rxdp->Control_1 |= RXD_OWN_XENA; | 2693 | rxdp->Control_1 |= RXD_OWN_XENA; |
| 2701 | off++; | 2694 | off++; |
| 2702 | if (off == (rxd_count[nic->rxd_mode] + 1)) | 2695 | if (off == (ring->rxd_count + 1)) |
| 2703 | off = 0; | 2696 | off = 0; |
| 2704 | mac_control->rings[ring_no].rx_curr_put_info.offset = off; | 2697 | ring->rx_curr_put_info.offset = off; |
| 2705 | 2698 | ||
| 2706 | rxdp->Control_2 |= SET_RXD_MARKER; | 2699 | rxdp->Control_2 |= SET_RXD_MARKER; |
| 2707 | if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { | 2700 | if (!(alloc_tab & ((1 << rxsync_frequency) - 1))) { |
| @@ -2711,7 +2704,7 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no) | |||
| 2711 | } | 2704 | } |
| 2712 | first_rxdp = rxdp; | 2705 | first_rxdp = rxdp; |
| 2713 | } | 2706 | } |
| 2714 | atomic_inc(&nic->rx_bufs_left[ring_no]); | 2707 | ring->rx_bufs_left += 1; |
| 2715 | alloc_tab++; | 2708 | alloc_tab++; |
| 2716 | } | 2709 | } |
| 2717 | 2710 | ||
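fill_rx_buffers() now takes the ring itself rather than (nic, ring_no), and the ring caches mtu, pdev, dev, rxd_mode and rxd_count so the refill loop touches only per-ring state. A much-reduced sketch of that refactoring pattern; the structures are trimmed to what the sketch needs and are not the driver's real definitions:

#include <stdio.h>

struct ring {
	unsigned int mtu;		/* copied from the netdev at open time */
	unsigned int pkt_cnt;
	unsigned int rx_bufs_left;
};

struct nic {
	struct ring rings[8];
	unsigned int rx_ring_num;
};

/* Old shape: fill(nic, ring_no), with nic->rings[ring_no].x on every
 * access.  New shape: the caller resolves the ring once and the refill
 * loop works purely on per-ring state. */
static int fill(struct ring *ring)
{
	unsigned int to_alloc = ring->pkt_cnt - ring->rx_bufs_left;

	while (to_alloc--) {
		/* ... allocate an skb sized from ring->mtu, map it ... */
		ring->rx_bufs_left++;
	}
	return 0;
}

int main(void)
{
	struct nic nic = { .rx_ring_num = 2 };
	unsigned int i;

	for (i = 0; i < nic.rx_ring_num; i++) {
		nic.rings[i].pkt_cnt = 4;
		fill(&nic.rings[i]);
	}
	printf("ring 0 has %u buffers posted\n", nic.rings[0].rx_bufs_left);
	return 0;
}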
| @@ -2783,7 +2776,7 @@ static void free_rxd_blk(struct s2io_nic *sp, int ring_no, int blk) | |||
| 2783 | } | 2776 | } |
| 2784 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; | 2777 | sp->mac_control.stats_info->sw_stat.mem_freed += skb->truesize; |
| 2785 | dev_kfree_skb(skb); | 2778 | dev_kfree_skb(skb); |
| 2786 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 2779 | mac_control->rings[ring_no].rx_bufs_left -= 1; |
| 2787 | } | 2780 | } |
| 2788 | } | 2781 | } |
| 2789 | 2782 | ||
| @@ -2814,7 +2807,7 @@ static void free_rx_buffers(struct s2io_nic *sp) | |||
| 2814 | mac_control->rings[i].rx_curr_get_info.block_index = 0; | 2807 | mac_control->rings[i].rx_curr_get_info.block_index = 0; |
| 2815 | mac_control->rings[i].rx_curr_put_info.offset = 0; | 2808 | mac_control->rings[i].rx_curr_put_info.offset = 0; |
| 2816 | mac_control->rings[i].rx_curr_get_info.offset = 0; | 2809 | mac_control->rings[i].rx_curr_get_info.offset = 0; |
| 2817 | atomic_set(&sp->rx_bufs_left[i], 0); | 2810 | mac_control->rings[i].rx_bufs_left = 0; |
| 2818 | DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", | 2811 | DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n", |
| 2819 | dev->name, buf_cnt, i); | 2812 | dev->name, buf_cnt, i); |
| 2820 | } | 2813 | } |
| @@ -2864,7 +2857,7 @@ static int s2io_poll(struct napi_struct *napi, int budget) | |||
| 2864 | netif_rx_complete(dev, napi); | 2857 | netif_rx_complete(dev, napi); |
| 2865 | 2858 | ||
| 2866 | for (i = 0; i < config->rx_ring_num; i++) { | 2859 | for (i = 0; i < config->rx_ring_num; i++) { |
| 2867 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2860 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
| 2868 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2861 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
| 2869 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2862 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); |
| 2870 | break; | 2863 | break; |
| @@ -2877,7 +2870,7 @@ static int s2io_poll(struct napi_struct *napi, int budget) | |||
| 2877 | 2870 | ||
| 2878 | no_rx: | 2871 | no_rx: |
| 2879 | for (i = 0; i < config->rx_ring_num; i++) { | 2872 | for (i = 0; i < config->rx_ring_num; i++) { |
| 2880 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2873 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
| 2881 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2874 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
| 2882 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); | 2875 | DBG_PRINT(INFO_DBG, " in Rx Poll!!\n"); |
| 2883 | break; | 2876 | break; |
| @@ -2928,7 +2921,7 @@ static void s2io_netpoll(struct net_device *dev) | |||
| 2928 | rx_intr_handler(&mac_control->rings[i]); | 2921 | rx_intr_handler(&mac_control->rings[i]); |
| 2929 | 2922 | ||
| 2930 | for (i = 0; i < config->rx_ring_num; i++) { | 2923 | for (i = 0; i < config->rx_ring_num; i++) { |
| 2931 | if (fill_rx_buffers(nic, i) == -ENOMEM) { | 2924 | if (fill_rx_buffers(&mac_control->rings[i]) == -ENOMEM) { |
| 2932 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); | 2925 | DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name); |
| 2933 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); | 2926 | DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n"); |
| 2934 | break; | 2927 | break; |
| @@ -2953,8 +2946,6 @@ static void s2io_netpoll(struct net_device *dev) | |||
| 2953 | */ | 2946 | */ |
| 2954 | static void rx_intr_handler(struct ring_info *ring_data) | 2947 | static void rx_intr_handler(struct ring_info *ring_data) |
| 2955 | { | 2948 | { |
| 2956 | struct s2io_nic *nic = ring_data->nic; | ||
| 2957 | struct net_device *dev = (struct net_device *) nic->dev; | ||
| 2958 | int get_block, put_block; | 2949 | int get_block, put_block; |
| 2959 | struct rx_curr_get_info get_info, put_info; | 2950 | struct rx_curr_get_info get_info, put_info; |
| 2960 | struct RxD_t *rxdp; | 2951 | struct RxD_t *rxdp; |
| @@ -2977,33 +2968,34 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
| 2977 | */ | 2968 | */ |
| 2978 | if ((get_block == put_block) && | 2969 | if ((get_block == put_block) && |
| 2979 | (get_info.offset + 1) == put_info.offset) { | 2970 | (get_info.offset + 1) == put_info.offset) { |
| 2980 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n",dev->name); | 2971 | DBG_PRINT(INTR_DBG, "%s: Ring Full\n", |
| 2972 | ring_data->dev->name); | ||
| 2981 | break; | 2973 | break; |
| 2982 | } | 2974 | } |
| 2983 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); | 2975 | skb = (struct sk_buff *) ((unsigned long)rxdp->Host_Control); |
| 2984 | if (skb == NULL) { | 2976 | if (skb == NULL) { |
| 2985 | DBG_PRINT(ERR_DBG, "%s: The skb is ", | 2977 | DBG_PRINT(ERR_DBG, "%s: The skb is ", |
| 2986 | dev->name); | 2978 | ring_data->dev->name); |
| 2987 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); | 2979 | DBG_PRINT(ERR_DBG, "Null in Rx Intr\n"); |
| 2988 | return; | 2980 | return; |
| 2989 | } | 2981 | } |
| 2990 | if (nic->rxd_mode == RXD_MODE_1) { | 2982 | if (ring_data->rxd_mode == RXD_MODE_1) { |
| 2991 | rxdp1 = (struct RxD1*)rxdp; | 2983 | rxdp1 = (struct RxD1*)rxdp; |
| 2992 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2984 | pci_unmap_single(ring_data->pdev, (dma_addr_t) |
| 2993 | rxdp1->Buffer0_ptr, | 2985 | rxdp1->Buffer0_ptr, |
| 2994 | dev->mtu + | 2986 | ring_data->mtu + |
| 2995 | HEADER_ETHERNET_II_802_3_SIZE + | 2987 | HEADER_ETHERNET_II_802_3_SIZE + |
| 2996 | HEADER_802_2_SIZE + | 2988 | HEADER_802_2_SIZE + |
| 2997 | HEADER_SNAP_SIZE, | 2989 | HEADER_SNAP_SIZE, |
| 2998 | PCI_DMA_FROMDEVICE); | 2990 | PCI_DMA_FROMDEVICE); |
| 2999 | } else if (nic->rxd_mode == RXD_MODE_3B) { | 2991 | } else if (ring_data->rxd_mode == RXD_MODE_3B) { |
| 3000 | rxdp3 = (struct RxD3*)rxdp; | 2992 | rxdp3 = (struct RxD3*)rxdp; |
| 3001 | pci_dma_sync_single_for_cpu(nic->pdev, (dma_addr_t) | 2993 | pci_dma_sync_single_for_cpu(ring_data->pdev, (dma_addr_t) |
| 3002 | rxdp3->Buffer0_ptr, | 2994 | rxdp3->Buffer0_ptr, |
| 3003 | BUF0_LEN, PCI_DMA_FROMDEVICE); | 2995 | BUF0_LEN, PCI_DMA_FROMDEVICE); |
| 3004 | pci_unmap_single(nic->pdev, (dma_addr_t) | 2996 | pci_unmap_single(ring_data->pdev, (dma_addr_t) |
| 3005 | rxdp3->Buffer2_ptr, | 2997 | rxdp3->Buffer2_ptr, |
| 3006 | dev->mtu + 4, | 2998 | ring_data->mtu + 4, |
| 3007 | PCI_DMA_FROMDEVICE); | 2999 | PCI_DMA_FROMDEVICE); |
| 3008 | } | 3000 | } |
| 3009 | prefetch(skb->data); | 3001 | prefetch(skb->data); |
| @@ -3012,7 +3004,7 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
| 3012 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3004 | ring_data->rx_curr_get_info.offset = get_info.offset; |
| 3013 | rxdp = ring_data->rx_blocks[get_block]. | 3005 | rxdp = ring_data->rx_blocks[get_block]. |
| 3014 | rxds[get_info.offset].virt_addr; | 3006 | rxds[get_info.offset].virt_addr; |
| 3015 | if (get_info.offset == rxd_count[nic->rxd_mode]) { | 3007 | if (get_info.offset == rxd_count[ring_data->rxd_mode]) { |
| 3016 | get_info.offset = 0; | 3008 | get_info.offset = 0; |
| 3017 | ring_data->rx_curr_get_info.offset = get_info.offset; | 3009 | ring_data->rx_curr_get_info.offset = get_info.offset; |
| 3018 | get_block++; | 3010 | get_block++; |
| @@ -3022,19 +3014,21 @@ static void rx_intr_handler(struct ring_info *ring_data) | |||
| 3022 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; | 3014 | rxdp = ring_data->rx_blocks[get_block].block_virt_addr; |
| 3023 | } | 3015 | } |
| 3024 | 3016 | ||
| 3025 | nic->pkts_to_process -= 1; | 3017 | if(ring_data->nic->config.napi){ |
| 3026 | if ((napi) && (!nic->pkts_to_process)) | 3018 | ring_data->nic->pkts_to_process -= 1; |
| 3027 | break; | 3019 | if (!ring_data->nic->pkts_to_process) |
| 3020 | break; | ||
| 3021 | } | ||
| 3028 | pkt_cnt++; | 3022 | pkt_cnt++; |
| 3029 | if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) | 3023 | if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts)) |
| 3030 | break; | 3024 | break; |
| 3031 | } | 3025 | } |
| 3032 | if (nic->lro) { | 3026 | if (ring_data->lro) { |
| 3033 | /* Clear all LRO sessions before exiting */ | 3027 | /* Clear all LRO sessions before exiting */ |
| 3034 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 3028 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
| 3035 | struct lro *lro = &nic->lro0_n[i]; | 3029 | struct lro *lro = &ring_data->lro0_n[i]; |
| 3036 | if (lro->in_use) { | 3030 | if (lro->in_use) { |
| 3037 | update_L3L4_header(nic, lro); | 3031 | update_L3L4_header(ring_data->nic, lro); |
| 3038 | queue_rx_frame(lro->parent, lro->vlan_tag); | 3032 | queue_rx_frame(lro->parent, lro->vlan_tag); |
| 3039 | clear_lro_session(lro); | 3033 | clear_lro_session(lro); |
| 3040 | } | 3034 | } |
| @@ -4333,10 +4327,10 @@ s2io_alarm_handle(unsigned long data) | |||
| 4333 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); | 4327 | mod_timer(&sp->alarm_timer, jiffies + HZ / 2); |
| 4334 | } | 4328 | } |
| 4335 | 4329 | ||
| 4336 | static int s2io_chk_rx_buffers(struct s2io_nic *sp, int rng_n) | 4330 | static int s2io_chk_rx_buffers(struct ring_info *ring) |
| 4337 | { | 4331 | { |
| 4338 | if (fill_rx_buffers(sp, rng_n) == -ENOMEM) { | 4332 | if (fill_rx_buffers(ring) == -ENOMEM) { |
| 4339 | DBG_PRINT(INFO_DBG, "%s:Out of memory", sp->dev->name); | 4333 | DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name); |
| 4340 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); | 4334 | DBG_PRINT(INFO_DBG, " in Rx Intr!!\n"); |
| 4341 | } | 4335 | } |
| 4342 | return 0; | 4336 | return 0; |
| @@ -4351,7 +4345,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id) | |||
| 4351 | return IRQ_HANDLED; | 4345 | return IRQ_HANDLED; |
| 4352 | 4346 | ||
| 4353 | rx_intr_handler(ring); | 4347 | rx_intr_handler(ring); |
| 4354 | s2io_chk_rx_buffers(sp, ring->ring_no); | 4348 | s2io_chk_rx_buffers(ring); |
| 4355 | 4349 | ||
| 4356 | return IRQ_HANDLED; | 4350 | return IRQ_HANDLED; |
| 4357 | } | 4351 | } |
| @@ -4809,7 +4803,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id) | |||
| 4809 | */ | 4803 | */ |
| 4810 | if (!config->napi) { | 4804 | if (!config->napi) { |
| 4811 | for (i = 0; i < config->rx_ring_num; i++) | 4805 | for (i = 0; i < config->rx_ring_num; i++) |
| 4812 | s2io_chk_rx_buffers(sp, i); | 4806 | s2io_chk_rx_buffers(&mac_control->rings[i]); |
| 4813 | } | 4807 | } |
| 4814 | writeq(sp->general_int_mask, &bar0->general_int_mask); | 4808 | writeq(sp->general_int_mask, &bar0->general_int_mask); |
| 4815 | readl(&bar0->general_int_status); | 4809 | readl(&bar0->general_int_status); |
| @@ -4866,6 +4860,7 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
| 4866 | struct s2io_nic *sp = dev->priv; | 4860 | struct s2io_nic *sp = dev->priv; |
| 4867 | struct mac_info *mac_control; | 4861 | struct mac_info *mac_control; |
| 4868 | struct config_param *config; | 4862 | struct config_param *config; |
| 4863 | int i; | ||
| 4869 | 4864 | ||
| 4870 | 4865 | ||
| 4871 | mac_control = &sp->mac_control; | 4866 | mac_control = &sp->mac_control; |
| @@ -4885,6 +4880,13 @@ static struct net_device_stats *s2io_get_stats(struct net_device *dev) | |||
| 4885 | sp->stats.rx_length_errors = | 4880 | sp->stats.rx_length_errors = |
| 4886 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); | 4881 | le64_to_cpu(mac_control->stats_info->rmac_long_frms); |
| 4887 | 4882 | ||
| 4883 | /* collect per-ring rx_packets and rx_bytes */ | ||
| 4884 | sp->stats.rx_packets = sp->stats.rx_bytes = 0; | ||
| 4885 | for (i = 0; i < config->rx_ring_num; i++) { | ||
| 4886 | sp->stats.rx_packets += mac_control->rings[i].rx_packets; | ||
| 4887 | sp->stats.rx_bytes += mac_control->rings[i].rx_bytes; | ||
| 4888 | } | ||
| 4889 | |||
| 4888 | return (&sp->stats); | 4890 | return (&sp->stats); |
| 4889 | } | 4891 | } |
| 4890 | 4892 | ||
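With rx_packets and rx_bytes moved into struct ring_info, each ring bumps its own counters on the receive path and s2io_get_stats() folds them together on demand. A compact stand-alone version of that aggregation step (struct names here are illustrative):

#include <stdio.h>

struct ring_stats { unsigned long rx_packets, rx_bytes; };
struct dev_stats  { unsigned long rx_packets, rx_bytes; };

/* Fold the per-ring counters into the single netdev view on demand,
 * the way the reworked s2io_get_stats() does. */
static void collect(struct dev_stats *out,
		    const struct ring_stats *rings, unsigned int n)
{
	unsigned int i;

	out->rx_packets = 0;
	out->rx_bytes = 0;
	for (i = 0; i < n; i++) {
		out->rx_packets += rings[i].rx_packets;
		out->rx_bytes += rings[i].rx_bytes;
	}
}

int main(void)
{
	struct ring_stats rings[2] = { { 10, 1500 }, { 4, 600 } };
	struct dev_stats total;

	collect(&total, rings, 2);
	printf("%lu packets / %lu bytes\n", total.rx_packets, total.rx_bytes);
	return 0;
}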
| @@ -7157,7 +7159,9 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
| 7157 | config = &sp->config; | 7159 | config = &sp->config; |
| 7158 | 7160 | ||
| 7159 | for (i = 0; i < config->rx_ring_num; i++) { | 7161 | for (i = 0; i < config->rx_ring_num; i++) { |
| 7160 | if ((ret = fill_rx_buffers(sp, i))) { | 7162 | mac_control->rings[i].mtu = dev->mtu; |
| 7163 | ret = fill_rx_buffers(&mac_control->rings[i]); | ||
| 7164 | if (ret) { | ||
| 7161 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", | 7165 | DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", |
| 7162 | dev->name); | 7166 | dev->name); |
| 7163 | s2io_reset(sp); | 7167 | s2io_reset(sp); |
| @@ -7165,7 +7169,7 @@ static int s2io_card_up(struct s2io_nic * sp) | |||
| 7165 | return -ENOMEM; | 7169 | return -ENOMEM; |
| 7166 | } | 7170 | } |
| 7167 | DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, | 7171 | DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, |
| 7168 | atomic_read(&sp->rx_bufs_left[i])); | 7172 | mac_control->rings[i].rx_bufs_left); |
| 7169 | } | 7173 | } |
| 7170 | 7174 | ||
| 7171 | /* Initialise napi */ | 7175 | /* Initialise napi */ |
| @@ -7300,7 +7304,7 @@ static void s2io_tx_watchdog(struct net_device *dev) | |||
| 7300 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | 7304 | static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) |
| 7301 | { | 7305 | { |
| 7302 | struct s2io_nic *sp = ring_data->nic; | 7306 | struct s2io_nic *sp = ring_data->nic; |
| 7303 | struct net_device *dev = (struct net_device *) sp->dev; | 7307 | struct net_device *dev = (struct net_device *) ring_data->dev; |
| 7304 | struct sk_buff *skb = (struct sk_buff *) | 7308 | struct sk_buff *skb = (struct sk_buff *) |
| 7305 | ((unsigned long) rxdp->Host_Control); | 7309 | ((unsigned long) rxdp->Host_Control); |
| 7306 | int ring_no = ring_data->ring_no; | 7310 | int ring_no = ring_data->ring_no; |
| @@ -7377,19 +7381,19 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
| 7377 | sp->mac_control.stats_info->sw_stat.mem_freed | 7381 | sp->mac_control.stats_info->sw_stat.mem_freed |
| 7378 | += skb->truesize; | 7382 | += skb->truesize; |
| 7379 | dev_kfree_skb(skb); | 7383 | dev_kfree_skb(skb); |
| 7380 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 7384 | ring_data->rx_bufs_left -= 1; |
| 7381 | rxdp->Host_Control = 0; | 7385 | rxdp->Host_Control = 0; |
| 7382 | return 0; | 7386 | return 0; |
| 7383 | } | 7387 | } |
| 7384 | } | 7388 | } |
| 7385 | 7389 | ||
| 7386 | /* Updating statistics */ | 7390 | /* Updating statistics */ |
| 7387 | sp->stats.rx_packets++; | 7391 | ring_data->rx_packets++; |
| 7388 | rxdp->Host_Control = 0; | 7392 | rxdp->Host_Control = 0; |
| 7389 | if (sp->rxd_mode == RXD_MODE_1) { | 7393 | if (sp->rxd_mode == RXD_MODE_1) { |
| 7390 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); | 7394 | int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2); |
| 7391 | 7395 | ||
| 7392 | sp->stats.rx_bytes += len; | 7396 | ring_data->rx_bytes += len; |
| 7393 | skb_put(skb, len); | 7397 | skb_put(skb, len); |
| 7394 | 7398 | ||
| 7395 | } else if (sp->rxd_mode == RXD_MODE_3B) { | 7399 | } else if (sp->rxd_mode == RXD_MODE_3B) { |
| @@ -7400,13 +7404,13 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
| 7400 | unsigned char *buff = skb_push(skb, buf0_len); | 7404 | unsigned char *buff = skb_push(skb, buf0_len); |
| 7401 | 7405 | ||
| 7402 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; | 7406 | struct buffAdd *ba = &ring_data->ba[get_block][get_off]; |
| 7403 | sp->stats.rx_bytes += buf0_len + buf2_len; | 7407 | ring_data->rx_bytes += buf0_len + buf2_len; |
| 7404 | memcpy(buff, ba->ba_0, buf0_len); | 7408 | memcpy(buff, ba->ba_0, buf0_len); |
| 7405 | skb_put(skb, buf2_len); | 7409 | skb_put(skb, buf2_len); |
| 7406 | } | 7410 | } |
| 7407 | 7411 | ||
| 7408 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!sp->lro) || | 7412 | if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && ((!ring_data->lro) || |
| 7409 | (sp->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && | 7413 | (ring_data->lro && (!(rxdp->Control_1 & RXD_FRAME_IP_FRAG)))) && |
| 7410 | (sp->rx_csum)) { | 7414 | (sp->rx_csum)) { |
| 7411 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); | 7415 | l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1); |
| 7412 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); | 7416 | l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1); |
| @@ -7417,14 +7421,14 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp) | |||
| 7417 | * a flag in the RxD. | 7421 | * a flag in the RxD. |
| 7418 | */ | 7422 | */ |
| 7419 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 7423 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
| 7420 | if (sp->lro) { | 7424 | if (ring_data->lro) { |
| 7421 | u32 tcp_len; | 7425 | u32 tcp_len; |
| 7422 | u8 *tcp; | 7426 | u8 *tcp; |
| 7423 | int ret = 0; | 7427 | int ret = 0; |
| 7424 | 7428 | ||
| 7425 | ret = s2io_club_tcp_session(skb->data, &tcp, | 7429 | ret = s2io_club_tcp_session(ring_data, |
| 7426 | &tcp_len, &lro, | 7430 | skb->data, &tcp, &tcp_len, &lro, |
| 7427 | rxdp, sp); | 7431 | rxdp, sp); |
| 7428 | switch (ret) { | 7432 | switch (ret) { |
| 7429 | case 3: /* Begin anew */ | 7433 | case 3: /* Begin anew */ |
| 7430 | lro->parent = skb; | 7434 | lro->parent = skb; |
| @@ -7486,7 +7490,7 @@ send_up: | |||
| 7486 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); | 7490 | queue_rx_frame(skb, RXD_GET_VLAN_TAG(rxdp->Control_2)); |
| 7487 | dev->last_rx = jiffies; | 7491 | dev->last_rx = jiffies; |
| 7488 | aggregate: | 7492 | aggregate: |
| 7489 | atomic_dec(&sp->rx_bufs_left[ring_no]); | 7493 | sp->mac_control.rings[ring_no].rx_bufs_left -= 1; |
| 7490 | return SUCCESS; | 7494 | return SUCCESS; |
| 7491 | } | 7495 | } |
| 7492 | 7496 | ||
| @@ -7603,12 +7607,14 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, | |||
| 7603 | tx_steering_type = NO_STEERING; | 7607 | tx_steering_type = NO_STEERING; |
| 7604 | } | 7608 | } |
| 7605 | 7609 | ||
| 7606 | if ( rx_ring_num > 8) { | 7610 | if (rx_ring_num > MAX_RX_RINGS) { |
| 7607 | DBG_PRINT(ERR_DBG, "s2io: Requested number of Rx rings not " | 7611 | DBG_PRINT(ERR_DBG, "s2io: Requested number of rx rings not " |
| 7608 | "supported\n"); | 7612 | "supported\n"); |
| 7609 | DBG_PRINT(ERR_DBG, "s2io: Default to 8 Rx rings\n"); | 7613 | DBG_PRINT(ERR_DBG, "s2io: Default to %d rx rings\n", |
| 7610 | rx_ring_num = 8; | 7614 | MAX_RX_RINGS); |
| 7615 | rx_ring_num = MAX_RX_RINGS; | ||
| 7611 | } | 7616 | } |
| 7617 | |||
| 7612 | if (*dev_intr_type != INTA) | 7618 | if (*dev_intr_type != INTA) |
| 7613 | napi = 0; | 7619 | napi = 0; |
| 7614 | 7620 | ||
| @@ -7836,10 +7842,15 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7836 | 7842 | ||
| 7837 | /* Rx side parameters. */ | 7843 | /* Rx side parameters. */ |
| 7838 | config->rx_ring_num = rx_ring_num; | 7844 | config->rx_ring_num = rx_ring_num; |
| 7839 | for (i = 0; i < MAX_RX_RINGS; i++) { | 7845 | for (i = 0; i < config->rx_ring_num; i++) { |
| 7840 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * | 7846 | config->rx_cfg[i].num_rxd = rx_ring_sz[i] * |
| 7841 | (rxd_count[sp->rxd_mode] + 1); | 7847 | (rxd_count[sp->rxd_mode] + 1); |
| 7842 | config->rx_cfg[i].ring_priority = i; | 7848 | config->rx_cfg[i].ring_priority = i; |
| 7849 | mac_control->rings[i].rx_bufs_left = 0; | ||
| 7850 | mac_control->rings[i].rxd_mode = sp->rxd_mode; | ||
| 7851 | mac_control->rings[i].rxd_count = rxd_count[sp->rxd_mode]; | ||
| 7852 | mac_control->rings[i].pdev = sp->pdev; | ||
| 7853 | mac_control->rings[i].dev = sp->dev; | ||
| 7843 | } | 7854 | } |
| 7844 | 7855 | ||
| 7845 | for (i = 0; i < rx_ring_num; i++) { | 7856 | for (i = 0; i < rx_ring_num; i++) { |
| @@ -7854,10 +7865,6 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 7854 | mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; | 7865 | mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7; |
| 7855 | 7866 | ||
| 7856 | 7867 | ||
| 7857 | /* Initialize Ring buffer parameters. */ | ||
| 7858 | for (i = 0; i < config->rx_ring_num; i++) | ||
| 7859 | atomic_set(&sp->rx_bufs_left[i], 0); | ||
| 7860 | |||
| 7861 | /* initialize the shared memory used by the NIC and the host */ | 7868 | /* initialize the shared memory used by the NIC and the host */ |
| 7862 | if (init_shared_mem(sp)) { | 7869 | if (init_shared_mem(sp)) { |
| 7863 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", | 7870 | DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n", |
| @@ -8077,6 +8084,9 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) | |||
| 8077 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, | 8084 | DBG_PRINT(ERR_DBG, "%s: Using %d Tx fifo(s)\n", dev->name, |
| 8078 | sp->config.tx_fifo_num); | 8085 | sp->config.tx_fifo_num); |
| 8079 | 8086 | ||
| 8087 | DBG_PRINT(ERR_DBG, "%s: Using %d Rx ring(s)\n", dev->name, | ||
| 8088 | sp->config.rx_ring_num); | ||
| 8089 | |||
| 8080 | switch(sp->config.intr_type) { | 8090 | switch(sp->config.intr_type) { |
| 8081 | case INTA: | 8091 | case INTA: |
| 8082 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); | 8092 | DBG_PRINT(ERR_DBG, "%s: Interrupt type INTA\n", dev->name); |
| @@ -8391,8 +8401,9 @@ static int verify_l3_l4_lro_capable(struct lro *l_lro, struct iphdr *ip, | |||
| 8391 | } | 8401 | } |
| 8392 | 8402 | ||
| 8393 | static int | 8403 | static int |
| 8394 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | 8404 | s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, u8 **tcp, |
| 8395 | struct RxD_t *rxdp, struct s2io_nic *sp) | 8405 | u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, |
| 8406 | struct s2io_nic *sp) | ||
| 8396 | { | 8407 | { |
| 8397 | struct iphdr *ip; | 8408 | struct iphdr *ip; |
| 8398 | struct tcphdr *tcph; | 8409 | struct tcphdr *tcph; |
| @@ -8410,7 +8421,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | |||
| 8410 | tcph = (struct tcphdr *)*tcp; | 8421 | tcph = (struct tcphdr *)*tcp; |
| 8411 | *tcp_len = get_l4_pyld_length(ip, tcph); | 8422 | *tcp_len = get_l4_pyld_length(ip, tcph); |
| 8412 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8423 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
| 8413 | struct lro *l_lro = &sp->lro0_n[i]; | 8424 | struct lro *l_lro = &ring_data->lro0_n[i]; |
| 8414 | if (l_lro->in_use) { | 8425 | if (l_lro->in_use) { |
| 8415 | if (check_for_socket_match(l_lro, ip, tcph)) | 8426 | if (check_for_socket_match(l_lro, ip, tcph)) |
| 8416 | continue; | 8427 | continue; |
| @@ -8448,7 +8459,7 @@ s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | |||
| 8448 | } | 8459 | } |
| 8449 | 8460 | ||
| 8450 | for (i=0; i<MAX_LRO_SESSIONS; i++) { | 8461 | for (i=0; i<MAX_LRO_SESSIONS; i++) { |
| 8451 | struct lro *l_lro = &sp->lro0_n[i]; | 8462 | struct lro *l_lro = &ring_data->lro0_n[i]; |
| 8452 | if (!(l_lro->in_use)) { | 8463 | if (!(l_lro->in_use)) { |
| 8453 | *lro = l_lro; | 8464 | *lro = l_lro; |
| 8454 | ret = 3; /* Begin anew */ | 8465 | ret = 3; /* Begin anew */ |
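Moving lro0_n[] into struct ring_info means s2io_club_tcp_session() now looks up sessions in a per-ring table, so rings no longer share one array. The driver does this in two passes (match an active session, then find a free slot); the sketch below condenses the same idea into one pass over a hypothetical session table:

#include <stdio.h>

#define MAX_LRO_SESSIONS 32

struct lro_session {
	int in_use;
	unsigned int saddr, daddr;
	unsigned short sport, dport;
};

/* One-pass variant of the per-ring session lookup: return a matching
 * active session if there is one, otherwise a free slot (or NULL when
 * the table is full), leaving "begin anew" vs "aggregate" to the caller. */
static struct lro_session *
lro_find(struct lro_session *table,
	 unsigned int saddr, unsigned int daddr,
	 unsigned short sport, unsigned short dport)
{
	struct lro_session *free_slot = NULL;
	int i;

	for (i = 0; i < MAX_LRO_SESSIONS; i++) {
		struct lro_session *s = &table[i];

		if (s->in_use) {
			if (s->saddr == saddr && s->daddr == daddr &&
			    s->sport == sport && s->dport == dport)
				return s;	/* aggregate onto this one */
		} else if (!free_slot) {
			free_slot = s;		/* candidate for a new session */
		}
	}
	return free_slot;
}

int main(void)
{
	struct lro_session ring_sessions[MAX_LRO_SESSIONS] = { { 0 } };
	struct lro_session *s = lro_find(ring_sessions, 1, 2, 80, 1024);

	printf("got %s slot\n", s && !s->in_use ? "a free" : "an active");
	return 0;
}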
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h index ce53a02105f2..0709ebae9139 100644 --- a/drivers/net/s2io.h +++ b/drivers/net/s2io.h | |||
| @@ -678,11 +678,53 @@ struct rx_block_info { | |||
| 678 | struct rxd_info *rxds; | 678 | struct rxd_info *rxds; |
| 679 | }; | 679 | }; |
| 680 | 680 | ||
| 681 | /* Data structure to represent a LRO session */ | ||
| 682 | struct lro { | ||
| 683 | struct sk_buff *parent; | ||
| 684 | struct sk_buff *last_frag; | ||
| 685 | u8 *l2h; | ||
| 686 | struct iphdr *iph; | ||
| 687 | struct tcphdr *tcph; | ||
| 688 | u32 tcp_next_seq; | ||
| 689 | __be32 tcp_ack; | ||
| 690 | int total_len; | ||
| 691 | int frags_len; | ||
| 692 | int sg_num; | ||
| 693 | int in_use; | ||
| 694 | __be16 window; | ||
| 695 | u16 vlan_tag; | ||
| 696 | u32 cur_tsval; | ||
| 697 | __be32 cur_tsecr; | ||
| 698 | u8 saw_ts; | ||
| 699 | } ____cacheline_aligned; | ||
| 700 | |||
| 681 | /* Ring specific structure */ | 701 | /* Ring specific structure */ |
| 682 | struct ring_info { | 702 | struct ring_info { |
| 683 | /* The ring number */ | 703 | /* The ring number */ |
| 684 | int ring_no; | 704 | int ring_no; |
| 685 | 705 | ||
| 706 | /* per-ring buffer counter */ | ||
| 707 | u32 rx_bufs_left; | ||
| 708 | |||
| 709 | #define MAX_LRO_SESSIONS 32 | ||
| 710 | struct lro lro0_n[MAX_LRO_SESSIONS]; | ||
| 711 | u8 lro; | ||
| 712 | |||
| 713 | /* copy of sp->rxd_mode flag */ | ||
| 714 | int rxd_mode; | ||
| 715 | |||
| 716 | /* Number of rxds per block for the rxd_mode */ | ||
| 717 | int rxd_count; | ||
| 718 | |||
| 719 | /* copy of sp pointer */ | ||
| 720 | struct s2io_nic *nic; | ||
| 721 | |||
| 722 | /* copy of sp->dev pointer */ | ||
| 723 | struct net_device *dev; | ||
| 724 | |||
| 725 | /* copy of sp->pdev pointer */ | ||
| 726 | struct pci_dev *pdev; | ||
| 727 | |||
| 686 | /* | 728 | /* |
| 687 | * Place holders for the virtual and physical addresses of | 729 | * Place holders for the virtual and physical addresses of |
| 688 | * all the Rx Blocks | 730 | * all the Rx Blocks |
| @@ -703,10 +745,16 @@ struct ring_info { | |||
| 703 | */ | 745 | */ |
| 704 | struct rx_curr_get_info rx_curr_get_info; | 746 | struct rx_curr_get_info rx_curr_get_info; |
| 705 | 747 | ||
| 748 | /* interface MTU value */ | ||
| 749 | unsigned mtu; | ||
| 750 | |||
| 706 | /* Buffer Address store. */ | 751 | /* Buffer Address store. */ |
| 707 | struct buffAdd **ba; | 752 | struct buffAdd **ba; |
| 708 | struct s2io_nic *nic; | 753 | |
| 709 | }; | 754 | /* per-Ring statistics */ |
| 755 | unsigned long rx_packets; | ||
| 756 | unsigned long rx_bytes; | ||
| 757 | } ____cacheline_aligned; | ||
| 710 | 758 | ||
| 711 | /* Fifo specific structure */ | 759 | /* Fifo specific structure */ |
| 712 | struct fifo_info { | 760 | struct fifo_info { |
| @@ -813,26 +861,6 @@ struct msix_info_st { | |||
| 813 | u64 data; | 861 | u64 data; |
| 814 | }; | 862 | }; |
| 815 | 863 | ||
| 816 | /* Data structure to represent a LRO session */ | ||
| 817 | struct lro { | ||
| 818 | struct sk_buff *parent; | ||
| 819 | struct sk_buff *last_frag; | ||
| 820 | u8 *l2h; | ||
| 821 | struct iphdr *iph; | ||
| 822 | struct tcphdr *tcph; | ||
| 823 | u32 tcp_next_seq; | ||
| 824 | __be32 tcp_ack; | ||
| 825 | int total_len; | ||
| 826 | int frags_len; | ||
| 827 | int sg_num; | ||
| 828 | int in_use; | ||
| 829 | __be16 window; | ||
| 830 | u16 vlan_tag; | ||
| 831 | u32 cur_tsval; | ||
| 832 | __be32 cur_tsecr; | ||
| 833 | u8 saw_ts; | ||
| 834 | } ____cacheline_aligned; | ||
| 835 | |||
| 836 | /* These flags represent the devices temporary state */ | 864 | /* These flags represent the devices temporary state */ |
| 837 | enum s2io_device_state_t | 865 | enum s2io_device_state_t |
| 838 | { | 866 | { |
| @@ -872,8 +900,6 @@ struct s2io_nic { | |||
| 872 | /* Space to back up the PCI config space */ | 900 | /* Space to back up the PCI config space */ |
| 873 | u32 config_space[256 / sizeof(u32)]; | 901 | u32 config_space[256 / sizeof(u32)]; |
| 874 | 902 | ||
| 875 | atomic_t rx_bufs_left[MAX_RX_RINGS]; | ||
| 876 | |||
| 877 | #define PROMISC 1 | 903 | #define PROMISC 1 |
| 878 | #define ALL_MULTI 2 | 904 | #define ALL_MULTI 2 |
| 879 | 905 | ||
| @@ -950,8 +976,6 @@ struct s2io_nic { | |||
| 950 | #define XFRAME_II_DEVICE 2 | 976 | #define XFRAME_II_DEVICE 2 |
| 951 | u8 device_type; | 977 | u8 device_type; |
| 952 | 978 | ||
| 953 | #define MAX_LRO_SESSIONS 32 | ||
| 954 | struct lro lro0_n[MAX_LRO_SESSIONS]; | ||
| 955 | unsigned long clubbed_frms_cnt; | 979 | unsigned long clubbed_frms_cnt; |
| 956 | unsigned long sending_both; | 980 | unsigned long sending_both; |
| 957 | u8 lro; | 981 | u8 lro; |
| @@ -1118,9 +1142,9 @@ static int do_s2io_add_mc(struct s2io_nic *sp, u8 *addr); | |||
| 1118 | static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); | 1142 | static int do_s2io_add_mac(struct s2io_nic *sp, u64 addr, int offset); |
| 1119 | static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); | 1143 | static int do_s2io_delete_unicast_mc(struct s2io_nic *sp, u64 addr); |
| 1120 | 1144 | ||
| 1121 | static int | 1145 | static int s2io_club_tcp_session(struct ring_info *ring_data, u8 *buffer, |
| 1122 | s2io_club_tcp_session(u8 *buffer, u8 **tcp, u32 *tcp_len, struct lro **lro, | 1146 | u8 **tcp, u32 *tcp_len, struct lro **lro, struct RxD_t *rxdp, |
| 1123 | struct RxD_t *rxdp, struct s2io_nic *sp); | 1147 | struct s2io_nic *sp); |
| 1124 | static void clear_lro_session(struct lro *lro); | 1148 | static void clear_lro_session(struct lro *lro); |
| 1125 | static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); | 1149 | static void queue_rx_frame(struct sk_buff *skb, u16 vlan_tag); |
| 1126 | static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); | 1150 | static void update_L3L4_header(struct s2io_nic *sp, struct lro *lro); |
diff --git a/drivers/net/sfc/Kconfig b/drivers/net/sfc/Kconfig new file mode 100644 index 000000000000..dbad95c295bd --- /dev/null +++ b/drivers/net/sfc/Kconfig | |||
| @@ -0,0 +1,12 @@ | |||
| 1 | config SFC | ||
| 2 | tristate "Solarflare Solarstorm SFC4000 support" | ||
| 3 | depends on PCI && INET | ||
| 4 | select MII | ||
| 5 | select INET_LRO | ||
| 6 | select CRC32 | ||
| 7 | help | ||
| 8 | This driver supports 10-gigabit Ethernet cards based on | ||
| 9 | the Solarflare Communications Solarstorm SFC4000 controller. | ||
| 10 | |||
| 11 | To compile this driver as a module, choose M here. The module | ||
| 12 | will be called sfc. | ||
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile new file mode 100644 index 000000000000..0f023447eafd --- /dev/null +++ b/drivers/net/sfc/Makefile | |||
| @@ -0,0 +1,5 @@ | |||
| 1 | sfc-y += efx.o falcon.o tx.o rx.o falcon_xmac.o \ | ||
| 2 | i2c-direct.o ethtool.o xfp_phy.o mdio_10g.o \ | ||
| 3 | tenxpress.o boards.o sfe4001.o | ||
| 4 | |||
| 5 | obj-$(CONFIG_SFC) += sfc.o | ||
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h new file mode 100644 index 000000000000..2806201644cc --- /dev/null +++ b/drivers/net/sfc/bitfield.h | |||
| @@ -0,0 +1,508 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_BITFIELD_H | ||
| 12 | #define EFX_BITFIELD_H | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Efx bitfield access | ||
| 16 | * | ||
| 17 | * Efx NICs make extensive use of bitfields up to 128 bits | ||
| 18 | * wide. Since there is no native 128-bit datatype on most systems, | ||
| 19 | * and since 64-bit datatypes are inefficient on 32-bit systems and | ||
| 20 | * vice versa, we wrap accesses in a way that uses the most efficient | ||
| 21 | * datatype. | ||
| 22 | * | ||
| 23 | * The NICs are PCI devices and therefore little-endian. Since most | ||
| 24 | * of the quantities that we deal with are DMAed to/from host memory, | ||
| 25 | * we define our datatypes (efx_oword_t, efx_qword_t and | ||
| 26 | * efx_dword_t) to be little-endian. | ||
| 27 | */ | ||
| 28 | |||
| 29 | /* Lowest bit numbers and widths */ | ||
| 30 | #define EFX_DUMMY_FIELD_LBN 0 | ||
| 31 | #define EFX_DUMMY_FIELD_WIDTH 0 | ||
| 32 | #define EFX_DWORD_0_LBN 0 | ||
| 33 | #define EFX_DWORD_0_WIDTH 32 | ||
| 34 | #define EFX_DWORD_1_LBN 32 | ||
| 35 | #define EFX_DWORD_1_WIDTH 32 | ||
| 36 | #define EFX_DWORD_2_LBN 64 | ||
| 37 | #define EFX_DWORD_2_WIDTH 32 | ||
| 38 | #define EFX_DWORD_3_LBN 96 | ||
| 39 | #define EFX_DWORD_3_WIDTH 32 | ||
| 40 | |||
| 41 | /* Specified attribute (e.g. LBN) of the specified field */ | ||
| 42 | #define EFX_VAL(field, attribute) field ## _ ## attribute | ||
| 43 | /* Low bit number of the specified field */ | ||
| 44 | #define EFX_LOW_BIT(field) EFX_VAL(field, LBN) | ||
| 45 | /* Bit width of the specified field */ | ||
| 46 | #define EFX_WIDTH(field) EFX_VAL(field, WIDTH) | ||
| 47 | /* High bit number of the specified field */ | ||
| 48 | #define EFX_HIGH_BIT(field) (EFX_LOW_BIT(field) + EFX_WIDTH(field) - 1) | ||
| 49 | /* Mask equal in width to the specified field. | ||
| 50 | * | ||
| 51 | * For example, a field with width 5 would have a mask of 0x1f. | ||
| 52 | * | ||
| 53 | * The maximum width mask that can be generated is 64 bits. | ||
| 54 | */ | ||
| 55 | #define EFX_MASK64(field) \ | ||
| 56 | (EFX_WIDTH(field) == 64 ? ~((u64) 0) : \ | ||
| 57 | (((((u64) 1) << EFX_WIDTH(field))) - 1)) | ||
| 58 | |||
| 59 | /* Mask equal in width to the specified field. | ||
| 60 | * | ||
| 61 | * For example, a field with width 5 would have a mask of 0x1f. | ||
| 62 | * | ||
| 63 | * The maximum width mask that can be generated is 32 bits. Use | ||
| 64 | * EFX_MASK64 for higher width fields. | ||
| 65 | */ | ||
| 66 | #define EFX_MASK32(field) \ | ||
| 67 | (EFX_WIDTH(field) == 32 ? ~((u32) 0) : \ | ||
| 68 | (((((u32) 1) << EFX_WIDTH(field))) - 1)) | ||
| 69 | |||
| 70 | /* A doubleword (i.e. 4 byte) datatype - little-endian in HW */ | ||
| 71 | typedef union efx_dword { | ||
| 72 | __le32 u32[1]; | ||
| 73 | } efx_dword_t; | ||
| 74 | |||
| 75 | /* A quadword (i.e. 8 byte) datatype - little-endian in HW */ | ||
| 76 | typedef union efx_qword { | ||
| 77 | __le64 u64[1]; | ||
| 78 | __le32 u32[2]; | ||
| 79 | efx_dword_t dword[2]; | ||
| 80 | } efx_qword_t; | ||
| 81 | |||
| 82 | /* An octword (eight-word, i.e. 16 byte) datatype - little-endian in HW */ | ||
| 83 | typedef union efx_oword { | ||
| 84 | __le64 u64[2]; | ||
| 85 | efx_qword_t qword[2]; | ||
| 86 | __le32 u32[4]; | ||
| 87 | efx_dword_t dword[4]; | ||
| 88 | } efx_oword_t; | ||
| 89 | |||
| 90 | /* Format string and value expanders for printk */ | ||
| 91 | #define EFX_DWORD_FMT "%08x" | ||
| 92 | #define EFX_QWORD_FMT "%08x:%08x" | ||
| 93 | #define EFX_OWORD_FMT "%08x:%08x:%08x:%08x" | ||
| 94 | #define EFX_DWORD_VAL(dword) \ | ||
| 95 | ((unsigned int) le32_to_cpu((dword).u32[0])) | ||
| 96 | #define EFX_QWORD_VAL(qword) \ | ||
| 97 | ((unsigned int) le32_to_cpu((qword).u32[1])), \ | ||
| 98 | ((unsigned int) le32_to_cpu((qword).u32[0])) | ||
| 99 | #define EFX_OWORD_VAL(oword) \ | ||
| 100 | ((unsigned int) le32_to_cpu((oword).u32[3])), \ | ||
| 101 | ((unsigned int) le32_to_cpu((oword).u32[2])), \ | ||
| 102 | ((unsigned int) le32_to_cpu((oword).u32[1])), \ | ||
| 103 | ((unsigned int) le32_to_cpu((oword).u32[0])) | ||
| 104 | |||
| 105 | /* | ||
| 106 | * Extract bit field portion [low,high) from the native-endian element | ||
| 107 | * which contains bits [min,max). | ||
| 108 | * | ||
| 109 | * For example, suppose "element" represents the high 32 bits of a | ||
| 110 | * 64-bit value, and we wish to extract the bits belonging to the bit | ||
| 111 | * field occupying bits 28-45 of this 64-bit value. | ||
| 112 | * | ||
| 113 | * Then EFX_EXTRACT ( element, 32, 63, 28, 45 ) would give | ||
| 114 | * | ||
| 115 | * ( element ) << 4 | ||
| 116 | * | ||
| 117 | * The result will contain the relevant bits filled in over the range | ||
| 118 | * [0,high-low], with garbage in bits [high-low+1,...). | ||
| 119 | */ | ||
| 120 | #define EFX_EXTRACT_NATIVE(native_element, min, max, low, high) \ | ||
| 121 | (((low > max) || (high < min)) ? 0 : \ | ||
| 122 | ((low > min) ? \ | ||
| 123 | ((native_element) >> (low - min)) : \ | ||
| 124 | ((native_element) << (min - low)))) | ||
| 125 | |||
| 126 | /* | ||
| 127 | * Extract bit field portion [low,high) from the 64-bit little-endian | ||
| 128 | * element which contains bits [min,max) | ||
| 129 | */ | ||
| 130 | #define EFX_EXTRACT64(element, min, max, low, high) \ | ||
| 131 | EFX_EXTRACT_NATIVE(le64_to_cpu(element), min, max, low, high) | ||
| 132 | |||
| 133 | /* | ||
| 134 | * Extract bit field portion [low,high) from the 32-bit little-endian | ||
| 135 | * element which contains bits [min,max) | ||
| 136 | */ | ||
| 137 | #define EFX_EXTRACT32(element, min, max, low, high) \ | ||
| 138 | EFX_EXTRACT_NATIVE(le32_to_cpu(element), min, max, low, high) | ||
| 139 | |||
| 140 | #define EFX_EXTRACT_OWORD64(oword, low, high) \ | ||
| 141 | (EFX_EXTRACT64((oword).u64[0], 0, 63, low, high) | \ | ||
| 142 | EFX_EXTRACT64((oword).u64[1], 64, 127, low, high)) | ||
| 143 | |||
| 144 | #define EFX_EXTRACT_QWORD64(qword, low, high) \ | ||
| 145 | EFX_EXTRACT64((qword).u64[0], 0, 63, low, high) | ||
| 146 | |||
| 147 | #define EFX_EXTRACT_OWORD32(oword, low, high) \ | ||
| 148 | (EFX_EXTRACT32((oword).u32[0], 0, 31, low, high) | \ | ||
| 149 | EFX_EXTRACT32((oword).u32[1], 32, 63, low, high) | \ | ||
| 150 | EFX_EXTRACT32((oword).u32[2], 64, 95, low, high) | \ | ||
| 151 | EFX_EXTRACT32((oword).u32[3], 96, 127, low, high)) | ||
| 152 | |||
| 153 | #define EFX_EXTRACT_QWORD32(qword, low, high) \ | ||
| 154 | (EFX_EXTRACT32((qword).u32[0], 0, 31, low, high) | \ | ||
| 155 | EFX_EXTRACT32((qword).u32[1], 32, 63, low, high)) | ||
| 156 | |||
| 157 | #define EFX_EXTRACT_DWORD(dword, low, high) \ | ||
| 158 | EFX_EXTRACT32((dword).u32[0], 0, 31, low, high) | ||
| 159 | |||
| 160 | #define EFX_OWORD_FIELD64(oword, field) \ | ||
| 161 | (EFX_EXTRACT_OWORD64(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ | ||
| 162 | & EFX_MASK64(field)) | ||
| 163 | |||
| 164 | #define EFX_QWORD_FIELD64(qword, field) \ | ||
| 165 | (EFX_EXTRACT_QWORD64(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ | ||
| 166 | & EFX_MASK64(field)) | ||
| 167 | |||
| 168 | #define EFX_OWORD_FIELD32(oword, field) \ | ||
| 169 | (EFX_EXTRACT_OWORD32(oword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ | ||
| 170 | & EFX_MASK32(field)) | ||
| 171 | |||
| 172 | #define EFX_QWORD_FIELD32(qword, field) \ | ||
| 173 | (EFX_EXTRACT_QWORD32(qword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ | ||
| 174 | & EFX_MASK32(field)) | ||
| 175 | |||
| 176 | #define EFX_DWORD_FIELD(dword, field) \ | ||
| 177 | (EFX_EXTRACT_DWORD(dword, EFX_LOW_BIT(field), EFX_HIGH_BIT(field)) \ | ||
| 178 | & EFX_MASK32(field)) | ||
| 179 | |||
| 180 | #define EFX_OWORD_IS_ZERO64(oword) \ | ||
| 181 | (((oword).u64[0] | (oword).u64[1]) == (__force __le64) 0) | ||
| 182 | |||
| 183 | #define EFX_QWORD_IS_ZERO64(qword) \ | ||
| 184 | (((qword).u64[0]) == (__force __le64) 0) | ||
| 185 | |||
| 186 | #define EFX_OWORD_IS_ZERO32(oword) \ | ||
| 187 | (((oword).u32[0] | (oword).u32[1] | (oword).u32[2] | (oword).u32[3]) \ | ||
| 188 | == (__force __le32) 0) | ||
| 189 | |||
| 190 | #define EFX_QWORD_IS_ZERO32(qword) \ | ||
| 191 | (((qword).u32[0] | (qword).u32[1]) == (__force __le32) 0) | ||
| 192 | |||
| 193 | #define EFX_DWORD_IS_ZERO(dword) \ | ||
| 194 | (((dword).u32[0]) == (__force __le32) 0) | ||
| 195 | |||
| 196 | #define EFX_OWORD_IS_ALL_ONES64(oword) \ | ||
| 197 | (((oword).u64[0] & (oword).u64[1]) == ~((__force __le64) 0)) | ||
| 198 | |||
| 199 | #define EFX_QWORD_IS_ALL_ONES64(qword) \ | ||
| 200 | ((qword).u64[0] == ~((__force __le64) 0)) | ||
| 201 | |||
| 202 | #define EFX_OWORD_IS_ALL_ONES32(oword) \ | ||
| 203 | (((oword).u32[0] & (oword).u32[1] & (oword).u32[2] & (oword).u32[3]) \ | ||
| 204 | == ~((__force __le32) 0)) | ||
| 205 | |||
| 206 | #define EFX_QWORD_IS_ALL_ONES32(qword) \ | ||
| 207 | (((qword).u32[0] & (qword).u32[1]) == ~((__force __le32) 0)) | ||
| 208 | |||
| 209 | #define EFX_DWORD_IS_ALL_ONES(dword) \ | ||
| 210 | ((dword).u32[0] == ~((__force __le32) 0)) | ||
| 211 | |||
| 212 | #if BITS_PER_LONG == 64 | ||
| 213 | #define EFX_OWORD_FIELD EFX_OWORD_FIELD64 | ||
| 214 | #define EFX_QWORD_FIELD EFX_QWORD_FIELD64 | ||
| 215 | #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO64 | ||
| 216 | #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO64 | ||
| 217 | #define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES64 | ||
| 218 | #define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES64 | ||
| 219 | #else | ||
| 220 | #define EFX_OWORD_FIELD EFX_OWORD_FIELD32 | ||
| 221 | #define EFX_QWORD_FIELD EFX_QWORD_FIELD32 | ||
| 222 | #define EFX_OWORD_IS_ZERO EFX_OWORD_IS_ZERO32 | ||
| 223 | #define EFX_QWORD_IS_ZERO EFX_QWORD_IS_ZERO32 | ||
| 224 | #define EFX_OWORD_IS_ALL_ONES EFX_OWORD_IS_ALL_ONES32 | ||
| 225 | #define EFX_QWORD_IS_ALL_ONES EFX_QWORD_IS_ALL_ONES32 | ||
| 226 | #endif | ||
| 227 | |||
| 228 | /* | ||
| 229 | * Construct bit field portion | ||
| 230 | * | ||
| 231 | * Creates the portion of the bit field [low,high) that lies within | ||
| 232 | * the range [min,max). | ||
| 233 | */ | ||
| 234 | #define EFX_INSERT_NATIVE64(min, max, low, high, value) \ | ||
| 235 | (((low > max) || (high < min)) ? 0 : \ | ||
| 236 | ((low > min) ? \ | ||
| 237 | (((u64) (value)) << (low - min)) : \ | ||
| 238 | (((u64) (value)) >> (min - low)))) | ||
| 239 | |||
| 240 | #define EFX_INSERT_NATIVE32(min, max, low, high, value) \ | ||
| 241 | (((low > max) || (high < min)) ? 0 : \ | ||
| 242 | ((low > min) ? \ | ||
| 243 | (((u32) (value)) << (low - min)) : \ | ||
| 244 | (((u32) (value)) >> (min - low)))) | ||
| 245 | |||
| 246 | #define EFX_INSERT_NATIVE(min, max, low, high, value) \ | ||
| 247 | ((((max - min) >= 32) || ((high - low) >= 32)) ? \ | ||
| 248 | EFX_INSERT_NATIVE64(min, max, low, high, value) : \ | ||
| 249 | EFX_INSERT_NATIVE32(min, max, low, high, value)) | ||
| 250 | |||
| 251 | /* | ||
| 252 | * Construct bit field portion | ||
| 253 | * | ||
| 254 | * Creates the portion of the named bit field that lies within the | ||
| 255 | * range [min,max). | ||
| 256 | */ | ||
| 257 | #define EFX_INSERT_FIELD_NATIVE(min, max, field, value) \ | ||
| 258 | EFX_INSERT_NATIVE(min, max, EFX_LOW_BIT(field), \ | ||
| 259 | EFX_HIGH_BIT(field), value) | ||
| 260 | |||
| 261 | /* | ||
| 262 | * Construct bit field | ||
| 263 | * | ||
| 264 | * Creates the portion of the named bit fields that lie within the | ||
| 265 | * range [min,max). | ||
| 266 | */ | ||
| 267 | #define EFX_INSERT_FIELDS_NATIVE(min, max, \ | ||
| 268 | field1, value1, \ | ||
| 269 | field2, value2, \ | ||
| 270 | field3, value3, \ | ||
| 271 | field4, value4, \ | ||
| 272 | field5, value5, \ | ||
| 273 | field6, value6, \ | ||
| 274 | field7, value7, \ | ||
| 275 | field8, value8, \ | ||
| 276 | field9, value9, \ | ||
| 277 | field10, value10) \ | ||
| 278 | (EFX_INSERT_FIELD_NATIVE((min), (max), field1, (value1)) | \ | ||
| 279 | EFX_INSERT_FIELD_NATIVE((min), (max), field2, (value2)) | \ | ||
| 280 | EFX_INSERT_FIELD_NATIVE((min), (max), field3, (value3)) | \ | ||
| 281 | EFX_INSERT_FIELD_NATIVE((min), (max), field4, (value4)) | \ | ||
| 282 | EFX_INSERT_FIELD_NATIVE((min), (max), field5, (value5)) | \ | ||
| 283 | EFX_INSERT_FIELD_NATIVE((min), (max), field6, (value6)) | \ | ||
| 284 | EFX_INSERT_FIELD_NATIVE((min), (max), field7, (value7)) | \ | ||
| 285 | EFX_INSERT_FIELD_NATIVE((min), (max), field8, (value8)) | \ | ||
| 286 | EFX_INSERT_FIELD_NATIVE((min), (max), field9, (value9)) | \ | ||
| 287 | EFX_INSERT_FIELD_NATIVE((min), (max), field10, (value10))) | ||
| 288 | |||
| 289 | #define EFX_INSERT_FIELDS64(...) \ | ||
| 290 | cpu_to_le64(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) | ||
| 291 | |||
| 292 | #define EFX_INSERT_FIELDS32(...) \ | ||
| 293 | cpu_to_le32(EFX_INSERT_FIELDS_NATIVE(__VA_ARGS__)) | ||
| 294 | |||
| 295 | #define EFX_POPULATE_OWORD64(oword, ...) do { \ | ||
| 296 | (oword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ | ||
| 297 | (oword).u64[1] = EFX_INSERT_FIELDS64(64, 127, __VA_ARGS__); \ | ||
| 298 | } while (0) | ||
| 299 | |||
| 300 | #define EFX_POPULATE_QWORD64(qword, ...) do { \ | ||
| 301 | (qword).u64[0] = EFX_INSERT_FIELDS64(0, 63, __VA_ARGS__); \ | ||
| 302 | } while (0) | ||
| 303 | |||
| 304 | #define EFX_POPULATE_OWORD32(oword, ...) do { \ | ||
| 305 | (oword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
| 306 | (oword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ | ||
| 307 | (oword).u32[2] = EFX_INSERT_FIELDS32(64, 95, __VA_ARGS__); \ | ||
| 308 | (oword).u32[3] = EFX_INSERT_FIELDS32(96, 127, __VA_ARGS__); \ | ||
| 309 | } while (0) | ||
| 310 | |||
| 311 | #define EFX_POPULATE_QWORD32(qword, ...) do { \ | ||
| 312 | (qword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
| 313 | (qword).u32[1] = EFX_INSERT_FIELDS32(32, 63, __VA_ARGS__); \ | ||
| 314 | } while (0) | ||
| 315 | |||
| 316 | #define EFX_POPULATE_DWORD(dword, ...) do { \ | ||
| 317 | (dword).u32[0] = EFX_INSERT_FIELDS32(0, 31, __VA_ARGS__); \ | ||
| 318 | } while (0) | ||
| 319 | |||
| 320 | #if BITS_PER_LONG == 64 | ||
| 321 | #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD64 | ||
| 322 | #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD64 | ||
| 323 | #else | ||
| 324 | #define EFX_POPULATE_OWORD EFX_POPULATE_OWORD32 | ||
| 325 | #define EFX_POPULATE_QWORD EFX_POPULATE_QWORD32 | ||
| 326 | #endif | ||
| 327 | |||
| 328 | /* Populate an octword field with various numbers of arguments */ | ||
| 329 | #define EFX_POPULATE_OWORD_10 EFX_POPULATE_OWORD | ||
| 330 | #define EFX_POPULATE_OWORD_9(oword, ...) \ | ||
| 331 | EFX_POPULATE_OWORD_10(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 332 | #define EFX_POPULATE_OWORD_8(oword, ...) \ | ||
| 333 | EFX_POPULATE_OWORD_9(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 334 | #define EFX_POPULATE_OWORD_7(oword, ...) \ | ||
| 335 | EFX_POPULATE_OWORD_8(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 336 | #define EFX_POPULATE_OWORD_6(oword, ...) \ | ||
| 337 | EFX_POPULATE_OWORD_7(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 338 | #define EFX_POPULATE_OWORD_5(oword, ...) \ | ||
| 339 | EFX_POPULATE_OWORD_6(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 340 | #define EFX_POPULATE_OWORD_4(oword, ...) \ | ||
| 341 | EFX_POPULATE_OWORD_5(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 342 | #define EFX_POPULATE_OWORD_3(oword, ...) \ | ||
| 343 | EFX_POPULATE_OWORD_4(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 344 | #define EFX_POPULATE_OWORD_2(oword, ...) \ | ||
| 345 | EFX_POPULATE_OWORD_3(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 346 | #define EFX_POPULATE_OWORD_1(oword, ...) \ | ||
| 347 | EFX_POPULATE_OWORD_2(oword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 348 | #define EFX_ZERO_OWORD(oword) \ | ||
| 349 | EFX_POPULATE_OWORD_1(oword, EFX_DUMMY_FIELD, 0) | ||
| 350 | #define EFX_SET_OWORD(oword) \ | ||
| 351 | EFX_POPULATE_OWORD_4(oword, \ | ||
| 352 | EFX_DWORD_0, 0xffffffff, \ | ||
| 353 | EFX_DWORD_1, 0xffffffff, \ | ||
| 354 | EFX_DWORD_2, 0xffffffff, \ | ||
| 355 | EFX_DWORD_3, 0xffffffff) | ||
| 356 | |||
| 357 | /* Populate a quadword field with various numbers of arguments */ | ||
| 358 | #define EFX_POPULATE_QWORD_10 EFX_POPULATE_QWORD | ||
| 359 | #define EFX_POPULATE_QWORD_9(qword, ...) \ | ||
| 360 | EFX_POPULATE_QWORD_10(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 361 | #define EFX_POPULATE_QWORD_8(qword, ...) \ | ||
| 362 | EFX_POPULATE_QWORD_9(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 363 | #define EFX_POPULATE_QWORD_7(qword, ...) \ | ||
| 364 | EFX_POPULATE_QWORD_8(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 365 | #define EFX_POPULATE_QWORD_6(qword, ...) \ | ||
| 366 | EFX_POPULATE_QWORD_7(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 367 | #define EFX_POPULATE_QWORD_5(qword, ...) \ | ||
| 368 | EFX_POPULATE_QWORD_6(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 369 | #define EFX_POPULATE_QWORD_4(qword, ...) \ | ||
| 370 | EFX_POPULATE_QWORD_5(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 371 | #define EFX_POPULATE_QWORD_3(qword, ...) \ | ||
| 372 | EFX_POPULATE_QWORD_4(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 373 | #define EFX_POPULATE_QWORD_2(qword, ...) \ | ||
| 374 | EFX_POPULATE_QWORD_3(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 375 | #define EFX_POPULATE_QWORD_1(qword, ...) \ | ||
| 376 | EFX_POPULATE_QWORD_2(qword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 377 | #define EFX_ZERO_QWORD(qword) \ | ||
| 378 | EFX_POPULATE_QWORD_1(qword, EFX_DUMMY_FIELD, 0) | ||
| 379 | #define EFX_SET_QWORD(qword) \ | ||
| 380 | EFX_POPULATE_QWORD_2(qword, \ | ||
| 381 | EFX_DWORD_0, 0xffffffff, \ | ||
| 382 | EFX_DWORD_1, 0xffffffff) | ||
| 383 | |||
| 384 | /* Populate a dword field with various numbers of arguments */ | ||
| 385 | #define EFX_POPULATE_DWORD_10 EFX_POPULATE_DWORD | ||
| 386 | #define EFX_POPULATE_DWORD_9(dword, ...) \ | ||
| 387 | EFX_POPULATE_DWORD_10(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 388 | #define EFX_POPULATE_DWORD_8(dword, ...) \ | ||
| 389 | EFX_POPULATE_DWORD_9(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 390 | #define EFX_POPULATE_DWORD_7(dword, ...) \ | ||
| 391 | EFX_POPULATE_DWORD_8(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 392 | #define EFX_POPULATE_DWORD_6(dword, ...) \ | ||
| 393 | EFX_POPULATE_DWORD_7(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 394 | #define EFX_POPULATE_DWORD_5(dword, ...) \ | ||
| 395 | EFX_POPULATE_DWORD_6(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 396 | #define EFX_POPULATE_DWORD_4(dword, ...) \ | ||
| 397 | EFX_POPULATE_DWORD_5(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 398 | #define EFX_POPULATE_DWORD_3(dword, ...) \ | ||
| 399 | EFX_POPULATE_DWORD_4(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 400 | #define EFX_POPULATE_DWORD_2(dword, ...) \ | ||
| 401 | EFX_POPULATE_DWORD_3(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 402 | #define EFX_POPULATE_DWORD_1(dword, ...) \ | ||
| 403 | EFX_POPULATE_DWORD_2(dword, EFX_DUMMY_FIELD, 0, __VA_ARGS__) | ||
| 404 | #define EFX_ZERO_DWORD(dword) \ | ||
| 405 | EFX_POPULATE_DWORD_1(dword, EFX_DUMMY_FIELD, 0) | ||
| 406 | #define EFX_SET_DWORD(dword) \ | ||
| 407 | EFX_POPULATE_DWORD_1(dword, EFX_DWORD_0, 0xffffffff) | ||
| 408 | |||
| 409 | /* | ||
| 410 | * Modify a named field within an already-populated structure. Used | ||
| 411 | * for read-modify-write operations. | ||
| 412 | * | ||
| 413 | */ | ||
| 414 | |||
| 415 | #define EFX_INVERT_OWORD(oword) do { \ | ||
| 416 | (oword).u64[0] = ~((oword).u64[0]); \ | ||
| 417 | (oword).u64[1] = ~((oword).u64[1]); \ | ||
| 418 | } while (0) | ||
| 419 | |||
| 420 | #define EFX_INSERT_FIELD64(...) \ | ||
| 421 | cpu_to_le64(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) | ||
| 422 | |||
| 423 | #define EFX_INSERT_FIELD32(...) \ | ||
| 424 | cpu_to_le32(EFX_INSERT_FIELD_NATIVE(__VA_ARGS__)) | ||
| 425 | |||
| 426 | #define EFX_INPLACE_MASK64(min, max, field) \ | ||
| 427 | EFX_INSERT_FIELD64(min, max, field, EFX_MASK64(field)) | ||
| 428 | |||
| 429 | #define EFX_INPLACE_MASK32(min, max, field) \ | ||
| 430 | EFX_INSERT_FIELD32(min, max, field, EFX_MASK32(field)) | ||
| 431 | |||
| 432 | #define EFX_SET_OWORD_FIELD64(oword, field, value) do { \ | ||
| 433 | (oword).u64[0] = (((oword).u64[0] \ | ||
| 434 | & ~EFX_INPLACE_MASK64(0, 63, field)) \ | ||
| 435 | | EFX_INSERT_FIELD64(0, 63, field, value)); \ | ||
| 436 | (oword).u64[1] = (((oword).u64[1] \ | ||
| 437 | & ~EFX_INPLACE_MASK64(64, 127, field)) \ | ||
| 438 | | EFX_INSERT_FIELD64(64, 127, field, value)); \ | ||
| 439 | } while (0) | ||
| 440 | |||
| 441 | #define EFX_SET_QWORD_FIELD64(qword, field, value) do { \ | ||
| 442 | (qword).u64[0] = (((qword).u64[0] \ | ||
| 443 | & ~EFX_INPLACE_MASK64(0, 63, field)) \ | ||
| 444 | | EFX_INSERT_FIELD64(0, 63, field, value)); \ | ||
| 445 | } while (0) | ||
| 446 | |||
| 447 | #define EFX_SET_OWORD_FIELD32(oword, field, value) do { \ | ||
| 448 | (oword).u32[0] = (((oword).u32[0] \ | ||
| 449 | & ~EFX_INPLACE_MASK32(0, 31, field)) \ | ||
| 450 | | EFX_INSERT_FIELD32(0, 31, field, value)); \ | ||
| 451 | (oword).u32[1] = (((oword).u32[1] \ | ||
| 452 | & ~EFX_INPLACE_MASK32(32, 63, field)) \ | ||
| 453 | | EFX_INSERT_FIELD32(32, 63, field, value)); \ | ||
| 454 | (oword).u32[2] = (((oword).u32[2] \ | ||
| 455 | & ~EFX_INPLACE_MASK32(64, 95, field)) \ | ||
| 456 | | EFX_INSERT_FIELD32(64, 95, field, value)); \ | ||
| 457 | (oword).u32[3] = (((oword).u32[3] \ | ||
| 458 | & ~EFX_INPLACE_MASK32(96, 127, field)) \ | ||
| 459 | | EFX_INSERT_FIELD32(96, 127, field, value)); \ | ||
| 460 | } while (0) | ||
| 461 | |||
| 462 | #define EFX_SET_QWORD_FIELD32(qword, field, value) do { \ | ||
| 463 | (qword).u32[0] = (((qword).u32[0] \ | ||
| 464 | & ~EFX_INPLACE_MASK32(0, 31, field)) \ | ||
| 465 | | EFX_INSERT_FIELD32(0, 31, field, value)); \ | ||
| 466 | (qword).u32[1] = (((qword).u32[1] \ | ||
| 467 | & ~EFX_INPLACE_MASK32(32, 63, field)) \ | ||
| 468 | | EFX_INSERT_FIELD32(32, 63, field, value)); \ | ||
| 469 | } while (0) | ||
| 470 | |||
| 471 | #define EFX_SET_DWORD_FIELD(dword, field, value) do { \ | ||
| 472 | (dword).u32[0] = (((dword).u32[0] \ | ||
| 473 | & ~EFX_INPLACE_MASK32(0, 31, field)) \ | ||
| 474 | | EFX_INSERT_FIELD32(0, 31, field, value)); \ | ||
| 475 | } while (0) | ||
| 476 | |||
| 477 | #if BITS_PER_LONG == 64 | ||
| 478 | #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD64 | ||
| 479 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD64 | ||
| 480 | #else | ||
| 481 | #define EFX_SET_OWORD_FIELD EFX_SET_OWORD_FIELD32 | ||
| 482 | #define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 | ||
| 483 | #endif | ||
| 484 | |||
| 485 | #define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \ | ||
| 486 | if (FALCON_REV(efx) >= FALCON_REV_B0) { \ | ||
| 487 | EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \ | ||
| 488 | } else { \ | ||
| 489 | EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \ | ||
| 490 | } \ | ||
| 491 | } while (0) | ||
| 492 | |||
| 493 | #define EFX_QWORD_FIELD_VER(efx, qword, field) \ | ||
| 494 | (FALCON_REV(efx) >= FALCON_REV_B0 ? \ | ||
| 495 | EFX_QWORD_FIELD((qword), field##_B0) : \ | ||
| 496 | EFX_QWORD_FIELD((qword), field##_A1)) | ||
| 497 | |||
| 498 | /* Used to avoid compiler warnings about shift range exceeding width | ||
| 499 | * of the data types when dma_addr_t is only 32 bits wide. | ||
| 500 | */ | ||
| 501 | #define DMA_ADDR_T_WIDTH (8 * sizeof(dma_addr_t)) | ||
| 502 | #define EFX_DMA_TYPE_WIDTH(width) \ | ||
| 503 | (((width) < DMA_ADDR_T_WIDTH) ? (width) : DMA_ADDR_T_WIDTH) | ||
| 504 | #define EFX_DMA_MAX_MASK ((DMA_ADDR_T_WIDTH == 64) ? \ | ||
| 505 | ~((u64) 0) : ~((u32) 0)) | ||
| 506 | #define EFX_DMA_MASK(mask) ((mask) & EFX_DMA_MAX_MASK) | ||
| 507 | |||
| 508 | #endif /* EFX_BITFIELD_H */ | ||
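A minimal usage sketch of the accessors defined above (not part of the patch): EXAMPLE_FIELD and its _LBN/_WIDTH constants are hypothetical and merely follow the naming convention the macros expect, while EFX_DWORD_1, efx_oword_t and the populate/extract/set macros are taken from bitfield.h itself.

	#include <linux/kernel.h>
	#include <asm/byteorder.h>	/* cpu_to_le*() / le*_to_cpu() */
	#include "bitfield.h"

	/* Hypothetical field occupying bits 12..19 of a 128-bit register */
	#define EXAMPLE_FIELD_LBN	12
	#define EXAMPLE_FIELD_WIDTH	8

	static void bitfield_sketch(void)
	{
		efx_oword_t reg;
		unsigned int val;

		/* Zero the octword and set two non-overlapping fields in one
		 * go; EFX_POPULATE_OWORD_2 pads itself out to the 10-argument
		 * form with EFX_DUMMY_FIELD entries. */
		EFX_POPULATE_OWORD_2(reg,
				     EXAMPLE_FIELD, 0xab,
				     EFX_DWORD_1, 0xdeadbeef);

		/* Read the field back; this expands to the 32-bit or 64-bit
		 * variant depending on BITS_PER_LONG. */
		val = EFX_OWORD_FIELD(reg, EXAMPLE_FIELD);	/* == 0xab */

		/* Read-modify-write of one field, leaving EFX_DWORD_1 intact */
		EFX_SET_OWORD_FIELD(reg, EXAMPLE_FIELD, 0x5a);

		(void)val;
	}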
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c new file mode 100644 index 000000000000..eecaa6d58584 --- /dev/null +++ b/drivers/net/sfc/boards.c | |||
| @@ -0,0 +1,167 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include "net_driver.h" | ||
| 11 | #include "phy.h" | ||
| 12 | #include "boards.h" | ||
| 13 | #include "efx.h" | ||
| 14 | |||
| 15 | /* Macros for unpacking the board revision */ | ||
| 16 | /* The revision info is in host byte order. */ | ||
| 17 | #define BOARD_TYPE(_rev) (_rev >> 8) | ||
| 18 | #define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) | ||
| 19 | #define BOARD_MINOR(_rev) (_rev & 0xf) | ||
| 20 | |||
| 21 | /* Blink support. If the PHY has no auto-blink mode, we hang it off a timer */ | ||
| 22 | #define BLINK_INTERVAL (HZ/2) | ||
| 23 | |||
| 24 | static void blink_led_timer(unsigned long context) | ||
| 25 | { | ||
| 26 | struct efx_nic *efx = (struct efx_nic *)context; | ||
| 27 | struct efx_blinker *bl = &efx->board_info.blinker; | ||
| 28 | efx->board_info.set_fault_led(efx, bl->state); | ||
| 29 | bl->state = !bl->state; | ||
| 30 | if (bl->resubmit) { | ||
| 31 | bl->timer.expires = jiffies + BLINK_INTERVAL; | ||
| 32 | add_timer(&bl->timer); | ||
| 33 | } | ||
| 34 | } | ||
| 35 | |||
| 36 | static void board_blink(struct efx_nic *efx, int blink) | ||
| 37 | { | ||
| 38 | struct efx_blinker *blinker = &efx->board_info.blinker; | ||
| 39 | |||
| 40 | /* The rtnl mutex serialises all ethtool ioctls, so | ||
| 41 | * nothing special needs doing here. */ | ||
| 42 | if (blink) { | ||
| 43 | blinker->resubmit = 1; | ||
| 44 | blinker->state = 0; | ||
| 45 | setup_timer(&blinker->timer, blink_led_timer, | ||
| 46 | (unsigned long)efx); | ||
| 47 | blinker->timer.expires = jiffies + BLINK_INTERVAL; | ||
| 48 | add_timer(&blinker->timer); | ||
| 49 | } else { | ||
| 50 | blinker->resubmit = 0; | ||
| 51 | if (blinker->timer.function) | ||
| 52 | del_timer_sync(&blinker->timer); | ||
| 53 | efx->board_info.set_fault_led(efx, 0); | ||
| 54 | } | ||
| 55 | } | ||
| 56 | |||
| 57 | /***************************************************************************** | ||
| 58 | * Support for the SFE4002 | ||
| 59 | * | ||
| 60 | */ | ||
| 61 | /****************************************************************************/ | ||
| 62 | /* LED allocations. Note that on rev A0 boards the schematic and the reality | ||
| 63 | * differ: red and green are swapped. Below is the fixed (A1) layout (there | ||
| 64 | * are only 3 A0 boards in existence, so no real reason to make this | ||
| 65 | * conditional). | ||
| 66 | */ | ||
| 67 | #define SFE4002_FAULT_LED (2) /* Red */ | ||
| 68 | #define SFE4002_RX_LED (0) /* Green */ | ||
| 69 | #define SFE4002_TX_LED (1) /* Amber */ | ||
| 70 | |||
| 71 | static int sfe4002_init_leds(struct efx_nic *efx) | ||
| 72 | { | ||
| 73 | /* Set the TX and RX LEDs to reflect status and activity, and the | ||
| 74 | * fault LED off */ | ||
| 75 | xfp_set_led(efx, SFE4002_TX_LED, | ||
| 76 | QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
| 77 | xfp_set_led(efx, SFE4002_RX_LED, | ||
| 78 | QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); | ||
| 79 | xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); | ||
| 80 | efx->board_info.blinker.led_num = SFE4002_FAULT_LED; | ||
| 81 | return 0; | ||
| 82 | } | ||
| 83 | |||
| 84 | static void sfe4002_fault_led(struct efx_nic *efx, int state) | ||
| 85 | { | ||
| 86 | xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON : | ||
| 87 | QUAKE_LED_OFF); | ||
| 88 | } | ||
| 89 | |||
| 90 | static int sfe4002_init(struct efx_nic *efx) | ||
| 91 | { | ||
| 92 | efx->board_info.init_leds = sfe4002_init_leds; | ||
| 93 | efx->board_info.set_fault_led = sfe4002_fault_led; | ||
| 94 | efx->board_info.blink = board_blink; | ||
| 95 | return 0; | ||
| 96 | } | ||
| 97 | |||
| 98 | /* This will get expanded as board-specific details get moved out of the | ||
| 99 | * PHY drivers. */ | ||
| 100 | struct efx_board_data { | ||
| 101 | const char *ref_model; | ||
| 102 | const char *gen_type; | ||
| 103 | int (*init) (struct efx_nic *nic); | ||
| 104 | }; | ||
| 105 | |||
| 106 | static int dummy_init(struct efx_nic *nic) | ||
| 107 | { | ||
| 108 | return 0; | ||
| 109 | } | ||
| 110 | |||
| 111 | static struct efx_board_data board_data[] = { | ||
| 112 | [EFX_BOARD_INVALID] = | ||
| 113 | {NULL, NULL, dummy_init}, | ||
| 114 | [EFX_BOARD_SFE4001] = | ||
| 115 | {"SFE4001", "10GBASE-T adapter", sfe4001_poweron}, | ||
| 116 | [EFX_BOARD_SFE4002] = | ||
| 117 | {"SFE4002", "XFP adapter", sfe4002_init}, | ||
| 118 | }; | ||
| 119 | |||
| 120 | int efx_set_board_info(struct efx_nic *efx, u16 revision_info) | ||
| 121 | { | ||
| 122 | int rc = 0; | ||
| 123 | struct efx_board_data *data; | ||
| 124 | |||
| 125 | if (BOARD_TYPE(revision_info) >= EFX_BOARD_MAX) { | ||
| 126 | EFX_ERR(efx, "squashing unknown board type %d\n", | ||
| 127 | BOARD_TYPE(revision_info)); | ||
| 128 | revision_info = 0; | ||
| 129 | } | ||
| 130 | |||
| 131 | if (BOARD_TYPE(revision_info) == 0) { | ||
| 132 | efx->board_info.major = 0; | ||
| 133 | efx->board_info.minor = 0; | ||
| 134 | /* For early boards that don't have revision info, there is | ||
| 135 | * only 1 board for each PHY type, so we can work it out, with | ||
| 136 | * the exception of the PHY-less boards. */ | ||
| 137 | switch (efx->phy_type) { | ||
| 138 | case PHY_TYPE_10XPRESS: | ||
| 139 | efx->board_info.type = EFX_BOARD_SFE4001; | ||
| 140 | break; | ||
| 141 | case PHY_TYPE_XFP: | ||
| 142 | efx->board_info.type = EFX_BOARD_SFE4002; | ||
| 143 | break; | ||
| 144 | default: | ||
| 145 | efx->board_info.type = 0; | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | } else { | ||
| 149 | efx->board_info.type = BOARD_TYPE(revision_info); | ||
| 150 | efx->board_info.major = BOARD_MAJOR(revision_info); | ||
| 151 | efx->board_info.minor = BOARD_MINOR(revision_info); | ||
| 152 | } | ||
| 153 | |||
| 154 | data = &board_data[efx->board_info.type]; | ||
| 155 | |||
| 156 | /* Report the board model number or generic type for recognisable | ||
| 157 | * boards. */ | ||
| 158 | if (efx->board_info.type != 0) | ||
| 159 | EFX_INFO(efx, "board is %s rev %c%d\n", | ||
| 160 | (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC) | ||
| 161 | ? data->ref_model : data->gen_type, | ||
| 162 | 'A' + efx->board_info.major, efx->board_info.minor); | ||
| 163 | |||
| 164 | efx->board_info.init = data->init; | ||
| 165 | |||
| 166 | return rc; | ||
| 167 | } | ||
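For concreteness, a worked example of the revision unpacking used in efx_set_board_info() (the value 0x0213 is hypothetical; the macros and names are the ones defined above):

	/* revision_info = 0x0213 (hypothetical):
	 *	BOARD_TYPE(0x0213)  = 0x02 -> EFX_BOARD_SFE4002
	 *	BOARD_MAJOR(0x0213) = 0x1  -> printed as 'A' + 1 = 'B'
	 *	BOARD_MINOR(0x0213) = 0x3
	 * so the driver would log "board is SFE4002 rev B3" (or
	 * "XFP adapter rev B3" when the PCI subsystem vendor is not
	 * EFX_VENDID_SFC). */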
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h new file mode 100644 index 000000000000..f56341d428e1 --- /dev/null +++ b/drivers/net/sfc/boards.h | |||
| @@ -0,0 +1,26 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_BOARDS_H | ||
| 11 | #define EFX_BOARDS_H | ||
| 12 | |||
| 13 | /* Board IDs (must fit in 8 bits) */ | ||
| 14 | enum efx_board_type { | ||
| 15 | EFX_BOARD_INVALID = 0, | ||
| 16 | EFX_BOARD_SFE4001 = 1, /* SFE4001 (10GBASE-T) */ | ||
| 17 | EFX_BOARD_SFE4002 = 2, | ||
| 18 | /* Insert new types before here */ | ||
| 19 | EFX_BOARD_MAX | ||
| 20 | }; | ||
| 21 | |||
| 22 | extern int efx_set_board_info(struct efx_nic *efx, u16 revision_info); | ||
| 23 | extern int sfe4001_poweron(struct efx_nic *efx); | ||
| 24 | extern void sfe4001_poweroff(struct efx_nic *efx); | ||
| 25 | |||
| 26 | #endif | ||
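Following the "Insert new types before here" comment, a hypothetical extension sketch (board name and init hook invented for illustration, not part of this patch):

	/* boards.h: add the new ID before EFX_BOARD_MAX, e.g.
	 *
	 *	EFX_BOARD_SFE4003 = 3,
	 *
	 * boards.c: add the matching board_data[] entry, e.g.
	 *
	 *	[EFX_BOARD_SFE4003] =
	 *		{"SFE4003", "hypothetical adapter", sfe4003_init},
	 *
	 * so that efx_set_board_info() resolves BOARD_TYPE() == 3 instead of
	 * squashing it to the unknown-board case. */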
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c new file mode 100644 index 000000000000..59edcf793c19 --- /dev/null +++ b/drivers/net/sfc/efx.c | |||
| @@ -0,0 +1,2208 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2005-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/module.h> | ||
| 12 | #include <linux/pci.h> | ||
| 13 | #include <linux/netdevice.h> | ||
| 14 | #include <linux/etherdevice.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include <linux/notifier.h> | ||
| 17 | #include <linux/ip.h> | ||
| 18 | #include <linux/tcp.h> | ||
| 19 | #include <linux/in.h> | ||
| 20 | #include <linux/crc32.h> | ||
| 21 | #include <linux/ethtool.h> | ||
| 22 | #include "net_driver.h" | ||
| 23 | #include "gmii.h" | ||
| 24 | #include "ethtool.h" | ||
| 25 | #include "tx.h" | ||
| 26 | #include "rx.h" | ||
| 27 | #include "efx.h" | ||
| 28 | #include "mdio_10g.h" | ||
| 29 | #include "falcon.h" | ||
| 30 | #include "workarounds.h" | ||
| 31 | #include "mac.h" | ||
| 32 | |||
| 33 | #define EFX_MAX_MTU (9 * 1024) | ||
| 34 | |||
| 35 | /* RX slow fill workqueue. If memory allocation fails in the fast path, | ||
| 36 | * a work item is pushed onto this work queue to retry the allocation later, | ||
| 37 | * to avoid the NIC being starved of RX buffers. Since this is a per cpu | ||
| 38 | * workqueue, there is nothing to be gained in making it per NIC | ||
| 39 | */ | ||
| 40 | static struct workqueue_struct *refill_workqueue; | ||
| 41 | |||
| 42 | /************************************************************************** | ||
| 43 | * | ||
| 44 | * Configurable values | ||
| 45 | * | ||
| 46 | *************************************************************************/ | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Enable large receive offload (LRO) aka soft segment reassembly (SSR) | ||
| 50 | * | ||
| 51 | * This sets the default for new devices. It can be controlled later | ||
| 52 | * using ethtool. | ||
| 53 | */ | ||
| 54 | static int lro = 1; | ||
| 55 | module_param(lro, int, 0644); | ||
| 56 | MODULE_PARM_DESC(lro, "Large receive offload acceleration"); | ||
| 57 | |||
| 58 | /* | ||
| 59 | * Use separate channels for TX and RX events | ||
| 60 | * | ||
| 61 | * Set this to 1 to use separate channels for TX and RX. It allows us to | ||
| 62 | * apply a higher level of interrupt moderation to TX events. | ||
| 63 | * | ||
| 64 | * This is forced to 0 for MSI interrupt mode as the interrupt vector | ||
| 65 | * is not written | ||
| 66 | */ | ||
| 67 | static unsigned int separate_tx_and_rx_channels = 1; | ||
| 68 | |||
| 69 | /* This is the weight assigned to each of the (per-channel) virtual | ||
| 70 | * NAPI devices. | ||
| 71 | */ | ||
| 72 | static int napi_weight = 64; | ||
| 73 | |||
| 74 | /* This is the time (in jiffies) between invocations of the hardware | ||
| 75 | * monitor, which checks for known hardware bugs and resets the | ||
| 76 | * hardware and driver as necessary. | ||
| 77 | */ | ||
| 78 | unsigned int efx_monitor_interval = 1 * HZ; | ||
| 79 | |||
| 80 | /* This controls whether or not the hardware monitor will trigger a | ||
| 81 | * reset when it detects an error condition. | ||
| 82 | */ | ||
| 83 | static unsigned int monitor_reset = 1; | ||
| 84 | |||
| 85 | /* This controls whether or not the driver will initialise devices | ||
| 86 | * with invalid MAC addresses stored in the EEPROM or flash. If true, | ||
| 87 | * such devices will be initialised with a random locally-generated | ||
| 88 | * MAC address. This allows for loading the sfc_mtd driver to | ||
| 89 | * reprogram the flash, even if the flash contents (including the MAC | ||
| 90 | * address) have previously been erased. | ||
| 91 | */ | ||
| 92 | static unsigned int allow_bad_hwaddr; | ||
| 93 | |||
| 94 | /* Initial interrupt moderation settings. They can be modified after | ||
| 95 | * module load with ethtool. | ||
| 96 | * | ||
| 97 | * The default for RX should strike a balance between increasing the | ||
| 98 | * round-trip latency and reducing overhead. | ||
| 99 | */ | ||
| 100 | static unsigned int rx_irq_mod_usec = 60; | ||
| 101 | |||
| 102 | /* Initial interrupt moderation settings. They can be modified after | ||
| 103 | * module load with ethtool. | ||
| 104 | * | ||
| 105 | * This default is chosen to ensure that a 10G link does not go idle | ||
| 106 | * while a TX queue is stopped after it has become full. A queue is | ||
| 107 | * restarted when it drops below half full. The time this takes (assuming | ||
| 108 | * worst case 3 descriptors per packet and 1024 descriptors) is | ||
| 109 | * 512 / 3 * 1.2 = 205 usec. | ||
| 110 | */ | ||
| 111 | static unsigned int tx_irq_mod_usec = 150; | ||
| 112 | |||
| 113 | /* This is the first interrupt mode to try out of: | ||
| 114 | * 0 => MSI-X | ||
| 115 | * 1 => MSI | ||
| 116 | * 2 => legacy | ||
| 117 | */ | ||
| 118 | static unsigned int interrupt_mode; | ||
| 119 | |||
| 120 | /* This is the requested number of CPUs to use for Receive-Side Scaling (RSS), | ||
| 121 | * i.e. the number of CPUs among which we may distribute simultaneous | ||
| 122 | * interrupt handling. | ||
| 123 | * | ||
| 124 | * Cards without MSI-X will only target one CPU via legacy or MSI interrupt. | ||
| 125 | * The default (0) means to assign an interrupt to each package (level II cache) | ||
| 126 | */ | ||
| 127 | static unsigned int rss_cpus; | ||
| 128 | module_param(rss_cpus, uint, 0444); | ||
| 129 | MODULE_PARM_DESC(rss_cpus, "Number of CPUs to use for Receive-Side Scaling"); | ||
| 130 | |||
| 131 | /************************************************************************** | ||
| 132 | * | ||
| 133 | * Utility functions and prototypes | ||
| 134 | * | ||
| 135 | *************************************************************************/ | ||
| 136 | static void efx_remove_channel(struct efx_channel *channel); | ||
| 137 | static void efx_remove_port(struct efx_nic *efx); | ||
| 138 | static void efx_fini_napi(struct efx_nic *efx); | ||
| 139 | static void efx_fini_channels(struct efx_nic *efx); | ||
| 140 | |||
| 141 | #define EFX_ASSERT_RESET_SERIALISED(efx) \ | ||
| 142 | do { \ | ||
| 143 | if ((efx->state == STATE_RUNNING) || \ | ||
| 144 | (efx->state == STATE_RESETTING)) \ | ||
| 145 | ASSERT_RTNL(); \ | ||
| 146 | } while (0) | ||
| 147 | |||
| 148 | /************************************************************************** | ||
| 149 | * | ||
| 150 | * Event queue processing | ||
| 151 | * | ||
| 152 | *************************************************************************/ | ||
| 153 | |||
| 154 | /* Process channel's event queue | ||
| 155 | * | ||
| 156 | * This function is responsible for processing the event queue of a | ||
| 157 | * single channel. The caller must guarantee that this function will | ||
| 158 | * never be concurrently called more than once on the same channel, | ||
| 159 | * though different channels may be being processed concurrently. | ||
| 160 | */ | ||
| 161 | static inline int efx_process_channel(struct efx_channel *channel, int rx_quota) | ||
| 162 | { | ||
| 163 | int rxdmaqs; | ||
| 164 | struct efx_rx_queue *rx_queue; | ||
| 165 | |||
| 166 | if (unlikely(channel->efx->reset_pending != RESET_TYPE_NONE || | ||
| 167 | !channel->enabled)) | ||
| 168 | return rx_quota; | ||
| 169 | |||
| 170 | rxdmaqs = falcon_process_eventq(channel, &rx_quota); | ||
| 171 | |||
| 172 | /* Deliver last RX packet. */ | ||
| 173 | if (channel->rx_pkt) { | ||
| 174 | __efx_rx_packet(channel, channel->rx_pkt, | ||
| 175 | channel->rx_pkt_csummed); | ||
| 176 | channel->rx_pkt = NULL; | ||
| 177 | } | ||
| 178 | |||
| 179 | efx_flush_lro(channel); | ||
| 180 | efx_rx_strategy(channel); | ||
| 181 | |||
| 182 | /* Refill descriptor rings as necessary */ | ||
| 183 | rx_queue = &channel->efx->rx_queue[0]; | ||
| 184 | while (rxdmaqs) { | ||
| 185 | if (rxdmaqs & 0x01) | ||
| 186 | efx_fast_push_rx_descriptors(rx_queue); | ||
| 187 | rx_queue++; | ||
| 188 | rxdmaqs >>= 1; | ||
| 189 | } | ||
| 190 | |||
| 191 | return rx_quota; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* Mark channel as finished processing | ||
| 195 | * | ||
| 196 | * Note that since we will not receive further interrupts for this | ||
| 197 | * channel before we finish processing and call the eventq_read_ack() | ||
| 198 | * method, there is no need to use the interrupt hold-off timers. | ||
| 199 | */ | ||
| 200 | static inline void efx_channel_processed(struct efx_channel *channel) | ||
| 201 | { | ||
| 202 | /* Write to EVQ_RPTR_REG. If a new event arrived in a race | ||
| 203 | * with finishing processing, a new interrupt will be raised. | ||
| 204 | */ | ||
| 205 | channel->work_pending = 0; | ||
| 206 | smp_wmb(); /* Ensure channel updated before any new interrupt. */ | ||
| 207 | falcon_eventq_read_ack(channel); | ||
| 208 | } | ||
| 209 | |||
| 210 | /* NAPI poll handler | ||
| 211 | * | ||
| 212 | * NAPI guarantees serialisation of polls of the same device, which | ||
| 213 | * provides the guarantee required by efx_process_channel(). | ||
| 214 | */ | ||
| 215 | static int efx_poll(struct napi_struct *napi, int budget) | ||
| 216 | { | ||
| 217 | struct efx_channel *channel = | ||
| 218 | container_of(napi, struct efx_channel, napi_str); | ||
| 219 | struct net_device *napi_dev = channel->napi_dev; | ||
| 220 | int unused; | ||
| 221 | int rx_packets; | ||
| 222 | |||
| 223 | EFX_TRACE(channel->efx, "channel %d NAPI poll executing on CPU %d\n", | ||
| 224 | channel->channel, raw_smp_processor_id()); | ||
| 225 | |||
| 226 | unused = efx_process_channel(channel, budget); | ||
| 227 | rx_packets = (budget - unused); | ||
| 228 | |||
| 229 | if (rx_packets < budget) { | ||
| 230 | /* There is no race here; although napi_disable() will | ||
| 231 | * only wait for netif_rx_complete(), this isn't a problem | ||
| 232 | * since efx_channel_processed() will have no effect if | ||
| 233 | * interrupts have already been disabled. | ||
| 234 | */ | ||
| 235 | netif_rx_complete(napi_dev, napi); | ||
| 236 | efx_channel_processed(channel); | ||
| 237 | } | ||
| 238 | |||
| 239 | return rx_packets; | ||
| 240 | } | ||
| 241 | |||
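The budget accounting in efx_poll() above, spelled out (a restatement of the code, no new identifiers):

	/* efx_process_channel() is handed the NAPI budget as its rx_quota
	 * and returns whatever it did not consume, so
	 *
	 *	unused     = efx_process_channel(channel, budget);
	 *	rx_packets = budget - unused;
	 *
	 * rx_packets < budget means the event queue ran dry: the poll is
	 * completed with netif_rx_complete() and efx_channel_processed()
	 * re-arms the hardware via falcon_eventq_read_ack().  Returning the
	 * full budget keeps the channel scheduled for further polling. */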
| 242 | /* Process the eventq of the specified channel immediately on this CPU | ||
| 243 | * | ||
| 244 | * Disable hardware generated interrupts, wait for any existing | ||
| 245 | * processing to finish, then directly poll (and ack) the eventq. | ||
| 246 | * Finally reenable NAPI and interrupts. | ||
| 247 | * | ||
| 248 | * Since we are touching interrupts the caller should hold the suspend lock | ||
| 249 | */ | ||
| 250 | void efx_process_channel_now(struct efx_channel *channel) | ||
| 251 | { | ||
| 252 | struct efx_nic *efx = channel->efx; | ||
| 253 | |||
| 254 | BUG_ON(!channel->used_flags); | ||
| 255 | BUG_ON(!channel->enabled); | ||
| 256 | |||
| 257 | /* Disable interrupts and wait for ISRs to complete */ | ||
| 258 | falcon_disable_interrupts(efx); | ||
| 259 | if (efx->legacy_irq) | ||
| 260 | synchronize_irq(efx->legacy_irq); | ||
| 261 | if (channel->has_interrupt && channel->irq) | ||
| 262 | synchronize_irq(channel->irq); | ||
| 263 | |||
| 264 | /* Wait for any NAPI processing to complete */ | ||
| 265 | napi_disable(&channel->napi_str); | ||
| 266 | |||
| 267 | /* Poll the channel */ | ||
| 268 | (void) efx_process_channel(channel, efx->type->evq_size); | ||
| 269 | |||
| 270 | /* Ack the eventq. This may cause an interrupt to be generated | ||
| 271 | * when they are reenabled */ | ||
| 272 | efx_channel_processed(channel); | ||
| 273 | |||
| 274 | napi_enable(&channel->napi_str); | ||
| 275 | falcon_enable_interrupts(efx); | ||
| 276 | } | ||
| 277 | |||
| 278 | /* Create event queue | ||
| 279 | * Event queue memory allocations are done only once. If the channel | ||
| 280 | * is reset, the memory buffer will be reused; this guards against | ||
| 281 | * errors during channel reset and also simplifies interrupt handling. | ||
| 282 | */ | ||
| 283 | static int efx_probe_eventq(struct efx_channel *channel) | ||
| 284 | { | ||
| 285 | EFX_LOG(channel->efx, "chan %d create event queue\n", channel->channel); | ||
| 286 | |||
| 287 | return falcon_probe_eventq(channel); | ||
| 288 | } | ||
| 289 | |||
| 290 | /* Prepare channel's event queue */ | ||
| 291 | static int efx_init_eventq(struct efx_channel *channel) | ||
| 292 | { | ||
| 293 | EFX_LOG(channel->efx, "chan %d init event queue\n", channel->channel); | ||
| 294 | |||
| 295 | channel->eventq_read_ptr = 0; | ||
| 296 | |||
| 297 | return falcon_init_eventq(channel); | ||
| 298 | } | ||
| 299 | |||
| 300 | static void efx_fini_eventq(struct efx_channel *channel) | ||
| 301 | { | ||
| 302 | EFX_LOG(channel->efx, "chan %d fini event queue\n", channel->channel); | ||
| 303 | |||
| 304 | falcon_fini_eventq(channel); | ||
| 305 | } | ||
| 306 | |||
| 307 | static void efx_remove_eventq(struct efx_channel *channel) | ||
| 308 | { | ||
| 309 | EFX_LOG(channel->efx, "chan %d remove event queue\n", channel->channel); | ||
| 310 | |||
| 311 | falcon_remove_eventq(channel); | ||
| 312 | } | ||
| 313 | |||
| 314 | /************************************************************************** | ||
| 315 | * | ||
| 316 | * Channel handling | ||
| 317 | * | ||
| 318 | *************************************************************************/ | ||
| 319 | |||
| 320 | /* Set up per-NIC RX buffer parameters. | ||
| 321 | * Calculate the rx buffer allocation parameters required to support | ||
| 322 | * the current MTU, including padding for header alignment and overruns. | ||
| 323 | */ | ||
| 324 | static void efx_calc_rx_buffer_params(struct efx_nic *efx) | ||
| 325 | { | ||
| 326 | unsigned int order, len; | ||
| 327 | |||
| 328 | len = (max(EFX_PAGE_IP_ALIGN, NET_IP_ALIGN) + | ||
| 329 | EFX_MAX_FRAME_LEN(efx->net_dev->mtu) + | ||
| 330 | efx->type->rx_buffer_padding); | ||
| 331 | |||
| 332 | /* Calculate page-order */ | ||
| 333 | for (order = 0; ((1u << order) * PAGE_SIZE) < len; ++order) | ||
| 334 | ; | ||
| 335 | |||
| 336 | efx->rx_buffer_len = len; | ||
| 337 | efx->rx_buffer_order = order; | ||
| 338 | } | ||
| 339 | |||
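A rough worked example of the sizing in efx_calc_rx_buffer_params(); EFX_MAX_FRAME_LEN() and EFX_PAGE_IP_ALIGN are defined outside this hunk, so the byte counts below are approximate assumptions:

	/* Assuming 4 KiB pages:
	 *
	 *	MTU 1500: len is a little over 1.5 KiB, so
	 *	          (1 << 0) * PAGE_SIZE already covers it and
	 *	          rx_buffer_order = 0
	 *	MTU 9000 (near EFX_MAX_MTU): len is roughly 9 KiB, needing
	 *	          (1 << 2) * PAGE_SIZE = 16 KiB, so rx_buffer_order = 2
	 *
	 * i.e. standard frames fit one page per buffer, while jumbo frames
	 * fall back to order-2 allocations. */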
| 340 | static int efx_probe_channel(struct efx_channel *channel) | ||
| 341 | { | ||
| 342 | struct efx_tx_queue *tx_queue; | ||
| 343 | struct efx_rx_queue *rx_queue; | ||
| 344 | int rc; | ||
| 345 | |||
| 346 | EFX_LOG(channel->efx, "creating channel %d\n", channel->channel); | ||
| 347 | |||
| 348 | rc = efx_probe_eventq(channel); | ||
| 349 | if (rc) | ||
| 350 | goto fail1; | ||
| 351 | |||
| 352 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
| 353 | rc = efx_probe_tx_queue(tx_queue); | ||
| 354 | if (rc) | ||
| 355 | goto fail2; | ||
| 356 | } | ||
| 357 | |||
| 358 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
| 359 | rc = efx_probe_rx_queue(rx_queue); | ||
| 360 | if (rc) | ||
| 361 | goto fail3; | ||
| 362 | } | ||
| 363 | |||
| 364 | channel->n_rx_frm_trunc = 0; | ||
| 365 | |||
| 366 | return 0; | ||
| 367 | |||
| 368 | fail3: | ||
| 369 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
| 370 | efx_remove_rx_queue(rx_queue); | ||
| 371 | fail2: | ||
| 372 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
| 373 | efx_remove_tx_queue(tx_queue); | ||
| 374 | fail1: | ||
| 375 | return rc; | ||
| 376 | } | ||
| 377 | |||
| 378 | |||
| 379 | /* Channels are shut down and reinitialised whilst the NIC is running | ||
| 380 | * to propagate configuration changes (mtu, checksum offload), or | ||
| 381 | * to clear hardware error conditions | ||
| 382 | */ | ||
| 383 | static int efx_init_channels(struct efx_nic *efx) | ||
| 384 | { | ||
| 385 | struct efx_tx_queue *tx_queue; | ||
| 386 | struct efx_rx_queue *rx_queue; | ||
| 387 | struct efx_channel *channel; | ||
| 388 | int rc = 0; | ||
| 389 | |||
| 390 | efx_calc_rx_buffer_params(efx); | ||
| 391 | |||
| 392 | /* Initialise the channels */ | ||
| 393 | efx_for_each_channel(channel, efx) { | ||
| 394 | EFX_LOG(channel->efx, "init chan %d\n", channel->channel); | ||
| 395 | |||
| 396 | rc = efx_init_eventq(channel); | ||
| 397 | if (rc) | ||
| 398 | goto err; | ||
| 399 | |||
| 400 | efx_for_each_channel_tx_queue(tx_queue, channel) { | ||
| 401 | rc = efx_init_tx_queue(tx_queue); | ||
| 402 | if (rc) | ||
| 403 | goto err; | ||
| 404 | } | ||
| 405 | |||
| 406 | /* The rx buffer allocation strategy is MTU dependent */ | ||
| 407 | efx_rx_strategy(channel); | ||
| 408 | |||
| 409 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
| 410 | rc = efx_init_rx_queue(rx_queue); | ||
| 411 | if (rc) | ||
| 412 | goto err; | ||
| 413 | } | ||
| 414 | |||
| 415 | WARN_ON(channel->rx_pkt != NULL); | ||
| 416 | efx_rx_strategy(channel); | ||
| 417 | } | ||
| 418 | |||
| 419 | return 0; | ||
| 420 | |||
| 421 | err: | ||
| 422 | EFX_ERR(efx, "failed to initialise channel %d\n", | ||
| 423 | channel ? channel->channel : -1); | ||
| 424 | efx_fini_channels(efx); | ||
| 425 | return rc; | ||
| 426 | } | ||
| 427 | |||
| 428 | /* This enables event queue processing and packet transmission. | ||
| 429 | * | ||
| 430 | * Note that this function is not allowed to fail, since that would | ||
| 431 | * introduce too much complexity into the suspend/resume path. | ||
| 432 | */ | ||
| 433 | static void efx_start_channel(struct efx_channel *channel) | ||
| 434 | { | ||
| 435 | struct efx_rx_queue *rx_queue; | ||
| 436 | |||
| 437 | EFX_LOG(channel->efx, "starting chan %d\n", channel->channel); | ||
| 438 | |||
| 439 | if (!(channel->efx->net_dev->flags & IFF_UP)) | ||
| 440 | netif_napi_add(channel->napi_dev, &channel->napi_str, | ||
| 441 | efx_poll, napi_weight); | ||
| 442 | |||
| 443 | channel->work_pending = 0; | ||
| 444 | channel->enabled = 1; | ||
| 445 | smp_wmb(); /* ensure channel updated before first interrupt */ | ||
| 446 | |||
| 447 | napi_enable(&channel->napi_str); | ||
| 448 | |||
| 449 | /* Load up RX descriptors */ | ||
| 450 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
| 451 | efx_fast_push_rx_descriptors(rx_queue); | ||
| 452 | } | ||
| 453 | |||
| 454 | /* This disables event queue processing and packet transmission. | ||
| 455 | * This function does not guarantee that all queue processing | ||
| 456 | * (e.g. RX refill) is complete. | ||
| 457 | */ | ||
| 458 | static void efx_stop_channel(struct efx_channel *channel) | ||
| 459 | { | ||
| 460 | struct efx_rx_queue *rx_queue; | ||
| 461 | |||
| 462 | if (!channel->enabled) | ||
| 463 | return; | ||
| 464 | |||
| 465 | EFX_LOG(channel->efx, "stop chan %d\n", channel->channel); | ||
| 466 | |||
| 467 | channel->enabled = 0; | ||
| 468 | napi_disable(&channel->napi_str); | ||
| 469 | |||
| 470 | /* Ensure that any worker threads have exited or will be no-ops */ | ||
| 471 | efx_for_each_channel_rx_queue(rx_queue, channel) { | ||
| 472 | spin_lock_bh(&rx_queue->add_lock); | ||
| 473 | spin_unlock_bh(&rx_queue->add_lock); | ||
| 474 | } | ||
| 475 | } | ||
| 476 | |||
| 477 | static void efx_fini_channels(struct efx_nic *efx) | ||
| 478 | { | ||
| 479 | struct efx_channel *channel; | ||
| 480 | struct efx_tx_queue *tx_queue; | ||
| 481 | struct efx_rx_queue *rx_queue; | ||
| 482 | |||
| 483 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 484 | BUG_ON(efx->port_enabled); | ||
| 485 | |||
| 486 | efx_for_each_channel(channel, efx) { | ||
| 487 | EFX_LOG(channel->efx, "shut down chan %d\n", channel->channel); | ||
| 488 | |||
| 489 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
| 490 | efx_fini_rx_queue(rx_queue); | ||
| 491 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
| 492 | efx_fini_tx_queue(tx_queue); | ||
| 493 | } | ||
| 494 | |||
| 495 | /* Do the event queues last so that we can handle flush events | ||
| 496 | * for all DMA queues. */ | ||
| 497 | efx_for_each_channel(channel, efx) { | ||
| 498 | EFX_LOG(channel->efx, "shut down evq %d\n", channel->channel); | ||
| 499 | |||
| 500 | efx_fini_eventq(channel); | ||
| 501 | } | ||
| 502 | } | ||
| 503 | |||
| 504 | static void efx_remove_channel(struct efx_channel *channel) | ||
| 505 | { | ||
| 506 | struct efx_tx_queue *tx_queue; | ||
| 507 | struct efx_rx_queue *rx_queue; | ||
| 508 | |||
| 509 | EFX_LOG(channel->efx, "destroy chan %d\n", channel->channel); | ||
| 510 | |||
| 511 | efx_for_each_channel_rx_queue(rx_queue, channel) | ||
| 512 | efx_remove_rx_queue(rx_queue); | ||
| 513 | efx_for_each_channel_tx_queue(tx_queue, channel) | ||
| 514 | efx_remove_tx_queue(tx_queue); | ||
| 515 | efx_remove_eventq(channel); | ||
| 516 | |||
| 517 | channel->used_flags = 0; | ||
| 518 | } | ||
| 519 | |||
| 520 | void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay) | ||
| 521 | { | ||
| 522 | queue_delayed_work(refill_workqueue, &rx_queue->work, delay); | ||
| 523 | } | ||
| 524 | |||
| 525 | /************************************************************************** | ||
| 526 | * | ||
| 527 | * Port handling | ||
| 528 | * | ||
| 529 | **************************************************************************/ | ||
| 530 | |||
| 531 | /* This ensures that the kernel is kept informed (via | ||
| 532 | * netif_carrier_on/off) of the link status, and also maintains the | ||
| 533 | * link-status-based stop on the port's TX queue. | ||
| 534 | */ | ||
| 535 | static void efx_link_status_changed(struct efx_nic *efx) | ||
| 536 | { | ||
| 537 | int carrier_ok; | ||
| 538 | |||
| 539 | /* SFC Bug 5356: A net_dev notifier is registered, so we must ensure | ||
| 540 | * that no events are triggered between unregister_netdev() and the | ||
| 541 | * driver unloading. A more general condition is that NETDEV_CHANGE | ||
| 542 | * can only be generated between NETDEV_UP and NETDEV_DOWN */ | ||
| 543 | if (!netif_running(efx->net_dev)) | ||
| 544 | return; | ||
| 545 | |||
| 546 | carrier_ok = netif_carrier_ok(efx->net_dev) ? 1 : 0; | ||
| 547 | if (efx->link_up != carrier_ok) { | ||
| 548 | efx->n_link_state_changes++; | ||
| 549 | |||
| 550 | if (efx->link_up) | ||
| 551 | netif_carrier_on(efx->net_dev); | ||
| 552 | else | ||
| 553 | netif_carrier_off(efx->net_dev); | ||
| 554 | } | ||
| 555 | |||
| 556 | /* Status message for kernel log */ | ||
| 557 | if (efx->link_up) { | ||
| 558 | struct mii_if_info *gmii = &efx->mii; | ||
| 559 | unsigned adv, lpa; | ||
| 560 | /* NONE here means direct XAUI from the controller, with no | ||
| 561 | * MDIO-attached device we can query. */ | ||
| 562 | if (efx->phy_type != PHY_TYPE_NONE) { | ||
| 563 | adv = gmii_advertised(gmii); | ||
| 564 | lpa = gmii_lpa(gmii); | ||
| 565 | } else { | ||
| 566 | lpa = GM_LPA_10000 | LPA_DUPLEX; | ||
| 567 | adv = lpa; | ||
| 568 | } | ||
| 569 | EFX_INFO(efx, "link up at %dMbps %s-duplex " | ||
| 570 | "(adv %04x lpa %04x) (MTU %d)%s\n", | ||
| 571 | (efx->link_options & GM_LPA_10000 ? 10000 : | ||
| 572 | (efx->link_options & GM_LPA_1000 ? 1000 : | ||
| 573 | (efx->link_options & GM_LPA_100 ? 100 : | ||
| 574 | 10))), | ||
| 575 | (efx->link_options & GM_LPA_DUPLEX ? | ||
| 576 | "full" : "half"), | ||
| 577 | adv, lpa, | ||
| 578 | efx->net_dev->mtu, | ||
| 579 | (efx->promiscuous ? " [PROMISC]" : "")); | ||
| 580 | } else { | ||
| 581 | EFX_INFO(efx, "link down\n"); | ||
| 582 | } | ||
| 583 | |||
| 584 | } | ||
| 585 | |||
| 586 | /* This call reinitialises the MAC to pick up new PHY settings. The | ||
| 587 | * caller must hold the mac_lock */ | ||
| 588 | static void __efx_reconfigure_port(struct efx_nic *efx) | ||
| 589 | { | ||
| 590 | WARN_ON(!mutex_is_locked(&efx->mac_lock)); | ||
| 591 | |||
| 592 | EFX_LOG(efx, "reconfiguring MAC from PHY settings on CPU %d\n", | ||
| 593 | raw_smp_processor_id()); | ||
| 594 | |||
| 595 | falcon_reconfigure_xmac(efx); | ||
| 596 | |||
| 597 | /* Inform kernel of loss/gain of carrier */ | ||
| 598 | efx_link_status_changed(efx); | ||
| 599 | } | ||
| 600 | |||
| 601 | /* Reinitialise the MAC to pick up new PHY settings, even if the port is | ||
| 602 | * disabled. */ | ||
| 603 | void efx_reconfigure_port(struct efx_nic *efx) | ||
| 604 | { | ||
| 605 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 606 | |||
| 607 | mutex_lock(&efx->mac_lock); | ||
| 608 | __efx_reconfigure_port(efx); | ||
| 609 | mutex_unlock(&efx->mac_lock); | ||
| 610 | } | ||
| 611 | |||
| 612 | /* Asynchronous efx_reconfigure_port work item. To speed up efx_flush_all() | ||
| 613 | * we don't efx_reconfigure_port() if the port is disabled. Care is taken | ||
| 614 | * in efx_stop_all() and efx_start_port() to prevent PHY events being lost */ | ||
| 615 | static void efx_reconfigure_work(struct work_struct *data) | ||
| 616 | { | ||
| 617 | struct efx_nic *efx = container_of(data, struct efx_nic, | ||
| 618 | reconfigure_work); | ||
| 619 | |||
| 620 | mutex_lock(&efx->mac_lock); | ||
| 621 | if (efx->port_enabled) | ||
| 622 | __efx_reconfigure_port(efx); | ||
| 623 | mutex_unlock(&efx->mac_lock); | ||
| 624 | } | ||
| 625 | |||
| 626 | static int efx_probe_port(struct efx_nic *efx) | ||
| 627 | { | ||
| 628 | int rc; | ||
| 629 | |||
| 630 | EFX_LOG(efx, "create port\n"); | ||
| 631 | |||
| 632 | /* Connect up MAC/PHY operations table and read MAC address */ | ||
| 633 | rc = falcon_probe_port(efx); | ||
| 634 | if (rc) | ||
| 635 | goto err; | ||
| 636 | |||
| 637 | /* Sanity check MAC address */ | ||
| 638 | if (is_valid_ether_addr(efx->mac_address)) { | ||
| 639 | memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); | ||
| 640 | } else { | ||
| 641 | DECLARE_MAC_BUF(mac); | ||
| 642 | |||
| 643 | EFX_ERR(efx, "invalid MAC address %s\n", | ||
| 644 | print_mac(mac, efx->mac_address)); | ||
| 645 | if (!allow_bad_hwaddr) { | ||
| 646 | rc = -EINVAL; | ||
| 647 | goto err; | ||
| 648 | } | ||
| 649 | random_ether_addr(efx->net_dev->dev_addr); | ||
| 650 | EFX_INFO(efx, "using locally-generated MAC %s\n", | ||
| 651 | print_mac(mac, efx->net_dev->dev_addr)); | ||
| 652 | } | ||
| 653 | |||
| 654 | return 0; | ||
| 655 | |||
| 656 | err: | ||
| 657 | efx_remove_port(efx); | ||
| 658 | return rc; | ||
| 659 | } | ||
| 660 | |||
| 661 | static int efx_init_port(struct efx_nic *efx) | ||
| 662 | { | ||
| 663 | int rc; | ||
| 664 | |||
| 665 | EFX_LOG(efx, "init port\n"); | ||
| 666 | |||
| 667 | /* Initialise the MAC and PHY */ | ||
| 668 | rc = falcon_init_xmac(efx); | ||
| 669 | if (rc) | ||
| 670 | return rc; | ||
| 671 | |||
| 672 | efx->port_initialized = 1; | ||
| 673 | |||
| 674 | /* Reconfigure port to program MAC registers */ | ||
| 675 | falcon_reconfigure_xmac(efx); | ||
| 676 | |||
| 677 | return 0; | ||
| 678 | } | ||
| 679 | |||
| 680 | /* Allow efx_reconfigure_port() to be scheduled, and close the window | ||
| 681 | * between efx_stop_port and efx_flush_all whereby a previously scheduled | ||
| 682 | * efx_reconfigure_port() may have been cancelled */ | ||
| 683 | static void efx_start_port(struct efx_nic *efx) | ||
| 684 | { | ||
| 685 | EFX_LOG(efx, "start port\n"); | ||
| 686 | BUG_ON(efx->port_enabled); | ||
| 687 | |||
| 688 | mutex_lock(&efx->mac_lock); | ||
| 689 | efx->port_enabled = 1; | ||
| 690 | __efx_reconfigure_port(efx); | ||
| 691 | mutex_unlock(&efx->mac_lock); | ||
| 692 | } | ||
| 693 | |||
| 694 | /* Prevent efx_reconfigure_work and efx_monitor() from executing, and | ||
| 695 | * efx_set_multicast_list() from scheduling efx_reconfigure_work. | ||
| 696 | * efx_reconfigure_work can still be scheduled via NAPI processing | ||
| 697 | * until efx_flush_all() is called */ | ||
| 698 | static void efx_stop_port(struct efx_nic *efx) | ||
| 699 | { | ||
| 700 | EFX_LOG(efx, "stop port\n"); | ||
| 701 | |||
| 702 | mutex_lock(&efx->mac_lock); | ||
| 703 | efx->port_enabled = 0; | ||
| 704 | mutex_unlock(&efx->mac_lock); | ||
| 705 | |||
| 706 | /* Serialise against efx_set_multicast_list() */ | ||
| 707 | if (NET_DEV_REGISTERED(efx)) { | ||
| 708 | netif_tx_lock_bh(efx->net_dev); | ||
| 709 | netif_tx_unlock_bh(efx->net_dev); | ||
| 710 | } | ||
| 711 | } | ||
| 712 | |||
| 713 | static void efx_fini_port(struct efx_nic *efx) | ||
| 714 | { | ||
| 715 | EFX_LOG(efx, "shut down port\n"); | ||
| 716 | |||
| 717 | if (!efx->port_initialized) | ||
| 718 | return; | ||
| 719 | |||
| 720 | falcon_fini_xmac(efx); | ||
| 721 | efx->port_initialized = 0; | ||
| 722 | |||
| 723 | efx->link_up = 0; | ||
| 724 | efx_link_status_changed(efx); | ||
| 725 | } | ||
| 726 | |||
| 727 | static void efx_remove_port(struct efx_nic *efx) | ||
| 728 | { | ||
| 729 | EFX_LOG(efx, "destroying port\n"); | ||
| 730 | |||
| 731 | falcon_remove_port(efx); | ||
| 732 | } | ||
| 733 | |||
| 734 | /************************************************************************** | ||
| 735 | * | ||
| 736 | * NIC handling | ||
| 737 | * | ||
| 738 | **************************************************************************/ | ||
| 739 | |||
| 740 | /* This configures the PCI device to enable I/O and DMA. */ | ||
| 741 | static int efx_init_io(struct efx_nic *efx) | ||
| 742 | { | ||
| 743 | struct pci_dev *pci_dev = efx->pci_dev; | ||
| 744 | dma_addr_t dma_mask = efx->type->max_dma_mask; | ||
| 745 | int rc; | ||
| 746 | |||
| 747 | EFX_LOG(efx, "initialising I/O\n"); | ||
| 748 | |||
| 749 | rc = pci_enable_device(pci_dev); | ||
| 750 | if (rc) { | ||
| 751 | EFX_ERR(efx, "failed to enable PCI device\n"); | ||
| 752 | goto fail1; | ||
| 753 | } | ||
| 754 | |||
| 755 | pci_set_master(pci_dev); | ||
| 756 | |||
| 757 | /* Set the PCI DMA mask. Try all possibilities from our | ||
| 758 | * genuine mask down to 32 bits, because some architectures | ||
| 759 | * (e.g. x86_64 with iommu_sac_force set) will allow 40 bit | ||
| 760 | * masks even though they reject 46 bit masks. | ||
| 761 | */ | ||
| 762 | while (dma_mask > 0x7fffffffUL) { | ||
| 763 | if (pci_dma_supported(pci_dev, dma_mask) && | ||
| 764 | ((rc = pci_set_dma_mask(pci_dev, dma_mask)) == 0)) | ||
| 765 | break; | ||
| 766 | dma_mask >>= 1; | ||
| 767 | } | ||
| 768 | if (rc) { | ||
| 769 | EFX_ERR(efx, "could not find a suitable DMA mask\n"); | ||
| 770 | goto fail2; | ||
| 771 | } | ||
| 772 | EFX_LOG(efx, "using DMA mask %llx\n", (unsigned long long) dma_mask); | ||
| 773 | rc = pci_set_consistent_dma_mask(pci_dev, dma_mask); | ||
| 774 | if (rc) { | ||
| 775 | /* pci_set_consistent_dma_mask() is not *allowed* to | ||
| 776 | * fail with a mask that pci_set_dma_mask() accepted, | ||
| 777 | * but just in case... | ||
| 778 | */ | ||
| 779 | EFX_ERR(efx, "failed to set consistent DMA mask\n"); | ||
| 780 | goto fail2; | ||
| 781 | } | ||
| 782 | |||
| 783 | efx->membase_phys = pci_resource_start(efx->pci_dev, | ||
| 784 | efx->type->mem_bar); | ||
| 785 | rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc"); | ||
| 786 | if (rc) { | ||
| 787 | EFX_ERR(efx, "request for memory BAR failed\n"); | ||
| 788 | rc = -EIO; | ||
| 789 | goto fail3; | ||
| 790 | } | ||
| 791 | efx->membase = ioremap_nocache(efx->membase_phys, | ||
| 792 | efx->type->mem_map_size); | ||
| 793 | if (!efx->membase) { | ||
| 794 | EFX_ERR(efx, "could not map memory BAR %d at %lx+%x\n", | ||
| 795 | efx->type->mem_bar, efx->membase_phys, | ||
| 796 | efx->type->mem_map_size); | ||
| 797 | rc = -ENOMEM; | ||
| 798 | goto fail4; | ||
| 799 | } | ||
| 800 | EFX_LOG(efx, "memory BAR %u at %lx+%x (virtual %p)\n", | ||
| 801 | efx->type->mem_bar, efx->membase_phys, efx->type->mem_map_size, | ||
| 802 | efx->membase); | ||
| 803 | |||
| 804 | return 0; | ||
| 805 | |||
| 806 | fail4: | ||
| 807 | release_mem_region(efx->membase_phys, efx->type->mem_map_size); | ||
| 808 | fail3: | ||
| 809 | efx->membase_phys = 0UL; | ||
| 810 | fail2: | ||
| 811 | pci_disable_device(efx->pci_dev); | ||
| 812 | fail1: | ||
| 813 | return rc; | ||
| 814 | } | ||
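/*
 * efx_init_io() negotiates the DMA mask with a common fallback pattern:
 * start from the widest mask the NIC supports and halve it until the
 * platform accepts one, never dropping below 32 bits. A minimal standalone
 * sketch of that loop (not driver code) follows; platform_accepts_mask()
 * is a hypothetical stand-in for the pci_dma_supported()/pci_set_dma_mask()
 * pair and here simply pretends the platform tops out at 40 bits.
 */

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for pci_dma_supported()/pci_set_dma_mask(). */
static int platform_accepts_mask(uint64_t mask)
{
	return mask <= ((1ULL << 40) - 1);
}

int main(void)
{
	uint64_t dma_mask = (1ULL << 46) - 1;	/* NIC's genuine capability */
	int found = 0;

	/* Halve the mask until it is accepted, stopping at 32 bits. */
	while (dma_mask > 0x7fffffffULL) {
		if (platform_accepts_mask(dma_mask)) {
			found = 1;
			break;
		}
		dma_mask >>= 1;
	}

	if (found)
		printf("using DMA mask %llx\n", (unsigned long long)dma_mask);
	else
		printf("could not find a suitable DMA mask\n");
	return found ? 0 : 1;
}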
| 815 | |||
| 816 | static void efx_fini_io(struct efx_nic *efx) | ||
| 817 | { | ||
| 818 | EFX_LOG(efx, "shutting down I/O\n"); | ||
| 819 | |||
| 820 | if (efx->membase) { | ||
| 821 | iounmap(efx->membase); | ||
| 822 | efx->membase = NULL; | ||
| 823 | } | ||
| 824 | |||
| 825 | if (efx->membase_phys) { | ||
| 826 | pci_release_region(efx->pci_dev, efx->type->mem_bar); | ||
| 827 | efx->membase_phys = 0UL; | ||
| 828 | } | ||
| 829 | |||
| 830 | pci_disable_device(efx->pci_dev); | ||
| 831 | } | ||
| 832 | |||
| 833 | /* Probe the number and type of interrupts we are able to obtain. */ | ||
| 834 | static void efx_probe_interrupts(struct efx_nic *efx) | ||
| 835 | { | ||
| 836 | int max_channel = efx->type->phys_addr_channels - 1; | ||
| 837 | struct msix_entry xentries[EFX_MAX_CHANNELS]; | ||
| 838 | int rc, i; | ||
| 839 | |||
| 840 | if (efx->interrupt_mode == EFX_INT_MODE_MSIX) { | ||
| 841 | BUG_ON(!pci_find_capability(efx->pci_dev, PCI_CAP_ID_MSIX)); | ||
| 842 | |||
| 843 | efx->rss_queues = rss_cpus ? rss_cpus : num_online_cpus(); | ||
| 844 | efx->rss_queues = min(efx->rss_queues, max_channel + 1); | ||
| 845 | efx->rss_queues = min(efx->rss_queues, EFX_MAX_CHANNELS); | ||
| 846 | |||
| 847 | /* Request maximum number of MSI-X interrupts, and fill out | ||
| 848 | * the channel interrupt information from the allowed allocation */ | ||
| 849 | for (i = 0; i < efx->rss_queues; i++) | ||
| 850 | xentries[i].entry = i; | ||
| 851 | rc = pci_enable_msix(efx->pci_dev, xentries, efx->rss_queues); | ||
| 852 | if (rc > 0) { | ||
| 853 | EFX_BUG_ON_PARANOID(rc >= efx->rss_queues); | ||
| 854 | efx->rss_queues = rc; | ||
| 855 | rc = pci_enable_msix(efx->pci_dev, xentries, | ||
| 856 | efx->rss_queues); | ||
| 857 | } | ||
| 858 | |||
| 859 | if (rc == 0) { | ||
| 860 | for (i = 0; i < efx->rss_queues; i++) { | ||
| 861 | efx->channel[i].has_interrupt = 1; | ||
| 862 | efx->channel[i].irq = xentries[i].vector; | ||
| 863 | } | ||
| 864 | } else { | ||
| 865 | /* Fall back to single channel MSI */ | ||
| 866 | efx->interrupt_mode = EFX_INT_MODE_MSI; | ||
| 867 | EFX_ERR(efx, "could not enable MSI-X\n"); | ||
| 868 | } | ||
| 869 | } | ||
| 870 | |||
| 871 | /* Try single interrupt MSI */ | ||
| 872 | if (efx->interrupt_mode == EFX_INT_MODE_MSI) { | ||
| 873 | efx->rss_queues = 1; | ||
| 874 | rc = pci_enable_msi(efx->pci_dev); | ||
| 875 | if (rc == 0) { | ||
| 876 | efx->channel[0].irq = efx->pci_dev->irq; | ||
| 877 | efx->channel[0].has_interrupt = 1; | ||
| 878 | } else { | ||
| 879 | EFX_ERR(efx, "could not enable MSI\n"); | ||
| 880 | efx->interrupt_mode = EFX_INT_MODE_LEGACY; | ||
| 881 | } | ||
| 882 | } | ||
| 883 | |||
| 884 | /* Assume legacy interrupts */ | ||
| 885 | if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) { | ||
| 886 | efx->rss_queues = 1; | ||
| 887 | /* Every channel is interruptible */ | ||
| 888 | for (i = 0; i < EFX_MAX_CHANNELS; i++) | ||
| 889 | efx->channel[i].has_interrupt = 1; | ||
| 890 | efx->legacy_irq = efx->pci_dev->irq; | ||
| 891 | } | ||
| 892 | } | ||
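/*
 * efx_probe_interrupts() walks a graceful-degradation ladder: MSI-X with
 * one vector per RSS queue, then single-vector MSI, then the legacy INTx
 * line. A minimal standalone sketch of that cascade (not driver code)
 * follows; enable_msix() and enable_msi() are hypothetical stubs standing
 * in for pci_enable_msix() and pci_enable_msi(), wired to fail so the
 * fallback path is exercised.
 */

#include <stdio.h>

enum int_mode { INT_MODE_MSIX, INT_MODE_MSI, INT_MODE_LEGACY };

/* Hypothetical stubs; both fail so every fallback step runs. */
static int enable_msix(int nvec) { (void)nvec; return -1; }
static int enable_msi(void) { return -1; }

int main(void)
{
	enum int_mode mode = INT_MODE_MSIX;
	int queues = 4;			/* desired RSS queues */

	if (mode == INT_MODE_MSIX && enable_msix(queues) != 0) {
		fprintf(stderr, "could not enable MSI-X\n");
		mode = INT_MODE_MSI;	/* fall back to single-vector MSI */
	}
	if (mode == INT_MODE_MSI) {
		queues = 1;
		if (enable_msi() != 0) {
			fprintf(stderr, "could not enable MSI\n");
			mode = INT_MODE_LEGACY;	/* last resort: INTx */
		}
	}
	if (mode == INT_MODE_LEGACY)
		queues = 1;		/* one shared interrupt line */

	printf("interrupt mode %d with %d queue(s)\n", mode, queues);
	return 0;
}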
| 893 | |||
| 894 | static void efx_remove_interrupts(struct efx_nic *efx) | ||
| 895 | { | ||
| 896 | struct efx_channel *channel; | ||
| 897 | |||
| 898 | /* Remove MSI/MSI-X interrupts */ | ||
| 899 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 900 | channel->irq = 0; | ||
| 901 | pci_disable_msi(efx->pci_dev); | ||
| 902 | pci_disable_msix(efx->pci_dev); | ||
| 903 | |||
| 904 | /* Remove legacy interrupt */ | ||
| 905 | efx->legacy_irq = 0; | ||
| 906 | } | ||
| 907 | |||
| 908 | /* Select the number of resources to use. | ||
| 909 | * Should be called after efx_probe_interrupts() | ||
| 910 | */ | ||
| 911 | static void efx_select_used(struct efx_nic *efx) | ||
| 912 | { | ||
| 913 | struct efx_tx_queue *tx_queue; | ||
| 914 | struct efx_rx_queue *rx_queue; | ||
| 915 | int i; | ||
| 916 | |||
| 917 | /* TX queues. One per port per channel with TX capability | ||
| 918 | * (more than one per port won't work on Linux, due to out | ||
| 919 | * of order issues... but will be fine on Solaris) | ||
| 920 | */ | ||
| 921 | tx_queue = &efx->tx_queue[0]; | ||
| 922 | |||
| 923 | /* Perform this for each channel with TX capabilities. | ||
| 924 | * At the moment, we only support a single TX queue | ||
| 925 | */ | ||
| 926 | tx_queue->used = 1; | ||
| 927 | if ((!EFX_INT_MODE_USE_MSI(efx)) && separate_tx_and_rx_channels) | ||
| 928 | tx_queue->channel = &efx->channel[1]; | ||
| 929 | else | ||
| 930 | tx_queue->channel = &efx->channel[0]; | ||
| 931 | tx_queue->channel->used_flags |= EFX_USED_BY_TX; | ||
| 932 | tx_queue++; | ||
| 933 | |||
| 934 | /* RX queues. Each has a dedicated channel. */ | ||
| 935 | for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { | ||
| 936 | rx_queue = &efx->rx_queue[i]; | ||
| 937 | |||
| 938 | if (i < efx->rss_queues) { | ||
| 939 | rx_queue->used = 1; | ||
| 940 | /* If we allow multiple RX queues per channel | ||
| 941 | * we need to decide that here | ||
| 942 | */ | ||
| 943 | rx_queue->channel = &efx->channel[rx_queue->queue]; | ||
| 944 | rx_queue->channel->used_flags |= EFX_USED_BY_RX; | ||
| 945 | rx_queue++; | ||
| 946 | } | ||
| 947 | } | ||
| 948 | } | ||
| 949 | |||
| 950 | static int efx_probe_nic(struct efx_nic *efx) | ||
| 951 | { | ||
| 952 | int rc; | ||
| 953 | |||
| 954 | EFX_LOG(efx, "creating NIC\n"); | ||
| 955 | |||
| 956 | /* Carry out hardware-type specific initialisation */ | ||
| 957 | rc = falcon_probe_nic(efx); | ||
| 958 | if (rc) | ||
| 959 | return rc; | ||
| 960 | |||
| 961 | /* Determine the number of channels and RX queues by trying to hook | ||
| 962 | * in MSI-X interrupts. */ | ||
| 963 | efx_probe_interrupts(efx); | ||
| 964 | |||
| 965 | /* Determine number of RX queues and TX queues */ | ||
| 966 | efx_select_used(efx); | ||
| 967 | |||
| 968 | /* Initialise the interrupt moderation settings */ | ||
| 969 | efx_init_irq_moderation(efx, tx_irq_mod_usec, rx_irq_mod_usec); | ||
| 970 | |||
| 971 | return 0; | ||
| 972 | } | ||
| 973 | |||
| 974 | static void efx_remove_nic(struct efx_nic *efx) | ||
| 975 | { | ||
| 976 | EFX_LOG(efx, "destroying NIC\n"); | ||
| 977 | |||
| 978 | efx_remove_interrupts(efx); | ||
| 979 | falcon_remove_nic(efx); | ||
| 980 | } | ||
| 981 | |||
| 982 | /************************************************************************** | ||
| 983 | * | ||
| 984 | * NIC startup/shutdown | ||
| 985 | * | ||
| 986 | *************************************************************************/ | ||
| 987 | |||
| 988 | static int efx_probe_all(struct efx_nic *efx) | ||
| 989 | { | ||
| 990 | struct efx_channel *channel; | ||
| 991 | int rc; | ||
| 992 | |||
| 993 | /* Create NIC */ | ||
| 994 | rc = efx_probe_nic(efx); | ||
| 995 | if (rc) { | ||
| 996 | EFX_ERR(efx, "failed to create NIC\n"); | ||
| 997 | goto fail1; | ||
| 998 | } | ||
| 999 | |||
| 1000 | /* Create port */ | ||
| 1001 | rc = efx_probe_port(efx); | ||
| 1002 | if (rc) { | ||
| 1003 | EFX_ERR(efx, "failed to create port\n"); | ||
| 1004 | goto fail2; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | /* Create channels */ | ||
| 1008 | efx_for_each_channel(channel, efx) { | ||
| 1009 | rc = efx_probe_channel(channel); | ||
| 1010 | if (rc) { | ||
| 1011 | EFX_ERR(efx, "failed to create channel %d\n", | ||
| 1012 | channel->channel); | ||
| 1013 | goto fail3; | ||
| 1014 | } | ||
| 1015 | } | ||
| 1016 | |||
| 1017 | return 0; | ||
| 1018 | |||
| 1019 | fail3: | ||
| 1020 | efx_for_each_channel(channel, efx) | ||
| 1021 | efx_remove_channel(channel); | ||
| 1022 | efx_remove_port(efx); | ||
| 1023 | fail2: | ||
| 1024 | efx_remove_nic(efx); | ||
| 1025 | fail1: | ||
| 1026 | return rc; | ||
| 1027 | } | ||
| 1028 | |||
| 1029 | /* Called after previous invocation(s) of efx_stop_all, restarts the | ||
| 1030 | * port, kernel transmit queue, NAPI processing and hardware interrupts, | ||
| 1031 | * and ensures that the port is scheduled to be reconfigured. | ||
| 1032 | * This function is safe to call multiple times when the NIC is in any | ||
| 1033 | * state. */ | ||
| 1034 | static void efx_start_all(struct efx_nic *efx) | ||
| 1035 | { | ||
| 1036 | struct efx_channel *channel; | ||
| 1037 | |||
| 1038 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1039 | |||
| 1040 | /* Check that it is appropriate to restart the interface. All | ||
| 1041 | * of these flags are safe to read under just the rtnl lock */ | ||
| 1042 | if (efx->port_enabled) | ||
| 1043 | return; | ||
| 1044 | if ((efx->state != STATE_RUNNING) && (efx->state != STATE_INIT)) | ||
| 1045 | return; | ||
| 1046 | if (NET_DEV_REGISTERED(efx) && !netif_running(efx->net_dev)) | ||
| 1047 | return; | ||
| 1048 | |||
| 1049 | /* Mark the port as enabled so port reconfigurations can start, then | ||
| 1050 | * restart the transmit interface early so the watchdog timer stops */ | ||
| 1051 | efx_start_port(efx); | ||
| 1052 | efx_wake_queue(efx); | ||
| 1053 | |||
| 1054 | efx_for_each_channel(channel, efx) | ||
| 1055 | efx_start_channel(channel); | ||
| 1056 | |||
| 1057 | falcon_enable_interrupts(efx); | ||
| 1058 | |||
| 1059 | /* Start hardware monitor if we're in RUNNING */ | ||
| 1060 | if (efx->state == STATE_RUNNING) | ||
| 1061 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | ||
| 1062 | efx_monitor_interval); | ||
| 1063 | } | ||
| 1064 | |||
| 1065 | /* Flush all delayed work. Should only be called when no more delayed work | ||
| 1066 | * will be scheduled. This doesn't flush pending online resets (efx_reset), | ||
| 1067 | * since we're holding the rtnl_lock at this point. */ | ||
| 1068 | static void efx_flush_all(struct efx_nic *efx) | ||
| 1069 | { | ||
| 1070 | struct efx_rx_queue *rx_queue; | ||
| 1071 | |||
| 1072 | /* Make sure the hardware monitor is stopped */ | ||
| 1073 | cancel_delayed_work_sync(&efx->monitor_work); | ||
| 1074 | |||
| 1075 | /* Ensure that all RX slow refills are complete. */ | ||
| 1076 | efx_for_each_rx_queue(rx_queue, efx) { | ||
| 1077 | cancel_delayed_work_sync(&rx_queue->work); | ||
| 1078 | } | ||
| 1079 | |||
| 1080 | /* Stop scheduled port reconfigurations */ | ||
| 1081 | cancel_work_sync(&efx->reconfigure_work); | ||
| 1082 | |||
| 1083 | } | ||
| 1084 | |||
| 1085 | /* Quiesce hardware and software without bringing the link down. | ||
| 1086 | * Safe to call multiple times, when the NIC and interface are in any | ||
| 1087 | * state. The caller is guaranteed to subsequently be in a position | ||
| 1088 | * to modify any hardware and software state they see fit without | ||
| 1089 | * taking locks. */ | ||
| 1090 | static void efx_stop_all(struct efx_nic *efx) | ||
| 1091 | { | ||
| 1092 | struct efx_channel *channel; | ||
| 1093 | |||
| 1094 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1095 | |||
| 1096 | /* port_enabled can be read safely under the rtnl lock */ | ||
| 1097 | if (!efx->port_enabled) | ||
| 1098 | return; | ||
| 1099 | |||
| 1100 | /* Disable interrupts and wait for ISR to complete */ | ||
| 1101 | falcon_disable_interrupts(efx); | ||
| 1102 | if (efx->legacy_irq) | ||
| 1103 | synchronize_irq(efx->legacy_irq); | ||
| 1104 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 1105 | if (channel->irq) | ||
| 1106 | synchronize_irq(channel->irq); | ||
| 1107 | |||
| 1108 | /* Stop all NAPI processing and synchronous rx refills */ | ||
| 1109 | efx_for_each_channel(channel, efx) | ||
| 1110 | efx_stop_channel(channel); | ||
| 1111 | |||
| 1112 | /* Stop all asynchronous port reconfigurations. Since all | ||
| 1113 | * event processing has already been stopped, there is no | ||
| 1114 | * window to lose phy events */ | ||
| 1115 | efx_stop_port(efx); | ||
| 1116 | |||
| 1117 | /* Flush reconfigure_work, refill_workqueue, monitor_work */ | ||
| 1118 | efx_flush_all(efx); | ||
| 1119 | |||
| 1120 | /* Isolate the MAC from the TX and RX engines, so that queue | ||
| 1121 | * flushes will complete in a timely fashion. */ | ||
| 1122 | falcon_deconfigure_mac_wrapper(efx); | ||
| 1123 | falcon_drain_tx_fifo(efx); | ||
| 1124 | |||
| 1125 | /* Stop the kernel transmit interface late, so the watchdog | ||
| 1126 | * timer isn't ticking over the flush */ | ||
| 1127 | efx_stop_queue(efx); | ||
| 1128 | if (NET_DEV_REGISTERED(efx)) { | ||
| 1129 | netif_tx_lock_bh(efx->net_dev); | ||
| 1130 | netif_tx_unlock_bh(efx->net_dev); | ||
| 1131 | } | ||
| 1132 | } | ||
| 1133 | |||
| 1134 | static void efx_remove_all(struct efx_nic *efx) | ||
| 1135 | { | ||
| 1136 | struct efx_channel *channel; | ||
| 1137 | |||
| 1138 | efx_for_each_channel(channel, efx) | ||
| 1139 | efx_remove_channel(channel); | ||
| 1140 | efx_remove_port(efx); | ||
| 1141 | efx_remove_nic(efx); | ||
| 1142 | } | ||
| 1143 | |||
| 1144 | /* A convenience function to safely flush all the queues */ | ||
| 1145 | int efx_flush_queues(struct efx_nic *efx) | ||
| 1146 | { | ||
| 1147 | int rc; | ||
| 1148 | |||
| 1149 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1150 | |||
| 1151 | efx_stop_all(efx); | ||
| 1152 | |||
| 1153 | efx_fini_channels(efx); | ||
| 1154 | rc = efx_init_channels(efx); | ||
| 1155 | if (rc) { | ||
| 1156 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
| 1157 | return rc; | ||
| 1158 | } | ||
| 1159 | |||
| 1160 | efx_start_all(efx); | ||
| 1161 | |||
| 1162 | return 0; | ||
| 1163 | } | ||
| 1164 | |||
| 1165 | /************************************************************************** | ||
| 1166 | * | ||
| 1167 | * Interrupt moderation | ||
| 1168 | * | ||
| 1169 | **************************************************************************/ | ||
| 1170 | |||
| 1171 | /* Set interrupt moderation parameters */ | ||
| 1172 | void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs) | ||
| 1173 | { | ||
| 1174 | struct efx_tx_queue *tx_queue; | ||
| 1175 | struct efx_rx_queue *rx_queue; | ||
| 1176 | |||
| 1177 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1178 | |||
| 1179 | efx_for_each_tx_queue(tx_queue, efx) | ||
| 1180 | tx_queue->channel->irq_moderation = tx_usecs; | ||
| 1181 | |||
| 1182 | efx_for_each_rx_queue(rx_queue, efx) | ||
| 1183 | rx_queue->channel->irq_moderation = rx_usecs; | ||
| 1184 | } | ||
| 1185 | |||
| 1186 | /************************************************************************** | ||
| 1187 | * | ||
| 1188 | * Hardware monitor | ||
| 1189 | * | ||
| 1190 | **************************************************************************/ | ||
| 1191 | |||
| 1192 | /* Run periodically off the general workqueue. Serialised against | ||
| 1193 | * efx_reconfigure_port via the mac_lock */ | ||
| 1194 | static void efx_monitor(struct work_struct *data) | ||
| 1195 | { | ||
| 1196 | struct efx_nic *efx = container_of(data, struct efx_nic, | ||
| 1197 | monitor_work.work); | ||
| 1198 | int rc = 0; | ||
| 1199 | |||
| 1200 | EFX_TRACE(efx, "hardware monitor executing on CPU %d\n", | ||
| 1201 | raw_smp_processor_id()); | ||
| 1202 | |||
| 1203 | |||
| 1204 | /* If the mac_lock is already held then a port reconfiguration | ||
| 1205 | * is probably in progress, which will do most of the work of | ||
| 1206 | * check_hw() anyway. */ | ||
| 1207 | if (!mutex_trylock(&efx->mac_lock)) { | ||
| 1208 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | ||
| 1209 | efx_monitor_interval); | ||
| 1210 | return; | ||
| 1211 | } | ||
| 1212 | |||
| 1213 | if (efx->port_enabled) | ||
| 1214 | rc = falcon_check_xmac(efx); | ||
| 1215 | mutex_unlock(&efx->mac_lock); | ||
| 1216 | |||
| 1217 | if (rc) { | ||
| 1218 | if (monitor_reset) { | ||
| 1219 | EFX_ERR(efx, "hardware monitor detected a fault: " | ||
| 1220 | "triggering reset\n"); | ||
| 1221 | efx_schedule_reset(efx, RESET_TYPE_MONITOR); | ||
| 1222 | } else { | ||
| 1223 | EFX_ERR(efx, "hardware monitor detected a fault, " | ||
| 1224 | "skipping reset\n"); | ||
| 1225 | } | ||
| 1226 | } | ||
| 1227 | |||
| 1228 | queue_delayed_work(efx->workqueue, &efx->monitor_work, | ||
| 1229 | efx_monitor_interval); | ||
| 1230 | } | ||
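/*
 * The monitor above deliberately uses mutex_trylock() rather than
 * mutex_lock(): if the MAC lock is contended, a reconfiguration is
 * probably already doing the checking, so the work item just requeues
 * itself for the next interval instead of blocking. A minimal pthread
 * sketch of the same pattern (not driver code; build with -pthread)
 * follows; requeue_monitor() is a hypothetical stand-in for
 * queue_delayed_work().
 */

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for queue_delayed_work(). */
static void requeue_monitor(void)
{
	printf("monitor rescheduled\n");
}

/* One periodic health check: never block on the lock, come back later. */
static void monitor_tick(void)
{
	if (pthread_mutex_trylock(&mac_lock) != 0) {
		requeue_monitor();	/* lock contended: skip this pass */
		return;
	}
	printf("checking hardware\n");
	pthread_mutex_unlock(&mac_lock);
	requeue_monitor();
}

int main(void)
{
	monitor_tick();			/* uncontended: the check runs */

	pthread_mutex_lock(&mac_lock);	/* simulate a reconfiguration */
	monitor_tick();			/* contended: the check is skipped */
	pthread_mutex_unlock(&mac_lock);
	return 0;
}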
| 1231 | |||
| 1232 | /************************************************************************** | ||
| 1233 | * | ||
| 1234 | * ioctls | ||
| 1235 | * | ||
| 1236 | *************************************************************************/ | ||
| 1237 | |||
| 1238 | /* Net device ioctl | ||
| 1239 | * Context: process, rtnl_lock() held. | ||
| 1240 | */ | ||
| 1241 | static int efx_ioctl(struct net_device *net_dev, struct ifreq *ifr, int cmd) | ||
| 1242 | { | ||
| 1243 | struct efx_nic *efx = net_dev->priv; | ||
| 1244 | |||
| 1245 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1246 | |||
| 1247 | return generic_mii_ioctl(&efx->mii, if_mii(ifr), cmd, NULL); | ||
| 1248 | } | ||
| 1249 | |||
| 1250 | /************************************************************************** | ||
| 1251 | * | ||
| 1252 | * NAPI interface | ||
| 1253 | * | ||
| 1254 | **************************************************************************/ | ||
| 1255 | |||
| 1256 | static int efx_init_napi(struct efx_nic *efx) | ||
| 1257 | { | ||
| 1258 | struct efx_channel *channel; | ||
| 1259 | int rc; | ||
| 1260 | |||
| 1261 | efx_for_each_channel(channel, efx) { | ||
| 1262 | channel->napi_dev = efx->net_dev; | ||
| 1263 | rc = efx_lro_init(&channel->lro_mgr, efx); | ||
| 1264 | if (rc) | ||
| 1265 | goto err; | ||
| 1266 | } | ||
| 1267 | return 0; | ||
| 1268 | err: | ||
| 1269 | efx_fini_napi(efx); | ||
| 1270 | return rc; | ||
| 1271 | } | ||
| 1272 | |||
| 1273 | static void efx_fini_napi(struct efx_nic *efx) | ||
| 1274 | { | ||
| 1275 | struct efx_channel *channel; | ||
| 1276 | |||
| 1277 | efx_for_each_channel(channel, efx) { | ||
| 1278 | efx_lro_fini(&channel->lro_mgr); | ||
| 1279 | channel->napi_dev = NULL; | ||
| 1280 | } | ||
| 1281 | } | ||
| 1282 | |||
| 1283 | /************************************************************************** | ||
| 1284 | * | ||
| 1285 | * Kernel netpoll interface | ||
| 1286 | * | ||
| 1287 | *************************************************************************/ | ||
| 1288 | |||
| 1289 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1290 | |||
| 1291 | /* Although in the common case interrupts will be disabled, this is not | ||
| 1292 | * guaranteed. However, all our work happens inside the NAPI callback, | ||
| 1293 | * so no locking is required. | ||
| 1294 | */ | ||
| 1295 | static void efx_netpoll(struct net_device *net_dev) | ||
| 1296 | { | ||
| 1297 | struct efx_nic *efx = net_dev->priv; | ||
| 1298 | struct efx_channel *channel; | ||
| 1299 | |||
| 1300 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 1301 | efx_schedule_channel(channel); | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | #endif | ||
| 1305 | |||
| 1306 | /************************************************************************** | ||
| 1307 | * | ||
| 1308 | * Kernel net device interface | ||
| 1309 | * | ||
| 1310 | *************************************************************************/ | ||
| 1311 | |||
| 1312 | /* Context: process, rtnl_lock() held. */ | ||
| 1313 | static int efx_net_open(struct net_device *net_dev) | ||
| 1314 | { | ||
| 1315 | struct efx_nic *efx = net_dev->priv; | ||
| 1316 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1317 | |||
| 1318 | EFX_LOG(efx, "opening device %s on CPU %d\n", net_dev->name, | ||
| 1319 | raw_smp_processor_id()); | ||
| 1320 | |||
| 1321 | efx_start_all(efx); | ||
| 1322 | return 0; | ||
| 1323 | } | ||
| 1324 | |||
| 1325 | /* Context: process, rtnl_lock() held. | ||
| 1326 | * Note that the kernel will ignore our return code; this method | ||
| 1327 | * should really be void. | ||
| 1328 | */ | ||
| 1329 | static int efx_net_stop(struct net_device *net_dev) | ||
| 1330 | { | ||
| 1331 | struct efx_nic *efx = net_dev->priv; | ||
| 1332 | int rc; | ||
| 1333 | |||
| 1334 | EFX_LOG(efx, "closing %s on CPU %d\n", net_dev->name, | ||
| 1335 | raw_smp_processor_id()); | ||
| 1336 | |||
| 1337 | /* Stop the device and flush all the channels */ | ||
| 1338 | efx_stop_all(efx); | ||
| 1339 | efx_fini_channels(efx); | ||
| 1340 | rc = efx_init_channels(efx); | ||
| 1341 | if (rc) | ||
| 1342 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
| 1343 | |||
| 1344 | return 0; | ||
| 1345 | } | ||
| 1346 | |||
| 1347 | /* Context: process, dev_base_lock held, non-blocking. */ | ||
| 1348 | static struct net_device_stats *efx_net_stats(struct net_device *net_dev) | ||
| 1349 | { | ||
| 1350 | struct efx_nic *efx = net_dev->priv; | ||
| 1351 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | ||
| 1352 | struct net_device_stats *stats = &net_dev->stats; | ||
| 1353 | |||
| 1354 | if (!spin_trylock(&efx->stats_lock)) | ||
| 1355 | return stats; | ||
| 1356 | if (efx->state == STATE_RUNNING) { | ||
| 1357 | falcon_update_stats_xmac(efx); | ||
| 1358 | falcon_update_nic_stats(efx); | ||
| 1359 | } | ||
| 1360 | spin_unlock(&efx->stats_lock); | ||
| 1361 | |||
| 1362 | stats->rx_packets = mac_stats->rx_packets; | ||
| 1363 | stats->tx_packets = mac_stats->tx_packets; | ||
| 1364 | stats->rx_bytes = mac_stats->rx_bytes; | ||
| 1365 | stats->tx_bytes = mac_stats->tx_bytes; | ||
| 1366 | stats->multicast = mac_stats->rx_multicast; | ||
| 1367 | stats->collisions = mac_stats->tx_collision; | ||
| 1368 | stats->rx_length_errors = (mac_stats->rx_gtjumbo + | ||
| 1369 | mac_stats->rx_length_error); | ||
| 1370 | stats->rx_over_errors = efx->n_rx_nodesc_drop_cnt; | ||
| 1371 | stats->rx_crc_errors = mac_stats->rx_bad; | ||
| 1372 | stats->rx_frame_errors = mac_stats->rx_align_error; | ||
| 1373 | stats->rx_fifo_errors = mac_stats->rx_overflow; | ||
| 1374 | stats->rx_missed_errors = mac_stats->rx_missed; | ||
| 1375 | stats->tx_window_errors = mac_stats->tx_late_collision; | ||
| 1376 | |||
| 1377 | stats->rx_errors = (stats->rx_length_errors + | ||
| 1378 | stats->rx_over_errors + | ||
| 1379 | stats->rx_crc_errors + | ||
| 1380 | stats->rx_frame_errors + | ||
| 1381 | stats->rx_fifo_errors + | ||
| 1382 | stats->rx_missed_errors + | ||
| 1383 | mac_stats->rx_symbol_error); | ||
| 1384 | stats->tx_errors = (stats->tx_window_errors + | ||
| 1385 | mac_stats->tx_bad); | ||
| 1386 | |||
| 1387 | return stats; | ||
| 1388 | } | ||
| 1389 | |||
| 1390 | /* Context: netif_tx_lock held, BHs disabled. */ | ||
| 1391 | static void efx_watchdog(struct net_device *net_dev) | ||
| 1392 | { | ||
| 1393 | struct efx_nic *efx = net_dev->priv; | ||
| 1394 | |||
| 1395 | EFX_ERR(efx, "TX stuck with stop_count=%d port_enabled=%d: %s\n", | ||
| 1396 | atomic_read(&efx->netif_stop_count), efx->port_enabled, | ||
| 1397 | monitor_reset ? "resetting channels" : "skipping reset"); | ||
| 1398 | |||
| 1399 | if (monitor_reset) | ||
| 1400 | efx_schedule_reset(efx, RESET_TYPE_MONITOR); | ||
| 1401 | } | ||
| 1402 | |||
| 1403 | |||
| 1404 | /* Context: process, rtnl_lock() held. */ | ||
| 1405 | static int efx_change_mtu(struct net_device *net_dev, int new_mtu) | ||
| 1406 | { | ||
| 1407 | struct efx_nic *efx = net_dev->priv; | ||
| 1408 | int rc = 0; | ||
| 1409 | |||
| 1410 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1411 | |||
| 1412 | if (new_mtu > EFX_MAX_MTU) | ||
| 1413 | return -EINVAL; | ||
| 1414 | |||
| 1415 | efx_stop_all(efx); | ||
| 1416 | |||
| 1417 | EFX_LOG(efx, "changing MTU to %d\n", new_mtu); | ||
| 1418 | |||
| 1419 | efx_fini_channels(efx); | ||
| 1420 | net_dev->mtu = new_mtu; | ||
| 1421 | rc = efx_init_channels(efx); | ||
| 1422 | if (rc) | ||
| 1423 | goto fail; | ||
| 1424 | |||
| 1425 | efx_start_all(efx); | ||
| 1426 | return rc; | ||
| 1427 | |||
| 1428 | fail: | ||
| 1429 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
| 1430 | return rc; | ||
| 1431 | } | ||
| 1432 | |||
| 1433 | static int efx_set_mac_address(struct net_device *net_dev, void *data) | ||
| 1434 | { | ||
| 1435 | struct efx_nic *efx = net_dev->priv; | ||
| 1436 | struct sockaddr *addr = data; | ||
| 1437 | char *new_addr = addr->sa_data; | ||
| 1438 | |||
| 1439 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1440 | |||
| 1441 | if (!is_valid_ether_addr(new_addr)) { | ||
| 1442 | DECLARE_MAC_BUF(mac); | ||
| 1443 | EFX_ERR(efx, "invalid ethernet MAC address requested: %s\n", | ||
| 1444 | print_mac(mac, new_addr)); | ||
| 1445 | return -EINVAL; | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | memcpy(net_dev->dev_addr, new_addr, net_dev->addr_len); | ||
| 1449 | |||
| 1450 | /* Reconfigure the MAC */ | ||
| 1451 | efx_reconfigure_port(efx); | ||
| 1452 | |||
| 1453 | return 0; | ||
| 1454 | } | ||
| 1455 | |||
| 1456 | /* Context: netif_tx_lock held, BHs disabled. */ | ||
| 1457 | static void efx_set_multicast_list(struct net_device *net_dev) | ||
| 1458 | { | ||
| 1459 | struct efx_nic *efx = net_dev->priv; | ||
| 1460 | struct dev_mc_list *mc_list = net_dev->mc_list; | ||
| 1461 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | ||
| 1462 | int promiscuous; | ||
| 1463 | u32 crc; | ||
| 1464 | int bit; | ||
| 1465 | int i; | ||
| 1466 | |||
| 1467 | /* Set per-MAC promiscuity flag and reconfigure MAC if necessary */ | ||
| 1468 | promiscuous = (net_dev->flags & IFF_PROMISC) ? 1 : 0; | ||
| 1469 | if (efx->promiscuous != promiscuous) { | ||
| 1470 | efx->promiscuous = promiscuous; | ||
| 1471 | /* Close the window between efx_stop_port() and efx_flush_all() | ||
| 1472 | * by only queuing work when the port is enabled. */ | ||
| 1473 | if (efx->port_enabled) | ||
| 1474 | queue_work(efx->workqueue, &efx->reconfigure_work); | ||
| 1475 | } | ||
| 1476 | |||
| 1477 | /* Build multicast hash table */ | ||
| 1478 | if (promiscuous || (net_dev->flags & IFF_ALLMULTI)) { | ||
| 1479 | memset(mc_hash, 0xff, sizeof(*mc_hash)); | ||
| 1480 | } else { | ||
| 1481 | memset(mc_hash, 0x00, sizeof(*mc_hash)); | ||
| 1482 | for (i = 0; i < net_dev->mc_count; i++) { | ||
| 1483 | crc = ether_crc_le(ETH_ALEN, mc_list->dmi_addr); | ||
| 1484 | bit = crc & (EFX_MCAST_HASH_ENTRIES - 1); | ||
| 1485 | set_bit_le(bit, mc_hash->byte); | ||
| 1486 | mc_list = mc_list->next; | ||
| 1487 | } | ||
| 1488 | } | ||
| 1489 | |||
| 1490 | /* Create and activate new global multicast hash table */ | ||
| 1491 | falcon_set_multicast_hash(efx); | ||
| 1492 | } | ||
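/*
 * Each multicast address above is admitted by hashing it with the
 * little-endian Ethernet CRC and using the low bits as an index into the
 * hardware hash table. The standalone sketch below (not driver code)
 * reimplements that calculation: a reflected CRC-32 with polynomial
 * 0xEDB88320, seeded with ~0 and not inverted at the end, which matches
 * what ether_crc_le() computes; the 256-entry table size is an assumption
 * standing in for EFX_MCAST_HASH_ENTRIES.
 */

#include <stdint.h>
#include <stdio.h>

/* Reflected CRC-32 over 'len' bytes, seeded with ~0, no final inversion. */
static uint32_t crc32_le(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffffu;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= data[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
	}
	return crc;
}

int main(void)
{
	/* Example multicast MAC (the IPv4 all-hosts group 224.0.0.1). */
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	const unsigned int hash_entries = 256;	/* assumed table size */
	uint32_t crc = crc32_le(mac, 6);
	unsigned int bucket = crc & (hash_entries - 1);

	/* The driver would set bit 'bucket' in its multicast hash. */
	printf("crc %08x -> hash bucket %u\n", (unsigned int)crc, bucket);
	return 0;
}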
| 1493 | |||
| 1494 | static int efx_netdev_event(struct notifier_block *this, | ||
| 1495 | unsigned long event, void *ptr) | ||
| 1496 | { | ||
| 1497 | struct net_device *net_dev = (struct net_device *)ptr; | ||
| 1498 | |||
| 1499 | if (net_dev->open == efx_net_open && event == NETDEV_CHANGENAME) { | ||
| 1500 | struct efx_nic *efx = net_dev->priv; | ||
| 1501 | |||
| 1502 | strcpy(efx->name, net_dev->name); | ||
| 1503 | } | ||
| 1504 | |||
| 1505 | return NOTIFY_DONE; | ||
| 1506 | } | ||
| 1507 | |||
| 1508 | static struct notifier_block efx_netdev_notifier = { | ||
| 1509 | .notifier_call = efx_netdev_event, | ||
| 1510 | }; | ||
| 1511 | |||
| 1512 | static int efx_register_netdev(struct efx_nic *efx) | ||
| 1513 | { | ||
| 1514 | struct net_device *net_dev = efx->net_dev; | ||
| 1515 | int rc; | ||
| 1516 | |||
| 1517 | net_dev->watchdog_timeo = 5 * HZ; | ||
| 1518 | net_dev->irq = efx->pci_dev->irq; | ||
| 1519 | net_dev->open = efx_net_open; | ||
| 1520 | net_dev->stop = efx_net_stop; | ||
| 1521 | net_dev->get_stats = efx_net_stats; | ||
| 1522 | net_dev->tx_timeout = &efx_watchdog; | ||
| 1523 | net_dev->hard_start_xmit = efx_hard_start_xmit; | ||
| 1524 | net_dev->do_ioctl = efx_ioctl; | ||
| 1525 | net_dev->change_mtu = efx_change_mtu; | ||
| 1526 | net_dev->set_mac_address = efx_set_mac_address; | ||
| 1527 | net_dev->set_multicast_list = efx_set_multicast_list; | ||
| 1528 | #ifdef CONFIG_NET_POLL_CONTROLLER | ||
| 1529 | net_dev->poll_controller = efx_netpoll; | ||
| 1530 | #endif | ||
| 1531 | SET_NETDEV_DEV(net_dev, &efx->pci_dev->dev); | ||
| 1532 | SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops); | ||
| 1533 | |||
| 1534 | /* Always start with carrier off; PHY events will detect the link */ | ||
| 1535 | netif_carrier_off(efx->net_dev); | ||
| 1536 | |||
| 1537 | /* Clear MAC statistics */ | ||
| 1538 | falcon_update_stats_xmac(efx); | ||
| 1539 | memset(&efx->mac_stats, 0, sizeof(efx->mac_stats)); | ||
| 1540 | |||
| 1541 | rc = register_netdev(net_dev); | ||
| 1542 | if (rc) { | ||
| 1543 | EFX_ERR(efx, "could not register net dev\n"); | ||
| 1544 | return rc; | ||
| 1545 | } | ||
| 1546 | strcpy(efx->name, net_dev->name); | ||
| 1547 | |||
| 1548 | return 0; | ||
| 1549 | } | ||
| 1550 | |||
| 1551 | static void efx_unregister_netdev(struct efx_nic *efx) | ||
| 1552 | { | ||
| 1553 | struct efx_tx_queue *tx_queue; | ||
| 1554 | |||
| 1555 | if (!efx->net_dev) | ||
| 1556 | return; | ||
| 1557 | |||
| 1558 | BUG_ON(efx->net_dev->priv != efx); | ||
| 1559 | |||
| 1560 | /* Free up any skbs still remaining. This has to happen before | ||
| 1561 | * we try to unregister the netdev, as running their destructors | ||
| 1562 | * may be needed to get the device reference count to 0. */ | ||
| 1563 | efx_for_each_tx_queue(tx_queue, efx) | ||
| 1564 | efx_release_tx_buffers(tx_queue); | ||
| 1565 | |||
| 1566 | if (NET_DEV_REGISTERED(efx)) { | ||
| 1567 | strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name)); | ||
| 1568 | unregister_netdev(efx->net_dev); | ||
| 1569 | } | ||
| 1570 | } | ||
| 1571 | |||
| 1572 | /************************************************************************** | ||
| 1573 | * | ||
| 1574 | * Device reset and suspend | ||
| 1575 | * | ||
| 1576 | **************************************************************************/ | ||
| 1577 | |||
| 1578 | /* The final hardware and software shutdown before a reset. */ | ||
| 1579 | static int efx_reset_down(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
| 1580 | { | ||
| 1581 | int rc; | ||
| 1582 | |||
| 1583 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1584 | |||
| 1585 | rc = falcon_xmac_get_settings(efx, ecmd); | ||
| 1586 | if (rc) { | ||
| 1587 | EFX_ERR(efx, "could not back up PHY settings\n"); | ||
| 1588 | goto fail; | ||
| 1589 | } | ||
| 1590 | |||
| 1591 | efx_fini_channels(efx); | ||
| 1592 | return 0; | ||
| 1593 | |||
| 1594 | fail: | ||
| 1595 | return rc; | ||
| 1596 | } | ||
| 1597 | |||
| 1598 | /* The first part of software initialisation after a hardware reset. | ||
| 1599 | * This function does not handle serialisation with the kernel; it | ||
| 1600 | * assumes the caller has done this. */ | ||
| 1601 | static int efx_reset_up(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
| 1602 | { | ||
| 1603 | int rc; | ||
| 1604 | |||
| 1605 | rc = efx_init_channels(efx); | ||
| 1606 | if (rc) | ||
| 1607 | goto fail1; | ||
| 1608 | |||
| 1609 | /* Restore MAC and PHY settings. */ | ||
| 1610 | rc = falcon_xmac_set_settings(efx, ecmd); | ||
| 1611 | if (rc) { | ||
| 1612 | EFX_ERR(efx, "could not restore PHY settings\n"); | ||
| 1613 | goto fail2; | ||
| 1614 | } | ||
| 1615 | |||
| 1616 | return 0; | ||
| 1617 | |||
| 1618 | fail2: | ||
| 1619 | efx_fini_channels(efx); | ||
| 1620 | fail1: | ||
| 1621 | return rc; | ||
| 1622 | } | ||
| 1623 | |||
| 1624 | /* Reset the NIC as transparently as possible. Do not reset the PHY. | ||
| 1625 | * Note that the reset may fail, in which case the card will be left | ||
| 1626 | * in a most-probably-unusable state. | ||
| 1627 | * | ||
| 1628 | * This function will sleep. You cannot reset from within an atomic | ||
| 1629 | * state; use efx_schedule_reset() instead. | ||
| 1630 | * | ||
| 1631 | * Grabs the rtnl_lock. | ||
| 1632 | */ | ||
| 1633 | static int efx_reset(struct efx_nic *efx) | ||
| 1634 | { | ||
| 1635 | struct ethtool_cmd ecmd; | ||
| 1636 | enum reset_type method = efx->reset_pending; | ||
| 1637 | int rc; | ||
| 1638 | |||
| 1639 | /* Serialise with kernel interfaces */ | ||
| 1640 | rtnl_lock(); | ||
| 1641 | |||
| 1642 | /* If we're not RUNNING then don't reset. Leave the reset_pending | ||
| 1643 | * flag set so that efx_pci_probe_main will be retried */ | ||
| 1644 | if (efx->state != STATE_RUNNING) { | ||
| 1645 | EFX_INFO(efx, "scheduled reset quenched. NIC not RUNNING\n"); | ||
| 1646 | goto unlock_rtnl; | ||
| 1647 | } | ||
| 1648 | |||
| 1649 | efx->state = STATE_RESETTING; | ||
| 1650 | EFX_INFO(efx, "resetting (%d)\n", method); | ||
| 1651 | |||
| 1652 | /* The net_dev->get_stats handler is quite slow, and will fail | ||
| 1653 | * if a fetch is pending over reset. Serialise against it. */ | ||
| 1654 | spin_lock(&efx->stats_lock); | ||
| 1655 | spin_unlock(&efx->stats_lock); | ||
| 1656 | |||
| 1657 | efx_stop_all(efx); | ||
| 1658 | mutex_lock(&efx->mac_lock); | ||
| 1659 | |||
| 1660 | rc = efx_reset_down(efx, &ecmd); | ||
| 1661 | if (rc) | ||
| 1662 | goto fail1; | ||
| 1663 | |||
| 1664 | rc = falcon_reset_hw(efx, method); | ||
| 1665 | if (rc) { | ||
| 1666 | EFX_ERR(efx, "failed to reset hardware\n"); | ||
| 1667 | goto fail2; | ||
| 1668 | } | ||
| 1669 | |||
| 1670 | /* Allow resets to be rescheduled. */ | ||
| 1671 | efx->reset_pending = RESET_TYPE_NONE; | ||
| 1672 | |||
| 1673 | /* Reinitialise bus-mastering, which may have been turned off before | ||
| 1674 | * the reset was scheduled. This is still appropriate, even in the | ||
| 1675 | * RESET_TYPE_DISABLE case, since this driver generally assumes the hardware | ||
| 1676 | * can respond to requests. */ | ||
| 1677 | pci_set_master(efx->pci_dev); | ||
| 1678 | |||
| 1679 | /* Reinitialise device. This is appropriate in the RESET_TYPE_DISABLE | ||
| 1680 | * case so the driver can talk to external SRAM */ | ||
| 1681 | rc = falcon_init_nic(efx); | ||
| 1682 | if (rc) { | ||
| 1683 | EFX_ERR(efx, "failed to initialise NIC\n"); | ||
| 1684 | goto fail3; | ||
| 1685 | } | ||
| 1686 | |||
| 1687 | /* Leave device stopped if necessary */ | ||
| 1688 | if (method == RESET_TYPE_DISABLE) { | ||
| 1689 | /* Reinitialise the device anyway so the driver unload sequence | ||
| 1690 | * can talk to the external SRAM */ | ||
| 1691 | (void) falcon_init_nic(efx); | ||
| 1692 | rc = -EIO; | ||
| 1693 | goto fail4; | ||
| 1694 | } | ||
| 1695 | |||
| 1696 | rc = efx_reset_up(efx, &ecmd); | ||
| 1697 | if (rc) | ||
| 1698 | goto fail5; | ||
| 1699 | |||
| 1700 | mutex_unlock(&efx->mac_lock); | ||
| 1701 | EFX_LOG(efx, "reset complete\n"); | ||
| 1702 | |||
| 1703 | efx->state = STATE_RUNNING; | ||
| 1704 | efx_start_all(efx); | ||
| 1705 | |||
| 1706 | unlock_rtnl: | ||
| 1707 | rtnl_unlock(); | ||
| 1708 | return 0; | ||
| 1709 | |||
| 1710 | fail5: | ||
| 1711 | fail4: | ||
| 1712 | fail3: | ||
| 1713 | fail2: | ||
| 1714 | fail1: | ||
| 1715 | EFX_ERR(efx, "has been disabled\n"); | ||
| 1716 | efx->state = STATE_DISABLED; | ||
| 1717 | |||
| 1718 | mutex_unlock(&efx->mac_lock); | ||
| 1719 | rtnl_unlock(); | ||
| 1720 | efx_unregister_netdev(efx); | ||
| 1721 | efx_fini_port(efx); | ||
| 1722 | return rc; | ||
| 1723 | } | ||
| 1724 | |||
| 1725 | /* The worker thread exists so that code that cannot sleep can | ||
| 1726 | * schedule a reset for later. | ||
| 1727 | */ | ||
| 1728 | static void efx_reset_work(struct work_struct *data) | ||
| 1729 | { | ||
| 1730 | struct efx_nic *nic = container_of(data, struct efx_nic, reset_work); | ||
| 1731 | |||
| 1732 | efx_reset(nic); | ||
| 1733 | } | ||
| 1734 | |||
| 1735 | void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) | ||
| 1736 | { | ||
| 1737 | enum reset_type method; | ||
| 1738 | |||
| 1739 | if (efx->reset_pending != RESET_TYPE_NONE) { | ||
| 1740 | EFX_INFO(efx, "quenching already scheduled reset\n"); | ||
| 1741 | return; | ||
| 1742 | } | ||
| 1743 | |||
| 1744 | switch (type) { | ||
| 1745 | case RESET_TYPE_INVISIBLE: | ||
| 1746 | case RESET_TYPE_ALL: | ||
| 1747 | case RESET_TYPE_WORLD: | ||
| 1748 | case RESET_TYPE_DISABLE: | ||
| 1749 | method = type; | ||
| 1750 | break; | ||
| 1751 | case RESET_TYPE_RX_RECOVERY: | ||
| 1752 | case RESET_TYPE_RX_DESC_FETCH: | ||
| 1753 | case RESET_TYPE_TX_DESC_FETCH: | ||
| 1754 | case RESET_TYPE_TX_SKIP: | ||
| 1755 | method = RESET_TYPE_INVISIBLE; | ||
| 1756 | break; | ||
| 1757 | default: | ||
| 1758 | method = RESET_TYPE_ALL; | ||
| 1759 | break; | ||
| 1760 | } | ||
| 1761 | |||
| 1762 | if (method != type) | ||
| 1763 | EFX_LOG(efx, "scheduling reset (%d:%d)\n", type, method); | ||
| 1764 | else | ||
| 1765 | EFX_LOG(efx, "scheduling reset (%d)\n", method); | ||
| 1766 | |||
| 1767 | efx->reset_pending = method; | ||
| 1768 | |||
| 1769 | queue_work(efx->workqueue, &efx->reset_work); | ||
| 1770 | } | ||
| 1771 | |||
| 1772 | /************************************************************************** | ||
| 1773 | * | ||
| 1774 | * List of NICs we support | ||
| 1775 | * | ||
| 1776 | **************************************************************************/ | ||
| 1777 | |||
| 1778 | /* PCI device ID table */ | ||
| 1779 | static struct pci_device_id efx_pci_table[] __devinitdata = { | ||
| 1780 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_A_P_DEVID), | ||
| 1781 | .driver_data = (unsigned long) &falcon_a_nic_type}, | ||
| 1782 | {PCI_DEVICE(EFX_VENDID_SFC, FALCON_B_P_DEVID), | ||
| 1783 | .driver_data = (unsigned long) &falcon_b_nic_type}, | ||
| 1784 | {0} /* end of list */ | ||
| 1785 | }; | ||
| 1786 | |||
| 1787 | /************************************************************************** | ||
| 1788 | * | ||
| 1789 | * Dummy PHY/MAC/Board operations | ||
| 1790 | * | ||
| 1791 | * Can be used where the MAC does not implement this operation | ||
| 1792 | * Needed so all function pointers are valid and do not have to be tested | ||
| 1793 | * before use | ||
| 1794 | * | ||
| 1795 | **************************************************************************/ | ||
| 1796 | int efx_port_dummy_op_int(struct efx_nic *efx) | ||
| 1797 | { | ||
| 1798 | return 0; | ||
| 1799 | } | ||
| 1800 | void efx_port_dummy_op_void(struct efx_nic *efx) {} | ||
| 1801 | void efx_port_dummy_op_blink(struct efx_nic *efx, int blink) {} | ||
| 1802 | |||
| 1803 | static struct efx_phy_operations efx_dummy_phy_operations = { | ||
| 1804 | .init = efx_port_dummy_op_int, | ||
| 1805 | .reconfigure = efx_port_dummy_op_void, | ||
| 1806 | .check_hw = efx_port_dummy_op_int, | ||
| 1807 | .fini = efx_port_dummy_op_void, | ||
| 1808 | .clear_interrupt = efx_port_dummy_op_void, | ||
| 1809 | .reset_xaui = efx_port_dummy_op_void, | ||
| 1810 | }; | ||
| 1811 | |||
| 1812 | /* Dummy board operations */ | ||
| 1813 | static int efx_nic_dummy_op_int(struct efx_nic *nic) | ||
| 1814 | { | ||
| 1815 | return 0; | ||
| 1816 | } | ||
| 1817 | |||
| 1818 | static struct efx_board efx_dummy_board_info = { | ||
| 1819 | .init = efx_nic_dummy_op_int, | ||
| 1820 | .init_leds = efx_port_dummy_op_int, | ||
| 1821 | .set_fault_led = efx_port_dummy_op_blink, | ||
| 1822 | }; | ||
| 1823 | |||
| 1824 | /************************************************************************** | ||
| 1825 | * | ||
| 1826 | * Data housekeeping | ||
| 1827 | * | ||
| 1828 | **************************************************************************/ | ||
| 1829 | |||
| 1830 | /* This zeroes out and then fills in the invariants in a struct | ||
| 1831 | * efx_nic (including all sub-structures). | ||
| 1832 | */ | ||
| 1833 | static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type, | ||
| 1834 | struct pci_dev *pci_dev, struct net_device *net_dev) | ||
| 1835 | { | ||
| 1836 | struct efx_channel *channel; | ||
| 1837 | struct efx_tx_queue *tx_queue; | ||
| 1838 | struct efx_rx_queue *rx_queue; | ||
| 1839 | int i, rc; | ||
| 1840 | |||
| 1841 | /* Initialise common structures */ | ||
| 1842 | memset(efx, 0, sizeof(*efx)); | ||
| 1843 | spin_lock_init(&efx->biu_lock); | ||
| 1844 | spin_lock_init(&efx->phy_lock); | ||
| 1845 | INIT_WORK(&efx->reset_work, efx_reset_work); | ||
| 1846 | INIT_DELAYED_WORK(&efx->monitor_work, efx_monitor); | ||
| 1847 | efx->pci_dev = pci_dev; | ||
| 1848 | efx->state = STATE_INIT; | ||
| 1849 | efx->reset_pending = RESET_TYPE_NONE; | ||
| 1850 | strlcpy(efx->name, pci_name(pci_dev), sizeof(efx->name)); | ||
| 1851 | efx->board_info = efx_dummy_board_info; | ||
| 1852 | |||
| 1853 | efx->net_dev = net_dev; | ||
| 1854 | efx->rx_checksum_enabled = 1; | ||
| 1855 | spin_lock_init(&efx->netif_stop_lock); | ||
| 1856 | spin_lock_init(&efx->stats_lock); | ||
| 1857 | mutex_init(&efx->mac_lock); | ||
| 1858 | efx->phy_op = &efx_dummy_phy_operations; | ||
| 1859 | efx->mii.dev = net_dev; | ||
| 1860 | INIT_WORK(&efx->reconfigure_work, efx_reconfigure_work); | ||
| 1861 | atomic_set(&efx->netif_stop_count, 1); | ||
| 1862 | |||
| 1863 | for (i = 0; i < EFX_MAX_CHANNELS; i++) { | ||
| 1864 | channel = &efx->channel[i]; | ||
| 1865 | channel->efx = efx; | ||
| 1866 | channel->channel = i; | ||
| 1867 | channel->evqnum = i; | ||
| 1868 | channel->work_pending = 0; | ||
| 1869 | } | ||
| 1870 | for (i = 0; i < EFX_MAX_TX_QUEUES; i++) { | ||
| 1871 | tx_queue = &efx->tx_queue[i]; | ||
| 1872 | tx_queue->efx = efx; | ||
| 1873 | tx_queue->queue = i; | ||
| 1874 | tx_queue->buffer = NULL; | ||
| 1875 | tx_queue->channel = &efx->channel[0]; /* for safety */ | ||
| 1876 | } | ||
| 1877 | for (i = 0; i < EFX_MAX_RX_QUEUES; i++) { | ||
| 1878 | rx_queue = &efx->rx_queue[i]; | ||
| 1879 | rx_queue->efx = efx; | ||
| 1880 | rx_queue->queue = i; | ||
| 1881 | rx_queue->channel = &efx->channel[0]; /* for safety */ | ||
| 1882 | rx_queue->buffer = NULL; | ||
| 1883 | spin_lock_init(&rx_queue->add_lock); | ||
| 1884 | INIT_DELAYED_WORK(&rx_queue->work, efx_rx_work); | ||
| 1885 | } | ||
| 1886 | |||
| 1887 | efx->type = type; | ||
| 1888 | |||
| 1889 | /* Sanity-check NIC type */ | ||
| 1890 | EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask & | ||
| 1891 | (efx->type->txd_ring_mask + 1)); | ||
| 1892 | EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask & | ||
| 1893 | (efx->type->rxd_ring_mask + 1)); | ||
| 1894 | EFX_BUG_ON_PARANOID(efx->type->evq_size & | ||
| 1895 | (efx->type->evq_size - 1)); | ||
| 1896 | /* As close as we can get to guaranteeing that we don't overflow */ | ||
| 1897 | EFX_BUG_ON_PARANOID(efx->type->evq_size < | ||
| 1898 | (efx->type->txd_ring_mask + 1 + | ||
| 1899 | efx->type->rxd_ring_mask + 1)); | ||
| 1900 | EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); | ||
| 1901 | |||
| 1902 | /* Higher numbered interrupt modes are less capable! */ | ||
| 1903 | efx->interrupt_mode = max(efx->type->max_interrupt_mode, | ||
| 1904 | interrupt_mode); | ||
| 1905 | |||
| 1906 | efx->workqueue = create_singlethread_workqueue("sfc_work"); | ||
| 1907 | if (!efx->workqueue) { | ||
| 1908 | rc = -ENOMEM; | ||
| 1909 | goto fail1; | ||
| 1910 | } | ||
| 1911 | |||
| 1912 | return 0; | ||
| 1913 | |||
| 1914 | fail1: | ||
| 1915 | return rc; | ||
| 1916 | } | ||
| 1917 | |||
| 1918 | static void efx_fini_struct(struct efx_nic *efx) | ||
| 1919 | { | ||
| 1920 | if (efx->workqueue) { | ||
| 1921 | destroy_workqueue(efx->workqueue); | ||
| 1922 | efx->workqueue = NULL; | ||
| 1923 | } | ||
| 1924 | } | ||
| 1925 | |||
| 1926 | /************************************************************************** | ||
| 1927 | * | ||
| 1928 | * PCI interface | ||
| 1929 | * | ||
| 1930 | **************************************************************************/ | ||
| 1931 | |||
| 1932 | /* Main body of final NIC shutdown code | ||
| 1933 | * This is called only at module unload (or hotplug removal). | ||
| 1934 | */ | ||
| 1935 | static void efx_pci_remove_main(struct efx_nic *efx) | ||
| 1936 | { | ||
| 1937 | EFX_ASSERT_RESET_SERIALISED(efx); | ||
| 1938 | |||
| 1939 | /* Skip everything if we never obtained a valid membase */ | ||
| 1940 | if (!efx->membase) | ||
| 1941 | return; | ||
| 1942 | |||
| 1943 | efx_fini_channels(efx); | ||
| 1944 | efx_fini_port(efx); | ||
| 1945 | |||
| 1946 | /* Shut down the board, then the NIC and board state | ||
| 1947 | falcon_fini_interrupt(efx); | ||
| 1948 | |||
| 1949 | efx_fini_napi(efx); | ||
| 1950 | efx_remove_all(efx); | ||
| 1951 | } | ||
| 1952 | |||
| 1953 | /* Final NIC shutdown | ||
| 1954 | * This is called only at module unload (or hotplug removal). | ||
| 1955 | */ | ||
| 1956 | static void efx_pci_remove(struct pci_dev *pci_dev) | ||
| 1957 | { | ||
| 1958 | struct efx_nic *efx; | ||
| 1959 | |||
| 1960 | efx = pci_get_drvdata(pci_dev); | ||
| 1961 | if (!efx) | ||
| 1962 | return; | ||
| 1963 | |||
| 1964 | /* Mark the NIC as fini, then stop the interface */ | ||
| 1965 | rtnl_lock(); | ||
| 1966 | efx->state = STATE_FINI; | ||
| 1967 | dev_close(efx->net_dev); | ||
| 1968 | |||
| 1969 | /* Allow any queued efx_resets() to complete */ | ||
| 1970 | rtnl_unlock(); | ||
| 1971 | |||
| 1972 | if (efx->membase == NULL) | ||
| 1973 | goto out; | ||
| 1974 | |||
| 1975 | efx_unregister_netdev(efx); | ||
| 1976 | |||
| 1977 | /* Wait for any scheduled resets to complete. No more will be | ||
| 1978 | * scheduled from this point because efx_stop_all() has been | ||
| 1979 | * called, we are no longer registered with driverlink, and | ||
| 1980 | * the net_devices have been removed. */ | ||
| 1981 | flush_workqueue(efx->workqueue); | ||
| 1982 | |||
| 1983 | efx_pci_remove_main(efx); | ||
| 1984 | |||
| 1985 | out: | ||
| 1986 | efx_fini_io(efx); | ||
| 1987 | EFX_LOG(efx, "shutdown successful\n"); | ||
| 1988 | |||
| 1989 | pci_set_drvdata(pci_dev, NULL); | ||
| 1990 | efx_fini_struct(efx); | ||
| 1991 | free_netdev(efx->net_dev); | ||
| 1992 | }; | ||
| 1993 | |||
| 1994 | /* Main body of NIC initialisation | ||
| 1995 | * This is called at module load (or hotplug insertion, theoretically). | ||
| 1996 | */ | ||
| 1997 | static int efx_pci_probe_main(struct efx_nic *efx) | ||
| 1998 | { | ||
| 1999 | int rc; | ||
| 2000 | |||
| 2001 | /* Do start-of-day initialisation */ | ||
| 2002 | rc = efx_probe_all(efx); | ||
| 2003 | if (rc) | ||
| 2004 | goto fail1; | ||
| 2005 | |||
| 2006 | rc = efx_init_napi(efx); | ||
| 2007 | if (rc) | ||
| 2008 | goto fail2; | ||
| 2009 | |||
| 2010 | /* Initialise the board */ | ||
| 2011 | rc = efx->board_info.init(efx); | ||
| 2012 | if (rc) { | ||
| 2013 | EFX_ERR(efx, "failed to initialise board\n"); | ||
| 2014 | goto fail3; | ||
| 2015 | } | ||
| 2016 | |||
| 2017 | rc = falcon_init_nic(efx); | ||
| 2018 | if (rc) { | ||
| 2019 | EFX_ERR(efx, "failed to initialise NIC\n"); | ||
| 2020 | goto fail4; | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | rc = efx_init_port(efx); | ||
| 2024 | if (rc) { | ||
| 2025 | EFX_ERR(efx, "failed to initialise port\n"); | ||
| 2026 | goto fail5; | ||
| 2027 | } | ||
| 2028 | |||
| 2029 | rc = efx_init_channels(efx); | ||
| 2030 | if (rc) | ||
| 2031 | goto fail6; | ||
| 2032 | |||
| 2033 | rc = falcon_init_interrupt(efx); | ||
| 2034 | if (rc) | ||
| 2035 | goto fail7; | ||
| 2036 | |||
| 2037 | return 0; | ||
| 2038 | |||
| 2039 | fail7: | ||
| 2040 | efx_fini_channels(efx); | ||
| 2041 | fail6: | ||
| 2042 | efx_fini_port(efx); | ||
| 2043 | fail5: | ||
| 2044 | fail4: | ||
| 2045 | fail3: | ||
| 2046 | efx_fini_napi(efx); | ||
| 2047 | fail2: | ||
| 2048 | efx_remove_all(efx); | ||
| 2049 | fail1: | ||
| 2050 | return rc; | ||
| 2051 | } | ||
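/*
 * efx_pci_probe_main() uses the kernel's usual goto-based unwind ladder:
 * each failure label releases exactly the resources acquired before it, in
 * reverse order. A minimal standalone sketch of the pattern (not driver
 * code) with three hypothetical setup stages follows; setup_c() is wired
 * to fail so the unwind can be observed.
 */

#include <stdio.h>

static int setup_a(void) { printf("setup A\n"); return 0; }
static int setup_b(void) { printf("setup B\n"); return 0; }
static int setup_c(void) { printf("setup C\n"); return -1; }	/* fails */

static void undo_a(void) { printf("undo A\n"); }
static void undo_b(void) { printf("undo B\n"); }

static int probe(void)
{
	int rc;

	rc = setup_a();
	if (rc)
		goto fail1;
	rc = setup_b();
	if (rc)
		goto fail2;
	rc = setup_c();
	if (rc)
		goto fail3;
	return 0;

fail3:			/* C failed: undo B, then fall through to undo A */
	undo_b();
fail2:			/* B failed: undo A only */
	undo_a();
fail1:			/* A failed: nothing to undo */
	return rc;
}

int main(void)
{
	printf("probe returned %d\n", probe());
	return 0;
}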
| 2052 | |||
| 2053 | /* NIC initialisation | ||
| 2054 | * | ||
| 2055 | * This is called at module load (or hotplug insertion, | ||
| 2056 | * theoretically). It sets up PCI mappings, tests and resets the NIC, | ||
| 2057 | * sets up and registers the network devices with the kernel and hooks | ||
| 2058 | * the interrupt service routine. It does not prepare the device for | ||
| 2059 | * transmission; this is left to the first time one of the network | ||
| 2060 | * interfaces is brought up (i.e. efx_net_open). | ||
| 2061 | */ | ||
| 2062 | static int __devinit efx_pci_probe(struct pci_dev *pci_dev, | ||
| 2063 | const struct pci_device_id *entry) | ||
| 2064 | { | ||
| 2065 | struct efx_nic_type *type = (struct efx_nic_type *) entry->driver_data; | ||
| 2066 | struct net_device *net_dev; | ||
| 2067 | struct efx_nic *efx; | ||
| 2068 | int i, rc; | ||
| 2069 | |||
| 2070 | /* Allocate and initialise a struct net_device and struct efx_nic */ | ||
| 2071 | net_dev = alloc_etherdev(sizeof(*efx)); | ||
| 2072 | if (!net_dev) | ||
| 2073 | return -ENOMEM; | ||
| 2074 | net_dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; | ||
| 2075 | if (lro) | ||
| 2076 | net_dev->features |= NETIF_F_LRO; | ||
| 2077 | efx = net_dev->priv; | ||
| 2078 | pci_set_drvdata(pci_dev, efx); | ||
| 2079 | rc = efx_init_struct(efx, type, pci_dev, net_dev); | ||
| 2080 | if (rc) | ||
| 2081 | goto fail1; | ||
| 2082 | |||
| 2083 | EFX_INFO(efx, "Solarflare Communications NIC detected\n"); | ||
| 2084 | |||
| 2085 | /* Set up basic I/O (BAR mappings etc) */ | ||
| 2086 | rc = efx_init_io(efx); | ||
| 2087 | if (rc) | ||
| 2088 | goto fail2; | ||
| 2089 | |||
| 2090 | /* No serialisation is required with the reset path because | ||
| 2091 | * we're in STATE_INIT. */ | ||
| 2092 | for (i = 0; i < 5; i++) { | ||
| 2093 | rc = efx_pci_probe_main(efx); | ||
| 2094 | if (rc == 0) | ||
| 2095 | break; | ||
| 2096 | |||
| 2097 | /* Serialise against efx_reset(). No more resets will be | ||
| 2098 | * scheduled since efx_stop_all() has been called, and we | ||
| 2099 | * have never been registered with either | ||
| 2100 | * the rtnetlink or driverlink layers. */ | ||
| 2101 | cancel_work_sync(&efx->reset_work); | ||
| 2102 | |||
| 2103 | /* Retry if a recoverable reset event has been scheduled */ | ||
| 2104 | if ((efx->reset_pending != RESET_TYPE_INVISIBLE) && | ||
| 2105 | (efx->reset_pending != RESET_TYPE_ALL)) | ||
| 2106 | goto fail3; | ||
| 2107 | |||
| 2108 | efx->reset_pending = RESET_TYPE_NONE; | ||
| 2109 | } | ||
| 2110 | |||
| 2111 | if (rc) { | ||
| 2112 | EFX_ERR(efx, "Could not reset NIC\n"); | ||
| 2113 | goto fail4; | ||
| 2114 | } | ||
| 2115 | |||
| 2116 | /* Switch to the running state before we expose the device to | ||
| 2117 | * the OS. This is to ensure that the initial gathering of | ||
| 2118 | * MAC stats succeeds. */ | ||
| 2119 | rtnl_lock(); | ||
| 2120 | efx->state = STATE_RUNNING; | ||
| 2121 | rtnl_unlock(); | ||
| 2122 | |||
| 2123 | rc = efx_register_netdev(efx); | ||
| 2124 | if (rc) | ||
| 2125 | goto fail5; | ||
| 2126 | |||
| 2127 | EFX_LOG(efx, "initialisation successful\n"); | ||
| 2128 | |||
| 2129 | return 0; | ||
| 2130 | |||
| 2131 | fail5: | ||
| 2132 | efx_pci_remove_main(efx); | ||
| 2133 | fail4: | ||
| 2134 | fail3: | ||
| 2135 | efx_fini_io(efx); | ||
| 2136 | fail2: | ||
| 2137 | efx_fini_struct(efx); | ||
| 2138 | fail1: | ||
| 2139 | EFX_LOG(efx, "initialisation failed. rc=%d\n", rc); | ||
| 2140 | free_netdev(net_dev); | ||
| 2141 | return rc; | ||
| 2142 | } | ||
| 2143 | |||
| 2144 | static struct pci_driver efx_pci_driver = { | ||
| 2145 | .name = EFX_DRIVER_NAME, | ||
| 2146 | .id_table = efx_pci_table, | ||
| 2147 | .probe = efx_pci_probe, | ||
| 2148 | .remove = efx_pci_remove, | ||
| 2149 | }; | ||
| 2150 | |||
| 2151 | /************************************************************************** | ||
| 2152 | * | ||
| 2153 | * Kernel module interface | ||
| 2154 | * | ||
| 2155 | *************************************************************************/ | ||
| 2156 | |||
| 2157 | module_param(interrupt_mode, uint, 0444); | ||
| 2158 | MODULE_PARM_DESC(interrupt_mode, | ||
| 2159 | "Interrupt mode (0=>MSIX 1=>MSI 2=>legacy)"); | ||
| 2160 | |||
| 2161 | static int __init efx_init_module(void) | ||
| 2162 | { | ||
| 2163 | int rc; | ||
| 2164 | |||
| 2165 | printk(KERN_INFO "Solarflare NET driver v" EFX_DRIVER_VERSION "\n"); | ||
| 2166 | |||
| 2167 | rc = register_netdevice_notifier(&efx_netdev_notifier); | ||
| 2168 | if (rc) | ||
| 2169 | goto err_notifier; | ||
| 2170 | |||
| 2171 | refill_workqueue = create_workqueue("sfc_refill"); | ||
| 2172 | if (!refill_workqueue) { | ||
| 2173 | rc = -ENOMEM; | ||
| 2174 | goto err_refill; | ||
| 2175 | } | ||
| 2176 | |||
| 2177 | rc = pci_register_driver(&efx_pci_driver); | ||
| 2178 | if (rc < 0) | ||
| 2179 | goto err_pci; | ||
| 2180 | |||
| 2181 | return 0; | ||
| 2182 | |||
| 2183 | err_pci: | ||
| 2184 | destroy_workqueue(refill_workqueue); | ||
| 2185 | err_refill: | ||
| 2186 | unregister_netdevice_notifier(&efx_netdev_notifier); | ||
| 2187 | err_notifier: | ||
| 2188 | return rc; | ||
| 2189 | } | ||
| 2190 | |||
| 2191 | static void __exit efx_exit_module(void) | ||
| 2192 | { | ||
| 2193 | printk(KERN_INFO "Solarflare NET driver unloading\n"); | ||
| 2194 | |||
| 2195 | pci_unregister_driver(&efx_pci_driver); | ||
| 2196 | destroy_workqueue(refill_workqueue); | ||
| 2197 | unregister_netdevice_notifier(&efx_netdev_notifier); | ||
| 2198 | |||
| 2199 | } | ||
| 2200 | |||
| 2201 | module_init(efx_init_module); | ||
| 2202 | module_exit(efx_exit_module); | ||
| 2203 | |||
| 2204 | MODULE_AUTHOR("Michael Brown <mbrown@fensystems.co.uk> and " | ||
| 2205 | "Solarflare Communications"); | ||
| 2206 | MODULE_DESCRIPTION("Solarflare Communications network driver"); | ||
| 2207 | MODULE_LICENSE("GPL"); | ||
| 2208 | MODULE_DEVICE_TABLE(pci, efx_pci_table); | ||
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h new file mode 100644 index 000000000000..3b2f69f4a9ab --- /dev/null +++ b/drivers/net/sfc/efx.h | |||
| @@ -0,0 +1,67 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_EFX_H | ||
| 12 | #define EFX_EFX_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /* PCI IDs */ | ||
| 17 | #define EFX_VENDID_SFC 0x1924 | ||
| 18 | #define FALCON_A_P_DEVID 0x0703 | ||
| 19 | #define FALCON_A_S_DEVID 0x6703 | ||
| 20 | #define FALCON_B_P_DEVID 0x0710 | ||
| 21 | |||
| 22 | /* TX */ | ||
| 23 | extern int efx_xmit(struct efx_nic *efx, | ||
| 24 | struct efx_tx_queue *tx_queue, struct sk_buff *skb); | ||
| 25 | extern void efx_stop_queue(struct efx_nic *efx); | ||
| 26 | extern void efx_wake_queue(struct efx_nic *efx); | ||
| 27 | |||
| 28 | /* RX */ | ||
| 29 | extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); | ||
| 30 | extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | ||
| 31 | unsigned int len, int checksummed, int discard); | ||
| 32 | extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); | ||
| 33 | |||
| 34 | /* Channels */ | ||
| 35 | extern void efx_process_channel_now(struct efx_channel *channel); | ||
| 36 | extern int efx_flush_queues(struct efx_nic *efx); | ||
| 37 | |||
| 38 | /* Ports */ | ||
| 39 | extern void efx_reconfigure_port(struct efx_nic *efx); | ||
| 40 | |||
| 41 | /* Global */ | ||
| 42 | extern void efx_schedule_reset(struct efx_nic *efx, enum reset_type type); | ||
| 43 | extern void efx_suspend(struct efx_nic *efx); | ||
| 44 | extern void efx_resume(struct efx_nic *efx); | ||
| 45 | extern void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, | ||
| 46 | int rx_usecs); | ||
| 47 | extern int efx_request_power(struct efx_nic *efx, int mw, const char *name); | ||
| 48 | extern void efx_hex_dump(const u8 *, unsigned int, const char *); | ||
| 49 | |||
| 50 | /* Dummy PHY ops for PHY drivers */ | ||
| 51 | extern int efx_port_dummy_op_int(struct efx_nic *efx); | ||
| 52 | extern void efx_port_dummy_op_void(struct efx_nic *efx); | ||
| 53 | extern void efx_port_dummy_op_blink(struct efx_nic *efx, int blink); | ||
| 54 | |||
| 55 | |||
| 56 | extern unsigned int efx_monitor_interval; | ||
| 57 | |||
| 58 | static inline void efx_schedule_channel(struct efx_channel *channel) | ||
| 59 | { | ||
| 60 | EFX_TRACE(channel->efx, "channel %d scheduling NAPI poll on CPU%d\n", | ||
| 61 | channel->channel, raw_smp_processor_id()); | ||
| 62 | channel->work_pending = 1; | ||
| 63 | |||
| 64 | netif_rx_schedule(channel->napi_dev, &channel->napi_str); | ||
| 65 | } | ||
| 66 | |||
| 67 | #endif /* EFX_EFX_H */ | ||
diff --git a/drivers/net/sfc/enum.h b/drivers/net/sfc/enum.h new file mode 100644 index 000000000000..43663a4619da --- /dev/null +++ b/drivers/net/sfc/enum.h | |||
| @@ -0,0 +1,50 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_ENUM_H | ||
| 11 | #define EFX_ENUM_H | ||
| 12 | |||
| 13 | /*****************************************************************************/ | ||
| 14 | |||
| 15 | /** | ||
| 16 | * enum reset_type - reset types | ||
| 17 | * | ||
| 18 | * %RESET_TYPE_INVISIBLE, %RESET_TYPE_ALL, %RESET_TYPE_WORLD and | ||
| 19 | * %RESET_TYPE_DISABLE specify the method/scope of the reset. The | ||
| 20 | * other values specify reasons, which efx_schedule_reset() will choose | ||
| 21 | * a method for. | ||
| 22 | * | ||
| 23 | * @RESET_TYPE_INVISIBLE: don't reset the PHYs or interrupts | ||
| 24 | * @RESET_TYPE_ALL: reset everything but PCI core blocks | ||
| 25 | * @RESET_TYPE_WORLD: reset everything, save & restore PCI config | ||
| 26 | * @RESET_TYPE_DISABLE: disable NIC | ||
| 27 | * @RESET_TYPE_MONITOR: reset due to hardware monitor | ||
| 28 | * @RESET_TYPE_INT_ERROR: reset due to internal error | ||
| 29 | * @RESET_TYPE_RX_RECOVERY: reset to recover from RX datapath errors | ||
| 30 | * @RESET_TYPE_RX_DESC_FETCH: pcie error during rx descriptor fetch | ||
| 31 | * @RESET_TYPE_TX_DESC_FETCH: pcie error during tx descriptor fetch | ||
| 32 | * @RESET_TYPE_TX_SKIP: hardware completed empty tx descriptors | ||
| 33 | */ | ||
| 34 | enum reset_type { | ||
| 35 | RESET_TYPE_NONE = -1, | ||
| 36 | RESET_TYPE_INVISIBLE = 0, | ||
| 37 | RESET_TYPE_ALL = 1, | ||
| 38 | RESET_TYPE_WORLD = 2, | ||
| 39 | RESET_TYPE_DISABLE = 3, | ||
| 40 | RESET_TYPE_MAX_METHOD, | ||
| 41 | RESET_TYPE_MONITOR, | ||
| 42 | RESET_TYPE_INT_ERROR, | ||
| 43 | RESET_TYPE_RX_RECOVERY, | ||
| 44 | RESET_TYPE_RX_DESC_FETCH, | ||
| 45 | RESET_TYPE_TX_DESC_FETCH, | ||
| 46 | RESET_TYPE_TX_SKIP, | ||
| 47 | RESET_TYPE_MAX, | ||
| 48 | }; | ||
| 49 | |||
| 50 | #endif /* EFX_ENUM_H */ | ||
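
The split in the enum above is worth spelling out: everything below RESET_TYPE_MAX_METHOD is a reset method/scope that can be executed directly, while everything above it is only a reason that efx_schedule_reset() must still translate into one of the methods. A small stand-alone C sketch of such a translation is shown below; the enum values are copied from enum.h, but the reason-to-method mapping is an assumption for illustration only, since the driver's real policy is implemented elsewhere.

```c
#include <stdio.h>

/* Values copied from the enum above; entries below RESET_TYPE_MAX_METHOD
 * are concrete methods/scopes, entries above it are only reasons. */
enum reset_type {
	RESET_TYPE_NONE = -1,
	RESET_TYPE_INVISIBLE = 0,
	RESET_TYPE_ALL = 1,
	RESET_TYPE_WORLD = 2,
	RESET_TYPE_DISABLE = 3,
	RESET_TYPE_MAX_METHOD,
	RESET_TYPE_MONITOR,
	RESET_TYPE_INT_ERROR,
	RESET_TYPE_RX_RECOVERY,
	RESET_TYPE_RX_DESC_FETCH,
	RESET_TYPE_TX_DESC_FETCH,
	RESET_TYPE_TX_SKIP,
	RESET_TYPE_MAX,
};

/* Hypothetical reason-to-method mapping; the driver's real policy lives in
 * efx_schedule_reset(), which is not part of this hunk. */
static enum reset_type pick_method(enum reset_type reason)
{
	if (reason > RESET_TYPE_NONE && reason < RESET_TYPE_MAX_METHOD)
		return reason;			/* already a method */

	switch (reason) {
	case RESET_TYPE_RX_RECOVERY:
	case RESET_TYPE_RX_DESC_FETCH:
	case RESET_TYPE_TX_DESC_FETCH:
	case RESET_TYPE_TX_SKIP:
		return RESET_TYPE_ALL;		/* assumed: datapath faults get a full reset */
	default:
		return RESET_TYPE_DISABLE;	/* assumed: anything else disables the NIC */
	}
}

int main(void)
{
	printf("RX_RECOVERY -> method %d\n", pick_method(RESET_TYPE_RX_RECOVERY));
	printf("INVISIBLE   -> method %d\n", pick_method(RESET_TYPE_INVISIBLE));
	return 0;
}
```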
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c new file mode 100644 index 000000000000..ad541badbd98 --- /dev/null +++ b/drivers/net/sfc/ethtool.c | |||
| @@ -0,0 +1,460 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/netdevice.h> | ||
| 12 | #include <linux/ethtool.h> | ||
| 13 | #include <linux/rtnetlink.h> | ||
| 14 | #include "net_driver.h" | ||
| 15 | #include "efx.h" | ||
| 16 | #include "ethtool.h" | ||
| 17 | #include "falcon.h" | ||
| 18 | #include "gmii.h" | ||
| 19 | #include "mac.h" | ||
| 20 | |||
| 21 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable); | ||
| 22 | |||
| 23 | struct ethtool_string { | ||
| 24 | char name[ETH_GSTRING_LEN]; | ||
| 25 | }; | ||
| 26 | |||
| 27 | struct efx_ethtool_stat { | ||
| 28 | const char *name; | ||
| 29 | enum { | ||
| 30 | EFX_ETHTOOL_STAT_SOURCE_mac_stats, | ||
| 31 | EFX_ETHTOOL_STAT_SOURCE_nic, | ||
| 32 | EFX_ETHTOOL_STAT_SOURCE_channel | ||
| 33 | } source; | ||
| 34 | unsigned offset; | ||
| 35 | u64(*get_stat) (void *field); /* Reader function */ | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* Initialiser for a struct #efx_ethtool_stat with type-checking */ | ||
| 39 | #define EFX_ETHTOOL_STAT(stat_name, source_name, field, field_type, \ | ||
| 40 | get_stat_function) { \ | ||
| 41 | .name = #stat_name, \ | ||
| 42 | .source = EFX_ETHTOOL_STAT_SOURCE_##source_name, \ | ||
| 43 | .offset = ((((field_type *) 0) == \ | ||
| 44 | &((struct efx_##source_name *)0)->field) ? \ | ||
| 45 | offsetof(struct efx_##source_name, field) : \ | ||
| 46 | offsetof(struct efx_##source_name, field)), \ | ||
| 47 | .get_stat = get_stat_function, \ | ||
| 48 | } | ||
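
The EFX_ETHTOOL_STAT() initialiser looks redundant at first glance, because both arms of the ternary expand to the same offsetof(). The comparison exists purely for compile time: if the field's real type does not match field_type, comparing the two pointers produces a "comparison of distinct pointer types" diagnostic. A stand-alone sketch of the same trick, using an invented demo structure rather than the driver's types, is:

```c
#include <stddef.h>
#include <stdio.h>

/* Toy structure standing in for struct efx_mac_stats etc.; the names here
 * are illustrative only. */
struct demo_stats {
	unsigned long tx_packets;
	unsigned long long tx_bytes;
};

/* Same trick as EFX_ETHTOOL_STAT above: comparing a (field_type *)0 against
 * the address of the field makes the compiler complain when the declared
 * type and the real field type disagree; both ternary arms yield the same
 * offset, so the check costs nothing at run time. */
#define CHECKED_OFFSET(type, field_type, field)			\
	((((field_type *)0) == &((type *)0)->field) ?		\
	 offsetof(type, field) : offsetof(type, field))

int main(void)
{
	/* Compiles cleanly: tx_packets really is an unsigned long. */
	size_t ok = CHECKED_OFFSET(struct demo_stats, unsigned long, tx_packets);

	/* Uncommenting the next line triggers a "comparison of distinct
	 * pointer types" diagnostic, because tx_bytes is not unsigned long:
	 * size_t bad = CHECKED_OFFSET(struct demo_stats, unsigned long, tx_bytes);
	 */
	printf("offset of tx_packets = %zu\n", ok);
	return 0;
}
```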
| 49 | |||
| 50 | static u64 efx_get_uint_stat(void *field) | ||
| 51 | { | ||
| 52 | return *(unsigned int *)field; | ||
| 53 | } | ||
| 54 | |||
| 55 | static u64 efx_get_ulong_stat(void *field) | ||
| 56 | { | ||
| 57 | return *(unsigned long *)field; | ||
| 58 | } | ||
| 59 | |||
| 60 | static u64 efx_get_u64_stat(void *field) | ||
| 61 | { | ||
| 62 | return *(u64 *) field; | ||
| 63 | } | ||
| 64 | |||
| 65 | static u64 efx_get_atomic_stat(void *field) | ||
| 66 | { | ||
| 67 | return atomic_read((atomic_t *) field); | ||
| 68 | } | ||
| 69 | |||
| 70 | #define EFX_ETHTOOL_ULONG_MAC_STAT(field) \ | ||
| 71 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | ||
| 72 | unsigned long, efx_get_ulong_stat) | ||
| 73 | |||
| 74 | #define EFX_ETHTOOL_U64_MAC_STAT(field) \ | ||
| 75 | EFX_ETHTOOL_STAT(field, mac_stats, field, \ | ||
| 76 | u64, efx_get_u64_stat) | ||
| 77 | |||
| 78 | #define EFX_ETHTOOL_UINT_NIC_STAT(name) \ | ||
| 79 | EFX_ETHTOOL_STAT(name, nic, n_##name, \ | ||
| 80 | unsigned int, efx_get_uint_stat) | ||
| 81 | |||
| 82 | #define EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(field) \ | ||
| 83 | EFX_ETHTOOL_STAT(field, nic, field, \ | ||
| 84 | atomic_t, efx_get_atomic_stat) | ||
| 85 | |||
| 86 | #define EFX_ETHTOOL_UINT_CHANNEL_STAT(field) \ | ||
| 87 | EFX_ETHTOOL_STAT(field, channel, n_##field, \ | ||
| 88 | unsigned int, efx_get_uint_stat) | ||
| 89 | |||
| 90 | static struct efx_ethtool_stat efx_ethtool_stats[] = { | ||
| 91 | EFX_ETHTOOL_U64_MAC_STAT(tx_bytes), | ||
| 92 | EFX_ETHTOOL_U64_MAC_STAT(tx_good_bytes), | ||
| 93 | EFX_ETHTOOL_U64_MAC_STAT(tx_bad_bytes), | ||
| 94 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_packets), | ||
| 95 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_bad), | ||
| 96 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_pause), | ||
| 97 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_control), | ||
| 98 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_unicast), | ||
| 99 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multicast), | ||
| 100 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_broadcast), | ||
| 101 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_lt64), | ||
| 102 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_64), | ||
| 103 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_65_to_127), | ||
| 104 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_128_to_255), | ||
| 105 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_256_to_511), | ||
| 106 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_512_to_1023), | ||
| 107 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_1024_to_15xx), | ||
| 108 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_15xx_to_jumbo), | ||
| 109 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_gtjumbo), | ||
| 110 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_collision), | ||
| 111 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_single_collision), | ||
| 112 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_multiple_collision), | ||
| 113 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_collision), | ||
| 114 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_deferred), | ||
| 115 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_late_collision), | ||
| 116 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_excessive_deferred), | ||
| 117 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_non_tcpudp), | ||
| 118 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_mac_src_error), | ||
| 119 | EFX_ETHTOOL_ULONG_MAC_STAT(tx_ip_src_error), | ||
| 120 | EFX_ETHTOOL_U64_MAC_STAT(rx_bytes), | ||
| 121 | EFX_ETHTOOL_U64_MAC_STAT(rx_good_bytes), | ||
| 122 | EFX_ETHTOOL_U64_MAC_STAT(rx_bad_bytes), | ||
| 123 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_packets), | ||
| 124 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_good), | ||
| 125 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad), | ||
| 126 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_pause), | ||
| 127 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_control), | ||
| 128 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_unicast), | ||
| 129 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_multicast), | ||
| 130 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_broadcast), | ||
| 131 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_lt64), | ||
| 132 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_64), | ||
| 133 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_65_to_127), | ||
| 134 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_128_to_255), | ||
| 135 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_256_to_511), | ||
| 136 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_512_to_1023), | ||
| 137 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_1024_to_15xx), | ||
| 138 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_15xx_to_jumbo), | ||
| 139 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_gtjumbo), | ||
| 140 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_lt64), | ||
| 141 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_64_to_15xx), | ||
| 142 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_15xx_to_jumbo), | ||
| 143 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_bad_gtjumbo), | ||
| 144 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_overflow), | ||
| 145 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_missed), | ||
| 146 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_false_carrier), | ||
| 147 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_symbol_error), | ||
| 148 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_align_error), | ||
| 149 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_length_error), | ||
| 150 | EFX_ETHTOOL_ULONG_MAC_STAT(rx_internal_error), | ||
| 151 | EFX_ETHTOOL_UINT_NIC_STAT(rx_nodesc_drop_cnt), | ||
| 152 | EFX_ETHTOOL_ATOMIC_NIC_ERROR_STAT(rx_reset), | ||
| 153 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tobe_disc), | ||
| 154 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_ip_hdr_chksum_err), | ||
| 155 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err), | ||
| 156 | EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc), | ||
| 157 | }; | ||
| 158 | |||
| 159 | /* Number of ethtool statistics */ | ||
| 160 | #define EFX_ETHTOOL_NUM_STATS ARRAY_SIZE(efx_ethtool_stats) | ||
| 161 | |||
| 162 | /************************************************************************** | ||
| 163 | * | ||
| 164 | * Ethtool operations | ||
| 165 | * | ||
| 166 | ************************************************************************** | ||
| 167 | */ | ||
| 168 | |||
| 169 | /* Identify device by flashing LEDs */ | ||
| 170 | static int efx_ethtool_phys_id(struct net_device *net_dev, u32 seconds) | ||
| 171 | { | ||
| 172 | struct efx_nic *efx = net_dev->priv; | ||
| 173 | |||
| 174 | efx->board_info.blink(efx, 1); | ||
| 175 | schedule_timeout_interruptible(seconds * HZ); | ||
| 176 | efx->board_info.blink(efx, 0); | ||
| 177 | return 0; | ||
| 178 | } | ||
| 179 | |||
| 180 | /* This must be called with rtnl_lock held. */ | ||
| 181 | int efx_ethtool_get_settings(struct net_device *net_dev, | ||
| 182 | struct ethtool_cmd *ecmd) | ||
| 183 | { | ||
| 184 | struct efx_nic *efx = net_dev->priv; | ||
| 185 | int rc; | ||
| 186 | |||
| 187 | mutex_lock(&efx->mac_lock); | ||
| 188 | rc = falcon_xmac_get_settings(efx, ecmd); | ||
| 189 | mutex_unlock(&efx->mac_lock); | ||
| 190 | |||
| 191 | return rc; | ||
| 192 | } | ||
| 193 | |||
| 194 | /* This must be called with rtnl_lock held. */ | ||
| 195 | int efx_ethtool_set_settings(struct net_device *net_dev, | ||
| 196 | struct ethtool_cmd *ecmd) | ||
| 197 | { | ||
| 198 | struct efx_nic *efx = net_dev->priv; | ||
| 199 | int rc; | ||
| 200 | |||
| 201 | mutex_lock(&efx->mac_lock); | ||
| 202 | rc = falcon_xmac_set_settings(efx, ecmd); | ||
| 203 | mutex_unlock(&efx->mac_lock); | ||
| 204 | if (!rc) | ||
| 205 | efx_reconfigure_port(efx); | ||
| 206 | |||
| 207 | return rc; | ||
| 208 | } | ||
| 209 | |||
| 210 | static void efx_ethtool_get_drvinfo(struct net_device *net_dev, | ||
| 211 | struct ethtool_drvinfo *info) | ||
| 212 | { | ||
| 213 | struct efx_nic *efx = net_dev->priv; | ||
| 214 | |||
| 215 | strlcpy(info->driver, EFX_DRIVER_NAME, sizeof(info->driver)); | ||
| 216 | strlcpy(info->version, EFX_DRIVER_VERSION, sizeof(info->version)); | ||
| 217 | strlcpy(info->bus_info, pci_name(efx->pci_dev), sizeof(info->bus_info)); | ||
| 218 | } | ||
| 219 | |||
| 220 | static int efx_ethtool_get_stats_count(struct net_device *net_dev) | ||
| 221 | { | ||
| 222 | return EFX_ETHTOOL_NUM_STATS; | ||
| 223 | } | ||
| 224 | |||
| 225 | static void efx_ethtool_get_strings(struct net_device *net_dev, | ||
| 226 | u32 string_set, u8 *strings) | ||
| 227 | { | ||
| 228 | struct ethtool_string *ethtool_strings = | ||
| 229 | (struct ethtool_string *)strings; | ||
| 230 | int i; | ||
| 231 | |||
| 232 | if (string_set == ETH_SS_STATS) | ||
| 233 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) | ||
| 234 | strncpy(ethtool_strings[i].name, | ||
| 235 | efx_ethtool_stats[i].name, | ||
| 236 | sizeof(ethtool_strings[i].name)); | ||
| 237 | } | ||
| 238 | |||
| 239 | static void efx_ethtool_get_stats(struct net_device *net_dev, | ||
| 240 | struct ethtool_stats *stats, | ||
| 241 | u64 *data) | ||
| 242 | { | ||
| 243 | struct efx_nic *efx = net_dev->priv; | ||
| 244 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | ||
| 245 | struct efx_ethtool_stat *stat; | ||
| 246 | struct efx_channel *channel; | ||
| 247 | int i; | ||
| 248 | |||
| 249 | EFX_BUG_ON_PARANOID(stats->n_stats != EFX_ETHTOOL_NUM_STATS); | ||
| 250 | |||
| 251 | /* Update MAC and NIC statistics */ | ||
| 252 | net_dev->get_stats(net_dev); | ||
| 253 | |||
| 254 | /* Fill detailed statistics buffer */ | ||
| 255 | for (i = 0; i < EFX_ETHTOOL_NUM_STATS; i++) { | ||
| 256 | stat = &efx_ethtool_stats[i]; | ||
| 257 | switch (stat->source) { | ||
| 258 | case EFX_ETHTOOL_STAT_SOURCE_mac_stats: | ||
| 259 | data[i] = stat->get_stat((void *)mac_stats + | ||
| 260 | stat->offset); | ||
| 261 | break; | ||
| 262 | case EFX_ETHTOOL_STAT_SOURCE_nic: | ||
| 263 | data[i] = stat->get_stat((void *)efx + stat->offset); | ||
| 264 | break; | ||
| 265 | case EFX_ETHTOOL_STAT_SOURCE_channel: | ||
| 266 | data[i] = 0; | ||
| 267 | efx_for_each_channel(channel, efx) | ||
| 268 | data[i] += stat->get_stat((void *)channel + | ||
| 269 | stat->offset); | ||
| 270 | break; | ||
| 271 | } | ||
| 272 | } | ||
| 273 | } | ||
| 274 | |||
| 275 | static int efx_ethtool_set_tx_csum(struct net_device *net_dev, u32 enable) | ||
| 276 | { | ||
| 277 | struct efx_nic *efx = net_dev->priv; | ||
| 278 | int rc; | ||
| 279 | |||
| 280 | rc = ethtool_op_set_tx_csum(net_dev, enable); | ||
| 281 | if (rc) | ||
| 282 | return rc; | ||
| 283 | |||
| 284 | efx_flush_queues(efx); | ||
| 285 | |||
| 286 | return 0; | ||
| 287 | } | ||
| 288 | |||
| 289 | static int efx_ethtool_set_rx_csum(struct net_device *net_dev, u32 enable) | ||
| 290 | { | ||
| 291 | struct efx_nic *efx = net_dev->priv; | ||
| 292 | |||
| 293 | /* No way to stop the hardware doing the checks; we just | ||
| 294 | * ignore the result. | ||
| 295 | */ | ||
| 296 | efx->rx_checksum_enabled = (enable ? 1 : 0); | ||
| 297 | |||
| 298 | return 0; | ||
| 299 | } | ||
| 300 | |||
| 301 | static u32 efx_ethtool_get_rx_csum(struct net_device *net_dev) | ||
| 302 | { | ||
| 303 | struct efx_nic *efx = net_dev->priv; | ||
| 304 | |||
| 305 | return efx->rx_checksum_enabled; | ||
| 306 | } | ||
| 307 | |||
| 308 | /* Restart autonegotiation */ | ||
| 309 | static int efx_ethtool_nway_reset(struct net_device *net_dev) | ||
| 310 | { | ||
| 311 | struct efx_nic *efx = net_dev->priv; | ||
| 312 | |||
| 313 | return mii_nway_restart(&efx->mii); | ||
| 314 | } | ||
| 315 | |||
| 316 | static u32 efx_ethtool_get_link(struct net_device *net_dev) | ||
| 317 | { | ||
| 318 | struct efx_nic *efx = net_dev->priv; | ||
| 319 | |||
| 320 | return efx->link_up; | ||
| 321 | } | ||
| 322 | |||
| 323 | static int efx_ethtool_get_coalesce(struct net_device *net_dev, | ||
| 324 | struct ethtool_coalesce *coalesce) | ||
| 325 | { | ||
| 326 | struct efx_nic *efx = net_dev->priv; | ||
| 327 | struct efx_tx_queue *tx_queue; | ||
| 328 | struct efx_rx_queue *rx_queue; | ||
| 329 | struct efx_channel *channel; | ||
| 330 | |||
| 331 | memset(coalesce, 0, sizeof(*coalesce)); | ||
| 332 | |||
| 333 | /* Find lowest IRQ moderation across all used TX queues */ | ||
| 334 | coalesce->tx_coalesce_usecs_irq = ~((u32) 0); | ||
| 335 | efx_for_each_tx_queue(tx_queue, efx) { | ||
| 336 | channel = tx_queue->channel; | ||
| 337 | if (channel->irq_moderation < coalesce->tx_coalesce_usecs_irq) { | ||
| 338 | if (channel->used_flags != EFX_USED_BY_RX_TX) | ||
| 339 | coalesce->tx_coalesce_usecs_irq = | ||
| 340 | channel->irq_moderation; | ||
| 341 | else | ||
| 342 | coalesce->tx_coalesce_usecs_irq = 0; | ||
| 343 | } | ||
| 344 | } | ||
| 345 | |||
| 346 | /* Find lowest IRQ moderation across all used RX queues */ | ||
| 347 | coalesce->rx_coalesce_usecs_irq = ~((u32) 0); | ||
| 348 | efx_for_each_rx_queue(rx_queue, efx) { | ||
| 349 | channel = rx_queue->channel; | ||
| 350 | if (channel->irq_moderation < coalesce->rx_coalesce_usecs_irq) | ||
| 351 | coalesce->rx_coalesce_usecs_irq = | ||
| 352 | channel->irq_moderation; | ||
| 353 | } | ||
| 354 | |||
| 355 | return 0; | ||
| 356 | } | ||
| 357 | |||
| 358 | /* Set coalescing parameters | ||
| 359 | * The difficult cases are channels shared between RX and TX | ||
| 360 | */ | ||
| 361 | static int efx_ethtool_set_coalesce(struct net_device *net_dev, | ||
| 362 | struct ethtool_coalesce *coalesce) | ||
| 363 | { | ||
| 364 | struct efx_nic *efx = net_dev->priv; | ||
| 365 | struct efx_channel *channel; | ||
| 366 | struct efx_tx_queue *tx_queue; | ||
| 367 | unsigned tx_usecs, rx_usecs; | ||
| 368 | |||
| 369 | if (coalesce->use_adaptive_rx_coalesce || | ||
| 370 | coalesce->use_adaptive_tx_coalesce) | ||
| 371 | return -EOPNOTSUPP; | ||
| 372 | |||
| 373 | if (coalesce->rx_coalesce_usecs || coalesce->tx_coalesce_usecs) { | ||
| 374 | EFX_ERR(efx, "invalid coalescing setting. " | ||
| 375 | "Only rx/tx_coalesce_usecs_irq are supported\n"); | ||
| 376 | return -EOPNOTSUPP; | ||
| 377 | } | ||
| 378 | |||
| 379 | rx_usecs = coalesce->rx_coalesce_usecs_irq; | ||
| 380 | tx_usecs = coalesce->tx_coalesce_usecs_irq; | ||
| 381 | |||
| 382 | /* If the channel is shared only allow RX parameters to be set */ | ||
| 383 | efx_for_each_tx_queue(tx_queue, efx) { | ||
| 384 | if ((tx_queue->channel->used_flags == EFX_USED_BY_RX_TX) && | ||
| 385 | tx_usecs) { | ||
| 386 | EFX_ERR(efx, "Channel is shared. " | ||
| 387 | "Only RX coalescing may be set\n"); | ||
| 388 | return -EOPNOTSUPP; | ||
| 389 | } | ||
| 390 | } | ||
| 391 | |||
| 392 | efx_init_irq_moderation(efx, tx_usecs, rx_usecs); | ||
| 393 | |||
| 394 | /* Reset channel to pick up new moderation value. Note that | ||
| 395 | * this may change the value of the irq_moderation field | ||
| 396 | * (e.g. to allow for hardware timer granularity). | ||
| 397 | */ | ||
| 398 | efx_for_each_channel(channel, efx) | ||
| 399 | falcon_set_int_moderation(channel); | ||
| 400 | |||
| 401 | return 0; | ||
| 402 | } | ||
| 403 | |||
| 404 | static int efx_ethtool_set_pauseparam(struct net_device *net_dev, | ||
| 405 | struct ethtool_pauseparam *pause) | ||
| 406 | { | ||
| 407 | struct efx_nic *efx = net_dev->priv; | ||
| 408 | enum efx_fc_type flow_control = efx->flow_control; | ||
| 409 | int rc; | ||
| 410 | |||
| 411 | flow_control &= ~(EFX_FC_RX | EFX_FC_TX | EFX_FC_AUTO); | ||
| 412 | flow_control |= pause->rx_pause ? EFX_FC_RX : 0; | ||
| 413 | flow_control |= pause->tx_pause ? EFX_FC_TX : 0; | ||
| 414 | flow_control |= pause->autoneg ? EFX_FC_AUTO : 0; | ||
| 415 | |||
| 416 | /* Try to push the pause parameters */ | ||
| 417 | mutex_lock(&efx->mac_lock); | ||
| 418 | rc = falcon_xmac_set_pause(efx, flow_control); | ||
| 419 | mutex_unlock(&efx->mac_lock); | ||
| 420 | |||
| 421 | if (!rc) | ||
| 422 | efx_reconfigure_port(efx); | ||
| 423 | |||
| 424 | return rc; | ||
| 425 | } | ||
| 426 | |||
| 427 | static void efx_ethtool_get_pauseparam(struct net_device *net_dev, | ||
| 428 | struct ethtool_pauseparam *pause) | ||
| 429 | { | ||
| 430 | struct efx_nic *efx = net_dev->priv; | ||
| 431 | |||
| 432 | pause->rx_pause = (efx->flow_control & EFX_FC_RX) ? 1 : 0; | ||
| 433 | pause->tx_pause = (efx->flow_control & EFX_FC_TX) ? 1 : 0; | ||
| 434 | pause->autoneg = (efx->flow_control & EFX_FC_AUTO) ? 1 : 0; | ||
| 435 | } | ||
| 436 | |||
| 437 | |||
| 438 | struct ethtool_ops efx_ethtool_ops = { | ||
| 439 | .get_settings = efx_ethtool_get_settings, | ||
| 440 | .set_settings = efx_ethtool_set_settings, | ||
| 441 | .get_drvinfo = efx_ethtool_get_drvinfo, | ||
| 442 | .nway_reset = efx_ethtool_nway_reset, | ||
| 443 | .get_link = efx_ethtool_get_link, | ||
| 444 | .get_coalesce = efx_ethtool_get_coalesce, | ||
| 445 | .set_coalesce = efx_ethtool_set_coalesce, | ||
| 446 | .get_pauseparam = efx_ethtool_get_pauseparam, | ||
| 447 | .set_pauseparam = efx_ethtool_set_pauseparam, | ||
| 448 | .get_rx_csum = efx_ethtool_get_rx_csum, | ||
| 449 | .set_rx_csum = efx_ethtool_set_rx_csum, | ||
| 450 | .get_tx_csum = ethtool_op_get_tx_csum, | ||
| 451 | .set_tx_csum = efx_ethtool_set_tx_csum, | ||
| 452 | .get_sg = ethtool_op_get_sg, | ||
| 453 | .set_sg = ethtool_op_set_sg, | ||
| 454 | .get_flags = ethtool_op_get_flags, | ||
| 455 | .set_flags = ethtool_op_set_flags, | ||
| 456 | .get_strings = efx_ethtool_get_strings, | ||
| 457 | .phys_id = efx_ethtool_phys_id, | ||
| 458 | .get_stats_count = efx_ethtool_get_stats_count, | ||
| 459 | .get_ethtool_stats = efx_ethtool_get_stats, | ||
| 460 | }; | ||
diff --git a/drivers/net/sfc/ethtool.h b/drivers/net/sfc/ethtool.h new file mode 100644 index 000000000000..3628e43df14d --- /dev/null +++ b/drivers/net/sfc/ethtool.h | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005 Fen Systems Ltd. | ||
| 4 | * Copyright 2006 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_ETHTOOL_H | ||
| 12 | #define EFX_ETHTOOL_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Ethtool support | ||
| 18 | */ | ||
| 19 | |||
| 20 | extern int efx_ethtool_get_settings(struct net_device *net_dev, | ||
| 21 | struct ethtool_cmd *ecmd); | ||
| 22 | extern int efx_ethtool_set_settings(struct net_device *net_dev, | ||
| 23 | struct ethtool_cmd *ecmd); | ||
| 24 | |||
| 25 | extern struct ethtool_ops efx_ethtool_ops; | ||
| 26 | |||
| 27 | #endif /* EFX_ETHTOOL_H */ | ||
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c new file mode 100644 index 000000000000..46db549ce580 --- /dev/null +++ b/drivers/net/sfc/falcon.c | |||
| @@ -0,0 +1,2722 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/bitops.h> | ||
| 12 | #include <linux/delay.h> | ||
| 13 | #include <linux/pci.h> | ||
| 14 | #include <linux/module.h> | ||
| 15 | #include <linux/seq_file.h> | ||
| 16 | #include "net_driver.h" | ||
| 17 | #include "bitfield.h" | ||
| 18 | #include "efx.h" | ||
| 19 | #include "mac.h" | ||
| 20 | #include "gmii.h" | ||
| 21 | #include "spi.h" | ||
| 22 | #include "falcon.h" | ||
| 23 | #include "falcon_hwdefs.h" | ||
| 24 | #include "falcon_io.h" | ||
| 25 | #include "mdio_10g.h" | ||
| 26 | #include "phy.h" | ||
| 27 | #include "boards.h" | ||
| 28 | #include "workarounds.h" | ||
| 29 | |||
| 30 | /* Falcon hardware control. | ||
| 31 | * Falcon is the internal codename for the SFC4000 controller that is | ||
| 32 | * present in SFE400X evaluation boards | ||
| 33 | */ | ||
| 34 | |||
| 35 | /** | ||
| 36 | * struct falcon_nic_data - Falcon NIC state | ||
| 37 | * @next_buffer_table: First available buffer table id | ||
| 38 | * @pci_dev2: The secondary PCI device if present | ||
| 39 | */ | ||
| 40 | struct falcon_nic_data { | ||
| 41 | unsigned next_buffer_table; | ||
| 42 | struct pci_dev *pci_dev2; | ||
| 43 | }; | ||
| 44 | |||
| 45 | /************************************************************************** | ||
| 46 | * | ||
| 47 | * Configurable values | ||
| 48 | * | ||
| 49 | ************************************************************************** | ||
| 50 | */ | ||
| 51 | |||
| 52 | static int disable_dma_stats; | ||
| 53 | |||
| 54 | /* This is set to 16 for a good reason. In summary, if larger than | ||
| 55 | * 16, the descriptor cache holds more than a default socket | ||
| 56 | * buffer's worth of packets (for UDP we can only have at most one | ||
| 57 | * socket buffer's worth outstanding). This combined with the fact | ||
| 58 | * that we only get 1 TX event per descriptor cache means the NIC | ||
| 59 | * goes idle. | ||
| 60 | */ | ||
| 61 | #define TX_DC_ENTRIES 16 | ||
| 62 | #define TX_DC_ENTRIES_ORDER 0 | ||
| 63 | #define TX_DC_BASE 0x130000 | ||
| 64 | |||
| 65 | #define RX_DC_ENTRIES 64 | ||
| 66 | #define RX_DC_ENTRIES_ORDER 2 | ||
| 67 | #define RX_DC_BASE 0x100000 | ||
| 68 | |||
| 69 | /* RX FIFO XOFF watermark | ||
| 70 | * | ||
| 71 | * When the amount of the RX FIFO used increases past this | ||
| 72 | * watermark, send XOFF. Only used if RX flow control is enabled (ethtool -A). | ||
| 73 | * This also has an effect on RX/TX arbitration | ||
| 74 | */ | ||
| 75 | static int rx_xoff_thresh_bytes = -1; | ||
| 76 | module_param(rx_xoff_thresh_bytes, int, 0644); | ||
| 77 | MODULE_PARM_DESC(rx_xoff_thresh_bytes, "RX fifo XOFF threshold"); | ||
| 78 | |||
| 79 | /* RX FIFO XON watermark | ||
| 80 | * | ||
| 81 | * When the amount of the RX FIFO used decreases below this | ||
| 82 | * watermark, send XON. Only used if TX flow control is enabled (ethtool -A). | ||
| 83 | * This also has an effect on RX/TX arbitration | ||
| 84 | */ | ||
| 85 | static int rx_xon_thresh_bytes = -1; | ||
| 86 | module_param(rx_xon_thresh_bytes, int, 0644); | ||
| 87 | MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); | ||
| 88 | |||
| 89 | /* TX descriptor ring size - min 512 max 4k */ | ||
| 90 | #define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K | ||
| 91 | #define FALCON_TXD_RING_SIZE 1024 | ||
| 92 | #define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1) | ||
| 93 | |||
| 94 | /* RX descriptor ring size - min 512 max 4k */ | ||
| 95 | #define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K | ||
| 96 | #define FALCON_RXD_RING_SIZE 1024 | ||
| 97 | #define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1) | ||
| 98 | |||
| 99 | /* Event queue size - max 32k */ | ||
| 100 | #define FALCON_EVQ_ORDER EVQ_SIZE_4K | ||
| 101 | #define FALCON_EVQ_SIZE 4096 | ||
| 102 | #define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1) | ||
| 103 | |||
| 104 | /* Max number of internal errors. After this resets will not be performed */ | ||
| 105 | #define FALCON_MAX_INT_ERRORS 4 | ||
| 106 | |||
| 107 | /* Maximum period that we wait for flush events. If the flush event | ||
| 108 | * doesn't arrive in this period of time then we check if the queue | ||
| 109 | * was disabled anyway. */ | ||
| 110 | #define FALCON_FLUSH_TIMEOUT 10 /* 10ms */ | ||
| 111 | |||
| 112 | /************************************************************************** | ||
| 113 | * | ||
| 114 | * Falcon constants | ||
| 115 | * | ||
| 116 | ************************************************************************** | ||
| 117 | */ | ||
| 118 | |||
| 119 | /* DMA address mask (up to 46-bit, avoiding compiler warnings) | ||
| 120 | * | ||
| 121 | * Note that it is possible to have a platform with 64-bit longs and | ||
| 122 | * 32-bit DMA addresses, or vice versa. EFX_DMA_MASK takes care of the | ||
| 123 | * platform DMA mask. | ||
| 124 | */ | ||
| 125 | #if BITS_PER_LONG == 64 | ||
| 126 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffUL) | ||
| 127 | #else | ||
| 128 | #define FALCON_DMA_MASK EFX_DMA_MASK(0x00003fffffffffffULL) | ||
| 129 | #endif | ||
| 130 | |||
| 131 | /* TX DMA length mask (13-bit) */ | ||
| 132 | #define FALCON_TX_DMA_MASK (4096 - 1) | ||
| 133 | |||
| 134 | /* Size and alignment of special buffers (4KB) */ | ||
| 135 | #define FALCON_BUF_SIZE 4096 | ||
| 136 | |||
| 137 | /* Dummy SRAM size code */ | ||
| 138 | #define SRM_NB_BSZ_ONCHIP_ONLY (-1) | ||
| 139 | |||
| 140 | /* Be nice if these (or equiv.) were in linux/pci_regs.h, but they're not. */ | ||
| 141 | #define PCI_EXP_DEVCAP_PWR_VAL_LBN 18 | ||
| 142 | #define PCI_EXP_DEVCAP_PWR_SCL_LBN 26 | ||
| 143 | #define PCI_EXP_DEVCTL_PAYLOAD_LBN 5 | ||
| 144 | #define PCI_EXP_LNKSTA_LNK_WID 0x3f0 | ||
| 145 | #define PCI_EXP_LNKSTA_LNK_WID_LBN 4 | ||
| 146 | |||
| 147 | #define FALCON_IS_DUAL_FUNC(efx) \ | ||
| 148 | (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 149 | |||
| 150 | /************************************************************************** | ||
| 151 | * | ||
| 152 | * Falcon hardware access | ||
| 153 | * | ||
| 154 | **************************************************************************/ | ||
| 155 | |||
| 156 | /* Read the current event from the event queue */ | ||
| 157 | static inline efx_qword_t *falcon_event(struct efx_channel *channel, | ||
| 158 | unsigned int index) | ||
| 159 | { | ||
| 160 | return (((efx_qword_t *) (channel->eventq.addr)) + index); | ||
| 161 | } | ||
| 162 | |||
| 163 | /* See if an event is present | ||
| 164 | * | ||
| 165 | * We check both the high and low dword of the event for all ones. We | ||
| 166 | * wrote all ones when we cleared the event, and no valid event can | ||
| 167 | * have all ones in either its high or low dwords. This approach is | ||
| 168 | * robust against reordering. | ||
| 169 | * | ||
| 170 | * Note that using a single 64-bit comparison is incorrect; even | ||
| 171 | * though the CPU read will be atomic, the DMA write may not be. | ||
| 172 | */ | ||
| 173 | static inline int falcon_event_present(efx_qword_t *event) | ||
| 174 | { | ||
| 175 | return (!(EFX_DWORD_IS_ALL_ONES(event->dword[0]) | | ||
| 176 | EFX_DWORD_IS_ALL_ONES(event->dword[1]))); | ||
| 177 | } | ||
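
The pairing between this check and the buffer set-up is worth noting: falcon_alloc_special_buffer() later in this file fills freshly allocated event memory with 0xff precisely so that an entry whose high or low dword is still all-ones can be treated as "no event yet", even while the device writes the two halves at different times. A stand-alone sketch of the same check, with invented stand-ins for efx_qword_t and EFX_DWORD_IS_ALL_ONES, is:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for efx_qword_t: a 64-bit event viewed as two 32-bit dwords. */
typedef union {
	uint64_t u64;
	uint32_t dword[2];
} demo_qword_t;

/* Mirrors the logic of falcon_event_present() above: an entry is "empty"
 * while either half is still the 0xffffffff fill pattern written when the
 * queue was cleared.  Checking the halves separately stays correct even if
 * the device writes the two halves of the entry at different times. */
static int demo_event_present(const demo_qword_t *ev)
{
	return !(ev->dword[0] == 0xffffffffu || ev->dword[1] == 0xffffffffu);
}

int main(void)
{
	demo_qword_t ev;

	memset(&ev, 0xff, sizeof(ev));		/* freshly cleared slot */
	printf("cleared:      present=%d\n", demo_event_present(&ev));

	ev.dword[0] = 0x12345678;		/* first half has landed... */
	printf("half-written: present=%d\n", demo_event_present(&ev));

	ev.dword[1] = 0x00000042;		/* ...now the event is complete */
	printf("complete:     present=%d\n", demo_event_present(&ev));
	return 0;
}
```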
| 178 | |||
| 179 | /************************************************************************** | ||
| 180 | * | ||
| 181 | * I2C bus - this is a bit-bashing interface using GPIO pins | ||
| 182 | * Note that it uses the output enables to tristate the outputs | ||
| 183 | * SDA is the data pin and SCL is the clock | ||
| 184 | * | ||
| 185 | ************************************************************************** | ||
| 186 | */ | ||
| 187 | static void falcon_setsdascl(struct efx_i2c_interface *i2c) | ||
| 188 | { | ||
| 189 | efx_oword_t reg; | ||
| 190 | |||
| 191 | falcon_read(i2c->efx, ®, GPIO_CTL_REG_KER); | ||
| 192 | EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, (i2c->scl ? 0 : 1)); | ||
| 193 | EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, (i2c->sda ? 0 : 1)); | ||
| 194 | falcon_write(i2c->efx, ®, GPIO_CTL_REG_KER); | ||
| 195 | } | ||
| 196 | |||
| 197 | static int falcon_getsda(struct efx_i2c_interface *i2c) | ||
| 198 | { | ||
| 199 | efx_oword_t reg; | ||
| 200 | |||
| 201 | falcon_read(i2c->efx, ®, GPIO_CTL_REG_KER); | ||
| 202 | return EFX_OWORD_FIELD(reg, GPIO3_IN); | ||
| 203 | } | ||
| 204 | |||
| 205 | static int falcon_getscl(struct efx_i2c_interface *i2c) | ||
| 206 | { | ||
| 207 | efx_oword_t reg; | ||
| 208 | |||
| 209 | falcon_read(i2c->efx, ®, GPIO_CTL_REG_KER); | ||
| 210 | return EFX_DWORD_FIELD(reg, GPIO0_IN); | ||
| 211 | } | ||
| 212 | |||
| 213 | static struct efx_i2c_bit_operations falcon_i2c_bit_operations = { | ||
| 214 | .setsda = falcon_setsdascl, | ||
| 215 | .setscl = falcon_setsdascl, | ||
| 216 | .getsda = falcon_getsda, | ||
| 217 | .getscl = falcon_getscl, | ||
| 218 | .udelay = 100, | ||
| 219 | .mdelay = 10, | ||
| 220 | }; | ||
| 221 | |||
| 222 | /************************************************************************** | ||
| 223 | * | ||
| 224 | * Falcon special buffer handling | ||
| 225 | * Special buffers are used for event queues and the TX and RX | ||
| 226 | * descriptor rings. | ||
| 227 | * | ||
| 228 | *************************************************************************/ | ||
| 229 | |||
| 230 | /* | ||
| 231 | * Initialise a Falcon special buffer | ||
| 232 | * | ||
| 233 | * This will define a buffer (previously allocated via | ||
| 234 | * falcon_alloc_special_buffer()) in Falcon's buffer table, allowing | ||
| 235 | * it to be used for event queues, descriptor rings etc. | ||
| 236 | */ | ||
| 237 | static int | ||
| 238 | falcon_init_special_buffer(struct efx_nic *efx, | ||
| 239 | struct efx_special_buffer *buffer) | ||
| 240 | { | ||
| 241 | efx_qword_t buf_desc; | ||
| 242 | int index; | ||
| 243 | dma_addr_t dma_addr; | ||
| 244 | int i; | ||
| 245 | |||
| 246 | EFX_BUG_ON_PARANOID(!buffer->addr); | ||
| 247 | |||
| 248 | /* Write buffer descriptors to NIC */ | ||
| 249 | for (i = 0; i < buffer->entries; i++) { | ||
| 250 | index = buffer->index + i; | ||
| 251 | dma_addr = buffer->dma_addr + (i * 4096); | ||
| 252 | EFX_LOG(efx, "mapping special buffer %d at %llx\n", | ||
| 253 | index, (unsigned long long)dma_addr); | ||
| 254 | EFX_POPULATE_QWORD_4(buf_desc, | ||
| 255 | IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, | ||
| 256 | BUF_ADR_REGION, 0, | ||
| 257 | BUF_ADR_FBUF, (dma_addr >> 12), | ||
| 258 | BUF_OWNER_ID_FBUF, 0); | ||
| 259 | falcon_write_sram(efx, &buf_desc, index); | ||
| 260 | } | ||
| 261 | |||
| 262 | return 0; | ||
| 263 | } | ||
| 264 | |||
| 265 | /* Unmaps a buffer from Falcon and clears the buffer table entries */ | ||
| 266 | static void | ||
| 267 | falcon_fini_special_buffer(struct efx_nic *efx, | ||
| 268 | struct efx_special_buffer *buffer) | ||
| 269 | { | ||
| 270 | efx_oword_t buf_tbl_upd; | ||
| 271 | unsigned int start = buffer->index; | ||
| 272 | unsigned int end = (buffer->index + buffer->entries - 1); | ||
| 273 | |||
| 274 | if (!buffer->entries) | ||
| 275 | return; | ||
| 276 | |||
| 277 | EFX_LOG(efx, "unmapping special buffers %d-%d\n", | ||
| 278 | buffer->index, buffer->index + buffer->entries - 1); | ||
| 279 | |||
| 280 | EFX_POPULATE_OWORD_4(buf_tbl_upd, | ||
| 281 | BUF_UPD_CMD, 0, | ||
| 282 | BUF_CLR_CMD, 1, | ||
| 283 | BUF_CLR_END_ID, end, | ||
| 284 | BUF_CLR_START_ID, start); | ||
| 285 | falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); | ||
| 286 | } | ||
| 287 | |||
| 288 | /* | ||
| 289 | * Allocate a new Falcon special buffer | ||
| 290 | * | ||
| 291 | * This allocates memory for a new buffer, clears it and allocates a | ||
| 292 | * new buffer ID range. It does not write into Falcon's buffer table. | ||
| 293 | * | ||
| 294 | * This call will allocate 4KB buffers, since Falcon can't use 8KB | ||
| 295 | * buffers for event queues and descriptor rings. | ||
| 296 | */ | ||
| 297 | static int falcon_alloc_special_buffer(struct efx_nic *efx, | ||
| 298 | struct efx_special_buffer *buffer, | ||
| 299 | unsigned int len) | ||
| 300 | { | ||
| 301 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
| 302 | |||
| 303 | len = ALIGN(len, FALCON_BUF_SIZE); | ||
| 304 | |||
| 305 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
| 306 | &buffer->dma_addr); | ||
| 307 | if (!buffer->addr) | ||
| 308 | return -ENOMEM; | ||
| 309 | buffer->len = len; | ||
| 310 | buffer->entries = len / FALCON_BUF_SIZE; | ||
| 311 | BUG_ON(buffer->dma_addr & (FALCON_BUF_SIZE - 1)); | ||
| 312 | |||
| 313 | /* All zeros is a potentially valid event so memset to 0xff */ | ||
| 314 | memset(buffer->addr, 0xff, len); | ||
| 315 | |||
| 316 | /* Select new buffer ID */ | ||
| 317 | buffer->index = nic_data->next_buffer_table; | ||
| 318 | nic_data->next_buffer_table += buffer->entries; | ||
| 319 | |||
| 320 | EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " | ||
| 321 | "(virt %p phys %lx)\n", buffer->index, | ||
| 322 | buffer->index + buffer->entries - 1, | ||
| 323 | (unsigned long long)buffer->dma_addr, len, | ||
| 324 | buffer->addr, virt_to_phys(buffer->addr)); | ||
| 325 | |||
| 326 | return 0; | ||
| 327 | } | ||
| 328 | |||
| 329 | static void falcon_free_special_buffer(struct efx_nic *efx, | ||
| 330 | struct efx_special_buffer *buffer) | ||
| 331 | { | ||
| 332 | if (!buffer->addr) | ||
| 333 | return; | ||
| 334 | |||
| 335 | EFX_LOG(efx, "deallocating special buffers %d-%d at %llx+%x " | ||
| 336 | "(virt %p phys %lx)\n", buffer->index, | ||
| 337 | buffer->index + buffer->entries - 1, | ||
| 338 | (unsigned long long)buffer->dma_addr, buffer->len, | ||
| 339 | buffer->addr, virt_to_phys(buffer->addr)); | ||
| 340 | |||
| 341 | pci_free_consistent(efx->pci_dev, buffer->len, buffer->addr, | ||
| 342 | buffer->dma_addr); | ||
| 343 | buffer->addr = NULL; | ||
| 344 | buffer->entries = 0; | ||
| 345 | } | ||
| 346 | |||
| 347 | /************************************************************************** | ||
| 348 | * | ||
| 349 | * Falcon generic buffer handling | ||
| 350 | * These buffers are used for interrupt status and MAC stats | ||
| 351 | * | ||
| 352 | **************************************************************************/ | ||
| 353 | |||
| 354 | static int falcon_alloc_buffer(struct efx_nic *efx, | ||
| 355 | struct efx_buffer *buffer, unsigned int len) | ||
| 356 | { | ||
| 357 | buffer->addr = pci_alloc_consistent(efx->pci_dev, len, | ||
| 358 | &buffer->dma_addr); | ||
| 359 | if (!buffer->addr) | ||
| 360 | return -ENOMEM; | ||
| 361 | buffer->len = len; | ||
| 362 | memset(buffer->addr, 0, len); | ||
| 363 | return 0; | ||
| 364 | } | ||
| 365 | |||
| 366 | static void falcon_free_buffer(struct efx_nic *efx, struct efx_buffer *buffer) | ||
| 367 | { | ||
| 368 | if (buffer->addr) { | ||
| 369 | pci_free_consistent(efx->pci_dev, buffer->len, | ||
| 370 | buffer->addr, buffer->dma_addr); | ||
| 371 | buffer->addr = NULL; | ||
| 372 | } | ||
| 373 | } | ||
| 374 | |||
| 375 | /************************************************************************** | ||
| 376 | * | ||
| 377 | * Falcon TX path | ||
| 378 | * | ||
| 379 | **************************************************************************/ | ||
| 380 | |||
| 381 | /* Returns a pointer to the specified transmit descriptor in the TX | ||
| 382 | * descriptor queue belonging to the specified channel. | ||
| 383 | */ | ||
| 384 | static inline efx_qword_t *falcon_tx_desc(struct efx_tx_queue *tx_queue, | ||
| 385 | unsigned int index) | ||
| 386 | { | ||
| 387 | return (((efx_qword_t *) (tx_queue->txd.addr)) + index); | ||
| 388 | } | ||
| 389 | |||
| 390 | /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */ | ||
| 391 | static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue) | ||
| 392 | { | ||
| 393 | unsigned write_ptr; | ||
| 394 | efx_dword_t reg; | ||
| 395 | |||
| 396 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; | ||
| 397 | EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); | ||
| 398 | falcon_writel_page(tx_queue->efx, ®, | ||
| 399 | TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); | ||
| 400 | } | ||
| 401 | |||
| 402 | |||
| 403 | /* For each entry inserted into the software descriptor ring, create a | ||
| 404 | * descriptor in the hardware TX descriptor ring (in host memory), and | ||
| 405 | * write a doorbell. | ||
| 406 | */ | ||
| 407 | void falcon_push_buffers(struct efx_tx_queue *tx_queue) | ||
| 408 | { | ||
| 409 | |||
| 410 | struct efx_tx_buffer *buffer; | ||
| 411 | efx_qword_t *txd; | ||
| 412 | unsigned write_ptr; | ||
| 413 | |||
| 414 | BUG_ON(tx_queue->write_count == tx_queue->insert_count); | ||
| 415 | |||
| 416 | do { | ||
| 417 | write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; | ||
| 418 | buffer = &tx_queue->buffer[write_ptr]; | ||
| 419 | txd = falcon_tx_desc(tx_queue, write_ptr); | ||
| 420 | ++tx_queue->write_count; | ||
| 421 | |||
| 422 | /* Create TX descriptor ring entry */ | ||
| 423 | EFX_POPULATE_QWORD_5(*txd, | ||
| 424 | TX_KER_PORT, 0, | ||
| 425 | TX_KER_CONT, buffer->continuation, | ||
| 426 | TX_KER_BYTE_CNT, buffer->len, | ||
| 427 | TX_KER_BUF_REGION, 0, | ||
| 428 | TX_KER_BUF_ADR, buffer->dma_addr); | ||
| 429 | } while (tx_queue->write_count != tx_queue->insert_count); | ||
| 430 | |||
| 431 | wmb(); /* Ensure descriptors are written before they are fetched */ | ||
| 432 | falcon_notify_tx_desc(tx_queue); | ||
| 433 | } | ||
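
The descriptor rings rely on free-running counters such as write_count and insert_count that are never wrapped explicitly; only the masked value (count & FALCON_TXD_RING_MASK) indexes the ring, which requires the ring size to be a power of two. A stand-alone sketch of that indexing pattern, with an invented ring size and without the real write barrier and doorbell, is:

```c
#include <stdio.h>

/* Ring size must be a power of two so that "count & (SIZE - 1)" wraps the
 * free-running counters, just as FALCON_TXD_RING_MASK does above. */
#define RING_SIZE 8u
#define RING_MASK (RING_SIZE - 1)

struct demo_ring {
	unsigned int insert_count;	/* entries queued by the caller */
	unsigned int write_count;	/* entries pushed to "hardware" */
	int slot[RING_SIZE];
};

/* Illustrative analogue of falcon_push_buffers(): walk from write_count up
 * to insert_count, touching each slot via the masked index. */
static void demo_push(struct demo_ring *r)
{
	while (r->write_count != r->insert_count) {
		unsigned int idx = r->write_count & RING_MASK;

		printf("push count=%u -> slot %u (value %d)\n",
		       r->write_count, idx, r->slot[idx]);
		++r->write_count;
	}
	/* A real driver would issue a write barrier and ring the doorbell here. */
}

int main(void)
{
	struct demo_ring r = { 0, 0, { 0 } };
	unsigned int i;

	for (i = 0; i < 10; i++) {	/* counters run past RING_SIZE and wrap */
		r.slot[r.insert_count & RING_MASK] = (int)i;
		++r.insert_count;
		demo_push(&r);
	}
	return 0;
}
```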
| 434 | |||
| 435 | /* Allocate hardware resources for a TX queue */ | ||
| 436 | int falcon_probe_tx(struct efx_tx_queue *tx_queue) | ||
| 437 | { | ||
| 438 | struct efx_nic *efx = tx_queue->efx; | ||
| 439 | return falcon_alloc_special_buffer(efx, &tx_queue->txd, | ||
| 440 | FALCON_TXD_RING_SIZE * | ||
| 441 | sizeof(efx_qword_t)); | ||
| 442 | } | ||
| 443 | |||
| 444 | int falcon_init_tx(struct efx_tx_queue *tx_queue) | ||
| 445 | { | ||
| 446 | efx_oword_t tx_desc_ptr; | ||
| 447 | struct efx_nic *efx = tx_queue->efx; | ||
| 448 | int rc; | ||
| 449 | |||
| 450 | /* Pin TX descriptor ring */ | ||
| 451 | rc = falcon_init_special_buffer(efx, &tx_queue->txd); | ||
| 452 | if (rc) | ||
| 453 | return rc; | ||
| 454 | |||
| 455 | /* Push TX descriptor ring to card */ | ||
| 456 | EFX_POPULATE_OWORD_10(tx_desc_ptr, | ||
| 457 | TX_DESCQ_EN, 1, | ||
| 458 | TX_ISCSI_DDIG_EN, 0, | ||
| 459 | TX_ISCSI_HDIG_EN, 0, | ||
| 460 | TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, | ||
| 461 | TX_DESCQ_EVQ_ID, tx_queue->channel->evqnum, | ||
| 462 | TX_DESCQ_OWNER_ID, 0, | ||
| 463 | TX_DESCQ_LABEL, tx_queue->queue, | ||
| 464 | TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, | ||
| 465 | TX_DESCQ_TYPE, 0, | ||
| 466 | TX_NON_IP_DROP_DIS_B0, 1); | ||
| 467 | |||
| 468 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | ||
| 469 | int csum = !(efx->net_dev->features & NETIF_F_IP_CSUM); | ||
| 470 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, csum); | ||
| 471 | EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, csum); | ||
| 472 | } | ||
| 473 | |||
| 474 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
| 475 | tx_queue->queue); | ||
| 476 | |||
| 477 | if (FALCON_REV(efx) < FALCON_REV_B0) { | ||
| 478 | efx_oword_t reg; | ||
| 479 | |||
| 480 | BUG_ON(tx_queue->queue >= 128); /* HW limit */ | ||
| 481 | |||
| 482 | falcon_read(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | ||
| 483 | if (efx->net_dev->features & NETIF_F_IP_CSUM) | ||
| 484 | clear_bit_le(tx_queue->queue, (void *)®); | ||
| 485 | else | ||
| 486 | set_bit_le(tx_queue->queue, (void *)®); | ||
| 487 | falcon_write(efx, ®, TX_CHKSM_CFG_REG_KER_A1); | ||
| 488 | } | ||
| 489 | |||
| 490 | return 0; | ||
| 491 | } | ||
| 492 | |||
| 493 | static int falcon_flush_tx_queue(struct efx_tx_queue *tx_queue) | ||
| 494 | { | ||
| 495 | struct efx_nic *efx = tx_queue->efx; | ||
| 496 | struct efx_channel *channel = &efx->channel[0]; | ||
| 497 | efx_oword_t tx_flush_descq; | ||
| 498 | unsigned int read_ptr, i; | ||
| 499 | |||
| 500 | /* Post a flush command */ | ||
| 501 | EFX_POPULATE_OWORD_2(tx_flush_descq, | ||
| 502 | TX_FLUSH_DESCQ_CMD, 1, | ||
| 503 | TX_FLUSH_DESCQ, tx_queue->queue); | ||
| 504 | falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); | ||
| 505 | msleep(FALCON_FLUSH_TIMEOUT); | ||
| 506 | |||
| 507 | if (EFX_WORKAROUND_7803(efx)) | ||
| 508 | return 0; | ||
| 509 | |||
| 510 | /* Look for a flush completed event */ | ||
| 511 | read_ptr = channel->eventq_read_ptr; | ||
| 512 | for (i = 0; i < FALCON_EVQ_SIZE; ++i) { | ||
| 513 | efx_qword_t *event = falcon_event(channel, read_ptr); | ||
| 514 | int ev_code, ev_sub_code, ev_queue; | ||
| 515 | if (!falcon_event_present(event)) | ||
| 516 | break; | ||
| 517 | |||
| 518 | ev_code = EFX_QWORD_FIELD(*event, EV_CODE); | ||
| 519 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | ||
| 520 | ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_TX_DESCQ_ID); | ||
| 521 | if ((ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) && | ||
| 522 | (ev_queue == tx_queue->queue)) { | ||
| 523 | EFX_LOG(efx, "tx queue %d flush command successful\n", | ||
| 524 | tx_queue->queue); | ||
| 525 | return 0; | ||
| 526 | } | ||
| 527 | |||
| 528 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | ||
| 529 | } | ||
| 530 | |||
| 531 | if (EFX_WORKAROUND_11557(efx)) { | ||
| 532 | efx_oword_t reg; | ||
| 533 | int enabled; | ||
| 534 | |||
| 535 | falcon_read_table(efx, ®, efx->type->txd_ptr_tbl_base, | ||
| 536 | tx_queue->queue); | ||
| 537 | enabled = EFX_OWORD_FIELD(reg, TX_DESCQ_EN); | ||
| 538 | if (!enabled) { | ||
| 539 | EFX_LOG(efx, "tx queue %d disabled without a " | ||
| 540 | "flush event seen\n", tx_queue->queue); | ||
| 541 | return 0; | ||
| 542 | } | ||
| 543 | } | ||
| 544 | |||
| 545 | EFX_ERR(efx, "tx queue %d flush command timed out\n", tx_queue->queue); | ||
| 546 | return -ETIMEDOUT; | ||
| 547 | } | ||
| 548 | |||
| 549 | void falcon_fini_tx(struct efx_tx_queue *tx_queue) | ||
| 550 | { | ||
| 551 | struct efx_nic *efx = tx_queue->efx; | ||
| 552 | efx_oword_t tx_desc_ptr; | ||
| 553 | |||
| 554 | /* Stop the hardware using the queue */ | ||
| 555 | if (falcon_flush_tx_queue(tx_queue)) | ||
| 556 | EFX_ERR(efx, "failed to flush tx queue %d\n", tx_queue->queue); | ||
| 557 | |||
| 558 | /* Remove TX descriptor ring from card */ | ||
| 559 | EFX_ZERO_OWORD(tx_desc_ptr); | ||
| 560 | falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, | ||
| 561 | tx_queue->queue); | ||
| 562 | |||
| 563 | /* Unpin TX descriptor ring */ | ||
| 564 | falcon_fini_special_buffer(efx, &tx_queue->txd); | ||
| 565 | } | ||
| 566 | |||
| 567 | /* Free buffers backing TX queue */ | ||
| 568 | void falcon_remove_tx(struct efx_tx_queue *tx_queue) | ||
| 569 | { | ||
| 570 | falcon_free_special_buffer(tx_queue->efx, &tx_queue->txd); | ||
| 571 | } | ||
| 572 | |||
| 573 | /************************************************************************** | ||
| 574 | * | ||
| 575 | * Falcon RX path | ||
| 576 | * | ||
| 577 | **************************************************************************/ | ||
| 578 | |||
| 579 | /* Returns a pointer to the specified descriptor in the RX descriptor queue */ | ||
| 580 | static inline efx_qword_t *falcon_rx_desc(struct efx_rx_queue *rx_queue, | ||
| 581 | unsigned int index) | ||
| 582 | { | ||
| 583 | return (((efx_qword_t *) (rx_queue->rxd.addr)) + index); | ||
| 584 | } | ||
| 585 | |||
| 586 | /* This creates an entry in the RX descriptor queue */ | ||
| 587 | static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue, | ||
| 588 | unsigned index) | ||
| 589 | { | ||
| 590 | struct efx_rx_buffer *rx_buf; | ||
| 591 | efx_qword_t *rxd; | ||
| 592 | |||
| 593 | rxd = falcon_rx_desc(rx_queue, index); | ||
| 594 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
| 595 | EFX_POPULATE_QWORD_3(*rxd, | ||
| 596 | RX_KER_BUF_SIZE, | ||
| 597 | rx_buf->len - | ||
| 598 | rx_queue->efx->type->rx_buffer_padding, | ||
| 599 | RX_KER_BUF_REGION, 0, | ||
| 600 | RX_KER_BUF_ADR, rx_buf->dma_addr); | ||
| 601 | } | ||
| 602 | |||
| 603 | /* This writes to the RX_DESC_WPTR register for the specified receive | ||
| 604 | * descriptor ring. | ||
| 605 | */ | ||
| 606 | void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue) | ||
| 607 | { | ||
| 608 | efx_dword_t reg; | ||
| 609 | unsigned write_ptr; | ||
| 610 | |||
| 611 | while (rx_queue->notified_count != rx_queue->added_count) { | ||
| 612 | falcon_build_rx_desc(rx_queue, | ||
| 613 | rx_queue->notified_count & | ||
| 614 | FALCON_RXD_RING_MASK); | ||
| 615 | ++rx_queue->notified_count; | ||
| 616 | } | ||
| 617 | |||
| 618 | wmb(); | ||
| 619 | write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; | ||
| 620 | EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); | ||
| 621 | falcon_writel_page(rx_queue->efx, ®, | ||
| 622 | RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); | ||
| 623 | } | ||
| 624 | |||
| 625 | int falcon_probe_rx(struct efx_rx_queue *rx_queue) | ||
| 626 | { | ||
| 627 | struct efx_nic *efx = rx_queue->efx; | ||
| 628 | return falcon_alloc_special_buffer(efx, &rx_queue->rxd, | ||
| 629 | FALCON_RXD_RING_SIZE * | ||
| 630 | sizeof(efx_qword_t)); | ||
| 631 | } | ||
| 632 | |||
| 633 | int falcon_init_rx(struct efx_rx_queue *rx_queue) | ||
| 634 | { | ||
| 635 | efx_oword_t rx_desc_ptr; | ||
| 636 | struct efx_nic *efx = rx_queue->efx; | ||
| 637 | int rc; | ||
| 638 | int is_b0 = FALCON_REV(efx) >= FALCON_REV_B0; | ||
| 639 | int iscsi_digest_en = is_b0; | ||
| 640 | |||
| 641 | EFX_LOG(efx, "RX queue %d ring in special buffers %d-%d\n", | ||
| 642 | rx_queue->queue, rx_queue->rxd.index, | ||
| 643 | rx_queue->rxd.index + rx_queue->rxd.entries - 1); | ||
| 644 | |||
| 645 | /* Pin RX descriptor ring */ | ||
| 646 | rc = falcon_init_special_buffer(efx, &rx_queue->rxd); | ||
| 647 | if (rc) | ||
| 648 | return rc; | ||
| 649 | |||
| 650 | /* Push RX descriptor ring to card */ | ||
| 651 | EFX_POPULATE_OWORD_10(rx_desc_ptr, | ||
| 652 | RX_ISCSI_DDIG_EN, iscsi_digest_en, | ||
| 653 | RX_ISCSI_HDIG_EN, iscsi_digest_en, | ||
| 654 | RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, | ||
| 655 | RX_DESCQ_EVQ_ID, rx_queue->channel->evqnum, | ||
| 656 | RX_DESCQ_OWNER_ID, 0, | ||
| 657 | RX_DESCQ_LABEL, rx_queue->queue, | ||
| 658 | RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, | ||
| 659 | RX_DESCQ_TYPE, 0 /* kernel queue */ , | ||
| 660 | /* For >=B0 this is scatter so disable */ | ||
| 661 | RX_DESCQ_JUMBO, !is_b0, | ||
| 662 | RX_DESCQ_EN, 1); | ||
| 663 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
| 664 | rx_queue->queue); | ||
| 665 | return 0; | ||
| 666 | } | ||
| 667 | |||
| 668 | static int falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) | ||
| 669 | { | ||
| 670 | struct efx_nic *efx = rx_queue->efx; | ||
| 671 | struct efx_channel *channel = &efx->channel[0]; | ||
| 672 | unsigned int read_ptr, i; | ||
| 673 | efx_oword_t rx_flush_descq; | ||
| 674 | |||
| 675 | /* Post a flush command */ | ||
| 676 | EFX_POPULATE_OWORD_2(rx_flush_descq, | ||
| 677 | RX_FLUSH_DESCQ_CMD, 1, | ||
| 678 | RX_FLUSH_DESCQ, rx_queue->queue); | ||
| 679 | falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); | ||
| 680 | msleep(FALCON_FLUSH_TIMEOUT); | ||
| 681 | |||
| 682 | if (EFX_WORKAROUND_7803(efx)) | ||
| 683 | return 0; | ||
| 684 | |||
| 685 | /* Look for a flush completed event */ | ||
| 686 | read_ptr = channel->eventq_read_ptr; | ||
| 687 | for (i = 0; i < FALCON_EVQ_SIZE; ++i) { | ||
| 688 | efx_qword_t *event = falcon_event(channel, read_ptr); | ||
| 689 | int ev_code, ev_sub_code, ev_queue, ev_failed; | ||
| 690 | if (!falcon_event_present(event)) | ||
| 691 | break; | ||
| 692 | |||
| 693 | ev_code = EFX_QWORD_FIELD(*event, EV_CODE); | ||
| 694 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | ||
| 695 | ev_queue = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_DESCQ_ID); | ||
| 696 | ev_failed = EFX_QWORD_FIELD(*event, DRIVER_EV_RX_FLUSH_FAIL); | ||
| 697 | |||
| 698 | if ((ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) && | ||
| 699 | (ev_queue == rx_queue->queue)) { | ||
| 700 | if (ev_failed) { | ||
| 701 | EFX_INFO(efx, "rx queue %d flush command " | ||
| 702 | "failed\n", rx_queue->queue); | ||
| 703 | return -EAGAIN; | ||
| 704 | } else { | ||
| 705 | EFX_LOG(efx, "rx queue %d flush command " | ||
| 706 | "succesful\n", rx_queue->queue); | ||
| 707 | return 0; | ||
| 708 | } | ||
| 709 | } | ||
| 710 | |||
| 711 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | ||
| 712 | } | ||
| 713 | |||
| 714 | if (EFX_WORKAROUND_11557(efx)) { | ||
| 715 | efx_oword_t reg; | ||
| 716 | int enabled; | ||
| 717 | |||
| 718 | falcon_read_table(efx, ®, efx->type->rxd_ptr_tbl_base, | ||
| 719 | rx_queue->queue); | ||
| 720 | enabled = EFX_OWORD_FIELD(reg, RX_DESCQ_EN); | ||
| 721 | if (!enabled) { | ||
| 722 | EFX_LOG(efx, "rx queue %d disabled without a " | ||
| 723 | "flush event seen\n", rx_queue->queue); | ||
| 724 | return 0; | ||
| 725 | } | ||
| 726 | } | ||
| 727 | |||
| 728 | EFX_ERR(efx, "rx queue %d flush command timed out\n", rx_queue->queue); | ||
| 729 | return -ETIMEDOUT; | ||
| 730 | } | ||
| 731 | |||
| 732 | void falcon_fini_rx(struct efx_rx_queue *rx_queue) | ||
| 733 | { | ||
| 734 | efx_oword_t rx_desc_ptr; | ||
| 735 | struct efx_nic *efx = rx_queue->efx; | ||
| 736 | int i, rc; | ||
| 737 | |||
| 738 | /* Try to flush the RX queue. This may need to be repeated. */ | ||
| 739 | for (i = 0; i < 5; i++) { | ||
| 740 | rc = falcon_flush_rx_queue(rx_queue); | ||
| 741 | if (rc == -EAGAIN) | ||
| 742 | continue; | ||
| 743 | break; | ||
| 744 | } | ||
| 745 | if (rc) | ||
| 746 | EFX_ERR(efx, "failed to flush rx queue %d\n", rx_queue->queue); | ||
| 747 | |||
| 748 | /* Remove RX descriptor ring from card */ | ||
| 749 | EFX_ZERO_OWORD(rx_desc_ptr); | ||
| 750 | falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, | ||
| 751 | rx_queue->queue); | ||
| 752 | |||
| 753 | /* Unpin RX descriptor ring */ | ||
| 754 | falcon_fini_special_buffer(efx, &rx_queue->rxd); | ||
| 755 | } | ||
| 756 | |||
| 757 | /* Free buffers backing RX queue */ | ||
| 758 | void falcon_remove_rx(struct efx_rx_queue *rx_queue) | ||
| 759 | { | ||
| 760 | falcon_free_special_buffer(rx_queue->efx, &rx_queue->rxd); | ||
| 761 | } | ||
| 762 | |||
| 763 | /************************************************************************** | ||
| 764 | * | ||
| 765 | * Falcon event queue processing | ||
| 766 | * Event queues are processed by per-channel tasklets. | ||
| 767 | * | ||
| 768 | **************************************************************************/ | ||
| 769 | |||
| 770 | /* Update a channel's event queue's read pointer (RPTR) register | ||
| 771 | * | ||
| 772 | * This writes the EVQ_RPTR_REG register for the specified channel's | ||
| 773 | * event queue. | ||
| 774 | * | ||
| 775 | * Note that EVQ_RPTR_REG contains the index of the "last read" event, | ||
| 776 | * whereas channel->eventq_read_ptr contains the index of the "next to | ||
| 777 | * read" event. | ||
| 778 | */ | ||
| 779 | void falcon_eventq_read_ack(struct efx_channel *channel) | ||
| 780 | { | ||
| 781 | efx_dword_t reg; | ||
| 782 | struct efx_nic *efx = channel->efx; | ||
| 783 | |||
| 784 | EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); | ||
| 785 | falcon_writel_table(efx, ®, efx->type->evq_rptr_tbl_base, | ||
| 786 | channel->evqnum); | ||
| 787 | } | ||
| 788 | |||
| 789 | /* Use HW to insert a SW defined event */ | ||
| 790 | void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event) | ||
| 791 | { | ||
| 792 | efx_oword_t drv_ev_reg; | ||
| 793 | |||
| 794 | EFX_POPULATE_OWORD_2(drv_ev_reg, | ||
| 795 | DRV_EV_QID, channel->evqnum, | ||
| 796 | DRV_EV_DATA, | ||
| 797 | EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); | ||
| 798 | falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); | ||
| 799 | } | ||
| 800 | |||
| 801 | /* Handle a transmit completion event | ||
| 802 | * | ||
| 803 | * Falcon batches TX completion events; the message we receive is of | ||
| 804 | * the form "complete all TX events up to this index". | ||
| 805 | */ | ||
| 806 | static inline void falcon_handle_tx_event(struct efx_channel *channel, | ||
| 807 | efx_qword_t *event) | ||
| 808 | { | ||
| 809 | unsigned int tx_ev_desc_ptr; | ||
| 810 | unsigned int tx_ev_q_label; | ||
| 811 | struct efx_tx_queue *tx_queue; | ||
| 812 | struct efx_nic *efx = channel->efx; | ||
| 813 | |||
| 814 | if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { | ||
| 815 | /* Transmit completion */ | ||
| 816 | tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); | ||
| 817 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | ||
| 818 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
| 819 | efx_xmit_done(tx_queue, tx_ev_desc_ptr); | ||
| 820 | } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { | ||
| 821 | /* Rewrite the FIFO write pointer */ | ||
| 822 | tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); | ||
| 823 | tx_queue = &efx->tx_queue[tx_ev_q_label]; | ||
| 824 | |||
| 825 | if (NET_DEV_REGISTERED(efx)) | ||
| 826 | netif_tx_lock(efx->net_dev); | ||
| 827 | falcon_notify_tx_desc(tx_queue); | ||
| 828 | if (NET_DEV_REGISTERED(efx)) | ||
| 829 | netif_tx_unlock(efx->net_dev); | ||
| 830 | } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && | ||
| 831 | EFX_WORKAROUND_10727(efx)) { | ||
| 832 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
| 833 | } else { | ||
| 834 | EFX_ERR(efx, "channel %d unexpected TX event " | ||
| 835 | EFX_QWORD_FMT"\n", channel->channel, | ||
| 836 | EFX_QWORD_VAL(*event)); | ||
| 837 | } | ||
| 838 | } | ||
| 839 | |||
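Since Falcon batches TX completions into a single "completed up to this index" message, the consumer walks forward from its own read position to the reported index. Below is a minimal sketch of that walk, assuming a power-of-two ring mask (the value is illustrative) and leaving open whether the stop index is inclusive, which this hunk does not show.

/* Illustrative sketch only: draining a batched TX completion. */
#define EXAMPLE_TXD_MASK 1023U                  /* assumed stand-in for the ring mask */

static unsigned example_complete_up_to(unsigned read_count, unsigned stop_index)
{
	unsigned completed = 0;

	while ((read_count & EXAMPLE_TXD_MASK) != stop_index) {
		/* the real code would release the buffer in this slot */
		read_count++;
		completed++;
	}
	return completed;
}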
| 840 | /* Check received packet's destination MAC address. */ | ||
| 841 | static int check_dest_mac(struct efx_rx_queue *rx_queue, | ||
| 842 | const efx_qword_t *event) | ||
| 843 | { | ||
| 844 | struct efx_rx_buffer *rx_buf; | ||
| 845 | struct efx_nic *efx = rx_queue->efx; | ||
| 846 | int rx_ev_desc_ptr; | ||
| 847 | struct ethhdr *eh; | ||
| 848 | |||
| 849 | if (efx->promiscuous) | ||
| 850 | return 1; | ||
| 851 | |||
| 852 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | ||
| 853 | rx_buf = efx_rx_buffer(rx_queue, rx_ev_desc_ptr); | ||
| 854 | eh = (struct ethhdr *)rx_buf->data; | ||
| 855 | if (memcmp(eh->h_dest, efx->net_dev->dev_addr, ETH_ALEN)) | ||
| 856 | return 0; | ||
| 857 | return 1; | ||
| 858 | } | ||
| 859 | |||
| 860 | /* Detect errors included in the rx_evt_pkt_ok bit. */ | ||
| 861 | static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue, | ||
| 862 | const efx_qword_t *event, | ||
| 863 | unsigned *rx_ev_pkt_ok, | ||
| 864 | int *discard, int byte_count) | ||
| 865 | { | ||
| 866 | struct efx_nic *efx = rx_queue->efx; | ||
| 867 | unsigned rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err; | ||
| 868 | unsigned rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err; | ||
| 869 | unsigned rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc; | ||
| 870 | unsigned rx_ev_pkt_type, rx_ev_other_err, rx_ev_pause_frm; | ||
| 871 | unsigned rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
| 872 | int snap, non_ip; | ||
| 873 | |||
| 874 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | ||
| 875 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | ||
| 876 | rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); | ||
| 877 | rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); | ||
| 878 | rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, | ||
| 879 | RX_EV_BUF_OWNER_ID_ERR); | ||
| 880 | rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR); | ||
| 881 | rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, | ||
| 882 | RX_EV_IP_HDR_CHKSUM_ERR); | ||
| 883 | rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, | ||
| 884 | RX_EV_TCP_UDP_CHKSUM_ERR); | ||
| 885 | rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); | ||
| 886 | rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); | ||
| 887 | rx_ev_drib_nib = ((FALCON_REV(efx) >= FALCON_REV_B0) ? | ||
| 888 | 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); | ||
| 889 | rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); | ||
| 890 | |||
| 891 | /* Every error apart from tobe_disc and pause_frm */ | ||
| 892 | rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | | ||
| 893 | rx_ev_buf_owner_id_err | rx_ev_eth_crc_err | | ||
| 894 | rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err); | ||
| 895 | |||
| 896 | snap = (rx_ev_pkt_type == RX_EV_PKT_TYPE_LLC_DECODE) || | ||
| 897 | (rx_ev_pkt_type == RX_EV_PKT_TYPE_VLAN_LLC_DECODE); | ||
| 898 | non_ip = (rx_ev_hdr_type == RX_EV_HDR_TYPE_NON_IP_DECODE); | ||
| 899 | |||
| 900 | /* SFC bug 5475/8970: The Falcon XMAC incorrectly calculates the | ||
| 901 | * length field of an LLC frame, which sets TOBE_DISC. We could set | ||
| 902 | * PASS_LEN_ERR, but we want the MAC to filter out short frames (to | ||
| 903 | * protect the RX block). | ||
| 904 | * | ||
| 905 | * bug5475 - LLC/SNAP: Falcon identifies SNAP packets. | ||
| 906 | * bug8970 - LLC/noSNAP: Falcon does not provide an LLC flag. | ||
| 907 | * LLC can't encapsulate IP, so by definition | ||
| 908 | * these packets are NON_IP. | ||
| 909 | * | ||
| 910 | * Unicast mismatch will also cause TOBE_DISC, so the driver needs | ||
| 911 | * to check this. | ||
| 912 | */ | ||
| 913 | if (EFX_WORKAROUND_5475(efx) && rx_ev_tobe_disc && (snap || non_ip)) { | ||
| 914 | /* If all the other flags are zero then we can state the | ||
| 915 | * entire packet is OK, which tells the kernel not | ||
| 916 | * to recalculate checksums. | ||
| 917 | */ | ||
| 918 | if (!(non_ip | rx_ev_other_err | rx_ev_pause_frm)) | ||
| 919 | *rx_ev_pkt_ok = 1; | ||
| 920 | |||
| 921 | rx_ev_tobe_disc = 0; | ||
| 922 | |||
| 923 | /* TOBE_DISC is set for unicast mismatch. But given that | ||
| 924 | * we can't trust TOBE_DISC here, we must validate the dest | ||
| 925 | * MAC address ourselves. | ||
| 926 | */ | ||
| 927 | if (!rx_ev_mcast_pkt && !check_dest_mac(rx_queue, event)) | ||
| 928 | rx_ev_tobe_disc = 1; | ||
| 929 | } | ||
| 930 | |||
| 931 | /* Count errors that are not in MAC stats. */ | ||
| 932 | if (rx_ev_frm_trunc) | ||
| 933 | ++rx_queue->channel->n_rx_frm_trunc; | ||
| 934 | else if (rx_ev_tobe_disc) | ||
| 935 | ++rx_queue->channel->n_rx_tobe_disc; | ||
| 936 | else if (rx_ev_ip_hdr_chksum_err) | ||
| 937 | ++rx_queue->channel->n_rx_ip_hdr_chksum_err; | ||
| 938 | else if (rx_ev_tcp_udp_chksum_err) | ||
| 939 | ++rx_queue->channel->n_rx_tcp_udp_chksum_err; | ||
| 940 | if (rx_ev_ip_frag_err) | ||
| 941 | ++rx_queue->channel->n_rx_ip_frag_err; | ||
| 942 | |||
| 943 | /* The frame must be discarded if any of these are true. */ | ||
| 944 | *discard = (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib | | ||
| 945 | rx_ev_tobe_disc | rx_ev_pause_frm); | ||
| 946 | |||
| 947 | /* TOBE_DISC is expected on unicast mismatches; don't print out an | ||
| 948 | * error message. FRM_TRUNC indicates RXDP dropped the packet due | ||
| 949 | * to a FIFO overflow. | ||
| 950 | */ | ||
| 951 | #ifdef EFX_ENABLE_DEBUG | ||
| 952 | if (rx_ev_other_err) { | ||
| 953 | EFX_INFO_RL(efx, " RX queue %d unexpected RX event " | ||
| 954 | EFX_QWORD_FMT "%s%s%s%s%s%s%s%s%s\n", | ||
| 955 | rx_queue->queue, EFX_QWORD_VAL(*event), | ||
| 956 | rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "", | ||
| 957 | rx_ev_ip_hdr_chksum_err ? | ||
| 958 | " [IP_HDR_CHKSUM_ERR]" : "", | ||
| 959 | rx_ev_tcp_udp_chksum_err ? | ||
| 960 | " [TCP_UDP_CHKSUM_ERR]" : "", | ||
| 961 | rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "", | ||
| 962 | rx_ev_frm_trunc ? " [FRM_TRUNC]" : "", | ||
| 963 | rx_ev_drib_nib ? " [DRIB_NIB]" : "", | ||
| 964 | rx_ev_tobe_disc ? " [TOBE_DISC]" : "", | ||
| 965 | rx_ev_pause_frm ? " [PAUSE]" : "", | ||
| 966 | snap ? " [SNAP/LLC]" : ""); | ||
| 967 | } | ||
| 968 | #endif | ||
| 969 | |||
| 970 | if (unlikely(rx_ev_eth_crc_err && EFX_WORKAROUND_10750(efx) && | ||
| 971 | efx->phy_type == PHY_TYPE_10XPRESS)) | ||
| 972 | tenxpress_crc_err(efx); | ||
| 973 | } | ||
| 974 | |||
| 975 | /* Handle receive events that are not in-order. */ | ||
| 976 | static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue, | ||
| 977 | unsigned index) | ||
| 978 | { | ||
| 979 | struct efx_nic *efx = rx_queue->efx; | ||
| 980 | unsigned expected, dropped; | ||
| 981 | |||
| 982 | expected = rx_queue->removed_count & FALCON_RXD_RING_MASK; | ||
| 983 | dropped = ((index + FALCON_RXD_RING_SIZE - expected) & | ||
| 984 | FALCON_RXD_RING_MASK); | ||
| 985 | EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", | ||
| 986 | dropped, index, expected); | ||
| 987 | |||
| 988 | efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ? | ||
| 989 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
| 990 | } | ||
| 991 | |||
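The dropped count computed above is the forward distance around the ring from the expected slot to the reported slot, taken modulo the ring size. A standalone worked example follows; 4096 is an assumed size standing in for FALCON_RXD_RING_SIZE.

/* Illustrative sketch only: forward distance around a power-of-two ring. */
#define EXAMPLE_RXD_SIZE 4096U
#define EXAMPLE_RXD_MASK (EXAMPLE_RXD_SIZE - 1)

static unsigned example_dropped(unsigned index, unsigned expected)
{
	return (index + EXAMPLE_RXD_SIZE - expected) & EXAMPLE_RXD_MASK;
}

/* example_dropped(105, 100) == 5; example_dropped(3, 4094) == 5 across the wrap */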
| 992 | /* Handle a packet received event | ||
| 993 | * | ||
| 994 | * Falcon silicon gives a "discard" flag if it's a unicast packet with the | ||
| 995 | * wrong destination address. | ||
| 996 | * Also "is multicast" and "matches multicast filter" flags can be used to | ||
| 997 | * discard non-matching multicast packets. | ||
| 998 | */ | ||
| 999 | static inline int falcon_handle_rx_event(struct efx_channel *channel, | ||
| 1000 | const efx_qword_t *event) | ||
| 1001 | { | ||
| 1002 | unsigned int rx_ev_q_label, rx_ev_desc_ptr, rx_ev_byte_cnt; | ||
| 1003 | unsigned int rx_ev_pkt_ok, rx_ev_hdr_type, rx_ev_mcast_pkt; | ||
| 1004 | unsigned expected_ptr; | ||
| 1005 | int discard = 0, checksummed; | ||
| 1006 | struct efx_rx_queue *rx_queue; | ||
| 1007 | struct efx_nic *efx = channel->efx; | ||
| 1008 | |||
| 1009 | /* Basic packet information */ | ||
| 1010 | rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); | ||
| 1011 | rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); | ||
| 1012 | rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); | ||
| 1013 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); | ||
| 1014 | WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); | ||
| 1015 | |||
| 1016 | rx_ev_q_label = EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL); | ||
| 1017 | rx_queue = &efx->rx_queue[rx_ev_q_label]; | ||
| 1018 | |||
| 1019 | rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); | ||
| 1020 | expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; | ||
| 1021 | if (unlikely(rx_ev_desc_ptr != expected_ptr)) { | ||
| 1022 | falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); | ||
| 1023 | return rx_ev_q_label; | ||
| 1024 | } | ||
| 1025 | |||
| 1026 | if (likely(rx_ev_pkt_ok)) { | ||
| 1027 | /* If packet is marked as OK and packet type is TCP/IPv4 or | ||
| 1028 | * UDP/IPv4, then we can rely on the hardware checksum. | ||
| 1029 | */ | ||
| 1030 | checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); | ||
| 1031 | } else { | ||
| 1032 | falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, | ||
| 1033 | &discard, rx_ev_byte_cnt); | ||
| 1034 | checksummed = 0; | ||
| 1035 | } | ||
| 1036 | |||
| 1037 | /* Detect multicast packets that didn't match the filter */ | ||
| 1038 | rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); | ||
| 1039 | if (rx_ev_mcast_pkt) { | ||
| 1040 | unsigned int rx_ev_mcast_hash_match = | ||
| 1041 | EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); | ||
| 1042 | |||
| 1043 | if (unlikely(!rx_ev_mcast_hash_match)) | ||
| 1044 | discard = 1; | ||
| 1045 | } | ||
| 1046 | |||
| 1047 | /* Handle received packet */ | ||
| 1048 | efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, | ||
| 1049 | checksummed, discard); | ||
| 1050 | |||
| 1051 | return rx_ev_q_label; | ||
| 1052 | } | ||
| 1053 | |||
| 1054 | /* Global events are basically PHY events */ | ||
| 1055 | static void falcon_handle_global_event(struct efx_channel *channel, | ||
| 1056 | efx_qword_t *event) | ||
| 1057 | { | ||
| 1058 | struct efx_nic *efx = channel->efx; | ||
| 1059 | int is_phy_event = 0, handled = 0; | ||
| 1060 | |||
| 1061 | /* Check for interrupt on either port. Some boards have a | ||
| 1062 | * single PHY wired to the interrupt line for port 1. */ | ||
| 1063 | if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || | ||
| 1064 | EFX_QWORD_FIELD(*event, G_PHY1_INTR) || | ||
| 1065 | EFX_QWORD_FIELD(*event, XG_PHY_INTR)) | ||
| 1066 | is_phy_event = 1; | ||
| 1067 | |||
| 1068 | if ((FALCON_REV(efx) >= FALCON_REV_B0) && | ||
| 1069 | EFX_OWORD_FIELD(*event, XG_MNT_INTR_B0)) | ||
| 1070 | is_phy_event = 1; | ||
| 1071 | |||
| 1072 | if (is_phy_event) { | ||
| 1073 | efx->phy_op->clear_interrupt(efx); | ||
| 1074 | queue_work(efx->workqueue, &efx->reconfigure_work); | ||
| 1075 | handled = 1; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { | ||
| 1079 | EFX_ERR(efx, "channel %d seen global RX_RESET " | ||
| 1080 | "event. Resetting.\n", channel->channel); | ||
| 1081 | |||
| 1082 | atomic_inc(&efx->rx_reset); | ||
| 1083 | efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? | ||
| 1084 | RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); | ||
| 1085 | handled = 1; | ||
| 1086 | } | ||
| 1087 | |||
| 1088 | if (!handled) | ||
| 1089 | EFX_ERR(efx, "channel %d unknown global event " | ||
| 1090 | EFX_QWORD_FMT "\n", channel->channel, | ||
| 1091 | EFX_QWORD_VAL(*event)); | ||
| 1092 | } | ||
| 1093 | |||
| 1094 | static void falcon_handle_driver_event(struct efx_channel *channel, | ||
| 1095 | efx_qword_t *event) | ||
| 1096 | { | ||
| 1097 | struct efx_nic *efx = channel->efx; | ||
| 1098 | unsigned int ev_sub_code; | ||
| 1099 | unsigned int ev_sub_data; | ||
| 1100 | |||
| 1101 | ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); | ||
| 1102 | ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); | ||
| 1103 | |||
| 1104 | switch (ev_sub_code) { | ||
| 1105 | case TX_DESCQ_FLS_DONE_EV_DECODE: | ||
| 1106 | EFX_TRACE(efx, "channel %d TXQ %d flushed\n", | ||
| 1107 | channel->channel, ev_sub_data); | ||
| 1108 | break; | ||
| 1109 | case RX_DESCQ_FLS_DONE_EV_DECODE: | ||
| 1110 | EFX_TRACE(efx, "channel %d RXQ %d flushed\n", | ||
| 1111 | channel->channel, ev_sub_data); | ||
| 1112 | break; | ||
| 1113 | case EVQ_INIT_DONE_EV_DECODE: | ||
| 1114 | EFX_LOG(efx, "channel %d EVQ %d initialised\n", | ||
| 1115 | channel->channel, ev_sub_data); | ||
| 1116 | break; | ||
| 1117 | case SRM_UPD_DONE_EV_DECODE: | ||
| 1118 | EFX_TRACE(efx, "channel %d SRAM update done\n", | ||
| 1119 | channel->channel); | ||
| 1120 | break; | ||
| 1121 | case WAKE_UP_EV_DECODE: | ||
| 1122 | EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", | ||
| 1123 | channel->channel, ev_sub_data); | ||
| 1124 | break; | ||
| 1125 | case TIMER_EV_DECODE: | ||
| 1126 | EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", | ||
| 1127 | channel->channel, ev_sub_data); | ||
| 1128 | break; | ||
| 1129 | case RX_RECOVERY_EV_DECODE: | ||
| 1130 | EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " | ||
| 1131 | "Resetting.\n", channel->channel); | ||
| 1132 | efx_schedule_reset(efx, | ||
| 1133 | EFX_WORKAROUND_6555(efx) ? | ||
| 1134 | RESET_TYPE_RX_RECOVERY : | ||
| 1135 | RESET_TYPE_DISABLE); | ||
| 1136 | break; | ||
| 1137 | case RX_DSC_ERROR_EV_DECODE: | ||
| 1138 | EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." | ||
| 1139 | " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
| 1140 | efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); | ||
| 1141 | break; | ||
| 1142 | case TX_DSC_ERROR_EV_DECODE: | ||
| 1143 | EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." | ||
| 1144 | " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); | ||
| 1145 | efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); | ||
| 1146 | break; | ||
| 1147 | default: | ||
| 1148 | EFX_TRACE(efx, "channel %d unknown driver event code %d " | ||
| 1149 | "data %04x\n", channel->channel, ev_sub_code, | ||
| 1150 | ev_sub_data); | ||
| 1151 | break; | ||
| 1152 | } | ||
| 1153 | } | ||
| 1154 | |||
| 1155 | int falcon_process_eventq(struct efx_channel *channel, int *rx_quota) | ||
| 1156 | { | ||
| 1157 | unsigned int read_ptr; | ||
| 1158 | efx_qword_t event, *p_event; | ||
| 1159 | int ev_code; | ||
| 1160 | int rxq; | ||
| 1161 | int rxdmaqs = 0; | ||
| 1162 | |||
| 1163 | read_ptr = channel->eventq_read_ptr; | ||
| 1164 | |||
| 1165 | do { | ||
| 1166 | p_event = falcon_event(channel, read_ptr); | ||
| 1167 | event = *p_event; | ||
| 1168 | |||
| 1169 | if (!falcon_event_present(&event)) | ||
| 1170 | /* End of events */ | ||
| 1171 | break; | ||
| 1172 | |||
| 1173 | EFX_TRACE(channel->efx, "channel %d event is "EFX_QWORD_FMT"\n", | ||
| 1174 | channel->channel, EFX_QWORD_VAL(event)); | ||
| 1175 | |||
| 1176 | /* Clear this event by marking it all ones */ | ||
| 1177 | EFX_SET_QWORD(*p_event); | ||
| 1178 | |||
| 1179 | ev_code = EFX_QWORD_FIELD(event, EV_CODE); | ||
| 1180 | |||
| 1181 | switch (ev_code) { | ||
| 1182 | case RX_IP_EV_DECODE: | ||
| 1183 | rxq = falcon_handle_rx_event(channel, &event); | ||
| 1184 | rxdmaqs |= (1 << rxq); | ||
| 1185 | (*rx_quota)--; | ||
| 1186 | break; | ||
| 1187 | case TX_IP_EV_DECODE: | ||
| 1188 | falcon_handle_tx_event(channel, &event); | ||
| 1189 | break; | ||
| 1190 | case DRV_GEN_EV_DECODE: | ||
| 1191 | channel->eventq_magic | ||
| 1192 | = EFX_QWORD_FIELD(event, EVQ_MAGIC); | ||
| 1193 | EFX_LOG(channel->efx, "channel %d received generated " | ||
| 1194 | "event "EFX_QWORD_FMT"\n", channel->channel, | ||
| 1195 | EFX_QWORD_VAL(event)); | ||
| 1196 | break; | ||
| 1197 | case GLOBAL_EV_DECODE: | ||
| 1198 | falcon_handle_global_event(channel, &event); | ||
| 1199 | break; | ||
| 1200 | case DRIVER_EV_DECODE: | ||
| 1201 | falcon_handle_driver_event(channel, &event); | ||
| 1202 | break; | ||
| 1203 | default: | ||
| 1204 | EFX_ERR(channel->efx, "channel %d unknown event type %d" | ||
| 1205 | " (data " EFX_QWORD_FMT ")\n", channel->channel, | ||
| 1206 | ev_code, EFX_QWORD_VAL(event)); | ||
| 1207 | } | ||
| 1208 | |||
| 1209 | /* Increment read pointer */ | ||
| 1210 | read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; | ||
| 1211 | |||
| 1212 | } while (*rx_quota); | ||
| 1213 | |||
| 1214 | channel->eventq_read_ptr = read_ptr; | ||
| 1215 | return rxdmaqs; | ||
| 1216 | } | ||
| 1217 | |||
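The value returned by falcon_process_eventq() is a bitmask with bit N set when RX queue N produced at least one event in this pass. Here is a hedged sketch of how a caller might walk that mask; example_refill_rx_queue() is a hypothetical placeholder, not a driver API.

/* Illustrative sketch only: consuming the rxdmaqs bitmask. */
static void example_refill_rx_queue(int queue)
{
	(void)queue;                            /* hypothetical placeholder */
}

static void example_service_rx_queues(int rxdmaqs)
{
	int queue = 0;

	while (rxdmaqs) {
		if (rxdmaqs & 1)
			example_refill_rx_queue(queue);
		rxdmaqs >>= 1;
		queue++;
	}
}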
| 1218 | void falcon_set_int_moderation(struct efx_channel *channel) | ||
| 1219 | { | ||
| 1220 | efx_dword_t timer_cmd; | ||
| 1221 | struct efx_nic *efx = channel->efx; | ||
| 1222 | |||
| 1223 | /* Set timer register */ | ||
| 1224 | if (channel->irq_moderation) { | ||
| 1225 | /* Round to the resolution supported by the hardware. The value | ||
| 1226 | * programmed is zero-based, so the actual interrupt moderation | ||
| 1227 | * achieved is ((x + 1) * res). | ||
| 1228 | */ | ||
| 1229 | unsigned int res = 5; | ||
| 1230 | channel->irq_moderation -= (channel->irq_moderation % res); | ||
| 1231 | if (channel->irq_moderation < res) | ||
| 1232 | channel->irq_moderation = res; | ||
| 1233 | EFX_POPULATE_DWORD_2(timer_cmd, | ||
| 1234 | TIMER_MODE, TIMER_MODE_INT_HLDOFF, | ||
| 1235 | TIMER_VAL, | ||
| 1236 | (channel->irq_moderation / res) - 1); | ||
| 1237 | } else { | ||
| 1238 | EFX_POPULATE_DWORD_2(timer_cmd, | ||
| 1239 | TIMER_MODE, TIMER_MODE_DIS, | ||
| 1240 | TIMER_VAL, 0); | ||
| 1241 | } | ||
| 1242 | falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, | ||
| 1243 | channel->evqnum); | ||
| 1244 | |||
| 1245 | } | ||
| 1246 | |||
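Worked numbers for the rounding comment above, assuming the same res of 5: a requested moderation of 12 rounds down to 10, TIMER_VAL is programmed as 10 / 5 - 1 = 1, and the achieved moderation is (1 + 1) * 5 = 10.

/* Illustrative sketch only: the rounding performed by falcon_set_int_moderation(). */
static unsigned example_round_moderation(unsigned requested, unsigned res)
{
	unsigned rounded = requested - (requested % res);

	if (rounded < res)
		rounded = res;
	return rounded;         /* TIMER_VAL is then (rounded / res) - 1 */
}

/* example_round_moderation(12, 5) == 10  ->  TIMER_VAL 1  ->  (1 + 1) * 5 = 10 */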
| 1247 | /* Allocate buffer table entries for event queue */ | ||
| 1248 | int falcon_probe_eventq(struct efx_channel *channel) | ||
| 1249 | { | ||
| 1250 | struct efx_nic *efx = channel->efx; | ||
| 1251 | unsigned int evq_size; | ||
| 1252 | |||
| 1253 | evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); | ||
| 1254 | return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); | ||
| 1255 | } | ||
| 1256 | |||
| 1257 | int falcon_init_eventq(struct efx_channel *channel) | ||
| 1258 | { | ||
| 1259 | efx_oword_t evq_ptr; | ||
| 1260 | struct efx_nic *efx = channel->efx; | ||
| 1261 | int rc; | ||
| 1262 | |||
| 1263 | EFX_LOG(efx, "channel %d event queue in special buffers %d-%d\n", | ||
| 1264 | channel->channel, channel->eventq.index, | ||
| 1265 | channel->eventq.index + channel->eventq.entries - 1); | ||
| 1266 | |||
| 1267 | /* Pin event queue buffer */ | ||
| 1268 | rc = falcon_init_special_buffer(efx, &channel->eventq); | ||
| 1269 | if (rc) | ||
| 1270 | return rc; | ||
| 1271 | |||
| 1272 | /* Fill event queue with all ones (i.e. empty events) */ | ||
| 1273 | memset(channel->eventq.addr, 0xff, channel->eventq.len); | ||
| 1274 | |||
| 1275 | /* Push event queue to card */ | ||
| 1276 | EFX_POPULATE_OWORD_3(evq_ptr, | ||
| 1277 | EVQ_EN, 1, | ||
| 1278 | EVQ_SIZE, FALCON_EVQ_ORDER, | ||
| 1279 | EVQ_BUF_BASE_ID, channel->eventq.index); | ||
| 1280 | falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, | ||
| 1281 | channel->evqnum); | ||
| 1282 | |||
| 1283 | falcon_set_int_moderation(channel); | ||
| 1284 | |||
| 1285 | return 0; | ||
| 1286 | } | ||
| 1287 | |||
| 1288 | void falcon_fini_eventq(struct efx_channel *channel) | ||
| 1289 | { | ||
| 1290 | efx_oword_t eventq_ptr; | ||
| 1291 | struct efx_nic *efx = channel->efx; | ||
| 1292 | |||
| 1293 | /* Remove event queue from card */ | ||
| 1294 | EFX_ZERO_OWORD(eventq_ptr); | ||
| 1295 | falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, | ||
| 1296 | channel->evqnum); | ||
| 1297 | |||
| 1298 | /* Unpin event queue */ | ||
| 1299 | falcon_fini_special_buffer(efx, &channel->eventq); | ||
| 1300 | } | ||
| 1301 | |||
| 1302 | /* Free buffers backing event queue */ | ||
| 1303 | void falcon_remove_eventq(struct efx_channel *channel) | ||
| 1304 | { | ||
| 1305 | falcon_free_special_buffer(channel->efx, &channel->eventq); | ||
| 1306 | } | ||
| 1307 | |||
| 1308 | |||
| 1309 | /* Generates a test event on the event queue. A subsequent call to | ||
| 1310 | * process_eventq() should pick up the event and place the value of | ||
| 1311 | * "magic" into channel->eventq_magic; | ||
| 1312 | */ | ||
| 1313 | void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic) | ||
| 1314 | { | ||
| 1315 | efx_qword_t test_event; | ||
| 1316 | |||
| 1317 | EFX_POPULATE_QWORD_2(test_event, | ||
| 1318 | EV_CODE, DRV_GEN_EV_DECODE, | ||
| 1319 | EVQ_MAGIC, magic); | ||
| 1320 | falcon_generate_event(channel, &test_event); | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | |||
| 1324 | /************************************************************************** | ||
| 1325 | * | ||
| 1326 | * Falcon hardware interrupts | ||
| 1327 | * The hardware interrupt handler does very little work; all the event | ||
| 1328 | * queue processing is carried out by per-channel tasklets. | ||
| 1329 | * | ||
| 1330 | **************************************************************************/ | ||
| 1331 | |||
| 1332 | /* Enable/disable/generate Falcon interrupts */ | ||
| 1333 | static inline void falcon_interrupts(struct efx_nic *efx, int enabled, | ||
| 1334 | int force) | ||
| 1335 | { | ||
| 1336 | efx_oword_t int_en_reg_ker; | ||
| 1337 | |||
| 1338 | EFX_POPULATE_OWORD_2(int_en_reg_ker, | ||
| 1339 | KER_INT_KER, force, | ||
| 1340 | DRV_INT_EN_KER, enabled); | ||
| 1341 | falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); | ||
| 1342 | } | ||
| 1343 | |||
| 1344 | void falcon_enable_interrupts(struct efx_nic *efx) | ||
| 1345 | { | ||
| 1346 | efx_oword_t int_adr_reg_ker; | ||
| 1347 | struct efx_channel *channel; | ||
| 1348 | |||
| 1349 | EFX_ZERO_OWORD(*((efx_oword_t *) efx->irq_status.addr)); | ||
| 1350 | wmb(); /* Ensure interrupt vector is clear before interrupts enabled */ | ||
| 1351 | |||
| 1352 | /* Program address */ | ||
| 1353 | EFX_POPULATE_OWORD_2(int_adr_reg_ker, | ||
| 1354 | NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), | ||
| 1355 | INT_ADR_KER, efx->irq_status.dma_addr); | ||
| 1356 | falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); | ||
| 1357 | |||
| 1358 | /* Enable interrupts */ | ||
| 1359 | falcon_interrupts(efx, 1, 0); | ||
| 1360 | |||
| 1361 | /* Force processing of all the channels to get the EVQ RPTRs up to | ||
| 1362 | date */ | ||
| 1363 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 1364 | efx_schedule_channel(channel); | ||
| 1365 | } | ||
| 1366 | |||
| 1367 | void falcon_disable_interrupts(struct efx_nic *efx) | ||
| 1368 | { | ||
| 1369 | /* Disable interrupts */ | ||
| 1370 | falcon_interrupts(efx, 0, 0); | ||
| 1371 | } | ||
| 1372 | |||
| 1373 | /* Generate a Falcon test interrupt | ||
| 1374 | * Interrupt must already have been enabled, otherwise nasty things | ||
| 1375 | * may happen. | ||
| 1376 | */ | ||
| 1377 | void falcon_generate_interrupt(struct efx_nic *efx) | ||
| 1378 | { | ||
| 1379 | falcon_interrupts(efx, 1, 1); | ||
| 1380 | } | ||
| 1381 | |||
| 1382 | /* Acknowledge a legacy interrupt from Falcon | ||
| 1383 | * | ||
| 1384 | * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. | ||
| 1385 | * | ||
| 1386 | * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the | ||
| 1387 | * BIU. Interrupt acknowledge is read-sensitive, so we write instead | ||
| 1388 | * (then read back to ensure the BIU collector is flushed). | ||
| 1389 | * | ||
| 1390 | * NB most hardware supports MSI interrupts | ||
| 1391 | */ | ||
| 1392 | static inline void falcon_irq_ack_a1(struct efx_nic *efx) | ||
| 1393 | { | ||
| 1394 | efx_dword_t reg; | ||
| 1395 | |||
| 1396 | EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); | ||
| 1397 | falcon_writel(efx, ®, INT_ACK_REG_KER_A1); | ||
| 1398 | falcon_readl(efx, ®, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); | ||
| 1399 | } | ||
| 1400 | |||
| 1401 | /* Process a fatal interrupt | ||
| 1402 | * Disable bus mastering ASAP and schedule a reset | ||
| 1403 | */ | ||
| 1404 | static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx) | ||
| 1405 | { | ||
| 1406 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
| 1407 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | ||
| 1408 | efx_oword_t fatal_intr; | ||
| 1409 | int error, mem_perr; | ||
| 1410 | static int n_int_errors; | ||
| 1411 | |||
| 1412 | falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); | ||
| 1413 | error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); | ||
| 1414 | |||
| 1415 | EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " | ||
| 1416 | EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), | ||
| 1417 | EFX_OWORD_VAL(fatal_intr), | ||
| 1418 | error ? "disabling bus mastering" : "no recognised error"); | ||
| 1419 | if (error == 0) | ||
| 1420 | goto out; | ||
| 1421 | |||
| 1422 | /* If this is a memory parity error dump which blocks are offending */ | ||
| 1423 | mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); | ||
| 1424 | if (mem_perr) { | ||
| 1425 | efx_oword_t reg; | ||
| 1426 | falcon_read(efx, ®, MEM_STAT_REG_KER); | ||
| 1427 | EFX_ERR(efx, "SYSTEM ERROR: memory parity error " | ||
| 1428 | EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); | ||
| 1429 | } | ||
| 1430 | |||
| 1431 | /* Disable DMA bus mastering on both devices */ | ||
| 1432 | pci_disable_device(efx->pci_dev); | ||
| 1433 | if (FALCON_IS_DUAL_FUNC(efx)) | ||
| 1434 | pci_disable_device(nic_data->pci_dev2); | ||
| 1435 | |||
| 1436 | if (++n_int_errors < FALCON_MAX_INT_ERRORS) { | ||
| 1437 | EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); | ||
| 1438 | efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); | ||
| 1439 | } else { | ||
| 1440 | EFX_ERR(efx, "SYSTEM ERROR - max number of errors seen." | ||
| 1441 | "NIC will be disabled\n"); | ||
| 1442 | efx_schedule_reset(efx, RESET_TYPE_DISABLE); | ||
| 1443 | } | ||
| 1444 | out: | ||
| 1445 | return IRQ_HANDLED; | ||
| 1446 | } | ||
| 1447 | |||
| 1448 | /* Handle a legacy interrupt from Falcon | ||
| 1449 | * Acknowledges the interrupt and schedules event queue processing. | ||
| 1450 | */ | ||
| 1451 | static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id) | ||
| 1452 | { | ||
| 1453 | struct efx_nic *efx = (struct efx_nic *)dev_id; | ||
| 1454 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | ||
| 1455 | struct efx_channel *channel; | ||
| 1456 | efx_dword_t reg; | ||
| 1457 | u32 queues; | ||
| 1458 | int syserr; | ||
| 1459 | |||
| 1460 | /* Read the ISR which also ACKs the interrupts */ | ||
| 1461 | falcon_readl(efx, ®, INT_ISR0_B0); | ||
| 1462 | queues = EFX_EXTRACT_DWORD(reg, 0, 31); | ||
| 1463 | |||
| 1464 | /* Check to see if we have a serious error condition */ | ||
| 1465 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | ||
| 1466 | if (unlikely(syserr)) | ||
| 1467 | return falcon_fatal_interrupt(efx); | ||
| 1468 | |||
| 1469 | if (queues == 0) | ||
| 1470 | return IRQ_NONE; | ||
| 1471 | |||
| 1472 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
| 1473 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n", | ||
| 1474 | irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg)); | ||
| 1475 | |||
| 1476 | /* Schedule processing of any interrupting queues */ | ||
| 1477 | channel = &efx->channel[0]; | ||
| 1478 | while (queues) { | ||
| 1479 | if (queues & 0x01) | ||
| 1480 | efx_schedule_channel(channel); | ||
| 1481 | channel++; | ||
| 1482 | queues >>= 1; | ||
| 1483 | } | ||
| 1484 | |||
| 1485 | return IRQ_HANDLED; | ||
| 1486 | } | ||
| 1487 | |||
| 1488 | |||
| 1489 | static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) | ||
| 1490 | { | ||
| 1491 | struct efx_nic *efx = (struct efx_nic *)dev_id; | ||
| 1492 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | ||
| 1493 | struct efx_channel *channel; | ||
| 1494 | int syserr; | ||
| 1495 | int queues; | ||
| 1496 | |||
| 1497 | /* Check to see if this is our interrupt. If it isn't, we | ||
| 1498 | * exit without having touched the hardware. | ||
| 1499 | */ | ||
| 1500 | if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { | ||
| 1501 | EFX_TRACE(efx, "IRQ %d on CPU %d not for me\n", irq, | ||
| 1502 | raw_smp_processor_id()); | ||
| 1503 | return IRQ_NONE; | ||
| 1504 | } | ||
| 1505 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
| 1506 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
| 1507 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
| 1508 | |||
| 1509 | /* Check to see if we have a serious error condition */ | ||
| 1510 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | ||
| 1511 | if (unlikely(syserr)) | ||
| 1512 | return falcon_fatal_interrupt(efx); | ||
| 1513 | |||
| 1514 | /* Determine interrupting queues, clear interrupt status | ||
| 1515 | * register and acknowledge the device interrupt. | ||
| 1516 | */ | ||
| 1517 | BUILD_BUG_ON(INT_EVQS_WIDTH > EFX_MAX_CHANNELS); | ||
| 1518 | queues = EFX_OWORD_FIELD(*int_ker, INT_EVQS); | ||
| 1519 | EFX_ZERO_OWORD(*int_ker); | ||
| 1520 | wmb(); /* Ensure the vector is cleared before interrupt ack */ | ||
| 1521 | falcon_irq_ack_a1(efx); | ||
| 1522 | |||
| 1523 | /* Schedule processing of any interrupting queues */ | ||
| 1524 | channel = &efx->channel[0]; | ||
| 1525 | while (queues) { | ||
| 1526 | if (queues & 0x01) | ||
| 1527 | efx_schedule_channel(channel); | ||
| 1528 | channel++; | ||
| 1529 | queues >>= 1; | ||
| 1530 | } | ||
| 1531 | |||
| 1532 | return IRQ_HANDLED; | ||
| 1533 | } | ||
| 1534 | |||
| 1535 | /* Handle an MSI interrupt from Falcon | ||
| 1536 | * | ||
| 1537 | * Handle an MSI hardware interrupt. This routine schedules event | ||
| 1538 | * queue processing. No interrupt acknowledgement cycle is necessary. | ||
| 1539 | * Also, we never need to check that the interrupt is for us, since | ||
| 1540 | * MSI interrupts cannot be shared. | ||
| 1541 | */ | ||
| 1542 | static irqreturn_t falcon_msi_interrupt(int irq, void *dev_id) | ||
| 1543 | { | ||
| 1544 | struct efx_channel *channel = (struct efx_channel *)dev_id; | ||
| 1545 | struct efx_nic *efx = channel->efx; | ||
| 1546 | efx_oword_t *int_ker = (efx_oword_t *) efx->irq_status.addr; | ||
| 1547 | int syserr; | ||
| 1548 | |||
| 1549 | efx->last_irq_cpu = raw_smp_processor_id(); | ||
| 1550 | EFX_TRACE(efx, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", | ||
| 1551 | irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); | ||
| 1552 | |||
| 1553 | /* Check to see if we have a serious error condition */ | ||
| 1554 | syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); | ||
| 1555 | if (unlikely(syserr)) | ||
| 1556 | return falcon_fatal_interrupt(efx); | ||
| 1557 | |||
| 1558 | /* Schedule processing of the channel */ | ||
| 1559 | efx_schedule_channel(channel); | ||
| 1560 | |||
| 1561 | return IRQ_HANDLED; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | |||
| 1565 | /* Setup RSS indirection table. | ||
| 1566 | * This maps from the hash value of the packet to RXQ | ||
| 1567 | */ | ||
| 1568 | static void falcon_setup_rss_indir_table(struct efx_nic *efx) | ||
| 1569 | { | ||
| 1570 | int i = 0; | ||
| 1571 | unsigned long offset; | ||
| 1572 | efx_dword_t dword; | ||
| 1573 | |||
| 1574 | if (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 1575 | return; | ||
| 1576 | |||
| 1577 | for (offset = RX_RSS_INDIR_TBL_B0; | ||
| 1578 | offset < RX_RSS_INDIR_TBL_B0 + 0x800; | ||
| 1579 | offset += 0x10) { | ||
| 1580 | EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, | ||
| 1581 | i % efx->rss_queues); | ||
| 1582 | falcon_writel(efx, &dword, offset); | ||
| 1583 | i++; | ||
| 1584 | } | ||
| 1585 | } | ||
| 1586 | |||
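The loop above writes 0x800 / 0x10 = 128 indirection entries, cycling entry i through queue i % rss_queues. Below is a standalone sketch of the same fill pattern; with an assumed rss_queues of 4 the entries read 0, 1, 2, 3, 0, 1, 2, 3, and so on.

/* Illustrative sketch only: the RSS indirection fill pattern. */
static void example_fill_indir(unsigned char *table, int entries, int rss_queues)
{
	int i;

	for (i = 0; i < entries; i++)
		table[i] = (unsigned char)(i % rss_queues);
}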
| 1587 | /* Hook interrupt handler(s) | ||
| 1588 | * Try MSI and then legacy interrupts. | ||
| 1589 | */ | ||
| 1590 | int falcon_init_interrupt(struct efx_nic *efx) | ||
| 1591 | { | ||
| 1592 | struct efx_channel *channel; | ||
| 1593 | int rc; | ||
| 1594 | |||
| 1595 | if (!EFX_INT_MODE_USE_MSI(efx)) { | ||
| 1596 | irq_handler_t handler; | ||
| 1597 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 1598 | handler = falcon_legacy_interrupt_b0; | ||
| 1599 | else | ||
| 1600 | handler = falcon_legacy_interrupt_a1; | ||
| 1601 | |||
| 1602 | rc = request_irq(efx->legacy_irq, handler, IRQF_SHARED, | ||
| 1603 | efx->name, efx); | ||
| 1604 | if (rc) { | ||
| 1605 | EFX_ERR(efx, "failed to hook legacy IRQ %d\n", | ||
| 1606 | efx->pci_dev->irq); | ||
| 1607 | goto fail1; | ||
| 1608 | } | ||
| 1609 | return 0; | ||
| 1610 | } | ||
| 1611 | |||
| 1612 | /* Hook MSI or MSI-X interrupt */ | ||
| 1613 | efx_for_each_channel_with_interrupt(channel, efx) { | ||
| 1614 | rc = request_irq(channel->irq, falcon_msi_interrupt, | ||
| 1615 | IRQF_PROBE_SHARED, /* Not shared */ | ||
| 1616 | efx->name, channel); | ||
| 1617 | if (rc) { | ||
| 1618 | EFX_ERR(efx, "failed to hook IRQ %d\n", channel->irq); | ||
| 1619 | goto fail2; | ||
| 1620 | } | ||
| 1621 | } | ||
| 1622 | |||
| 1623 | return 0; | ||
| 1624 | |||
| 1625 | fail2: | ||
| 1626 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 1627 | free_irq(channel->irq, channel); | ||
| 1628 | fail1: | ||
| 1629 | return rc; | ||
| 1630 | } | ||
| 1631 | |||
| 1632 | void falcon_fini_interrupt(struct efx_nic *efx) | ||
| 1633 | { | ||
| 1634 | struct efx_channel *channel; | ||
| 1635 | efx_oword_t reg; | ||
| 1636 | |||
| 1637 | /* Disable MSI/MSI-X interrupts */ | ||
| 1638 | efx_for_each_channel_with_interrupt(channel, efx) | ||
| 1639 | if (channel->irq) | ||
| 1640 | free_irq(channel->irq, channel); | ||
| 1641 | |||
| 1642 | /* ACK legacy interrupt */ | ||
| 1643 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 1644 | falcon_read(efx, ®, INT_ISR0_B0); | ||
| 1645 | else | ||
| 1646 | falcon_irq_ack_a1(efx); | ||
| 1647 | |||
| 1648 | /* Disable legacy interrupt */ | ||
| 1649 | if (efx->legacy_irq) | ||
| 1650 | free_irq(efx->legacy_irq, efx); | ||
| 1651 | } | ||
| 1652 | |||
| 1653 | /************************************************************************** | ||
| 1654 | * | ||
| 1655 | * EEPROM/flash | ||
| 1656 | * | ||
| 1657 | ************************************************************************** | ||
| 1658 | */ | ||
| 1659 | |||
| 1660 | #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) | ||
| 1661 | |||
| 1662 | /* Wait for SPI command completion */ | ||
| 1663 | static int falcon_spi_wait(struct efx_nic *efx) | ||
| 1664 | { | ||
| 1665 | efx_oword_t reg; | ||
| 1666 | int cmd_en, timer_active; | ||
| 1667 | int count; | ||
| 1668 | |||
| 1669 | count = 0; | ||
| 1670 | do { | ||
| 1671 | falcon_read(efx, ®, EE_SPI_HCMD_REG_KER); | ||
| 1672 | cmd_en = EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN); | ||
| 1673 | timer_active = EFX_OWORD_FIELD(reg, EE_WR_TIMER_ACTIVE); | ||
| 1674 | if (!cmd_en && !timer_active) | ||
| 1675 | return 0; | ||
| 1676 | udelay(10); | ||
| 1677 | } while (++count < 10000); /* wait up to 100ms */ | ||
| 1678 | EFX_ERR(efx, "timed out waiting for SPI\n"); | ||
| 1679 | return -ETIMEDOUT; | ||
| 1680 | } | ||
| 1681 | |||
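falcon_spi_wait() above polls the command register until both the command-enable and write-timer bits clear, giving up after 10000 iterations of 10us, i.e. roughly 100ms. A standalone model of that poll-until-idle pattern follows; example_hw_busy() is a hypothetical stand-in for the register read.

/* Illustrative sketch only: the poll-until-idle pattern of falcon_spi_wait(). */
static int example_spi_wait(int (*example_hw_busy)(void))
{
	int count;

	for (count = 0; count < 10000; count++) {       /* 10000 * 10us ~= 100ms */
		if (!example_hw_busy())
			return 0;
		/* the real code calls udelay(10) here */
	}
	return -1;                                      /* -ETIMEDOUT in the driver */
}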
| 1682 | static int | ||
| 1683 | falcon_spi_read(struct efx_nic *efx, int device_id, unsigned int command, | ||
| 1684 | unsigned int address, unsigned int addr_len, | ||
| 1685 | void *data, unsigned int len) | ||
| 1686 | { | ||
| 1687 | efx_oword_t reg; | ||
| 1688 | int rc; | ||
| 1689 | |||
| 1690 | BUG_ON(len > FALCON_SPI_MAX_LEN); | ||
| 1691 | |||
| 1692 | /* Check SPI not currently being accessed */ | ||
| 1693 | rc = falcon_spi_wait(efx); | ||
| 1694 | if (rc) | ||
| 1695 | return rc; | ||
| 1696 | |||
| 1697 | /* Program address register */ | ||
| 1698 | EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); | ||
| 1699 | falcon_write(efx, ®, EE_SPI_HADR_REG_KER); | ||
| 1700 | |||
| 1701 | /* Issue read command */ | ||
| 1702 | EFX_POPULATE_OWORD_7(reg, | ||
| 1703 | EE_SPI_HCMD_CMD_EN, 1, | ||
| 1704 | EE_SPI_HCMD_SF_SEL, device_id, | ||
| 1705 | EE_SPI_HCMD_DABCNT, len, | ||
| 1706 | EE_SPI_HCMD_READ, EE_SPI_READ, | ||
| 1707 | EE_SPI_HCMD_DUBCNT, 0, | ||
| 1708 | EE_SPI_HCMD_ADBCNT, addr_len, | ||
| 1709 | EE_SPI_HCMD_ENC, command); | ||
| 1710 | falcon_write(efx, ®, EE_SPI_HCMD_REG_KER); | ||
| 1711 | |||
| 1712 | /* Wait for read to complete */ | ||
| 1713 | rc = falcon_spi_wait(efx); | ||
| 1714 | if (rc) | ||
| 1715 | return rc; | ||
| 1716 | |||
| 1717 | /* Read data */ | ||
| 1718 | falcon_read(efx, ®, EE_SPI_HDATA_REG_KER); | ||
| 1719 | memcpy(data, ®, len); | ||
| 1720 | return 0; | ||
| 1721 | } | ||
| 1722 | |||
| 1723 | /************************************************************************** | ||
| 1724 | * | ||
| 1725 | * MAC wrapper | ||
| 1726 | * | ||
| 1727 | ************************************************************************** | ||
| 1728 | */ | ||
| 1729 | void falcon_drain_tx_fifo(struct efx_nic *efx) | ||
| 1730 | { | ||
| 1731 | efx_oword_t temp; | ||
| 1732 | int count; | ||
| 1733 | |||
| 1734 | if (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 1735 | return; | ||
| 1736 | |||
| 1737 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | ||
| 1738 | /* There is no point in draining more than once */ | ||
| 1739 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | ||
| 1740 | return; | ||
| 1741 | |||
| 1742 | /* MAC stats will fail whilst the TX fifo is draining. Serialise | ||
| 1743 | * the drain sequence with the statistics fetch */ | ||
| 1744 | spin_lock(&efx->stats_lock); | ||
| 1745 | |||
| 1746 | EFX_SET_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0, 1); | ||
| 1747 | falcon_write(efx, &temp, MAC0_CTRL_REG_KER); | ||
| 1748 | |||
| 1749 | /* Reset the MAC and EM block. */ | ||
| 1750 | falcon_read(efx, &temp, GLB_CTL_REG_KER); | ||
| 1751 | EFX_SET_OWORD_FIELD(temp, RST_XGTX, 1); | ||
| 1752 | EFX_SET_OWORD_FIELD(temp, RST_XGRX, 1); | ||
| 1753 | EFX_SET_OWORD_FIELD(temp, RST_EM, 1); | ||
| 1754 | falcon_write(efx, &temp, GLB_CTL_REG_KER); | ||
| 1755 | |||
| 1756 | count = 0; | ||
| 1757 | while (1) { | ||
| 1758 | falcon_read(efx, &temp, GLB_CTL_REG_KER); | ||
| 1759 | if (!EFX_OWORD_FIELD(temp, RST_XGTX) && | ||
| 1760 | !EFX_OWORD_FIELD(temp, RST_XGRX) && | ||
| 1761 | !EFX_OWORD_FIELD(temp, RST_EM)) { | ||
| 1762 | EFX_LOG(efx, "Completed MAC reset after %d loops\n", | ||
| 1763 | count); | ||
| 1764 | break; | ||
| 1765 | } | ||
| 1766 | if (count > 20) { | ||
| 1767 | EFX_ERR(efx, "MAC reset failed\n"); | ||
| 1768 | break; | ||
| 1769 | } | ||
| 1770 | count++; | ||
| 1771 | udelay(10); | ||
| 1772 | } | ||
| 1773 | |||
| 1774 | spin_unlock(&efx->stats_lock); | ||
| 1775 | |||
| 1776 | /* If we've reset the EM block and the link is up, then | ||
| 1777 | * we'll have to kick the XAUI link so the PHY can recover */ | ||
| 1778 | if (efx->link_up && EFX_WORKAROUND_5147(efx)) | ||
| 1779 | falcon_reset_xaui(efx); | ||
| 1780 | } | ||
| 1781 | |||
| 1782 | void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) | ||
| 1783 | { | ||
| 1784 | efx_oword_t temp; | ||
| 1785 | |||
| 1786 | if (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 1787 | return; | ||
| 1788 | |||
| 1789 | /* Isolate the MAC -> RX */ | ||
| 1790 | falcon_read(efx, &temp, RX_CFG_REG_KER); | ||
| 1791 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 0); | ||
| 1792 | falcon_write(efx, &temp, RX_CFG_REG_KER); | ||
| 1793 | |||
| 1794 | if (!efx->link_up) | ||
| 1795 | falcon_drain_tx_fifo(efx); | ||
| 1796 | } | ||
| 1797 | |||
| 1798 | void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) | ||
| 1799 | { | ||
| 1800 | efx_oword_t reg; | ||
| 1801 | int link_speed; | ||
| 1802 | unsigned int tx_fc; | ||
| 1803 | |||
| 1804 | if (efx->link_options & GM_LPA_10000) | ||
| 1805 | link_speed = 0x3; | ||
| 1806 | else if (efx->link_options & GM_LPA_1000) | ||
| 1807 | link_speed = 0x2; | ||
| 1808 | else if (efx->link_options & GM_LPA_100) | ||
| 1809 | link_speed = 0x1; | ||
| 1810 | else | ||
| 1811 | link_speed = 0x0; | ||
| 1812 | /* MAC_LINK_STATUS controls MAC backpressure but doesn't work | ||
| 1813 | * as advertised. Disable it to ensure packets are not held | ||
| 1814 | * indefinitely and the TX queue can be flushed at any point | ||
| 1815 | * while the link is down. */ | ||
| 1816 | EFX_POPULATE_OWORD_5(reg, | ||
| 1817 | MAC_XOFF_VAL, 0xffff /* max pause time */, | ||
| 1818 | MAC_BCAD_ACPT, 1, | ||
| 1819 | MAC_UC_PROM, efx->promiscuous, | ||
| 1820 | MAC_LINK_STATUS, 1, /* always set */ | ||
| 1821 | MAC_SPEED, link_speed); | ||
| 1822 | /* On B0, MAC backpressure can be disabled and packets get | ||
| 1823 | * discarded. */ | ||
| 1824 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | ||
| 1825 | EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, | ||
| 1826 | !efx->link_up); | ||
| 1827 | } | ||
| 1828 | |||
| 1829 | falcon_write(efx, ®, MAC0_CTRL_REG_KER); | ||
| 1830 | |||
| 1831 | /* Restore the multicast hash registers. */ | ||
| 1832 | falcon_set_multicast_hash(efx); | ||
| 1833 | |||
| 1834 | /* Transmission of pause frames when RX crosses the threshold is | ||
| 1835 | * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. | ||
| 1836 | * Action on receipt of pause frames is controlled by XM_DIS_FCNTL */ | ||
| 1837 | tx_fc = (efx->flow_control & EFX_FC_TX) ? 1 : 0; | ||
| 1838 | falcon_read(efx, ®, RX_CFG_REG_KER); | ||
| 1839 | EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); | ||
| 1840 | |||
| 1841 | /* Unisolate the MAC -> RX */ | ||
| 1842 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 1843 | EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); | ||
| 1844 | falcon_write(efx, ®, RX_CFG_REG_KER); | ||
| 1845 | } | ||
| 1846 | |||
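For reference, the link_speed value selected at the top of falcon_reconfigure_mac_wrapper() is a 2-bit MAC_SPEED encoding: 0x3 for 10G, 0x2 for 1000M, 0x1 for 100M and 0x0 for anything slower. A trivial standalone restatement of that mapping:

/* Illustrative sketch only: the MAC_SPEED encoding chosen above. */
static int example_mac_speed(int is_10000, int is_1000, int is_100)
{
	if (is_10000)
		return 0x3;
	if (is_1000)
		return 0x2;
	if (is_100)
		return 0x1;
	return 0x0;
}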
| 1847 | int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) | ||
| 1848 | { | ||
| 1849 | efx_oword_t reg; | ||
| 1850 | u32 *dma_done; | ||
| 1851 | int i; | ||
| 1852 | |||
| 1853 | if (disable_dma_stats) | ||
| 1854 | return 0; | ||
| 1855 | |||
| 1856 | /* Statistics fetch will fail if the MAC is in TX drain */ | ||
| 1857 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | ||
| 1858 | efx_oword_t temp; | ||
| 1859 | falcon_read(efx, &temp, MAC0_CTRL_REG_KER); | ||
| 1860 | if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) | ||
| 1861 | return 0; | ||
| 1862 | } | ||
| 1863 | |||
| 1864 | dma_done = (efx->stats_buffer.addr + done_offset); | ||
| 1865 | *dma_done = FALCON_STATS_NOT_DONE; | ||
| 1866 | wmb(); /* ensure done flag is clear */ | ||
| 1867 | |||
| 1868 | /* Initiate DMA transfer of stats */ | ||
| 1869 | EFX_POPULATE_OWORD_2(reg, | ||
| 1870 | MAC_STAT_DMA_CMD, 1, | ||
| 1871 | MAC_STAT_DMA_ADR, | ||
| 1872 | efx->stats_buffer.dma_addr); | ||
| 1873 | falcon_write(efx, ®, MAC0_STAT_DMA_REG_KER); | ||
| 1874 | |||
| 1875 | /* Wait for transfer to complete */ | ||
| 1876 | for (i = 0; i < 400; i++) { | ||
| 1877 | if (*(volatile u32 *)dma_done == FALCON_STATS_DONE) | ||
| 1878 | return 0; | ||
| 1879 | udelay(10); | ||
| 1880 | } | ||
| 1881 | |||
| 1882 | EFX_ERR(efx, "timed out waiting for statistics\n"); | ||
| 1883 | return -ETIMEDOUT; | ||
| 1884 | } | ||
| 1885 | |||
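falcon_dma_stats() clears a done-flag word in the host stats buffer, kicks off the DMA, then polls the flag for up to 400 iterations of 10us, about 4ms. Below is a standalone model of that handshake; the 0 and 1 flag values stand in for FALCON_STATS_NOT_DONE and FALCON_STATS_DONE, whose real encodings are not shown in this hunk, and example_start_dma() is a hypothetical placeholder.

/* Illustrative sketch only: the clear-flag / kick / poll handshake. */
static int example_wait_stats(volatile unsigned *dma_done,
			      void (*example_start_dma)(void))
{
	int i;

	*dma_done = 0;                          /* placeholder for "not done" */
	example_start_dma();                    /* hypothetical DMA kick */
	for (i = 0; i < 400; i++) {             /* 400 * 10us ~= 4ms budget */
		if (*dma_done == 1)             /* placeholder for "done" */
			return 0;
		/* the real code calls udelay(10) here */
	}
	return -1;                              /* -ETIMEDOUT in the driver */
}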
| 1886 | /************************************************************************** | ||
| 1887 | * | ||
| 1888 | * PHY access via GMII | ||
| 1889 | * | ||
| 1890 | ************************************************************************** | ||
| 1891 | */ | ||
| 1892 | |||
| 1893 | /* Use the top bit of the MII PHY id to indicate the PHY type | ||
| 1894 | * (1G/10G), with the remaining bits as the actual PHY id. | ||
| 1895 | * | ||
| 1896 | * This allows us to avoid leaking information from the mii_if_info | ||
| 1897 | * structure into other data structures. | ||
| 1898 | */ | ||
| 1899 | #define FALCON_PHY_ID_ID_WIDTH EFX_WIDTH(MD_PRT_DEV_ADR) | ||
| 1900 | #define FALCON_PHY_ID_ID_MASK ((1 << FALCON_PHY_ID_ID_WIDTH) - 1) | ||
| 1901 | #define FALCON_PHY_ID_WIDTH (FALCON_PHY_ID_ID_WIDTH + 1) | ||
| 1902 | #define FALCON_PHY_ID_MASK ((1 << FALCON_PHY_ID_WIDTH) - 1) | ||
| 1903 | #define FALCON_PHY_ID_10G (1 << (FALCON_PHY_ID_WIDTH - 1)) | ||
| 1904 | |||
| 1905 | |||
| 1906 | /* Packing the clause 45 port and device fields into a single value */ | ||
| 1907 | #define MD_PRT_ADR_COMP_LBN (MD_PRT_ADR_LBN - MD_DEV_ADR_LBN) | ||
| 1908 | #define MD_PRT_ADR_COMP_WIDTH MD_PRT_ADR_WIDTH | ||
| 1909 | #define MD_DEV_ADR_COMP_LBN 0 | ||
| 1910 | #define MD_DEV_ADR_COMP_WIDTH MD_DEV_ADR_WIDTH | ||
| 1911 | |||
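The COMP_LBN/COMP_WIDTH defines above describe packing a clause-45 (port, device) pair into a single field with the device address in the low bits. Here is a hedged sketch of that packing; the 5-bit device width is an assumption for the example, and in the driver the packing is actually done by mdio_clause45_pack(), which is not shown in this hunk.

/* Illustrative sketch only: packing port and device with the device in the low bits. */
static unsigned example_pack_prt_dev(unsigned port, unsigned device)
{
	unsigned dev_width = 5;                 /* assumed MD_DEV_ADR_WIDTH */

	return (port << dev_width) | device;
}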
| 1912 | |||
| 1913 | /* Wait for GMII access to complete */ | ||
| 1914 | static int falcon_gmii_wait(struct efx_nic *efx) | ||
| 1915 | { | ||
| 1916 | efx_dword_t md_stat; | ||
| 1917 | int count; | ||
| 1918 | |||
| 1919 | for (count = 0; count < 1000; count++) { /* wait up to 10ms */ | ||
| 1920 | falcon_readl(efx, &md_stat, MD_STAT_REG_KER); | ||
| 1921 | if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { | ||
| 1922 | if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || | ||
| 1923 | EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { | ||
| 1924 | EFX_ERR(efx, "error from GMII access " | ||
| 1925 | EFX_DWORD_FMT"\n", | ||
| 1926 | EFX_DWORD_VAL(md_stat)); | ||
| 1927 | return -EIO; | ||
| 1928 | } | ||
| 1929 | return 0; | ||
| 1930 | } | ||
| 1931 | udelay(10); | ||
| 1932 | } | ||
| 1933 | EFX_ERR(efx, "timed out waiting for GMII\n"); | ||
| 1934 | return -ETIMEDOUT; | ||
| 1935 | } | ||
| 1936 | |||
| 1937 | /* Writes a GMII register of a PHY connected to Falcon using MDIO. */ | ||
| 1938 | static void falcon_mdio_write(struct net_device *net_dev, int phy_id, | ||
| 1939 | int addr, int value) | ||
| 1940 | { | ||
| 1941 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | ||
| 1942 | unsigned int phy_id2 = phy_id & FALCON_PHY_ID_ID_MASK; | ||
| 1943 | efx_oword_t reg; | ||
| 1944 | |||
| 1945 | /* The 'generic' prt/dev packing in mdio_10g.h is conveniently | ||
| 1946 | * chosen so that the only current user, Falcon, can take the | ||
| 1947 | * packed value and use it directly. | ||
| 1948 | * Fail to build if this assumption is broken. | ||
| 1949 | */ | ||
| 1950 | BUILD_BUG_ON(FALCON_PHY_ID_10G != MDIO45_XPRT_ID_IS10G); | ||
| 1951 | BUILD_BUG_ON(FALCON_PHY_ID_ID_WIDTH != MDIO45_PRT_DEV_WIDTH); | ||
| 1952 | BUILD_BUG_ON(MD_PRT_ADR_COMP_LBN != MDIO45_PRT_ID_COMP_LBN); | ||
| 1953 | BUILD_BUG_ON(MD_DEV_ADR_COMP_LBN != MDIO45_DEV_ID_COMP_LBN); | ||
| 1954 | |||
| 1955 | if (phy_id2 == PHY_ADDR_INVALID) | ||
| 1956 | return; | ||
| 1957 | |||
| 1958 | /* See falcon_mdio_read for an explanation. */ | ||
| 1959 | if (!(phy_id & FALCON_PHY_ID_10G)) { | ||
| 1960 | int mmd = ffs(efx->phy_op->mmds) - 1; | ||
| 1961 | EFX_TRACE(efx, "Fixing erroneous clause22 write\n"); | ||
| 1962 | phy_id2 = mdio_clause45_pack(phy_id2, mmd) | ||
| 1963 | & FALCON_PHY_ID_ID_MASK; | ||
| 1964 | } | ||
| 1965 | |||
| 1966 | EFX_REGDUMP(efx, "writing GMII %d register %02x with %04x\n", phy_id, | ||
| 1967 | addr, value); | ||
| 1968 | |||
| 1969 | spin_lock_bh(&efx->phy_lock); | ||
| 1970 | |||
| 1971 | /* Check MII not currently being accessed */ | ||
| 1972 | if (falcon_gmii_wait(efx) != 0) | ||
| 1973 | goto out; | ||
| 1974 | |||
| 1975 | /* Write the address/ID register */ | ||
| 1976 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | ||
| 1977 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | ||
| 1978 | |||
| 1979 | EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_id2); | ||
| 1980 | falcon_write(efx, ®, MD_ID_REG_KER); | ||
| 1981 | |||
| 1982 | /* Write data */ | ||
| 1983 | EFX_POPULATE_OWORD_1(reg, MD_TXD, value); | ||
| 1984 | falcon_write(efx, ®, MD_TXD_REG_KER); | ||
| 1985 | |||
| 1986 | EFX_POPULATE_OWORD_2(reg, | ||
| 1987 | MD_WRC, 1, | ||
| 1988 | MD_GC, 0); | ||
| 1989 | falcon_write(efx, ®, MD_CS_REG_KER); | ||
| 1990 | |||
| 1991 | /* Wait for data to be written */ | ||
| 1992 | if (falcon_gmii_wait(efx) != 0) { | ||
| 1993 | /* Abort the write operation */ | ||
| 1994 | EFX_POPULATE_OWORD_2(reg, | ||
| 1995 | MD_WRC, 0, | ||
| 1996 | MD_GC, 1); | ||
| 1997 | falcon_write(efx, ®, MD_CS_REG_KER); | ||
| 1998 | udelay(10); | ||
| 1999 | } | ||
| 2000 | |||
| 2001 | out: | ||
| 2002 | spin_unlock_bh(&efx->phy_lock); | ||
| 2003 | } | ||
| 2004 | |||
| 2005 | /* Reads a GMII register from a PHY connected to Falcon. If no value | ||
| 2006 | * could be read, -1 will be returned. */ | ||
| 2007 | static int falcon_mdio_read(struct net_device *net_dev, int phy_id, int addr) | ||
| 2008 | { | ||
| 2009 | struct efx_nic *efx = (struct efx_nic *)net_dev->priv; | ||
| 2010 | unsigned int phy_addr = phy_id & FALCON_PHY_ID_ID_MASK; | ||
| 2011 | efx_oword_t reg; | ||
| 2012 | int value = -1; | ||
| 2013 | |||
| 2014 | if (phy_addr == PHY_ADDR_INVALID) | ||
| 2015 | return -1; | ||
| 2016 | |||
| 2017 | /* Our PHY code knows whether it needs to talk clause 22 (1G) or 45 (10G) | ||
| 2018 | * but the generic Linux code does not make any distinction or have | ||
| 2019 | * any state for this. | ||
| 2020 | * We spot the case where someone tried to talk 22 to a 45 PHY and | ||
| 2021 | * redirect the request to the lowest numbered MMD as a clause45 | ||
| 2022 | * request. This is enough to allow simple queries like id and link | ||
| 2023 | * state to succeed. TODO: We may need to do more in future. | ||
| 2024 | */ | ||
| 2025 | if (!(phy_id & FALCON_PHY_ID_10G)) { | ||
| 2026 | int mmd = ffs(efx->phy_op->mmds) - 1; | ||
| 2027 | EFX_TRACE(efx, "Fixing erroneous clause22 read\n"); | ||
| 2028 | phy_addr = mdio_clause45_pack(phy_addr, mmd) | ||
| 2029 | & FALCON_PHY_ID_ID_MASK; | ||
| 2030 | } | ||
| 2031 | |||
| 2032 | spin_lock_bh(&efx->phy_lock); | ||
| 2033 | |||
| 2034 | /* Check MII not currently being accessed */ | ||
| 2035 | if (falcon_gmii_wait(efx) != 0) | ||
| 2036 | goto out; | ||
| 2037 | |||
| 2038 | EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); | ||
| 2039 | falcon_write(efx, ®, MD_PHY_ADR_REG_KER); | ||
| 2040 | |||
| 2041 | EFX_POPULATE_OWORD_1(reg, MD_PRT_DEV_ADR, phy_addr); | ||
| 2042 | falcon_write(efx, ®, MD_ID_REG_KER); | ||
| 2043 | |||
| 2044 | /* Request data to be read */ | ||
| 2045 | EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); | ||
| 2046 | falcon_write(efx, ®, MD_CS_REG_KER); | ||
| 2047 | |||
| 2048 | /* Wait for data to become available */ | ||
| 2049 | value = falcon_gmii_wait(efx); | ||
| 2050 | if (value == 0) { | ||
| 2051 | falcon_read(efx, ®, MD_RXD_REG_KER); | ||
| 2052 | value = EFX_OWORD_FIELD(reg, MD_RXD); | ||
| 2053 | EFX_REGDUMP(efx, "read from GMII %d register %02x, got %04x\n", | ||
| 2054 | phy_id, addr, value); | ||
| 2055 | } else { | ||
| 2056 | /* Abort the read operation */ | ||
| 2057 | EFX_POPULATE_OWORD_2(reg, | ||
| 2058 | MD_RIC, 0, | ||
| 2059 | MD_GC, 1); | ||
| 2060 | falcon_write(efx, ®, MD_CS_REG_KER); | ||
| 2061 | |||
| 2062 | EFX_LOG(efx, "read from GMII 0x%x register %02x, got " | ||
| 2063 | "error %d\n", phy_id, addr, value); | ||
| 2064 | } | ||
| 2065 | |||
| 2066 | out: | ||
| 2067 | spin_unlock_bh(&efx->phy_lock); | ||
| 2068 | |||
| 2069 | return value; | ||
| 2070 | } | ||
| 2071 | |||
| 2072 | static void falcon_init_mdio(struct mii_if_info *gmii) | ||
| 2073 | { | ||
| 2074 | gmii->mdio_read = falcon_mdio_read; | ||
| 2075 | gmii->mdio_write = falcon_mdio_write; | ||
| 2076 | gmii->phy_id_mask = FALCON_PHY_ID_MASK; | ||
| 2077 | gmii->reg_num_mask = ((1 << EFX_WIDTH(MD_PHY_ADR)) - 1); | ||
| 2078 | } | ||
| 2079 | |||
| 2080 | static int falcon_probe_phy(struct efx_nic *efx) | ||
| 2081 | { | ||
| 2082 | switch (efx->phy_type) { | ||
| 2083 | case PHY_TYPE_10XPRESS: | ||
| 2084 | efx->phy_op = &falcon_tenxpress_phy_ops; | ||
| 2085 | break; | ||
| 2086 | case PHY_TYPE_XFP: | ||
| 2087 | efx->phy_op = &falcon_xfp_phy_ops; | ||
| 2088 | break; | ||
| 2089 | default: | ||
| 2090 | EFX_ERR(efx, "Unknown PHY type %d\n", | ||
| 2091 | efx->phy_type); | ||
| 2092 | return -1; | ||
| 2093 | } | ||
| 2094 | return 0; | ||
| 2095 | } | ||
| 2096 | |||
| 2097 | /* This call is responsible for hooking in the MAC and PHY operations */ | ||
| 2098 | int falcon_probe_port(struct efx_nic *efx) | ||
| 2099 | { | ||
| 2100 | int rc; | ||
| 2101 | |||
| 2102 | /* Hook in PHY operations table */ | ||
| 2103 | rc = falcon_probe_phy(efx); | ||
| 2104 | if (rc) | ||
| 2105 | return rc; | ||
| 2106 | |||
| 2107 | /* Set up GMII structure for PHY */ | ||
| 2108 | efx->mii.supports_gmii = 1; | ||
| 2109 | falcon_init_mdio(&efx->mii); | ||
| 2110 | |||
| 2111 | /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ | ||
| 2112 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 2113 | efx->flow_control = EFX_FC_RX | EFX_FC_TX; | ||
| 2114 | else | ||
| 2115 | efx->flow_control = EFX_FC_RX; | ||
| 2116 | |||
| 2117 | /* Allocate buffer for stats */ | ||
| 2118 | rc = falcon_alloc_buffer(efx, &efx->stats_buffer, | ||
| 2119 | FALCON_MAC_STATS_SIZE); | ||
| 2120 | if (rc) | ||
| 2121 | return rc; | ||
| 2122 | EFX_LOG(efx, "stats buffer at %llx (virt %p phys %lx)\n", | ||
| 2123 | (unsigned long long)efx->stats_buffer.dma_addr, | ||
| 2124 | efx->stats_buffer.addr, | ||
| 2125 | virt_to_phys(efx->stats_buffer.addr)); | ||
| 2126 | |||
| 2127 | return 0; | ||
| 2128 | } | ||
| 2129 | |||
| 2130 | void falcon_remove_port(struct efx_nic *efx) | ||
| 2131 | { | ||
| 2132 | falcon_free_buffer(efx, &efx->stats_buffer); | ||
| 2133 | } | ||
| 2134 | |||
| 2135 | /************************************************************************** | ||
| 2136 | * | ||
| 2137 | * Multicast filtering | ||
| 2138 | * | ||
| 2139 | ************************************************************************** | ||
| 2140 | */ | ||
| 2141 | |||
| 2142 | void falcon_set_multicast_hash(struct efx_nic *efx) | ||
| 2143 | { | ||
| 2144 | union efx_multicast_hash *mc_hash = &efx->multicast_hash; | ||
| 2145 | |||
| 2146 | /* Broadcast packets go through the multicast hash filter. | ||
| 2147 | * ether_crc_le() of the broadcast address is 0xbe2612ff | ||
| 2148 | * so we always add bit 0xff to the mask. | ||
| 2149 | */ | ||
| 2150 | set_bit_le(0xff, mc_hash->byte); | ||
| 2151 | |||
| 2152 | falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); | ||
| 2153 | falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); | ||
| 2154 | } | ||
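The 0xbe2612ff value quoted in the comment is the little-endian Ethernet CRC of ff:ff:ff:ff:ff:ff; the hash bit index is taken from its low 8 bits, hence bit 0xff. Below is a small, self-contained user-space sketch of that calculation (a reflected CRC-32 with seed 0xffffffff and no final inversion, which is the same computation ether_crc_le() performs); the expected 0xbe2612ff result is taken from the driver comment above rather than asserted independently.

    #include <stdint.h>
    #include <stdio.h>

    /* Reflected CRC-32 (poly 0xedb88320), seed 0xffffffff, no final XOR --
     * the same calculation as the kernel's ether_crc_le(). */
    static uint32_t ether_crc_le_demo(const uint8_t *data, int len)
    {
            uint32_t crc = 0xffffffff;
            int i, bit;

            for (i = 0; i < len; i++) {
                    crc ^= data[i];
                    for (bit = 0; bit < 8; bit++)
                            crc = (crc >> 1) ^ (0xedb88320 & -(crc & 1));
            }
            return crc;
    }

    int main(void)
    {
            const uint8_t bcast[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
            uint32_t crc = ether_crc_le_demo(bcast, 6);

            /* The driver comment states this is 0xbe2612ff, so the
             * multicast hash bit used for broadcast is crc & 0xff == 0xff. */
            printf("crc=%08x hash bit=%u\n", crc, crc & 0xff);
            return 0;
    }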
| 2155 | |||
| 2156 | /************************************************************************** | ||
| 2157 | * | ||
| 2158 | * Device reset | ||
| 2159 | * | ||
| 2160 | ************************************************************************** | ||
| 2161 | */ | ||
| 2162 | |||
| 2163 | /* Resets NIC to known state. This routine must be called in process | ||
| 2164 | * context and is allowed to sleep. */ | ||
| 2165 | int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) | ||
| 2166 | { | ||
| 2167 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
| 2168 | efx_oword_t glb_ctl_reg_ker; | ||
| 2169 | int rc; | ||
| 2170 | |||
| 2171 | EFX_LOG(efx, "performing hardware reset (%d)\n", method); | ||
| 2172 | |||
| 2173 | /* Initiate device reset */ | ||
| 2174 | if (method == RESET_TYPE_WORLD) { | ||
| 2175 | rc = pci_save_state(efx->pci_dev); | ||
| 2176 | if (rc) { | ||
| 2177 | EFX_ERR(efx, "failed to backup PCI state of primary " | ||
| 2178 | "function prior to hardware reset\n"); | ||
| 2179 | goto fail1; | ||
| 2180 | } | ||
| 2181 | if (FALCON_IS_DUAL_FUNC(efx)) { | ||
| 2182 | rc = pci_save_state(nic_data->pci_dev2); | ||
| 2183 | if (rc) { | ||
| 2184 | EFX_ERR(efx, "failed to backup PCI state of " | ||
| 2185 | "secondary function prior to " | ||
| 2186 | "hardware reset\n"); | ||
| 2187 | goto fail2; | ||
| 2188 | } | ||
| 2189 | } | ||
| 2190 | |||
| 2191 | EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, | ||
| 2192 | EXT_PHY_RST_DUR, 0x7, | ||
| 2193 | SWRST, 1); | ||
| 2194 | } else { | ||
| 2195 | int reset_phy = (method == RESET_TYPE_INVISIBLE ? | ||
| 2196 | EXCLUDE_FROM_RESET : 0); | ||
| 2197 | |||
| 2198 | EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, | ||
| 2199 | EXT_PHY_RST_CTL, reset_phy, | ||
| 2200 | PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, | ||
| 2201 | PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, | ||
| 2202 | PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, | ||
| 2203 | EE_RST_CTL, EXCLUDE_FROM_RESET, | ||
| 2204 | EXT_PHY_RST_DUR, 0x7 /* 10ms */, | ||
| 2205 | SWRST, 1); | ||
| 2206 | } | ||
| 2207 | falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | ||
| 2208 | |||
| 2209 | EFX_LOG(efx, "waiting for hardware reset\n"); | ||
| 2210 | schedule_timeout_uninterruptible(HZ / 20); | ||
| 2211 | |||
| 2212 | /* Restore PCI configuration if needed */ | ||
| 2213 | if (method == RESET_TYPE_WORLD) { | ||
| 2214 | if (FALCON_IS_DUAL_FUNC(efx)) { | ||
| 2215 | rc = pci_restore_state(nic_data->pci_dev2); | ||
| 2216 | if (rc) { | ||
| 2217 | EFX_ERR(efx, "failed to restore PCI config for " | ||
| 2218 | "the secondary function\n"); | ||
| 2219 | goto fail3; | ||
| 2220 | } | ||
| 2221 | } | ||
| 2222 | rc = pci_restore_state(efx->pci_dev); | ||
| 2223 | if (rc) { | ||
| 2224 | EFX_ERR(efx, "failed to restore PCI config for the " | ||
| 2225 | "primary function\n"); | ||
| 2226 | goto fail4; | ||
| 2227 | } | ||
| 2228 | EFX_LOG(efx, "successfully restored PCI config\n"); | ||
| 2229 | } | ||
| 2230 | |||
| 2231 | /* Assert that the reset is complete */ | ||
| 2232 | falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); | ||
| 2233 | if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { | ||
| 2234 | rc = -ETIMEDOUT; | ||
| 2235 | EFX_ERR(efx, "timed out waiting for hardware reset\n"); | ||
| 2236 | goto fail5; | ||
| 2237 | } | ||
| 2238 | EFX_LOG(efx, "hardware reset complete\n"); | ||
| 2239 | |||
| 2240 | return 0; | ||
| 2241 | |||
| 2242 | /* pci_save_state() and pci_restore_state() MUST be called in pairs */ | ||
| 2243 | fail2: | ||
| 2244 | fail3: | ||
| 2245 | pci_restore_state(efx->pci_dev); | ||
| 2246 | fail1: | ||
| 2247 | fail4: | ||
| 2248 | fail5: | ||
| 2249 | return rc; | ||
| 2250 | } | ||
| 2251 | |||
| 2252 | /* Zeroes out the SRAM contents. This routine must be called in | ||
| 2253 | * process context and is allowed to sleep. | ||
| 2254 | */ | ||
| 2255 | static int falcon_reset_sram(struct efx_nic *efx) | ||
| 2256 | { | ||
| 2257 | efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; | ||
| 2258 | int count; | ||
| 2259 | |||
| 2260 | /* Set the SRAM wake/sleep GPIO appropriately. */ | ||
| 2261 | falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | ||
| 2262 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); | ||
| 2263 | EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); | ||
| 2264 | falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); | ||
| 2265 | |||
| 2266 | /* Initiate SRAM reset */ | ||
| 2267 | EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, | ||
| 2268 | SRAM_OOB_BT_INIT_EN, 1, | ||
| 2269 | SRM_NUM_BANKS_AND_BANK_SIZE, 0); | ||
| 2270 | falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | ||
| 2271 | |||
| 2272 | /* Wait for SRAM reset to complete */ | ||
| 2273 | count = 0; | ||
| 2274 | do { | ||
| 2275 | EFX_LOG(efx, "waiting for SRAM reset (attempt %d)...\n", count); | ||
| 2276 | |||
| 2277 | /* SRAM reset is slow; expect around 16ms */ | ||
| 2278 | schedule_timeout_uninterruptible(HZ / 50); | ||
| 2279 | |||
| 2280 | /* Check for reset complete */ | ||
| 2281 | falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); | ||
| 2282 | if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { | ||
| 2283 | EFX_LOG(efx, "SRAM reset complete\n"); | ||
| 2284 | |||
| 2285 | return 0; | ||
| 2286 | } | ||
| 2287 | } while (++count < 20); /* wait up to 0.4 sec */ | ||
| 2288 | |||
| 2289 | EFX_ERR(efx, "timed out waiting for SRAM reset\n"); | ||
| 2290 | return -ETIMEDOUT; | ||
| 2291 | } | ||
| 2292 | |||
| 2293 | /* Extract non-volatile configuration */ | ||
| 2294 | static int falcon_probe_nvconfig(struct efx_nic *efx) | ||
| 2295 | { | ||
| 2296 | struct falcon_nvconfig *nvconfig; | ||
| 2297 | efx_oword_t nic_stat; | ||
| 2298 | int device_id; | ||
| 2299 | unsigned addr_len; | ||
| 2300 | size_t offset, len; | ||
| 2301 | int magic_num, struct_ver, board_rev; | ||
| 2302 | int rc; | ||
| 2303 | |||
| 2304 | /* Find the boot device. */ | ||
| 2305 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | ||
| 2306 | if (EFX_OWORD_FIELD(nic_stat, SF_PRST)) { | ||
| 2307 | device_id = EE_SPI_FLASH; | ||
| 2308 | addr_len = 3; | ||
| 2309 | } else if (EFX_OWORD_FIELD(nic_stat, EE_PRST)) { | ||
| 2310 | device_id = EE_SPI_EEPROM; | ||
| 2311 | addr_len = 2; | ||
| 2312 | } else { | ||
| 2313 | return -ENODEV; | ||
| 2314 | } | ||
| 2315 | |||
| 2316 | nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); | ||
| 2317 | |||
| 2318 | /* Read the whole configuration structure into memory. */ | ||
| 2319 | for (offset = 0; offset < sizeof(*nvconfig); offset += len) { | ||
| 2320 | len = min(sizeof(*nvconfig) - offset, | ||
| 2321 | (size_t) FALCON_SPI_MAX_LEN); | ||
| 2322 | rc = falcon_spi_read(efx, device_id, SPI_READ, | ||
| 2323 | NVCONFIG_BASE + offset, addr_len, | ||
| 2324 | (char *)nvconfig + offset, len); | ||
| 2325 | if (rc) | ||
| 2326 | goto out; | ||
| 2327 | } | ||
| 2328 | |||
| 2329 | /* Read the MAC addresses */ | ||
| 2330 | memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); | ||
| 2331 | |||
| 2332 | /* Read the board configuration. */ | ||
| 2333 | magic_num = le16_to_cpu(nvconfig->board_magic_num); | ||
| 2334 | struct_ver = le16_to_cpu(nvconfig->board_struct_ver); | ||
| 2335 | |||
| 2336 | if (magic_num != NVCONFIG_BOARD_MAGIC_NUM || struct_ver < 2) { | ||
| 2337 | EFX_ERR(efx, "Non-volatile memory has bad magic=%x ver=%x; " | ||
| 2338 | "using defaults\n", magic_num, struct_ver); | ||
| 2339 | efx->phy_type = PHY_TYPE_NONE; | ||
| 2340 | efx->mii.phy_id = PHY_ADDR_INVALID; | ||
| 2341 | board_rev = 0; | ||
| 2342 | } else { | ||
| 2343 | struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; | ||
| 2344 | |||
| 2345 | efx->phy_type = v2->port0_phy_type; | ||
| 2346 | efx->mii.phy_id = v2->port0_phy_addr; | ||
| 2347 | board_rev = le16_to_cpu(v2->board_revision); | ||
| 2348 | } | ||
| 2349 | |||
| 2350 | EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mii.phy_id); | ||
| 2351 | |||
| 2352 | efx_set_board_info(efx, board_rev); | ||
| 2353 | |||
| 2354 | out: | ||
| 2355 | kfree(nvconfig); | ||
| 2356 | return rc; | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | /* Probe the NIC variant (revision, ASIC vs FPGA, function count, port | ||
| 2360 | * count, port speed). Set workaround and feature flags accordingly. | ||
| 2361 | */ | ||
| 2362 | static int falcon_probe_nic_variant(struct efx_nic *efx) | ||
| 2363 | { | ||
| 2364 | efx_oword_t altera_build; | ||
| 2365 | |||
| 2366 | falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); | ||
| 2367 | if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { | ||
| 2368 | EFX_ERR(efx, "Falcon FPGA not supported\n"); | ||
| 2369 | return -ENODEV; | ||
| 2370 | } | ||
| 2371 | |||
| 2372 | switch (FALCON_REV(efx)) { | ||
| 2373 | case FALCON_REV_A0: | ||
| 2374 | case 0xff: | ||
| 2375 | EFX_ERR(efx, "Falcon rev A0 not supported\n"); | ||
| 2376 | return -ENODEV; | ||
| 2377 | |||
| 2378 | case FALCON_REV_A1:{ | ||
| 2379 | efx_oword_t nic_stat; | ||
| 2380 | |||
| 2381 | falcon_read(efx, &nic_stat, NIC_STAT_REG); | ||
| 2382 | |||
| 2383 | if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { | ||
| 2384 | EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); | ||
| 2385 | return -ENODEV; | ||
| 2386 | } | ||
| 2387 | if (!EFX_OWORD_FIELD(nic_stat, STRAP_10G)) { | ||
| 2388 | EFX_ERR(efx, "1G mode not supported\n"); | ||
| 2389 | return -ENODEV; | ||
| 2390 | } | ||
| 2391 | break; | ||
| 2392 | } | ||
| 2393 | |||
| 2394 | case FALCON_REV_B0: | ||
| 2395 | break; | ||
| 2396 | |||
| 2397 | default: | ||
| 2398 | EFX_ERR(efx, "Unknown Falcon rev %d\n", FALCON_REV(efx)); | ||
| 2399 | return -ENODEV; | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | return 0; | ||
| 2403 | } | ||
| 2404 | |||
| 2405 | int falcon_probe_nic(struct efx_nic *efx) | ||
| 2406 | { | ||
| 2407 | struct falcon_nic_data *nic_data; | ||
| 2408 | int rc; | ||
| 2409 | |||
| 2410 | /* Initialise I2C interface state */ | ||
| 2411 | efx->i2c.efx = efx; | ||
| 2412 | efx->i2c.op = &falcon_i2c_bit_operations; | ||
| 2413 | efx->i2c.sda = 1; | ||
| 2414 | efx->i2c.scl = 1; | ||
| 2415 | |||
| 2416 | /* Allocate storage for hardware specific data */ | ||
| 2417 | nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); | ||
| 2418 | efx->nic_data = (void *) nic_data; | ||
| 2419 | |||
| 2420 | /* Determine number of ports etc. */ | ||
| 2421 | rc = falcon_probe_nic_variant(efx); | ||
| 2422 | if (rc) | ||
| 2423 | goto fail1; | ||
| 2424 | |||
| 2425 | /* Probe secondary function if expected */ | ||
| 2426 | if (FALCON_IS_DUAL_FUNC(efx)) { | ||
| 2427 | struct pci_dev *dev = pci_dev_get(efx->pci_dev); | ||
| 2428 | |||
| 2429 | while ((dev = pci_get_device(EFX_VENDID_SFC, FALCON_A_S_DEVID, | ||
| 2430 | dev))) { | ||
| 2431 | if (dev->bus == efx->pci_dev->bus && | ||
| 2432 | dev->devfn == efx->pci_dev->devfn + 1) { | ||
| 2433 | nic_data->pci_dev2 = dev; | ||
| 2434 | break; | ||
| 2435 | } | ||
| 2436 | } | ||
| 2437 | if (!nic_data->pci_dev2) { | ||
| 2438 | EFX_ERR(efx, "failed to find secondary function\n"); | ||
| 2439 | rc = -ENODEV; | ||
| 2440 | goto fail2; | ||
| 2441 | } | ||
| 2442 | } | ||
| 2443 | |||
| 2444 | /* Now we can reset the NIC */ | ||
| 2445 | rc = falcon_reset_hw(efx, RESET_TYPE_ALL); | ||
| 2446 | if (rc) { | ||
| 2447 | EFX_ERR(efx, "failed to reset NIC\n"); | ||
| 2448 | goto fail3; | ||
| 2449 | } | ||
| 2450 | |||
| 2451 | /* Allocate memory for INT_KER */ | ||
| 2452 | rc = falcon_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); | ||
| 2453 | if (rc) | ||
| 2454 | goto fail4; | ||
| 2455 | BUG_ON(efx->irq_status.dma_addr & 0x0f); | ||
| 2456 | |||
| 2457 | EFX_LOG(efx, "INT_KER at %llx (virt %p phys %lx)\n", | ||
| 2458 | (unsigned long long)efx->irq_status.dma_addr, | ||
| 2459 | efx->irq_status.addr, virt_to_phys(efx->irq_status.addr)); | ||
| 2460 | |||
| 2461 | /* Read in the non-volatile configuration */ | ||
| 2462 | rc = falcon_probe_nvconfig(efx); | ||
| 2463 | if (rc) | ||
| 2464 | goto fail5; | ||
| 2465 | |||
| 2466 | return 0; | ||
| 2467 | |||
| 2468 | fail5: | ||
| 2469 | falcon_free_buffer(efx, &efx->irq_status); | ||
| 2470 | fail4: | ||
| 2471 | /* fall-thru */ | ||
| 2472 | fail3: | ||
| 2473 | if (nic_data->pci_dev2) { | ||
| 2474 | pci_dev_put(nic_data->pci_dev2); | ||
| 2475 | nic_data->pci_dev2 = NULL; | ||
| 2476 | } | ||
| 2477 | fail2: | ||
| 2478 | /* fall-thru */ | ||
| 2479 | fail1: | ||
| 2480 | kfree(efx->nic_data); | ||
| 2481 | return rc; | ||
| 2482 | } | ||
| 2483 | |||
| 2484 | /* This call performs hardware-specific global initialisation, such as | ||
| 2485 | * defining the descriptor cache sizes and number of RSS channels. | ||
| 2486 | * It does not set up any buffers, descriptor rings or event queues. | ||
| 2487 | */ | ||
| 2488 | int falcon_init_nic(struct efx_nic *efx) | ||
| 2489 | { | ||
| 2490 | struct falcon_nic_data *data; | ||
| 2491 | efx_oword_t temp; | ||
| 2492 | unsigned thresh; | ||
| 2493 | int rc; | ||
| 2494 | |||
| 2495 | data = (struct falcon_nic_data *)efx->nic_data; | ||
| 2496 | |||
| 2497 | /* Set up the address region register. This is only needed | ||
| 2498 | * for the B0 FPGA, but since we are just pushing in the | ||
| 2499 | * reset defaults this may as well be unconditional. */ | ||
| 2500 | EFX_POPULATE_OWORD_4(temp, ADR_REGION0, 0, | ||
| 2501 | ADR_REGION1, (1 << 16), | ||
| 2502 | ADR_REGION2, (2 << 16), | ||
| 2503 | ADR_REGION3, (3 << 16)); | ||
| 2504 | falcon_write(efx, &temp, ADR_REGION_REG_KER); | ||
| 2505 | |||
| 2506 | /* Use on-chip SRAM */ | ||
| 2507 | falcon_read(efx, &temp, NIC_STAT_REG); | ||
| 2508 | EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); | ||
| 2509 | falcon_write(efx, &temp, NIC_STAT_REG); | ||
| 2510 | |||
| 2511 | /* Set buffer table mode */ | ||
| 2512 | EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL); | ||
| 2513 | falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER); | ||
| 2514 | |||
| 2515 | rc = falcon_reset_sram(efx); | ||
| 2516 | if (rc) | ||
| 2517 | return rc; | ||
| 2518 | |||
| 2519 | /* Set positions of descriptor caches in SRAM. */ | ||
| 2520 | EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); | ||
| 2521 | falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); | ||
| 2522 | EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); | ||
| 2523 | falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); | ||
| 2524 | |||
| 2525 | /* Set TX descriptor cache size. */ | ||
| 2526 | BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); | ||
| 2527 | EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); | ||
| 2528 | falcon_write(efx, &temp, TX_DC_CFG_REG_KER); | ||
| 2529 | |||
| 2530 | /* Set RX descriptor cache size. Set low watermark to size-8, as | ||
| 2531 | * this allows most efficient prefetching. | ||
| 2532 | */ | ||
| 2533 | BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); | ||
| 2534 | EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); | ||
| 2535 | falcon_write(efx, &temp, RX_DC_CFG_REG_KER); | ||
| 2536 | EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); | ||
| 2537 | falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); | ||
| 2538 | |||
| 2539 | /* Clear the parity enables on the TX data fifos as | ||
| 2540 | * they produce false parity errors because of timing issues | ||
| 2541 | */ | ||
| 2542 | if (EFX_WORKAROUND_5129(efx)) { | ||
| 2543 | falcon_read(efx, &temp, SPARE_REG_KER); | ||
| 2544 | EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); | ||
| 2545 | falcon_write(efx, &temp, SPARE_REG_KER); | ||
| 2546 | } | ||
| 2547 | |||
| 2548 | /* Enable all the genuinely fatal interrupts. (They are still | ||
| 2549 | * masked by the overall interrupt mask, controlled by | ||
| 2550 | * falcon_interrupts()). | ||
| 2551 | * | ||
| 2552 | * Note: All other fatal interrupts are enabled | ||
| 2553 | */ | ||
| 2554 | EFX_POPULATE_OWORD_3(temp, | ||
| 2555 | ILL_ADR_INT_KER_EN, 1, | ||
| 2556 | RBUF_OWN_INT_KER_EN, 1, | ||
| 2557 | TBUF_OWN_INT_KER_EN, 1); | ||
| 2558 | EFX_INVERT_OWORD(temp); | ||
| 2559 | falcon_write(efx, &temp, FATAL_INTR_REG_KER); | ||
| 2560 | |||
| 2561 | /* Set number of RSS queues for receive path. */ | ||
| 2562 | falcon_read(efx, &temp, RX_FILTER_CTL_REG); | ||
| 2563 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 2564 | EFX_SET_OWORD_FIELD(temp, NUM_KER, 0); | ||
| 2565 | else | ||
| 2566 | EFX_SET_OWORD_FIELD(temp, NUM_KER, efx->rss_queues - 1); | ||
| 2567 | if (EFX_WORKAROUND_7244(efx)) { | ||
| 2568 | EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); | ||
| 2569 | EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); | ||
| 2570 | EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); | ||
| 2571 | EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); | ||
| 2572 | } | ||
| 2573 | falcon_write(efx, &temp, RX_FILTER_CTL_REG); | ||
| 2574 | |||
| 2575 | falcon_setup_rss_indir_table(efx); | ||
| 2576 | |||
| 2577 | /* Set up RX. The "wait for descriptor" feature is broken and must | ||
| 2578 | * be disabled. RXDP recovery shouldn't be needed, but is. | ||
| 2579 | */ | ||
| 2580 | falcon_read(efx, &temp, RX_SELF_RST_REG_KER); | ||
| 2581 | EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); | ||
| 2582 | EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); | ||
| 2583 | if (EFX_WORKAROUND_5583(efx)) | ||
| 2584 | EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); | ||
| 2585 | falcon_write(efx, &temp, RX_SELF_RST_REG_KER); | ||
| 2586 | |||
| 2587 | /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be | ||
| 2588 | * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. | ||
| 2589 | */ | ||
| 2590 | falcon_read(efx, &temp, TX_CFG2_REG_KER); | ||
| 2591 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); | ||
| 2592 | EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); | ||
| 2593 | EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); | ||
| 2594 | EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); | ||
| 2595 | EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); | ||
| 2596 | /* Enable SW_EV to inherit in char driver - assume harmless here */ | ||
| 2597 | EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); | ||
| 2598 | /* Prefetch threshold 2 => fetch when descriptor cache half empty */ | ||
| 2599 | EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); | ||
| 2600 | /* Squash TX of packets of 16 bytes or less */ | ||
| 2601 | if (FALCON_REV(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) | ||
| 2602 | EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); | ||
| 2603 | falcon_write(efx, &temp, TX_CFG2_REG_KER); | ||
| 2604 | |||
| 2605 | /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 | ||
| 2606 | * descriptors (which is bad). | ||
| 2607 | */ | ||
| 2608 | falcon_read(efx, &temp, TX_CFG_REG_KER); | ||
| 2609 | EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); | ||
| 2610 | falcon_write(efx, &temp, TX_CFG_REG_KER); | ||
| 2611 | |||
| 2612 | /* RX config */ | ||
| 2613 | falcon_read(efx, &temp, RX_CFG_REG_KER); | ||
| 2614 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0); | ||
| 2615 | if (EFX_WORKAROUND_7575(efx)) | ||
| 2616 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE, | ||
| 2617 | (3 * 4096) / 32); | ||
| 2618 | if (FALCON_REV(efx) >= FALCON_REV_B0) | ||
| 2619 | EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1); | ||
| 2620 | |||
| 2621 | /* RX FIFO flow control thresholds */ | ||
| 2622 | thresh = ((rx_xon_thresh_bytes >= 0) ? | ||
| 2623 | rx_xon_thresh_bytes : efx->type->rx_xon_thresh); | ||
| 2624 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256); | ||
| 2625 | thresh = ((rx_xoff_thresh_bytes >= 0) ? | ||
| 2626 | rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh); | ||
| 2627 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256); | ||
| 2628 | /* RX control FIFO thresholds [32 entries] */ | ||
| 2629 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 25); | ||
| 2630 | EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 20); | ||
| 2631 | falcon_write(efx, &temp, RX_CFG_REG_KER); | ||
| 2632 | |||
| 2633 | /* Set destination of both TX and RX Flush events */ | ||
| 2634 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | ||
| 2635 | EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); | ||
| 2636 | falcon_write(efx, &temp, DP_CTRL_REG); | ||
| 2637 | } | ||
| 2638 | |||
| 2639 | return 0; | ||
| 2640 | } | ||
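The RX_XON_MAC_TH/RX_XOFF_MAC_TH fields above are programmed in units of 256 bytes, hence the thresh / 256 conversion. A worked example using the Falcon B default byte thresholds declared at the end of this file (the only values assumed here):

    #include <stdio.h>

    /* Worked example: Falcon programs the MAC XON/XOFF flow-control
     * thresholds in units of 256 bytes (thresh / 256). */
    int main(void)
    {
            unsigned int rx_xoff_thresh = 54272;  /* bytes, ~80KB - 3*max MTU */
            unsigned int rx_xon_thresh  = 27648;  /* bytes, ~3*max MTU */

            printf("XOFF_MAC_TH=%u XON_MAC_TH=%u\n",
                   rx_xoff_thresh / 256, rx_xon_thresh / 256); /* 212, 108 */
            return 0;
    }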
| 2641 | |||
| 2642 | void falcon_remove_nic(struct efx_nic *efx) | ||
| 2643 | { | ||
| 2644 | struct falcon_nic_data *nic_data = efx->nic_data; | ||
| 2645 | |||
| 2646 | falcon_free_buffer(efx, &efx->irq_status); | ||
| 2647 | |||
| 2648 | (void) falcon_reset_hw(efx, RESET_TYPE_ALL); | ||
| 2649 | |||
| 2650 | /* Release the second function after the reset */ | ||
| 2651 | if (nic_data->pci_dev2) { | ||
| 2652 | pci_dev_put(nic_data->pci_dev2); | ||
| 2653 | nic_data->pci_dev2 = NULL; | ||
| 2654 | } | ||
| 2655 | |||
| 2656 | /* Tear down the private nic state */ | ||
| 2657 | kfree(efx->nic_data); | ||
| 2658 | efx->nic_data = NULL; | ||
| 2659 | } | ||
| 2660 | |||
| 2661 | void falcon_update_nic_stats(struct efx_nic *efx) | ||
| 2662 | { | ||
| 2663 | efx_oword_t cnt; | ||
| 2664 | |||
| 2665 | falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); | ||
| 2666 | efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); | ||
| 2667 | } | ||
| 2668 | |||
| 2669 | /************************************************************************** | ||
| 2670 | * | ||
| 2671 | * Revision-dependent attributes used by efx.c | ||
| 2672 | * | ||
| 2673 | ************************************************************************** | ||
| 2674 | */ | ||
| 2675 | |||
| 2676 | struct efx_nic_type falcon_a_nic_type = { | ||
| 2677 | .mem_bar = 2, | ||
| 2678 | .mem_map_size = 0x20000, | ||
| 2679 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, | ||
| 2680 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, | ||
| 2681 | .buf_tbl_base = BUF_TBL_KER_A1, | ||
| 2682 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, | ||
| 2683 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, | ||
| 2684 | .txd_ring_mask = FALCON_TXD_RING_MASK, | ||
| 2685 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | ||
| 2686 | .evq_size = FALCON_EVQ_SIZE, | ||
| 2687 | .max_dma_mask = FALCON_DMA_MASK, | ||
| 2688 | .tx_dma_mask = FALCON_TX_DMA_MASK, | ||
| 2689 | .bug5391_mask = 0xf, | ||
| 2690 | .rx_xoff_thresh = 2048, | ||
| 2691 | .rx_xon_thresh = 512, | ||
| 2692 | .rx_buffer_padding = 0x24, | ||
| 2693 | .max_interrupt_mode = EFX_INT_MODE_MSI, | ||
| 2694 | .phys_addr_channels = 4, | ||
| 2695 | }; | ||
| 2696 | |||
| 2697 | struct efx_nic_type falcon_b_nic_type = { | ||
| 2698 | .mem_bar = 2, | ||
| 2699 | /* Map everything up to and including the RSS indirection | ||
| 2700 | * table. Don't map the MSI-X table or MSI-X PBA, since Linux | ||
| 2701 | * requires that they not be mapped. */ | ||
| 2702 | .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, | ||
| 2703 | .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, | ||
| 2704 | .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, | ||
| 2705 | .buf_tbl_base = BUF_TBL_KER_B0, | ||
| 2706 | .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, | ||
| 2707 | .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, | ||
| 2708 | .txd_ring_mask = FALCON_TXD_RING_MASK, | ||
| 2709 | .rxd_ring_mask = FALCON_RXD_RING_MASK, | ||
| 2710 | .evq_size = FALCON_EVQ_SIZE, | ||
| 2711 | .max_dma_mask = FALCON_DMA_MASK, | ||
| 2712 | .tx_dma_mask = FALCON_TX_DMA_MASK, | ||
| 2713 | .bug5391_mask = 0, | ||
| 2714 | .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */ | ||
| 2715 | .rx_xon_thresh = 27648, /* ~3*max MTU */ | ||
| 2716 | .rx_buffer_padding = 0, | ||
| 2717 | .max_interrupt_mode = EFX_INT_MODE_MSIX, | ||
| 2718 | .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy | ||
| 2719 | * interrupt handler only supports 32 | ||
| 2720 | * channels */ | ||
| 2721 | }; | ||
| 2722 | |||
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h new file mode 100644 index 000000000000..6117403b0c03 --- /dev/null +++ b/drivers/net/sfc/falcon.h | |||
| @@ -0,0 +1,130 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_FALCON_H | ||
| 12 | #define EFX_FALCON_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Falcon hardware control | ||
| 18 | */ | ||
| 19 | |||
| 20 | enum falcon_revision { | ||
| 21 | FALCON_REV_A0 = 0, | ||
| 22 | FALCON_REV_A1 = 1, | ||
| 23 | FALCON_REV_B0 = 2, | ||
| 24 | }; | ||
| 25 | |||
| 26 | #define FALCON_REV(efx) ((efx)->pci_dev->revision) | ||
| 27 | |||
| 28 | extern struct efx_nic_type falcon_a_nic_type; | ||
| 29 | extern struct efx_nic_type falcon_b_nic_type; | ||
| 30 | |||
| 31 | /************************************************************************** | ||
| 32 | * | ||
| 33 | * Externs | ||
| 34 | * | ||
| 35 | ************************************************************************** | ||
| 36 | */ | ||
| 37 | |||
| 38 | /* TX data path */ | ||
| 39 | extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); | ||
| 40 | extern int falcon_init_tx(struct efx_tx_queue *tx_queue); | ||
| 41 | extern void falcon_fini_tx(struct efx_tx_queue *tx_queue); | ||
| 42 | extern void falcon_remove_tx(struct efx_tx_queue *tx_queue); | ||
| 43 | extern void falcon_push_buffers(struct efx_tx_queue *tx_queue); | ||
| 44 | |||
| 45 | /* RX data path */ | ||
| 46 | extern int falcon_probe_rx(struct efx_rx_queue *rx_queue); | ||
| 47 | extern int falcon_init_rx(struct efx_rx_queue *rx_queue); | ||
| 48 | extern void falcon_fini_rx(struct efx_rx_queue *rx_queue); | ||
| 49 | extern void falcon_remove_rx(struct efx_rx_queue *rx_queue); | ||
| 50 | extern void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue); | ||
| 51 | |||
| 52 | /* Event data path */ | ||
| 53 | extern int falcon_probe_eventq(struct efx_channel *channel); | ||
| 54 | extern int falcon_init_eventq(struct efx_channel *channel); | ||
| 55 | extern void falcon_fini_eventq(struct efx_channel *channel); | ||
| 56 | extern void falcon_remove_eventq(struct efx_channel *channel); | ||
| 57 | extern int falcon_process_eventq(struct efx_channel *channel, int *rx_quota); | ||
| 58 | extern void falcon_eventq_read_ack(struct efx_channel *channel); | ||
| 59 | |||
| 60 | /* Ports */ | ||
| 61 | extern int falcon_probe_port(struct efx_nic *efx); | ||
| 62 | extern void falcon_remove_port(struct efx_nic *efx); | ||
| 63 | |||
| 64 | /* MAC/PHY */ | ||
| 65 | extern int falcon_xaui_link_ok(struct efx_nic *efx); | ||
| 66 | extern int falcon_dma_stats(struct efx_nic *efx, | ||
| 67 | unsigned int done_offset); | ||
| 68 | extern void falcon_drain_tx_fifo(struct efx_nic *efx); | ||
| 69 | extern void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); | ||
| 70 | extern void falcon_reconfigure_mac_wrapper(struct efx_nic *efx); | ||
| 71 | |||
| 72 | /* Interrupts and test events */ | ||
| 73 | extern int falcon_init_interrupt(struct efx_nic *efx); | ||
| 74 | extern void falcon_enable_interrupts(struct efx_nic *efx); | ||
| 75 | extern void falcon_generate_test_event(struct efx_channel *channel, | ||
| 76 | unsigned int magic); | ||
| 77 | extern void falcon_generate_interrupt(struct efx_nic *efx); | ||
| 78 | extern void falcon_set_int_moderation(struct efx_channel *channel); | ||
| 79 | extern void falcon_disable_interrupts(struct efx_nic *efx); | ||
| 80 | extern void falcon_fini_interrupt(struct efx_nic *efx); | ||
| 81 | |||
| 82 | /* Global Resources */ | ||
| 83 | extern int falcon_probe_nic(struct efx_nic *efx); | ||
| 84 | extern int falcon_probe_resources(struct efx_nic *efx); | ||
| 85 | extern int falcon_init_nic(struct efx_nic *efx); | ||
| 86 | extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); | ||
| 87 | extern void falcon_remove_resources(struct efx_nic *efx); | ||
| 88 | extern void falcon_remove_nic(struct efx_nic *efx); | ||
| 89 | extern void falcon_update_nic_stats(struct efx_nic *efx); | ||
| 90 | extern void falcon_set_multicast_hash(struct efx_nic *efx); | ||
| 91 | extern int falcon_reset_xaui(struct efx_nic *efx); | ||
| 92 | |||
| 93 | /************************************************************************** | ||
| 94 | * | ||
| 95 | * Falcon MAC stats | ||
| 96 | * | ||
| 97 | ************************************************************************** | ||
| 98 | */ | ||
| 99 | |||
| 100 | #define FALCON_STAT_OFFSET(falcon_stat) EFX_VAL(falcon_stat, offset) | ||
| 101 | #define FALCON_STAT_WIDTH(falcon_stat) EFX_VAL(falcon_stat, WIDTH) | ||
| 102 | |||
| 103 | /* Retrieve statistic from statistics block */ | ||
| 104 | #define FALCON_STAT(efx, falcon_stat, efx_stat) do { \ | ||
| 105 | if (FALCON_STAT_WIDTH(falcon_stat) == 16) \ | ||
| 106 | (efx)->mac_stats.efx_stat += le16_to_cpu( \ | ||
| 107 | *((__force __le16 *) \ | ||
| 108 | (efx->stats_buffer.addr + \ | ||
| 109 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
| 110 | else if (FALCON_STAT_WIDTH(falcon_stat) == 32) \ | ||
| 111 | (efx)->mac_stats.efx_stat += le32_to_cpu( \ | ||
| 112 | *((__force __le32 *) \ | ||
| 113 | (efx->stats_buffer.addr + \ | ||
| 114 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
| 115 | else \ | ||
| 116 | (efx)->mac_stats.efx_stat += le64_to_cpu( \ | ||
| 117 | *((__force __le64 *) \ | ||
| 118 | (efx->stats_buffer.addr + \ | ||
| 119 | FALCON_STAT_OFFSET(falcon_stat)))); \ | ||
| 120 | } while (0) | ||
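FALCON_STAT() picks a 16-, 32- or 64-bit little-endian counter out of the DMA'd statistics block at stats_buffer.addr and accumulates it into the matching efx->mac_stats field. The following stripped-down, user-space sketch shows the same idea for the 32-bit case; the buffer offset and counter name are invented for illustration and are not taken from the driver.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Sketch of what FALCON_STAT() does for a 32-bit counter: read the
     * little-endian value at a fixed offset in the DMA'd stats block and
     * accumulate it into the running software counter. */
    static uint64_t accumulate_le32(const uint8_t *stats, size_t offset,
                                    uint64_t counter)
    {
            uint32_t raw;

            memcpy(&raw, stats + offset, sizeof(raw));
            /* A big-endian host would need a byte swap here (le32_to_cpu);
             * Falcon's stats block is always little-endian. */
            return counter + raw;
    }

    int main(void)
    {
            uint8_t stats[0x100] = { 0 };   /* FALCON_MAC_STATS_SIZE */
            uint64_t rx_bytes = 0;

            stats[0x40] = 0x10;             /* pretend the counter reads 0x10 */
            rx_bytes = accumulate_le32(stats, 0x40, rx_bytes);
            printf("rx_bytes=%llu\n", (unsigned long long)rx_bytes);
            return 0;
    }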
| 121 | |||
| 122 | #define FALCON_MAC_STATS_SIZE 0x100 | ||
| 123 | |||
| 124 | #define MAC_DATA_LBN 0 | ||
| 125 | #define MAC_DATA_WIDTH 32 | ||
| 126 | |||
| 127 | extern void falcon_generate_event(struct efx_channel *channel, | ||
| 128 | efx_qword_t *event); | ||
| 129 | |||
| 130 | #endif /* EFX_FALCON_H */ | ||
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h new file mode 100644 index 000000000000..0485a63eaff6 --- /dev/null +++ b/drivers/net/sfc/falcon_hwdefs.h | |||
| @@ -0,0 +1,1135 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_FALCON_HWDEFS_H | ||
| 12 | #define EFX_FALCON_HWDEFS_H | ||
| 13 | |||
| 14 | /* | ||
| 15 | * Falcon hardware value definitions. | ||
| 16 | * Falcon is the internal codename for the SFC4000 controller that is | ||
| 17 | * present in SFE400X evaluation boards. | ||
| 18 | */ | ||
| 19 | |||
| 20 | /************************************************************************** | ||
| 21 | * | ||
| 22 | * Falcon registers | ||
| 23 | * | ||
| 24 | ************************************************************************** | ||
| 25 | */ | ||
| 26 | |||
| 27 | /* Address region register */ | ||
| 28 | #define ADR_REGION_REG_KER 0x00 | ||
| 29 | #define ADR_REGION0_LBN 0 | ||
| 30 | #define ADR_REGION0_WIDTH 18 | ||
| 31 | #define ADR_REGION1_LBN 32 | ||
| 32 | #define ADR_REGION1_WIDTH 18 | ||
| 33 | #define ADR_REGION2_LBN 64 | ||
| 34 | #define ADR_REGION2_WIDTH 18 | ||
| 35 | #define ADR_REGION3_LBN 96 | ||
| 36 | #define ADR_REGION3_WIDTH 18 | ||
| 37 | |||
| 38 | /* Interrupt enable register */ | ||
| 39 | #define INT_EN_REG_KER 0x0010 | ||
| 40 | #define KER_INT_KER_LBN 3 | ||
| 41 | #define KER_INT_KER_WIDTH 1 | ||
| 42 | #define DRV_INT_EN_KER_LBN 0 | ||
| 43 | #define DRV_INT_EN_KER_WIDTH 1 | ||
| 44 | |||
| 45 | /* Interrupt status address register */ | ||
| 46 | #define INT_ADR_REG_KER 0x0030 | ||
| 47 | #define NORM_INT_VEC_DIS_KER_LBN 64 | ||
| 48 | #define NORM_INT_VEC_DIS_KER_WIDTH 1 | ||
| 49 | #define INT_ADR_KER_LBN 0 | ||
| 50 | #define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */ | ||
| 51 | |||
| 52 | /* Interrupt status register (B0 only) */ | ||
| 53 | #define INT_ISR0_B0 0x90 | ||
| 54 | #define INT_ISR1_B0 0xA0 | ||
| 55 | |||
| 56 | /* Interrupt acknowledge register (A0/A1 only) */ | ||
| 57 | #define INT_ACK_REG_KER_A1 0x0050 | ||
| 58 | #define INT_ACK_DUMMY_DATA_LBN 0 | ||
| 59 | #define INT_ACK_DUMMY_DATA_WIDTH 32 | ||
| 60 | |||
| 61 | /* Interrupt acknowledge work-around register (A0/A1 only) */ | ||
| 62 | #define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070 | ||
| 63 | |||
| 64 | /* SPI host command register */ | ||
| 65 | #define EE_SPI_HCMD_REG_KER 0x0100 | ||
| 66 | #define EE_SPI_HCMD_CMD_EN_LBN 31 | ||
| 67 | #define EE_SPI_HCMD_CMD_EN_WIDTH 1 | ||
| 68 | #define EE_WR_TIMER_ACTIVE_LBN 28 | ||
| 69 | #define EE_WR_TIMER_ACTIVE_WIDTH 1 | ||
| 70 | #define EE_SPI_HCMD_SF_SEL_LBN 24 | ||
| 71 | #define EE_SPI_HCMD_SF_SEL_WIDTH 1 | ||
| 72 | #define EE_SPI_EEPROM 0 | ||
| 73 | #define EE_SPI_FLASH 1 | ||
| 74 | #define EE_SPI_HCMD_DABCNT_LBN 16 | ||
| 75 | #define EE_SPI_HCMD_DABCNT_WIDTH 5 | ||
| 76 | #define EE_SPI_HCMD_READ_LBN 15 | ||
| 77 | #define EE_SPI_HCMD_READ_WIDTH 1 | ||
| 78 | #define EE_SPI_READ 1 | ||
| 79 | #define EE_SPI_WRITE 0 | ||
| 80 | #define EE_SPI_HCMD_DUBCNT_LBN 12 | ||
| 81 | #define EE_SPI_HCMD_DUBCNT_WIDTH 2 | ||
| 82 | #define EE_SPI_HCMD_ADBCNT_LBN 8 | ||
| 83 | #define EE_SPI_HCMD_ADBCNT_WIDTH 2 | ||
| 84 | #define EE_SPI_HCMD_ENC_LBN 0 | ||
| 85 | #define EE_SPI_HCMD_ENC_WIDTH 8 | ||
| 86 | |||
| 87 | /* SPI host address register */ | ||
| 88 | #define EE_SPI_HADR_REG_KER 0x0110 | ||
| 89 | #define EE_SPI_HADR_ADR_LBN 0 | ||
| 90 | #define EE_SPI_HADR_ADR_WIDTH 24 | ||
| 91 | |||
| 92 | /* SPI host data register */ | ||
| 93 | #define EE_SPI_HDATA_REG_KER 0x0120 | ||
| 94 | |||
| 95 | /* PCIE CORE ACCESS REG */ | ||
| 96 | #define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68 | ||
| 97 | #define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70 | ||
| 98 | #define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700 | ||
| 99 | #define PCIE_CORE_ADDR_ACK_FREQ 0x70C | ||
| 100 | |||
| 101 | /* NIC status register */ | ||
| 102 | #define NIC_STAT_REG 0x0200 | ||
| 103 | #define ONCHIP_SRAM_LBN 16 | ||
| 104 | #define ONCHIP_SRAM_WIDTH 1 | ||
| 105 | #define SF_PRST_LBN 9 | ||
| 106 | #define SF_PRST_WIDTH 1 | ||
| 107 | #define EE_PRST_LBN 8 | ||
| 108 | #define EE_PRST_WIDTH 1 | ||
| 109 | /* See pic_mode_t for decoding of this field */ | ||
| 110 | /* These bit definitions are extrapolated from the list of numerical | ||
| 111 | * values for STRAP_PINS. | ||
| 112 | */ | ||
| 113 | #define STRAP_10G_LBN 2 | ||
| 114 | #define STRAP_10G_WIDTH 1 | ||
| 115 | #define STRAP_PCIE_LBN 0 | ||
| 116 | #define STRAP_PCIE_WIDTH 1 | ||
| 117 | |||
| 118 | /* GPIO control register */ | ||
| 119 | #define GPIO_CTL_REG_KER 0x0210 | ||
| 120 | #define GPIO_OUTPUTS_LBN (16) | ||
| 121 | #define GPIO_OUTPUTS_WIDTH (4) | ||
| 122 | #define GPIO_INPUTS_LBN (8) | ||
| 123 | #define GPIO_DIRECTION_LBN (24) | ||
| 124 | #define GPIO_DIRECTION_WIDTH (4) | ||
| 125 | #define GPIO_DIRECTION_OUT (1) | ||
| 126 | #define GPIO_SRAM_SLEEP (1 << 1) | ||
| 127 | |||
| 128 | #define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3) | ||
| 129 | #define GPIO3_OEN_WIDTH 1 | ||
| 130 | #define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2) | ||
| 131 | #define GPIO2_OEN_WIDTH 1 | ||
| 132 | #define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1) | ||
| 133 | #define GPIO1_OEN_WIDTH 1 | ||
| 134 | #define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0) | ||
| 135 | #define GPIO0_OEN_WIDTH 1 | ||
| 136 | |||
| 137 | #define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3) | ||
| 138 | #define GPIO3_OUT_WIDTH 1 | ||
| 139 | #define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2) | ||
| 140 | #define GPIO2_OUT_WIDTH 1 | ||
| 141 | #define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1) | ||
| 142 | #define GPIO1_OUT_WIDTH 1 | ||
| 143 | #define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0) | ||
| 144 | #define GPIO0_OUT_WIDTH 1 | ||
| 145 | |||
| 146 | #define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3) | ||
| 147 | #define GPIO3_IN_WIDTH 1 | ||
| 148 | #define GPIO2_IN_WIDTH 1 | ||
| 149 | #define GPIO1_IN_WIDTH 1 | ||
| 150 | #define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0) | ||
| 151 | #define GPIO0_IN_WIDTH 1 | ||
| 152 | |||
| 153 | /* Global control register */ | ||
| 154 | #define GLB_CTL_REG_KER 0x0220 | ||
| 155 | #define EXT_PHY_RST_CTL_LBN 63 | ||
| 156 | #define EXT_PHY_RST_CTL_WIDTH 1 | ||
| 157 | #define PCIE_SD_RST_CTL_LBN 61 | ||
| 158 | #define PCIE_SD_RST_CTL_WIDTH 1 | ||
| 159 | |||
| 160 | #define PCIE_NSTCK_RST_CTL_LBN 58 | ||
| 161 | #define PCIE_NSTCK_RST_CTL_WIDTH 1 | ||
| 162 | #define PCIE_CORE_RST_CTL_LBN 57 | ||
| 163 | #define PCIE_CORE_RST_CTL_WIDTH 1 | ||
| 164 | #define EE_RST_CTL_LBN 49 | ||
| 165 | #define EE_RST_CTL_WIDTH 1 | ||
| 166 | #define RST_XGRX_LBN 24 | ||
| 167 | #define RST_XGRX_WIDTH 1 | ||
| 168 | #define RST_XGTX_LBN 23 | ||
| 169 | #define RST_XGTX_WIDTH 1 | ||
| 170 | #define RST_EM_LBN 22 | ||
| 171 | #define RST_EM_WIDTH 1 | ||
| 172 | #define EXT_PHY_RST_DUR_LBN 1 | ||
| 173 | #define EXT_PHY_RST_DUR_WIDTH 3 | ||
| 174 | #define SWRST_LBN 0 | ||
| 175 | #define SWRST_WIDTH 1 | ||
| 176 | #define INCLUDE_IN_RESET 0 | ||
| 177 | #define EXCLUDE_FROM_RESET 1 | ||
| 178 | |||
| 179 | /* Fatal interrupt register */ | ||
| 180 | #define FATAL_INTR_REG_KER 0x0230 | ||
| 181 | #define RBUF_OWN_INT_KER_EN_LBN 39 | ||
| 182 | #define RBUF_OWN_INT_KER_EN_WIDTH 1 | ||
| 183 | #define TBUF_OWN_INT_KER_EN_LBN 38 | ||
| 184 | #define TBUF_OWN_INT_KER_EN_WIDTH 1 | ||
| 185 | #define ILL_ADR_INT_KER_EN_LBN 33 | ||
| 186 | #define ILL_ADR_INT_KER_EN_WIDTH 1 | ||
| 187 | #define MEM_PERR_INT_KER_LBN 8 | ||
| 188 | #define MEM_PERR_INT_KER_WIDTH 1 | ||
| 189 | #define INT_KER_ERROR_LBN 0 | ||
| 190 | #define INT_KER_ERROR_WIDTH 12 | ||
| 191 | |||
| 192 | #define DP_CTRL_REG 0x250 | ||
| 193 | #define FLS_EVQ_ID_LBN 0 | ||
| 194 | #define FLS_EVQ_ID_WIDTH 11 | ||
| 195 | |||
| 196 | #define MEM_STAT_REG_KER 0x260 | ||
| 197 | |||
| 198 | /* Debug probe register */ | ||
| 199 | #define DEBUG_BLK_SEL_MISC 7 | ||
| 200 | #define DEBUG_BLK_SEL_SERDES 6 | ||
| 201 | #define DEBUG_BLK_SEL_EM 5 | ||
| 202 | #define DEBUG_BLK_SEL_SR 4 | ||
| 203 | #define DEBUG_BLK_SEL_EV 3 | ||
| 204 | #define DEBUG_BLK_SEL_RX 2 | ||
| 205 | #define DEBUG_BLK_SEL_TX 1 | ||
| 206 | #define DEBUG_BLK_SEL_BIU 0 | ||
| 207 | |||
| 208 | /* FPGA build version */ | ||
| 209 | #define ALTERA_BUILD_REG_KER 0x0300 | ||
| 210 | #define VER_ALL_LBN 0 | ||
| 211 | #define VER_ALL_WIDTH 32 | ||
| 212 | |||
| 213 | /* Spare EEPROM bits register (flash 0x390) */ | ||
| 214 | #define SPARE_REG_KER 0x310 | ||
| 215 | #define MEM_PERR_EN_TX_DATA_LBN 72 | ||
| 216 | #define MEM_PERR_EN_TX_DATA_WIDTH 2 | ||
| 217 | |||
| 218 | /* Timer table for kernel access */ | ||
| 219 | #define TIMER_CMD_REG_KER 0x420 | ||
| 220 | #define TIMER_MODE_LBN 12 | ||
| 221 | #define TIMER_MODE_WIDTH 2 | ||
| 222 | #define TIMER_MODE_DIS 0 | ||
| 223 | #define TIMER_MODE_INT_HLDOFF 2 | ||
| 224 | #define TIMER_VAL_LBN 0 | ||
| 225 | #define TIMER_VAL_WIDTH 12 | ||
| 226 | |||
| 227 | /* Driver generated event register */ | ||
| 228 | #define DRV_EV_REG_KER 0x440 | ||
| 229 | #define DRV_EV_QID_LBN 64 | ||
| 230 | #define DRV_EV_QID_WIDTH 12 | ||
| 231 | #define DRV_EV_DATA_LBN 0 | ||
| 232 | #define DRV_EV_DATA_WIDTH 64 | ||
| 233 | |||
| 234 | /* Buffer table configuration register */ | ||
| 235 | #define BUF_TBL_CFG_REG_KER 0x600 | ||
| 236 | #define BUF_TBL_MODE_LBN 3 | ||
| 237 | #define BUF_TBL_MODE_WIDTH 1 | ||
| 238 | #define BUF_TBL_MODE_HALF 0 | ||
| 239 | #define BUF_TBL_MODE_FULL 1 | ||
| 240 | |||
| 241 | /* SRAM receive descriptor cache configuration register */ | ||
| 242 | #define SRM_RX_DC_CFG_REG_KER 0x610 | ||
| 243 | #define SRM_RX_DC_BASE_ADR_LBN 0 | ||
| 244 | #define SRM_RX_DC_BASE_ADR_WIDTH 21 | ||
| 245 | |||
| 246 | /* SRAM transmit descriptor cache configuration register */ | ||
| 247 | #define SRM_TX_DC_CFG_REG_KER 0x620 | ||
| 248 | #define SRM_TX_DC_BASE_ADR_LBN 0 | ||
| 249 | #define SRM_TX_DC_BASE_ADR_WIDTH 21 | ||
| 250 | |||
| 251 | /* SRAM configuration register */ | ||
| 252 | #define SRM_CFG_REG_KER 0x630 | ||
| 253 | #define SRAM_OOB_BT_INIT_EN_LBN 3 | ||
| 254 | #define SRAM_OOB_BT_INIT_EN_WIDTH 1 | ||
| 255 | #define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0 | ||
| 256 | #define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3 | ||
| 257 | #define SRM_NB_BSZ_1BANKS_2M 0 | ||
| 258 | #define SRM_NB_BSZ_1BANKS_4M 1 | ||
| 259 | #define SRM_NB_BSZ_1BANKS_8M 2 | ||
| 260 | #define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */ | ||
| 261 | #define SRM_NB_BSZ_2BANKS_4M 4 | ||
| 262 | #define SRM_NB_BSZ_2BANKS_8M 5 | ||
| 263 | #define SRM_NB_BSZ_2BANKS_16M 6 | ||
| 264 | #define SRM_NB_BSZ_RESERVED 7 | ||
| 265 | |||
| 266 | /* Special buffer table update register */ | ||
| 267 | #define BUF_TBL_UPD_REG_KER 0x0650 | ||
| 268 | #define BUF_UPD_CMD_LBN 63 | ||
| 269 | #define BUF_UPD_CMD_WIDTH 1 | ||
| 270 | #define BUF_CLR_CMD_LBN 62 | ||
| 271 | #define BUF_CLR_CMD_WIDTH 1 | ||
| 272 | #define BUF_CLR_END_ID_LBN 32 | ||
| 273 | #define BUF_CLR_END_ID_WIDTH 20 | ||
| 274 | #define BUF_CLR_START_ID_LBN 0 | ||
| 275 | #define BUF_CLR_START_ID_WIDTH 20 | ||
| 276 | |||
| 277 | /* Receive configuration register */ | ||
| 278 | #define RX_CFG_REG_KER 0x800 | ||
| 279 | |||
| 280 | /* B0 */ | ||
| 281 | #define RX_INGR_EN_B0_LBN 47 | ||
| 282 | #define RX_INGR_EN_B0_WIDTH 1 | ||
| 283 | #define RX_DESC_PUSH_EN_B0_LBN 43 | ||
| 284 | #define RX_DESC_PUSH_EN_B0_WIDTH 1 | ||
| 285 | #define RX_XON_TX_TH_B0_LBN 33 | ||
| 286 | #define RX_XON_TX_TH_B0_WIDTH 5 | ||
| 287 | #define RX_XOFF_TX_TH_B0_LBN 28 | ||
| 288 | #define RX_XOFF_TX_TH_B0_WIDTH 5 | ||
| 289 | #define RX_USR_BUF_SIZE_B0_LBN 19 | ||
| 290 | #define RX_USR_BUF_SIZE_B0_WIDTH 9 | ||
| 291 | #define RX_XON_MAC_TH_B0_LBN 10 | ||
| 292 | #define RX_XON_MAC_TH_B0_WIDTH 9 | ||
| 293 | #define RX_XOFF_MAC_TH_B0_LBN 1 | ||
| 294 | #define RX_XOFF_MAC_TH_B0_WIDTH 9 | ||
| 295 | #define RX_XOFF_MAC_EN_B0_LBN 0 | ||
| 296 | #define RX_XOFF_MAC_EN_B0_WIDTH 1 | ||
| 297 | |||
| 298 | /* A1 */ | ||
| 299 | #define RX_DESC_PUSH_EN_A1_LBN 35 | ||
| 300 | #define RX_DESC_PUSH_EN_A1_WIDTH 1 | ||
| 301 | #define RX_XON_TX_TH_A1_LBN 25 | ||
| 302 | #define RX_XON_TX_TH_A1_WIDTH 5 | ||
| 303 | #define RX_XOFF_TX_TH_A1_LBN 20 | ||
| 304 | #define RX_XOFF_TX_TH_A1_WIDTH 5 | ||
| 305 | #define RX_USR_BUF_SIZE_A1_LBN 11 | ||
| 306 | #define RX_USR_BUF_SIZE_A1_WIDTH 9 | ||
| 307 | #define RX_XON_MAC_TH_A1_LBN 6 | ||
| 308 | #define RX_XON_MAC_TH_A1_WIDTH 5 | ||
| 309 | #define RX_XOFF_MAC_TH_A1_LBN 1 | ||
| 310 | #define RX_XOFF_MAC_TH_A1_WIDTH 5 | ||
| 311 | #define RX_XOFF_MAC_EN_A1_LBN 0 | ||
| 312 | #define RX_XOFF_MAC_EN_A1_WIDTH 1 | ||
| 313 | |||
| 314 | /* Receive filter control register */ | ||
| 315 | #define RX_FILTER_CTL_REG 0x810 | ||
| 316 | #define UDP_FULL_SRCH_LIMIT_LBN 32 | ||
| 317 | #define UDP_FULL_SRCH_LIMIT_WIDTH 8 | ||
| 318 | #define NUM_KER_LBN 24 | ||
| 319 | #define NUM_KER_WIDTH 2 | ||
| 320 | #define UDP_WILD_SRCH_LIMIT_LBN 16 | ||
| 321 | #define UDP_WILD_SRCH_LIMIT_WIDTH 8 | ||
| 322 | #define TCP_WILD_SRCH_LIMIT_LBN 8 | ||
| 323 | #define TCP_WILD_SRCH_LIMIT_WIDTH 8 | ||
| 324 | #define TCP_FULL_SRCH_LIMIT_LBN 0 | ||
| 325 | #define TCP_FULL_SRCH_LIMIT_WIDTH 8 | ||
| 326 | |||
| 327 | /* RX queue flush register */ | ||
| 328 | #define RX_FLUSH_DESCQ_REG_KER 0x0820 | ||
| 329 | #define RX_FLUSH_DESCQ_CMD_LBN 24 | ||
| 330 | #define RX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
| 331 | #define RX_FLUSH_DESCQ_LBN 0 | ||
| 332 | #define RX_FLUSH_DESCQ_WIDTH 12 | ||
| 333 | |||
| 334 | /* Receive descriptor update register */ | ||
| 335 | #define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12) | ||
| 336 | #define RX_DESC_WPTR_DWORD_LBN 0 | ||
| 337 | #define RX_DESC_WPTR_DWORD_WIDTH 12 | ||
| 338 | |||
| 339 | /* Receive descriptor cache configuration register */ | ||
| 340 | #define RX_DC_CFG_REG_KER 0x840 | ||
| 341 | #define RX_DC_SIZE_LBN 0 | ||
| 342 | #define RX_DC_SIZE_WIDTH 2 | ||
| 343 | |||
| 344 | #define RX_DC_PF_WM_REG_KER 0x850 | ||
| 345 | #define RX_DC_PF_LWM_LBN 0 | ||
| 346 | #define RX_DC_PF_LWM_WIDTH 6 | ||
| 347 | |||
| 348 | /* RX no descriptor drop counter */ | ||
| 349 | #define RX_NODESC_DROP_REG_KER 0x880 | ||
| 350 | #define RX_NODESC_DROP_CNT_LBN 0 | ||
| 351 | #define RX_NODESC_DROP_CNT_WIDTH 16 | ||
| 352 | |||
| 353 | /* RX black magic register */ | ||
| 354 | #define RX_SELF_RST_REG_KER 0x890 | ||
| 355 | #define RX_ISCSI_DIS_LBN 17 | ||
| 356 | #define RX_ISCSI_DIS_WIDTH 1 | ||
| 357 | #define RX_NODESC_WAIT_DIS_LBN 9 | ||
| 358 | #define RX_NODESC_WAIT_DIS_WIDTH 1 | ||
| 359 | #define RX_RECOVERY_EN_LBN 8 | ||
| 360 | #define RX_RECOVERY_EN_WIDTH 1 | ||
| 361 | |||
| 362 | /* TX queue flush register */ | ||
| 363 | #define TX_FLUSH_DESCQ_REG_KER 0x0a00 | ||
| 364 | #define TX_FLUSH_DESCQ_CMD_LBN 12 | ||
| 365 | #define TX_FLUSH_DESCQ_CMD_WIDTH 1 | ||
| 366 | #define TX_FLUSH_DESCQ_LBN 0 | ||
| 367 | #define TX_FLUSH_DESCQ_WIDTH 12 | ||
| 368 | |||
| 369 | /* Transmit descriptor update register */ | ||
| 370 | #define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12) | ||
| 371 | #define TX_DESC_WPTR_DWORD_LBN 0 | ||
| 372 | #define TX_DESC_WPTR_DWORD_WIDTH 12 | ||
| 373 | |||
| 374 | /* Transmit descriptor cache configuration register */ | ||
| 375 | #define TX_DC_CFG_REG_KER 0xa20 | ||
| 376 | #define TX_DC_SIZE_LBN 0 | ||
| 377 | #define TX_DC_SIZE_WIDTH 2 | ||
| 378 | |||
| 379 | /* Transmit checksum configuration register (A0/A1 only) */ | ||
| 380 | #define TX_CHKSM_CFG_REG_KER_A1 0xa30 | ||
| 381 | |||
| 382 | /* Transmit configuration register */ | ||
| 383 | #define TX_CFG_REG_KER 0xa50 | ||
| 384 | #define TX_NO_EOP_DISC_EN_LBN 5 | ||
| 385 | #define TX_NO_EOP_DISC_EN_WIDTH 1 | ||
| 386 | |||
| 387 | /* Transmit configuration register 2 */ | ||
| 388 | #define TX_CFG2_REG_KER 0xa80 | ||
| 389 | #define TX_CSR_PUSH_EN_LBN 89 | ||
| 390 | #define TX_CSR_PUSH_EN_WIDTH 1 | ||
| 391 | #define TX_RX_SPACER_LBN 64 | ||
| 392 | #define TX_RX_SPACER_WIDTH 8 | ||
| 393 | #define TX_SW_EV_EN_LBN 59 | ||
| 394 | #define TX_SW_EV_EN_WIDTH 1 | ||
| 395 | #define TX_RX_SPACER_EN_LBN 57 | ||
| 396 | #define TX_RX_SPACER_EN_WIDTH 1 | ||
| 397 | #define TX_PREF_THRESHOLD_LBN 19 | ||
| 398 | #define TX_PREF_THRESHOLD_WIDTH 2 | ||
| 399 | #define TX_ONE_PKT_PER_Q_LBN 18 | ||
| 400 | #define TX_ONE_PKT_PER_Q_WIDTH 1 | ||
| 401 | #define TX_DIS_NON_IP_EV_LBN 17 | ||
| 402 | #define TX_DIS_NON_IP_EV_WIDTH 1 | ||
| 403 | #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7 | ||
| 404 | #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1 | ||
| 405 | |||
| 406 | /* PHY management transmit data register */ | ||
| 407 | #define MD_TXD_REG_KER 0xc00 | ||
| 408 | #define MD_TXD_LBN 0 | ||
| 409 | #define MD_TXD_WIDTH 16 | ||
| 410 | |||
| 411 | /* PHY management receive data register */ | ||
| 412 | #define MD_RXD_REG_KER 0xc10 | ||
| 413 | #define MD_RXD_LBN 0 | ||
| 414 | #define MD_RXD_WIDTH 16 | ||
| 415 | |||
| 416 | /* PHY management configuration & status register */ | ||
| 417 | #define MD_CS_REG_KER 0xc20 | ||
| 418 | #define MD_GC_LBN 4 | ||
| 419 | #define MD_GC_WIDTH 1 | ||
| 420 | #define MD_RIC_LBN 2 | ||
| 421 | #define MD_RIC_WIDTH 1 | ||
| 422 | #define MD_RDC_LBN 1 | ||
| 423 | #define MD_RDC_WIDTH 1 | ||
| 424 | #define MD_WRC_LBN 0 | ||
| 425 | #define MD_WRC_WIDTH 1 | ||
| 426 | |||
| 427 | /* PHY management PHY address register */ | ||
| 428 | #define MD_PHY_ADR_REG_KER 0xc30 | ||
| 429 | #define MD_PHY_ADR_LBN 0 | ||
| 430 | #define MD_PHY_ADR_WIDTH 16 | ||
| 431 | |||
| 432 | /* PHY management ID register */ | ||
| 433 | #define MD_ID_REG_KER 0xc40 | ||
| 434 | #define MD_PRT_ADR_LBN 11 | ||
| 435 | #define MD_PRT_ADR_WIDTH 5 | ||
| 436 | #define MD_DEV_ADR_LBN 6 | ||
| 437 | #define MD_DEV_ADR_WIDTH 5 | ||
| 438 | /* Used for writing both at once */ | ||
| 439 | #define MD_PRT_DEV_ADR_LBN 6 | ||
| 440 | #define MD_PRT_DEV_ADR_WIDTH 10 | ||
| 441 | |||
| 442 | /* PHY management status & mask register (DWORD read only) */ | ||
| 443 | #define MD_STAT_REG_KER 0xc50 | ||
| 444 | #define MD_BSERR_LBN 2 | ||
| 445 | #define MD_BSERR_WIDTH 1 | ||
| 446 | #define MD_LNFL_LBN 1 | ||
| 447 | #define MD_LNFL_WIDTH 1 | ||
| 448 | #define MD_BSY_LBN 0 | ||
| 449 | #define MD_BSY_WIDTH 1 | ||
| 450 | |||
| 451 | /* Port 0 and 1 MAC stats registers */ | ||
| 452 | #define MAC0_STAT_DMA_REG_KER 0xc60 | ||
| 453 | #define MAC_STAT_DMA_CMD_LBN 48 | ||
| 454 | #define MAC_STAT_DMA_CMD_WIDTH 1 | ||
| 455 | #define MAC_STAT_DMA_ADR_LBN 0 | ||
| 456 | #define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
| 457 | |||
| 458 | /* Port 0 and 1 MAC control registers */ | ||
| 459 | #define MAC0_CTRL_REG_KER 0xc80 | ||
| 460 | #define MAC_XOFF_VAL_LBN 16 | ||
| 461 | #define MAC_XOFF_VAL_WIDTH 16 | ||
| 462 | #define TXFIFO_DRAIN_EN_B0_LBN 7 | ||
| 463 | #define TXFIFO_DRAIN_EN_B0_WIDTH 1 | ||
| 464 | #define MAC_BCAD_ACPT_LBN 4 | ||
| 465 | #define MAC_BCAD_ACPT_WIDTH 1 | ||
| 466 | #define MAC_UC_PROM_LBN 3 | ||
| 467 | #define MAC_UC_PROM_WIDTH 1 | ||
| 468 | #define MAC_LINK_STATUS_LBN 2 | ||
| 469 | #define MAC_LINK_STATUS_WIDTH 1 | ||
| 470 | #define MAC_SPEED_LBN 0 | ||
| 471 | #define MAC_SPEED_WIDTH 2 | ||
| 472 | |||
| 473 | /* 10G XAUI XGXS default values */ | ||
| 474 | #define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */ | ||
| 475 | #define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */ | ||
| 476 | #define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */ | ||
| 477 | |||
| 478 | /* Multicast address hash table */ | ||
| 479 | #define MAC_MCAST_HASH_REG0_KER 0xca0 | ||
| 480 | #define MAC_MCAST_HASH_REG1_KER 0xcb0 | ||
| 481 | |||
| 482 | /* GMAC registers */ | ||
| 483 | #define FALCON_GMAC_REGBANK 0xe00 | ||
| 484 | #define FALCON_GMAC_REGBANK_SIZE 0x200 | ||
| 485 | #define FALCON_GMAC_REG_SIZE 0x10 | ||
| 486 | |||
| 487 | /* XMAC registers */ | ||
| 488 | #define FALCON_XMAC_REGBANK 0x1200 | ||
| 489 | #define FALCON_XMAC_REGBANK_SIZE 0x200 | ||
| 490 | #define FALCON_XMAC_REG_SIZE 0x10 | ||
| 491 | |||
| 492 | /* XGMAC address register low */ | ||
| 493 | #define XM_ADR_LO_REG_MAC 0x00 | ||
| 494 | #define XM_ADR_3_LBN 24 | ||
| 495 | #define XM_ADR_3_WIDTH 8 | ||
| 496 | #define XM_ADR_2_LBN 16 | ||
| 497 | #define XM_ADR_2_WIDTH 8 | ||
| 498 | #define XM_ADR_1_LBN 8 | ||
| 499 | #define XM_ADR_1_WIDTH 8 | ||
| 500 | #define XM_ADR_0_LBN 0 | ||
| 501 | #define XM_ADR_0_WIDTH 8 | ||
| 502 | |||
| 503 | /* XGMAC address register high */ | ||
| 504 | #define XM_ADR_HI_REG_MAC 0x01 | ||
| 505 | #define XM_ADR_5_LBN 8 | ||
| 506 | #define XM_ADR_5_WIDTH 8 | ||
| 507 | #define XM_ADR_4_LBN 0 | ||
| 508 | #define XM_ADR_4_WIDTH 8 | ||
| 509 | |||
| 510 | /* XGMAC global configuration */ | ||
| 511 | #define XM_GLB_CFG_REG_MAC 0x02 | ||
| 512 | #define XM_RX_STAT_EN_LBN 11 | ||
| 513 | #define XM_RX_STAT_EN_WIDTH 1 | ||
| 514 | #define XM_TX_STAT_EN_LBN 10 | ||
| 515 | #define XM_TX_STAT_EN_WIDTH 1 | ||
| 516 | #define XM_RX_JUMBO_MODE_LBN 6 | ||
| 517 | #define XM_RX_JUMBO_MODE_WIDTH 1 | ||
| 518 | #define XM_INTCLR_MODE_LBN 3 | ||
| 519 | #define XM_INTCLR_MODE_WIDTH 1 | ||
| 520 | #define XM_CORE_RST_LBN 0 | ||
| 521 | #define XM_CORE_RST_WIDTH 1 | ||
| 522 | |||
| 523 | /* XGMAC transmit configuration */ | ||
| 524 | #define XM_TX_CFG_REG_MAC 0x03 | ||
| 525 | #define XM_IPG_LBN 16 | ||
| 526 | #define XM_IPG_WIDTH 4 | ||
| 527 | #define XM_FCNTL_LBN 10 | ||
| 528 | #define XM_FCNTL_WIDTH 1 | ||
| 529 | #define XM_TXCRC_LBN 8 | ||
| 530 | #define XM_TXCRC_WIDTH 1 | ||
| 531 | #define XM_AUTO_PAD_LBN 5 | ||
| 532 | #define XM_AUTO_PAD_WIDTH 1 | ||
| 533 | #define XM_TX_PRMBL_LBN 2 | ||
| 534 | #define XM_TX_PRMBL_WIDTH 1 | ||
| 535 | #define XM_TXEN_LBN 1 | ||
| 536 | #define XM_TXEN_WIDTH 1 | ||
| 537 | |||
| 538 | /* XGMAC receive configuration */ | ||
| 539 | #define XM_RX_CFG_REG_MAC 0x04 | ||
| 540 | #define XM_PASS_CRC_ERR_LBN 25 | ||
| 541 | #define XM_PASS_CRC_ERR_WIDTH 1 | ||
| 542 | #define XM_ACPT_ALL_MCAST_LBN 11 | ||
| 543 | #define XM_ACPT_ALL_MCAST_WIDTH 1 | ||
| 544 | #define XM_ACPT_ALL_UCAST_LBN 9 | ||
| 545 | #define XM_ACPT_ALL_UCAST_WIDTH 1 | ||
| 546 | #define XM_AUTO_DEPAD_LBN 8 | ||
| 547 | #define XM_AUTO_DEPAD_WIDTH 1 | ||
| 548 | #define XM_RXEN_LBN 1 | ||
| 549 | #define XM_RXEN_WIDTH 1 | ||
| 550 | |||
| 551 | /* XGMAC management interrupt mask register */ | ||
| 552 | #define XM_MGT_INT_MSK_REG_MAC_B0 0x5 | ||
| 553 | #define XM_MSK_PRMBLE_ERR_LBN 2 | ||
| 554 | #define XM_MSK_PRMBLE_ERR_WIDTH 1 | ||
| 555 | #define XM_MSK_RMTFLT_LBN 1 | ||
| 556 | #define XM_MSK_RMTFLT_WIDTH 1 | ||
| 557 | #define XM_MSK_LCLFLT_LBN 0 | ||
| 558 | #define XM_MSK_LCLFLT_WIDTH 1 | ||
| 559 | |||
| 560 | /* XGMAC flow control register */ | ||
| 561 | #define XM_FC_REG_MAC 0x7 | ||
| 562 | #define XM_PAUSE_TIME_LBN 16 | ||
| 563 | #define XM_PAUSE_TIME_WIDTH 16 | ||
| 564 | #define XM_DIS_FCNTL_LBN 0 | ||
| 565 | #define XM_DIS_FCNTL_WIDTH 1 | ||
| 566 | |||
| 567 | /* XGMAC pause time count register */ | ||
| 568 | #define XM_PAUSE_TIME_REG_MAC 0x9 | ||
| 569 | |||
| 570 | /* XGMAC transmit parameter register */ | ||
| 571 | #define XM_TX_PARAM_REG_MAC 0x0d | ||
| 572 | #define XM_TX_JUMBO_MODE_LBN 31 | ||
| 573 | #define XM_TX_JUMBO_MODE_WIDTH 1 | ||
| 574 | #define XM_MAX_TX_FRM_SIZE_LBN 16 | ||
| 575 | #define XM_MAX_TX_FRM_SIZE_WIDTH 14 | ||
| 576 | |||
| 577 | /* XGMAC receive parameter register */ | ||
| 578 | #define XM_RX_PARAM_REG_MAC 0x0e | ||
| 579 | #define XM_MAX_RX_FRM_SIZE_LBN 0 | ||
| 580 | #define XM_MAX_RX_FRM_SIZE_WIDTH 14 | ||
| 581 | |||
| 582 | /* XGMAC management interrupt status register */ | ||
| 583 | #define XM_MGT_INT_REG_MAC_B0 0x0f | ||
| 584 | #define XM_PRMBLE_ERR 2 | ||
| 585 | #define XM_PRMBLE_WIDTH 1 | ||
| 586 | #define XM_RMTFLT_LBN 1 | ||
| 587 | #define XM_RMTFLT_WIDTH 1 | ||
| 588 | #define XM_LCLFLT_LBN 0 | ||
| 589 | #define XM_LCLFLT_WIDTH 1 | ||
| 590 | |||
| 591 | /* XGXS/XAUI powerdown/reset register */ | ||
| 592 | #define XX_PWR_RST_REG_MAC 0x10 | ||
| 593 | |||
| 594 | #define XX_PWRDND_EN_LBN 15 | ||
| 595 | #define XX_PWRDND_EN_WIDTH 1 | ||
| 596 | #define XX_PWRDNC_EN_LBN 14 | ||
| 597 | #define XX_PWRDNC_EN_WIDTH 1 | ||
| 598 | #define XX_PWRDNB_EN_LBN 13 | ||
| 599 | #define XX_PWRDNB_EN_WIDTH 1 | ||
| 600 | #define XX_PWRDNA_EN_LBN 12 | ||
| 601 | #define XX_PWRDNA_EN_WIDTH 1 | ||
| 602 | #define XX_RSTPLLCD_EN_LBN 9 | ||
| 603 | #define XX_RSTPLLCD_EN_WIDTH 1 | ||
| 604 | #define XX_RSTPLLAB_EN_LBN 8 | ||
| 605 | #define XX_RSTPLLAB_EN_WIDTH 1 | ||
| 606 | #define XX_RESETD_EN_LBN 7 | ||
| 607 | #define XX_RESETD_EN_WIDTH 1 | ||
| 608 | #define XX_RESETC_EN_LBN 6 | ||
| 609 | #define XX_RESETC_EN_WIDTH 1 | ||
| 610 | #define XX_RESETB_EN_LBN 5 | ||
| 611 | #define XX_RESETB_EN_WIDTH 1 | ||
| 612 | #define XX_RESETA_EN_LBN 4 | ||
| 613 | #define XX_RESETA_EN_WIDTH 1 | ||
| 614 | #define XX_RSTXGXSRX_EN_LBN 2 | ||
| 615 | #define XX_RSTXGXSRX_EN_WIDTH 1 | ||
| 616 | #define XX_RSTXGXSTX_EN_LBN 1 | ||
| 617 | #define XX_RSTXGXSTX_EN_WIDTH 1 | ||
| 618 | #define XX_RST_XX_EN_LBN 0 | ||
| 619 | #define XX_RST_XX_EN_WIDTH 1 | ||
| 620 | |||
| 621 | /* XGXS/XAUI powerdown/reset control register */ | ||
| 622 | #define XX_SD_CTL_REG_MAC 0x11 | ||
| 623 | #define XX_HIDRVD_LBN 15 | ||
| 624 | #define XX_HIDRVD_WIDTH 1 | ||
| 625 | #define XX_LODRVD_LBN 14 | ||
| 626 | #define XX_LODRVD_WIDTH 1 | ||
| 627 | #define XX_HIDRVC_LBN 13 | ||
| 628 | #define XX_HIDRVC_WIDTH 1 | ||
| 629 | #define XX_LODRVC_LBN 12 | ||
| 630 | #define XX_LODRVC_WIDTH 1 | ||
| 631 | #define XX_HIDRVB_LBN 11 | ||
| 632 | #define XX_HIDRVB_WIDTH 1 | ||
| 633 | #define XX_LODRVB_LBN 10 | ||
| 634 | #define XX_LODRVB_WIDTH 1 | ||
| 635 | #define XX_HIDRVA_LBN 9 | ||
| 636 | #define XX_HIDRVA_WIDTH 1 | ||
| 637 | #define XX_LODRVA_LBN 8 | ||
| 638 | #define XX_LODRVA_WIDTH 1 | ||
| 639 | |||
| 640 | #define XX_TXDRV_CTL_REG_MAC 0x12 | ||
| 641 | #define XX_DEQD_LBN 28 | ||
| 642 | #define XX_DEQD_WIDTH 4 | ||
| 643 | #define XX_DEQC_LBN 24 | ||
| 644 | #define XX_DEQC_WIDTH 4 | ||
| 645 | #define XX_DEQB_LBN 20 | ||
| 646 | #define XX_DEQB_WIDTH 4 | ||
| 647 | #define XX_DEQA_LBN 16 | ||
| 648 | #define XX_DEQA_WIDTH 4 | ||
| 649 | #define XX_DTXD_LBN 12 | ||
| 650 | #define XX_DTXD_WIDTH 4 | ||
| 651 | #define XX_DTXC_LBN 8 | ||
| 652 | #define XX_DTXC_WIDTH 4 | ||
| 653 | #define XX_DTXB_LBN 4 | ||
| 654 | #define XX_DTXB_WIDTH 4 | ||
| 655 | #define XX_DTXA_LBN 0 | ||
| 656 | #define XX_DTXA_WIDTH 4 | ||
| 657 | |||
| 658 | /* XAUI XGXS core status register */ | ||
| 659 | #define XX_FORCE_SIG_DECODE_FORCED 0xff | ||
| 660 | #define XX_CORE_STAT_REG_MAC 0x16 | ||
| 661 | #define XX_ALIGN_DONE_LBN 20 | ||
| 662 | #define XX_ALIGN_DONE_WIDTH 1 | ||
| 663 | #define XX_SYNC_STAT_LBN 16 | ||
| 664 | #define XX_SYNC_STAT_WIDTH 4 | ||
| 665 | #define XX_SYNC_STAT_DECODE_SYNCED 0xf | ||
| 666 | #define XX_COMMA_DET_LBN 12 | ||
| 667 | #define XX_COMMA_DET_WIDTH 4 | ||
| 668 | #define XX_COMMA_DET_DECODE_DETECTED 0xf | ||
| 669 | #define XX_COMMA_DET_RESET 0xf | ||
| 670 | #define XX_CHARERR_LBN 4 | ||
| 671 | #define XX_CHARERR_WIDTH 4 | ||
| 672 | #define XX_CHARERR_RESET 0xf | ||
| 673 | #define XX_DISPERR_LBN 0 | ||
| 674 | #define XX_DISPERR_WIDTH 4 | ||
| 675 | #define XX_DISPERR_RESET 0xf | ||
| 676 | |||
| 677 | /* Receive filter table */ | ||
| 678 | #define RX_FILTER_TBL0 0xF00000 | ||
| 679 | |||
| 680 | /* Receive descriptor pointer table */ | ||
| 681 | #define RX_DESC_PTR_TBL_KER_A1 0x11800 | ||
| 682 | #define RX_DESC_PTR_TBL_KER_B0 0xF40000 | ||
| 683 | #define RX_DESC_PTR_TBL_KER_P0 0x900 | ||
| 684 | #define RX_ISCSI_DDIG_EN_LBN 88 | ||
| 685 | #define RX_ISCSI_DDIG_EN_WIDTH 1 | ||
| 686 | #define RX_ISCSI_HDIG_EN_LBN 87 | ||
| 687 | #define RX_ISCSI_HDIG_EN_WIDTH 1 | ||
| 688 | #define RX_DESCQ_BUF_BASE_ID_LBN 36 | ||
| 689 | #define RX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
| 690 | #define RX_DESCQ_EVQ_ID_LBN 24 | ||
| 691 | #define RX_DESCQ_EVQ_ID_WIDTH 12 | ||
| 692 | #define RX_DESCQ_OWNER_ID_LBN 10 | ||
| 693 | #define RX_DESCQ_OWNER_ID_WIDTH 14 | ||
| 694 | #define RX_DESCQ_LABEL_LBN 5 | ||
| 695 | #define RX_DESCQ_LABEL_WIDTH 5 | ||
| 696 | #define RX_DESCQ_SIZE_LBN 3 | ||
| 697 | #define RX_DESCQ_SIZE_WIDTH 2 | ||
| 698 | #define RX_DESCQ_SIZE_4K 3 | ||
| 699 | #define RX_DESCQ_SIZE_2K 2 | ||
| 700 | #define RX_DESCQ_SIZE_1K 1 | ||
| 701 | #define RX_DESCQ_SIZE_512 0 | ||
| 702 | #define RX_DESCQ_TYPE_LBN 2 | ||
| 703 | #define RX_DESCQ_TYPE_WIDTH 1 | ||
| 704 | #define RX_DESCQ_JUMBO_LBN 1 | ||
| 705 | #define RX_DESCQ_JUMBO_WIDTH 1 | ||
| 706 | #define RX_DESCQ_EN_LBN 0 | ||
| 707 | #define RX_DESCQ_EN_WIDTH 1 | ||
| 708 | |||
| 709 | /* Transmit descriptor pointer table */ | ||
| 710 | #define TX_DESC_PTR_TBL_KER_A1 0x11900 | ||
| 711 | #define TX_DESC_PTR_TBL_KER_B0 0xF50000 | ||
| 712 | #define TX_DESC_PTR_TBL_KER_P0 0xa40 | ||
| 713 | #define TX_NON_IP_DROP_DIS_B0_LBN 91 | ||
| 714 | #define TX_NON_IP_DROP_DIS_B0_WIDTH 1 | ||
| 715 | #define TX_IP_CHKSM_DIS_B0_LBN 90 | ||
| 716 | #define TX_IP_CHKSM_DIS_B0_WIDTH 1 | ||
| 717 | #define TX_TCP_CHKSM_DIS_B0_LBN 89 | ||
| 718 | #define TX_TCP_CHKSM_DIS_B0_WIDTH 1 | ||
| 719 | #define TX_DESCQ_EN_LBN 88 | ||
| 720 | #define TX_DESCQ_EN_WIDTH 1 | ||
| 721 | #define TX_ISCSI_DDIG_EN_LBN 87 | ||
| 722 | #define TX_ISCSI_DDIG_EN_WIDTH 1 | ||
| 723 | #define TX_ISCSI_HDIG_EN_LBN 86 | ||
| 724 | #define TX_ISCSI_HDIG_EN_WIDTH 1 | ||
| 725 | #define TX_DESCQ_BUF_BASE_ID_LBN 36 | ||
| 726 | #define TX_DESCQ_BUF_BASE_ID_WIDTH 20 | ||
| 727 | #define TX_DESCQ_EVQ_ID_LBN 24 | ||
| 728 | #define TX_DESCQ_EVQ_ID_WIDTH 12 | ||
| 729 | #define TX_DESCQ_OWNER_ID_LBN 10 | ||
| 730 | #define TX_DESCQ_OWNER_ID_WIDTH 14 | ||
| 731 | #define TX_DESCQ_LABEL_LBN 5 | ||
| 732 | #define TX_DESCQ_LABEL_WIDTH 5 | ||
| 733 | #define TX_DESCQ_SIZE_LBN 3 | ||
| 734 | #define TX_DESCQ_SIZE_WIDTH 2 | ||
| 735 | #define TX_DESCQ_SIZE_4K 3 | ||
| 736 | #define TX_DESCQ_SIZE_2K 2 | ||
| 737 | #define TX_DESCQ_SIZE_1K 1 | ||
| 738 | #define TX_DESCQ_SIZE_512 0 | ||
| 739 | #define TX_DESCQ_TYPE_LBN 1 | ||
| 740 | #define TX_DESCQ_TYPE_WIDTH 2 | ||
| 741 | |||
| 742 | /* Event queue pointer */ | ||
| 743 | #define EVQ_PTR_TBL_KER_A1 0x11a00 | ||
| 744 | #define EVQ_PTR_TBL_KER_B0 0xf60000 | ||
| 745 | #define EVQ_PTR_TBL_KER_P0 0x500 | ||
| 746 | #define EVQ_EN_LBN 23 | ||
| 747 | #define EVQ_EN_WIDTH 1 | ||
| 748 | #define EVQ_SIZE_LBN 20 | ||
| 749 | #define EVQ_SIZE_WIDTH 3 | ||
| 750 | #define EVQ_SIZE_32K 6 | ||
| 751 | #define EVQ_SIZE_16K 5 | ||
| 752 | #define EVQ_SIZE_8K 4 | ||
| 753 | #define EVQ_SIZE_4K 3 | ||
| 754 | #define EVQ_SIZE_2K 2 | ||
| 755 | #define EVQ_SIZE_1K 1 | ||
| 756 | #define EVQ_SIZE_512 0 | ||
| 757 | #define EVQ_BUF_BASE_ID_LBN 0 | ||
| 758 | #define EVQ_BUF_BASE_ID_WIDTH 20 | ||
| 759 | |||
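The EVQ_SIZE codes above (like the RX_DESCQ_SIZE and TX_DESCQ_SIZE codes earlier) follow a "512 << code" pattern, from 512 entries at code 0 up to 32K entries at code 6. A hedged standalone sketch of mapping a queue length back to its code (evq_size_code() is illustrative, not a driver function):

#include <stdio.h>

/* Return the EVQ_SIZE code for a queue of 'entries' entries, or -1 if the
 * size is not one of the supported powers of two. */
static int evq_size_code(unsigned int entries)
{
	int code;

	for (code = 0; code <= 6; code++)
		if (entries == 512u << code)
			return code;
	return -1;
}

int main(void)
{
	/* Prints 3, i.e. EVQ_SIZE_4K */
	printf("4096 entries -> EVQ_SIZE code %d\n", evq_size_code(4096));
	return 0;
}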
| 760 | /* Event queue read pointer */ | ||
| 761 | #define EVQ_RPTR_REG_KER_A1 0x11b00 | ||
| 762 | #define EVQ_RPTR_REG_KER_B0 0xfa0000 | ||
| 763 | #define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0) | ||
| 764 | #define EVQ_RPTR_DWORD_LBN 0 | ||
| 765 | #define EVQ_RPTR_DWORD_WIDTH 14 | ||
| 766 | |||
| 767 | /* RSS indirection table */ | ||
| 768 | #define RX_RSS_INDIR_TBL_B0 0xFB0000 | ||
| 769 | #define RX_RSS_INDIR_ENT_B0_LBN 0 | ||
| 770 | #define RX_RSS_INDIR_ENT_B0_WIDTH 6 | ||
| 771 | |||
| 772 | /* Special buffer descriptors (full-mode) */ | ||
| 773 | #define BUF_FULL_TBL_KER_A1 0x8000 | ||
| 774 | #define BUF_FULL_TBL_KER_B0 0x800000 | ||
| 775 | #define IP_DAT_BUF_SIZE_LBN 50 | ||
| 776 | #define IP_DAT_BUF_SIZE_WIDTH 1 | ||
| 777 | #define IP_DAT_BUF_SIZE_8K 1 | ||
| 778 | #define IP_DAT_BUF_SIZE_4K 0 | ||
| 779 | #define BUF_ADR_REGION_LBN 48 | ||
| 780 | #define BUF_ADR_REGION_WIDTH 2 | ||
| 781 | #define BUF_ADR_FBUF_LBN 14 | ||
| 782 | #define BUF_ADR_FBUF_WIDTH 34 | ||
| 783 | #define BUF_OWNER_ID_FBUF_LBN 0 | ||
| 784 | #define BUF_OWNER_ID_FBUF_WIDTH 14 | ||
| 785 | |||
| 786 | /* Transmit descriptor */ | ||
| 787 | #define TX_KER_PORT_LBN 63 | ||
| 788 | #define TX_KER_PORT_WIDTH 1 | ||
| 789 | #define TX_KER_CONT_LBN 62 | ||
| 790 | #define TX_KER_CONT_WIDTH 1 | ||
| 791 | #define TX_KER_BYTE_CNT_LBN 48 | ||
| 792 | #define TX_KER_BYTE_CNT_WIDTH 14 | ||
| 793 | #define TX_KER_BUF_REGION_LBN 46 | ||
| 794 | #define TX_KER_BUF_REGION_WIDTH 2 | ||
| 795 | #define TX_KER_BUF_REGION0_DECODE 0 | ||
| 796 | #define TX_KER_BUF_REGION1_DECODE 1 | ||
| 797 | #define TX_KER_BUF_REGION2_DECODE 2 | ||
| 798 | #define TX_KER_BUF_REGION3_DECODE 3 | ||
| 799 | #define TX_KER_BUF_ADR_LBN 0 | ||
| 800 | #define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
| 801 | |||
| 802 | /* Receive descriptor */ | ||
| 803 | #define RX_KER_BUF_SIZE_LBN 48 | ||
| 804 | #define RX_KER_BUF_SIZE_WIDTH 14 | ||
| 805 | #define RX_KER_BUF_REGION_LBN 46 | ||
| 806 | #define RX_KER_BUF_REGION_WIDTH 2 | ||
| 807 | #define RX_KER_BUF_REGION0_DECODE 0 | ||
| 808 | #define RX_KER_BUF_REGION1_DECODE 1 | ||
| 809 | #define RX_KER_BUF_REGION2_DECODE 2 | ||
| 810 | #define RX_KER_BUF_REGION3_DECODE 3 | ||
| 811 | #define RX_KER_BUF_ADR_LBN 0 | ||
| 812 | #define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46) | ||
| 813 | |||
| 814 | /************************************************************************** | ||
| 815 | * | ||
| 816 | * Falcon events | ||
| 817 | * | ||
| 818 | ************************************************************************** | ||
| 819 | */ | ||
| 820 | |||
| 821 | /* Event queue entries */ | ||
| 822 | #define EV_CODE_LBN 60 | ||
| 823 | #define EV_CODE_WIDTH 4 | ||
| 824 | #define RX_IP_EV_DECODE 0 | ||
| 825 | #define TX_IP_EV_DECODE 2 | ||
| 826 | #define DRIVER_EV_DECODE 5 | ||
| 827 | #define GLOBAL_EV_DECODE 6 | ||
| 828 | #define DRV_GEN_EV_DECODE 7 | ||
| 829 | #define WHOLE_EVENT_LBN 0 | ||
| 830 | #define WHOLE_EVENT_WIDTH 64 | ||
| 831 | |||
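EV_CODE occupies the top four bits of each 64-bit event, and the *_EV_DECODE values above are the codes it can take. A standalone sketch of that decode step (ev_code() is an illustrative helper, not the driver's event-processing loop):

#include <stdint.h>
#include <stdio.h>

#define EV_CODE_LBN      60
#define EV_CODE_WIDTH    4
#define RX_IP_EV_DECODE  0
#define TX_IP_EV_DECODE  2
#define DRIVER_EV_DECODE 5

/* Extract the 4-bit event code from a 64-bit event word. */
static unsigned int ev_code(uint64_t event)
{
	return (unsigned int)((event >> EV_CODE_LBN) &
			      ((1ull << EV_CODE_WIDTH) - 1));
}

int main(void)
{
	uint64_t event = (uint64_t)TX_IP_EV_DECODE << EV_CODE_LBN;

	switch (ev_code(event)) {
	case RX_IP_EV_DECODE:
		printf("RX completion event\n");
		break;
	case TX_IP_EV_DECODE:
		printf("TX completion event\n");
		break;
	case DRIVER_EV_DECODE:
		printf("driver event\n");
		break;
	default:
		printf("other event, code %u\n", ev_code(event));
		break;
	}
	return 0;
}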
| 832 | /* Receive events */ | ||
| 833 | #define RX_EV_PKT_OK_LBN 56 | ||
| 834 | #define RX_EV_PKT_OK_WIDTH 1 | ||
| 835 | #define RX_EV_PAUSE_FRM_ERR_LBN 55 | ||
| 836 | #define RX_EV_PAUSE_FRM_ERR_WIDTH 1 | ||
| 837 | #define RX_EV_BUF_OWNER_ID_ERR_LBN 54 | ||
| 838 | #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1 | ||
| 839 | #define RX_EV_IF_FRAG_ERR_LBN 53 | ||
| 840 | #define RX_EV_IF_FRAG_ERR_WIDTH 1 | ||
| 841 | #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52 | ||
| 842 | #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1 | ||
| 843 | #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51 | ||
| 844 | #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1 | ||
| 845 | #define RX_EV_ETH_CRC_ERR_LBN 50 | ||
| 846 | #define RX_EV_ETH_CRC_ERR_WIDTH 1 | ||
| 847 | #define RX_EV_FRM_TRUNC_LBN 49 | ||
| 848 | #define RX_EV_FRM_TRUNC_WIDTH 1 | ||
| 849 | #define RX_EV_DRIB_NIB_LBN 48 | ||
| 850 | #define RX_EV_DRIB_NIB_WIDTH 1 | ||
| 851 | #define RX_EV_TOBE_DISC_LBN 47 | ||
| 852 | #define RX_EV_TOBE_DISC_WIDTH 1 | ||
| 853 | #define RX_EV_PKT_TYPE_LBN 44 | ||
| 854 | #define RX_EV_PKT_TYPE_WIDTH 3 | ||
| 855 | #define RX_EV_PKT_TYPE_ETH_DECODE 0 | ||
| 856 | #define RX_EV_PKT_TYPE_LLC_DECODE 1 | ||
| 857 | #define RX_EV_PKT_TYPE_JUMBO_DECODE 2 | ||
| 858 | #define RX_EV_PKT_TYPE_VLAN_DECODE 3 | ||
| 859 | #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4 | ||
| 860 | #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5 | ||
| 861 | #define RX_EV_HDR_TYPE_LBN 42 | ||
| 862 | #define RX_EV_HDR_TYPE_WIDTH 2 | ||
| 863 | #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0 | ||
| 864 | #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1 | ||
| 865 | #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2 | ||
| 866 | #define RX_EV_HDR_TYPE_NON_IP_DECODE 3 | ||
| 867 | #define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \ | ||
| 868 | ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE) | ||
| 869 | #define RX_EV_MCAST_HASH_MATCH_LBN 40 | ||
| 870 | #define RX_EV_MCAST_HASH_MATCH_WIDTH 1 | ||
| 871 | #define RX_EV_MCAST_PKT_LBN 39 | ||
| 872 | #define RX_EV_MCAST_PKT_WIDTH 1 | ||
| 873 | #define RX_EV_Q_LABEL_LBN 32 | ||
| 874 | #define RX_EV_Q_LABEL_WIDTH 5 | ||
| 875 | #define RX_EV_JUMBO_CONT_LBN 31 | ||
| 876 | #define RX_EV_JUMBO_CONT_WIDTH 1 | ||
| 877 | #define RX_EV_BYTE_CNT_LBN 16 | ||
| 878 | #define RX_EV_BYTE_CNT_WIDTH 14 | ||
| 879 | #define RX_EV_SOP_LBN 15 | ||
| 880 | #define RX_EV_SOP_WIDTH 1 | ||
| 881 | #define RX_EV_DESC_PTR_LBN 0 | ||
| 882 | #define RX_EV_DESC_PTR_WIDTH 12 | ||
| 883 | |||
| 884 | /* Transmit events */ | ||
| 885 | #define TX_EV_PKT_ERR_LBN 38 | ||
| 886 | #define TX_EV_PKT_ERR_WIDTH 1 | ||
| 887 | #define TX_EV_Q_LABEL_LBN 32 | ||
| 888 | #define TX_EV_Q_LABEL_WIDTH 5 | ||
| 889 | #define TX_EV_WQ_FF_FULL_LBN 15 | ||
| 890 | #define TX_EV_WQ_FF_FULL_WIDTH 1 | ||
| 891 | #define TX_EV_COMP_LBN 12 | ||
| 892 | #define TX_EV_COMP_WIDTH 1 | ||
| 893 | #define TX_EV_DESC_PTR_LBN 0 | ||
| 894 | #define TX_EV_DESC_PTR_WIDTH 12 | ||
| 895 | |||
| 896 | /* Driver events */ | ||
| 897 | #define DRIVER_EV_SUB_CODE_LBN 56 | ||
| 898 | #define DRIVER_EV_SUB_CODE_WIDTH 4 | ||
| 899 | #define DRIVER_EV_SUB_DATA_LBN 0 | ||
| 900 | #define DRIVER_EV_SUB_DATA_WIDTH 14 | ||
| 901 | #define TX_DESCQ_FLS_DONE_EV_DECODE 0 | ||
| 902 | #define RX_DESCQ_FLS_DONE_EV_DECODE 1 | ||
| 903 | #define EVQ_INIT_DONE_EV_DECODE 2 | ||
| 904 | #define EVQ_NOT_EN_EV_DECODE 3 | ||
| 905 | #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4 | ||
| 906 | #define SRM_UPD_DONE_EV_DECODE 5 | ||
| 907 | #define WAKE_UP_EV_DECODE 6 | ||
| 908 | #define TX_PKT_NON_TCP_UDP_DECODE 9 | ||
| 909 | #define TIMER_EV_DECODE 10 | ||
| 910 | #define RX_RECOVERY_EV_DECODE 11 | ||
| 911 | #define RX_DSC_ERROR_EV_DECODE 14 | ||
| 912 | #define TX_DSC_ERROR_EV_DECODE 15 | ||
| 913 | #define DRIVER_EV_TX_DESCQ_ID_LBN 0 | ||
| 914 | #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12 | ||
| 915 | #define DRIVER_EV_RX_FLUSH_FAIL_LBN 12 | ||
| 916 | #define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1 | ||
| 917 | #define DRIVER_EV_RX_DESCQ_ID_LBN 0 | ||
| 918 | #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12 | ||
| 919 | #define SRM_CLR_EV_DECODE 0 | ||
| 920 | #define SRM_UPD_EV_DECODE 1 | ||
| 921 | #define SRM_ILLCLR_EV_DECODE 2 | ||
| 922 | |||
| 923 | /* Global events */ | ||
| 924 | #define RX_RECOVERY_B0_LBN 12 | ||
| 925 | #define RX_RECOVERY_B0_WIDTH 1 | ||
| 926 | #define XG_MNT_INTR_B0_LBN 11 | ||
| 927 | #define XG_MNT_INTR_B0_WIDTH 1 | ||
| 928 | #define RX_RECOVERY_A1_LBN 11 | ||
| 929 | #define RX_RECOVERY_A1_WIDTH 1 | ||
| 930 | #define XG_PHY_INTR_LBN 9 | ||
| 931 | #define XG_PHY_INTR_WIDTH 1 | ||
| 932 | #define G_PHY1_INTR_LBN 8 | ||
| 933 | #define G_PHY1_INTR_WIDTH 1 | ||
| 934 | #define G_PHY0_INTR_LBN 7 | ||
| 935 | #define G_PHY0_INTR_WIDTH 1 | ||
| 936 | |||
| 937 | /* Driver-generated test events */ | ||
| 938 | #define EVQ_MAGIC_LBN 0 | ||
| 939 | #define EVQ_MAGIC_WIDTH 32 | ||
| 940 | |||
| 941 | /************************************************************************** | ||
| 942 | * | ||
| 943 | * Falcon MAC stats | ||
| 944 | * | ||
| 945 | ************************************************************************** | ||
| 946 | * | ||
| 947 | */ | ||
| 948 | #define GRxGoodOct_offset 0x0 | ||
| 949 | #define GRxBadOct_offset 0x8 | ||
| 950 | #define GRxMissPkt_offset 0x10 | ||
| 951 | #define GRxFalseCRS_offset 0x14 | ||
| 952 | #define GRxPausePkt_offset 0x18 | ||
| 953 | #define GRxBadPkt_offset 0x1C | ||
| 954 | #define GRxUcastPkt_offset 0x20 | ||
| 955 | #define GRxMcastPkt_offset 0x24 | ||
| 956 | #define GRxBcastPkt_offset 0x28 | ||
| 957 | #define GRxGoodLt64Pkt_offset 0x2C | ||
| 958 | #define GRxBadLt64Pkt_offset 0x30 | ||
| 959 | #define GRx64Pkt_offset 0x34 | ||
| 960 | #define GRx65to127Pkt_offset 0x38 | ||
| 961 | #define GRx128to255Pkt_offset 0x3C | ||
| 962 | #define GRx256to511Pkt_offset 0x40 | ||
| 963 | #define GRx512to1023Pkt_offset 0x44 | ||
| 964 | #define GRx1024to15xxPkt_offset 0x48 | ||
| 965 | #define GRx15xxtoJumboPkt_offset 0x4C | ||
| 966 | #define GRxGtJumboPkt_offset 0x50 | ||
| 967 | #define GRxFcsErr64to15xxPkt_offset 0x54 | ||
| 968 | #define GRxFcsErr15xxtoJumboPkt_offset 0x58 | ||
| 969 | #define GRxFcsErrGtJumboPkt_offset 0x5C | ||
| 970 | #define GTxGoodBadOct_offset 0x80 | ||
| 971 | #define GTxGoodOct_offset 0x88 | ||
| 972 | #define GTxSglColPkt_offset 0x90 | ||
| 973 | #define GTxMultColPkt_offset 0x94 | ||
| 974 | #define GTxExColPkt_offset 0x98 | ||
| 975 | #define GTxDefPkt_offset 0x9C | ||
| 976 | #define GTxLateCol_offset 0xA0 | ||
| 977 | #define GTxExDefPkt_offset 0xA4 | ||
| 978 | #define GTxPausePkt_offset 0xA8 | ||
| 979 | #define GTxBadPkt_offset 0xAC | ||
| 980 | #define GTxUcastPkt_offset 0xB0 | ||
| 981 | #define GTxMcastPkt_offset 0xB4 | ||
| 982 | #define GTxBcastPkt_offset 0xB8 | ||
| 983 | #define GTxLt64Pkt_offset 0xBC | ||
| 984 | #define GTx64Pkt_offset 0xC0 | ||
| 985 | #define GTx65to127Pkt_offset 0xC4 | ||
| 986 | #define GTx128to255Pkt_offset 0xC8 | ||
| 987 | #define GTx256to511Pkt_offset 0xCC | ||
| 988 | #define GTx512to1023Pkt_offset 0xD0 | ||
| 989 | #define GTx1024to15xxPkt_offset 0xD4 | ||
| 990 | #define GTx15xxtoJumboPkt_offset 0xD8 | ||
| 991 | #define GTxGtJumboPkt_offset 0xDC | ||
| 992 | #define GTxNonTcpUdpPkt_offset 0xE0 | ||
| 993 | #define GTxMacSrcErrPkt_offset 0xE4 | ||
| 994 | #define GTxIpSrcErrPkt_offset 0xE8 | ||
| 995 | #define GDmaDone_offset 0xEC | ||
| 996 | |||
| 997 | #define XgRxOctets_offset 0x0 | ||
| 998 | #define XgRxOctets_WIDTH 48 | ||
| 999 | #define XgRxOctetsOK_offset 0x8 | ||
| 1000 | #define XgRxOctetsOK_WIDTH 48 | ||
| 1001 | #define XgRxPkts_offset 0x10 | ||
| 1002 | #define XgRxPkts_WIDTH 32 | ||
| 1003 | #define XgRxPktsOK_offset 0x14 | ||
| 1004 | #define XgRxPktsOK_WIDTH 32 | ||
| 1005 | #define XgRxBroadcastPkts_offset 0x18 | ||
| 1006 | #define XgRxBroadcastPkts_WIDTH 32 | ||
| 1007 | #define XgRxMulticastPkts_offset 0x1C | ||
| 1008 | #define XgRxMulticastPkts_WIDTH 32 | ||
| 1009 | #define XgRxUnicastPkts_offset 0x20 | ||
| 1010 | #define XgRxUnicastPkts_WIDTH 32 | ||
| 1011 | #define XgRxUndersizePkts_offset 0x24 | ||
| 1012 | #define XgRxUndersizePkts_WIDTH 32 | ||
| 1013 | #define XgRxOversizePkts_offset 0x28 | ||
| 1014 | #define XgRxOversizePkts_WIDTH 32 | ||
| 1015 | #define XgRxJabberPkts_offset 0x2C | ||
| 1016 | #define XgRxJabberPkts_WIDTH 32 | ||
| 1017 | #define XgRxUndersizeFCSerrorPkts_offset 0x30 | ||
| 1018 | #define XgRxUndersizeFCSerrorPkts_WIDTH 32 | ||
| 1019 | #define XgRxDropEvents_offset 0x34 | ||
| 1020 | #define XgRxDropEvents_WIDTH 32 | ||
| 1021 | #define XgRxFCSerrorPkts_offset 0x38 | ||
| 1022 | #define XgRxFCSerrorPkts_WIDTH 32 | ||
| 1023 | #define XgRxAlignError_offset 0x3C | ||
| 1024 | #define XgRxAlignError_WIDTH 32 | ||
| 1025 | #define XgRxSymbolError_offset 0x40 | ||
| 1026 | #define XgRxSymbolError_WIDTH 32 | ||
| 1027 | #define XgRxInternalMACError_offset 0x44 | ||
| 1028 | #define XgRxInternalMACError_WIDTH 32 | ||
| 1029 | #define XgRxControlPkts_offset 0x48 | ||
| 1030 | #define XgRxControlPkts_WIDTH 32 | ||
| 1031 | #define XgRxPausePkts_offset 0x4C | ||
| 1032 | #define XgRxPausePkts_WIDTH 32 | ||
| 1033 | #define XgRxPkts64Octets_offset 0x50 | ||
| 1034 | #define XgRxPkts64Octets_WIDTH 32 | ||
| 1035 | #define XgRxPkts65to127Octets_offset 0x54 | ||
| 1036 | #define XgRxPkts65to127Octets_WIDTH 32 | ||
| 1037 | #define XgRxPkts128to255Octets_offset 0x58 | ||
| 1038 | #define XgRxPkts128to255Octets_WIDTH 32 | ||
| 1039 | #define XgRxPkts256to511Octets_offset 0x5C | ||
| 1040 | #define XgRxPkts256to511Octets_WIDTH 32 | ||
| 1041 | #define XgRxPkts512to1023Octets_offset 0x60 | ||
| 1042 | #define XgRxPkts512to1023Octets_WIDTH 32 | ||
| 1043 | #define XgRxPkts1024to15xxOctets_offset 0x64 | ||
| 1044 | #define XgRxPkts1024to15xxOctets_WIDTH 32 | ||
| 1045 | #define XgRxPkts15xxtoMaxOctets_offset 0x68 | ||
| 1046 | #define XgRxPkts15xxtoMaxOctets_WIDTH 32 | ||
| 1047 | #define XgRxLengthError_offset 0x6C | ||
| 1048 | #define XgRxLengthError_WIDTH 32 | ||
| 1049 | #define XgTxPkts_offset 0x80 | ||
| 1050 | #define XgTxPkts_WIDTH 32 | ||
| 1051 | #define XgTxOctets_offset 0x88 | ||
| 1052 | #define XgTxOctets_WIDTH 48 | ||
| 1053 | #define XgTxMulticastPkts_offset 0x90 | ||
| 1054 | #define XgTxMulticastPkts_WIDTH 32 | ||
| 1055 | #define XgTxBroadcastPkts_offset 0x94 | ||
| 1056 | #define XgTxBroadcastPkts_WIDTH 32 | ||
| 1057 | #define XgTxUnicastPkts_offset 0x98 | ||
| 1058 | #define XgTxUnicastPkts_WIDTH 32 | ||
| 1059 | #define XgTxControlPkts_offset 0x9C | ||
| 1060 | #define XgTxControlPkts_WIDTH 32 | ||
| 1061 | #define XgTxPausePkts_offset 0xA0 | ||
| 1062 | #define XgTxPausePkts_WIDTH 32 | ||
| 1063 | #define XgTxPkts64Octets_offset 0xA4 | ||
| 1064 | #define XgTxPkts64Octets_WIDTH 32 | ||
| 1065 | #define XgTxPkts65to127Octets_offset 0xA8 | ||
| 1066 | #define XgTxPkts65to127Octets_WIDTH 32 | ||
| 1067 | #define XgTxPkts128to255Octets_offset 0xAC | ||
| 1068 | #define XgTxPkts128to255Octets_WIDTH 32 | ||
| 1069 | #define XgTxPkts256to511Octets_offset 0xB0 | ||
| 1070 | #define XgTxPkts256to511Octets_WIDTH 32 | ||
| 1071 | #define XgTxPkts512to1023Octets_offset 0xB4 | ||
| 1072 | #define XgTxPkts512to1023Octets_WIDTH 32 | ||
| 1073 | #define XgTxPkts1024to15xxOctets_offset 0xB8 | ||
| 1074 | #define XgTxPkts1024to15xxOctets_WIDTH 32 | ||
| 1075 | #define XgTxPkts1519toMaxOctets_offset 0xBC | ||
| 1076 | #define XgTxPkts1519toMaxOctets_WIDTH 32 | ||
| 1077 | #define XgTxUndersizePkts_offset 0xC0 | ||
| 1078 | #define XgTxUndersizePkts_WIDTH 32 | ||
| 1079 | #define XgTxOversizePkts_offset 0xC4 | ||
| 1080 | #define XgTxOversizePkts_WIDTH 32 | ||
| 1081 | #define XgTxNonTcpUdpPkt_offset 0xC8 | ||
| 1082 | #define XgTxNonTcpUdpPkt_WIDTH 16 | ||
| 1083 | #define XgTxMacSrcErrPkt_offset 0xCC | ||
| 1084 | #define XgTxMacSrcErrPkt_WIDTH 16 | ||
| 1085 | #define XgTxIpSrcErrPkt_offset 0xD0 | ||
| 1086 | #define XgTxIpSrcErrPkt_WIDTH 16 | ||
| 1087 | #define XgDmaDone_offset 0xD4 | ||
| 1088 | |||
| 1089 | #define FALCON_STATS_NOT_DONE 0x00000000 | ||
| 1090 | #define FALCON_STATS_DONE 0xffffffff | ||
| 1091 | |||
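The Xg*_offset/_WIDTH values above describe the layout of the statistics block that the XMAC DMAs into host memory, with FALCON_STATS_DONE marking a completed transfer. A hedged standalone sketch of pulling one counter out of such a buffer on a little-endian host (read_stat() is illustrative; the driver itself goes through its FALCON_STAT() macro, as seen in falcon_xmac.c below):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XgRxOctets_offset 0x0
#define XgRxOctets_WIDTH  48

/* Read a counter of 'width' bits from 'offset' within the DMAed
 * statistics buffer.  Assumes a little-endian host. */
static uint64_t read_stat(const uint8_t *dma_buf, unsigned int offset,
			  unsigned int width)
{
	uint64_t raw;

	memcpy(&raw, dma_buf + offset, sizeof(raw));
	if (width < 64)
		raw &= (1ull << width) - 1;
	return raw;
}

int main(void)
{
	uint8_t buf[0x100] = { 0 };

	buf[0] = 0x40; buf[1] = 0x42; buf[2] = 0x0f;	/* 1000000 octets */
	printf("rx_bytes = %llu\n",
	       (unsigned long long)read_stat(buf, XgRxOctets_offset,
					     XgRxOctets_WIDTH));
	return 0;
}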
| 1092 | /* Interrupt status register bits */ | ||
| 1093 | #define FATAL_INT_LBN 64 | ||
| 1094 | #define FATAL_INT_WIDTH 1 | ||
| 1095 | #define INT_EVQS_LBN 40 | ||
| 1096 | #define INT_EVQS_WIDTH 4 | ||
| 1097 | |||
| 1098 | /************************************************************************** | ||
| 1099 | * | ||
| 1100 | * Falcon non-volatile configuration | ||
| 1101 | * | ||
| 1102 | ************************************************************************** | ||
| 1103 | */ | ||
| 1104 | |||
| 1105 | /* Board configuration v2 (v1 is obsolete; later versions are compatible) */ | ||
| 1106 | struct falcon_nvconfig_board_v2 { | ||
| 1107 | __le16 nports; | ||
| 1108 | u8 port0_phy_addr; | ||
| 1109 | u8 port0_phy_type; | ||
| 1110 | u8 port1_phy_addr; | ||
| 1111 | u8 port1_phy_type; | ||
| 1112 | __le16 asic_sub_revision; | ||
| 1113 | __le16 board_revision; | ||
| 1114 | } __attribute__ ((packed)); | ||
| 1115 | |||
| 1116 | #define NVCONFIG_BASE 0x300 | ||
| 1117 | #define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C | ||
| 1118 | struct falcon_nvconfig { | ||
| 1119 | efx_oword_t ee_vpd_cfg_reg; /* 0x300 */ | ||
| 1120 | u8 mac_address[2][8]; /* 0x310 */ | ||
| 1121 | efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */ | ||
| 1122 | efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */ | ||
| 1123 | efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */ | ||
| 1124 | efx_oword_t hw_init_reg; /* 0x350 */ | ||
| 1125 | efx_oword_t nic_stat_reg; /* 0x360 */ | ||
| 1126 | efx_oword_t glb_ctl_reg; /* 0x370 */ | ||
| 1127 | efx_oword_t srm_cfg_reg; /* 0x380 */ | ||
| 1128 | efx_oword_t spare_reg; /* 0x390 */ | ||
| 1129 | __le16 board_magic_num; /* 0x3A0 */ | ||
| 1130 | __le16 board_struct_ver; | ||
| 1131 | __le16 board_checksum; | ||
| 1132 | struct falcon_nvconfig_board_v2 board_v2; | ||
| 1133 | } __attribute__ ((packed)); | ||
| 1134 | |||
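Given the structure above, a consumer of the non-volatile configuration would at minimum check the magic number and structure version before trusting the board fields. A hedged standalone sketch, assuming a little-endian host (the real driver presumably also verifies board_checksum, which is not reproduced here):

#include <stdint.h>
#include <stdio.h>

#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C

/* Just the trailing board-identification fields of the nvconfig block;
 * the preceding register images are omitted for brevity. */
struct nvconfig_tail {
	uint16_t board_magic_num;
	uint16_t board_struct_ver;
	uint16_t board_checksum;
};

static int nvconfig_looks_valid(const struct nvconfig_tail *cfg)
{
	if (cfg->board_magic_num != NVCONFIG_BOARD_MAGIC_NUM)
		return 0;
	/* v1 layouts are obsolete; v2 and later share the board_v2 fields */
	return cfg->board_struct_ver >= 2;
}

int main(void)
{
	struct nvconfig_tail cfg = { NVCONFIG_BOARD_MAGIC_NUM, 2, 0 };

	printf("nvconfig %s\n", nvconfig_looks_valid(&cfg) ? "valid" : "invalid");
	return 0;
}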
| 1135 | #endif /* EFX_FALCON_HWDEFS_H */ | ||
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h new file mode 100644 index 000000000000..ea08184ddfa9 --- /dev/null +++ b/drivers/net/sfc/falcon_io.h | |||
| @@ -0,0 +1,243 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_FALCON_IO_H | ||
| 12 | #define EFX_FALCON_IO_H | ||
| 13 | |||
| 14 | #include <linux/io.h> | ||
| 15 | #include <linux/spinlock.h> | ||
| 16 | #include "net_driver.h" | ||
| 17 | |||
| 18 | /************************************************************************** | ||
| 19 | * | ||
| 20 | * Falcon hardware access | ||
| 21 | * | ||
| 22 | ************************************************************************** | ||
| 23 | * | ||
| 24 | * Notes on locking strategy: | ||
| 25 | * | ||
| 26 | * Most Falcon registers require 16-byte (or 8-byte, for SRAM | ||
| 27 | * registers) atomic writes which necessitates locking. | ||
| 28 | * Under normal operation few writes to the Falcon BAR are made and these | ||
| 29 | * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special | ||
| 30 | * cased to allow 4-byte (hence lockless) accesses. | ||
| 31 | * | ||
| 32 | * It *is* safe to write to these 4-byte registers in the middle of an | ||
| 33 | * access to an 8-byte or 16-byte register. We therefore use a | ||
| 34 | * spinlock to protect accesses to the larger registers, but no locks | ||
| 35 | * for the 4-byte registers. | ||
| 36 | * | ||
| 37 | * A write barrier is needed to ensure that DW3 is written after DW0/1/2 | ||
| 38 | * due to the way the 16-byte registers are "collected" in the Falcon BIU. | ||
| 39 | * | ||
| 40 | * We also lock when carrying out reads, to ensure consistency of the | ||
| 41 | * data (made possible since the BIU reads all 128 bits into a cache). | ||
| 42 | * Reads are very rare, so this isn't a significant performance | ||
| 43 | * impact. (Most data transferred from NIC to host is DMAed directly | ||
| 44 | * into host memory). | ||
| 45 | * | ||
| 46 | * I/O BAR access uses locks for both reads and writes (but is only provided | ||
| 47 | * for testing purposes). | ||
| 48 | */ | ||
| 49 | |||
| 50 | /* Special buffer descriptors (Falcon SRAM) */ | ||
| 51 | #define BUF_TBL_KER_A1 0x18000 | ||
| 52 | #define BUF_TBL_KER_B0 0x800000 | ||
| 53 | |||
| 54 | |||
| 55 | #if BITS_PER_LONG == 64 | ||
| 56 | #define FALCON_USE_QWORD_IO 1 | ||
| 57 | #endif | ||
| 58 | |||
| 59 | #define _falcon_writeq(efx, value, reg) \ | ||
| 60 | __raw_writeq((__force u64) (value), (efx)->membase + (reg)) | ||
| 61 | #define _falcon_writel(efx, value, reg) \ | ||
| 62 | __raw_writel((__force u32) (value), (efx)->membase + (reg)) | ||
| 63 | #define _falcon_readq(efx, reg) \ | ||
| 64 | ((__force __le64) __raw_readq((efx)->membase + (reg))) | ||
| 65 | #define _falcon_readl(efx, reg) \ | ||
| 66 | ((__force __le32) __raw_readl((efx)->membase + (reg))) | ||
| 67 | |||
| 68 | /* Writes to a normal 16-byte Falcon register, locking as appropriate. */ | ||
| 69 | static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value, | ||
| 70 | unsigned int reg) | ||
| 71 | { | ||
| 72 | unsigned long flags; | ||
| 73 | |||
| 74 | EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg, | ||
| 75 | EFX_OWORD_VAL(*value)); | ||
| 76 | |||
| 77 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
| 78 | #ifdef FALCON_USE_QWORD_IO | ||
| 79 | _falcon_writeq(efx, value->u64[0], reg + 0); | ||
| 80 | wmb(); | ||
| 81 | _falcon_writeq(efx, value->u64[1], reg + 8); | ||
| 82 | #else | ||
| 83 | _falcon_writel(efx, value->u32[0], reg + 0); | ||
| 84 | _falcon_writel(efx, value->u32[1], reg + 4); | ||
| 85 | _falcon_writel(efx, value->u32[2], reg + 8); | ||
| 86 | wmb(); | ||
| 87 | _falcon_writel(efx, value->u32[3], reg + 12); | ||
| 88 | #endif | ||
| 89 | mmiowb(); | ||
| 90 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
| 91 | } | ||
| 92 | |||
| 93 | /* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */ | ||
| 94 | static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value, | ||
| 95 | unsigned int index) | ||
| 96 | { | ||
| 97 | unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value)); | ||
| 98 | unsigned long flags; | ||
| 99 | |||
| 100 | EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n", | ||
| 101 | reg, EFX_QWORD_VAL(*value)); | ||
| 102 | |||
| 103 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
| 104 | #ifdef FALCON_USE_QWORD_IO | ||
| 105 | _falcon_writeq(efx, value->u64[0], reg + 0); | ||
| 106 | #else | ||
| 107 | _falcon_writel(efx, value->u32[0], reg + 0); | ||
| 108 | wmb(); | ||
| 109 | _falcon_writel(efx, value->u32[1], reg + 4); | ||
| 110 | #endif | ||
| 111 | mmiowb(); | ||
| 112 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
| 113 | } | ||
| 114 | |||
| 115 | /* Write dword to Falcon register that allows partial writes | ||
| 116 | * | ||
| 117 | * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and | ||
| 118 | * TX_DESC_UPD_REG) can be written to as a single dword. This allows | ||
| 119 | * for lockless writes. | ||
| 120 | */ | ||
| 121 | static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value, | ||
| 122 | unsigned int reg) | ||
| 123 | { | ||
| 124 | EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n", | ||
| 125 | reg, EFX_DWORD_VAL(*value)); | ||
| 126 | |||
| 127 | /* No lock required */ | ||
| 128 | _falcon_writel(efx, value->u32[0], reg); | ||
| 129 | } | ||
| 130 | |||
| 131 | /* Read from a Falcon register | ||
| 132 | * | ||
| 133 | * This reads an entire 16-byte Falcon register in one go, locking as | ||
| 134 | * appropriate. It is essential to read the first dword first, as this | ||
| 135 | * prompts Falcon to load the current value into the shadow register. | ||
| 136 | */ | ||
| 137 | static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value, | ||
| 138 | unsigned int reg) | ||
| 139 | { | ||
| 140 | unsigned long flags; | ||
| 141 | |||
| 142 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
| 143 | value->u32[0] = _falcon_readl(efx, reg + 0); | ||
| 144 | rmb(); | ||
| 145 | value->u32[1] = _falcon_readl(efx, reg + 4); | ||
| 146 | value->u32[2] = _falcon_readl(efx, reg + 8); | ||
| 147 | value->u32[3] = _falcon_readl(efx, reg + 12); | ||
| 148 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
| 149 | |||
| 150 | EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg, | ||
| 151 | EFX_OWORD_VAL(*value)); | ||
| 152 | } | ||
| 153 | |||
| 154 | /* This reads an 8-byte Falcon SRAM entry in one go. */ | ||
| 155 | static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value, | ||
| 156 | unsigned int index) | ||
| 157 | { | ||
| 158 | unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value)); | ||
| 159 | unsigned long flags; | ||
| 160 | |||
| 161 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
| 162 | #ifdef FALCON_USE_QWORD_IO | ||
| 163 | value->u64[0] = _falcon_readq(efx, reg + 0); | ||
| 164 | #else | ||
| 165 | value->u32[0] = _falcon_readl(efx, reg + 0); | ||
| 166 | rmb(); | ||
| 167 | value->u32[1] = _falcon_readl(efx, reg + 4); | ||
| 168 | #endif | ||
| 169 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
| 170 | |||
| 171 | EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n", | ||
| 172 | reg, EFX_QWORD_VAL(*value)); | ||
| 173 | } | ||
| 174 | |||
| 175 | /* Read dword from Falcon register that allows partial writes (sic) */ | ||
| 176 | static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value, | ||
| 177 | unsigned int reg) | ||
| 178 | { | ||
| 179 | value->u32[0] = _falcon_readl(efx, reg); | ||
| 180 | EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n", | ||
| 181 | reg, EFX_DWORD_VAL(*value)); | ||
| 182 | } | ||
| 183 | |||
| 184 | /* Write to a register forming part of a table */ | ||
| 185 | static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value, | ||
| 186 | unsigned int reg, unsigned int index) | ||
| 187 | { | ||
| 188 | falcon_write(efx, value, reg + index * sizeof(efx_oword_t)); | ||
| 189 | } | ||
| 190 | |||
| 191 | /* Read from a register forming part of a table */ | ||
| 192 | static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value, | ||
| 193 | unsigned int reg, unsigned int index) | ||
| 194 | { | ||
| 195 | falcon_read(efx, value, reg + index * sizeof(efx_oword_t)); | ||
| 196 | } | ||
| 197 | |||
| 198 | /* Write to a dword register forming part of a table */ | ||
| 199 | static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value, | ||
| 200 | unsigned int reg, unsigned int index) | ||
| 201 | { | ||
| 202 | falcon_writel(efx, value, reg + index * sizeof(efx_oword_t)); | ||
| 203 | } | ||
| 204 | |||
| 205 | /* Page-mapped register block size */ | ||
| 206 | #define FALCON_PAGE_BLOCK_SIZE 0x2000 | ||
| 207 | |||
| 208 | /* Calculate offset to page-mapped register block */ | ||
| 209 | #define FALCON_PAGED_REG(page, reg) \ | ||
| 210 | ((page) * FALCON_PAGE_BLOCK_SIZE + (reg)) | ||
| 211 | |||
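FALCON_PAGED_REG() simply scales the page number by the 0x2000-byte block size and adds the register's offset within that block. A trivial standalone check of the arithmetic (the page and offset values here are made up):

#include <stdio.h>

#define FALCON_PAGE_BLOCK_SIZE 0x2000
#define FALCON_PAGED_REG(page, reg) \
	((page) * FALCON_PAGE_BLOCK_SIZE + (reg))

int main(void)
{
	/* Page 2, offset 0x830 within the block -> BAR offset 0x4830 */
	printf("0x%x\n", FALCON_PAGED_REG(2, 0x830));
	return 0;
}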
| 212 | /* As for falcon_write(), but for a page-mapped register. */ | ||
| 213 | static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value, | ||
| 214 | unsigned int reg, unsigned int page) | ||
| 215 | { | ||
| 216 | falcon_write(efx, value, FALCON_PAGED_REG(page, reg)); | ||
| 217 | } | ||
| 218 | |||
| 219 | /* As for falcon_writel(), but for a page-mapped register. */ | ||
| 220 | static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value, | ||
| 221 | unsigned int reg, unsigned int page) | ||
| 222 | { | ||
| 223 | falcon_writel(efx, value, FALCON_PAGED_REG(page, reg)); | ||
| 224 | } | ||
| 225 | |||
| 226 | /* Write dword to Falcon page-mapped register with an extra lock. | ||
| 227 | * | ||
| 228 | * As for falcon_writel_page(), but for a register that suffers from | ||
| 229 | * SFC bug 3181. Take out a lock so the BIU collector cannot be | ||
| 230 | * confused. */ | ||
| 231 | static inline void falcon_writel_page_locked(struct efx_nic *efx, | ||
| 232 | efx_dword_t *value, | ||
| 233 | unsigned int reg, | ||
| 234 | unsigned int page) | ||
| 235 | { | ||
| 236 | unsigned long flags; | ||
| 237 | |||
| 238 | spin_lock_irqsave(&efx->biu_lock, flags); | ||
| 239 | falcon_writel(efx, value, FALCON_PAGED_REG(page, reg)); | ||
| 240 | spin_unlock_irqrestore(&efx->biu_lock, flags); | ||
| 241 | } | ||
| 242 | |||
| 243 | #endif /* EFX_FALCON_IO_H */ | ||
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c new file mode 100644 index 000000000000..aa7521b24a5d --- /dev/null +++ b/drivers/net/sfc/falcon_xmac.c | |||
| @@ -0,0 +1,585 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/delay.h> | ||
| 12 | #include "net_driver.h" | ||
| 13 | #include "efx.h" | ||
| 14 | #include "falcon.h" | ||
| 15 | #include "falcon_hwdefs.h" | ||
| 16 | #include "falcon_io.h" | ||
| 17 | #include "mac.h" | ||
| 18 | #include "gmii.h" | ||
| 19 | #include "mdio_10g.h" | ||
| 20 | #include "phy.h" | ||
| 21 | #include "boards.h" | ||
| 22 | #include "workarounds.h" | ||
| 23 | |||
| 24 | /************************************************************************** | ||
| 25 | * | ||
| 26 | * MAC register access | ||
| 27 | * | ||
| 28 | **************************************************************************/ | ||
| 29 | |||
| 30 | /* Offset of an XMAC register within Falcon */ | ||
| 31 | #define FALCON_XMAC_REG(mac_reg) \ | ||
| 32 | (FALCON_XMAC_REGBANK + ((mac_reg) * FALCON_XMAC_REG_SIZE)) | ||
| 33 | |||
| 34 | void falcon_xmac_writel(struct efx_nic *efx, | ||
| 35 | efx_dword_t *value, unsigned int mac_reg) | ||
| 36 | { | ||
| 37 | efx_oword_t temp; | ||
| 38 | |||
| 39 | EFX_POPULATE_OWORD_1(temp, MAC_DATA, EFX_DWORD_FIELD(*value, MAC_DATA)); | ||
| 40 | falcon_write(efx, &temp, FALCON_XMAC_REG(mac_reg)); | ||
| 41 | } | ||
| 42 | |||
| 43 | void falcon_xmac_readl(struct efx_nic *efx, | ||
| 44 | efx_dword_t *value, unsigned int mac_reg) | ||
| 45 | { | ||
| 46 | efx_oword_t temp; | ||
| 47 | |||
| 48 | falcon_read(efx, &temp, FALCON_XMAC_REG(mac_reg)); | ||
| 49 | EFX_POPULATE_DWORD_1(*value, MAC_DATA, EFX_OWORD_FIELD(temp, MAC_DATA)); | ||
| 50 | } | ||
| 51 | |||
| 52 | /************************************************************************** | ||
| 53 | * | ||
| 54 | * MAC operations | ||
| 55 | * | ||
| 56 | *************************************************************************/ | ||
| 57 | static int falcon_reset_xmac(struct efx_nic *efx) | ||
| 58 | { | ||
| 59 | efx_dword_t reg; | ||
| 60 | int count; | ||
| 61 | |||
| 62 | EFX_POPULATE_DWORD_1(reg, XM_CORE_RST, 1); | ||
| 63 | falcon_xmac_writel(efx, ®, XM_GLB_CFG_REG_MAC); | ||
| 64 | |||
| 65 | for (count = 0; count < 10000; count++) { /* wait up to 100ms */ | ||
| 66 | falcon_xmac_readl(efx, ®, XM_GLB_CFG_REG_MAC); | ||
| 67 | if (EFX_DWORD_FIELD(reg, XM_CORE_RST) == 0) | ||
| 68 | return 0; | ||
| 69 | udelay(10); | ||
| 70 | } | ||
| 71 | |||
| 72 | EFX_ERR(efx, "timed out waiting for XMAC core reset\n"); | ||
| 73 | return -ETIMEDOUT; | ||
| 74 | } | ||
| 75 | |||
| 76 | /* Configure the XAUI output drivers on the Falcon side of the link */ | ||
| 77 | static void falcon_setup_xaui(struct efx_nic *efx) | ||
| 78 | { | ||
| 79 | efx_dword_t sdctl, txdrv; | ||
| 80 | |||
| 81 | /* Move the XAUI into low power, unless there is no PHY, in | ||
| 82 | * which case the XAUI will have to drive a cable. */ | ||
| 83 | if (efx->phy_type == PHY_TYPE_NONE) | ||
| 84 | return; | ||
| 85 | |||
| 86 | falcon_xmac_readl(efx, &sdctl, XX_SD_CTL_REG_MAC); | ||
| 87 | EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); | ||
| 88 | EFX_SET_DWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); | ||
| 89 | EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); | ||
| 90 | EFX_SET_DWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); | ||
| 91 | EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); | ||
| 92 | EFX_SET_DWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); | ||
| 93 | EFX_SET_DWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); | ||
| 94 | EFX_SET_DWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); | ||
| 95 | falcon_xmac_writel(efx, &sdctl, XX_SD_CTL_REG_MAC); | ||
| 96 | |||
| 97 | EFX_POPULATE_DWORD_8(txdrv, | ||
| 98 | XX_DEQD, XX_TXDRV_DEQ_DEFAULT, | ||
| 99 | XX_DEQC, XX_TXDRV_DEQ_DEFAULT, | ||
| 100 | XX_DEQB, XX_TXDRV_DEQ_DEFAULT, | ||
| 101 | XX_DEQA, XX_TXDRV_DEQ_DEFAULT, | ||
| 102 | XX_DTXD, XX_TXDRV_DTX_DEFAULT, | ||
| 103 | XX_DTXC, XX_TXDRV_DTX_DEFAULT, | ||
| 104 | XX_DTXB, XX_TXDRV_DTX_DEFAULT, | ||
| 105 | XX_DTXA, XX_TXDRV_DTX_DEFAULT); | ||
| 106 | falcon_xmac_writel(efx, &txdrv, XX_TXDRV_CTL_REG_MAC); | ||
| 107 | } | ||
| 108 | |||
| 109 | static void falcon_hold_xaui_in_rst(struct efx_nic *efx) | ||
| 110 | { | ||
| 111 | efx_dword_t reg; | ||
| 112 | |||
| 113 | EFX_ZERO_DWORD(reg); | ||
| 114 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 1); | ||
| 115 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 1); | ||
| 116 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 1); | ||
| 117 | EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 1); | ||
| 118 | EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 1); | ||
| 119 | EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 1); | ||
| 120 | EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 1); | ||
| 121 | EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 1); | ||
| 122 | EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 1); | ||
| 123 | EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 1); | ||
| 124 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSRX_EN, 1); | ||
| 125 | EFX_SET_DWORD_FIELD(reg, XX_RSTXGXSTX_EN, 1); | ||
| 126 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 127 | udelay(10); | ||
| 128 | } | ||
| 129 | |||
| 130 | static int _falcon_reset_xaui_a(struct efx_nic *efx) | ||
| 131 | { | ||
| 132 | efx_dword_t reg; | ||
| 133 | |||
| 134 | falcon_hold_xaui_in_rst(efx); | ||
| 135 | falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 136 | |||
| 137 | /* Follow the RAMBUS XAUI data reset sequencing | ||
| 138 | * Channels A and B first: power down, reset PLL, reset, clear | ||
| 139 | */ | ||
| 140 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNA_EN, 0); | ||
| 141 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNB_EN, 0); | ||
| 142 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 143 | udelay(10); | ||
| 144 | |||
| 145 | EFX_SET_DWORD_FIELD(reg, XX_RSTPLLAB_EN, 0); | ||
| 146 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 147 | udelay(10); | ||
| 148 | |||
| 149 | EFX_SET_DWORD_FIELD(reg, XX_RESETA_EN, 0); | ||
| 150 | EFX_SET_DWORD_FIELD(reg, XX_RESETB_EN, 0); | ||
| 151 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 152 | udelay(10); | ||
| 153 | |||
| 154 | /* Channels C and D: power down, reset PLL, reset, clear */ | ||
| 155 | EFX_SET_DWORD_FIELD(reg, XX_PWRDNC_EN, 0); | ||
| 156 | EFX_SET_DWORD_FIELD(reg, XX_PWRDND_EN, 0); | ||
| 157 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 158 | udelay(10); | ||
| 159 | |||
| 160 | EFX_SET_DWORD_FIELD(reg, XX_RSTPLLCD_EN, 0); | ||
| 161 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 162 | udelay(10); | ||
| 163 | |||
| 164 | EFX_SET_DWORD_FIELD(reg, XX_RESETC_EN, 0); | ||
| 165 | EFX_SET_DWORD_FIELD(reg, XX_RESETD_EN, 0); | ||
| 166 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 167 | udelay(10); | ||
| 168 | |||
| 169 | /* Setup XAUI */ | ||
| 170 | falcon_setup_xaui(efx); | ||
| 171 | udelay(10); | ||
| 172 | |||
| 173 | /* Take XGXS out of reset */ | ||
| 174 | EFX_ZERO_DWORD(reg); | ||
| 175 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 176 | udelay(10); | ||
| 177 | |||
| 178 | return 0; | ||
| 179 | } | ||
| 180 | |||
| 181 | static int _falcon_reset_xaui_b(struct efx_nic *efx) | ||
| 182 | { | ||
| 183 | efx_dword_t reg; | ||
| 184 | int count; | ||
| 185 | |||
| 186 | EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); | ||
| 187 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 188 | |||
| 189 | /* Give some time for the link to establish */ | ||
| 190 | for (count = 0; count < 1000; count++) { /* wait up to 10ms */ | ||
| 191 | falcon_xmac_readl(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 192 | if (EFX_DWORD_FIELD(reg, XX_RST_XX_EN) == 0) { | ||
| 193 | falcon_setup_xaui(efx); | ||
| 194 | return 0; | ||
| 195 | } | ||
| 196 | udelay(10); | ||
| 197 | } | ||
| 198 | EFX_ERR(efx, "timed out waiting for XAUI/XGXS reset\n"); | ||
| 199 | return -ETIMEDOUT; | ||
| 200 | } | ||
| 201 | |||
| 202 | int falcon_reset_xaui(struct efx_nic *efx) | ||
| 203 | { | ||
| 204 | int rc; | ||
| 205 | |||
| 206 | if (EFX_WORKAROUND_9388(efx)) { | ||
| 207 | falcon_hold_xaui_in_rst(efx); | ||
| 208 | efx->phy_op->reset_xaui(efx); | ||
| 209 | rc = _falcon_reset_xaui_a(efx); | ||
| 210 | } else { | ||
| 211 | rc = _falcon_reset_xaui_b(efx); | ||
| 212 | } | ||
| 213 | return rc; | ||
| 214 | } | ||
| 215 | |||
| 216 | static int falcon_xgmii_status(struct efx_nic *efx) | ||
| 217 | { | ||
| 218 | efx_dword_t reg; | ||
| 219 | |||
| 220 | if (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 221 | return 1; | ||
| 222 | |||
| 223 | /* The ISR latches, so clear it and re-read */ | ||
| 224 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | ||
| 225 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | ||
| 226 | |||
| 227 | if (EFX_DWORD_FIELD(reg, XM_LCLFLT) || | ||
| 228 | EFX_DWORD_FIELD(reg, XM_RMTFLT)) { | ||
| 229 | EFX_INFO(efx, "MGT_INT: "EFX_DWORD_FMT"\n", EFX_DWORD_VAL(reg)); | ||
| 230 | return 0; | ||
| 231 | } | ||
| 232 | |||
| 233 | return 1; | ||
| 234 | } | ||
| 235 | |||
| 236 | static void falcon_mask_status_intr(struct efx_nic *efx, int enable) | ||
| 237 | { | ||
| 238 | efx_dword_t reg; | ||
| 239 | |||
| 240 | if (FALCON_REV(efx) < FALCON_REV_B0) | ||
| 241 | return; | ||
| 242 | |||
| 243 | /* Flush the ISR */ | ||
| 244 | if (enable) | ||
| 245 | falcon_xmac_readl(efx, ®, XM_MGT_INT_REG_MAC_B0); | ||
| 246 | |||
| 247 | EFX_POPULATE_DWORD_2(reg, | ||
| 248 | XM_MSK_RMTFLT, !enable, | ||
| 249 | XM_MSK_LCLFLT, !enable); | ||
| 250 | falcon_xmac_writel(efx, ®, XM_MGT_INT_MSK_REG_MAC_B0); | ||
| 251 | } | ||
| 252 | |||
| 253 | int falcon_init_xmac(struct efx_nic *efx) | ||
| 254 | { | ||
| 255 | int rc; | ||
| 256 | |||
| 257 | /* Initialize the PHY first so the clock is around */ | ||
| 258 | rc = efx->phy_op->init(efx); | ||
| 259 | if (rc) | ||
| 260 | goto fail1; | ||
| 261 | |||
| 262 | rc = falcon_reset_xaui(efx); | ||
| 263 | if (rc) | ||
| 264 | goto fail2; | ||
| 265 | |||
| 266 | /* Wait again. Give the PHY and MAC time to come back */ | ||
| 267 | schedule_timeout_uninterruptible(HZ / 10); | ||
| 268 | |||
| 269 | rc = falcon_reset_xmac(efx); | ||
| 270 | if (rc) | ||
| 271 | goto fail2; | ||
| 272 | |||
| 273 | falcon_mask_status_intr(efx, 1); | ||
| 274 | return 0; | ||
| 275 | |||
| 276 | fail2: | ||
| 277 | efx->phy_op->fini(efx); | ||
| 278 | fail1: | ||
| 279 | return rc; | ||
| 280 | } | ||
| 281 | |||
| 282 | int falcon_xaui_link_ok(struct efx_nic *efx) | ||
| 283 | { | ||
| 284 | efx_dword_t reg; | ||
| 285 | int align_done, sync_status, link_ok = 0; | ||
| 286 | |||
| 287 | /* Read link status */ | ||
| 288 | falcon_xmac_readl(efx, ®, XX_CORE_STAT_REG_MAC); | ||
| 289 | |||
| 290 | align_done = EFX_DWORD_FIELD(reg, XX_ALIGN_DONE); | ||
| 291 | sync_status = EFX_DWORD_FIELD(reg, XX_SYNC_STAT); | ||
| 292 | if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) | ||
| 293 | link_ok = 1; | ||
| 294 | |||
| 295 | /* Clear link status ready for next read */ | ||
| 296 | EFX_SET_DWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); | ||
| 297 | EFX_SET_DWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); | ||
| 298 | EFX_SET_DWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); | ||
| 299 | falcon_xmac_writel(efx, ®, XX_CORE_STAT_REG_MAC); | ||
| 300 | |||
| 301 | /* If the link is up, then check the phy side of the xaui link | ||
| 302 | * (error conditions from the wire side propagate back through | ||
| 303 | * the phy to the xaui side). */ | ||
| 304 | if (efx->link_up && link_ok) { | ||
| 305 | int has_phyxs = efx->phy_op->mmds & (1 << MDIO_MMD_PHYXS); | ||
| 306 | if (has_phyxs) | ||
| 307 | link_ok = mdio_clause45_phyxgxs_lane_sync(efx); | ||
| 308 | } | ||
| 309 | |||
| 310 | /* If the PHY and XAUI links are up, then check the mac's xgmii | ||
| 311 | * fault state */ | ||
| 312 | if (efx->link_up && link_ok) | ||
| 313 | link_ok = falcon_xgmii_status(efx); | ||
| 314 | |||
| 315 | return link_ok; | ||
| 316 | } | ||
| 317 | |||
| 318 | static void falcon_reconfigure_xmac_core(struct efx_nic *efx) | ||
| 319 | { | ||
| 320 | unsigned int max_frame_len; | ||
| 321 | efx_dword_t reg; | ||
| 322 | int rx_fc = (efx->flow_control & EFX_FC_RX) ? 1 : 0; | ||
| 323 | |||
| 324 | /* Configure MAC - cut-through mode is hard-wired on */ | ||
| 325 | EFX_POPULATE_DWORD_3(reg, | ||
| 326 | XM_RX_JUMBO_MODE, 1, | ||
| 327 | XM_TX_STAT_EN, 1, | ||
| 328 | XM_RX_STAT_EN, 1); | ||
| 329 | falcon_xmac_writel(efx, ®, XM_GLB_CFG_REG_MAC); | ||
| 330 | |||
| 331 | /* Configure TX */ | ||
| 332 | EFX_POPULATE_DWORD_6(reg, | ||
| 333 | XM_TXEN, 1, | ||
| 334 | XM_TX_PRMBL, 1, | ||
| 335 | XM_AUTO_PAD, 1, | ||
| 336 | XM_TXCRC, 1, | ||
| 337 | XM_FCNTL, 1, | ||
| 338 | XM_IPG, 0x3); | ||
| 339 | falcon_xmac_writel(efx, ®, XM_TX_CFG_REG_MAC); | ||
| 340 | |||
| 341 | /* Configure RX */ | ||
| 342 | EFX_POPULATE_DWORD_5(reg, | ||
| 343 | XM_RXEN, 1, | ||
| 344 | XM_AUTO_DEPAD, 0, | ||
| 345 | XM_ACPT_ALL_MCAST, 1, | ||
| 346 | XM_ACPT_ALL_UCAST, efx->promiscuous, | ||
| 347 | XM_PASS_CRC_ERR, 1); | ||
| 348 | falcon_xmac_writel(efx, ®, XM_RX_CFG_REG_MAC); | ||
| 349 | |||
| 350 | /* Set frame length */ | ||
| 351 | max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); | ||
| 352 | EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); | ||
| 353 | falcon_xmac_writel(efx, ®, XM_RX_PARAM_REG_MAC); | ||
| 354 | EFX_POPULATE_DWORD_2(reg, | ||
| 355 | XM_MAX_TX_FRM_SIZE, max_frame_len, | ||
| 356 | XM_TX_JUMBO_MODE, 1); | ||
| 357 | falcon_xmac_writel(efx, ®, XM_TX_PARAM_REG_MAC); | ||
| 358 | |||
| 359 | EFX_POPULATE_DWORD_2(reg, | ||
| 360 | XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ | ||
| 361 | XM_DIS_FCNTL, rx_fc ? 0 : 1); | ||
| 362 | falcon_xmac_writel(efx, ®, XM_FC_REG_MAC); | ||
| 363 | |||
| 364 | /* Set MAC address */ | ||
| 365 | EFX_POPULATE_DWORD_4(reg, | ||
| 366 | XM_ADR_0, efx->net_dev->dev_addr[0], | ||
| 367 | XM_ADR_1, efx->net_dev->dev_addr[1], | ||
| 368 | XM_ADR_2, efx->net_dev->dev_addr[2], | ||
| 369 | XM_ADR_3, efx->net_dev->dev_addr[3]); | ||
| 370 | falcon_xmac_writel(efx, ®, XM_ADR_LO_REG_MAC); | ||
| 371 | EFX_POPULATE_DWORD_2(reg, | ||
| 372 | XM_ADR_4, efx->net_dev->dev_addr[4], | ||
| 373 | XM_ADR_5, efx->net_dev->dev_addr[5]); | ||
| 374 | falcon_xmac_writel(efx, ®, XM_ADR_HI_REG_MAC); | ||
| 375 | } | ||
| 376 | |||
| 377 | /* The Falcon side of the Falcon-PHY XAUI link sometimes fails to | ||
| 378 | * come back up. Bash it until it comes back up. */ | ||
| 379 | static int falcon_check_xaui_link_up(struct efx_nic *efx) | ||
| 380 | { | ||
| 381 | int max_tries, tries; | ||
| 382 | tries = EFX_WORKAROUND_5147(efx) ? 5 : 1; | ||
| 383 | max_tries = tries; | ||
| 384 | |||
| 385 | if (efx->phy_type == PHY_TYPE_NONE) | ||
| 386 | return 0; | ||
| 387 | |||
| 388 | while (tries) { | ||
| 389 | if (falcon_xaui_link_ok(efx)) | ||
| 390 | return 1; | ||
| 391 | |||
| 392 | EFX_LOG(efx, "%s Clobbering XAUI (%d tries left).\n", | ||
| 393 | __func__, tries); | ||
| 394 | (void) falcon_reset_xaui(efx); | ||
| 395 | udelay(200); | ||
| 396 | tries--; | ||
| 397 | } | ||
| 398 | |||
| 399 | EFX_ERR(efx, "Failed to bring XAUI link back up in %d tries!\n", | ||
| 400 | max_tries); | ||
| 401 | return 0; | ||
| 402 | } | ||
| 403 | |||
| 404 | void falcon_reconfigure_xmac(struct efx_nic *efx) | ||
| 405 | { | ||
| 406 | int xaui_link_ok; | ||
| 407 | |||
| 408 | falcon_mask_status_intr(efx, 0); | ||
| 409 | |||
| 410 | falcon_deconfigure_mac_wrapper(efx); | ||
| 411 | efx->phy_op->reconfigure(efx); | ||
| 412 | falcon_reconfigure_xmac_core(efx); | ||
| 413 | falcon_reconfigure_mac_wrapper(efx); | ||
| 414 | |||
| 415 | /* Ensure XAUI link is up */ | ||
| 416 | xaui_link_ok = falcon_check_xaui_link_up(efx); | ||
| 417 | |||
| 418 | if (xaui_link_ok && efx->link_up) | ||
| 419 | falcon_mask_status_intr(efx, 1); | ||
| 420 | } | ||
| 421 | |||
| 422 | void falcon_fini_xmac(struct efx_nic *efx) | ||
| 423 | { | ||
| 424 | /* Isolate the MAC from the PHY */ | ||
| 425 | falcon_deconfigure_mac_wrapper(efx); | ||
| 426 | |||
| 427 | /* Potentially power down the PHY */ | ||
| 428 | efx->phy_op->fini(efx); | ||
| 429 | } | ||
| 430 | |||
| 431 | void falcon_update_stats_xmac(struct efx_nic *efx) | ||
| 432 | { | ||
| 433 | struct efx_mac_stats *mac_stats = &efx->mac_stats; | ||
| 434 | int rc; | ||
| 435 | |||
| 436 | rc = falcon_dma_stats(efx, XgDmaDone_offset); | ||
| 437 | if (rc) | ||
| 438 | return; | ||
| 439 | |||
| 440 | /* Update MAC stats from DMAed values */ | ||
| 441 | FALCON_STAT(efx, XgRxOctets, rx_bytes); | ||
| 442 | FALCON_STAT(efx, XgRxOctetsOK, rx_good_bytes); | ||
| 443 | FALCON_STAT(efx, XgRxPkts, rx_packets); | ||
| 444 | FALCON_STAT(efx, XgRxPktsOK, rx_good); | ||
| 445 | FALCON_STAT(efx, XgRxBroadcastPkts, rx_broadcast); | ||
| 446 | FALCON_STAT(efx, XgRxMulticastPkts, rx_multicast); | ||
| 447 | FALCON_STAT(efx, XgRxUnicastPkts, rx_unicast); | ||
| 448 | FALCON_STAT(efx, XgRxUndersizePkts, rx_lt64); | ||
| 449 | FALCON_STAT(efx, XgRxOversizePkts, rx_gtjumbo); | ||
| 450 | FALCON_STAT(efx, XgRxJabberPkts, rx_bad_gtjumbo); | ||
| 451 | FALCON_STAT(efx, XgRxUndersizeFCSerrorPkts, rx_bad_lt64); | ||
| 452 | FALCON_STAT(efx, XgRxDropEvents, rx_overflow); | ||
| 453 | FALCON_STAT(efx, XgRxFCSerrorPkts, rx_bad); | ||
| 454 | FALCON_STAT(efx, XgRxAlignError, rx_align_error); | ||
| 455 | FALCON_STAT(efx, XgRxSymbolError, rx_symbol_error); | ||
| 456 | FALCON_STAT(efx, XgRxInternalMACError, rx_internal_error); | ||
| 457 | FALCON_STAT(efx, XgRxControlPkts, rx_control); | ||
| 458 | FALCON_STAT(efx, XgRxPausePkts, rx_pause); | ||
| 459 | FALCON_STAT(efx, XgRxPkts64Octets, rx_64); | ||
| 460 | FALCON_STAT(efx, XgRxPkts65to127Octets, rx_65_to_127); | ||
| 461 | FALCON_STAT(efx, XgRxPkts128to255Octets, rx_128_to_255); | ||
| 462 | FALCON_STAT(efx, XgRxPkts256to511Octets, rx_256_to_511); | ||
| 463 | FALCON_STAT(efx, XgRxPkts512to1023Octets, rx_512_to_1023); | ||
| 464 | FALCON_STAT(efx, XgRxPkts1024to15xxOctets, rx_1024_to_15xx); | ||
| 465 | FALCON_STAT(efx, XgRxPkts15xxtoMaxOctets, rx_15xx_to_jumbo); | ||
| 466 | FALCON_STAT(efx, XgRxLengthError, rx_length_error); | ||
| 467 | FALCON_STAT(efx, XgTxPkts, tx_packets); | ||
| 468 | FALCON_STAT(efx, XgTxOctets, tx_bytes); | ||
| 469 | FALCON_STAT(efx, XgTxMulticastPkts, tx_multicast); | ||
| 470 | FALCON_STAT(efx, XgTxBroadcastPkts, tx_broadcast); | ||
| 471 | FALCON_STAT(efx, XgTxUnicastPkts, tx_unicast); | ||
| 472 | FALCON_STAT(efx, XgTxControlPkts, tx_control); | ||
| 473 | FALCON_STAT(efx, XgTxPausePkts, tx_pause); | ||
| 474 | FALCON_STAT(efx, XgTxPkts64Octets, tx_64); | ||
| 475 | FALCON_STAT(efx, XgTxPkts65to127Octets, tx_65_to_127); | ||
| 476 | FALCON_STAT(efx, XgTxPkts128to255Octets, tx_128_to_255); | ||
| 477 | FALCON_STAT(efx, XgTxPkts256to511Octets, tx_256_to_511); | ||
| 478 | FALCON_STAT(efx, XgTxPkts512to1023Octets, tx_512_to_1023); | ||
| 479 | FALCON_STAT(efx, XgTxPkts1024to15xxOctets, tx_1024_to_15xx); | ||
| 480 | FALCON_STAT(efx, XgTxPkts1519toMaxOctets, tx_15xx_to_jumbo); | ||
| 481 | FALCON_STAT(efx, XgTxUndersizePkts, tx_lt64); | ||
| 482 | FALCON_STAT(efx, XgTxOversizePkts, tx_gtjumbo); | ||
| 483 | FALCON_STAT(efx, XgTxNonTcpUdpPkt, tx_non_tcpudp); | ||
| 484 | FALCON_STAT(efx, XgTxMacSrcErrPkt, tx_mac_src_error); | ||
| 485 | FALCON_STAT(efx, XgTxIpSrcErrPkt, tx_ip_src_error); | ||
| 486 | |||
| 487 | /* Update derived statistics */ | ||
| 488 | mac_stats->tx_good_bytes = | ||
| 489 | (mac_stats->tx_bytes - mac_stats->tx_bad_bytes); | ||
| 490 | mac_stats->rx_bad_bytes = | ||
| 491 | (mac_stats->rx_bytes - mac_stats->rx_good_bytes); | ||
| 492 | } | ||
| 493 | |||
| 494 | #define EFX_XAUI_RETRAIN_MAX 8 | ||
| 495 | |||
| 496 | int falcon_check_xmac(struct efx_nic *efx) | ||
| 497 | { | ||
| 498 | unsigned xaui_link_ok; | ||
| 499 | int rc; | ||
| 500 | |||
| 501 | falcon_mask_status_intr(efx, 0); | ||
| 502 | xaui_link_ok = falcon_xaui_link_ok(efx); | ||
| 503 | |||
| 504 | if (EFX_WORKAROUND_5147(efx) && !xaui_link_ok) | ||
| 505 | (void) falcon_reset_xaui(efx); | ||
| 506 | |||
| 507 | /* Call the PHY check_hw routine */ | ||
| 508 | rc = efx->phy_op->check_hw(efx); | ||
| 509 | |||
| 510 | /* Unmask interrupt if everything was (and still is) ok */ | ||
| 511 | if (xaui_link_ok && efx->link_up) | ||
| 512 | falcon_mask_status_intr(efx, 1); | ||
| 513 | |||
| 514 | return rc; | ||
| 515 | } | ||
| 516 | |||
| 517 | /* Simulate a PHY event */ | ||
| 518 | void falcon_xmac_sim_phy_event(struct efx_nic *efx) | ||
| 519 | { | ||
| 520 | efx_qword_t phy_event; | ||
| 521 | |||
| 522 | EFX_POPULATE_QWORD_2(phy_event, | ||
| 523 | EV_CODE, GLOBAL_EV_DECODE, | ||
| 524 | XG_PHY_INTR, 1); | ||
| 525 | falcon_generate_event(&efx->channel[0], &phy_event); | ||
| 526 | } | ||
| 527 | |||
| 528 | int falcon_xmac_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
| 529 | { | ||
| 530 | mdio_clause45_get_settings(efx, ecmd); | ||
| 531 | ecmd->transceiver = XCVR_INTERNAL; | ||
| 532 | ecmd->phy_address = efx->mii.phy_id; | ||
| 533 | ecmd->autoneg = AUTONEG_DISABLE; | ||
| 534 | ecmd->duplex = DUPLEX_FULL; | ||
| 535 | return 0; | ||
| 536 | } | ||
| 537 | |||
| 538 | int falcon_xmac_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) | ||
| 539 | { | ||
| 540 | if (ecmd->transceiver != XCVR_INTERNAL) | ||
| 541 | return -EINVAL; | ||
| 542 | if (ecmd->autoneg != AUTONEG_DISABLE) | ||
| 543 | return -EINVAL; | ||
| 544 | if (ecmd->duplex != DUPLEX_FULL) | ||
| 545 | return -EINVAL; | ||
| 546 | |||
| 547 | return mdio_clause45_set_settings(efx, ecmd); | ||
| 548 | } | ||
| 549 | |||
| 550 | |||
| 551 | int falcon_xmac_set_pause(struct efx_nic *efx, enum efx_fc_type flow_control) | ||
| 552 | { | ||
| 553 | int reset; | ||
| 554 | |||
| 555 | if (flow_control & EFX_FC_AUTO) { | ||
| 556 | EFX_LOG(efx, "10G does not support flow control " | ||
| 557 | "autonegotiation\n"); | ||
| 558 | return -EINVAL; | ||
| 559 | } | ||
| 560 | |||
| 561 | if ((flow_control & EFX_FC_TX) && !(flow_control & EFX_FC_RX)) | ||
| 562 | return -EINVAL; | ||
| 563 | |||
| 564 | /* TX flow control may automatically turn itself off if the | ||
| 565 | * link partner (intermittently) stops responding to pause | ||
| 566 | * frames. There isn't any indication that this has happened, | ||
| 567 | * so the best we can do is leave it up to the user to spot this | ||
| 568 | * and fix it by cycling transmit flow control on this end. */ | ||
| 569 | reset = ((flow_control & EFX_FC_TX) && | ||
| 570 | !(efx->flow_control & EFX_FC_TX)); | ||
| 571 | if (EFX_WORKAROUND_11482(efx) && reset) { | ||
| 572 | if (FALCON_REV(efx) >= FALCON_REV_B0) { | ||
| 573 | /* Recover by resetting the EM block */ | ||
| 574 | if (efx->link_up) | ||
| 575 | falcon_drain_tx_fifo(efx); | ||
| 576 | } else { | ||
| 577 | /* Schedule a reset to recover */ | ||
| 578 | efx_schedule_reset(efx, RESET_TYPE_INVISIBLE); | ||
| 579 | } | ||
| 580 | } | ||
| 581 | |||
| 582 | efx->flow_control = flow_control; | ||
| 583 | |||
| 584 | return 0; | ||
| 585 | } | ||
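For illustration, a minimal sketch of a caller exercising the pause-flag rules enforced by falcon_xmac_set_pause() above; the example_set_pause() wrapper and the specific flag combinations are illustrative only and not part of the driver.

/* Sketch: the validation rules in falcon_xmac_set_pause().
 * EFX_FC_AUTO is rejected (10G has no pause autonegotiation) and
 * TX-only pause is rejected because TX pause requires RX pause. */
static int example_set_pause(struct efx_nic *efx)
{
	if (falcon_xmac_set_pause(efx, EFX_FC_AUTO) != -EINVAL)
		return -1;	/* pause autonegotiation is refused */
	if (falcon_xmac_set_pause(efx, EFX_FC_TX) != -EINVAL)
		return -1;	/* TX pause without RX pause is refused */
	/* Symmetric pause is accepted */
	return falcon_xmac_set_pause(efx, EFX_FC_TX | EFX_FC_RX);
}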
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h new file mode 100644 index 000000000000..d25bbd1297f4 --- /dev/null +++ b/drivers/net/sfc/gmii.h | |||
| @@ -0,0 +1,195 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_GMII_H | ||
| 12 | #define EFX_GMII_H | ||
| 13 | |||
| 14 | /* | ||
| 15 | * GMII interface | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include <linux/mii.h> | ||
| 19 | |||
| 20 | /* GMII registers, excluding registers already defined as MII | ||
| 21 | * registers in mii.h | ||
| 22 | */ | ||
| 23 | #define GMII_IER 0x12 /* Interrupt enable register */ | ||
| 24 | #define GMII_ISR 0x13 /* Interrupt status register */ | ||
| 25 | |||
| 26 | /* Interrupt enable register */ | ||
| 27 | #define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */ | ||
| 28 | #define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */ | ||
| 29 | #define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */ | ||
| 30 | #define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */ | ||
| 31 | #define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */ | ||
| 32 | #define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */ | ||
| 33 | #define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */ | ||
| 34 | #define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */ | ||
| 35 | #define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */ | ||
| 36 | #define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */ | ||
| 37 | #define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */ | ||
| 38 | #define IER_ENERGY 0x0010 /* Bit 4 - energy detect */ | ||
| 39 | #define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */ | ||
| 40 | #define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */ | ||
| 41 | #define IER_JABBER 0x0001 /* Bit 0 - jabber */ | ||
| 42 | |||
| 43 | /* Interrupt status register */ | ||
| 44 | #define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */ | ||
| 45 | #define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */ | ||
| 46 | #define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */ | ||
| 47 | #define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */ | ||
| 48 | #define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */ | ||
| 49 | #define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */ | ||
| 50 | #define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */ | ||
| 51 | #define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */ | ||
| 52 | #define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */ | ||
| 53 | #define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */ | ||
| 54 | #define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */ | ||
| 55 | #define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */ | ||
| 56 | #define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */ | ||
| 57 | #define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */ | ||
| 58 | #define ISR_JABBER 0x0001 /* Bit 0 - jabber */ | ||
| 59 | |||
| 60 | /* Logically extended advertisement register */ | ||
| 61 | #define GM_ADVERTISE_SLCT ADVERTISE_SLCT | ||
| 62 | #define GM_ADVERTISE_CSMA ADVERTISE_CSMA | ||
| 63 | #define GM_ADVERTISE_10HALF ADVERTISE_10HALF | ||
| 64 | #define GM_ADVERTISE_1000XFULL ADVERTISE_1000XFULL | ||
| 65 | #define GM_ADVERTISE_10FULL ADVERTISE_10FULL | ||
| 66 | #define GM_ADVERTISE_1000XHALF ADVERTISE_1000XHALF | ||
| 67 | #define GM_ADVERTISE_100HALF ADVERTISE_100HALF | ||
| 68 | #define GM_ADVERTISE_1000XPAUSE ADVERTISE_1000XPAUSE | ||
| 69 | #define GM_ADVERTISE_100FULL ADVERTISE_100FULL | ||
| 70 | #define GM_ADVERTISE_1000XPSE_ASYM ADVERTISE_1000XPSE_ASYM | ||
| 71 | #define GM_ADVERTISE_100BASE4 ADVERTISE_100BASE4 | ||
| 72 | #define GM_ADVERTISE_PAUSE_CAP ADVERTISE_PAUSE_CAP | ||
| 73 | #define GM_ADVERTISE_PAUSE_ASYM ADVERTISE_PAUSE_ASYM | ||
| 74 | #define GM_ADVERTISE_RESV ADVERTISE_RESV | ||
| 75 | #define GM_ADVERTISE_RFAULT ADVERTISE_RFAULT | ||
| 76 | #define GM_ADVERTISE_LPACK ADVERTISE_LPACK | ||
| 77 | #define GM_ADVERTISE_NPAGE ADVERTISE_NPAGE | ||
| 78 | #define GM_ADVERTISE_1000FULL (ADVERTISE_1000FULL << 8) | ||
| 79 | #define GM_ADVERTISE_1000HALF (ADVERTISE_1000HALF << 8) | ||
| 80 | #define GM_ADVERTISE_1000 (GM_ADVERTISE_1000FULL | \ | ||
| 81 | GM_ADVERTISE_1000HALF) | ||
| 82 | #define GM_ADVERTISE_FULL (GM_ADVERTISE_1000FULL | \ | ||
| 83 | ADVERTISE_FULL) | ||
| 84 | #define GM_ADVERTISE_ALL (GM_ADVERTISE_1000FULL | \ | ||
| 85 | GM_ADVERTISE_1000HALF | \ | ||
| 86 | ADVERTISE_ALL) | ||
| 87 | |||
| 88 | /* Logically extended link partner ability register */ | ||
| 89 | #define GM_LPA_SLCT LPA_SLCT | ||
| 90 | #define GM_LPA_10HALF LPA_10HALF | ||
| 91 | #define GM_LPA_1000XFULL LPA_1000XFULL | ||
| 92 | #define GM_LPA_10FULL LPA_10FULL | ||
| 93 | #define GM_LPA_1000XHALF LPA_1000XHALF | ||
| 94 | #define GM_LPA_100HALF LPA_100HALF | ||
| 95 | #define GM_LPA_1000XPAUSE LPA_1000XPAUSE | ||
| 96 | #define GM_LPA_100FULL LPA_100FULL | ||
| 97 | #define GM_LPA_1000XPAUSE_ASYM LPA_1000XPAUSE_ASYM | ||
| 98 | #define GM_LPA_100BASE4 LPA_100BASE4 | ||
| 99 | #define GM_LPA_PAUSE_CAP LPA_PAUSE_CAP | ||
| 100 | #define GM_LPA_PAUSE_ASYM LPA_PAUSE_ASYM | ||
| 101 | #define GM_LPA_RESV LPA_RESV | ||
| 102 | #define GM_LPA_RFAULT LPA_RFAULT | ||
| 103 | #define GM_LPA_LPACK LPA_LPACK | ||
| 104 | #define GM_LPA_NPAGE LPA_NPAGE | ||
| 105 | #define GM_LPA_1000FULL (LPA_1000FULL << 6) | ||
| 106 | #define GM_LPA_1000HALF (LPA_1000HALF << 6) | ||
| 107 | #define GM_LPA_10000FULL 0x00040000 | ||
| 108 | #define GM_LPA_10000HALF 0x00080000 | ||
| 109 | #define GM_LPA_DUPLEX (GM_LPA_1000FULL | GM_LPA_10000FULL \ | ||
| 110 | | LPA_DUPLEX) | ||
| 111 | #define GM_LPA_10 (LPA_10FULL | LPA_10HALF) | ||
| 112 | #define GM_LPA_100 LPA_100 | ||
| 113 | #define GM_LPA_1000 (GM_LPA_1000FULL | GM_LPA_1000HALF) | ||
| 114 | #define GM_LPA_10000 (GM_LPA_10000FULL | GM_LPA_10000HALF) | ||
| 115 | |||
| 116 | /* Retrieve GMII autonegotiation advertised abilities | ||
| 117 | * | ||
| 118 | * The MII advertisement register (MII_ADVERTISE) is logically extended | ||
| 119 | * to include advertisement bits ADVERTISE_1000FULL and | ||
| 120 | * ADVERTISE_1000HALF from MII_CTRL1000. The result can be tested | ||
| 121 | * against the GM_ADVERTISE_xxx constants. | ||
| 122 | */ | ||
| 123 | static inline unsigned int gmii_advertised(struct mii_if_info *gmii) | ||
| 124 | { | ||
| 125 | unsigned int advertise; | ||
| 126 | unsigned int ctrl1000; | ||
| 127 | |||
| 128 | advertise = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_ADVERTISE); | ||
| 129 | ctrl1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_CTRL1000); | ||
| 130 | return (((ctrl1000 << 8) & GM_ADVERTISE_1000) | advertise); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* Retrieve GMII autonegotiation link partner abilities | ||
| 134 | * | ||
| 135 | * The MII link partner ability register (MII_LPA) is logically | ||
| 136 | * extended by adding bits LPA_1000HALF and LPA_1000FULL from | ||
| 137 | * MII_STAT1000. The result can be tested against the GM_LPA_xxx | ||
| 138 | * constants. | ||
| 139 | */ | ||
| 140 | static inline unsigned int gmii_lpa(struct mii_if_info *gmii) | ||
| 141 | { | ||
| 142 | unsigned int lpa; | ||
| 143 | unsigned int stat1000; | ||
| 144 | |||
| 145 | lpa = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_LPA); | ||
| 146 | stat1000 = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_STAT1000); | ||
| 147 | return (((stat1000 << 6) & GM_LPA_1000) | lpa); | ||
| 148 | } | ||
| 149 | |||
| 150 | /* Calculate GMII autonegotiated link technology | ||
| 151 | * | ||
| 152 | * "negotiated" should be the result of gmii_advertised() logically | ||
| 153 | * ANDed with the result of gmii_lpa(). | ||
| 154 | * | ||
| 155 | * "tech" will be negotiated with the unused bits masked out. For | ||
| 156 | * example, if both ends of the link are capable of both | ||
| 157 | * GM_LPA_1000FULL and GM_LPA_100FULL, GM_LPA_100FULL will be masked | ||
| 158 | * out. | ||
| 159 | */ | ||
| 160 | static inline unsigned int gmii_nway_result(unsigned int negotiated) | ||
| 161 | { | ||
| 162 | unsigned int other_bits; | ||
| 163 | |||
| 164 | /* Mask out the speed and duplex bits */ | ||
| 165 | other_bits = negotiated & ~(GM_LPA_10 | GM_LPA_100 | GM_LPA_1000); | ||
| 166 | |||
| 167 | if (negotiated & GM_LPA_1000FULL) | ||
| 168 | return (other_bits | GM_LPA_1000FULL); | ||
| 169 | else if (negotiated & GM_LPA_1000HALF) | ||
| 170 | return (other_bits | GM_LPA_1000HALF); | ||
| 171 | else | ||
| 172 | return (other_bits | mii_nway_result(negotiated)); | ||
| 173 | } | ||
| 174 | |||
| 175 | /* Calculate GMII non-autonegotiated link technology | ||
| 176 | * | ||
| 177 | * This provides an equivalent to gmii_nway_result for the case when | ||
| 178 | * autonegotiation is disabled. | ||
| 179 | */ | ||
| 180 | static inline unsigned int gmii_forced_result(unsigned int bmcr) | ||
| 181 | { | ||
| 182 | unsigned int result; | ||
| 183 | int full_duplex; | ||
| 184 | |||
| 185 | full_duplex = bmcr & BMCR_FULLDPLX; | ||
| 186 | if (bmcr & BMCR_SPEED1000) | ||
| 187 | result = full_duplex ? GM_LPA_1000FULL : GM_LPA_1000HALF; | ||
| 188 | else if (bmcr & BMCR_SPEED100) | ||
| 189 | result = full_duplex ? GM_LPA_100FULL : GM_LPA_100HALF; | ||
| 190 | else | ||
| 191 | result = full_duplex ? GM_LPA_10FULL : GM_LPA_10HALF; | ||
| 192 | return result; | ||
| 193 | } | ||
| 194 | |||
| 195 | #endif /* EFX_GMII_H */ | ||
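A brief usage sketch of the helpers above: the link technology is obtained by ANDing the local advertisement with the link partner ability and resolving priority with gmii_nway_result(); the example_link_tech() wrapper itself is hypothetical.

/* Sketch: derive the current GMII link technology for an initialised
 * struct mii_if_info, using the inline helpers defined above. */
static unsigned int example_link_tech(struct mii_if_info *gmii)
{
	unsigned int bmcr = gmii->mdio_read(gmii->dev, gmii->phy_id, MII_BMCR);

	if (bmcr & BMCR_ANENABLE)
		/* Autonegotiation: AND what we advertise with what the
		 * link partner advertises, then pick the best mode. */
		return gmii_nway_result(gmii_advertised(gmii) & gmii_lpa(gmii));

	/* Autonegotiation disabled: infer the mode from BMCR directly */
	return gmii_forced_result(bmcr);
}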
diff --git a/drivers/net/sfc/i2c-direct.c b/drivers/net/sfc/i2c-direct.c new file mode 100644 index 000000000000..b6c62d0ed9c2 --- /dev/null +++ b/drivers/net/sfc/i2c-direct.c | |||
| @@ -0,0 +1,381 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/delay.h> | ||
| 12 | #include "net_driver.h" | ||
| 13 | #include "i2c-direct.h" | ||
| 14 | |||
| 15 | /* | ||
| 16 | * I2C data (SDA) and clock (SCL) line read/writes with appropriate | ||
| 17 | * delays. | ||
| 18 | */ | ||
| 19 | |||
| 20 | static inline void setsda(struct efx_i2c_interface *i2c, int state) | ||
| 21 | { | ||
| 22 | udelay(i2c->op->udelay); | ||
| 23 | i2c->sda = state; | ||
| 24 | i2c->op->setsda(i2c); | ||
| 25 | udelay(i2c->op->udelay); | ||
| 26 | } | ||
| 27 | |||
| 28 | static inline void setscl(struct efx_i2c_interface *i2c, int state) | ||
| 29 | { | ||
| 30 | udelay(i2c->op->udelay); | ||
| 31 | i2c->scl = state; | ||
| 32 | i2c->op->setscl(i2c); | ||
| 33 | udelay(i2c->op->udelay); | ||
| 34 | } | ||
| 35 | |||
| 36 | static inline int getsda(struct efx_i2c_interface *i2c) | ||
| 37 | { | ||
| 38 | int sda; | ||
| 39 | |||
| 40 | udelay(i2c->op->udelay); | ||
| 41 | sda = i2c->op->getsda(i2c); | ||
| 42 | udelay(i2c->op->udelay); | ||
| 43 | return sda; | ||
| 44 | } | ||
| 45 | |||
| 46 | static inline int getscl(struct efx_i2c_interface *i2c) | ||
| 47 | { | ||
| 48 | int scl; | ||
| 49 | |||
| 50 | udelay(i2c->op->udelay); | ||
| 51 | scl = i2c->op->getscl(i2c); | ||
| 52 | udelay(i2c->op->udelay); | ||
| 53 | return scl; | ||
| 54 | } | ||
| 55 | |||
| 56 | /* | ||
| 57 | * I2C low-level protocol operations | ||
| 58 | * | ||
| 59 | */ | ||
| 60 | |||
| 61 | static inline void i2c_release(struct efx_i2c_interface *i2c) | ||
| 62 | { | ||
| 63 | EFX_WARN_ON_PARANOID(!i2c->scl); | ||
| 64 | EFX_WARN_ON_PARANOID(!i2c->sda); | ||
| 65 | /* Devices may time out if operations do not end */ | ||
| 66 | setscl(i2c, 1); | ||
| 67 | setsda(i2c, 1); | ||
| 68 | EFX_BUG_ON_PARANOID(getsda(i2c) != 1); | ||
| 69 | EFX_BUG_ON_PARANOID(getscl(i2c) != 1); | ||
| 70 | } | ||
| 71 | |||
| 72 | static inline void i2c_start(struct efx_i2c_interface *i2c) | ||
| 73 | { | ||
| 74 | /* We may be restarting immediately after a {send,recv}_bit, | ||
| 75 | * so SCL will not necessarily already be high. | ||
| 76 | */ | ||
| 77 | EFX_WARN_ON_PARANOID(!i2c->sda); | ||
| 78 | setscl(i2c, 1); | ||
| 79 | setsda(i2c, 0); | ||
| 80 | setscl(i2c, 0); | ||
| 81 | setsda(i2c, 1); | ||
| 82 | } | ||
| 83 | |||
| 84 | static inline void i2c_send_bit(struct efx_i2c_interface *i2c, int bit) | ||
| 85 | { | ||
| 86 | EFX_WARN_ON_PARANOID(i2c->scl != 0); | ||
| 87 | setsda(i2c, bit); | ||
| 88 | setscl(i2c, 1); | ||
| 89 | setscl(i2c, 0); | ||
| 90 | setsda(i2c, 1); | ||
| 91 | } | ||
| 92 | |||
| 93 | static inline int i2c_recv_bit(struct efx_i2c_interface *i2c) | ||
| 94 | { | ||
| 95 | int bit; | ||
| 96 | |||
| 97 | EFX_WARN_ON_PARANOID(i2c->scl != 0); | ||
| 98 | EFX_WARN_ON_PARANOID(!i2c->sda); | ||
| 99 | setscl(i2c, 1); | ||
| 100 | bit = getsda(i2c); | ||
| 101 | setscl(i2c, 0); | ||
| 102 | return bit; | ||
| 103 | } | ||
| 104 | |||
| 105 | static inline void i2c_stop(struct efx_i2c_interface *i2c) | ||
| 106 | { | ||
| 107 | EFX_WARN_ON_PARANOID(i2c->scl != 0); | ||
| 108 | setsda(i2c, 0); | ||
| 109 | setscl(i2c, 1); | ||
| 110 | setsda(i2c, 1); | ||
| 111 | } | ||
| 112 | |||
| 113 | /* | ||
| 114 | * I2C mid-level protocol operations | ||
| 115 | * | ||
| 116 | */ | ||
| 117 | |||
| 118 | /* Sends a byte via the I2C bus and checks for an acknowledgement from | ||
| 119 | * the slave device. | ||
| 120 | */ | ||
| 121 | static int i2c_send_byte(struct efx_i2c_interface *i2c, u8 byte) | ||
| 122 | { | ||
| 123 | int i; | ||
| 124 | |||
| 125 | /* Send byte */ | ||
| 126 | for (i = 0; i < 8; i++) { | ||
| 127 | i2c_send_bit(i2c, !!(byte & 0x80)); | ||
| 128 | byte <<= 1; | ||
| 129 | } | ||
| 130 | |||
| 131 | /* Check for acknowledgement from slave */ | ||
| 132 | return (i2c_recv_bit(i2c) == 0 ? 0 : -EIO); | ||
| 133 | } | ||
| 134 | |||
| 135 | /* Receives a byte via the I2C bus and sends ACK/NACK to the slave device. */ | ||
| 136 | static u8 i2c_recv_byte(struct efx_i2c_interface *i2c, int ack) | ||
| 137 | { | ||
| 138 | u8 value = 0; | ||
| 139 | int i; | ||
| 140 | |||
| 141 | /* Receive byte */ | ||
| 142 | for (i = 0; i < 8; i++) | ||
| 143 | value = (value << 1) | i2c_recv_bit(i2c); | ||
| 144 | |||
| 145 | /* Send ACK/NACK */ | ||
| 146 | i2c_send_bit(i2c, (ack ? 0 : 1)); | ||
| 147 | |||
| 148 | return value; | ||
| 149 | } | ||
| 150 | |||
| 151 | /* Calculate command byte for a read operation */ | ||
| 152 | static inline u8 i2c_read_cmd(u8 device_id) | ||
| 153 | { | ||
| 154 | return ((device_id << 1) | 1); | ||
| 155 | } | ||
| 156 | |||
| 157 | /* Calculate command byte for a write operation */ | ||
| 158 | static inline u8 i2c_write_cmd(u8 device_id) | ||
| 159 | { | ||
| 160 | return ((device_id << 1) | 0); | ||
| 161 | } | ||
| 162 | |||
| 163 | int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id) | ||
| 164 | { | ||
| 165 | int rc; | ||
| 166 | |||
| 167 | /* If someone is driving the bus low we just give up. */ | ||
| 168 | if (getsda(i2c) == 0 || getscl(i2c) == 0) { | ||
| 169 | EFX_ERR(i2c->efx, "%s: someone is holding the I2C bus low." | ||
| 170 | " Giving up.\n", __func__); | ||
| 171 | return -EFAULT; | ||
| 172 | } | ||
| 173 | |||
| 174 | /* Pretend to initiate a device write */ | ||
| 175 | i2c_start(i2c); | ||
| 176 | rc = i2c_send_byte(i2c, i2c_write_cmd(device_id)); | ||
| 177 | if (rc) | ||
| 178 | goto out; | ||
| 179 | |||
| 180 | out: | ||
| 181 | i2c_stop(i2c); | ||
| 182 | i2c_release(i2c); | ||
| 183 | |||
| 184 | return rc; | ||
| 185 | } | ||
| 186 | |||
| 187 | /* This performs a fast read of one or more consecutive bytes from an | ||
| 188 | * I2C device. Not all devices support consecutive reads of more than | ||
| 189 | * one byte; for these devices use efx_i2c_read() instead. | ||
| 190 | */ | ||
| 191 | int efx_i2c_fast_read(struct efx_i2c_interface *i2c, | ||
| 192 | u8 device_id, u8 offset, u8 *data, unsigned int len) | ||
| 193 | { | ||
| 194 | int i; | ||
| 195 | int rc; | ||
| 196 | |||
| 197 | EFX_WARN_ON_PARANOID(getsda(i2c) != 1); | ||
| 198 | EFX_WARN_ON_PARANOID(getscl(i2c) != 1); | ||
| 199 | EFX_WARN_ON_PARANOID(data == NULL); | ||
| 200 | EFX_WARN_ON_PARANOID(len < 1); | ||
| 201 | |||
| 202 | /* Select device and starting offset */ | ||
| 203 | i2c_start(i2c); | ||
| 204 | rc = i2c_send_byte(i2c, i2c_write_cmd(device_id)); | ||
| 205 | if (rc) | ||
| 206 | goto out; | ||
| 207 | rc = i2c_send_byte(i2c, offset); | ||
| 208 | if (rc) | ||
| 209 | goto out; | ||
| 210 | |||
| 211 | /* Read data from device */ | ||
| 212 | i2c_start(i2c); | ||
| 213 | rc = i2c_send_byte(i2c, i2c_read_cmd(device_id)); | ||
| 214 | if (rc) | ||
| 215 | goto out; | ||
| 216 | for (i = 0; i < (len - 1); i++) | ||
| 217 | /* Read and acknowledge all but the last byte */ | ||
| 218 | data[i] = i2c_recv_byte(i2c, 1); | ||
| 219 | /* Read last byte with no acknowledgement */ | ||
| 220 | data[i] = i2c_recv_byte(i2c, 0); | ||
| 221 | |||
| 222 | out: | ||
| 223 | i2c_stop(i2c); | ||
| 224 | i2c_release(i2c); | ||
| 225 | |||
| 226 | return rc; | ||
| 227 | } | ||
| 228 | |||
| 229 | /* This performs a fast write of one or more consecutive bytes to an | ||
| 230 | * I2C device. Not all devices support consecutive writes of more | ||
| 231 | * than one byte; for these devices use efx_i2c_write() instead. | ||
| 232 | */ | ||
| 233 | int efx_i2c_fast_write(struct efx_i2c_interface *i2c, | ||
| 234 | u8 device_id, u8 offset, | ||
| 235 | const u8 *data, unsigned int len) | ||
| 236 | { | ||
| 237 | int i; | ||
| 238 | int rc; | ||
| 239 | |||
| 240 | EFX_WARN_ON_PARANOID(getsda(i2c) != 1); | ||
| 241 | EFX_WARN_ON_PARANOID(getscl(i2c) != 1); | ||
| 242 | EFX_WARN_ON_PARANOID(len < 1); | ||
| 243 | |||
| 244 | /* Select device and starting offset */ | ||
| 245 | i2c_start(i2c); | ||
| 246 | rc = i2c_send_byte(i2c, i2c_write_cmd(device_id)); | ||
| 247 | if (rc) | ||
| 248 | goto out; | ||
| 249 | rc = i2c_send_byte(i2c, offset); | ||
| 250 | if (rc) | ||
| 251 | goto out; | ||
| 252 | |||
| 253 | /* Write data to device */ | ||
| 254 | for (i = 0; i < len; i++) { | ||
| 255 | rc = i2c_send_byte(i2c, data[i]); | ||
| 256 | if (rc) | ||
| 257 | goto out; | ||
| 258 | } | ||
| 259 | |||
| 260 | out: | ||
| 261 | i2c_stop(i2c); | ||
| 262 | i2c_release(i2c); | ||
| 263 | |||
| 264 | return rc; | ||
| 265 | } | ||
| 266 | |||
| 267 | /* I2C byte-by-byte read */ | ||
| 268 | int efx_i2c_read(struct efx_i2c_interface *i2c, | ||
| 269 | u8 device_id, u8 offset, u8 *data, unsigned int len) | ||
| 270 | { | ||
| 271 | int rc; | ||
| 272 | |||
| 273 | /* i2c_fast_read with length 1 is a single byte read */ | ||
| 274 | for (; len > 0; offset++, data++, len--) { | ||
| 275 | rc = efx_i2c_fast_read(i2c, device_id, offset, data, 1); | ||
| 276 | if (rc) | ||
| 277 | return rc; | ||
| 278 | } | ||
| 279 | |||
| 280 | return 0; | ||
| 281 | } | ||
| 282 | |||
| 283 | /* I2C byte-by-byte write */ | ||
| 284 | int efx_i2c_write(struct efx_i2c_interface *i2c, | ||
| 285 | u8 device_id, u8 offset, const u8 *data, unsigned int len) | ||
| 286 | { | ||
| 287 | int rc; | ||
| 288 | |||
| 289 | /* i2c_fast_write with length 1 is a single byte write */ | ||
| 290 | for (; len > 0; offset++, data++, len--) { | ||
| 291 | rc = efx_i2c_fast_write(i2c, device_id, offset, data, 1); | ||
| 292 | if (rc) | ||
| 293 | return rc; | ||
| 294 | mdelay(i2c->op->mdelay); | ||
| 295 | } | ||
| 296 | |||
| 297 | return 0; | ||
| 298 | } | ||
| 299 | |||
| 300 | |||
| 301 | /* This is just a slightly neater wrapper round efx_i2c_fast_write | ||
| 302 | * in the case where the target doesn't take an offset | ||
| 303 | */ | ||
| 304 | int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, | ||
| 305 | u8 device_id, const u8 *data, unsigned int len) | ||
| 306 | { | ||
| 307 | return efx_i2c_fast_write(i2c, device_id, data[0], data + 1, len - 1); | ||
| 308 | } | ||
| 309 | |||
| 310 | /* I2C receiving of bytes - does not send an offset byte */ | ||
| 311 | int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id, | ||
| 312 | u8 *bytes, unsigned int len) | ||
| 313 | { | ||
| 314 | int i; | ||
| 315 | int rc; | ||
| 316 | |||
| 317 | EFX_WARN_ON_PARANOID(getsda(i2c) != 1); | ||
| 318 | EFX_WARN_ON_PARANOID(getscl(i2c) != 1); | ||
| 319 | EFX_WARN_ON_PARANOID(len < 1); | ||
| 320 | |||
| 321 | /* Select device */ | ||
| 322 | i2c_start(i2c); | ||
| 323 | |||
| 324 | /* Read data from device */ | ||
| 325 | rc = i2c_send_byte(i2c, i2c_read_cmd(device_id)); | ||
| 326 | if (rc) | ||
| 327 | goto out; | ||
| 328 | |||
| 329 | for (i = 0; i < (len - 1); i++) | ||
| 330 | /* Read and acknowledge all but the last byte */ | ||
| 331 | bytes[i] = i2c_recv_byte(i2c, 1); | ||
| 332 | /* Read last byte with no acknowledgement */ | ||
| 333 | bytes[i] = i2c_recv_byte(i2c, 0); | ||
| 334 | |||
| 335 | out: | ||
| 336 | i2c_stop(i2c); | ||
| 337 | i2c_release(i2c); | ||
| 338 | |||
| 339 | return rc; | ||
| 340 | } | ||
| 341 | |||
| 342 | /* SMBus and some I2C devices will time out if the I2C clock is | ||
| 343 | * held low for too long. This is most likely to happen in virtualised | ||
| 344 | * systems (when the entire domain is descheduled) but could in | ||
| 345 | * principle happen due to preemption on any busy system (and given the | ||
| 346 | * potential length of an I2C operation turning preemption off is not | ||
| 347 | * a sensible option). The following functions deal with the failure by | ||
| 348 | * retrying up to a fixed number of times. | ||
| 349 | */ | ||
| 350 | |||
| 351 | #define I2C_MAX_RETRIES (10) | ||
| 352 | |||
| 353 | /* The timeout problem will result in -EIO. If the wrapped function | ||
| 354 | * returns any other error, pass this up and do not retry. */ | ||
| 355 | #define RETRY_WRAPPER(_f) \ | ||
| 356 | int retries = I2C_MAX_RETRIES; \ | ||
| 357 | int rc; \ | ||
| 358 | while (retries) { \ | ||
| 359 | rc = _f; \ | ||
| 360 | if (rc != -EIO) \ | ||
| 361 | return rc; \ | ||
| 362 | retries--; \ | ||
| 363 | } \ | ||
| 364 | return rc; \ | ||
| 365 | |||
| 366 | int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, u8 device_id) | ||
| 367 | { | ||
| 368 | RETRY_WRAPPER(efx_i2c_check_presence(i2c, device_id)) | ||
| 369 | } | ||
| 370 | |||
| 371 | int efx_i2c_read_retry(struct efx_i2c_interface *i2c, | ||
| 372 | u8 device_id, u8 offset, u8 *data, unsigned int len) | ||
| 373 | { | ||
| 374 | RETRY_WRAPPER(efx_i2c_read(i2c, device_id, offset, data, len)) | ||
| 375 | } | ||
| 376 | |||
| 377 | int efx_i2c_write_retry(struct efx_i2c_interface *i2c, | ||
| 378 | u8 device_id, u8 offset, const u8 *data, unsigned int len) | ||
| 379 | { | ||
| 380 | RETRY_WRAPPER(efx_i2c_write(i2c, device_id, offset, data, len)) | ||
| 381 | } | ||
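As a usage sketch for the retrying wrappers above: a board driver would typically read identification data from an on-board EEPROM this way. The example_read_serial() helper and the 0x50 device address are illustrative assumptions, not values from this driver.

/* Sketch: read 8 bytes from offset 0 of a hypothetical EEPROM at I2C
 * address 0x50.  Transient -EIO failures (for example clock-stretch
 * timeouts) are retried internally, up to I2C_MAX_RETRIES times. */
static int example_read_serial(struct efx_i2c_interface *i2c, u8 *buf)
{
	return efx_i2c_read_retry(i2c, 0x50 /* device_id */, 0 /* offset */,
				  buf, 8);
}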
diff --git a/drivers/net/sfc/i2c-direct.h b/drivers/net/sfc/i2c-direct.h new file mode 100644 index 000000000000..291e561071f5 --- /dev/null +++ b/drivers/net/sfc/i2c-direct.h | |||
| @@ -0,0 +1,91 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005 Fen Systems Ltd. | ||
| 4 | * Copyright 2006 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_I2C_DIRECT_H | ||
| 12 | #define EFX_I2C_DIRECT_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * Direct control of an I2C bus | ||
| 18 | */ | ||
| 19 | |||
| 20 | struct efx_i2c_interface; | ||
| 21 | |||
| 22 | /** | ||
| 23 | * struct efx_i2c_bit_operations - I2C bus direct control methods | ||
| 24 | * | ||
| 25 | * I2C bus direct control methods. | ||
| 26 | * | ||
| 27 | * @setsda: Set state of SDA line | ||
| 28 | * @setscl: Set state of SCL line | ||
| 29 | * @getsda: Get state of SDA line | ||
| 30 | * @getscl: Get state of SCL line | ||
| 31 | * @udelay: Delay between each bit operation | ||
| 32 | * @mdelay: Delay between each byte write | ||
| 33 | */ | ||
| 34 | struct efx_i2c_bit_operations { | ||
| 35 | void (*setsda) (struct efx_i2c_interface *i2c); | ||
| 36 | void (*setscl) (struct efx_i2c_interface *i2c); | ||
| 37 | int (*getsda) (struct efx_i2c_interface *i2c); | ||
| 38 | int (*getscl) (struct efx_i2c_interface *i2c); | ||
| 39 | unsigned int udelay; | ||
| 40 | unsigned int mdelay; | ||
| 41 | }; | ||
| 42 | |||
| 43 | /** | ||
| 44 | * struct efx_i2c_interface - an I2C interface | ||
| 45 | * | ||
| 46 | * An I2C interface. | ||
| 47 | * | ||
| 48 | * @efx: Attached Efx NIC | ||
| 49 | * @op: I2C bus control methods | ||
| 50 | * @sda: Current output state of SDA line | ||
| 51 | * @scl: Current output state of SCL line | ||
| 52 | */ | ||
| 53 | struct efx_i2c_interface { | ||
| 54 | struct efx_nic *efx; | ||
| 55 | struct efx_i2c_bit_operations *op; | ||
| 56 | unsigned int sda:1; | ||
| 57 | unsigned int scl:1; | ||
| 58 | }; | ||
| 59 | |||
| 60 | extern int efx_i2c_check_presence(struct efx_i2c_interface *i2c, u8 device_id); | ||
| 61 | extern int efx_i2c_fast_read(struct efx_i2c_interface *i2c, | ||
| 62 | u8 device_id, u8 offset, | ||
| 63 | u8 *data, unsigned int len); | ||
| 64 | extern int efx_i2c_fast_write(struct efx_i2c_interface *i2c, | ||
| 65 | u8 device_id, u8 offset, | ||
| 66 | const u8 *data, unsigned int len); | ||
| 67 | extern int efx_i2c_read(struct efx_i2c_interface *i2c, | ||
| 68 | u8 device_id, u8 offset, u8 *data, unsigned int len); | ||
| 69 | extern int efx_i2c_write(struct efx_i2c_interface *i2c, | ||
| 70 | u8 device_id, u8 offset, | ||
| 71 | const u8 *data, unsigned int len); | ||
| 72 | |||
| 73 | extern int efx_i2c_send_bytes(struct efx_i2c_interface *i2c, u8 device_id, | ||
| 74 | const u8 *bytes, unsigned int len); | ||
| 75 | |||
| 76 | extern int efx_i2c_recv_bytes(struct efx_i2c_interface *i2c, u8 device_id, | ||
| 77 | u8 *bytes, unsigned int len); | ||
| 78 | |||
| 79 | |||
| 80 | /* Versions of the API that retry on failure. */ | ||
| 81 | extern int efx_i2c_check_presence_retry(struct efx_i2c_interface *i2c, | ||
| 82 | u8 device_id); | ||
| 83 | |||
| 84 | extern int efx_i2c_read_retry(struct efx_i2c_interface *i2c, | ||
| 85 | u8 device_id, u8 offset, u8 *data, unsigned int len); | ||
| 86 | |||
| 87 | extern int efx_i2c_write_retry(struct efx_i2c_interface *i2c, | ||
| 88 | u8 device_id, u8 offset, | ||
| 89 | const u8 *data, unsigned int len); | ||
| 90 | |||
| 91 | #endif /* EFX_I2C_DIRECT_H */ | ||
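To show how the two structures above fit together, here is a sketch of a board supplying bit-banging callbacks; the my_board_*() functions and the delay values are placeholders for whatever GPIO register accesses a real board would use.

/* Placeholder callbacks: a real board drives/reads the GPIO pins wired
 * to the I2C bus here, using i2c->sda and i2c->scl as the target state. */
static void my_board_set_sda(struct efx_i2c_interface *i2c) { /* drive SDA to i2c->sda */ }
static void my_board_set_scl(struct efx_i2c_interface *i2c) { /* drive SCL to i2c->scl */ }
static int my_board_get_sda(struct efx_i2c_interface *i2c) { return 1; /* sample SDA pin */ }
static int my_board_get_scl(struct efx_i2c_interface *i2c) { return 1; /* sample SCL pin */ }

static struct efx_i2c_bit_operations my_board_i2c_ops = {
	.setsda = my_board_set_sda,
	.setscl = my_board_set_scl,
	.getsda = my_board_get_sda,
	.getscl = my_board_get_scl,
	.udelay = 10,	/* microseconds between bit-level operations */
	.mdelay = 10,	/* milliseconds between byte writes (EEPROM write cycle) */
};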
diff --git a/drivers/net/sfc/mac.h b/drivers/net/sfc/mac.h new file mode 100644 index 000000000000..edd07d4dee18 --- /dev/null +++ b/drivers/net/sfc/mac.h | |||
| @@ -0,0 +1,33 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2007 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_MAC_H | ||
| 12 | #define EFX_MAC_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | extern void falcon_xmac_writel(struct efx_nic *efx, | ||
| 17 | efx_dword_t *value, unsigned int mac_reg); | ||
| 18 | extern void falcon_xmac_readl(struct efx_nic *efx, | ||
| 19 | efx_dword_t *value, unsigned int mac_reg); | ||
| 20 | extern int falcon_init_xmac(struct efx_nic *efx); | ||
| 21 | extern void falcon_reconfigure_xmac(struct efx_nic *efx); | ||
| 22 | extern void falcon_update_stats_xmac(struct efx_nic *efx); | ||
| 23 | extern void falcon_fini_xmac(struct efx_nic *efx); | ||
| 24 | extern int falcon_check_xmac(struct efx_nic *efx); | ||
| 25 | extern void falcon_xmac_sim_phy_event(struct efx_nic *efx); | ||
| 26 | extern int falcon_xmac_get_settings(struct efx_nic *efx, | ||
| 27 | struct ethtool_cmd *ecmd); | ||
| 28 | extern int falcon_xmac_set_settings(struct efx_nic *efx, | ||
| 29 | struct ethtool_cmd *ecmd); | ||
| 30 | extern int falcon_xmac_set_pause(struct efx_nic *efx, | ||
| 31 | enum efx_fc_type pause_params); | ||
| 32 | |||
| 33 | #endif | ||
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c new file mode 100644 index 000000000000..dc06bb0aa575 --- /dev/null +++ b/drivers/net/sfc/mdio_10g.c | |||
| @@ -0,0 +1,282 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | /* | ||
| 10 | * Useful functions for working with MDIO clause 45 PHYs | ||
| 11 | */ | ||
| 12 | #include <linux/types.h> | ||
| 13 | #include <linux/ethtool.h> | ||
| 14 | #include <linux/delay.h> | ||
| 15 | #include "net_driver.h" | ||
| 16 | #include "mdio_10g.h" | ||
| 17 | #include "boards.h" | ||
| 18 | |||
| 19 | int mdio_clause45_reset_mmd(struct efx_nic *port, int mmd, | ||
| 20 | int spins, int spintime) | ||
| 21 | { | ||
| 22 | u32 ctrl; | ||
| 23 | int phy_id = port->mii.phy_id; | ||
| 24 | |||
| 25 | /* Catch callers passing values in the wrong units (or just silly) */ | ||
| 26 | EFX_BUG_ON_PARANOID(spins * spintime >= 5000); | ||
| 27 | |||
| 28 | mdio_clause45_write(port, phy_id, mmd, MDIO_MMDREG_CTRL1, | ||
| 29 | (1 << MDIO_MMDREG_CTRL1_RESET_LBN)); | ||
| 30 | /* Wait for the reset bit to clear. */ | ||
| 31 | do { | ||
| 32 | msleep(spintime); | ||
| 33 | ctrl = mdio_clause45_read(port, phy_id, mmd, MDIO_MMDREG_CTRL1); | ||
| 34 | spins--; | ||
| 35 | |||
| 36 | } while (spins && (ctrl & (1 << MDIO_MMDREG_CTRL1_RESET_LBN))); | ||
| 37 | |||
| 38 | return spins ? spins : -ETIMEDOUT; | ||
| 39 | } | ||
| 40 | |||
| 41 | static int mdio_clause45_check_mmd(struct efx_nic *efx, int mmd, | ||
| 42 | int fault_fatal) | ||
| 43 | { | ||
| 44 | int status; | ||
| 45 | int phy_id = efx->mii.phy_id; | ||
| 46 | |||
| 47 | /* Read MMD STATUS2 to check it is responding. */ | ||
| 48 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT2); | ||
| 49 | if (((status >> MDIO_MMDREG_STAT2_PRESENT_LBN) & | ||
| 50 | ((1 << MDIO_MMDREG_STAT2_PRESENT_WIDTH) - 1)) != | ||
| 51 | MDIO_MMDREG_STAT2_PRESENT_VAL) { | ||
| 52 | EFX_ERR(efx, "PHY MMD %d not responding.\n", mmd); | ||
| 53 | return -EIO; | ||
| 54 | } | ||
| 55 | |||
| 56 | /* Read MMD STATUS 1 to check for fault. */ | ||
| 57 | status = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_STAT1); | ||
| 58 | if ((status & (1 << MDIO_MMDREG_STAT1_FAULT_LBN)) != 0) { | ||
| 59 | if (fault_fatal) { | ||
| 60 | EFX_ERR(efx, "PHY MMD %d reporting fatal" | ||
| 61 | " fault: status %x\n", mmd, status); | ||
| 62 | return -EIO; | ||
| 63 | } else { | ||
| 64 | EFX_LOG(efx, "PHY MMD %d reporting status" | ||
| 65 | " %x (expected)\n", mmd, status); | ||
| 66 | } | ||
| 67 | } | ||
| 68 | return 0; | ||
| 69 | } | ||
| 70 | |||
| 71 | /* This ought to be ridiculous overkill. We expect it to fail rarely */ | ||
| 72 | #define MDIO45_RESET_TIME 1000 /* ms */ | ||
| 73 | #define MDIO45_RESET_ITERS 100 | ||
| 74 | |||
| 75 | int mdio_clause45_wait_reset_mmds(struct efx_nic *efx, | ||
| 76 | unsigned int mmd_mask) | ||
| 77 | { | ||
| 78 | const int spintime = MDIO45_RESET_TIME / MDIO45_RESET_ITERS; | ||
| 79 | int tries = MDIO45_RESET_ITERS; | ||
| 80 | int rc = 0; | ||
| 81 | int in_reset; | ||
| 82 | |||
| 83 | while (tries) { | ||
| 84 | int mask = mmd_mask; | ||
| 85 | int mmd = 0; | ||
| 86 | int stat; | ||
| 87 | in_reset = 0; | ||
| 88 | while (mask) { | ||
| 89 | if (mask & 1) { | ||
| 90 | stat = mdio_clause45_read(efx, | ||
| 91 | efx->mii.phy_id, | ||
| 92 | mmd, | ||
| 93 | MDIO_MMDREG_CTRL1); | ||
| 94 | if (stat < 0) { | ||
| 95 | EFX_ERR(efx, "failed to read status of" | ||
| 96 | " MMD %d\n", mmd); | ||
| 97 | return -EIO; | ||
| 98 | } | ||
| 99 | if (stat & (1 << MDIO_MMDREG_CTRL1_RESET_LBN)) | ||
| 100 | in_reset |= (1 << mmd); | ||
| 101 | } | ||
| 102 | mask = mask >> 1; | ||
| 103 | mmd++; | ||
| 104 | } | ||
| 105 | if (!in_reset) | ||
| 106 | break; | ||
| 107 | tries--; | ||
| 108 | msleep(spintime); | ||
| 109 | } | ||
| 110 | if (in_reset != 0) { | ||
| 111 | EFX_ERR(efx, "not all MMDs came out of reset in time." | ||
| 112 | " MMDs still in reset: %x\n", in_reset); | ||
| 113 | rc = -ETIMEDOUT; | ||
| 114 | } | ||
| 115 | return rc; | ||
| 116 | } | ||
| 117 | |||
| 118 | int mdio_clause45_check_mmds(struct efx_nic *efx, | ||
| 119 | unsigned int mmd_mask, unsigned int fatal_mask) | ||
| 120 | { | ||
| 121 | int devices, mmd = 0; | ||
| 122 | int probe_mmd; | ||
| 123 | |||
| 124 | /* Historically we have probed the PHYXS to find out what devices are | ||
| 125 | * present, but that doesn't work so well if the PHYXS isn't expected | ||
| 126 | * to exist; in that case just probe the first MMD in the supplied mask. */ | ||
| 127 | probe_mmd = (mmd_mask & MDIO_MMDREG_DEVS0_PHYXS) ? MDIO_MMD_PHYXS : | ||
| 128 | __ffs(mmd_mask); | ||
| 129 | devices = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 130 | probe_mmd, MDIO_MMDREG_DEVS0); | ||
| 131 | |||
| 132 | /* Check all the expected MMDs are present */ | ||
| 133 | if (devices < 0) { | ||
| 134 | EFX_ERR(efx, "failed to read devices present\n"); | ||
| 135 | return -EIO; | ||
| 136 | } | ||
| 137 | if ((devices & mmd_mask) != mmd_mask) { | ||
| 138 | EFX_ERR(efx, "required MMDs not present: got %x, " | ||
| 139 | "wanted %x\n", devices, mmd_mask); | ||
| 140 | return -ENODEV; | ||
| 141 | } | ||
| 142 | EFX_TRACE(efx, "Devices present: %x\n", devices); | ||
| 143 | |||
| 144 | /* Check all required MMDs are responding and happy. */ | ||
| 145 | while (mmd_mask) { | ||
| 146 | if (mmd_mask & 1) { | ||
| 147 | int fault_fatal = fatal_mask & 1; | ||
| 148 | if (mdio_clause45_check_mmd(efx, mmd, fault_fatal)) | ||
| 149 | return -EIO; | ||
| 150 | } | ||
| 151 | mmd_mask = mmd_mask >> 1; | ||
| 152 | fatal_mask = fatal_mask >> 1; | ||
| 153 | mmd++; | ||
| 154 | } | ||
| 155 | |||
| 156 | return 0; | ||
| 157 | } | ||
| 158 | |||
| 159 | int mdio_clause45_links_ok(struct efx_nic *efx, unsigned int mmd_mask) | ||
| 160 | { | ||
| 161 | int phy_id = efx->mii.phy_id; | ||
| 162 | int status; | ||
| 163 | int ok = 1; | ||
| 164 | int mmd = 0; | ||
| 165 | int good; | ||
| 166 | |||
| 167 | while (mmd_mask) { | ||
| 168 | if (mmd_mask & 1) { | ||
| 169 | /* Double reads because link state is latched, and a | ||
| 170 | * read moves the current state into the register */ | ||
| 171 | status = mdio_clause45_read(efx, phy_id, | ||
| 172 | mmd, MDIO_MMDREG_STAT1); | ||
| 173 | status = mdio_clause45_read(efx, phy_id, | ||
| 174 | mmd, MDIO_MMDREG_STAT1); | ||
| 175 | |||
| 176 | good = status & (1 << MDIO_MMDREG_STAT1_LINK_LBN); | ||
| 177 | ok = ok && good; | ||
| 178 | } | ||
| 179 | mmd_mask = (mmd_mask >> 1); | ||
| 180 | mmd++; | ||
| 181 | } | ||
| 182 | return ok; | ||
| 183 | } | ||
| 184 | |||
| 185 | /** | ||
| 186 | * mdio_clause45_get_settings - Read (some of) the PHY settings over MDIO. | ||
| 187 | * @efx: Efx NIC | ||
| 188 | * @ecmd: Buffer for settings | ||
| 189 | * | ||
| 190 | * On return the 'port', 'speed', 'supported' and 'advertising' fields of | ||
| 191 | * ecmd have been filled out based on the PMA type. | ||
| 192 | */ | ||
| 193 | void mdio_clause45_get_settings(struct efx_nic *efx, | ||
| 194 | struct ethtool_cmd *ecmd) | ||
| 195 | { | ||
| 196 | int pma_type; | ||
| 197 | |||
| 198 | /* If no PMA is present we are presumably talking something XAUI-ish | ||
| 199 | * like CX4. Which we report as FIBRE (see below) */ | ||
| 200 | if ((efx->phy_op->mmds & DEV_PRESENT_BIT(MDIO_MMD_PMAPMD)) == 0) { | ||
| 201 | ecmd->speed = SPEED_10000; | ||
| 202 | ecmd->port = PORT_FIBRE; | ||
| 203 | ecmd->supported = SUPPORTED_FIBRE; | ||
| 204 | ecmd->advertising = ADVERTISED_FIBRE; | ||
| 205 | return; | ||
| 206 | } | ||
| 207 | |||
| 208 | pma_type = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 209 | MDIO_MMD_PMAPMD, MDIO_MMDREG_CTRL2); | ||
| 210 | pma_type &= MDIO_PMAPMD_CTRL2_TYPE_MASK; | ||
| 211 | |||
| 212 | switch (pma_type) { | ||
| 213 | /* We represent CX4 as fibre in the absence of anything | ||
| 214 | better. */ | ||
| 215 | case MDIO_PMAPMD_CTRL2_10G_CX4: | ||
| 216 | ecmd->speed = SPEED_10000; | ||
| 217 | ecmd->port = PORT_FIBRE; | ||
| 218 | ecmd->supported = SUPPORTED_FIBRE; | ||
| 219 | ecmd->advertising = ADVERTISED_FIBRE; | ||
| 220 | break; | ||
| 221 | /* 10G Base-T */ | ||
| 222 | case MDIO_PMAPMD_CTRL2_10G_BT: | ||
| 223 | ecmd->speed = SPEED_10000; | ||
| 224 | ecmd->port = PORT_TP; | ||
| 225 | ecmd->supported = SUPPORTED_TP | SUPPORTED_10000baseT_Full; | ||
| 226 | ecmd->advertising = (ADVERTISED_FIBRE | ||
| 227 | | ADVERTISED_10000baseT_Full); | ||
| 228 | break; | ||
| 229 | case MDIO_PMAPMD_CTRL2_1G_BT: | ||
| 230 | ecmd->speed = SPEED_1000; | ||
| 231 | ecmd->port = PORT_TP; | ||
| 232 | ecmd->supported = SUPPORTED_TP | SUPPORTED_1000baseT_Full; | ||
| 233 | ecmd->advertising = (ADVERTISED_FIBRE | ||
| 234 | | ADVERTISED_1000baseT_Full); | ||
| 235 | break; | ||
| 236 | case MDIO_PMAPMD_CTRL2_100_BT: | ||
| 237 | ecmd->speed = SPEED_100; | ||
| 238 | ecmd->port = PORT_TP; | ||
| 239 | ecmd->supported = SUPPORTED_TP | SUPPORTED_100baseT_Full; | ||
| 240 | ecmd->advertising = (ADVERTISED_FIBRE | ||
| 241 | | ADVERTISED_100baseT_Full); | ||
| 242 | break; | ||
| 243 | case MDIO_PMAPMD_CTRL2_10_BT: | ||
| 244 | ecmd->speed = SPEED_10; | ||
| 245 | ecmd->port = PORT_TP; | ||
| 246 | ecmd->supported = SUPPORTED_TP | SUPPORTED_10baseT_Full; | ||
| 247 | ecmd->advertising = ADVERTISED_FIBRE | ADVERTISED_10baseT_Full; | ||
| 248 | break; | ||
| 249 | /* All the other defined modes are flavours of | ||
| 250 | * 10G optical */ | ||
| 251 | default: | ||
| 252 | ecmd->speed = SPEED_10000; | ||
| 253 | ecmd->port = PORT_FIBRE; | ||
| 254 | ecmd->supported = SUPPORTED_FIBRE; | ||
| 255 | ecmd->advertising = ADVERTISED_FIBRE; | ||
| 256 | break; | ||
| 257 | } | ||
| 258 | } | ||
| 259 | |||
| 260 | /** | ||
| 261 | * mdio_clause45_set_settings - Set (some of) the PHY settings over MDIO. | ||
| 262 | * @efx: Efx NIC | ||
| 263 | * @ecmd: New settings | ||
| 264 | * | ||
| 265 | * Currently this just enforces that we are _not_ changing the | ||
| 266 | * 'port', 'speed', 'supported' or 'advertising' settings as these | ||
| 267 | * cannot be changed on any currently supported PHY. | ||
| 268 | */ | ||
| 269 | int mdio_clause45_set_settings(struct efx_nic *efx, | ||
| 270 | struct ethtool_cmd *ecmd) | ||
| 271 | { | ||
| 272 | struct ethtool_cmd tmpcmd; | ||
| 273 | mdio_clause45_get_settings(efx, &tmpcmd); | ||
| 274 | /* None of the current PHYs support more than one mode | ||
| 275 | * of operation (and only 10GBT ever will), so keep things | ||
| 276 | * simple for now */ | ||
| 277 | if ((ecmd->speed == tmpcmd.speed) && (ecmd->port == tmpcmd.port) && | ||
| 278 | (ecmd->supported == tmpcmd.supported) && | ||
| 279 | (ecmd->advertising == tmpcmd.advertising)) | ||
| 280 | return 0; | ||
| 281 | return -EOPNOTSUPP; | ||
| 282 | } | ||
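As a sketch of how a PHY driver might combine these helpers at start of day: verify the expected MMDs are present and fault-free, then poll the latched link bits. The example_phy_check() function and its MMD mask are illustrative.

/* Sketch: check that the PMA/PMD, PCS and PHY XS MMDs are present and
 * then report whether they all show link up. */
static int example_phy_check(struct efx_nic *efx)
{
	unsigned int mmds = MDIO_MMDREG_DEVS0_PMAPMD |
			    MDIO_MMDREG_DEVS0_PCS |
			    MDIO_MMDREG_DEVS0_PHYXS;
	int rc;

	rc = mdio_clause45_check_mmds(efx, mmds, 0 /* no faults are fatal */);
	if (rc)
		return rc;

	/* links_ok reads STAT1 twice per MMD because link state is latched */
	return mdio_clause45_links_ok(efx, mmds) ? 0 : -EAGAIN;
}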
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h new file mode 100644 index 000000000000..2214b6d820a7 --- /dev/null +++ b/drivers/net/sfc/mdio_10g.h | |||
| @@ -0,0 +1,232 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_MDIO_10G_H | ||
| 11 | #define EFX_MDIO_10G_H | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Definitions needed for doing 10G MDIO as specified in clause 45 | ||
| 15 | * MDIO, which do not appear in Linux yet. Also some helper functions. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #include "efx.h" | ||
| 19 | #include "boards.h" | ||
| 20 | |||
| 21 | /* Numbering of the MDIO Manageable Devices (MMDs) */ | ||
| 22 | /* Physical Medium Attachment/ Physical Medium Dependent sublayer */ | ||
| 23 | #define MDIO_MMD_PMAPMD (1) | ||
| 24 | /* WAN Interface Sublayer */ | ||
| 25 | #define MDIO_MMD_WIS (2) | ||
| 26 | /* Physical Coding Sublayer */ | ||
| 27 | #define MDIO_MMD_PCS (3) | ||
| 28 | /* PHY Extender Sublayer */ | ||
| 29 | #define MDIO_MMD_PHYXS (4) | ||
| 30 | /* Extender Sublayer */ | ||
| 31 | #define MDIO_MMD_DTEXS (5) | ||
| 32 | /* Transmission convergence */ | ||
| 33 | #define MDIO_MMD_TC (6) | ||
| 34 | /* Auto negotiation */ | ||
| 35 | #define MDIO_MMD_AN (7) | ||
| 36 | |||
| 37 | /* Generic register locations */ | ||
| 38 | #define MDIO_MMDREG_CTRL1 (0) | ||
| 39 | #define MDIO_MMDREG_STAT1 (1) | ||
| 40 | #define MDIO_MMDREG_IDHI (2) | ||
| 41 | #define MDIO_MMDREG_IDLOW (3) | ||
| 42 | #define MDIO_MMDREG_SPEED (4) | ||
| 43 | #define MDIO_MMDREG_DEVS0 (5) | ||
| 44 | #define MDIO_MMDREG_DEVS1 (6) | ||
| 45 | #define MDIO_MMDREG_CTRL2 (7) | ||
| 46 | #define MDIO_MMDREG_STAT2 (8) | ||
| 47 | |||
| 48 | /* Bits in MMDREG_CTRL1 */ | ||
| 49 | /* Reset */ | ||
| 50 | #define MDIO_MMDREG_CTRL1_RESET_LBN (15) | ||
| 51 | #define MDIO_MMDREG_CTRL1_RESET_WIDTH (1) | ||
| 52 | |||
| 53 | /* Bits in MMDREG_STAT1 */ | ||
| 54 | #define MDIO_MMDREG_STAT1_FAULT_LBN (7) | ||
| 55 | #define MDIO_MMDREG_STAT1_FAULT_WIDTH (1) | ||
| 56 | /* Link state */ | ||
| 57 | #define MDIO_MMDREG_STAT1_LINK_LBN (2) | ||
| 58 | #define MDIO_MMDREG_STAT1_LINK_WIDTH (1) | ||
| 59 | |||
| 60 | /* Bits in ID reg */ | ||
| 61 | #define MDIO_ID_REV(_id32) (_id32 & 0xf) | ||
| 62 | #define MDIO_ID_MODEL(_id32) ((_id32 >> 4) & 0x3f) | ||
| 63 | #define MDIO_ID_OUI(_id32) (_id32 >> 10) | ||
| 64 | |||
| 65 | /* Bits in MMDREG_DEVS0. Someone thoughtfully laid things out | ||
| 66 | * so the 'bit present' bit number of an MMD is the number of | ||
| 67 | * that MMD */ | ||
| 68 | #define DEV_PRESENT_BIT(_b) (1 << _b) | ||
| 69 | |||
| 70 | #define MDIO_MMDREG_DEVS0_PHYXS DEV_PRESENT_BIT(MDIO_MMD_PHYXS) | ||
| 71 | #define MDIO_MMDREG_DEVS0_PCS DEV_PRESENT_BIT(MDIO_MMD_PCS) | ||
| 72 | #define MDIO_MMDREG_DEVS0_PMAPMD DEV_PRESENT_BIT(MDIO_MMD_PMAPMD) | ||
| 73 | |||
| 74 | /* Bits in MMDREG_STAT2 */ | ||
| 75 | #define MDIO_MMDREG_STAT2_PRESENT_VAL (2) | ||
| 76 | #define MDIO_MMDREG_STAT2_PRESENT_LBN (14) | ||
| 77 | #define MDIO_MMDREG_STAT2_PRESENT_WIDTH (2) | ||
| 78 | |||
| 79 | /* PMA type (4 bits) */ | ||
| 80 | #define MDIO_PMAPMD_CTRL2_10G_CX4 (0x0) | ||
| 81 | #define MDIO_PMAPMD_CTRL2_10G_EW (0x1) | ||
| 82 | #define MDIO_PMAPMD_CTRL2_10G_LW (0x2) | ||
| 83 | #define MDIO_PMAPMD_CTRL2_10G_SW (0x3) | ||
| 84 | #define MDIO_PMAPMD_CTRL2_10G_LX4 (0x4) | ||
| 85 | #define MDIO_PMAPMD_CTRL2_10G_ER (0x5) | ||
| 86 | #define MDIO_PMAPMD_CTRL2_10G_LR (0x6) | ||
| 87 | #define MDIO_PMAPMD_CTRL2_10G_SR (0x7) | ||
| 88 | /* Reserved */ | ||
| 89 | #define MDIO_PMAPMD_CTRL2_10G_BT (0x9) | ||
| 90 | /* Reserved */ | ||
| 91 | /* Reserved */ | ||
| 92 | #define MDIO_PMAPMD_CTRL2_1G_BT (0xc) | ||
| 93 | /* Reserved */ | ||
| 94 | #define MDIO_PMAPMD_CTRL2_100_BT (0xe) | ||
| 95 | #define MDIO_PMAPMD_CTRL2_10_BT (0xf) | ||
| 96 | #define MDIO_PMAPMD_CTRL2_TYPE_MASK (0xf) | ||
| 97 | |||
| 98 | /* PHY XGXS lane state */ | ||
| 99 | #define MDIO_PHYXS_LANE_STATE (0x18) | ||
| 100 | #define MDIO_PHYXS_LANE_ALIGNED_LBN (12) | ||
| 101 | |||
| 102 | /* AN registers */ | ||
| 103 | #define MDIO_AN_STATUS (1) | ||
| 104 | #define MDIO_AN_STATUS_XNP_LBN (7) | ||
| 105 | #define MDIO_AN_STATUS_PAGE_LBN (6) | ||
| 106 | #define MDIO_AN_STATUS_AN_DONE_LBN (5) | ||
| 107 | #define MDIO_AN_STATUS_LP_AN_CAP_LBN (0) | ||
| 108 | |||
| 109 | #define MDIO_AN_10GBT_STATUS (33) | ||
| 110 | #define MDIO_AN_10GBT_STATUS_MS_FLT_LBN (15) /* MASTER/SLAVE config fault */ | ||
| 111 | #define MDIO_AN_10GBT_STATUS_MS_LBN (14) /* MASTER/SLAVE config */ | ||
| 112 | #define MDIO_AN_10GBT_STATUS_LOC_OK_LBN (13) /* Local OK */ | ||
| 113 | #define MDIO_AN_10GBT_STATUS_REM_OK_LBN (12) /* Remote OK */ | ||
| 114 | #define MDIO_AN_10GBT_STATUS_LP_10G_LBN (11) /* Link partner is 10GBT capable */ | ||
| 115 | #define MDIO_AN_10GBT_STATUS_LP_LTA_LBN (10) /* LP loop timing ability */ | ||
| 116 | #define MDIO_AN_10GBT_STATUS_LP_TRR_LBN (9) /* LP Training Reset Request */ | ||
| 117 | |||
| 118 | |||
| 119 | /* Packing of the prt and dev arguments of clause 45 style MDIO into a | ||
| 120 | * single int so they can be passed into the mdio_read/write functions | ||
| 121 | * that currently exist. Note that as Falcon is the only current user, | ||
| 122 | * the packed form is chosen to match what Falcon needs to write into | ||
| 123 | * a register. This is checked at compile-time so do not change it. If | ||
| 124 | * your target chip needs things layed out differently you will need | ||
| 125 | * to unpack the arguments in your chip-specific mdio functions. | ||
| 126 | */ | ||
| 127 | /* These are defined by the standard. */ | ||
| 128 | #define MDIO45_PRT_ID_WIDTH (5) | ||
| 129 | #define MDIO45_DEV_ID_WIDTH (5) | ||
| 130 | |||
| 131 | /* The prt ID is just packed in immediately to the left of the dev ID */ | ||
| 132 | #define MDIO45_PRT_DEV_WIDTH (MDIO45_PRT_ID_WIDTH + MDIO45_DEV_ID_WIDTH) | ||
| 133 | |||
| 134 | #define MDIO45_PRT_ID_MASK ((1 << MDIO45_PRT_DEV_WIDTH) - 1) | ||
| 135 | /* This is the prt + dev extended by 1 bit to hold the 'is clause 45' flag. */ | ||
| 136 | #define MDIO45_XPRT_ID_WIDTH (MDIO45_PRT_DEV_WIDTH + 1) | ||
| 137 | #define MDIO45_XPRT_ID_MASK ((1 << MDIO45_XPRT_ID_WIDTH) - 1) | ||
| 138 | #define MDIO45_XPRT_ID_IS10G (1 << (MDIO45_XPRT_ID_WIDTH - 1)) | ||
| 139 | |||
| 140 | |||
| 141 | #define MDIO45_PRT_ID_COMP_LBN MDIO45_DEV_ID_WIDTH | ||
| 142 | #define MDIO45_PRT_ID_COMP_WIDTH MDIO45_PRT_ID_WIDTH | ||
| 143 | #define MDIO45_DEV_ID_COMP_LBN 0 | ||
| 144 | #define MDIO45_DEV_ID_COMP_WIDTH MDIO45_DEV_ID_WIDTH | ||
| 145 | |||
| 146 | /* Compose port and device into a phy_id */ | ||
| 147 | static inline int mdio_clause45_pack(u8 prt, u8 dev) | ||
| 148 | { | ||
| 149 | efx_dword_t phy_id; | ||
| 150 | EFX_POPULATE_DWORD_2(phy_id, MDIO45_PRT_ID_COMP, prt, | ||
| 151 | MDIO45_DEV_ID_COMP, dev); | ||
| 152 | return MDIO45_XPRT_ID_IS10G | EFX_DWORD_VAL(phy_id); | ||
| 153 | } | ||
| 154 | |||
| 155 | static inline void mdio_clause45_unpack(u32 val, u8 *prt, u8 *dev) | ||
| 156 | { | ||
| 157 | efx_dword_t phy_id; | ||
| 158 | EFX_POPULATE_DWORD_1(phy_id, EFX_DWORD_0, val); | ||
| 159 | *prt = EFX_DWORD_FIELD(phy_id, MDIO45_PRT_ID_COMP); | ||
| 160 | *dev = EFX_DWORD_FIELD(phy_id, MDIO45_DEV_ID_COMP); | ||
| 161 | } | ||
| 162 | |||
| 163 | static inline int mdio_clause45_read(struct efx_nic *efx, | ||
| 164 | u8 prt, u8 dev, u16 addr) | ||
| 165 | { | ||
| 166 | return efx->mii.mdio_read(efx->net_dev, | ||
| 167 | mdio_clause45_pack(prt, dev), addr); | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline void mdio_clause45_write(struct efx_nic *efx, | ||
| 171 | u8 prt, u8 dev, u16 addr, int value) | ||
| 172 | { | ||
| 173 | efx->mii.mdio_write(efx->net_dev, | ||
| 174 | mdio_clause45_pack(prt, dev), addr, value); | ||
| 175 | } | ||
| 176 | |||
| 177 | |||
| 178 | static inline u32 mdio_clause45_read_id(struct efx_nic *efx, int mmd) | ||
| 179 | { | ||
| 180 | int phy_id = efx->mii.phy_id; | ||
| 181 | u16 id_low = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDLOW); | ||
| 182 | u16 id_hi = mdio_clause45_read(efx, phy_id, mmd, MDIO_MMDREG_IDHI); | ||
| 183 | return (id_hi << 16) | (id_low); | ||
| 184 | } | ||
| 185 | |||
| 186 | static inline int mdio_clause45_phyxgxs_lane_sync(struct efx_nic *efx) | ||
| 187 | { | ||
| 188 | int i, sync, lane_status; | ||
| 189 | |||
| 190 | for (i = 0; i < 2; ++i) | ||
| 191 | lane_status = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 192 | MDIO_MMD_PHYXS, | ||
| 193 | MDIO_PHYXS_LANE_STATE); | ||
| 194 | |||
| 195 | sync = (lane_status & (1 << MDIO_PHYXS_LANE_ALIGNED_LBN)) != 0; | ||
| 196 | if (!sync) | ||
| 197 | EFX_INFO(efx, "XGXS lane status: %x\n", lane_status); | ||
| 198 | return sync; | ||
| 199 | } | ||
| 200 | |||
| 201 | extern const char *mdio_clause45_mmd_name(int mmd); | ||
| 202 | |||
| 203 | /* | ||
| 204 | * Reset a specific MMD and wait for reset to clear. | ||
| 205 | * Return number of spins left (>0) on success, -%ETIMEDOUT on failure. | ||
| 206 | * | ||
| 207 | * This function will sleep | ||
| 208 | */ | ||
| 209 | extern int mdio_clause45_reset_mmd(struct efx_nic *efx, int mmd, | ||
| 210 | int spins, int spintime); | ||
| 211 | |||
| 212 | /* As mdio_clause45_check_mmd but for multiple MMDs */ | ||
| 213 | int mdio_clause45_check_mmds(struct efx_nic *efx, | ||
| 214 | unsigned int mmd_mask, unsigned int fatal_mask); | ||
| 215 | |||
| 216 | /* Check the link status of specified mmds in bit mask */ | ||
| 217 | extern int mdio_clause45_links_ok(struct efx_nic *efx, | ||
| 218 | unsigned int mmd_mask); | ||
| 219 | |||
| 220 | /* Read (some of) the PHY settings over MDIO */ | ||
| 221 | extern void mdio_clause45_get_settings(struct efx_nic *efx, | ||
| 222 | struct ethtool_cmd *ecmd); | ||
| 223 | |||
| 224 | /* Set (some of) the PHY settings over MDIO */ | ||
| 225 | extern int mdio_clause45_set_settings(struct efx_nic *efx, | ||
| 226 | struct ethtool_cmd *ecmd); | ||
| 227 | |||
| 228 | /* Wait for specified MMDs to exit reset within a timeout */ | ||
| 229 | extern int mdio_clause45_wait_reset_mmds(struct efx_nic *efx, | ||
| 230 | unsigned int mmd_mask); | ||
| 231 | |||
| 232 | #endif /* EFX_MDIO_10G_H */ | ||
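A worked example of the phy_id packing defined above may help: dev occupies bits 0-4, prt bits 5-9, and bit 10 is the "is clause 45" flag, so packing port 2 with the PMA/PMD MMD yields 0x441. The example_phy_id() wrapper is purely illustrative.

/* Sketch: pack and unpack a clause-45 phy_id by hand. */
static void example_phy_id(void)
{
	u8 prt, dev;
	int id = mdio_clause45_pack(2 /* prt */, MDIO_MMD_PMAPMD /* dev = 1 */);

	/* id == MDIO45_XPRT_ID_IS10G | (2 << 5) | 1 == 0x441 */
	mdio_clause45_unpack(id, &prt, &dev);	/* prt == 2, dev == 1 */
}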
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h new file mode 100644 index 000000000000..c505482c2520 --- /dev/null +++ b/drivers/net/sfc/net_driver.h | |||
| @@ -0,0 +1,883 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2005-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | /* Common definitions for all Efx net driver code */ | ||
| 12 | |||
| 13 | #ifndef EFX_NET_DRIVER_H | ||
| 14 | #define EFX_NET_DRIVER_H | ||
| 15 | |||
| 16 | #include <linux/version.h> | ||
| 17 | #include <linux/netdevice.h> | ||
| 18 | #include <linux/etherdevice.h> | ||
| 19 | #include <linux/ethtool.h> | ||
| 20 | #include <linux/if_vlan.h> | ||
| 21 | #include <linux/timer.h> | ||
| 22 | #include <linux/mii.h> | ||
| 23 | #include <linux/list.h> | ||
| 24 | #include <linux/pci.h> | ||
| 25 | #include <linux/device.h> | ||
| 26 | #include <linux/highmem.h> | ||
| 27 | #include <linux/workqueue.h> | ||
| 28 | #include <linux/inet_lro.h> | ||
| 29 | |||
| 30 | #include "enum.h" | ||
| 31 | #include "bitfield.h" | ||
| 32 | #include "i2c-direct.h" | ||
| 33 | |||
| 34 | #define EFX_MAX_LRO_DESCRIPTORS 8 | ||
| 35 | #define EFX_MAX_LRO_AGGR MAX_SKB_FRAGS | ||
| 36 | |||
| 37 | /************************************************************************** | ||
| 38 | * | ||
| 39 | * Build definitions | ||
| 40 | * | ||
| 41 | **************************************************************************/ | ||
| 42 | #ifndef EFX_DRIVER_NAME | ||
| 43 | #define EFX_DRIVER_NAME "sfc" | ||
| 44 | #endif | ||
| 45 | #define EFX_DRIVER_VERSION "2.2.0136" | ||
| 46 | |||
| 47 | #ifdef EFX_ENABLE_DEBUG | ||
| 48 | #define EFX_BUG_ON_PARANOID(x) BUG_ON(x) | ||
| 49 | #define EFX_WARN_ON_PARANOID(x) WARN_ON(x) | ||
| 50 | #else | ||
| 51 | #define EFX_BUG_ON_PARANOID(x) do {} while (0) | ||
| 52 | #define EFX_WARN_ON_PARANOID(x) do {} while (0) | ||
| 53 | #endif | ||
| 54 | |||
| 55 | #define NET_DEV_REGISTERED(efx) \ | ||
| 56 | ((efx)->net_dev->reg_state == NETREG_REGISTERED) | ||
| 57 | |||
| 58 | /* Include net device name in log messages if it has been registered. | ||
| 59 | * Use efx->name not efx->net_dev->name so that races with (un)registration | ||
| 60 | * are harmless. | ||
| 61 | */ | ||
| 62 | #define NET_DEV_NAME(efx) (NET_DEV_REGISTERED(efx) ? (efx)->name : "") | ||
| 63 | |||
| 64 | /* Un-rate-limited logging */ | ||
| 65 | #define EFX_ERR(efx, fmt, args...) \ | ||
| 66 | dev_err(&((efx)->pci_dev->dev), "ERR: %s " fmt, NET_DEV_NAME(efx), ##args) | ||
| 67 | |||
| 68 | #define EFX_INFO(efx, fmt, args...) \ | ||
| 69 | dev_info(&((efx)->pci_dev->dev), "INFO: %s " fmt, NET_DEV_NAME(efx), ##args) | ||
| 70 | |||
| 71 | #ifdef EFX_ENABLE_DEBUG | ||
| 72 | #define EFX_LOG(efx, fmt, args...) \ | ||
| 73 | dev_info(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | ||
| 74 | #else | ||
| 75 | #define EFX_LOG(efx, fmt, args...) \ | ||
| 76 | dev_dbg(&((efx)->pci_dev->dev), "DBG: %s " fmt, NET_DEV_NAME(efx), ##args) | ||
| 77 | #endif | ||
| 78 | |||
| 79 | #define EFX_TRACE(efx, fmt, args...) do {} while (0) | ||
| 80 | |||
| 81 | #define EFX_REGDUMP(efx, fmt, args...) do {} while (0) | ||
| 82 | |||
| 83 | /* Rate-limited logging */ | ||
| 84 | #define EFX_ERR_RL(efx, fmt, args...) \ | ||
| 85 | do {if (net_ratelimit()) EFX_ERR(efx, fmt, ##args); } while (0) | ||
| 86 | |||
| 87 | #define EFX_INFO_RL(efx, fmt, args...) \ | ||
| 88 | do {if (net_ratelimit()) EFX_INFO(efx, fmt, ##args); } while (0) | ||
| 89 | |||
| 90 | #define EFX_LOG_RL(efx, fmt, args...) \ | ||
| 91 | do {if (net_ratelimit()) EFX_LOG(efx, fmt, ##args); } while (0) | ||
| 92 | |||
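As a usage sketch only (the helper below is hypothetical, not part of this patch), the logging macros take the efx context followed by a printf-style format and arguments:

/* Hypothetical call site, purely to illustrate EFX_ERR()/EFX_LOG(). */
static void example_report_reset(struct efx_nic *efx, int rc)
{
	if (rc)
		EFX_ERR(efx, "hardware reset failed (rc=%d)\n", rc);
	else
		EFX_LOG(efx, "hardware reset complete\n");
}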
| 93 | /* Kernel headers may redefine inline anyway */ | ||
| 94 | #ifndef inline | ||
| 95 | #define inline inline __attribute__ ((always_inline)) | ||
| 96 | #endif | ||
| 97 | |||
| 98 | /************************************************************************** | ||
| 99 | * | ||
| 100 | * Efx data structures | ||
| 101 | * | ||
| 102 | **************************************************************************/ | ||
| 103 | |||
| 104 | #define EFX_MAX_CHANNELS 32 | ||
| 105 | #define EFX_MAX_TX_QUEUES 1 | ||
| 106 | #define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS | ||
| 107 | |||
| 108 | /** | ||
| 109 | * struct efx_special_buffer - An Efx special buffer | ||
| 110 | * @addr: CPU base address of the buffer | ||
| 111 | * @dma_addr: DMA base address of the buffer | ||
| 112 | * @len: Buffer length, in bytes | ||
| 113 | * @index: Buffer index within controller's buffer table | ||
| 114 | * @entries: Number of buffer table entries | ||
| 115 | * | ||
| 116 | * Special buffers are used for the event queues and the TX and RX | ||
| 117 | * descriptor queues for each channel. They are *not* used for the | ||
| 118 | * actual transmit and receive buffers. | ||
| 119 | * | ||
| 120 | * Note that for Falcon, TX and RX descriptor queues live in host memory. | ||
| 121 | * Allocation and freeing procedures must take this into account. | ||
| 122 | */ | ||
| 123 | struct efx_special_buffer { | ||
| 124 | void *addr; | ||
| 125 | dma_addr_t dma_addr; | ||
| 126 | unsigned int len; | ||
| 127 | int index; | ||
| 128 | int entries; | ||
| 129 | }; | ||
| 130 | |||
| 131 | /** | ||
| 132 | * struct efx_tx_buffer - An Efx TX buffer | ||
| 133 | * @skb: The associated socket buffer. | ||
| 134 | * Set only on the final fragment of a packet; %NULL for all other | ||
| 135 | * fragments. When this fragment completes, then we can free this | ||
| 136 | * skb. | ||
| 137 | * @dma_addr: DMA address of the fragment. | ||
| 138 | * @len: Length of this fragment. | ||
| 139 | * This field is zero when the queue slot is empty. | ||
| 140 | * @continuation: True if this fragment is not the end of a packet. | ||
| 141 | * @unmap_single: True if pci_unmap_single should be used. | ||
| 142 | * @unmap_addr: DMA address to unmap | ||
| 143 | * @unmap_len: Length of this fragment to unmap | ||
| 144 | */ | ||
| 145 | struct efx_tx_buffer { | ||
| 146 | const struct sk_buff *skb; | ||
| 147 | dma_addr_t dma_addr; | ||
| 148 | unsigned short len; | ||
| 149 | unsigned char continuation; | ||
| 150 | unsigned char unmap_single; | ||
| 151 | dma_addr_t unmap_addr; | ||
| 152 | unsigned short unmap_len; | ||
| 153 | }; | ||
| 154 | |||
| 155 | /** | ||
| 156 | * struct efx_tx_queue - An Efx TX queue | ||
| 157 | * | ||
| 158 | * This is a ring buffer of TX fragments. | ||
| 159 | * Since the TX completion path always executes on the same | ||
| 160 | * CPU and the xmit path can operate on different CPUs, | ||
| 161 | * performance is increased by ensuring that the completion | ||
| 162 | * path and the xmit path operate on different cache lines. | ||
| 163 | * This is particularly important if the xmit path is always | ||
| 164 | * executing on one CPU which is different from the completion | ||
| 165 | * path. There is also a cache line for members which are | ||
| 166 | * read but not written on the fast path. | ||
| 167 | * | ||
| 168 | * @efx: The associated Efx NIC | ||
| 169 | * @queue: DMA queue number | ||
| 170 | * @used: Queue is used by net driver | ||
| 171 | * @channel: The associated channel | ||
| 172 | * @buffer: The software buffer ring | ||
| 173 | * @txd: The hardware descriptor ring | ||
| 174 | * @read_count: Current read pointer. | ||
| 175 | * This is the number of buffers that have been removed from both rings. | ||
| 176 | * @stopped: Stopped flag. | ||
| 177 | * Set if this TX queue is currently stopping its port. | ||
| 178 | * @insert_count: Current insert pointer | ||
| 179 | * This is the number of buffers that have been added to the | ||
| 180 | * software ring. | ||
| 181 | * @write_count: Current write pointer | ||
| 182 | * This is the number of buffers that have been added to the | ||
| 183 | * hardware ring. | ||
| 184 | * @old_read_count: The value of read_count when last checked. | ||
| 185 | * This is here for performance reasons. The xmit path will | ||
| 186 | * only get the up-to-date value of read_count if this | ||
| 187 | * variable indicates that the queue is full. This is to | ||
| 188 | * avoid cache-line ping-pong between the xmit path and the | ||
| 189 | * completion path. | ||
| 190 | */ | ||
| 191 | struct efx_tx_queue { | ||
| 192 | /* Members which don't change on the fast path */ | ||
| 193 | struct efx_nic *efx ____cacheline_aligned_in_smp; | ||
| 194 | int queue; | ||
| 195 | int used; | ||
| 196 | struct efx_channel *channel; | ||
| 197 | struct efx_nic *nic; | ||
| 198 | struct efx_tx_buffer *buffer; | ||
| 199 | struct efx_special_buffer txd; | ||
| 200 | |||
| 201 | /* Members used mainly on the completion path */ | ||
| 202 | unsigned int read_count ____cacheline_aligned_in_smp; | ||
| 203 | int stopped; | ||
| 204 | |||
| 205 | /* Members used only on the xmit path */ | ||
| 206 | unsigned int insert_count ____cacheline_aligned_in_smp; | ||
| 207 | unsigned int write_count; | ||
| 208 | unsigned int old_read_count; | ||
| 209 | }; | ||
| 210 | |||
| 211 | /** | ||
| 212 | * struct efx_rx_buffer - An Efx RX data buffer | ||
| 213 | * @dma_addr: DMA base address of the buffer | ||
| 214 | * @skb: The associated socket buffer, if any. | ||
| 215 | * If both this and page are %NULL, the buffer slot is currently free. | ||
| 216 | * @page: The associated page buffer, if any. | ||
| 217 | * If both this and skb are %NULL, the buffer slot is currently free. | ||
| 218 | * @data: Pointer to ethernet header | ||
| 219 | * @len: Buffer length, in bytes. | ||
| 220 | * @unmap_addr: DMA address to unmap | ||
| 221 | */ | ||
| 222 | struct efx_rx_buffer { | ||
| 223 | dma_addr_t dma_addr; | ||
| 224 | struct sk_buff *skb; | ||
| 225 | struct page *page; | ||
| 226 | char *data; | ||
| 227 | unsigned int len; | ||
| 228 | dma_addr_t unmap_addr; | ||
| 229 | }; | ||
| 230 | |||
| 231 | /** | ||
| 232 | * struct efx_rx_queue - An Efx RX queue | ||
| 233 | * @efx: The associated Efx NIC | ||
| 234 | * @queue: DMA queue number | ||
| 235 | * @used: Queue is used by net driver | ||
| 236 | * @channel: The associated channel | ||
| 237 | * @buffer: The software buffer ring | ||
| 238 | * @rxd: The hardware descriptor ring | ||
| 239 | * @added_count: Number of buffers added to the receive queue. | ||
| 240 | * @notified_count: Number of buffers given to NIC (<= @added_count). | ||
| 241 | * @removed_count: Number of buffers removed from the receive queue. | ||
| 242 | * @add_lock: Receive queue descriptor add spin lock. | ||
| 243 | * This lock must be held in order to add buffers to the RX | ||
| 244 | * descriptor ring (rxd and buffer) and to update added_count (but | ||
| 245 | * not removed_count). | ||
| 246 | * @max_fill: RX descriptor maximum fill level (<= ring size) | ||
| 247 | * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill | ||
| 248 | * (<= @max_fill) | ||
| 249 | * @fast_fill_limit: The level to which a fast fill will fill | ||
| 250 | * (@fast_fill_trigger <= @fast_fill_limit <= @max_fill) | ||
| 251 | * @min_fill: RX descriptor minimum non-zero fill level. | ||
| 252 | * This records the minimum fill level observed when a ring | ||
| 253 | * refill was triggered. | ||
| 254 | * @min_overfill: RX descriptor minimum overflow fill level. | ||
| 255 | * This records the minimum fill level at which RX queue | ||
| 256 | * overflow was observed. It should never be set. | ||
| 257 | * @alloc_page_count: RX allocation strategy counter. | ||
| 258 | * @alloc_skb_count: RX allocation strategy counter. | ||
| 259 | * @work: Descriptor push work thread | ||
| 260 | * @buf_page: Page for next RX buffer. | ||
| 261 | * We can use a single page for multiple RX buffers. This tracks | ||
| 262 | * the remaining space in the allocation. | ||
| 263 | * @buf_dma_addr: Page's DMA address. | ||
| 264 | * @buf_data: Page's host address. | ||
| 265 | */ | ||
| 266 | struct efx_rx_queue { | ||
| 267 | struct efx_nic *efx; | ||
| 268 | int queue; | ||
| 269 | int used; | ||
| 270 | struct efx_channel *channel; | ||
| 271 | struct efx_rx_buffer *buffer; | ||
| 272 | struct efx_special_buffer rxd; | ||
| 273 | |||
| 274 | int added_count; | ||
| 275 | int notified_count; | ||
| 276 | int removed_count; | ||
| 277 | spinlock_t add_lock; | ||
| 278 | unsigned int max_fill; | ||
| 279 | unsigned int fast_fill_trigger; | ||
| 280 | unsigned int fast_fill_limit; | ||
| 281 | unsigned int min_fill; | ||
| 282 | unsigned int min_overfill; | ||
| 283 | unsigned int alloc_page_count; | ||
| 284 | unsigned int alloc_skb_count; | ||
| 285 | struct delayed_work work; | ||
| 286 | unsigned int slow_fill_count; | ||
| 287 | |||
| 288 | struct page *buf_page; | ||
| 289 | dma_addr_t buf_dma_addr; | ||
| 290 | char *buf_data; | ||
| 291 | }; | ||
| 292 | |||
| 293 | /** | ||
| 294 | * struct efx_buffer - An Efx general-purpose buffer | ||
| 295 | * @addr: host base address of the buffer | ||
| 296 | * @dma_addr: DMA base address of the buffer | ||
| 297 | * @len: Buffer length, in bytes | ||
| 298 | * | ||
| 299 | * Falcon uses these buffers for its interrupt status registers and | ||
| 300 | * MAC stats dumps. | ||
| 301 | */ | ||
| 302 | struct efx_buffer { | ||
| 303 | void *addr; | ||
| 304 | dma_addr_t dma_addr; | ||
| 305 | unsigned int len; | ||
| 306 | }; | ||
| 307 | |||
| 308 | |||
| 309 | /* Flags for channel->used_flags */ | ||
| 310 | #define EFX_USED_BY_RX 1 | ||
| 311 | #define EFX_USED_BY_TX 2 | ||
| 312 | #define EFX_USED_BY_RX_TX (EFX_USED_BY_RX | EFX_USED_BY_TX) | ||
| 313 | |||
| 314 | enum efx_rx_alloc_method { | ||
| 315 | RX_ALLOC_METHOD_AUTO = 0, | ||
| 316 | RX_ALLOC_METHOD_SKB = 1, | ||
| 317 | RX_ALLOC_METHOD_PAGE = 2, | ||
| 318 | }; | ||
| 319 | |||
| 320 | /** | ||
| 321 | * struct efx_channel - An Efx channel | ||
| 322 | * | ||
| 323 | * A channel comprises an event queue, at least one TX queue, at least | ||
| 324 | * one RX queue, and an associated tasklet for processing the event | ||
| 325 | * queue. | ||
| 326 | * | ||
| 327 | * @efx: Associated Efx NIC | ||
| 328 | * @evqnum: Event queue number | ||
| 329 | * @channel: Channel instance number | ||
| 330 | * @used_flags: Channel is used by net driver | ||
| 331 | * @enabled: Channel enabled indicator | ||
| 332 | * @irq: IRQ number (MSI and MSI-X only) | ||
| 333 | * @has_interrupt: Channel has an interrupt | ||
| 334 | * @irq_moderation: IRQ moderation value (in us) | ||
| 335 | * @napi_dev: Net device used with NAPI | ||
| 336 | * @napi_str: NAPI control structure | ||
| 337 | * @reset_work: Scheduled reset work thread | ||
| 338 | * @work_pending: Is work pending via NAPI? | ||
| 339 | * @eventq: Event queue buffer | ||
| 340 | * @eventq_read_ptr: Event queue read pointer | ||
| 341 | * @last_eventq_read_ptr: Last event queue read pointer value. | ||
| 342 | * @eventq_magic: Event queue magic value for driver-generated test events | ||
| 343 | * @lro_mgr: LRO state | ||
| 344 | * @rx_alloc_level: Watermark based heuristic counter for pushing descriptors | ||
| 345 | * and diagnostic counters | ||
| 346 | * @rx_alloc_push_pages: RX allocation method currently in use for pushing | ||
| 347 | * descriptors | ||
| 348 | * @rx_alloc_pop_pages: RX allocation method currently in use for popping | ||
| 349 | * descriptors | ||
| 350 | * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors | ||
| 351 | * @n_rx_ip_frag_err: Count of RX IP fragment errors | ||
| 352 | * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors | ||
| 353 | * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors | ||
| 354 | * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors | ||
| 355 | * @n_rx_overlength: Count of RX_OVERLENGTH errors | ||
| 356 | * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun | ||
| 357 | */ | ||
| 358 | struct efx_channel { | ||
| 359 | struct efx_nic *efx; | ||
| 360 | int evqnum; | ||
| 361 | int channel; | ||
| 362 | int used_flags; | ||
| 363 | int enabled; | ||
| 364 | int irq; | ||
| 365 | unsigned int has_interrupt; | ||
| 366 | unsigned int irq_moderation; | ||
| 367 | struct net_device *napi_dev; | ||
| 368 | struct napi_struct napi_str; | ||
| 369 | struct work_struct reset_work; | ||
| 370 | int work_pending; | ||
| 371 | struct efx_special_buffer eventq; | ||
| 372 | unsigned int eventq_read_ptr; | ||
| 373 | unsigned int last_eventq_read_ptr; | ||
| 374 | unsigned int eventq_magic; | ||
| 375 | |||
| 376 | struct net_lro_mgr lro_mgr; | ||
| 377 | int rx_alloc_level; | ||
| 378 | int rx_alloc_push_pages; | ||
| 379 | int rx_alloc_pop_pages; | ||
| 380 | |||
| 381 | unsigned n_rx_tobe_disc; | ||
| 382 | unsigned n_rx_ip_frag_err; | ||
| 383 | unsigned n_rx_ip_hdr_chksum_err; | ||
| 384 | unsigned n_rx_tcp_udp_chksum_err; | ||
| 385 | unsigned n_rx_frm_trunc; | ||
| 386 | unsigned n_rx_overlength; | ||
| 387 | unsigned n_skbuff_leaks; | ||
| 388 | |||
| 389 | /* Used to pipeline received packets in order to optimise memory | ||
| 390 | * access with prefetches. | ||
| 391 | */ | ||
| 392 | struct efx_rx_buffer *rx_pkt; | ||
| 393 | int rx_pkt_csummed; | ||
| 394 | |||
| 395 | }; | ||
| 396 | |||
| 397 | /** | ||
| 398 | * struct efx_blinker - S/W LED blinking context | ||
| 399 | * @led_num: LED ID (board-specific meaning) | ||
| 400 | * @state: Current state - on or off | ||
| 401 | * @resubmit: Timer resubmission flag | ||
| 402 | * @timer: Control timer for blinking | ||
| 403 | */ | ||
| 404 | struct efx_blinker { | ||
| 405 | int led_num; | ||
| 406 | int state; | ||
| 407 | int resubmit; | ||
| 408 | struct timer_list timer; | ||
| 409 | }; | ||
| 410 | |||
| 411 | |||
| 412 | /** | ||
| 413 | * struct efx_board - board information | ||
| 414 | * @type: Board model type | ||
| 415 | * @major: Major rev. ('A', 'B' ...) | ||
| 416 | * @minor: Minor rev. (0, 1, ...) | ||
| 417 | * @init: Initialisation function | ||
| 418 | * @init_leds: Sets up board LEDs | ||
| 419 | * @set_fault_led: Turns the fault LED on or off | ||
| 420 | * @blink: Starts/stops blinking | ||
| 421 | * @blinker: used to blink LEDs in software | ||
| 422 | */ | ||
| 423 | struct efx_board { | ||
| 424 | int type; | ||
| 425 | int major; | ||
| 426 | int minor; | ||
| 427 | int (*init) (struct efx_nic *nic); | ||
| 428 | /* As the LEDs are typically attached to the PHY, LEDs | ||
| 429 | * have a separate init callback that happens later than | ||
| 430 | * board init. */ | ||
| 431 | int (*init_leds)(struct efx_nic *efx); | ||
| 432 | void (*set_fault_led) (struct efx_nic *efx, int state); | ||
| 433 | void (*blink) (struct efx_nic *efx, int start); | ||
| 434 | struct efx_blinker blinker; | ||
| 435 | }; | ||
| 436 | |||
| 437 | enum efx_int_mode { | ||
| 438 | /* Be careful if altering to correct macro below */ | ||
| 439 | EFX_INT_MODE_MSIX = 0, | ||
| 440 | EFX_INT_MODE_MSI = 1, | ||
| 441 | EFX_INT_MODE_LEGACY = 2, | ||
| 442 | EFX_INT_MODE_MAX /* Insert any new items before this */ | ||
| 443 | }; | ||
| 444 | #define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI) | ||
| 445 | |||
| 446 | enum phy_type { | ||
| 447 | PHY_TYPE_NONE = 0, | ||
| 448 | PHY_TYPE_CX4_RTMR = 1, | ||
| 449 | PHY_TYPE_1G_ALASKA = 2, | ||
| 450 | PHY_TYPE_10XPRESS = 3, | ||
| 451 | PHY_TYPE_XFP = 4, | ||
| 452 | PHY_TYPE_PM8358 = 6, | ||
| 453 | PHY_TYPE_MAX /* Insert any new items before this */ | ||
| 454 | }; | ||
| 455 | |||
| 456 | #define PHY_ADDR_INVALID 0xff | ||
| 457 | |||
| 458 | enum nic_state { | ||
| 459 | STATE_INIT = 0, | ||
| 460 | STATE_RUNNING = 1, | ||
| 461 | STATE_FINI = 2, | ||
| 462 | STATE_RESETTING = 3, /* rtnl_lock always held */ | ||
| 463 | STATE_DISABLED = 4, | ||
| 464 | STATE_MAX, | ||
| 465 | }; | ||
| 466 | |||
| 467 | /* | ||
| 468 | * Alignment of page-allocated RX buffers | ||
| 469 | * | ||
| 470 | * Controls the number of bytes inserted at the start of an RX buffer. | ||
| 471 | * This is the equivalent of NET_IP_ALIGN [which controls the alignment | ||
| 472 | * of the skb->head for hardware DMA]. | ||
| 473 | */ | ||
| 474 | #if defined(__i386__) || defined(__x86_64__) | ||
| 475 | #define EFX_PAGE_IP_ALIGN 0 | ||
| 476 | #else | ||
| 477 | #define EFX_PAGE_IP_ALIGN NET_IP_ALIGN | ||
| 478 | #endif | ||
| 479 | |||
| 480 | /* | ||
| 481 | * Alignment of the skb->head which wraps a page-allocated RX buffer | ||
| 482 | * | ||
| 483 | * The skb allocated to wrap an rx_buffer can have this alignment. Since | ||
| 484 | * the data is memcpy'd from the rx_buf, it does not need to be equal to | ||
| 485 | * EFX_PAGE_IP_ALIGN. | ||
| 486 | */ | ||
| 487 | #define EFX_PAGE_SKB_ALIGN 2 | ||
| 488 | |||
| 489 | /* Forward declaration */ | ||
| 490 | struct efx_nic; | ||
| 491 | |||
| 492 | /* Pseudo bit-mask flow control field */ | ||
| 493 | enum efx_fc_type { | ||
| 494 | EFX_FC_RX = 1, | ||
| 495 | EFX_FC_TX = 2, | ||
| 496 | EFX_FC_AUTO = 4, | ||
| 497 | }; | ||
| 498 | |||
| 499 | /** | ||
| 500 | * struct efx_phy_operations - Efx PHY operations table | ||
| 501 | * @init: Initialise PHY | ||
| 502 | * @fini: Shut down PHY | ||
| 503 | * @reconfigure: Reconfigure PHY (e.g. for new link parameters) | ||
| 504 | * @clear_interrupt: Clear down interrupt | ||
| 505 | * @blink: Blink LEDs | ||
| 506 | * @check_hw: Check hardware | ||
| 507 | * @reset_xaui: Reset XAUI side of PHY (for software-sequenced resets) | ||
| 508 | * @mmds: MMD presence mask | ||
| 509 | */ | ||
| 510 | struct efx_phy_operations { | ||
| 511 | int (*init) (struct efx_nic *efx); | ||
| 512 | void (*fini) (struct efx_nic *efx); | ||
| 513 | void (*reconfigure) (struct efx_nic *efx); | ||
| 514 | void (*clear_interrupt) (struct efx_nic *efx); | ||
| 515 | int (*check_hw) (struct efx_nic *efx); | ||
| 516 | void (*reset_xaui) (struct efx_nic *efx); | ||
| 517 | int mmds; | ||
| 518 | }; | ||
| 519 | |||
| 520 | /* | ||
| 521 | * Efx extended statistics | ||
| 522 | * | ||
| 523 | * Not all statistics are provided by all supported MACs. The purpose | ||
| 524 | * of this structure is to contain the raw statistics provided by each | ||
| 525 | * MAC. | ||
| 526 | */ | ||
| 527 | struct efx_mac_stats { | ||
| 528 | u64 tx_bytes; | ||
| 529 | u64 tx_good_bytes; | ||
| 530 | u64 tx_bad_bytes; | ||
| 531 | unsigned long tx_packets; | ||
| 532 | unsigned long tx_bad; | ||
| 533 | unsigned long tx_pause; | ||
| 534 | unsigned long tx_control; | ||
| 535 | unsigned long tx_unicast; | ||
| 536 | unsigned long tx_multicast; | ||
| 537 | unsigned long tx_broadcast; | ||
| 538 | unsigned long tx_lt64; | ||
| 539 | unsigned long tx_64; | ||
| 540 | unsigned long tx_65_to_127; | ||
| 541 | unsigned long tx_128_to_255; | ||
| 542 | unsigned long tx_256_to_511; | ||
| 543 | unsigned long tx_512_to_1023; | ||
| 544 | unsigned long tx_1024_to_15xx; | ||
| 545 | unsigned long tx_15xx_to_jumbo; | ||
| 546 | unsigned long tx_gtjumbo; | ||
| 547 | unsigned long tx_collision; | ||
| 548 | unsigned long tx_single_collision; | ||
| 549 | unsigned long tx_multiple_collision; | ||
| 550 | unsigned long tx_excessive_collision; | ||
| 551 | unsigned long tx_deferred; | ||
| 552 | unsigned long tx_late_collision; | ||
| 553 | unsigned long tx_excessive_deferred; | ||
| 554 | unsigned long tx_non_tcpudp; | ||
| 555 | unsigned long tx_mac_src_error; | ||
| 556 | unsigned long tx_ip_src_error; | ||
| 557 | u64 rx_bytes; | ||
| 558 | u64 rx_good_bytes; | ||
| 559 | u64 rx_bad_bytes; | ||
| 560 | unsigned long rx_packets; | ||
| 561 | unsigned long rx_good; | ||
| 562 | unsigned long rx_bad; | ||
| 563 | unsigned long rx_pause; | ||
| 564 | unsigned long rx_control; | ||
| 565 | unsigned long rx_unicast; | ||
| 566 | unsigned long rx_multicast; | ||
| 567 | unsigned long rx_broadcast; | ||
| 568 | unsigned long rx_lt64; | ||
| 569 | unsigned long rx_64; | ||
| 570 | unsigned long rx_65_to_127; | ||
| 571 | unsigned long rx_128_to_255; | ||
| 572 | unsigned long rx_256_to_511; | ||
| 573 | unsigned long rx_512_to_1023; | ||
| 574 | unsigned long rx_1024_to_15xx; | ||
| 575 | unsigned long rx_15xx_to_jumbo; | ||
| 576 | unsigned long rx_gtjumbo; | ||
| 577 | unsigned long rx_bad_lt64; | ||
| 578 | unsigned long rx_bad_64_to_15xx; | ||
| 579 | unsigned long rx_bad_15xx_to_jumbo; | ||
| 580 | unsigned long rx_bad_gtjumbo; | ||
| 581 | unsigned long rx_overflow; | ||
| 582 | unsigned long rx_missed; | ||
| 583 | unsigned long rx_false_carrier; | ||
| 584 | unsigned long rx_symbol_error; | ||
| 585 | unsigned long rx_align_error; | ||
| 586 | unsigned long rx_length_error; | ||
| 587 | unsigned long rx_internal_error; | ||
| 588 | unsigned long rx_good_lt64; | ||
| 589 | }; | ||
| 590 | |||
| 591 | /* Number of bits used in a multicast filter hash address */ | ||
| 592 | #define EFX_MCAST_HASH_BITS 8 | ||
| 593 | |||
| 594 | /* Number of (single-bit) entries in a multicast filter hash */ | ||
| 595 | #define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS) | ||
| 596 | |||
| 597 | /* An Efx multicast filter hash */ | ||
| 598 | union efx_multicast_hash { | ||
| 599 | u8 byte[EFX_MCAST_HASH_ENTRIES / 8]; | ||
| 600 | efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8]; | ||
| 601 | }; | ||
| 602 | |||
| 603 | /** | ||
| 604 | * struct efx_nic - an Efx NIC | ||
| 605 | * @name: Device name (net device name or bus id before net device registered) | ||
| 606 | * @pci_dev: The PCI device | ||
| 607 | * @type: Controller type attributes | ||
| 608 | * @legacy_irq: IRQ number | ||
| 609 | * @workqueue: Workqueue for resets, port reconfigures and the HW monitor | ||
| 610 | * @reset_work: Scheduled reset workitem | ||
| 611 | * @monitor_work: Hardware monitor workitem | ||
| 612 | * @membase_phys: Memory BAR value as physical address | ||
| 613 | * @membase: Memory BAR value | ||
| 614 | * @biu_lock: BIU (bus interface unit) lock | ||
| 615 | * @interrupt_mode: Interrupt mode | ||
| 616 | * @i2c: I2C interface | ||
| 617 | * @board_info: Board-level information | ||
| 618 | * @state: Device state flag. Serialised by the rtnl_lock. | ||
| 619 | * @reset_pending: Pending reset method (normally RESET_TYPE_NONE) | ||
| 620 | * @tx_queue: TX DMA queues | ||
| 621 | * @rx_queue: RX DMA queues | ||
| 622 | * @channel: Channels | ||
| 623 | * @rss_queues: Number of RSS queues | ||
| 624 | * @rx_buffer_len: RX buffer length | ||
| 625 | * @rx_buffer_order: Order (log2) of number of pages for each RX buffer | ||
| 626 | * @irq_status: Interrupt status buffer | ||
| 627 | * @last_irq_cpu: Last CPU to handle interrupt. | ||
| 628 | * This register is written with the SMP processor ID whenever an | ||
| 629 | * interrupt is handled. It is used by falcon_test_interrupt() | ||
| 630 | * to verify that an interrupt has occurred. | ||
| 631 | * @n_rx_nodesc_drop_cnt: RX no descriptor drop count | ||
| 632 | * @nic_data: Hardware dependent state | ||
| 633 | * @mac_lock: MAC access lock. Protects @port_enabled, efx_monitor() and | ||
| 634 | * efx_reconfigure_port() | ||
| 635 | * @port_enabled: Port enabled indicator. | ||
| 636 | * Serialises efx_stop_all(), efx_start_all(), efx_monitor() and | ||
| 637 | * efx_reconfigure_work with kernel interfaces. Safe to read under any | ||
| 638 | * one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must | ||
| 639 | * be held to modify it. | ||
| 640 | * @port_initialized: Port initialized? | ||
| 641 | * @net_dev: Operating system network device. Consider holding the rtnl lock | ||
| 642 | * @rx_checksum_enabled: RX checksumming enabled | ||
| 643 | * @netif_stop_count: Port stop count | ||
| 644 | * @netif_stop_lock: Port stop lock | ||
| 645 | * @mac_stats: MAC statistics. These include all statistics the MACs | ||
| 646 | * can provide. Generic code converts these into a standard | ||
| 647 | * &struct net_device_stats. | ||
| 648 | * @stats_buffer: DMA buffer for statistics | ||
| 649 | * @stats_lock: Statistics update lock | ||
| 650 | * @mac_address: Permanent MAC address | ||
| 651 | * @phy_type: PHY type | ||
| 652 | * @phy_lock: PHY access lock | ||
| 653 | * @phy_op: PHY interface | ||
| 654 | * @phy_data: PHY private data (including PHY-specific stats) | ||
| 655 | * @mii: PHY interface | ||
| 656 | * @phy_powered: PHY power state | ||
| 657 | * @tx_disabled: PHY transmitter turned off | ||
| 658 | * @link_up: Link status | ||
| 659 | * @link_options: Link options (MII/GMII format) | ||
| 660 | * @n_link_state_changes: Number of times the link has changed state | ||
| 661 | * @promiscuous: Promiscuous flag. Protected by netif_tx_lock. | ||
| 662 | * @multicast_hash: Multicast hash table | ||
| 663 | * @flow_control: Flow control flags - separate RX/TX so can't use link_options | ||
| 664 | * @reconfigure_work: work item for dealing with PHY events | ||
| 665 | * | ||
| 666 | * The @priv field of the corresponding &struct net_device points to | ||
| 667 | * this. | ||
| 668 | */ | ||
| 669 | struct efx_nic { | ||
| 670 | char name[IFNAMSIZ]; | ||
| 671 | struct pci_dev *pci_dev; | ||
| 672 | const struct efx_nic_type *type; | ||
| 673 | int legacy_irq; | ||
| 674 | struct workqueue_struct *workqueue; | ||
| 675 | struct work_struct reset_work; | ||
| 676 | struct delayed_work monitor_work; | ||
| 677 | unsigned long membase_phys; | ||
| 678 | void __iomem *membase; | ||
| 679 | spinlock_t biu_lock; | ||
| 680 | enum efx_int_mode interrupt_mode; | ||
| 681 | |||
| 682 | struct efx_i2c_interface i2c; | ||
| 683 | struct efx_board board_info; | ||
| 684 | |||
| 685 | enum nic_state state; | ||
| 686 | enum reset_type reset_pending; | ||
| 687 | |||
| 688 | struct efx_tx_queue tx_queue[EFX_MAX_TX_QUEUES]; | ||
| 689 | struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; | ||
| 690 | struct efx_channel channel[EFX_MAX_CHANNELS]; | ||
| 691 | |||
| 692 | int rss_queues; | ||
| 693 | unsigned int rx_buffer_len; | ||
| 694 | unsigned int rx_buffer_order; | ||
| 695 | |||
| 696 | struct efx_buffer irq_status; | ||
| 697 | volatile signed int last_irq_cpu; | ||
| 698 | |||
| 699 | unsigned n_rx_nodesc_drop_cnt; | ||
| 700 | |||
| 701 | void *nic_data; | ||
| 702 | |||
| 703 | struct mutex mac_lock; | ||
| 704 | int port_enabled; | ||
| 705 | |||
| 706 | int port_initialized; | ||
| 707 | struct net_device *net_dev; | ||
| 708 | int rx_checksum_enabled; | ||
| 709 | |||
| 710 | atomic_t netif_stop_count; | ||
| 711 | spinlock_t netif_stop_lock; | ||
| 712 | |||
| 713 | struct efx_mac_stats mac_stats; | ||
| 714 | struct efx_buffer stats_buffer; | ||
| 715 | spinlock_t stats_lock; | ||
| 716 | |||
| 717 | unsigned char mac_address[ETH_ALEN]; | ||
| 718 | |||
| 719 | enum phy_type phy_type; | ||
| 720 | spinlock_t phy_lock; | ||
| 721 | struct efx_phy_operations *phy_op; | ||
| 722 | void *phy_data; | ||
| 723 | struct mii_if_info mii; | ||
| 724 | |||
| 725 | int link_up; | ||
| 726 | unsigned int link_options; | ||
| 727 | unsigned int n_link_state_changes; | ||
| 728 | |||
| 729 | int promiscuous; | ||
| 730 | union efx_multicast_hash multicast_hash; | ||
| 731 | enum efx_fc_type flow_control; | ||
| 732 | struct work_struct reconfigure_work; | ||
| 733 | |||
| 734 | atomic_t rx_reset; | ||
| 735 | }; | ||
| 736 | |||
| 737 | /** | ||
| 738 | * struct efx_nic_type - Efx device type definition | ||
| 739 | * @mem_bar: Memory BAR number | ||
| 740 | * @mem_map_size: Memory BAR mapped size | ||
| 741 | * @txd_ptr_tbl_base: TX descriptor ring base address | ||
| 742 | * @rxd_ptr_tbl_base: RX descriptor ring base address | ||
| 743 | * @buf_tbl_base: Buffer table base address | ||
| 744 | * @evq_ptr_tbl_base: Event queue pointer table base address | ||
| 745 | * @evq_rptr_tbl_base: Event queue read-pointer table base address | ||
| 746 | * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1) | ||
| 747 | * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1) | ||
| 748 | * @evq_size: Event queue size (must be a power of two) | ||
| 749 | * @max_dma_mask: Maximum possible DMA mask | ||
| 750 | * @tx_dma_mask: TX DMA mask | ||
| 751 | * @bug5391_mask: Address mask for bug 5391 workaround | ||
| 752 | * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes) | ||
| 753 | * @rx_xon_thresh: RX FIFO XON watermark (bytes) | ||
| 754 | * @rx_buffer_padding: Padding added to each RX buffer | ||
| 755 | * @max_interrupt_mode: Highest capability interrupt mode supported | ||
| 756 | * from &enum efx_int_mode. | ||
| 757 | * @phys_addr_channels: Number of channels with physically addressed | ||
| 758 | * descriptors | ||
| 759 | */ | ||
| 760 | struct efx_nic_type { | ||
| 761 | unsigned int mem_bar; | ||
| 762 | unsigned int mem_map_size; | ||
| 763 | unsigned int txd_ptr_tbl_base; | ||
| 764 | unsigned int rxd_ptr_tbl_base; | ||
| 765 | unsigned int buf_tbl_base; | ||
| 766 | unsigned int evq_ptr_tbl_base; | ||
| 767 | unsigned int evq_rptr_tbl_base; | ||
| 768 | |||
| 769 | unsigned int txd_ring_mask; | ||
| 770 | unsigned int rxd_ring_mask; | ||
| 771 | unsigned int evq_size; | ||
| 772 | dma_addr_t max_dma_mask; | ||
| 773 | unsigned int tx_dma_mask; | ||
| 774 | unsigned bug5391_mask; | ||
| 775 | |||
| 776 | int rx_xoff_thresh; | ||
| 777 | int rx_xon_thresh; | ||
| 778 | unsigned int rx_buffer_padding; | ||
| 779 | unsigned int max_interrupt_mode; | ||
| 780 | unsigned int phys_addr_channels; | ||
| 781 | }; | ||
| 782 | |||
| 783 | /************************************************************************** | ||
| 784 | * | ||
| 785 | * Prototypes and inline functions | ||
| 786 | * | ||
| 787 | *************************************************************************/ | ||
| 788 | |||
| 789 | /* Iterate over all used channels */ | ||
| 790 | #define efx_for_each_channel(_channel, _efx) \ | ||
| 791 | for (_channel = &_efx->channel[0]; \ | ||
| 792 | _channel < &_efx->channel[EFX_MAX_CHANNELS]; \ | ||
| 793 | _channel++) \ | ||
| 794 | if (!_channel->used_flags) \ | ||
| 795 | continue; \ | ||
| 796 | else | ||
| 797 | |||
| 798 | /* Iterate over all used channels with interrupts */ | ||
| 799 | #define efx_for_each_channel_with_interrupt(_channel, _efx) \ | ||
| 800 | for (_channel = &_efx->channel[0]; \ | ||
| 801 | _channel < &_efx->channel[EFX_MAX_CHANNELS]; \ | ||
| 802 | _channel++) \ | ||
| 803 | if (!(_channel->used_flags && _channel->has_interrupt)) \ | ||
| 804 | continue; \ | ||
| 805 | else | ||
| 806 | |||
| 807 | /* Iterate over all used TX queues */ | ||
| 808 | #define efx_for_each_tx_queue(_tx_queue, _efx) \ | ||
| 809 | for (_tx_queue = &_efx->tx_queue[0]; \ | ||
| 810 | _tx_queue < &_efx->tx_queue[EFX_MAX_TX_QUEUES]; \ | ||
| 811 | _tx_queue++) \ | ||
| 812 | if (!_tx_queue->used) \ | ||
| 813 | continue; \ | ||
| 814 | else | ||
| 815 | |||
| 816 | /* Iterate over all TX queues belonging to a channel */ | ||
| 817 | #define efx_for_each_channel_tx_queue(_tx_queue, _channel) \ | ||
| 818 | for (_tx_queue = &_channel->efx->tx_queue[0]; \ | ||
| 819 | _tx_queue < &_channel->efx->tx_queue[EFX_MAX_TX_QUEUES]; \ | ||
| 820 | _tx_queue++) \ | ||
| 821 | if ((!_tx_queue->used) || \ | ||
| 822 | (_tx_queue->channel != _channel)) \ | ||
| 823 | continue; \ | ||
| 824 | else | ||
| 825 | |||
| 826 | /* Iterate over all used RX queues */ | ||
| 827 | #define efx_for_each_rx_queue(_rx_queue, _efx) \ | ||
| 828 | for (_rx_queue = &_efx->rx_queue[0]; \ | ||
| 829 | _rx_queue < &_efx->rx_queue[EFX_MAX_RX_QUEUES]; \ | ||
| 830 | _rx_queue++) \ | ||
| 831 | if (!_rx_queue->used) \ | ||
| 832 | continue; \ | ||
| 833 | else | ||
| 834 | |||
| 835 | /* Iterate over all RX queues belonging to a channel */ | ||
| 836 | #define efx_for_each_channel_rx_queue(_rx_queue, _channel) \ | ||
| 837 | for (_rx_queue = &_channel->efx->rx_queue[0]; \ | ||
| 838 | _rx_queue < &_channel->efx->rx_queue[EFX_MAX_RX_QUEUES]; \ | ||
| 839 | _rx_queue++) \ | ||
| 840 | if ((!_rx_queue->used) || \ | ||
| 841 | (_rx_queue->channel != _channel)) \ | ||
| 842 | continue; \ | ||
| 843 | else | ||
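A minimal sketch of how these iterators compose (the helper below is hypothetical and only illustrates the loop shape):

/* Illustrative only: the iterators skip unused channel/queue slots themselves. */
static void example_count_rx_queues(struct efx_nic *efx)
{
	struct efx_channel *channel;
	struct efx_rx_queue *rx_queue;
	unsigned int n_rxq = 0;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			++n_rxq;
	}
	EFX_LOG(efx, "%u RX queues in use\n", n_rxq);
}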
| 844 | |||
| 845 | /* Returns a pointer to the specified receive buffer in the RX | ||
| 846 | * descriptor queue. | ||
| 847 | */ | ||
| 848 | static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue, | ||
| 849 | unsigned int index) | ||
| 850 | { | ||
| 851 | return (&rx_queue->buffer[index]); | ||
| 852 | } | ||
| 853 | |||
| 854 | /* Set bit in a little-endian bitfield */ | ||
| 855 | static inline void set_bit_le(int nr, unsigned char *addr) | ||
| 856 | { | ||
| 857 | addr[nr / 8] |= (1 << (nr % 8)); | ||
| 858 | } | ||
| 859 | |||
| 860 | /* Clear bit in a little-endian bitfield */ | ||
| 861 | static inline void clear_bit_le(int nr, unsigned char *addr) | ||
| 862 | { | ||
| 863 | addr[nr / 8] &= ~(1 << (nr % 8)); | ||
| 864 | } | ||
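Since the multicast hash defined above is a little-endian bitfield of EFX_MCAST_HASH_ENTRIES bits, these helpers are the natural building block for filter updates; a hedged sketch (the helper name and the hash argument are illustrative only, not the driver's actual code):

/* Illustrative only: set the filter bit selected by the low 8 bits of a
 * precomputed multicast hash value. */
static inline void example_mcast_hash_set(union efx_multicast_hash *mc_hash,
					  u32 hash)
{
	set_bit_le(hash & (EFX_MCAST_HASH_ENTRIES - 1), mc_hash->byte);
}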
| 865 | |||
| 866 | |||
| 867 | /** | ||
| 868 | * EFX_MAX_FRAME_LEN - calculate maximum frame length | ||
| 869 | * | ||
| 870 | * This calculates the maximum frame length that will be used for a | ||
| 871 | * given MTU. The frame length will be equal to the MTU plus a | ||
| 872 | * constant amount of header space and padding. This is the quantity | ||
| 873 | * that the net driver will program into the MAC as the maximum frame | ||
| 874 | * length. | ||
| 875 | * | ||
| 876 | * The 10G MAC used in Falcon requires 8-byte alignment on the frame | ||
| 877 | * length, so we round up to the nearest 8. | ||
| 878 | */ | ||
| 879 | #define EFX_MAX_FRAME_LEN(mtu) \ | ||
| 880 | ((((mtu) + ETH_HLEN + VLAN_HLEN + 4/* FCS */) + 7) & ~7) | ||
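For example, assuming the standard values ETH_HLEN = 14 and VLAN_HLEN = 4, an MTU of 1500 gives 1500 + 14 + 4 + 4 (FCS) = 1522 bytes, which the macro rounds up to 1528 to meet the MAC's 8-byte alignment requirement.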
| 881 | |||
| 882 | |||
| 883 | #endif /* EFX_NET_DRIVER_H */ | ||
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h new file mode 100644 index 000000000000..9d02c84e6b2d --- /dev/null +++ b/drivers/net/sfc/phy.h | |||
| @@ -0,0 +1,48 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_PHY_H | ||
| 11 | #define EFX_PHY_H | ||
| 12 | |||
| 13 | /**************************************************************************** | ||
| 14 | * 10Xpress (SFX7101) PHY | ||
| 15 | */ | ||
| 16 | extern struct efx_phy_operations falcon_tenxpress_phy_ops; | ||
| 17 | |||
| 18 | enum tenxpress_state { | ||
| 19 | TENXPRESS_STATUS_OFF = 0, | ||
| 20 | TENXPRESS_STATUS_OTEMP = 1, | ||
| 21 | TENXPRESS_STATUS_NORMAL = 2, | ||
| 22 | }; | ||
| 23 | |||
| 24 | extern void tenxpress_set_state(struct efx_nic *efx, | ||
| 25 | enum tenxpress_state state); | ||
| 26 | extern void tenxpress_phy_blink(struct efx_nic *efx, int blink); | ||
| 27 | extern void tenxpress_crc_err(struct efx_nic *efx); | ||
| 28 | |||
| 29 | /**************************************************************************** | ||
| 30 | * Exported functions from the driver for XFP optical PHYs | ||
| 31 | */ | ||
| 32 | extern struct efx_phy_operations falcon_xfp_phy_ops; | ||
| 33 | |||
| 34 | /* The QUAKE XFP PHY provides various H/W control states for LEDs */ | ||
| 35 | #define QUAKE_LED_LINK_INVAL (0) | ||
| 36 | #define QUAKE_LED_LINK_STAT (1) | ||
| 37 | #define QUAKE_LED_LINK_ACT (2) | ||
| 38 | #define QUAKE_LED_LINK_ACTSTAT (3) | ||
| 39 | #define QUAKE_LED_OFF (4) | ||
| 40 | #define QUAKE_LED_ON (5) | ||
| 41 | #define QUAKE_LED_LINK_INPUT (6) /* Pin is an input. */ | ||
| 42 | /* What link the LED tracks */ | ||
| 43 | #define QUAKE_LED_TXLINK (0) | ||
| 44 | #define QUAKE_LED_RXLINK (8) | ||
| 45 | |||
| 46 | extern void xfp_set_led(struct efx_nic *p, int led, int state); | ||
| 47 | |||
| 48 | #endif | ||
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c new file mode 100644 index 000000000000..551299b462ae --- /dev/null +++ b/drivers/net/sfc/rx.c | |||
| @@ -0,0 +1,875 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2005-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/socket.h> | ||
| 12 | #include <linux/in.h> | ||
| 13 | #include <linux/ip.h> | ||
| 14 | #include <linux/tcp.h> | ||
| 15 | #include <linux/udp.h> | ||
| 16 | #include <net/ip.h> | ||
| 17 | #include <net/checksum.h> | ||
| 18 | #include "net_driver.h" | ||
| 19 | #include "rx.h" | ||
| 20 | #include "efx.h" | ||
| 21 | #include "falcon.h" | ||
| 22 | #include "workarounds.h" | ||
| 23 | |||
| 24 | /* Number of RX descriptors pushed at once. */ | ||
| 25 | #define EFX_RX_BATCH 8 | ||
| 26 | |||
| 27 | /* Size of buffer allocated for skb header area. */ | ||
| 28 | #define EFX_SKB_HEADERS 64u | ||
| 29 | |||
| 30 | /* | ||
| 31 | * rx_alloc_method - RX buffer allocation method | ||
| 32 | * | ||
| 33 | * This driver supports two methods for allocating and using RX buffers: | ||
| 34 | * each RX buffer may be backed by an skb or by an order-n page. | ||
| 35 | * | ||
| 36 | * When LRO is in use then the second method has a lower overhead, | ||
| 37 | * since we don't have to allocate then free skbs on reassembled frames. | ||
| 38 | * | ||
| 39 | * Values: | ||
| 40 | * - RX_ALLOC_METHOD_AUTO = 0 | ||
| 41 | * - RX_ALLOC_METHOD_SKB = 1 | ||
| 42 | * - RX_ALLOC_METHOD_PAGE = 2 | ||
| 43 | * | ||
| 44 | * The heuristic for %RX_ALLOC_METHOD_AUTO is a simple hysteresis count | ||
| 45 | * controlled by the parameters below. | ||
| 46 | * | ||
| 47 | * - Since pushing and popping descriptors are separated by the rx_queue | ||
| 48 | * size, the watermarks should be ~rxd_size. | ||
| 49 | * - The performance win from using page-based allocation for LRO is less | ||
| 50 | * than the performance hit of using page-based allocation for non-LRO, | ||
| 51 | * so the watermarks should reflect this. | ||
| 52 | * | ||
| 53 | * Per channel we maintain a single variable, updated by each channel: | ||
| 54 | * | ||
| 55 | * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO : | ||
| 56 | * RX_ALLOC_FACTOR_SKB) | ||
| 57 | * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which | ||
| 58 | * limits the hysteresis), and update the allocation strategy: | ||
| 59 | * | ||
| 60 | * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? | ||
| 61 | * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) | ||
| 62 | */ | ||
| 63 | static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; | ||
| 64 | |||
| 65 | #define RX_ALLOC_LEVEL_LRO 0x2000 | ||
| 66 | #define RX_ALLOC_LEVEL_MAX 0x3000 | ||
| 67 | #define RX_ALLOC_FACTOR_LRO 1 | ||
| 68 | #define RX_ALLOC_FACTOR_SKB (-2) | ||
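A minimal sketch of the per-poll step described above, under the assumption that it runs at the end of each NAPI poll (the helper name is hypothetical; the real driver performs this in its channel processing path):

/* Illustrative only: clamp the hysteresis counter and derive the
 * allocation strategy used for the next descriptor fill. */
static void example_rx_strategy_update(struct efx_channel *channel)
{
	if (channel->rx_alloc_level < 0)
		channel->rx_alloc_level = 0;
	else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX)
		channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;

	if (rx_alloc_method == RX_ALLOC_METHOD_AUTO)
		channel->rx_alloc_push_pages =
			(channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO);
	else
		channel->rx_alloc_push_pages =
			(rx_alloc_method == RX_ALLOC_METHOD_PAGE);
}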
| 69 | |||
| 70 | /* This is the percentage fill level below which new RX descriptors | ||
| 71 | * will be added to the RX descriptor ring. | ||
| 72 | */ | ||
| 73 | static unsigned int rx_refill_threshold = 90; | ||
| 74 | |||
| 75 | /* This is the percentage fill level to which an RX queue will be refilled | ||
| 76 | * when the "RX refill threshold" is reached. | ||
| 77 | */ | ||
| 78 | static unsigned int rx_refill_limit = 95; | ||
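As a worked example on a hypothetical 4096-entry descriptor ring, these defaults mean a fast fill is triggered once the fill level drops below roughly 3686 entries (90%) and tops the ring back up to about 3891 entries (95%).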
| 79 | |||
| 80 | /* | ||
| 81 | * RX maximum head room required. | ||
| 82 | * | ||
| 83 | * This must be at least 1 to prevent overflow and at least 2 to allow | ||
| 84 | * pipelined receives. | ||
| 85 | */ | ||
| 86 | #define EFX_RXD_HEAD_ROOM 2 | ||
| 87 | |||
| 88 | /* Macros for zero-order pages (potentially) containing multiple RX buffers */ | ||
| 89 | #define RX_DATA_OFFSET(_data) \ | ||
| 90 | (((unsigned long) (_data)) & (PAGE_SIZE-1)) | ||
| 91 | #define RX_BUF_OFFSET(_rx_buf) \ | ||
| 92 | RX_DATA_OFFSET((_rx_buf)->data) | ||
| 93 | |||
| 94 | #define RX_PAGE_SIZE(_efx) \ | ||
| 95 | (PAGE_SIZE * (1u << (_efx)->rx_buffer_order)) | ||
| 96 | |||
| 97 | |||
| 98 | /************************************************************************** | ||
| 99 | * | ||
| 100 | * Linux generic LRO handling | ||
| 101 | * | ||
| 102 | ************************************************************************** | ||
| 103 | */ | ||
| 104 | |||
| 105 | static int efx_lro_get_skb_hdr(struct sk_buff *skb, void **ip_hdr, | ||
| 106 | void **tcpudp_hdr, u64 *hdr_flags, void *priv) | ||
| 107 | { | ||
| 108 | struct efx_channel *channel = (struct efx_channel *)priv; | ||
| 109 | struct iphdr *iph; | ||
| 110 | struct tcphdr *th; | ||
| 111 | |||
| 112 | iph = (struct iphdr *)skb->data; | ||
| 113 | if (skb->protocol != htons(ETH_P_IP) || iph->protocol != IPPROTO_TCP) | ||
| 114 | goto fail; | ||
| 115 | |||
| 116 | th = (struct tcphdr *)(skb->data + iph->ihl * 4); | ||
| 117 | |||
| 118 | *tcpudp_hdr = th; | ||
| 119 | *ip_hdr = iph; | ||
| 120 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
| 121 | |||
| 122 | channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; | ||
| 123 | return 0; | ||
| 124 | fail: | ||
| 125 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
| 126 | return -1; | ||
| 127 | } | ||
| 128 | |||
| 129 | static int efx_get_frag_hdr(struct skb_frag_struct *frag, void **mac_hdr, | ||
| 130 | void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, | ||
| 131 | void *priv) | ||
| 132 | { | ||
| 133 | struct efx_channel *channel = (struct efx_channel *)priv; | ||
| 134 | struct ethhdr *eh; | ||
| 135 | struct iphdr *iph; | ||
| 136 | |||
| 137 | /* We support EtherII and VLAN encapsulated IPv4 */ | ||
| 138 | eh = (struct ethhdr *)(page_address(frag->page) + frag->page_offset); | ||
| 139 | *mac_hdr = eh; | ||
| 140 | |||
| 141 | if (eh->h_proto == htons(ETH_P_IP)) { | ||
| 142 | iph = (struct iphdr *)(eh + 1); | ||
| 143 | } else { | ||
| 144 | struct vlan_ethhdr *veh = (struct vlan_ethhdr *)eh; | ||
| 145 | if (veh->h_vlan_encapsulated_proto != htons(ETH_P_IP)) | ||
| 146 | goto fail; | ||
| 147 | |||
| 148 | iph = (struct iphdr *)(veh + 1); | ||
| 149 | } | ||
| 150 | *ip_hdr = iph; | ||
| 151 | |||
| 152 | /* We can only do LRO over TCP */ | ||
| 153 | if (iph->protocol != IPPROTO_TCP) | ||
| 154 | goto fail; | ||
| 155 | |||
| 156 | *hdr_flags = LRO_IPV4 | LRO_TCP; | ||
| 157 | *tcpudp_hdr = (struct tcphdr *)((u8 *) iph + iph->ihl * 4); | ||
| 158 | |||
| 159 | channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; | ||
| 160 | return 0; | ||
| 161 | fail: | ||
| 162 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
| 163 | return -1; | ||
| 164 | } | ||
| 165 | |||
| 166 | int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx) | ||
| 167 | { | ||
| 168 | size_t s = sizeof(struct net_lro_desc) * EFX_MAX_LRO_DESCRIPTORS; | ||
| 169 | struct net_lro_desc *lro_arr; | ||
| 170 | |||
| 171 | /* Allocate the LRO descriptors structure */ | ||
| 172 | lro_arr = kzalloc(s, GFP_KERNEL); | ||
| 173 | if (lro_arr == NULL) | ||
| 174 | return -ENOMEM; | ||
| 175 | |||
| 176 | lro_mgr->lro_arr = lro_arr; | ||
| 177 | lro_mgr->max_desc = EFX_MAX_LRO_DESCRIPTORS; | ||
| 178 | lro_mgr->max_aggr = EFX_MAX_LRO_AGGR; | ||
| 179 | lro_mgr->frag_align_pad = EFX_PAGE_SKB_ALIGN; | ||
| 180 | |||
| 181 | lro_mgr->get_skb_header = efx_lro_get_skb_hdr; | ||
| 182 | lro_mgr->get_frag_header = efx_get_frag_hdr; | ||
| 183 | lro_mgr->dev = efx->net_dev; | ||
| 184 | |||
| 185 | lro_mgr->features = LRO_F_NAPI; | ||
| 186 | |||
| 187 | /* We can pass packets up with the checksum intact */ | ||
| 188 | lro_mgr->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 189 | |||
| 190 | lro_mgr->ip_summed_aggr = CHECKSUM_UNNECESSARY; | ||
| 191 | |||
| 192 | return 0; | ||
| 193 | } | ||
| 194 | |||
| 195 | void efx_lro_fini(struct net_lro_mgr *lro_mgr) | ||
| 196 | { | ||
| 197 | kfree(lro_mgr->lro_arr); | ||
| 198 | lro_mgr->lro_arr = NULL; | ||
| 199 | } | ||
| 200 | |||
| 201 | /** | ||
| 202 | * efx_init_rx_buffer_skb - create new RX buffer using skb-based allocation | ||
| 203 | * | ||
| 204 | * @rx_queue: Efx RX queue | ||
| 205 | * @rx_buf: RX buffer structure to populate | ||
| 206 | * | ||
| 207 | * This allocates memory for a new receive buffer, maps it for DMA, | ||
| 208 | * and populates a struct efx_rx_buffer with the relevant | ||
| 209 | * information. Return a negative error code or 0 on success. | ||
| 210 | */ | ||
| 211 | static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue, | ||
| 212 | struct efx_rx_buffer *rx_buf) | ||
| 213 | { | ||
| 214 | struct efx_nic *efx = rx_queue->efx; | ||
| 215 | struct net_device *net_dev = efx->net_dev; | ||
| 216 | int skb_len = efx->rx_buffer_len; | ||
| 217 | |||
| 218 | rx_buf->skb = netdev_alloc_skb(net_dev, skb_len); | ||
| 219 | if (unlikely(!rx_buf->skb)) | ||
| 220 | return -ENOMEM; | ||
| 221 | |||
| 222 | /* Adjust the SKB for padding and checksum */ | ||
| 223 | skb_reserve(rx_buf->skb, NET_IP_ALIGN); | ||
| 224 | rx_buf->len = skb_len - NET_IP_ALIGN; | ||
| 225 | rx_buf->data = (char *)rx_buf->skb->data; | ||
| 226 | rx_buf->skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 227 | |||
| 228 | rx_buf->dma_addr = pci_map_single(efx->pci_dev, | ||
| 229 | rx_buf->data, rx_buf->len, | ||
| 230 | PCI_DMA_FROMDEVICE); | ||
| 231 | |||
| 232 | if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) { | ||
| 233 | dev_kfree_skb_any(rx_buf->skb); | ||
| 234 | rx_buf->skb = NULL; | ||
| 235 | return -EIO; | ||
| 236 | } | ||
| 237 | |||
| 238 | return 0; | ||
| 239 | } | ||
| 240 | |||
| 241 | /** | ||
| 242 | * efx_init_rx_buffer_page - create new RX buffer using page-based allocation | ||
| 243 | * | ||
| 244 | * @rx_queue: Efx RX queue | ||
| 245 | * @rx_buf: RX buffer structure to populate | ||
| 246 | * | ||
| 247 | * This allocates memory for a new receive buffer, maps it for DMA, | ||
| 248 | * and populates a struct efx_rx_buffer with the relevant | ||
| 249 | * information. Return a negative error code or 0 on success. | ||
| 250 | */ | ||
| 251 | static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue, | ||
| 252 | struct efx_rx_buffer *rx_buf) | ||
| 253 | { | ||
| 254 | struct efx_nic *efx = rx_queue->efx; | ||
| 255 | int bytes, space, offset; | ||
| 256 | |||
| 257 | bytes = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN; | ||
| 258 | |||
| 259 | /* If there is space left in the previously allocated page, | ||
| 260 | * then use it. Otherwise allocate a new one */ | ||
| 261 | rx_buf->page = rx_queue->buf_page; | ||
| 262 | if (rx_buf->page == NULL) { | ||
| 263 | dma_addr_t dma_addr; | ||
| 264 | |||
| 265 | rx_buf->page = alloc_pages(__GFP_COLD | __GFP_COMP | GFP_ATOMIC, | ||
| 266 | efx->rx_buffer_order); | ||
| 267 | if (unlikely(rx_buf->page == NULL)) | ||
| 268 | return -ENOMEM; | ||
| 269 | |||
| 270 | dma_addr = pci_map_page(efx->pci_dev, rx_buf->page, | ||
| 271 | 0, RX_PAGE_SIZE(efx), | ||
| 272 | PCI_DMA_FROMDEVICE); | ||
| 273 | |||
| 274 | if (unlikely(pci_dma_mapping_error(dma_addr))) { | ||
| 275 | __free_pages(rx_buf->page, efx->rx_buffer_order); | ||
| 276 | rx_buf->page = NULL; | ||
| 277 | return -EIO; | ||
| 278 | } | ||
| 279 | |||
| 280 | rx_queue->buf_page = rx_buf->page; | ||
| 281 | rx_queue->buf_dma_addr = dma_addr; | ||
| 282 | rx_queue->buf_data = ((char *) page_address(rx_buf->page) + | ||
| 283 | EFX_PAGE_IP_ALIGN); | ||
| 284 | } | ||
| 285 | |||
| 286 | offset = RX_DATA_OFFSET(rx_queue->buf_data); | ||
| 287 | rx_buf->len = bytes; | ||
| 288 | rx_buf->dma_addr = rx_queue->buf_dma_addr + offset; | ||
| 289 | rx_buf->data = rx_queue->buf_data; | ||
| 290 | |||
| 291 | /* Try to pack multiple buffers per page */ | ||
| 292 | if (efx->rx_buffer_order == 0) { | ||
| 293 | /* The next buffer starts on the next 512 byte boundary */ | ||
| 294 | rx_queue->buf_data += ((bytes + 0x1ff) & ~0x1ff); | ||
| 295 | offset += ((bytes + 0x1ff) & ~0x1ff); | ||
| 296 | |||
| 297 | space = RX_PAGE_SIZE(efx) - offset; | ||
| 298 | if (space >= bytes) { | ||
| 299 | /* Refs dropped on kernel releasing each skb */ | ||
| 300 | get_page(rx_queue->buf_page); | ||
| 301 | goto out; | ||
| 302 | } | ||
| 303 | } | ||
| 304 | |||
| 305 | /* This is the final RX buffer for this page, so mark it for | ||
| 306 | * unmapping */ | ||
| 307 | rx_queue->buf_page = NULL; | ||
| 308 | rx_buf->unmap_addr = rx_queue->buf_dma_addr; | ||
| 309 | |||
| 310 | out: | ||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* This allocates memory for a new receive buffer, maps it for DMA, | ||
| 315 | * and populates a struct efx_rx_buffer with the relevant | ||
| 316 | * information. | ||
| 317 | */ | ||
| 318 | static inline int efx_init_rx_buffer(struct efx_rx_queue *rx_queue, | ||
| 319 | struct efx_rx_buffer *new_rx_buf) | ||
| 320 | { | ||
| 321 | int rc = 0; | ||
| 322 | |||
| 323 | if (rx_queue->channel->rx_alloc_push_pages) { | ||
| 324 | new_rx_buf->skb = NULL; | ||
| 325 | rc = efx_init_rx_buffer_page(rx_queue, new_rx_buf); | ||
| 326 | rx_queue->alloc_page_count++; | ||
| 327 | } else { | ||
| 328 | new_rx_buf->page = NULL; | ||
| 329 | rc = efx_init_rx_buffer_skb(rx_queue, new_rx_buf); | ||
| 330 | rx_queue->alloc_skb_count++; | ||
| 331 | } | ||
| 332 | |||
| 333 | if (unlikely(rc < 0)) | ||
| 334 | EFX_LOG_RL(rx_queue->efx, "%s RXQ[%d] =%d\n", __func__, | ||
| 335 | rx_queue->queue, rc); | ||
| 336 | return rc; | ||
| 337 | } | ||
| 338 | |||
| 339 | static inline void efx_unmap_rx_buffer(struct efx_nic *efx, | ||
| 340 | struct efx_rx_buffer *rx_buf) | ||
| 341 | { | ||
| 342 | if (rx_buf->page) { | ||
| 343 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
| 344 | if (rx_buf->unmap_addr) { | ||
| 345 | pci_unmap_page(efx->pci_dev, rx_buf->unmap_addr, | ||
| 346 | RX_PAGE_SIZE(efx), PCI_DMA_FROMDEVICE); | ||
| 347 | rx_buf->unmap_addr = 0; | ||
| 348 | } | ||
| 349 | } else if (likely(rx_buf->skb)) { | ||
| 350 | pci_unmap_single(efx->pci_dev, rx_buf->dma_addr, | ||
| 351 | rx_buf->len, PCI_DMA_FROMDEVICE); | ||
| 352 | } | ||
| 353 | } | ||
| 354 | |||
| 355 | static inline void efx_free_rx_buffer(struct efx_nic *efx, | ||
| 356 | struct efx_rx_buffer *rx_buf) | ||
| 357 | { | ||
| 358 | if (rx_buf->page) { | ||
| 359 | __free_pages(rx_buf->page, efx->rx_buffer_order); | ||
| 360 | rx_buf->page = NULL; | ||
| 361 | } else if (likely(rx_buf->skb)) { | ||
| 362 | dev_kfree_skb_any(rx_buf->skb); | ||
| 363 | rx_buf->skb = NULL; | ||
| 364 | } | ||
| 365 | } | ||
| 366 | |||
| 367 | static inline void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue, | ||
| 368 | struct efx_rx_buffer *rx_buf) | ||
| 369 | { | ||
| 370 | efx_unmap_rx_buffer(rx_queue->efx, rx_buf); | ||
| 371 | efx_free_rx_buffer(rx_queue->efx, rx_buf); | ||
| 372 | } | ||
| 373 | |||
| 374 | /** | ||
| 375 | * __efx_fast_push_rx_descriptors - push new RX descriptors quickly | ||
| 376 | * @rx_queue: RX descriptor queue | ||
| 377 | * @retry: Recheck the fill level | ||
| 378 | * This will aim to fill the RX descriptor queue up to | ||
| 379 | * @rx_queue->fast_fill_limit. If there is insufficient atomic | ||
| 380 | * memory to do so, the caller should retry. | ||
| 381 | */ | ||
| 382 | static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue, | ||
| 383 | int retry) | ||
| 384 | { | ||
| 385 | struct efx_rx_buffer *rx_buf; | ||
| 386 | unsigned fill_level, index; | ||
| 387 | int i, space, rc = 0; | ||
| 388 | |||
| 389 | /* Calculate current fill level. Do this outside the lock, | ||
| 390 | * because most of the time we'll end up not wanting to do the | ||
| 391 | * fill anyway. | ||
| 392 | */ | ||
| 393 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | ||
| 394 | EFX_BUG_ON_PARANOID(fill_level > | ||
| 395 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
| 396 | |||
| 397 | /* Don't fill if we don't need to */ | ||
| 398 | if (fill_level >= rx_queue->fast_fill_trigger) | ||
| 399 | return 0; | ||
| 400 | |||
| 401 | /* Record minimum fill level */ | ||
| 402 | if (unlikely(fill_level < rx_queue->min_fill)) | ||
| 403 | if (fill_level) | ||
| 404 | rx_queue->min_fill = fill_level; | ||
| 405 | |||
| 406 | /* Acquire RX add lock. If this lock is contended, then a fast | ||
| 407 | * fill must already be in progress (e.g. in the refill | ||
| 408 | * tasklet), so we don't need to do anything | ||
| 409 | */ | ||
| 410 | if (!spin_trylock_bh(&rx_queue->add_lock)) | ||
| 411 | return -1; | ||
| 412 | |||
| 413 | retry: | ||
| 414 | /* Recalculate current fill level now that we have the lock */ | ||
| 415 | fill_level = (rx_queue->added_count - rx_queue->removed_count); | ||
| 416 | EFX_BUG_ON_PARANOID(fill_level > | ||
| 417 | rx_queue->efx->type->rxd_ring_mask + 1); | ||
| 418 | space = rx_queue->fast_fill_limit - fill_level; | ||
| 419 | if (space < EFX_RX_BATCH) | ||
| 420 | goto out_unlock; | ||
| 421 | |||
| 422 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filling descriptor ring from" | ||
| 423 | " level %d to level %d using %s allocation\n", | ||
| 424 | rx_queue->queue, fill_level, rx_queue->fast_fill_limit, | ||
| 425 | rx_queue->channel->rx_alloc_push_pages ? "page" : "skb"); | ||
| 426 | |||
| 427 | do { | ||
| 428 | for (i = 0; i < EFX_RX_BATCH; ++i) { | ||
| 429 | index = (rx_queue->added_count & | ||
| 430 | rx_queue->efx->type->rxd_ring_mask); | ||
| 431 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
| 432 | rc = efx_init_rx_buffer(rx_queue, rx_buf); | ||
| 433 | if (unlikely(rc)) | ||
| 434 | goto out; | ||
| 435 | ++rx_queue->added_count; | ||
| 436 | } | ||
| 437 | } while ((space -= EFX_RX_BATCH) >= EFX_RX_BATCH); | ||
| 438 | |||
| 439 | EFX_TRACE(rx_queue->efx, "RX queue %d fast-filled descriptor ring " | ||
| 440 | "to level %d\n", rx_queue->queue, | ||
| 441 | rx_queue->added_count - rx_queue->removed_count); | ||
| 442 | |||
| 443 | out: | ||
| 444 | /* Send write pointer to card. */ | ||
| 445 | falcon_notify_rx_desc(rx_queue); | ||
| 446 | |||
| 447 | /* If the fast fill is running from within the refill tasklet, then | ||
| 448 | * on SMP systems it may be running on a different CPU from | ||
| 449 | * RX event processing, which means that the fill level may now be | ||
| 450 | * out of date. */ | ||
| 451 | if (unlikely(retry && (rc == 0))) | ||
| 452 | goto retry; | ||
| 453 | |||
| 454 | out_unlock: | ||
| 455 | spin_unlock_bh(&rx_queue->add_lock); | ||
| 456 | |||
| 457 | return rc; | ||
| 458 | } | ||
| 459 | |||
| 460 | /** | ||
| 461 | * efx_fast_push_rx_descriptors - push new RX descriptors quickly | ||
| 462 | * @rx_queue: RX descriptor queue | ||
| 463 | * | ||
| 464 | * This will aim to fill the RX descriptor queue up to | ||
| 465 | * @rx_queue->fast_fill_limit. If there is insufficient memory to do so, | ||
| 466 | * it will schedule a work item to immediately continue the fast fill. | ||
| 467 | */ | ||
| 468 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue) | ||
| 469 | { | ||
| 470 | int rc; | ||
| 471 | |||
| 472 | rc = __efx_fast_push_rx_descriptors(rx_queue, 0); | ||
| 473 | if (unlikely(rc)) { | ||
| 474 | /* Schedule the work item to run immediately. The hope is | ||
| 475 | * that work is immediately pending to free some memory | ||
| 476 | * (e.g. an RX event or TX completion) | ||
| 477 | */ | ||
| 478 | efx_schedule_slow_fill(rx_queue, 0); | ||
| 479 | } | ||
| 480 | } | ||
| 481 | |||
| 482 | void efx_rx_work(struct work_struct *data) | ||
| 483 | { | ||
| 484 | struct efx_rx_queue *rx_queue; | ||
| 485 | int rc; | ||
| 486 | |||
| 487 | rx_queue = container_of(data, struct efx_rx_queue, work.work); | ||
| 488 | |||
| 489 | if (unlikely(!rx_queue->channel->enabled)) | ||
| 490 | return; | ||
| 491 | |||
| 492 | EFX_TRACE(rx_queue->efx, "RX queue %d worker thread executing on CPU " | ||
| 493 | "%d\n", rx_queue->queue, raw_smp_processor_id()); | ||
| 494 | |||
| 495 | ++rx_queue->slow_fill_count; | ||
| 496 | /* Push new RX descriptors, allowing at least 1 jiffy for | ||
| 497 | * the kernel to free some more memory. */ | ||
| 498 | rc = __efx_fast_push_rx_descriptors(rx_queue, 1); | ||
| 499 | if (rc) | ||
| 500 | efx_schedule_slow_fill(rx_queue, 1); | ||
| 501 | } | ||
| 502 | |||
| 503 | static inline void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue, | ||
| 504 | struct efx_rx_buffer *rx_buf, | ||
| 505 | int len, int *discard, | ||
| 506 | int *leak_packet) | ||
| 507 | { | ||
| 508 | struct efx_nic *efx = rx_queue->efx; | ||
| 509 | unsigned max_len = rx_buf->len - efx->type->rx_buffer_padding; | ||
| 510 | |||
| 511 | if (likely(len <= max_len)) | ||
| 512 | return; | ||
| 513 | |||
| 514 | /* The packet must be discarded, but this is only a fatal error | ||
| 515 | * if the caller indicated it was | ||
| 516 | */ | ||
| 517 | *discard = 1; | ||
| 518 | |||
| 519 | if ((len > rx_buf->len) && EFX_WORKAROUND_8071(efx)) { | ||
| 520 | EFX_ERR_RL(efx, " RX queue %d seriously overlength " | ||
| 521 | "RX event (0x%x > 0x%x+0x%x). Leaking\n", | ||
| 522 | rx_queue->queue, len, max_len, | ||
| 523 | efx->type->rx_buffer_padding); | ||
| 524 | /* If this buffer was skb-allocated, then the meta | ||
| 525 | * data at the end of the skb will be trashed. So | ||
| 526 | * we have no choice but to leak the fragment. | ||
| 527 | */ | ||
| 528 | *leak_packet = (rx_buf->skb != NULL); | ||
| 529 | efx_schedule_reset(efx, RESET_TYPE_RX_RECOVERY); | ||
| 530 | } else { | ||
| 531 | EFX_ERR_RL(efx, " RX queue %d overlength RX event " | ||
| 532 | "(0x%x > 0x%x)\n", rx_queue->queue, len, max_len); | ||
| 533 | } | ||
| 534 | |||
| 535 | rx_queue->channel->n_rx_overlength++; | ||
| 536 | } | ||
| 537 | |||
| 538 | /* Pass a received packet up through the generic LRO stack | ||
| 539 | * | ||
| 540 | * Handles driverlink veto, and passes the fragment up via | ||
| 541 | * the appropriate LRO method | ||
| 542 | */ | ||
| 543 | static inline void efx_rx_packet_lro(struct efx_channel *channel, | ||
| 544 | struct efx_rx_buffer *rx_buf) | ||
| 545 | { | ||
| 546 | struct net_lro_mgr *lro_mgr = &channel->lro_mgr; | ||
| 547 | void *priv = channel; | ||
| 548 | |||
| 549 | /* Pass the skb/page into the LRO engine */ | ||
| 550 | if (rx_buf->page) { | ||
| 551 | struct skb_frag_struct frags; | ||
| 552 | |||
| 553 | frags.page = rx_buf->page; | ||
| 554 | frags.page_offset = RX_BUF_OFFSET(rx_buf); | ||
| 555 | frags.size = rx_buf->len; | ||
| 556 | |||
| 557 | lro_receive_frags(lro_mgr, &frags, rx_buf->len, | ||
| 558 | rx_buf->len, priv, 0); | ||
| 559 | |||
| 560 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
| 561 | rx_buf->page = NULL; | ||
| 562 | } else { | ||
| 563 | EFX_BUG_ON_PARANOID(!rx_buf->skb); | ||
| 564 | |||
| 565 | lro_receive_skb(lro_mgr, rx_buf->skb, priv); | ||
| 566 | rx_buf->skb = NULL; | ||
| 567 | } | ||
| 568 | } | ||
| 569 | |||
| 570 | /* Allocate and construct an SKB around a struct page. */ | ||
| 571 | static inline struct sk_buff *efx_rx_mk_skb(struct efx_rx_buffer *rx_buf, | ||
| 572 | struct efx_nic *efx, | ||
| 573 | int hdr_len) | ||
| 574 | { | ||
| 575 | struct sk_buff *skb; | ||
| 576 | |||
| 577 | /* Allocate an SKB to store the headers */ | ||
| 578 | skb = netdev_alloc_skb(efx->net_dev, hdr_len + EFX_PAGE_SKB_ALIGN); | ||
| 579 | if (unlikely(skb == NULL)) { | ||
| 580 | EFX_ERR_RL(efx, "RX out of memory for skb\n"); | ||
| 581 | return NULL; | ||
| 582 | } | ||
| 583 | |||
| 584 | EFX_BUG_ON_PARANOID(skb_shinfo(skb)->nr_frags); | ||
| 585 | EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len); | ||
| 586 | |||
| 587 | skb->ip_summed = CHECKSUM_UNNECESSARY; | ||
| 588 | skb_reserve(skb, EFX_PAGE_SKB_ALIGN); | ||
| 589 | |||
| 590 | skb->len = rx_buf->len; | ||
| 591 | skb->truesize = rx_buf->len + sizeof(struct sk_buff); | ||
| 592 | memcpy(skb->data, rx_buf->data, hdr_len); | ||
| 593 | skb->tail += hdr_len; | ||
| 594 | |||
| 595 | /* Append the remaining page onto the frag list */ | ||
| 596 | if (unlikely(rx_buf->len > hdr_len)) { | ||
| 597 | struct skb_frag_struct *frag = skb_shinfo(skb)->frags; | ||
| 598 | frag->page = rx_buf->page; | ||
| 599 | frag->page_offset = RX_BUF_OFFSET(rx_buf) + hdr_len; | ||
| 600 | frag->size = skb->len - hdr_len; | ||
| 601 | skb_shinfo(skb)->nr_frags = 1; | ||
| 602 | skb->data_len = frag->size; | ||
| 603 | } else { | ||
| 604 | __free_pages(rx_buf->page, efx->rx_buffer_order); | ||
| 605 | skb->data_len = 0; | ||
| 606 | } | ||
| 607 | |||
| 608 | /* Ownership has transferred from the rx_buf to skb */ | ||
| 609 | rx_buf->page = NULL; | ||
| 610 | |||
| 611 | /* Move past the ethernet header */ | ||
| 612 | skb->protocol = eth_type_trans(skb, efx->net_dev); | ||
| 613 | |||
| 614 | return skb; | ||
| 615 | } | ||
| 616 | |||
| 617 | void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, | ||
| 618 | unsigned int len, int checksummed, int discard) | ||
| 619 | { | ||
| 620 | struct efx_nic *efx = rx_queue->efx; | ||
| 621 | struct efx_rx_buffer *rx_buf; | ||
| 622 | int leak_packet = 0; | ||
| 623 | |||
| 624 | rx_buf = efx_rx_buffer(rx_queue, index); | ||
| 625 | EFX_BUG_ON_PARANOID(!rx_buf->data); | ||
| 626 | EFX_BUG_ON_PARANOID(rx_buf->skb && rx_buf->page); | ||
| 627 | EFX_BUG_ON_PARANOID(!(rx_buf->skb || rx_buf->page)); | ||
| 628 | |||
| 629 | /* This allows the refill path to post another buffer. | ||
| 630 | * EFX_RXD_HEAD_ROOM ensures that the slot we are using | ||
| 631 | * isn't overwritten yet. | ||
| 632 | */ | ||
| 633 | rx_queue->removed_count++; | ||
| 634 | |||
| 635 | /* Validate the length encoded in the event vs the descriptor pushed */ | ||
| 636 | efx_rx_packet__check_len(rx_queue, rx_buf, len, | ||
| 637 | &discard, &leak_packet); | ||
| 638 | |||
| 639 | EFX_TRACE(efx, "RX queue %d received id %x at %llx+%x %s%s\n", | ||
| 640 | rx_queue->queue, index, | ||
| 641 | (unsigned long long)rx_buf->dma_addr, len, | ||
| 642 | (checksummed ? " [SUMMED]" : ""), | ||
| 643 | (discard ? " [DISCARD]" : "")); | ||
| 644 | |||
| 645 | /* Discard packet, if instructed to do so */ | ||
| 646 | if (unlikely(discard)) { | ||
| 647 | if (unlikely(leak_packet)) | ||
| 648 | rx_queue->channel->n_skbuff_leaks++; | ||
| 649 | else | ||
| 650 | /* We haven't called efx_unmap_rx_buffer yet, | ||
| 651 | * so fini the entire rx_buffer here */ | ||
| 652 | efx_fini_rx_buffer(rx_queue, rx_buf); | ||
| 653 | return; | ||
| 654 | } | ||
| 655 | |||
| 656 | /* Release card resources - assumes all RX buffers consumed in-order | ||
| 657 | * per RX queue | ||
| 658 | */ | ||
| 659 | efx_unmap_rx_buffer(efx, rx_buf); | ||
| 660 | |||
| 661 | /* Prefetch nice and early so data will (hopefully) be in cache by | ||
| 662 | * the time we look at it. | ||
| 663 | */ | ||
| 664 | prefetch(rx_buf->data); | ||
| 665 | |||
| 666 | /* Pipeline receives so that we give time for packet headers to be | ||
| 667 | * prefetched into cache. | ||
| 668 | */ | ||
| 669 | rx_buf->len = len; | ||
| 670 | if (rx_queue->channel->rx_pkt) | ||
| 671 | __efx_rx_packet(rx_queue->channel, | ||
| 672 | rx_queue->channel->rx_pkt, | ||
| 673 | rx_queue->channel->rx_pkt_csummed); | ||
| 674 | rx_queue->channel->rx_pkt = rx_buf; | ||
| 675 | rx_queue->channel->rx_pkt_csummed = checksummed; | ||
| 676 | } | ||
| 677 | |||
| 678 | /* Handle a received packet. Second half: Touches packet payload. */ | ||
| 679 | void __efx_rx_packet(struct efx_channel *channel, | ||
| 680 | struct efx_rx_buffer *rx_buf, int checksummed) | ||
| 681 | { | ||
| 682 | struct efx_nic *efx = channel->efx; | ||
| 683 | struct sk_buff *skb; | ||
| 684 | int lro = efx->net_dev->features & NETIF_F_LRO; | ||
| 685 | |||
| 686 | if (rx_buf->skb) { | ||
| 687 | prefetch(skb_shinfo(rx_buf->skb)); | ||
| 688 | |||
| 689 | skb_put(rx_buf->skb, rx_buf->len); | ||
| 690 | |||
| 691 | /* Move past the ethernet header. rx_buf->data still points | ||
| 692 | * at the ethernet header */ | ||
| 693 | rx_buf->skb->protocol = eth_type_trans(rx_buf->skb, | ||
| 694 | efx->net_dev); | ||
| 695 | } | ||
| 696 | |||
| 697 | /* Both our generic-LRO and SFC-SSR support skb and page based | ||
| 698 | * allocation, but neither support switching from one to the | ||
| 699 | * other on the fly. If we spot that the allocation mode has | ||
| 700 | * changed, then flush the LRO state. | ||
| 701 | */ | ||
| 702 | if (unlikely(channel->rx_alloc_pop_pages != (rx_buf->page != NULL))) { | ||
| 703 | efx_flush_lro(channel); | ||
| 704 | channel->rx_alloc_pop_pages = (rx_buf->page != NULL); | ||
| 705 | } | ||
| 706 | if (likely(checksummed && lro)) { | ||
| 707 | efx_rx_packet_lro(channel, rx_buf); | ||
| 708 | goto done; | ||
| 709 | } | ||
| 710 | |||
| 711 | /* Form an skb if required */ | ||
| 712 | if (rx_buf->page) { | ||
| 713 | int hdr_len = min(rx_buf->len, EFX_SKB_HEADERS); | ||
| 714 | skb = efx_rx_mk_skb(rx_buf, efx, hdr_len); | ||
| 715 | if (unlikely(skb == NULL)) { | ||
| 716 | efx_free_rx_buffer(efx, rx_buf); | ||
| 717 | goto done; | ||
| 718 | } | ||
| 719 | } else { | ||
| 720 | /* We now own the SKB */ | ||
| 721 | skb = rx_buf->skb; | ||
| 722 | rx_buf->skb = NULL; | ||
| 723 | } | ||
| 724 | |||
| 725 | EFX_BUG_ON_PARANOID(rx_buf->page); | ||
| 726 | EFX_BUG_ON_PARANOID(rx_buf->skb); | ||
| 727 | EFX_BUG_ON_PARANOID(!skb); | ||
| 728 | |||
| 729 | /* Set the SKB flags */ | ||
| 730 | if (unlikely(!checksummed || !efx->rx_checksum_enabled)) | ||
| 731 | skb->ip_summed = CHECKSUM_NONE; | ||
| 732 | |||
| 733 | /* Pass the packet up */ | ||
| 734 | netif_receive_skb(skb); | ||
| 735 | |||
| 736 | /* Update allocation strategy method */ | ||
| 737 | channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; | ||
| 738 | |||
| 739 | /* fall-thru */ | ||
| 740 | done: | ||
| 741 | efx->net_dev->last_rx = jiffies; | ||
| 742 | } | ||
| 743 | |||
| 744 | void efx_rx_strategy(struct efx_channel *channel) | ||
| 745 | { | ||
| 746 | enum efx_rx_alloc_method method = rx_alloc_method; | ||
| 747 | |||
| 748 | /* Only makes sense to use page based allocation if LRO is enabled */ | ||
| 749 | if (!(channel->efx->net_dev->features & NETIF_F_LRO)) { | ||
| 750 | method = RX_ALLOC_METHOD_SKB; | ||
| 751 | } else if (method == RX_ALLOC_METHOD_AUTO) { | ||
| 752 | /* Constrain the rx_alloc_level */ | ||
| 753 | if (channel->rx_alloc_level < 0) | ||
| 754 | channel->rx_alloc_level = 0; | ||
| 755 | else if (channel->rx_alloc_level > RX_ALLOC_LEVEL_MAX) | ||
| 756 | channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; | ||
| 757 | |||
| 758 | /* Decide on the allocation method */ | ||
| 759 | method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ? | ||
| 760 | RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); | ||
| 761 | } | ||
| 762 | |||
| 763 | /* Push the option */ | ||
| 764 | channel->rx_alloc_push_pages = (method == RX_ALLOC_METHOD_PAGE); | ||
| 765 | } | ||
| 766 | |||
| 767 | int efx_probe_rx_queue(struct efx_rx_queue *rx_queue) | ||
| 768 | { | ||
| 769 | struct efx_nic *efx = rx_queue->efx; | ||
| 770 | unsigned int rxq_size; | ||
| 771 | int rc; | ||
| 772 | |||
| 773 | EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); | ||
| 774 | |||
| 775 | /* Allocate RX buffers */ | ||
| 776 | rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); | ||
| 777 | rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); | ||
| 778 | if (!rx_queue->buffer) { | ||
| 779 | rc = -ENOMEM; | ||
| 780 | goto fail1; | ||
| 781 | } | ||
| 782 | |||
| 783 | rc = falcon_probe_rx(rx_queue); | ||
| 784 | if (rc) | ||
| 785 | goto fail2; | ||
| 786 | |||
| 787 | return 0; | ||
| 788 | |||
| 789 | fail2: | ||
| 790 | kfree(rx_queue->buffer); | ||
| 791 | rx_queue->buffer = NULL; | ||
| 792 | fail1: | ||
| 793 | rx_queue->used = 0; | ||
| 794 | |||
| 795 | return rc; | ||
| 796 | } | ||
| 797 | |||
| 798 | int efx_init_rx_queue(struct efx_rx_queue *rx_queue) | ||
| 799 | { | ||
| 800 | struct efx_nic *efx = rx_queue->efx; | ||
| 801 | unsigned int max_fill, trigger, limit; | ||
| 802 | |||
| 803 | EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); | ||
| 804 | |||
| 805 | /* Initialise ptr fields */ | ||
| 806 | rx_queue->added_count = 0; | ||
| 807 | rx_queue->notified_count = 0; | ||
| 808 | rx_queue->removed_count = 0; | ||
| 809 | rx_queue->min_fill = -1U; | ||
| 810 | rx_queue->min_overfill = -1U; | ||
| 811 | |||
| 812 | /* Initialise limit fields */ | ||
| 813 | max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; | ||
| 814 | trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; | ||
| 815 | limit = max_fill * min(rx_refill_limit, 100U) / 100U; | ||
| 816 | |||
| 817 | rx_queue->max_fill = max_fill; | ||
| 818 | rx_queue->fast_fill_trigger = trigger; | ||
| 819 | rx_queue->fast_fill_limit = limit; | ||
| 820 | |||
| 821 | /* Set up RX descriptor ring */ | ||
| 822 | return falcon_init_rx(rx_queue); | ||
| 823 | } | ||
| 824 | |||
| 825 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue) | ||
| 826 | { | ||
| 827 | int i; | ||
| 828 | struct efx_rx_buffer *rx_buf; | ||
| 829 | |||
| 830 | EFX_LOG(rx_queue->efx, "shutting down RX queue %d\n", rx_queue->queue); | ||
| 831 | |||
| 832 | falcon_fini_rx(rx_queue); | ||
| 833 | |||
| 834 | /* Release RX buffers. NB: start at index 0, not the current HW ptr */ | ||
| 835 | if (rx_queue->buffer) { | ||
| 836 | for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { | ||
| 837 | rx_buf = efx_rx_buffer(rx_queue, i); | ||
| 838 | efx_fini_rx_buffer(rx_queue, rx_buf); | ||
| 839 | } | ||
| 840 | } | ||
| 841 | |||
| 842 | /* For a page that is part-way through splitting into RX buffers */ | ||
| 843 | if (rx_queue->buf_page != NULL) { | ||
| 844 | pci_unmap_page(rx_queue->efx->pci_dev, rx_queue->buf_dma_addr, | ||
| 845 | RX_PAGE_SIZE(rx_queue->efx), PCI_DMA_FROMDEVICE); | ||
| 846 | __free_pages(rx_queue->buf_page, | ||
| 847 | rx_queue->efx->rx_buffer_order); | ||
| 848 | rx_queue->buf_page = NULL; | ||
| 849 | } | ||
| 850 | } | ||
| 851 | |||
| 852 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue) | ||
| 853 | { | ||
| 854 | EFX_LOG(rx_queue->efx, "destroying RX queue %d\n", rx_queue->queue); | ||
| 855 | |||
| 856 | falcon_remove_rx(rx_queue); | ||
| 857 | |||
| 858 | kfree(rx_queue->buffer); | ||
| 859 | rx_queue->buffer = NULL; | ||
| 860 | rx_queue->used = 0; | ||
| 861 | } | ||
| 862 | |||
| 863 | void efx_flush_lro(struct efx_channel *channel) | ||
| 864 | { | ||
| 865 | lro_flush_all(&channel->lro_mgr); | ||
| 866 | } | ||
| 867 | |||
| 868 | |||
| 869 | module_param(rx_alloc_method, int, 0644); | ||
| 870 | MODULE_PARM_DESC(rx_alloc_method, "Allocation method used for RX buffers"); | ||
| 871 | |||
| 872 | module_param(rx_refill_threshold, uint, 0444); | ||
| 873 | MODULE_PARM_DESC(rx_refill_threshold, | ||
| 874 | "RX descriptor ring fast/slow fill threshold (%)"); | ||
| 875 | |||
diff --git a/drivers/net/sfc/rx.h b/drivers/net/sfc/rx.h new file mode 100644 index 000000000000..f35e377bfc5f --- /dev/null +++ b/drivers/net/sfc/rx.h | |||
| @@ -0,0 +1,29 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_RX_H | ||
| 11 | #define EFX_RX_H | ||
| 12 | |||
| 13 | #include "net_driver.h" | ||
| 14 | |||
| 15 | int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); | ||
| 16 | void efx_remove_rx_queue(struct efx_rx_queue *rx_queue); | ||
| 17 | int efx_init_rx_queue(struct efx_rx_queue *rx_queue); | ||
| 18 | void efx_fini_rx_queue(struct efx_rx_queue *rx_queue); | ||
| 19 | |||
| 20 | int efx_lro_init(struct net_lro_mgr *lro_mgr, struct efx_nic *efx); | ||
| 21 | void efx_lro_fini(struct net_lro_mgr *lro_mgr); | ||
| 22 | void efx_flush_lro(struct efx_channel *channel); | ||
| 23 | void efx_rx_strategy(struct efx_channel *channel); | ||
| 24 | void efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue); | ||
| 25 | void efx_rx_work(struct work_struct *data); | ||
| 26 | void __efx_rx_packet(struct efx_channel *channel, | ||
| 27 | struct efx_rx_buffer *rx_buf, int checksummed); | ||
| 28 | |||
| 29 | #endif /* EFX_RX_H */ | ||
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/sfe4001.c new file mode 100644 index 000000000000..11fa9fb8f48b --- /dev/null +++ b/drivers/net/sfc/sfe4001.c | |||
| @@ -0,0 +1,252 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | /***************************************************************************** | ||
| 11 | * Support for the SFE4001 NIC: driver code for the PCA9539 I/O expander that | ||
| 12 | * controls the PHY power rails, and for the MAX6647 temp. sensor used to check | ||
| 13 | * the PHY | ||
| 14 | */ | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include "efx.h" | ||
| 17 | #include "phy.h" | ||
| 18 | #include "boards.h" | ||
| 19 | #include "falcon.h" | ||
| 20 | #include "falcon_hwdefs.h" | ||
| 21 | #include "mac.h" | ||
| 22 | |||
| 23 | /************************************************************************** | ||
| 24 | * | ||
| 25 | * I2C IO Expander device | ||
| 26 | * | ||
| 27 | **************************************************************************/ | ||
| 28 | #define PCA9539 0x74 | ||
| 29 | |||
| 30 | #define P0_IN 0x00 | ||
| 31 | #define P0_OUT 0x02 | ||
| 32 | #define P0_INVERT 0x04 | ||
| 33 | #define P0_CONFIG 0x06 | ||
| 34 | |||
| 35 | #define P0_EN_1V0X_LBN 0 | ||
| 36 | #define P0_EN_1V0X_WIDTH 1 | ||
| 37 | #define P0_EN_1V2_LBN 1 | ||
| 38 | #define P0_EN_1V2_WIDTH 1 | ||
| 39 | #define P0_EN_2V5_LBN 2 | ||
| 40 | #define P0_EN_2V5_WIDTH 1 | ||
| 41 | #define P0_EN_3V3X_LBN 3 | ||
| 42 | #define P0_EN_3V3X_WIDTH 1 | ||
| 43 | #define P0_EN_5V_LBN 4 | ||
| 44 | #define P0_EN_5V_WIDTH 1 | ||
| 45 | #define P0_SHORTEN_JTAG_LBN 5 | ||
| 46 | #define P0_SHORTEN_JTAG_WIDTH 1 | ||
| 47 | #define P0_X_TRST_LBN 6 | ||
| 48 | #define P0_X_TRST_WIDTH 1 | ||
| 49 | #define P0_DSP_RESET_LBN 7 | ||
| 50 | #define P0_DSP_RESET_WIDTH 1 | ||
| 51 | |||
| 52 | #define P1_IN 0x01 | ||
| 53 | #define P1_OUT 0x03 | ||
| 54 | #define P1_INVERT 0x05 | ||
| 55 | #define P1_CONFIG 0x07 | ||
| 56 | |||
| 57 | #define P1_AFE_PWD_LBN 0 | ||
| 58 | #define P1_AFE_PWD_WIDTH 1 | ||
| 59 | #define P1_DSP_PWD25_LBN 1 | ||
| 60 | #define P1_DSP_PWD25_WIDTH 1 | ||
| 61 | #define P1_RESERVED_LBN 2 | ||
| 62 | #define P1_RESERVED_WIDTH 2 | ||
| 63 | #define P1_SPARE_LBN 4 | ||
| 64 | #define P1_SPARE_WIDTH 4 | ||
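The register map above follows the PCA9539 layout: a pin is an input when its CONFIG bit is 1 and an output when it is 0, OUT drives the level of output pins, and the _LBN constants give each signal's bit position. The standalone sketch below shows how CONFIG/OUT values like those built later in sfe4001_poweron() are derived from these constants; the printed values are illustrative only and this is plain C, not driver code.

    #include <stdio.h>

    /* Bit positions copied from the register map above. */
    #define P0_EN_1V2_LBN  1
    #define P0_EN_2V5_LBN  2
    #define P0_EN_3V3X_LBN 3
    #define P0_EN_5V_LBN   4
    #define P0_X_TRST_LBN  6

    int main(void)
    {
            /* CONFIG: 0 = output, 1 = input; drive every P0 pin as an output. */
            unsigned char cfg = 0x00;

            /* OUT: the poweron/poweroff code treats a set bit as "rail off",
             * so clearing a bit enables the corresponding rail. */
            unsigned char out = 0xff & ~((1 << P0_EN_1V2_LBN) |
                                         (1 << P0_EN_2V5_LBN) |
                                         (1 << P0_EN_3V3X_LBN) |
                                         (1 << P0_EN_5V_LBN) |
                                         (1 << P0_X_TRST_LBN));

            printf("P0_CONFIG=0x%02x P0_OUT=0x%02x\n", cfg, out);
            return 0;
    }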
| 65 | |||
| 66 | |||
| 67 | /************************************************************************** | ||
| 68 | * | ||
| 69 | * Temperature Sensor | ||
| 70 | * | ||
| 71 | **************************************************************************/ | ||
| 72 | #define MAX6647 0x4e | ||
| 73 | |||
| 74 | #define RLTS 0x00 | ||
| 75 | #define RLTE 0x01 | ||
| 76 | #define RSL 0x02 | ||
| 77 | #define RCL 0x03 | ||
| 78 | #define RCRA 0x04 | ||
| 79 | #define RLHN 0x05 | ||
| 80 | #define RLLI 0x06 | ||
| 81 | #define RRHI 0x07 | ||
| 82 | #define RRLS 0x08 | ||
| 83 | #define WCRW 0x0a | ||
| 84 | #define WLHO 0x0b | ||
| 85 | #define WRHA 0x0c | ||
| 86 | #define WRLN 0x0e | ||
| 87 | #define OSHT 0x0f | ||
| 88 | #define REET 0x10 | ||
| 89 | #define RIET 0x11 | ||
| 90 | #define RWOE 0x19 | ||
| 91 | #define RWOI 0x20 | ||
| 92 | #define HYS 0x21 | ||
| 93 | #define QUEUE 0x22 | ||
| 94 | #define MFID 0xfe | ||
| 95 | #define REVID 0xff | ||
| 96 | |||
| 97 | /* Status bits */ | ||
| 98 | #define MAX6647_BUSY (1 << 7) /* ADC is converting */ | ||
| 99 | #define MAX6647_LHIGH (1 << 6) /* Local high temp. alarm */ | ||
| 100 | #define MAX6647_LLOW (1 << 5) /* Local low temp. alarm */ | ||
| 101 | #define MAX6647_RHIGH (1 << 4) /* Remote high temp. alarm */ | ||
| 102 | #define MAX6647_RLOW (1 << 3) /* Remote low temp. alarm */ | ||
| 103 | #define MAX6647_FAULT (1 << 2) /* DXN/DXP short/open circuit */ | ||
| 104 | #define MAX6647_EOT (1 << 1) /* Remote junction overtemp. */ | ||
| 105 | #define MAX6647_IOT (1 << 0) /* Local junction overtemp. */ | ||
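These status bits are reported by the RSL register that the power-control code below reads in order to clear alerts. A standalone sketch of decoding them follows; the status value is hypothetical and this is plain C, not driver code.

    #include <stdio.h>

    /* Status bits copied from the MAX6647 definitions above. */
    #define MAX6647_BUSY  (1 << 7)
    #define MAX6647_RHIGH (1 << 4)
    #define MAX6647_FAULT (1 << 2)
    #define MAX6647_EOT   (1 << 1)

    int main(void)
    {
            /* Pretend this byte was just read from the RSL register. */
            unsigned char status = MAX6647_RHIGH | MAX6647_EOT;

            if (status & MAX6647_BUSY)
                    printf("ADC conversion in progress\n");
            if (status & MAX6647_FAULT)
                    printf("DXN/DXP short or open circuit\n");
            if (status & (MAX6647_RHIGH | MAX6647_EOT))
                    printf("remote (PHY) junction over-temperature\n");
            return 0;
    }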
| 106 | |||
| 107 | static const u8 xgphy_max_temperature = 90; | ||
| 108 | |||
| 109 | void sfe4001_poweroff(struct efx_nic *efx) | ||
| 110 | { | ||
| 111 | struct efx_i2c_interface *i2c = &efx->i2c; | ||
| 112 | |||
| 113 | u8 cfg, out, in; | ||
| 114 | |||
| 115 | EFX_INFO(efx, "%s\n", __func__); | ||
| 116 | |||
| 117 | /* Turn off all power rails */ | ||
| 118 | out = 0xff; | ||
| 119 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | ||
| 120 | |||
| 121 | /* Disable port 1 outputs on IO expander */ | ||
| 122 | cfg = 0xff; | ||
| 123 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | ||
| 124 | |||
| 125 | /* Disable port 0 outputs on IO expander */ | ||
| 126 | cfg = 0xff; | ||
| 127 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | ||
| 128 | |||
| 129 | /* Clear any over-temperature alert */ | ||
| 130 | (void) efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | ||
| 131 | } | ||
| 132 | |||
| 133 | /* This board uses an I2C expander to provide power to the PHY, which needs to | ||
| 134 | * be turned on before the PHY can be used. | ||
| 135 | * Context: Process context, rtnl lock held | ||
| 136 | */ | ||
| 137 | int sfe4001_poweron(struct efx_nic *efx) | ||
| 138 | { | ||
| 139 | struct efx_i2c_interface *i2c = &efx->i2c; | ||
| 140 | unsigned int count; | ||
| 141 | int rc; | ||
| 142 | u8 out, in, cfg; | ||
| 143 | efx_dword_t reg; | ||
| 144 | |||
| 145 | /* 10Xpress has fixed-function LED pins, so there is no board-specific | ||
| 146 | * blink code. */ | ||
| 147 | efx->board_info.blink = tenxpress_phy_blink; | ||
| 148 | |||
| 149 | /* Ensure that XGXS and XAUI SerDes are held in reset */ | ||
| 150 | EFX_POPULATE_DWORD_7(reg, XX_PWRDNA_EN, 1, | ||
| 151 | XX_PWRDNB_EN, 1, | ||
| 152 | XX_RSTPLLAB_EN, 1, | ||
| 153 | XX_RESETA_EN, 1, | ||
| 154 | XX_RESETB_EN, 1, | ||
| 155 | XX_RSTXGXSRX_EN, 1, | ||
| 156 | XX_RSTXGXSTX_EN, 1); | ||
| 157 | falcon_xmac_writel(efx, ®, XX_PWR_RST_REG_MAC); | ||
| 158 | udelay(10); | ||
| 159 | |||
| 160 | /* Set DSP over-temperature alert threshold */ | ||
| 161 | EFX_INFO(efx, "DSP cut-out at %dC\n", xgphy_max_temperature); | ||
| 162 | rc = efx_i2c_write(i2c, MAX6647, WLHO, | ||
| 163 | &xgphy_max_temperature, 1); | ||
| 164 | if (rc) | ||
| 165 | goto fail1; | ||
| 166 | |||
| 167 | /* Read it back and verify */ | ||
| 168 | rc = efx_i2c_read(i2c, MAX6647, RLHN, &in, 1); | ||
| 169 | if (rc) | ||
| 170 | goto fail1; | ||
| 171 | if (in != xgphy_max_temperature) { | ||
| 172 | rc = -EFAULT; | ||
| 173 | goto fail1; | ||
| 174 | } | ||
| 175 | |||
| 176 | /* Clear any previous over-temperature alert */ | ||
| 177 | rc = efx_i2c_read(i2c, MAX6647, RSL, &in, 1); | ||
| 178 | if (rc) | ||
| 179 | goto fail1; | ||
| 180 | |||
| 181 | /* Enable port 0 and port 1 outputs on IO expander */ | ||
| 182 | cfg = 0x00; | ||
| 183 | rc = efx_i2c_write(i2c, PCA9539, P0_CONFIG, &cfg, 1); | ||
| 184 | if (rc) | ||
| 185 | goto fail1; | ||
| 186 | cfg = 0xff & ~(1 << P1_SPARE_LBN); | ||
| 187 | rc = efx_i2c_write(i2c, PCA9539, P1_CONFIG, &cfg, 1); | ||
| 188 | if (rc) | ||
| 189 | goto fail2; | ||
| 190 | |||
| 191 | /* Turn all power off then wait 1 sec. This ensures PHY is reset */ | ||
| 192 | out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | | ||
| 193 | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | | ||
| 194 | (0 << P0_EN_1V0X_LBN)); | ||
| 195 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | ||
| 196 | if (rc) | ||
| 197 | goto fail3; | ||
| 198 | |||
| 199 | schedule_timeout_uninterruptible(HZ); | ||
| 200 | count = 0; | ||
| 201 | do { | ||
| 202 | /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ | ||
| 203 | out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | | ||
| 204 | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | | ||
| 205 | (1 << P0_X_TRST_LBN)); | ||
| 206 | |||
| 207 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | ||
| 208 | if (rc) | ||
| 209 | goto fail3; | ||
| 210 | msleep(10); | ||
| 211 | |||
| 212 | /* Turn on 1V power rail */ | ||
| 213 | out &= ~(1 << P0_EN_1V0X_LBN); | ||
| 214 | rc = efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | ||
| 215 | if (rc) | ||
| 216 | goto fail3; | ||
| 217 | |||
| 218 | EFX_INFO(efx, "waiting for power (attempt %d)...\n", count); | ||
| 219 | |||
| 220 | schedule_timeout_uninterruptible(HZ); | ||
| 221 | |||
| 222 | /* Check DSP is powered */ | ||
| 223 | rc = efx_i2c_read(i2c, PCA9539, P1_IN, &in, 1); | ||
| 224 | if (rc) | ||
| 225 | goto fail3; | ||
| 226 | if (in & (1 << P1_AFE_PWD_LBN)) | ||
| 227 | goto done; | ||
| 228 | |||
| 229 | } while (++count < 20); | ||
| 230 | |||
| 231 | EFX_INFO(efx, "timed out waiting for power\n"); | ||
| 232 | rc = -ETIMEDOUT; | ||
| 233 | goto fail3; | ||
| 234 | |||
| 235 | done: | ||
| 236 | EFX_INFO(efx, "PHY is powered on\n"); | ||
| 237 | return 0; | ||
| 238 | |||
| 239 | fail3: | ||
| 240 | /* Turn off all power rails */ | ||
| 241 | out = 0xff; | ||
| 242 | (void) efx_i2c_write(i2c, PCA9539, P0_OUT, &out, 1); | ||
| 243 | /* Disable port 1 outputs on IO expander */ | ||
| 244 | out = 0xff; | ||
| 245 | (void) efx_i2c_write(i2c, PCA9539, P1_CONFIG, &out, 1); | ||
| 246 | fail2: | ||
| 247 | /* Disable port 0 outputs on IO expander */ | ||
| 248 | out = 0xff; | ||
| 249 | (void) efx_i2c_write(i2c, PCA9539, P0_CONFIG, &out, 1); | ||
| 250 | fail1: | ||
| 251 | return rc; | ||
| 252 | } | ||
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h new file mode 100644 index 000000000000..34412f3d41c9 --- /dev/null +++ b/drivers/net/sfc/spi.h | |||
| @@ -0,0 +1,71 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005 Fen Systems Ltd. | ||
| 4 | * Copyright 2006 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_SPI_H | ||
| 12 | #define EFX_SPI_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | /************************************************************************** | ||
| 17 | * | ||
| 18 | * Basic SPI command set and bit definitions | ||
| 19 | * | ||
| 20 | *************************************************************************/ | ||
| 21 | |||
| 22 | /* | ||
| 23 | * Commands common to all known devices. | ||
| 24 | * | ||
| 25 | */ | ||
| 26 | |||
| 27 | /* Write status register */ | ||
| 28 | #define SPI_WRSR 0x01 | ||
| 29 | |||
| 30 | /* Write data to memory array */ | ||
| 31 | #define SPI_WRITE 0x02 | ||
| 32 | |||
| 33 | /* Read data from memory array */ | ||
| 34 | #define SPI_READ 0x03 | ||
| 35 | |||
| 36 | /* Reset write enable latch */ | ||
| 37 | #define SPI_WRDI 0x04 | ||
| 38 | |||
| 39 | /* Read status register */ | ||
| 40 | #define SPI_RDSR 0x05 | ||
| 41 | |||
| 42 | /* Set write enable latch */ | ||
| 43 | #define SPI_WREN 0x06 | ||
| 44 | |||
| 45 | /* SST: Enable write to status register */ | ||
| 46 | #define SPI_SST_EWSR 0x50 | ||
| 47 | |||
| 48 | /* | ||
| 49 | * Status register bits. Not all bits are supported on all devices. | ||
| 50 | * | ||
| 51 | */ | ||
| 52 | |||
| 53 | /* Write-protect pin enabled */ | ||
| 54 | #define SPI_STATUS_WPEN 0x80 | ||
| 55 | |||
| 56 | /* Block protection bit 2 */ | ||
| 57 | #define SPI_STATUS_BP2 0x10 | ||
| 58 | |||
| 59 | /* Block protection bit 1 */ | ||
| 60 | #define SPI_STATUS_BP1 0x08 | ||
| 61 | |||
| 62 | /* Block protection bit 0 */ | ||
| 63 | #define SPI_STATUS_BP0 0x04 | ||
| 64 | |||
| 65 | /* State of the write enable latch */ | ||
| 66 | #define SPI_STATUS_WEN 0x02 | ||
| 67 | |||
| 68 | /* Device busy flag */ | ||
| 69 | #define SPI_STATUS_NRDY 0x01 | ||
| 70 | |||
| 71 | #endif /* EFX_SPI_H */ | ||
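These opcodes and status bits describe the usual serial-flash/EEPROM flow: issue SPI_WREN to set the write-enable latch, send the write command, then poll SPI_RDSR until the NRDY (busy) bit clears. A standalone sketch of that polling step follows; spi_read_status() is a made-up stand-in for a real status-register read and is not part of this driver.

    #include <stdio.h>

    /* Opcodes and status bits copied from the header above. */
    #define SPI_WREN        0x06
    #define SPI_RDSR        0x05
    #define SPI_STATUS_NRDY 0x01    /* device busy flag */

    /* Hypothetical stand-in for "issue SPI_RDSR and read one status byte":
     * pretend the device stays busy for a few polls after a write. */
    static unsigned char spi_read_status(void)
    {
            static int busy_polls = 3;
            return (busy_polls-- > 0) ? SPI_STATUS_NRDY : 0x00;
    }

    int main(void)
    {
            int polls = 0;

            /* Bounded poll of the busy flag, as a real driver would do
             * after SPI_WREN plus the write command. */
            while ((spi_read_status() & SPI_STATUS_NRDY) && polls < 100)
                    polls++;

            printf("device ready after %d status polls\n", polls);
            return 0;
    }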
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c new file mode 100644 index 000000000000..a2e9f79e47b1 --- /dev/null +++ b/drivers/net/sfc/tenxpress.c | |||
| @@ -0,0 +1,434 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare 802.3an compliant PHY | ||
| 3 | * Copyright 2007 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #include <linux/delay.h> | ||
| 11 | #include <linux/seq_file.h> | ||
| 12 | #include "efx.h" | ||
| 13 | #include "gmii.h" | ||
| 14 | #include "mdio_10g.h" | ||
| 15 | #include "falcon.h" | ||
| 16 | #include "phy.h" | ||
| 17 | #include "falcon_hwdefs.h" | ||
| 18 | #include "boards.h" | ||
| 19 | #include "mac.h" | ||
| 20 | |||
| 21 | /* We expect these MMDs to be in the package */ | ||
| 22 | /* AN not here as mdio_check_mmds() requires STAT2 support */ | ||
| 23 | #define TENXPRESS_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PMAPMD | \ | ||
| 24 | MDIO_MMDREG_DEVS0_PCS | \ | ||
| 25 | MDIO_MMDREG_DEVS0_PHYXS) | ||
| 26 | |||
| 27 | /* We complain if we fail to see the link partner as 10G capable this many | ||
| 28 | * times in a row (must be > 1 as sampling the autoneg. registers is racy) | ||
| 29 | */ | ||
| 30 | #define MAX_BAD_LP_TRIES (5) | ||
| 31 | |||
| 32 | /* Extended control register */ | ||
| 33 | #define PMA_PMD_XCONTROL_REG 0xc000 | ||
| 34 | #define PMA_PMD_LNPGA_POWERDOWN_LBN 8 | ||
| 35 | #define PMA_PMD_LNPGA_POWERDOWN_WIDTH 1 | ||
| 36 | |||
| 37 | /* extended status register */ | ||
| 38 | #define PMA_PMD_XSTATUS_REG 0xc001 | ||
| 39 | #define PMA_PMD_XSTAT_FLP_LBN (12) | ||
| 40 | |||
| 41 | /* LED control register */ | ||
| 42 | #define PMA_PMD_LED_CTRL_REG (0xc007) | ||
| 43 | #define PMA_PMA_LED_ACTIVITY_LBN (3) | ||
| 44 | |||
| 45 | /* LED function override register */ | ||
| 46 | #define PMA_PMD_LED_OVERR_REG (0xc009) | ||
| 47 | /* Bit positions for different LEDs (there are more but not wired on SFE4001)*/ | ||
| 48 | #define PMA_PMD_LED_LINK_LBN (0) | ||
| 49 | #define PMA_PMD_LED_SPEED_LBN (2) | ||
| 50 | #define PMA_PMD_LED_TX_LBN (4) | ||
| 51 | #define PMA_PMD_LED_RX_LBN (6) | ||
| 52 | /* Override settings */ | ||
| 53 | #define PMA_PMD_LED_AUTO (0) /* H/W control */ | ||
| 54 | #define PMA_PMD_LED_ON (1) | ||
| 55 | #define PMA_PMD_LED_OFF (2) | ||
| 56 | #define PMA_PMD_LED_FLASH (3) | ||
| 57 | /* All LEDs under hardware control */ | ||
| 58 | #define PMA_PMD_LED_FULL_AUTO (0) | ||
| 59 | /* Green and Amber under hardware control, Red off */ | ||
| 60 | #define PMA_PMD_LED_DEFAULT (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN) | ||
| 61 | |||
| 62 | |||
| 63 | /* Self test (BIST) control register */ | ||
| 64 | #define PMA_PMD_BIST_CTRL_REG (0xc014) | ||
| 65 | #define PMA_PMD_BIST_BER_LBN (2) /* Run BER test */ | ||
| 66 | #define PMA_PMD_BIST_CONT_LBN (1) /* Run continuous BIST until cleared */ | ||
| 67 | #define PMA_PMD_BIST_SINGLE_LBN (0) /* Run 1 BIST iteration (self clears) */ | ||
| 68 | /* Self test status register */ | ||
| 69 | #define PMA_PMD_BIST_STAT_REG (0xc015) | ||
| 70 | #define PMA_PMD_BIST_ENX_LBN (3) | ||
| 71 | #define PMA_PMD_BIST_PMA_LBN (2) | ||
| 72 | #define PMA_PMD_BIST_RXD_LBN (1) | ||
| 73 | #define PMA_PMD_BIST_AFE_LBN (0) | ||
| 74 | |||
| 75 | #define BIST_MAX_DELAY (1000) | ||
| 76 | #define BIST_POLL_DELAY (10) | ||
| 77 | |||
| 78 | /* Misc register defines */ | ||
| 79 | #define PCS_CLOCK_CTRL_REG 0xd801 | ||
| 80 | #define PLL312_RST_N_LBN 2 | ||
| 81 | |||
| 82 | #define PCS_SOFT_RST2_REG 0xd806 | ||
| 83 | #define SERDES_RST_N_LBN 13 | ||
| 84 | #define XGXS_RST_N_LBN 12 | ||
| 85 | |||
| 86 | #define PCS_TEST_SELECT_REG 0xd807 /* PRM 10.5.8 */ | ||
| 87 | #define CLK312_EN_LBN 3 | ||
| 88 | |||
| 89 | /* Boot status register */ | ||
| 90 | #define PCS_BOOT_STATUS_REG (0xd000) | ||
| 91 | #define PCS_BOOT_FATAL_ERR_LBN (0) | ||
| 92 | #define PCS_BOOT_PROGRESS_LBN (1) | ||
| 93 | #define PCS_BOOT_PROGRESS_WIDTH (2) | ||
| 94 | #define PCS_BOOT_COMPLETE_LBN (3) | ||
| 95 | #define PCS_BOOT_MAX_DELAY (100) | ||
| 96 | #define PCS_BOOT_POLL_DELAY (10) | ||
| 97 | |||
| 98 | /* Time to wait between powering down the LNPGA and turning off the power | ||
| 99 | * rails */ | ||
| 100 | #define LNPGA_PDOWN_WAIT (HZ / 5) | ||
| 101 | |||
| 102 | static int crc_error_reset_threshold = 100; | ||
| 103 | module_param(crc_error_reset_threshold, int, 0644); | ||
| 104 | MODULE_PARM_DESC(crc_error_reset_threshold, | ||
| 105 | "Max number of CRC errors before XAUI reset"); | ||
| 106 | |||
| 107 | struct tenxpress_phy_data { | ||
| 108 | enum tenxpress_state state; | ||
| 109 | atomic_t bad_crc_count; | ||
| 110 | int bad_lp_tries; | ||
| 111 | }; | ||
| 112 | |||
| 113 | static int tenxpress_state_is(struct efx_nic *efx, int state) | ||
| 114 | { | ||
| 115 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
| 116 | return (phy_data != NULL) && (state == phy_data->state); | ||
| 117 | } | ||
| 118 | |||
| 119 | void tenxpress_set_state(struct efx_nic *efx, | ||
| 120 | enum tenxpress_state state) | ||
| 121 | { | ||
| 122 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
| 123 | if (phy_data != NULL) | ||
| 124 | phy_data->state = state; | ||
| 125 | } | ||
| 126 | |||
| 127 | void tenxpress_crc_err(struct efx_nic *efx) | ||
| 128 | { | ||
| 129 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
| 130 | if (phy_data != NULL) | ||
| 131 | atomic_inc(&phy_data->bad_crc_count); | ||
| 132 | } | ||
| 133 | |||
| 134 | /* Check that the C166 has booted successfully */ | ||
| 135 | static int tenxpress_phy_check(struct efx_nic *efx) | ||
| 136 | { | ||
| 137 | int phy_id = efx->mii.phy_id; | ||
| 138 | int count = PCS_BOOT_MAX_DELAY / PCS_BOOT_POLL_DELAY; | ||
| 139 | int boot_stat; | ||
| 140 | |||
| 141 | /* Wait for the boot to complete (or not) */ | ||
| 142 | while (count) { | ||
| 143 | boot_stat = mdio_clause45_read(efx, phy_id, | ||
| 144 | MDIO_MMD_PCS, | ||
| 145 | PCS_BOOT_STATUS_REG); | ||
| 146 | if (boot_stat & (1 << PCS_BOOT_COMPLETE_LBN)) | ||
| 147 | break; | ||
| 148 | count--; | ||
| 149 | udelay(PCS_BOOT_POLL_DELAY); | ||
| 150 | } | ||
| 151 | |||
| 152 | if (!count) { | ||
| 153 | EFX_ERR(efx, "%s: PHY boot timed out. Last status " | ||
| 154 | "%x\n", __func__, | ||
| 155 | (boot_stat >> PCS_BOOT_PROGRESS_LBN) & | ||
| 156 | ((1 << PCS_BOOT_PROGRESS_WIDTH) - 1)); | ||
| 157 | return -ETIMEDOUT; | ||
| 158 | } | ||
| 159 | |||
| 160 | return 0; | ||
| 161 | } | ||
| 162 | |||
| 163 | static void tenxpress_reset_xaui(struct efx_nic *efx); | ||
| 164 | |||
| 165 | static int tenxpress_init(struct efx_nic *efx) | ||
| 166 | { | ||
| 167 | int rc, reg; | ||
| 168 | |||
| 169 | /* Turn on the clock */ | ||
| 170 | reg = (1 << CLK312_EN_LBN); | ||
| 171 | mdio_clause45_write(efx, efx->mii.phy_id, | ||
| 172 | MDIO_MMD_PCS, PCS_TEST_SELECT_REG, reg); | ||
| 173 | |||
| 174 | rc = tenxpress_phy_check(efx); | ||
| 175 | if (rc < 0) | ||
| 176 | return rc; | ||
| 177 | |||
| 178 | /* Set the LEDs up as: Green = Link, Amber = Link/Act, Red = Off */ | ||
| 179 | reg = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 180 | MDIO_MMD_PMAPMD, PMA_PMD_LED_CTRL_REG); | ||
| 181 | reg |= (1 << PMA_PMA_LED_ACTIVITY_LBN); | ||
| 182 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 183 | PMA_PMD_LED_CTRL_REG, reg); | ||
| 184 | |||
| 185 | reg = PMA_PMD_LED_DEFAULT; | ||
| 186 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 187 | PMA_PMD_LED_OVERR_REG, reg); | ||
| 188 | |||
| 189 | return rc; | ||
| 190 | } | ||
| 191 | |||
| 192 | static int tenxpress_phy_init(struct efx_nic *efx) | ||
| 193 | { | ||
| 194 | struct tenxpress_phy_data *phy_data; | ||
| 195 | int rc = 0; | ||
| 196 | |||
| 197 | phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); | ||
| 198 | efx->phy_data = phy_data; | ||
| 199 | |||
| 200 | tenxpress_set_state(efx, TENXPRESS_STATUS_NORMAL); | ||
| 201 | |||
| 202 | rc = mdio_clause45_wait_reset_mmds(efx, | ||
| 203 | TENXPRESS_REQUIRED_DEVS); | ||
| 204 | if (rc < 0) | ||
| 205 | goto fail; | ||
| 206 | |||
| 207 | rc = mdio_clause45_check_mmds(efx, TENXPRESS_REQUIRED_DEVS, 0); | ||
| 208 | if (rc < 0) | ||
| 209 | goto fail; | ||
| 210 | |||
| 211 | rc = tenxpress_init(efx); | ||
| 212 | if (rc < 0) | ||
| 213 | goto fail; | ||
| 214 | |||
| 215 | schedule_timeout_uninterruptible(HZ / 5); /* 200ms */ | ||
| 216 | |||
| 217 | /* Let the XGXS and SerDes out of reset; this also resets the 10Xpress */ | ||
| 218 | falcon_reset_xaui(efx); | ||
| 219 | |||
| 220 | return 0; | ||
| 221 | |||
| 222 | fail: | ||
| 223 | kfree(efx->phy_data); | ||
| 224 | efx->phy_data = NULL; | ||
| 225 | return rc; | ||
| 226 | } | ||
| 227 | |||
| 228 | static void tenxpress_set_bad_lp(struct efx_nic *efx, int bad_lp) | ||
| 229 | { | ||
| 230 | struct tenxpress_phy_data *pd = efx->phy_data; | ||
| 231 | int reg; | ||
| 232 | |||
| 233 | /* Nothing to do if all is well and was previously so. */ | ||
| 234 | if (!(bad_lp || pd->bad_lp_tries)) | ||
| 235 | return; | ||
| 236 | |||
| 237 | reg = mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 238 | MDIO_MMD_PMAPMD, PMA_PMD_LED_OVERR_REG); | ||
| 239 | |||
| 240 | if (bad_lp) | ||
| 241 | pd->bad_lp_tries++; | ||
| 242 | else | ||
| 243 | pd->bad_lp_tries = 0; | ||
| 244 | |||
| 245 | if (pd->bad_lp_tries == MAX_BAD_LP_TRIES) { | ||
| 246 | pd->bad_lp_tries = 0; /* Restart count */ | ||
| 247 | reg &= ~(PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN); | ||
| 248 | reg |= (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN); | ||
| 249 | EFX_ERR(efx, "This NIC appears to be plugged into" | ||
| 250 | " a port that is not 10GBASE-T capable.\n" | ||
| 251 | " This PHY is 10GBASE-T ONLY, so no link can" | ||
| 252 | " be established.\n"); | ||
| 253 | } else { | ||
| 254 | reg |= (PMA_PMD_LED_OFF << PMA_PMD_LED_RX_LBN); | ||
| 255 | } | ||
| 256 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 257 | PMA_PMD_LED_OVERR_REG, reg); | ||
| 258 | } | ||
| 259 | |||
| 260 | /* Check link status and return a boolean OK value. If the link is NOT | ||
| 261 | * OK, we have a quick rummage round to see if we appear to be plugged | ||
| 262 | * into a non-10GBT port and, if so, warn the user that they won't get | ||
| 263 | * a link any time soon as we are 10GBT only, unless the caller specified | ||
| 264 | * not to do this check (it isn't useful in loopback). */ | ||
| 265 | static int tenxpress_link_ok(struct efx_nic *efx, int check_lp) | ||
| 266 | { | ||
| 267 | int ok = mdio_clause45_links_ok(efx, TENXPRESS_REQUIRED_DEVS); | ||
| 268 | |||
| 269 | if (ok) { | ||
| 270 | tenxpress_set_bad_lp(efx, 0); | ||
| 271 | } else if (check_lp) { | ||
| 272 | /* Are we plugged into the wrong sort of link? */ | ||
| 273 | int bad_lp = 0; | ||
| 274 | int phy_id = efx->mii.phy_id; | ||
| 275 | int an_stat = mdio_clause45_read(efx, phy_id, MDIO_MMD_AN, | ||
| 276 | MDIO_AN_STATUS); | ||
| 277 | int xphy_stat = mdio_clause45_read(efx, phy_id, | ||
| 278 | MDIO_MMD_PMAPMD, | ||
| 279 | PMA_PMD_XSTATUS_REG); | ||
| 280 | /* Are we plugged into anything that sends FLPs? If | ||
| 281 | * not we can't distinguish between not being plugged | ||
| 282 | * in and being plugged into a non-AN antique. The FLP | ||
| 283 | * bit has the advantage of not clearing when autoneg | ||
| 284 | * restarts. */ | ||
| 285 | if (!(xphy_stat & (1 << PMA_PMD_XSTAT_FLP_LBN))) { | ||
| 286 | tenxpress_set_bad_lp(efx, 0); | ||
| 287 | return ok; | ||
| 288 | } | ||
| 289 | |||
| 290 | /* If it can do 10GBT it must be XNP capable */ | ||
| 291 | bad_lp = !(an_stat & (1 << MDIO_AN_STATUS_XNP_LBN)); | ||
| 292 | if (!bad_lp && (an_stat & (1 << MDIO_AN_STATUS_PAGE_LBN))) { | ||
| 293 | bad_lp = !(mdio_clause45_read(efx, phy_id, | ||
| 294 | MDIO_MMD_AN, MDIO_AN_10GBT_STATUS) & | ||
| 295 | (1 << MDIO_AN_10GBT_STATUS_LP_10G_LBN)); | ||
| 296 | } | ||
| 297 | tenxpress_set_bad_lp(efx, bad_lp); | ||
| 298 | } | ||
| 299 | return ok; | ||
| 300 | } | ||
| 301 | |||
| 302 | static void tenxpress_phy_reconfigure(struct efx_nic *efx) | ||
| 303 | { | ||
| 304 | if (!tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL)) | ||
| 305 | return; | ||
| 306 | |||
| 307 | efx->link_up = tenxpress_link_ok(efx, 0); | ||
| 308 | efx->link_options = GM_LPA_10000FULL; | ||
| 309 | } | ||
| 310 | |||
| 311 | static void tenxpress_phy_clear_interrupt(struct efx_nic *efx) | ||
| 312 | { | ||
| 313 | /* Nothing done here - LASI interrupts aren't reliable so poll */ | ||
| 314 | } | ||
| 315 | |||
| 316 | |||
| 317 | /* Poll PHY for interrupt */ | ||
| 318 | static int tenxpress_phy_check_hw(struct efx_nic *efx) | ||
| 319 | { | ||
| 320 | struct tenxpress_phy_data *phy_data = efx->phy_data; | ||
| 321 | int phy_up = tenxpress_state_is(efx, TENXPRESS_STATUS_NORMAL); | ||
| 322 | int link_ok; | ||
| 323 | |||
| 324 | link_ok = phy_up && tenxpress_link_ok(efx, 1); | ||
| 325 | |||
| 326 | if (link_ok != efx->link_up) | ||
| 327 | falcon_xmac_sim_phy_event(efx); | ||
| 328 | |||
| 329 | /* Nothing to check if we've already shut down the PHY */ | ||
| 330 | if (!phy_up) | ||
| 331 | return 0; | ||
| 332 | |||
| 333 | if (atomic_read(&phy_data->bad_crc_count) > crc_error_reset_threshold) { | ||
| 334 | EFX_ERR(efx, "Resetting XAUI due to too many CRC errors\n"); | ||
| 335 | falcon_reset_xaui(efx); | ||
| 336 | atomic_set(&phy_data->bad_crc_count, 0); | ||
| 337 | } | ||
| 338 | |||
| 339 | return 0; | ||
| 340 | } | ||
| 341 | |||
| 342 | static void tenxpress_phy_fini(struct efx_nic *efx) | ||
| 343 | { | ||
| 344 | int reg; | ||
| 345 | |||
| 346 | /* Power down the LNPGA */ | ||
| 347 | reg = (1 << PMA_PMD_LNPGA_POWERDOWN_LBN); | ||
| 348 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 349 | PMA_PMD_XCONTROL_REG, reg); | ||
| 350 | |||
| 351 | /* Waiting here ensures that the board fini, which can turn off the | ||
| 352 | * power to the PHY, won't get run until the LNPGA powerdown has been | ||
| 353 | * given long enough to complete. */ | ||
| 354 | schedule_timeout_uninterruptible(LNPGA_PDOWN_WAIT); /* 200 ms */ | ||
| 355 | |||
| 356 | kfree(efx->phy_data); | ||
| 357 | efx->phy_data = NULL; | ||
| 358 | } | ||
| 359 | |||
| 360 | |||
| 361 | /* Set the RX and TX LEDs and Link LED flashing. The other LEDs | ||
| 362 | * (which probably aren't wired anyway) are left in AUTO mode */ | ||
| 363 | void tenxpress_phy_blink(struct efx_nic *efx, int blink) | ||
| 364 | { | ||
| 365 | int reg; | ||
| 366 | |||
| 367 | if (blink) | ||
| 368 | reg = (PMA_PMD_LED_FLASH << PMA_PMD_LED_TX_LBN) | | ||
| 369 | (PMA_PMD_LED_FLASH << PMA_PMD_LED_RX_LBN) | | ||
| 370 | (PMA_PMD_LED_FLASH << PMA_PMD_LED_LINK_LBN); | ||
| 371 | else | ||
| 372 | reg = PMA_PMD_LED_DEFAULT; | ||
| 373 | |||
| 374 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 375 | PMA_PMD_LED_OVERR_REG, reg); | ||
| 376 | } | ||
| 377 | |||
| 378 | static void tenxpress_reset_xaui(struct efx_nic *efx) | ||
| 379 | { | ||
| 380 | int phy = efx->mii.phy_id; | ||
| 381 | int clk_ctrl, test_select, soft_rst2; | ||
| 382 | |||
| 383 | /* The real work is done on clock_ctrl; the other resets are thought to | ||
| 384 | * be optional, but they make the reset more reliable. | ||
| 385 | */ | ||
| 386 | |||
| 387 | /* Read */ | ||
| 388 | clk_ctrl = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, | ||
| 389 | PCS_CLOCK_CTRL_REG); | ||
| 390 | test_select = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, | ||
| 391 | PCS_TEST_SELECT_REG); | ||
| 392 | soft_rst2 = mdio_clause45_read(efx, phy, MDIO_MMD_PCS, | ||
| 393 | PCS_SOFT_RST2_REG); | ||
| 394 | |||
| 395 | /* Put in reset */ | ||
| 396 | test_select &= ~(1 << CLK312_EN_LBN); | ||
| 397 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 398 | PCS_TEST_SELECT_REG, test_select); | ||
| 399 | |||
| 400 | soft_rst2 &= ~((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN)); | ||
| 401 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 402 | PCS_SOFT_RST2_REG, soft_rst2); | ||
| 403 | |||
| 404 | clk_ctrl &= ~(1 << PLL312_RST_N_LBN); | ||
| 405 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 406 | PCS_CLOCK_CTRL_REG, clk_ctrl); | ||
| 407 | udelay(10); | ||
| 408 | |||
| 409 | /* Remove reset */ | ||
| 410 | clk_ctrl |= (1 << PLL312_RST_N_LBN); | ||
| 411 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 412 | PCS_CLOCK_CTRL_REG, clk_ctrl); | ||
| 413 | udelay(10); | ||
| 414 | |||
| 415 | soft_rst2 |= ((1 << XGXS_RST_N_LBN) | (1 << SERDES_RST_N_LBN)); | ||
| 416 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 417 | PCS_SOFT_RST2_REG, soft_rst2); | ||
| 418 | udelay(10); | ||
| 419 | |||
| 420 | test_select |= (1 << CLK312_EN_LBN); | ||
| 421 | mdio_clause45_write(efx, phy, MDIO_MMD_PCS, | ||
| 422 | PCS_TEST_SELECT_REG, test_select); | ||
| 423 | udelay(10); | ||
| 424 | } | ||
| 425 | |||
| 426 | struct efx_phy_operations falcon_tenxpress_phy_ops = { | ||
| 427 | .init = tenxpress_phy_init, | ||
| 428 | .reconfigure = tenxpress_phy_reconfigure, | ||
| 429 | .check_hw = tenxpress_phy_check_hw, | ||
| 430 | .fini = tenxpress_phy_fini, | ||
| 431 | .clear_interrupt = tenxpress_phy_clear_interrupt, | ||
| 432 | .reset_xaui = tenxpress_reset_xaui, | ||
| 433 | .mmds = TENXPRESS_REQUIRED_DEVS, | ||
| 434 | }; | ||
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c new file mode 100644 index 000000000000..fbb866b2185e --- /dev/null +++ b/drivers/net/sfc/tx.c | |||
| @@ -0,0 +1,452 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2005-2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2005-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/pci.h> | ||
| 12 | #include <linux/tcp.h> | ||
| 13 | #include <linux/ip.h> | ||
| 14 | #include <linux/in.h> | ||
| 15 | #include <linux/if_ether.h> | ||
| 16 | #include <linux/highmem.h> | ||
| 17 | #include "net_driver.h" | ||
| 18 | #include "tx.h" | ||
| 19 | #include "efx.h" | ||
| 20 | #include "falcon.h" | ||
| 21 | #include "workarounds.h" | ||
| 22 | |||
| 23 | /* | ||
| 24 | * TX descriptor ring full threshold | ||
| 25 | * | ||
| 26 | * The tx_queue descriptor ring fill-level must fall below this value | ||
| 27 | * before we restart the netif queue | ||
| 28 | */ | ||
| 29 | #define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ | ||
| 30 | (_tx_queue->efx->type->txd_ring_mask / 2u) | ||
| 31 | |||
| 32 | /* We want to be able to nest calls to netif_stop_queue(), since each | ||
| 33 | * channel can have an individual stop on the queue. | ||
| 34 | */ | ||
| 35 | void efx_stop_queue(struct efx_nic *efx) | ||
| 36 | { | ||
| 37 | spin_lock_bh(&efx->netif_stop_lock); | ||
| 38 | EFX_TRACE(efx, "stop TX queue\n"); | ||
| 39 | |||
| 40 | atomic_inc(&efx->netif_stop_count); | ||
| 41 | netif_stop_queue(efx->net_dev); | ||
| 42 | |||
| 43 | spin_unlock_bh(&efx->netif_stop_lock); | ||
| 44 | } | ||
| 45 | |||
| 46 | /* Wake netif's TX queue | ||
| 47 | * We want to be able to nest calls to netif_stop_queue(), since each | ||
| 48 | * channel can have an individual stop on the queue. | ||
| 49 | */ | ||
| 50 | inline void efx_wake_queue(struct efx_nic *efx) | ||
| 51 | { | ||
| 52 | local_bh_disable(); | ||
| 53 | if (atomic_dec_and_lock(&efx->netif_stop_count, | ||
| 54 | &efx->netif_stop_lock)) { | ||
| 55 | EFX_TRACE(efx, "waking TX queue\n"); | ||
| 56 | netif_wake_queue(efx->net_dev); | ||
| 57 | spin_unlock(&efx->netif_stop_lock); | ||
| 58 | } | ||
| 59 | local_bh_enable(); | ||
| 60 | } | ||
| 61 | |||
| 62 | static inline void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, | ||
| 63 | struct efx_tx_buffer *buffer) | ||
| 64 | { | ||
| 65 | if (buffer->unmap_len) { | ||
| 66 | struct pci_dev *pci_dev = tx_queue->efx->pci_dev; | ||
| 67 | if (buffer->unmap_single) | ||
| 68 | pci_unmap_single(pci_dev, buffer->unmap_addr, | ||
| 69 | buffer->unmap_len, PCI_DMA_TODEVICE); | ||
| 70 | else | ||
| 71 | pci_unmap_page(pci_dev, buffer->unmap_addr, | ||
| 72 | buffer->unmap_len, PCI_DMA_TODEVICE); | ||
| 73 | buffer->unmap_len = 0; | ||
| 74 | buffer->unmap_single = 0; | ||
| 75 | } | ||
| 76 | |||
| 77 | if (buffer->skb) { | ||
| 78 | dev_kfree_skb_any((struct sk_buff *) buffer->skb); | ||
| 79 | buffer->skb = NULL; | ||
| 80 | EFX_TRACE(tx_queue->efx, "TX queue %d transmission id %x " | ||
| 81 | "complete\n", tx_queue->queue, tx_queue->read_count); | ||
| 82 | } | ||
| 83 | } | ||
| 84 | |||
| 85 | |||
| 86 | /* | ||
| 87 | * Add a socket buffer to a TX queue | ||
| 88 | * | ||
| 89 | * This maps all fragments of a socket buffer for DMA and adds them to | ||
| 90 | * the TX queue. The queue's insert pointer will be incremented by | ||
| 91 | * the number of fragments in the socket buffer. | ||
| 92 | * | ||
| 93 | * If any DMA mapping fails, any mapped fragments will be unmapped and | ||
| 94 | * the queue's insert pointer will be restored to its original value. | ||
| 95 | * | ||
| 96 | * Returns NETDEV_TX_OK or NETDEV_TX_BUSY | ||
| 97 | * You must hold netif_tx_lock() to call this function. | ||
| 98 | */ | ||
| 99 | static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue, | ||
| 100 | const struct sk_buff *skb) | ||
| 101 | { | ||
| 102 | struct efx_nic *efx = tx_queue->efx; | ||
| 103 | struct pci_dev *pci_dev = efx->pci_dev; | ||
| 104 | struct efx_tx_buffer *buffer; | ||
| 105 | skb_frag_t *fragment; | ||
| 106 | struct page *page; | ||
| 107 | int page_offset; | ||
| 108 | unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; | ||
| 109 | dma_addr_t dma_addr, unmap_addr = 0; | ||
| 110 | unsigned int dma_len; | ||
| 111 | unsigned unmap_single; | ||
| 112 | int q_space, i = 0; | ||
| 113 | int rc = NETDEV_TX_OK; | ||
| 114 | |||
| 115 | EFX_BUG_ON_PARANOID(tx_queue->write_count != tx_queue->insert_count); | ||
| 116 | |||
| 117 | /* Get size of the initial fragment */ | ||
| 118 | len = skb_headlen(skb); | ||
| 119 | |||
| 120 | fill_level = tx_queue->insert_count - tx_queue->old_read_count; | ||
| 121 | q_space = efx->type->txd_ring_mask - 1 - fill_level; | ||
| 122 | |||
| 123 | /* Map for DMA. Use pci_map_single rather than pci_map_page | ||
| 124 | * since this is more efficient on machines with sparse | ||
| 125 | * memory. | ||
| 126 | */ | ||
| 127 | unmap_single = 1; | ||
| 128 | dma_addr = pci_map_single(pci_dev, skb->data, len, PCI_DMA_TODEVICE); | ||
| 129 | |||
| 130 | /* Process all fragments */ | ||
| 131 | while (1) { | ||
| 132 | if (unlikely(pci_dma_mapping_error(dma_addr))) | ||
| 133 | goto pci_err; | ||
| 134 | |||
| 135 | /* Store fields for marking in the per-fragment final | ||
| 136 | * descriptor */ | ||
| 137 | unmap_len = len; | ||
| 138 | unmap_addr = dma_addr; | ||
| 139 | |||
| 140 | /* Add to TX queue, splitting across DMA boundaries */ | ||
| 141 | do { | ||
| 142 | if (unlikely(q_space-- <= 0)) { | ||
| 143 | /* It might be that completions have | ||
| 144 | * happened since the xmit path last | ||
| 145 | * checked. Update the xmit path's | ||
| 146 | * copy of read_count. | ||
| 147 | */ | ||
| 148 | ++tx_queue->stopped; | ||
| 149 | /* This memory barrier protects the | ||
| 150 | * change of stopped from the access | ||
| 151 | * of read_count. */ | ||
| 152 | smp_mb(); | ||
| 153 | tx_queue->old_read_count = | ||
| 154 | *(volatile unsigned *) | ||
| 155 | &tx_queue->read_count; | ||
| 156 | fill_level = (tx_queue->insert_count | ||
| 157 | - tx_queue->old_read_count); | ||
| 158 | q_space = (efx->type->txd_ring_mask - 1 - | ||
| 159 | fill_level); | ||
| 160 | if (unlikely(q_space-- <= 0)) | ||
| 161 | goto stop; | ||
| 162 | smp_mb(); | ||
| 163 | --tx_queue->stopped; | ||
| 164 | } | ||
| 165 | |||
| 166 | insert_ptr = (tx_queue->insert_count & | ||
| 167 | efx->type->txd_ring_mask); | ||
| 168 | buffer = &tx_queue->buffer[insert_ptr]; | ||
| 169 | EFX_BUG_ON_PARANOID(buffer->skb); | ||
| 170 | EFX_BUG_ON_PARANOID(buffer->len); | ||
| 171 | EFX_BUG_ON_PARANOID(buffer->continuation != 1); | ||
| 172 | EFX_BUG_ON_PARANOID(buffer->unmap_len); | ||
| 173 | |||
| 174 | dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); | ||
| 175 | if (likely(dma_len > len)) | ||
| 176 | dma_len = len; | ||
| 177 | |||
| 178 | misalign = (unsigned)dma_addr & efx->type->bug5391_mask; | ||
| 179 | if (misalign && dma_len + misalign > 512) | ||
| 180 | dma_len = 512 - misalign; | ||
| 181 | |||
| 182 | /* Fill out per descriptor fields */ | ||
| 183 | buffer->len = dma_len; | ||
| 184 | buffer->dma_addr = dma_addr; | ||
| 185 | len -= dma_len; | ||
| 186 | dma_addr += dma_len; | ||
| 187 | ++tx_queue->insert_count; | ||
| 188 | } while (len); | ||
| 189 | |||
| 190 | /* Transfer ownership of the unmapping to the final buffer */ | ||
| 191 | buffer->unmap_addr = unmap_addr; | ||
| 192 | buffer->unmap_single = unmap_single; | ||
| 193 | buffer->unmap_len = unmap_len; | ||
| 194 | unmap_len = 0; | ||
| 195 | |||
| 196 | /* Get address and size of next fragment */ | ||
| 197 | if (i >= skb_shinfo(skb)->nr_frags) | ||
| 198 | break; | ||
| 199 | fragment = &skb_shinfo(skb)->frags[i]; | ||
| 200 | len = fragment->size; | ||
| 201 | page = fragment->page; | ||
| 202 | page_offset = fragment->page_offset; | ||
| 203 | i++; | ||
| 204 | /* Map for DMA */ | ||
| 205 | unmap_single = 0; | ||
| 206 | dma_addr = pci_map_page(pci_dev, page, page_offset, len, | ||
| 207 | PCI_DMA_TODEVICE); | ||
| 208 | } | ||
| 209 | |||
| 210 | /* Transfer ownership of the skb to the final buffer */ | ||
| 211 | buffer->skb = skb; | ||
| 212 | buffer->continuation = 0; | ||
| 213 | |||
| 214 | /* Pass off to hardware */ | ||
| 215 | falcon_push_buffers(tx_queue); | ||
| 216 | |||
| 217 | return NETDEV_TX_OK; | ||
| 218 | |||
| 219 | pci_err: | ||
| 220 | EFX_ERR_RL(efx, " TX queue %d could not map skb with %d bytes %d " | ||
| 221 | "fragments for DMA\n", tx_queue->queue, skb->len, | ||
| 222 | skb_shinfo(skb)->nr_frags + 1); | ||
| 223 | |||
| 224 | /* Mark the packet as transmitted, and free the SKB ourselves */ | ||
| 225 | dev_kfree_skb_any((struct sk_buff *)skb); | ||
| 226 | goto unwind; | ||
| 227 | |||
| 228 | stop: | ||
| 229 | rc = NETDEV_TX_BUSY; | ||
| 230 | |||
| 231 | if (tx_queue->stopped == 1) | ||
| 232 | efx_stop_queue(efx); | ||
| 233 | |||
| 234 | unwind: | ||
| 235 | /* Work backwards until we hit the original insert pointer value */ | ||
| 236 | while (tx_queue->insert_count != tx_queue->write_count) { | ||
| 237 | --tx_queue->insert_count; | ||
| 238 | insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; | ||
| 239 | buffer = &tx_queue->buffer[insert_ptr]; | ||
| 240 | efx_dequeue_buffer(tx_queue, buffer); | ||
| 241 | buffer->len = 0; | ||
| 242 | } | ||
| 243 | |||
| 244 | /* Free the fragment we were mid-way through pushing */ | ||
| 245 | if (unmap_len) | ||
| 246 | pci_unmap_page(pci_dev, unmap_addr, unmap_len, | ||
| 247 | PCI_DMA_TODEVICE); | ||
| 248 | |||
| 249 | return rc; | ||
| 250 | } | ||
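The inner loop of efx_enqueue_skb() above caps each descriptor at the distance to the next DMA boundary, computed as ((~dma_addr) & tx_dma_mask) + 1. A standalone sketch of that computation with hypothetical mask and address values follows (plain C, not driver code).

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical 4KB DMA boundary, i.e. tx_dma_mask = 0xFFF. */
            unsigned long long tx_dma_mask = 0xFFFull;
            unsigned long long dma_addr = 0x12345E80ull;
            unsigned int len = 1024;        /* bytes left in this fragment */

            /* Bytes from dma_addr up to the next boundary. */
            unsigned int dma_len =
                    (unsigned int)(((~dma_addr) & tx_dma_mask) + 1);

            /* Never emit more than the fragment actually contains. */
            if (dma_len > len)
                    dma_len = len;

            printf("descriptor covers %u of %u bytes (next boundary at 0x%llx)\n",
                   dma_len, len, dma_addr + dma_len);
            return 0;
    }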
| 251 | |||
| 252 | /* Remove packets from the TX queue | ||
| 253 | * | ||
| 254 | * This removes packets from the TX queue, up to and including the | ||
| 255 | * specified index. | ||
| 256 | */ | ||
| 257 | static inline void efx_dequeue_buffers(struct efx_tx_queue *tx_queue, | ||
| 258 | unsigned int index) | ||
| 259 | { | ||
| 260 | struct efx_nic *efx = tx_queue->efx; | ||
| 261 | unsigned int stop_index, read_ptr; | ||
| 262 | unsigned int mask = tx_queue->efx->type->txd_ring_mask; | ||
| 263 | |||
| 264 | stop_index = (index + 1) & mask; | ||
| 265 | read_ptr = tx_queue->read_count & mask; | ||
| 266 | |||
| 267 | while (read_ptr != stop_index) { | ||
| 268 | struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; | ||
| 269 | if (unlikely(buffer->len == 0)) { | ||
| 270 | EFX_ERR(tx_queue->efx, "TX queue %d spurious TX " | ||
| 271 | "completion id %x\n", tx_queue->queue, | ||
| 272 | read_ptr); | ||
| 273 | efx_schedule_reset(efx, RESET_TYPE_TX_SKIP); | ||
| 274 | return; | ||
| 275 | } | ||
| 276 | |||
| 277 | efx_dequeue_buffer(tx_queue, buffer); | ||
| 278 | buffer->continuation = 1; | ||
| 279 | buffer->len = 0; | ||
| 280 | |||
| 281 | ++tx_queue->read_count; | ||
| 282 | read_ptr = tx_queue->read_count & mask; | ||
| 283 | } | ||
| 284 | } | ||
| 285 | |||
| 286 | /* Initiate a packet transmission on the specified TX queue. | ||
| 287 | * Note that returning anything other than NETDEV_TX_OK will cause the | ||
| 288 | * OS to free the skb. | ||
| 289 | * | ||
| 290 | * This function is split out from efx_hard_start_xmit to allow the | ||
| 291 | * loopback test to direct packets via specific TX queues. It is | ||
| 292 | * therefore a non-static inline, so as not to penalise performance | ||
| 293 | * for non-loopback transmissions. | ||
| 294 | * | ||
| 295 | * Context: netif_tx_lock held | ||
| 296 | */ | ||
| 297 | inline int efx_xmit(struct efx_nic *efx, | ||
| 298 | struct efx_tx_queue *tx_queue, struct sk_buff *skb) | ||
| 299 | { | ||
| 300 | int rc; | ||
| 301 | |||
| 302 | /* Map fragments for DMA and add to TX queue */ | ||
| 303 | rc = efx_enqueue_skb(tx_queue, skb); | ||
| 304 | if (unlikely(rc != NETDEV_TX_OK)) | ||
| 305 | goto out; | ||
| 306 | |||
| 307 | /* Update last TX timer */ | ||
| 308 | efx->net_dev->trans_start = jiffies; | ||
| 309 | |||
| 310 | out: | ||
| 311 | return rc; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* Initiate a packet transmission. We use one channel per CPU | ||
| 315 | * (sharing when we have more CPUs than channels). On Falcon, the TX | ||
| 316 | * completion events will be directed back to the CPU that transmitted | ||
| 317 | * the packet, which should be cache-efficient. | ||
| 318 | * | ||
| 319 | * Context: non-blocking. | ||
| 320 | * Note that returning anything other than NETDEV_TX_OK will cause the | ||
| 321 | * OS to free the skb. | ||
| 322 | */ | ||
| 323 | int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev) | ||
| 324 | { | ||
| 325 | struct efx_nic *efx = net_dev->priv; | ||
| 326 | return efx_xmit(efx, &efx->tx_queue[0], skb); | ||
| 327 | } | ||
| 328 | |||
| 329 | void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) | ||
| 330 | { | ||
| 331 | unsigned fill_level; | ||
| 332 | struct efx_nic *efx = tx_queue->efx; | ||
| 333 | |||
| 334 | EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); | ||
| 335 | |||
| 336 | efx_dequeue_buffers(tx_queue, index); | ||
| 337 | |||
| 338 | /* See if we need to restart the netif queue. This barrier | ||
| 339 | * separates the update of read_count from the test of | ||
| 340 | * stopped. */ | ||
| 341 | smp_mb(); | ||
| 342 | if (unlikely(tx_queue->stopped)) { | ||
| 343 | fill_level = tx_queue->insert_count - tx_queue->read_count; | ||
| 344 | if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { | ||
| 345 | EFX_BUG_ON_PARANOID(!NET_DEV_REGISTERED(efx)); | ||
| 346 | |||
| 347 | /* Do this under netif_tx_lock(), to avoid racing | ||
| 348 | * with efx_xmit(). */ | ||
| 349 | netif_tx_lock(efx->net_dev); | ||
| 350 | if (tx_queue->stopped) { | ||
| 351 | tx_queue->stopped = 0; | ||
| 352 | efx_wake_queue(efx); | ||
| 353 | } | ||
| 354 | netif_tx_unlock(efx->net_dev); | ||
| 355 | } | ||
| 356 | } | ||
| 357 | } | ||
| 358 | |||
| 359 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue) | ||
| 360 | { | ||
| 361 | struct efx_nic *efx = tx_queue->efx; | ||
| 362 | unsigned int txq_size; | ||
| 363 | int i, rc; | ||
| 364 | |||
| 365 | EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); | ||
| 366 | |||
| 367 | /* Allocate software ring */ | ||
| 368 | txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); | ||
| 369 | tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); | ||
| 370 | if (!tx_queue->buffer) { | ||
| 371 | rc = -ENOMEM; | ||
| 372 | goto fail1; | ||
| 373 | } | ||
| 374 | for (i = 0; i <= efx->type->txd_ring_mask; ++i) | ||
| 375 | tx_queue->buffer[i].continuation = 1; | ||
| 376 | |||
| 377 | /* Allocate hardware ring */ | ||
| 378 | rc = falcon_probe_tx(tx_queue); | ||
| 379 | if (rc) | ||
| 380 | goto fail2; | ||
| 381 | |||
| 382 | return 0; | ||
| 383 | |||
| 384 | fail2: | ||
| 385 | kfree(tx_queue->buffer); | ||
| 386 | tx_queue->buffer = NULL; | ||
| 387 | fail1: | ||
| 388 | tx_queue->used = 0; | ||
| 389 | |||
| 390 | return rc; | ||
| 391 | } | ||
| 392 | |||
| 393 | int efx_init_tx_queue(struct efx_tx_queue *tx_queue) | ||
| 394 | { | ||
| 395 | EFX_LOG(tx_queue->efx, "initialising TX queue %d\n", tx_queue->queue); | ||
| 396 | |||
| 397 | tx_queue->insert_count = 0; | ||
| 398 | tx_queue->write_count = 0; | ||
| 399 | tx_queue->read_count = 0; | ||
| 400 | tx_queue->old_read_count = 0; | ||
| 401 | BUG_ON(tx_queue->stopped); | ||
| 402 | |||
| 403 | /* Set up TX descriptor ring */ | ||
| 404 | return falcon_init_tx(tx_queue); | ||
| 405 | } | ||
| 406 | |||
| 407 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue) | ||
| 408 | { | ||
| 409 | struct efx_tx_buffer *buffer; | ||
| 410 | |||
| 411 | if (!tx_queue->buffer) | ||
| 412 | return; | ||
| 413 | |||
| 414 | /* Free any buffers left in the ring */ | ||
| 415 | while (tx_queue->read_count != tx_queue->write_count) { | ||
| 416 | buffer = &tx_queue->buffer[tx_queue->read_count & | ||
| 417 | tx_queue->efx->type->txd_ring_mask]; | ||
| 418 | efx_dequeue_buffer(tx_queue, buffer); | ||
| 419 | buffer->continuation = 1; | ||
| 420 | buffer->len = 0; | ||
| 421 | |||
| 422 | ++tx_queue->read_count; | ||
| 423 | } | ||
| 424 | } | ||
| 425 | |||
| 426 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue) | ||
| 427 | { | ||
| 428 | EFX_LOG(tx_queue->efx, "shutting down TX queue %d\n", tx_queue->queue); | ||
| 429 | |||
| 430 | /* Flush TX queue, remove descriptor ring */ | ||
| 431 | falcon_fini_tx(tx_queue); | ||
| 432 | |||
| 433 | efx_release_tx_buffers(tx_queue); | ||
| 434 | |||
| 435 | /* Release queue's stop on port, if any */ | ||
| 436 | if (tx_queue->stopped) { | ||
| 437 | tx_queue->stopped = 0; | ||
| 438 | efx_wake_queue(tx_queue->efx); | ||
| 439 | } | ||
| 440 | } | ||
| 441 | |||
| 442 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) | ||
| 443 | { | ||
| 444 | EFX_LOG(tx_queue->efx, "destroying TX queue %d\n", tx_queue->queue); | ||
| 445 | falcon_remove_tx(tx_queue); | ||
| 446 | |||
| 447 | kfree(tx_queue->buffer); | ||
| 448 | tx_queue->buffer = NULL; | ||
| 449 | tx_queue->used = 0; | ||
| 450 | } | ||
| 451 | |||
| 452 | |||
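Editor's sketch (not part of the patch): the TX path above relies on free-running unsigned counters that are only masked down to a ring index at the point of use, so wrap-around needs no special handling. A minimal illustration of that scheme, with field names mirroring the driver but the structure itself invented for clarity:

    /* Illustrative only; mirrors the counter/mask arithmetic used in tx.c above. */
    struct demo_txq {
            unsigned int insert_count;      /* producer: bumped per descriptor   */
            unsigned int read_count;        /* consumer: bumped per completion   */
            unsigned int ring_mask;         /* ring entries - 1, a power of two  */
    };

    static unsigned int demo_fill_level(const struct demo_txq *q)
    {
            /* Correct across wrap-around because the subtraction is unsigned. */
            return q->insert_count - q->read_count;
    }

    static unsigned int demo_insert_ptr(const struct demo_txq *q)
    {
            /* Index of the next free descriptor slot. */
            return q->insert_count & q->ring_mask;
    }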
diff --git a/drivers/net/sfc/tx.h b/drivers/net/sfc/tx.h new file mode 100644 index 000000000000..1526a73b4b51 --- /dev/null +++ b/drivers/net/sfc/tx.h | |||
| @@ -0,0 +1,24 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006 Fen Systems Ltd. | ||
| 4 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef EFX_TX_H | ||
| 12 | #define EFX_TX_H | ||
| 13 | |||
| 14 | #include "net_driver.h" | ||
| 15 | |||
| 16 | int efx_probe_tx_queue(struct efx_tx_queue *tx_queue); | ||
| 17 | void efx_remove_tx_queue(struct efx_tx_queue *tx_queue); | ||
| 18 | int efx_init_tx_queue(struct efx_tx_queue *tx_queue); | ||
| 19 | void efx_fini_tx_queue(struct efx_tx_queue *tx_queue); | ||
| 20 | |||
| 21 | int efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev); | ||
| 22 | void efx_release_tx_buffers(struct efx_tx_queue *tx_queue); | ||
| 23 | |||
| 24 | #endif /* EFX_TX_H */ | ||
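Editor's sketch (not part of the patch): the declarations above define the per-queue lifecycle. A hedged outline of how a caller is expected to sequence them, with the wrapper functions hypothetical and error handling abbreviated:

    /* Hypothetical call sequence; assumes "tx.h" above is included. */
    static int demo_bring_up_txq(struct efx_tx_queue *tx_queue)
    {
            int rc;

            rc = efx_probe_tx_queue(tx_queue);      /* allocate SW and HW rings    */
            if (rc)
                    return rc;

            rc = efx_init_tx_queue(tx_queue);       /* reset counters, set up ring */
            if (rc)
                    efx_remove_tx_queue(tx_queue);
            return rc;
    }

    static void demo_tear_down_txq(struct efx_tx_queue *tx_queue)
    {
            efx_fini_tx_queue(tx_queue);            /* flush, free leftover skbs   */
            efx_remove_tx_queue(tx_queue);          /* free the rings              */
    }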
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h new file mode 100644 index 000000000000..dca62f190198 --- /dev/null +++ b/drivers/net/sfc/workarounds.h | |||
| @@ -0,0 +1,56 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_WORKAROUNDS_H | ||
| 11 | #define EFX_WORKAROUNDS_H | ||
| 12 | |||
| 13 | /* | ||
| 14 | * Hardware workarounds. | ||
| 15 | * Bug numbers are from Solarflare's Bugzilla. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #define EFX_WORKAROUND_ALWAYS(efx) 1 | ||
| 19 | #define EFX_WORKAROUND_FALCON_A(efx) (FALCON_REV(efx) <= FALCON_REV_A1) | ||
| 20 | |||
| 21 | /* XAUI resets if link not detected */ | ||
| 22 | #define EFX_WORKAROUND_5147 EFX_WORKAROUND_ALWAYS | ||
| 23 | /* SNAP frames have TOBE_DISC set */ | ||
| 24 | #define EFX_WORKAROUND_5475 EFX_WORKAROUND_ALWAYS | ||
| 25 | /* RX PCIe double split performance issue */ | ||
| 26 | #define EFX_WORKAROUND_7575 EFX_WORKAROUND_ALWAYS | ||
| 27 | /* TX pkt parser problem with <= 16 byte TXes */ | ||
| 28 | #define EFX_WORKAROUND_9141 EFX_WORKAROUND_ALWAYS | ||
| 29 | /* XGXS and XAUI reset sequencing in SW */ | ||
| 30 | #define EFX_WORKAROUND_9388 EFX_WORKAROUND_ALWAYS | ||
| 31 | /* Low rate CRC errors require XAUI reset */ | ||
| 32 | #define EFX_WORKAROUND_10750 EFX_WORKAROUND_ALWAYS | ||
| 33 | /* TX_EV_PKT_ERR can be caused by a dangling TX descriptor | ||
| 34 | * or a PCIe error (bug 11028) */ | ||
| 35 | #define EFX_WORKAROUND_10727 EFX_WORKAROUND_ALWAYS | ||
| 36 | /* Transmit flow control may get disabled */ | ||
| 37 | #define EFX_WORKAROUND_11482 EFX_WORKAROUND_ALWAYS | ||
| 38 | /* Flush events can take a very long time to appear */ | ||
| 39 | #define EFX_WORKAROUND_11557 EFX_WORKAROUND_ALWAYS | ||
| 40 | |||
| 41 | /* Spurious parity errors in TSORT buffers */ | ||
| 42 | #define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A | ||
| 43 | /* iSCSI parsing errors */ | ||
| 44 | #define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A | ||
| 45 | /* RX events go missing */ | ||
| 46 | #define EFX_WORKAROUND_5676 EFX_WORKAROUND_FALCON_A | ||
| 47 | /* RX_RESET on A1 */ | ||
| 48 | #define EFX_WORKAROUND_6555 EFX_WORKAROUND_FALCON_A | ||
| 49 | /* Increase filter depth to avoid RX_RESET */ | ||
| 50 | #define EFX_WORKAROUND_7244 EFX_WORKAROUND_FALCON_A | ||
| 51 | /* Flushes may never complete */ | ||
| 52 | #define EFX_WORKAROUND_7803 EFX_WORKAROUND_FALCON_A | ||
| 53 | /* Leak overlength packets rather than free */ | ||
| 54 | #define EFX_WORKAROUND_8071 EFX_WORKAROUND_FALCON_A | ||
| 55 | |||
| 56 | #endif /* EFX_WORKAROUNDS_H */ | ||
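Editor's sketch (not part of the patch): each bug number above expands to a predicate taking the efx_nic pointer, so call sites stay self-documenting. A small usage illustration; the surrounding function is hypothetical:

    /* Hypothetical call site; the workaround numbers come from the header above. */
    static void demo_handle_flush_timeout(struct efx_nic *efx)
    {
            if (EFX_WORKAROUND_11557(efx)) {
                    /* Applies on all revisions: flush events can be very slow,
                     * so extend the wait rather than treating it as fatal. */
            }

            if (EFX_WORKAROUND_7803(efx)) {
                    /* Falcon A-series only: flushes may never complete, so a
                     * caller would fall back to a heavier reset path here. */
            }
    }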
diff --git a/drivers/net/sfc/xenpack.h b/drivers/net/sfc/xenpack.h new file mode 100644 index 000000000000..b0d1f225b70a --- /dev/null +++ b/drivers/net/sfc/xenpack.h | |||
| @@ -0,0 +1,62 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef EFX_XENPACK_H | ||
| 11 | #define EFX_XENPACK_H | ||
| 12 | |||
| 13 | /* Exported functions from Xenpack standard PHY control */ | ||
| 14 | |||
| 15 | #include "mdio_10g.h" | ||
| 16 | |||
| 17 | /****************************************************************************/ | ||
| 18 | /* XENPACK MDIO register extensions */ | ||
| 19 | #define MDIO_XP_LASI_RX_CTRL (0x9000) | ||
| 20 | #define MDIO_XP_LASI_TX_CTRL (0x9001) | ||
| 21 | #define MDIO_XP_LASI_CTRL (0x9002) | ||
| 22 | #define MDIO_XP_LASI_RX_STAT (0x9003) | ||
| 23 | #define MDIO_XP_LASI_TX_STAT (0x9004) | ||
| 24 | #define MDIO_XP_LASI_STAT (0x9005) | ||
| 25 | |||
| 26 | /* Control/Status bits */ | ||
| 27 | #define XP_LASI_LS_ALARM (1 << 0) | ||
| 28 | #define XP_LASI_TX_ALARM (1 << 1) | ||
| 29 | #define XP_LASI_RX_ALARM (1 << 2) | ||
| 30 | /* These two are Quake vendor extensions to the standard XENPACK defines */ | ||
| 31 | #define XP_LASI_LS_INTB (1 << 3) | ||
| 32 | #define XP_LASI_TEST (1 << 7) | ||
| 33 | |||
| 34 | /* Enable LASI interrupts for PHY */ | ||
| 35 | static inline void xenpack_enable_lasi_irqs(struct efx_nic *efx) | ||
| 36 | { | ||
| 37 | int reg; | ||
| 38 | int phy_id = efx->mii.phy_id; | ||
| 39 | /* Read to clear LASI status register */ | ||
| 40 | reg = mdio_clause45_read(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 41 | MDIO_XP_LASI_STAT); | ||
| 42 | |||
| 43 | mdio_clause45_write(efx, phy_id, MDIO_MMD_PMAPMD, | ||
| 44 | MDIO_XP_LASI_CTRL, XP_LASI_LS_ALARM); | ||
| 45 | } | ||
| 46 | |||
| 47 | /* Read the LASI interrupt status to clear the interrupt. */ | ||
| 48 | static inline int xenpack_clear_lasi_irqs(struct efx_nic *efx) | ||
| 49 | { | ||
| 50 | /* Read to clear link status alarm */ | ||
| 51 | return mdio_clause45_read(efx, efx->mii.phy_id, | ||
| 52 | MDIO_MMD_PMAPMD, MDIO_XP_LASI_STAT); | ||
| 53 | } | ||
| 54 | |||
| 55 | /* Turn off LASI interrupts */ | ||
| 56 | static inline void xenpack_disable_lasi_irqs(struct efx_nic *efx) | ||
| 57 | { | ||
| 58 | mdio_clause45_write(efx, efx->mii.phy_id, MDIO_MMD_PMAPMD, | ||
| 59 | MDIO_XP_LASI_CTRL, 0); | ||
| 60 | } | ||
| 61 | |||
| 62 | #endif /* EFX_XENPACK_H */ | ||
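Editor's sketch (not part of the patch): the three inline helpers above cover the whole LASI lifecycle. A hedged illustration of how a PHY driver might wire them in; the function names and calling context are assumptions, only the helpers and bit masks come from the header:

    /* Hypothetical PHY glue; assumes "xenpack.h" above is included. */
    static void demo_phy_start(struct efx_nic *efx)
    {
            xenpack_enable_lasi_irqs(efx);          /* clear stale status, unmask LS alarm */
    }

    static void demo_phy_poll(struct efx_nic *efx)
    {
            int lasi = xenpack_clear_lasi_irqs(efx);    /* read-to-clear status */

            if (lasi & XP_LASI_LS_ALARM) {
                    /* Link-status alarm fired: re-check link state here. */
            }
    }

    static void demo_phy_stop(struct efx_nic *efx)
    {
            xenpack_disable_lasi_irqs(efx);         /* mask all LASI sources again */
    }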
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/xfp_phy.c new file mode 100644 index 000000000000..66dd5bf1eaa9 --- /dev/null +++ b/drivers/net/sfc/xfp_phy.c | |||
| @@ -0,0 +1,132 @@ | |||
| 1 | /**************************************************************************** | ||
| 2 | * Driver for Solarflare Solarstorm network controllers and boards | ||
| 3 | * Copyright 2006-2008 Solarflare Communications Inc. | ||
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms of the GNU General Public License version 2 as published | ||
| 7 | * by the Free Software Foundation, incorporated herein by reference. | ||
| 8 | */ | ||
| 9 | /* | ||
| 10 | * Driver for XFP optical PHYs (plus some support specific to the Quake 2032) | ||
| 11 | * See www.amcc.com for details (search for qt2032) | ||
| 12 | */ | ||
| 13 | |||
| 14 | #include <linux/timer.h> | ||
| 15 | #include <linux/delay.h> | ||
| 16 | #include "efx.h" | ||
| 17 | #include "gmii.h" | ||
| 18 | #include "mdio_10g.h" | ||
| 19 | #include "xenpack.h" | ||
| 20 | #include "phy.h" | ||
| 21 | #include "mac.h" | ||
| 22 | |||
| 23 | #define XFP_REQUIRED_DEVS (MDIO_MMDREG_DEVS0_PCS | \ | ||
| 24 | MDIO_MMDREG_DEVS0_PMAPMD | \ | ||
| 25 | MDIO_MMDREG_DEVS0_PHYXS) | ||
| 26 | |||
| 27 | /****************************************************************************/ | ||
| 28 | /* Quake-specific MDIO registers */ | ||
| 29 | #define MDIO_QUAKE_LED0_REG (0xD006) | ||
| 30 | |||
| 31 | void xfp_set_led(struct efx_nic *p, int led, int mode) | ||
| 32 | { | ||
| 33 | int addr = MDIO_QUAKE_LED0_REG + led; | ||
| 34 | mdio_clause45_write(p, p->mii.phy_id, MDIO_MMD_PMAPMD, addr, | ||
| 35 | mode); | ||
| 36 | } | ||
| 37 | |||
| 38 | #define XFP_MAX_RESET_TIME 500 | ||
| 39 | #define XFP_RESET_WAIT 10 | ||
| 40 | |||
| 41 | /* Reset the PHYXS MMD. This is documented (for the Quake PHY) as doing | ||
| 42 | * a complete soft reset. | ||
| 43 | */ | ||
| 44 | static int xfp_reset_phy(struct efx_nic *efx) | ||
| 45 | { | ||
| 46 | int rc; | ||
| 47 | |||
| 48 | rc = mdio_clause45_reset_mmd(efx, MDIO_MMD_PHYXS, | ||
| 49 | XFP_MAX_RESET_TIME / XFP_RESET_WAIT, | ||
| 50 | XFP_RESET_WAIT); | ||
| 51 | if (rc < 0) | ||
| 52 | goto fail; | ||
| 53 | |||
| 54 | /* Wait 250ms for the PHY to complete bootup */ | ||
| 55 | msleep(250); | ||
| 56 | |||
| 57 | /* Check that all the MMDs we expect are present and responding. We | ||
| 58 | * expect faults on some if the link is down, but not on the PHY XS */ | ||
| 59 | rc = mdio_clause45_check_mmds(efx, XFP_REQUIRED_DEVS, | ||
| 60 | MDIO_MMDREG_DEVS0_PHYXS); | ||
| 61 | if (rc < 0) | ||
| 62 | goto fail; | ||
| 63 | |||
| 64 | efx->board_info.init_leds(efx); | ||
| 65 | |||
| 66 | return rc; | ||
| 67 | |||
| 68 | fail: | ||
| 69 | EFX_ERR(efx, "XFP: reset timed out!\n"); | ||
| 70 | return rc; | ||
| 71 | } | ||
| 72 | |||
| 73 | static int xfp_phy_init(struct efx_nic *efx) | ||
| 74 | { | ||
| 75 | u32 devid = mdio_clause45_read_id(efx, MDIO_MMD_PHYXS); | ||
| 76 | int rc; | ||
| 77 | |||
| 78 | EFX_INFO(efx, "XFP: PHY ID reg %x (OUI %x model %x revision" | ||
| 79 | " %x)\n", devid, MDIO_ID_OUI(devid), MDIO_ID_MODEL(devid), | ||
| 80 | MDIO_ID_REV(devid)); | ||
| 81 | |||
| 82 | rc = xfp_reset_phy(efx); | ||
| 83 | |||
| 84 | EFX_INFO(efx, "XFP: PHY init %s.\n", | ||
| 85 | rc ? "failed" : "successful"); | ||
| 86 | |||
| 87 | return rc; | ||
| 88 | } | ||
| 89 | |||
| 90 | static void xfp_phy_clear_interrupt(struct efx_nic *efx) | ||
| 91 | { | ||
| 92 | xenpack_clear_lasi_irqs(efx); | ||
| 93 | } | ||
| 94 | |||
| 95 | static int xfp_link_ok(struct efx_nic *efx) | ||
| 96 | { | ||
| 97 | return mdio_clause45_links_ok(efx, XFP_REQUIRED_DEVS); | ||
| 98 | } | ||
| 99 | |||
| 100 | static int xfp_phy_check_hw(struct efx_nic *efx) | ||
| 101 | { | ||
| 102 | int rc = 0; | ||
| 103 | int link_up = xfp_link_ok(efx); | ||
| 104 | /* Simulate a PHY event if link state has changed */ | ||
| 105 | if (link_up != efx->link_up) | ||
| 106 | falcon_xmac_sim_phy_event(efx); | ||
| 107 | |||
| 108 | return rc; | ||
| 109 | } | ||
| 110 | |||
| 111 | static void xfp_phy_reconfigure(struct efx_nic *efx) | ||
| 112 | { | ||
| 113 | efx->link_up = xfp_link_ok(efx); | ||
| 114 | efx->link_options = GM_LPA_10000FULL; | ||
| 115 | } | ||
| 116 | |||
| 117 | |||
| 118 | static void xfp_phy_fini(struct efx_nic *efx) | ||
| 119 | { | ||
| 120 | /* Clobber the LED if it was blinking */ | ||
| 121 | efx->board_info.blink(efx, 0); | ||
| 122 | } | ||
| 123 | |||
| 124 | struct efx_phy_operations falcon_xfp_phy_ops = { | ||
| 125 | .init = xfp_phy_init, | ||
| 126 | .reconfigure = xfp_phy_reconfigure, | ||
| 127 | .check_hw = xfp_phy_check_hw, | ||
| 128 | .fini = xfp_phy_fini, | ||
| 129 | .clear_interrupt = xfp_phy_clear_interrupt, | ||
| 130 | .reset_xaui = efx_port_dummy_op_void, | ||
| 131 | .mmds = XFP_REQUIRED_DEVS, | ||
| 132 | }; | ||
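Editor's sketch (not part of the patch): falcon_xfp_phy_ops packages the routines above behind the driver's generic struct efx_phy_operations. A rough illustration of how common code would drive the table; the wrapper name and call order are assumptions about the rest of the driver, not taken from this file:

    /* Hypothetical consumer of the ops table; only the .init and .reconfigure
     * members, which the table above really provides, are used. */
    static int demo_start_xfp(struct efx_nic *efx)
    {
            struct efx_phy_operations *ops = &falcon_xfp_phy_ops;
            int rc;

            rc = ops->init(efx);            /* xfp_phy_init: soft reset + MMD checks */
            if (rc)
                    return rc;

            ops->reconfigure(efx);          /* latch link state and link options */
            return 0;
    }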
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c index 20745fd4e973..abc63b0663be 100644 --- a/drivers/net/sis190.c +++ b/drivers/net/sis190.c | |||
| @@ -212,6 +212,12 @@ enum _DescStatusBit { | |||
| 212 | THOL2 = 0x20000000, | 212 | THOL2 = 0x20000000, |
| 213 | THOL1 = 0x10000000, | 213 | THOL1 = 0x10000000, |
| 214 | THOL0 = 0x00000000, | 214 | THOL0 = 0x00000000, |
| 215 | |||
| 216 | WND = 0x00080000, | ||
| 217 | TABRT = 0x00040000, | ||
| 218 | FIFO = 0x00020000, | ||
| 219 | LINK = 0x00010000, | ||
| 220 | ColCountMask = 0x0000ffff, | ||
| 215 | /* RxDesc.status */ | 221 | /* RxDesc.status */ |
| 216 | IPON = 0x20000000, | 222 | IPON = 0x20000000, |
| 217 | TCPON = 0x10000000, | 223 | TCPON = 0x10000000, |
| @@ -480,30 +486,23 @@ static inline void sis190_make_unusable_by_asic(struct RxDesc *desc) | |||
| 480 | desc->status = 0x0; | 486 | desc->status = 0x0; |
| 481 | } | 487 | } |
| 482 | 488 | ||
| 483 | static int sis190_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, | 489 | static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp, |
| 484 | struct RxDesc *desc, u32 rx_buf_sz) | 490 | struct RxDesc *desc) |
| 485 | { | 491 | { |
| 492 | u32 rx_buf_sz = tp->rx_buf_sz; | ||
| 486 | struct sk_buff *skb; | 493 | struct sk_buff *skb; |
| 487 | dma_addr_t mapping; | ||
| 488 | int ret = 0; | ||
| 489 | |||
| 490 | skb = dev_alloc_skb(rx_buf_sz); | ||
| 491 | if (!skb) | ||
| 492 | goto err_out; | ||
| 493 | |||
| 494 | *sk_buff = skb; | ||
| 495 | 494 | ||
| 496 | mapping = pci_map_single(pdev, skb->data, rx_buf_sz, | 495 | skb = netdev_alloc_skb(tp->dev, rx_buf_sz); |
| 497 | PCI_DMA_FROMDEVICE); | 496 | if (likely(skb)) { |
| 497 | dma_addr_t mapping; | ||
| 498 | 498 | ||
| 499 | sis190_map_to_asic(desc, mapping, rx_buf_sz); | 499 | mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, |
| 500 | out: | 500 | PCI_DMA_FROMDEVICE); |
| 501 | return ret; | 501 | sis190_map_to_asic(desc, mapping, rx_buf_sz); |
| 502 | } else | ||
| 503 | sis190_make_unusable_by_asic(desc); | ||
| 502 | 504 | ||
| 503 | err_out: | 505 | return skb; |
| 504 | ret = -ENOMEM; | ||
| 505 | sis190_make_unusable_by_asic(desc); | ||
| 506 | goto out; | ||
| 507 | } | 506 | } |
| 508 | 507 | ||
| 509 | static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, | 508 | static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, |
| @@ -512,37 +511,41 @@ static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, | |||
| 512 | u32 cur; | 511 | u32 cur; |
| 513 | 512 | ||
| 514 | for (cur = start; cur < end; cur++) { | 513 | for (cur = start; cur < end; cur++) { |
| 515 | int ret, i = cur % NUM_RX_DESC; | 514 | unsigned int i = cur % NUM_RX_DESC; |
| 516 | 515 | ||
| 517 | if (tp->Rx_skbuff[i]) | 516 | if (tp->Rx_skbuff[i]) |
| 518 | continue; | 517 | continue; |
| 519 | 518 | ||
| 520 | ret = sis190_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, | 519 | tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i); |
| 521 | tp->RxDescRing + i, tp->rx_buf_sz); | 520 | |
| 522 | if (ret < 0) | 521 | if (!tp->Rx_skbuff[i]) |
| 523 | break; | 522 | break; |
| 524 | } | 523 | } |
| 525 | return cur - start; | 524 | return cur - start; |
| 526 | } | 525 | } |
| 527 | 526 | ||
| 528 | static inline int sis190_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, | 527 | static bool sis190_try_rx_copy(struct sis190_private *tp, |
| 529 | struct RxDesc *desc, int rx_buf_sz) | 528 | struct sk_buff **sk_buff, int pkt_size, |
| 529 | dma_addr_t addr) | ||
| 530 | { | 530 | { |
| 531 | int ret = -1; | 531 | struct sk_buff *skb; |
| 532 | bool done = false; | ||
| 532 | 533 | ||
| 533 | if (pkt_size < rx_copybreak) { | 534 | if (pkt_size >= rx_copybreak) |
| 534 | struct sk_buff *skb; | 535 | goto out; |
| 535 | 536 | ||
| 536 | skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN); | 537 | skb = netdev_alloc_skb(tp->dev, pkt_size + 2); |
| 537 | if (skb) { | 538 | if (!skb) |
| 538 | skb_reserve(skb, NET_IP_ALIGN); | 539 | goto out; |
| 539 | skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); | 540 | |
| 540 | *sk_buff = skb; | 541 | pci_dma_sync_single_for_device(tp->pci_dev, addr, pkt_size, |
| 541 | sis190_give_to_asic(desc, rx_buf_sz); | 542 | PCI_DMA_FROMDEVICE); |
| 542 | ret = 0; | 543 | skb_reserve(skb, 2); |
| 543 | } | 544 | skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); |
| 544 | } | 545 | *sk_buff = skb; |
| 545 | return ret; | 546 | done = true; |
| 547 | out: | ||
| 548 | return done; | ||
| 546 | } | 549 | } |
| 547 | 550 | ||
| 548 | static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) | 551 | static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) |
| @@ -592,9 +595,9 @@ static int sis190_rx_interrupt(struct net_device *dev, | |||
| 592 | sis190_give_to_asic(desc, tp->rx_buf_sz); | 595 | sis190_give_to_asic(desc, tp->rx_buf_sz); |
| 593 | else { | 596 | else { |
| 594 | struct sk_buff *skb = tp->Rx_skbuff[entry]; | 597 | struct sk_buff *skb = tp->Rx_skbuff[entry]; |
| 598 | dma_addr_t addr = le32_to_cpu(desc->addr); | ||
| 595 | int pkt_size = (status & RxSizeMask) - 4; | 599 | int pkt_size = (status & RxSizeMask) - 4; |
| 596 | void (*pci_action)(struct pci_dev *, dma_addr_t, | 600 | struct pci_dev *pdev = tp->pci_dev; |
| 597 | size_t, int) = pci_dma_sync_single_for_device; | ||
| 598 | 601 | ||
| 599 | if (unlikely(pkt_size > tp->rx_buf_sz)) { | 602 | if (unlikely(pkt_size > tp->rx_buf_sz)) { |
| 600 | net_intr(tp, KERN_INFO | 603 | net_intr(tp, KERN_INFO |
| @@ -606,20 +609,18 @@ static int sis190_rx_interrupt(struct net_device *dev, | |||
| 606 | continue; | 609 | continue; |
| 607 | } | 610 | } |
| 608 | 611 | ||
| 609 | pci_dma_sync_single_for_cpu(tp->pci_dev, | ||
| 610 | le32_to_cpu(desc->addr), tp->rx_buf_sz, | ||
| 611 | PCI_DMA_FROMDEVICE); | ||
| 612 | 612 | ||
| 613 | if (sis190_try_rx_copy(&skb, pkt_size, desc, | 613 | if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) { |
| 614 | tp->rx_buf_sz)) { | 614 | pci_dma_sync_single_for_device(pdev, addr, |
| 615 | pci_action = pci_unmap_single; | 615 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); |
| 616 | sis190_give_to_asic(desc, tp->rx_buf_sz); | ||
| 617 | } else { | ||
| 618 | pci_unmap_single(pdev, addr, tp->rx_buf_sz, | ||
| 619 | PCI_DMA_FROMDEVICE); | ||
| 616 | tp->Rx_skbuff[entry] = NULL; | 620 | tp->Rx_skbuff[entry] = NULL; |
| 617 | sis190_make_unusable_by_asic(desc); | 621 | sis190_make_unusable_by_asic(desc); |
| 618 | } | 622 | } |
| 619 | 623 | ||
| 620 | pci_action(tp->pci_dev, le32_to_cpu(desc->addr), | ||
| 621 | tp->rx_buf_sz, PCI_DMA_FROMDEVICE); | ||
| 622 | |||
| 623 | skb_put(skb, pkt_size); | 624 | skb_put(skb, pkt_size); |
| 624 | skb->protocol = eth_type_trans(skb, dev); | 625 | skb->protocol = eth_type_trans(skb, dev); |
| 625 | 626 | ||
| @@ -658,9 +659,31 @@ static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, | |||
| 658 | memset(desc, 0x00, sizeof(*desc)); | 659 | memset(desc, 0x00, sizeof(*desc)); |
| 659 | } | 660 | } |
| 660 | 661 | ||
| 662 | static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats) | ||
| 663 | { | ||
| 664 | #define TxErrMask (WND | TABRT | FIFO | LINK) | ||
| 665 | |||
| 666 | if (!unlikely(status & TxErrMask)) | ||
| 667 | return 0; | ||
| 668 | |||
| 669 | if (status & WND) | ||
| 670 | stats->tx_window_errors++; | ||
| 671 | if (status & TABRT) | ||
| 672 | stats->tx_aborted_errors++; | ||
| 673 | if (status & FIFO) | ||
| 674 | stats->tx_fifo_errors++; | ||
| 675 | if (status & LINK) | ||
| 676 | stats->tx_carrier_errors++; | ||
| 677 | |||
| 678 | stats->tx_errors++; | ||
| 679 | |||
| 680 | return -1; | ||
| 681 | } | ||
| 682 | |||
| 661 | static void sis190_tx_interrupt(struct net_device *dev, | 683 | static void sis190_tx_interrupt(struct net_device *dev, |
| 662 | struct sis190_private *tp, void __iomem *ioaddr) | 684 | struct sis190_private *tp, void __iomem *ioaddr) |
| 663 | { | 685 | { |
| 686 | struct net_device_stats *stats = &dev->stats; | ||
| 664 | u32 pending, dirty_tx = tp->dirty_tx; | 687 | u32 pending, dirty_tx = tp->dirty_tx; |
| 665 | /* | 688 | /* |
| 666 | * It would not be needed if queueing was allowed to be enabled | 689 | * It would not be needed if queueing was allowed to be enabled |
| @@ -675,15 +698,19 @@ static void sis190_tx_interrupt(struct net_device *dev, | |||
| 675 | for (; pending; pending--, dirty_tx++) { | 698 | for (; pending; pending--, dirty_tx++) { |
| 676 | unsigned int entry = dirty_tx % NUM_TX_DESC; | 699 | unsigned int entry = dirty_tx % NUM_TX_DESC; |
| 677 | struct TxDesc *txd = tp->TxDescRing + entry; | 700 | struct TxDesc *txd = tp->TxDescRing + entry; |
| 701 | u32 status = le32_to_cpu(txd->status); | ||
| 678 | struct sk_buff *skb; | 702 | struct sk_buff *skb; |
| 679 | 703 | ||
| 680 | if (le32_to_cpu(txd->status) & OWNbit) | 704 | if (status & OWNbit) |
| 681 | break; | 705 | break; |
| 682 | 706 | ||
| 683 | skb = tp->Tx_skbuff[entry]; | 707 | skb = tp->Tx_skbuff[entry]; |
| 684 | 708 | ||
| 685 | dev->stats.tx_packets++; | 709 | if (likely(sis190_tx_pkt_err(status, stats) == 0)) { |
| 686 | dev->stats.tx_bytes += skb->len; | 710 | stats->tx_packets++; |
| 711 | stats->tx_bytes += skb->len; | ||
| 712 | stats->collisions += ((status & ColCountMask) - 1); | ||
| 713 | } | ||
| 687 | 714 | ||
| 688 | sis190_unmap_tx_skb(tp->pci_dev, skb, txd); | 715 | sis190_unmap_tx_skb(tp->pci_dev, skb, txd); |
| 689 | tp->Tx_skbuff[entry] = NULL; | 716 | tp->Tx_skbuff[entry] = NULL; |
| @@ -904,10 +931,9 @@ static void sis190_phy_task(struct work_struct *work) | |||
| 904 | mod_timer(&tp->timer, jiffies + HZ/10); | 931 | mod_timer(&tp->timer, jiffies + HZ/10); |
| 905 | } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & | 932 | } else if (!(mdio_read_latched(ioaddr, phy_id, MII_BMSR) & |
| 906 | BMSR_ANEGCOMPLETE)) { | 933 | BMSR_ANEGCOMPLETE)) { |
| 907 | net_link(tp, KERN_WARNING "%s: PHY reset until link up.\n", | ||
| 908 | dev->name); | ||
| 909 | netif_carrier_off(dev); | 934 | netif_carrier_off(dev); |
| 910 | mdio_write(ioaddr, phy_id, MII_BMCR, val | BMCR_RESET); | 935 | net_link(tp, KERN_WARNING "%s: auto-negotiating...\n", |
| 936 | dev->name); | ||
| 911 | mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); | 937 | mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); |
| 912 | } else { | 938 | } else { |
| 913 | /* Rejoice ! */ | 939 | /* Rejoice ! */ |
diff --git a/drivers/net/slip.c b/drivers/net/slip.c index 5a55ede352f4..84af68fdb6c2 100644 --- a/drivers/net/slip.c +++ b/drivers/net/slip.c | |||
| @@ -396,14 +396,14 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len) | |||
| 396 | 396 | ||
| 397 | /* Order of next two lines is *very* important. | 397 | /* Order of next two lines is *very* important. |
| 398 | * When we are sending a little amount of data, | 398 | * When we are sending a little amount of data, |
| 399 | * the transfer may be completed inside driver.write() | 399 | * the transfer may be completed inside the ops->write() |
| 400 | * routine, because it's running with interrupts enabled. | 400 | * routine, because it's running with interrupts enabled. |
| 401 | * In this case we *never* got WRITE_WAKEUP event, | 401 | * In this case we *never* got WRITE_WAKEUP event, |
| 402 | * if we did not request it before write operation. | 402 | * if we did not request it before write operation. |
| 403 | * 14 Oct 1994 Dmitry Gorodchanin. | 403 | * 14 Oct 1994 Dmitry Gorodchanin. |
| 404 | */ | 404 | */ |
| 405 | sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); | 405 | sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); |
| 406 | actual = sl->tty->driver->write(sl->tty, sl->xbuff, count); | 406 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); |
| 407 | #ifdef SL_CHECK_TRANSMIT | 407 | #ifdef SL_CHECK_TRANSMIT |
| 408 | sl->dev->trans_start = jiffies; | 408 | sl->dev->trans_start = jiffies; |
| 409 | #endif | 409 | #endif |
| @@ -437,7 +437,7 @@ static void slip_write_wakeup(struct tty_struct *tty) | |||
| 437 | return; | 437 | return; |
| 438 | } | 438 | } |
| 439 | 439 | ||
| 440 | actual = tty->driver->write(tty, sl->xhead, sl->xleft); | 440 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
| 441 | sl->xleft -= actual; | 441 | sl->xleft -= actual; |
| 442 | sl->xhead += actual; | 442 | sl->xhead += actual; |
| 443 | } | 443 | } |
| @@ -462,7 +462,7 @@ static void sl_tx_timeout(struct net_device *dev) | |||
| 462 | } | 462 | } |
| 463 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", | 463 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", |
| 464 | dev->name, | 464 | dev->name, |
| 465 | (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ? | 465 | (tty_chars_in_buffer(sl->tty) || sl->xleft) ? |
| 466 | "bad line quality" : "driver error"); | 466 | "bad line quality" : "driver error"); |
| 467 | sl->xleft = 0; | 467 | sl->xleft = 0; |
| 468 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); | 468 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); |
| @@ -830,6 +830,9 @@ static int slip_open(struct tty_struct *tty) | |||
| 830 | if (!capable(CAP_NET_ADMIN)) | 830 | if (!capable(CAP_NET_ADMIN)) |
| 831 | return -EPERM; | 831 | return -EPERM; |
| 832 | 832 | ||
| 833 | if (tty->ops->write == NULL) | ||
| 834 | return -EOPNOTSUPP; | ||
| 835 | |||
| 833 | /* RTnetlink lock is misused here to serialize concurrent | 836 | /* RTnetlink lock is misused here to serialize concurrent |
| 834 | opens of slip channels. There are better ways, but it is | 837 | opens of slip channels. There are better ways, but it is |
| 835 | the simplest one. | 838 | the simplest one. |
| @@ -1432,7 +1435,7 @@ static void sl_outfill(unsigned long sls) | |||
| 1432 | /* put END into tty queue. Is it right ??? */ | 1435 | /* put END into tty queue. Is it right ??? */ |
| 1433 | if (!netif_queue_stopped(sl->dev)) { | 1436 | if (!netif_queue_stopped(sl->dev)) { |
| 1434 | /* if device busy no outfill */ | 1437 | /* if device busy no outfill */ |
| 1435 | sl->tty->driver->write(sl->tty, &s, 1); | 1438 | sl->tty->ops->write(sl->tty, &s, 1); |
| 1436 | } | 1439 | } |
| 1437 | } else | 1440 | } else |
| 1438 | set_bit(SLF_OUTWAIT, &sl->flags); | 1441 | set_bit(SLF_OUTWAIT, &sl->flags); |
diff --git a/drivers/net/wan/pc300_tty.c b/drivers/net/wan/pc300_tty.c index 63abfd72542d..e03eef2f2282 100644 --- a/drivers/net/wan/pc300_tty.c +++ b/drivers/net/wan/pc300_tty.c | |||
| @@ -178,6 +178,20 @@ static void cpc_tty_signal_on(pc300dev_t *pc300dev, unsigned char signal) | |||
| 178 | CPC_TTY_UNLOCK(card,flags); | 178 | CPC_TTY_UNLOCK(card,flags); |
| 179 | } | 179 | } |
| 180 | 180 | ||
| 181 | |||
| 182 | static const struct tty_operations pc300_ops = { | ||
| 183 | .open = cpc_tty_open, | ||
| 184 | .close = cpc_tty_close, | ||
| 185 | .write = cpc_tty_write, | ||
| 186 | .write_room = cpc_tty_write_room, | ||
| 187 | .chars_in_buffer = cpc_tty_chars_in_buffer, | ||
| 188 | .tiocmset = pc300_tiocmset, | ||
| 189 | .tiocmget = pc300_tiocmget, | ||
| 190 | .flush_buffer = cpc_tty_flush_buffer, | ||
| 191 | .hangup = cpc_tty_hangup, | ||
| 192 | }; | ||
| 193 | |||
| 194 | |||
| 181 | /* | 195 | /* |
| 182 | * PC300 TTY initialization routine | 196 | * PC300 TTY initialization routine |
| 183 | * | 197 | * |
| @@ -225,15 +239,7 @@ void cpc_tty_init(pc300dev_t *pc300dev) | |||
| 225 | serial_drv.flags = TTY_DRIVER_REAL_RAW; | 239 | serial_drv.flags = TTY_DRIVER_REAL_RAW; |
| 226 | 240 | ||
| 227 | /* interface routines from the upper tty layer to the tty driver */ | 241 | /* interface routines from the upper tty layer to the tty driver */ |
| 228 | serial_drv.open = cpc_tty_open; | 242 | tty_set_operations(&serial_drv, &pc300_ops); |
| 229 | serial_drv.close = cpc_tty_close; | ||
| 230 | serial_drv.write = cpc_tty_write; | ||
| 231 | serial_drv.write_room = cpc_tty_write_room; | ||
| 232 | serial_drv.chars_in_buffer = cpc_tty_chars_in_buffer; | ||
| 233 | serial_drv.tiocmset = pc300_tiocmset; | ||
| 234 | serial_drv.tiocmget = pc300_tiocmget; | ||
| 235 | serial_drv.flush_buffer = cpc_tty_flush_buffer; | ||
| 236 | serial_drv.hangup = cpc_tty_hangup; | ||
| 237 | 243 | ||
| 238 | /* register the TTY driver */ | 244 | /* register the TTY driver */ |
| 239 | if (tty_register_driver(&serial_drv)) { | 245 | if (tty_register_driver(&serial_drv)) { |
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 0f8aca8a4d43..249e18053d5f 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | #include <linux/module.h> | 17 | #include <linux/module.h> |
| 18 | 18 | ||
| 19 | #include <asm/system.h> | 19 | #include <asm/system.h> |
| 20 | #include <asm/uaccess.h> | 20 | #include <linux/uaccess.h> |
| 21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
| 22 | #include <linux/string.h> | 22 | #include <linux/string.h> |
| 23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
| @@ -95,7 +95,7 @@ static struct x25_asy *x25_asy_alloc(void) | |||
| 95 | x25_asy_devs[i] = dev; | 95 | x25_asy_devs[i] = dev; |
| 96 | return sl; | 96 | return sl; |
| 97 | } else { | 97 | } else { |
| 98 | printk("x25_asy_alloc() - register_netdev() failure.\n"); | 98 | printk(KERN_WARNING "x25_asy_alloc() - register_netdev() failure.\n"); |
| 99 | free_netdev(dev); | 99 | free_netdev(dev); |
| 100 | } | 100 | } |
| 101 | } | 101 | } |
| @@ -112,23 +112,22 @@ static void x25_asy_free(struct x25_asy *sl) | |||
| 112 | kfree(sl->xbuff); | 112 | kfree(sl->xbuff); |
| 113 | sl->xbuff = NULL; | 113 | sl->xbuff = NULL; |
| 114 | 114 | ||
| 115 | if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) { | 115 | if (!test_and_clear_bit(SLF_INUSE, &sl->flags)) |
| 116 | printk("%s: x25_asy_free for already free unit.\n", sl->dev->name); | 116 | printk(KERN_ERR "%s: x25_asy_free for already free unit.\n", |
| 117 | } | 117 | sl->dev->name); |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | static int x25_asy_change_mtu(struct net_device *dev, int newmtu) | 120 | static int x25_asy_change_mtu(struct net_device *dev, int newmtu) |
| 121 | { | 121 | { |
| 122 | struct x25_asy *sl = dev->priv; | 122 | struct x25_asy *sl = dev->priv; |
| 123 | unsigned char *xbuff, *rbuff; | 123 | unsigned char *xbuff, *rbuff; |
| 124 | int len = 2* newmtu; | 124 | int len = 2 * newmtu; |
| 125 | 125 | ||
| 126 | xbuff = kmalloc(len + 4, GFP_ATOMIC); | 126 | xbuff = kmalloc(len + 4, GFP_ATOMIC); |
| 127 | rbuff = kmalloc(len + 4, GFP_ATOMIC); | 127 | rbuff = kmalloc(len + 4, GFP_ATOMIC); |
| 128 | 128 | ||
| 129 | if (xbuff == NULL || rbuff == NULL) | 129 | if (xbuff == NULL || rbuff == NULL) { |
| 130 | { | 130 | printk(KERN_WARNING "%s: unable to grow X.25 buffers, MTU change cancelled.\n", |
| 131 | printk("%s: unable to grow X.25 buffers, MTU change cancelled.\n", | ||
| 132 | dev->name); | 131 | dev->name); |
| 133 | kfree(xbuff); | 132 | kfree(xbuff); |
| 134 | kfree(rbuff); | 133 | kfree(rbuff); |
| @@ -193,25 +192,23 @@ static void x25_asy_bump(struct x25_asy *sl) | |||
| 193 | int err; | 192 | int err; |
| 194 | 193 | ||
| 195 | count = sl->rcount; | 194 | count = sl->rcount; |
| 196 | sl->stats.rx_bytes+=count; | 195 | sl->stats.rx_bytes += count; |
| 197 | 196 | ||
| 198 | skb = dev_alloc_skb(count+1); | 197 | skb = dev_alloc_skb(count+1); |
| 199 | if (skb == NULL) | 198 | if (skb == NULL) { |
| 200 | { | 199 | printk(KERN_WARNING "%s: memory squeeze, dropping packet.\n", |
| 201 | printk("%s: memory squeeze, dropping packet.\n", sl->dev->name); | 200 | sl->dev->name); |
| 202 | sl->stats.rx_dropped++; | 201 | sl->stats.rx_dropped++; |
| 203 | return; | 202 | return; |
| 204 | } | 203 | } |
| 205 | skb_push(skb,1); /* LAPB internal control */ | 204 | skb_push(skb, 1); /* LAPB internal control */ |
| 206 | memcpy(skb_put(skb,count), sl->rbuff, count); | 205 | memcpy(skb_put(skb, count), sl->rbuff, count); |
| 207 | skb->protocol = x25_type_trans(skb, sl->dev); | 206 | skb->protocol = x25_type_trans(skb, sl->dev); |
| 208 | if((err=lapb_data_received(skb->dev, skb))!=LAPB_OK) | 207 | err = lapb_data_received(skb->dev, skb); |
| 209 | { | 208 | if (err != LAPB_OK) { |
| 210 | kfree_skb(skb); | 209 | kfree_skb(skb); |
| 211 | printk(KERN_DEBUG "x25_asy: data received err - %d\n",err); | 210 | printk(KERN_DEBUG "x25_asy: data received err - %d\n", err); |
| 212 | } | 211 | } else { |
| 213 | else | ||
| 214 | { | ||
| 215 | netif_rx(skb); | 212 | netif_rx(skb); |
| 216 | sl->dev->last_rx = jiffies; | 213 | sl->dev->last_rx = jiffies; |
| 217 | sl->stats.rx_packets++; | 214 | sl->stats.rx_packets++; |
| @@ -224,10 +221,11 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len) | |||
| 224 | unsigned char *p; | 221 | unsigned char *p; |
| 225 | int actual, count, mtu = sl->dev->mtu; | 222 | int actual, count, mtu = sl->dev->mtu; |
| 226 | 223 | ||
| 227 | if (len > mtu) | 224 | if (len > mtu) { |
| 228 | { /* Sigh, shouldn't occur BUT ... */ | 225 | /* Sigh, shouldn't occur BUT ... */ |
| 229 | len = mtu; | 226 | len = mtu; |
| 230 | printk ("%s: truncating oversized transmit packet!\n", sl->dev->name); | 227 | printk(KERN_DEBUG "%s: truncating oversized transmit packet!\n", |
| 228 | sl->dev->name); | ||
| 231 | sl->stats.tx_dropped++; | 229 | sl->stats.tx_dropped++; |
| 232 | x25_asy_unlock(sl); | 230 | x25_asy_unlock(sl); |
| 233 | return; | 231 | return; |
| @@ -245,7 +243,7 @@ static void x25_asy_encaps(struct x25_asy *sl, unsigned char *icp, int len) | |||
| 245 | * 14 Oct 1994 Dmitry Gorodchanin. | 243 | * 14 Oct 1994 Dmitry Gorodchanin. |
| 246 | */ | 244 | */ |
| 247 | sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); | 245 | sl->tty->flags |= (1 << TTY_DO_WRITE_WAKEUP); |
| 248 | actual = sl->tty->driver->write(sl->tty, sl->xbuff, count); | 246 | actual = sl->tty->ops->write(sl->tty, sl->xbuff, count); |
| 249 | sl->xleft = count - actual; | 247 | sl->xleft = count - actual; |
| 250 | sl->xhead = sl->xbuff + actual; | 248 | sl->xhead = sl->xbuff + actual; |
| 251 | /* VSV */ | 249 | /* VSV */ |
| @@ -265,8 +263,7 @@ static void x25_asy_write_wakeup(struct tty_struct *tty) | |||
| 265 | if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) | 263 | if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev)) |
| 266 | return; | 264 | return; |
| 267 | 265 | ||
| 268 | if (sl->xleft <= 0) | 266 | if (sl->xleft <= 0) { |
| 269 | { | ||
| 270 | /* Now serial buffer is almost free & we can start | 267 | /* Now serial buffer is almost free & we can start |
| 271 | * transmission of another packet */ | 268 | * transmission of another packet */ |
| 272 | sl->stats.tx_packets++; | 269 | sl->stats.tx_packets++; |
| @@ -275,14 +272,14 @@ static void x25_asy_write_wakeup(struct tty_struct *tty) | |||
| 275 | return; | 272 | return; |
| 276 | } | 273 | } |
| 277 | 274 | ||
| 278 | actual = tty->driver->write(tty, sl->xhead, sl->xleft); | 275 | actual = tty->ops->write(tty, sl->xhead, sl->xleft); |
| 279 | sl->xleft -= actual; | 276 | sl->xleft -= actual; |
| 280 | sl->xhead += actual; | 277 | sl->xhead += actual; |
| 281 | } | 278 | } |
| 282 | 279 | ||
| 283 | static void x25_asy_timeout(struct net_device *dev) | 280 | static void x25_asy_timeout(struct net_device *dev) |
| 284 | { | 281 | { |
| 285 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 282 | struct x25_asy *sl = dev->priv; |
| 286 | 283 | ||
| 287 | spin_lock(&sl->lock); | 284 | spin_lock(&sl->lock); |
| 288 | if (netif_queue_stopped(dev)) { | 285 | if (netif_queue_stopped(dev)) { |
| @@ -290,7 +287,7 @@ static void x25_asy_timeout(struct net_device *dev) | |||
| 290 | * 14 Oct 1994 Dmitry Gorodchanin. | 287 | * 14 Oct 1994 Dmitry Gorodchanin. |
| 291 | */ | 288 | */ |
| 292 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, | 289 | printk(KERN_WARNING "%s: transmit timed out, %s?\n", dev->name, |
| 293 | (sl->tty->driver->chars_in_buffer(sl->tty) || sl->xleft) ? | 290 | (tty_chars_in_buffer(sl->tty) || sl->xleft) ? |
| 294 | "bad line quality" : "driver error"); | 291 | "bad line quality" : "driver error"); |
| 295 | sl->xleft = 0; | 292 | sl->xleft = 0; |
| 296 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); | 293 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); |
| @@ -303,31 +300,34 @@ static void x25_asy_timeout(struct net_device *dev) | |||
| 303 | 300 | ||
| 304 | static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) | 301 | static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) |
| 305 | { | 302 | { |
| 306 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 303 | struct x25_asy *sl = dev->priv; |
| 307 | int err; | 304 | int err; |
| 308 | 305 | ||
| 309 | if (!netif_running(sl->dev)) { | 306 | if (!netif_running(sl->dev)) { |
| 310 | printk("%s: xmit call when iface is down\n", dev->name); | 307 | printk(KERN_ERR "%s: xmit call when iface is down\n", |
| 308 | dev->name); | ||
| 311 | kfree_skb(skb); | 309 | kfree_skb(skb); |
| 312 | return 0; | 310 | return 0; |
| 313 | } | 311 | } |
| 314 | 312 | ||
| 315 | switch(skb->data[0]) | 313 | switch (skb->data[0]) { |
| 316 | { | 314 | case 0x00: |
| 317 | case 0x00:break; | 315 | break; |
| 318 | case 0x01: /* Connection request .. do nothing */ | 316 | case 0x01: /* Connection request .. do nothing */ |
| 319 | if((err=lapb_connect_request(dev))!=LAPB_OK) | 317 | err = lapb_connect_request(dev); |
| 320 | printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err); | 318 | if (err != LAPB_OK) |
| 321 | kfree_skb(skb); | 319 | printk(KERN_ERR "x25_asy: lapb_connect_request error - %d\n", err); |
| 322 | return 0; | 320 | kfree_skb(skb); |
| 323 | case 0x02: /* Disconnect request .. do nothing - hang up ?? */ | 321 | return 0; |
| 324 | if((err=lapb_disconnect_request(dev))!=LAPB_OK) | 322 | case 0x02: /* Disconnect request .. do nothing - hang up ?? */ |
| 325 | printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err); | 323 | err = lapb_disconnect_request(dev); |
| 326 | default: | 324 | if (err != LAPB_OK) |
| 327 | kfree_skb(skb); | 325 | printk(KERN_ERR "x25_asy: lapb_disconnect_request error - %d\n", err); |
| 328 | return 0; | 326 | default: |
| 327 | kfree_skb(skb); | ||
| 328 | return 0; | ||
| 329 | } | 329 | } |
| 330 | skb_pull(skb,1); /* Remove control byte */ | 330 | skb_pull(skb, 1); /* Remove control byte */ |
| 331 | /* | 331 | /* |
| 332 | * If we are busy already- too bad. We ought to be able | 332 | * If we are busy already- too bad. We ought to be able |
| 333 | * to queue things at this point, to allow for a little | 333 | * to queue things at this point, to allow for a little |
| @@ -338,10 +338,10 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 338 | * So, no queues ! | 338 | * So, no queues ! |
| 339 | * 14 Oct 1994 Dmitry Gorodchanin. | 339 | * 14 Oct 1994 Dmitry Gorodchanin. |
| 340 | */ | 340 | */ |
| 341 | 341 | ||
| 342 | if((err=lapb_data_request(dev,skb))!=LAPB_OK) | 342 | err = lapb_data_request(dev, skb); |
| 343 | { | 343 | if (err != LAPB_OK) { |
| 344 | printk(KERN_ERR "lapbeth: lapb_data_request error - %d\n", err); | 344 | printk(KERN_ERR "x25_asy: lapb_data_request error - %d\n", err); |
| 345 | kfree_skb(skb); | 345 | kfree_skb(skb); |
| 346 | return 0; | 346 | return 0; |
| 347 | } | 347 | } |
| @@ -357,7 +357,7 @@ static int x25_asy_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 357 | * Called when I frame data arrives. We did the work above - throw it | 357 | * Called when I frame data arrives. We did the work above - throw it |
| 358 | * at the net layer. | 358 | * at the net layer. |
| 359 | */ | 359 | */ |
| 360 | 360 | ||
| 361 | static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) | 361 | static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) |
| 362 | { | 362 | { |
| 363 | skb->dev->last_rx = jiffies; | 363 | skb->dev->last_rx = jiffies; |
| @@ -369,24 +369,22 @@ static int x25_asy_data_indication(struct net_device *dev, struct sk_buff *skb) | |||
| 369 | * busy cases too well. Its tricky to see how to do this nicely - | 369 | * busy cases too well. Its tricky to see how to do this nicely - |
| 370 | * perhaps lapb should allow us to bounce this ? | 370 | * perhaps lapb should allow us to bounce this ? |
| 371 | */ | 371 | */ |
| 372 | 372 | ||
| 373 | static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) | 373 | static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) |
| 374 | { | 374 | { |
| 375 | struct x25_asy *sl=dev->priv; | 375 | struct x25_asy *sl = dev->priv; |
| 376 | 376 | ||
| 377 | spin_lock(&sl->lock); | 377 | spin_lock(&sl->lock); |
| 378 | if (netif_queue_stopped(sl->dev) || sl->tty == NULL) | 378 | if (netif_queue_stopped(sl->dev) || sl->tty == NULL) { |
| 379 | { | ||
| 380 | spin_unlock(&sl->lock); | 379 | spin_unlock(&sl->lock); |
| 381 | printk(KERN_ERR "x25_asy: tbusy drop\n"); | 380 | printk(KERN_ERR "x25_asy: tbusy drop\n"); |
| 382 | kfree_skb(skb); | 381 | kfree_skb(skb); |
| 383 | return; | 382 | return; |
| 384 | } | 383 | } |
| 385 | /* We were not busy, so we are now... :-) */ | 384 | /* We were not busy, so we are now... :-) */ |
| 386 | if (skb != NULL) | 385 | if (skb != NULL) { |
| 387 | { | ||
| 388 | x25_asy_lock(sl); | 386 | x25_asy_lock(sl); |
| 389 | sl->stats.tx_bytes+=skb->len; | 387 | sl->stats.tx_bytes += skb->len; |
| 390 | x25_asy_encaps(sl, skb->data, skb->len); | 388 | x25_asy_encaps(sl, skb->data, skb->len); |
| 391 | dev_kfree_skb(skb); | 389 | dev_kfree_skb(skb); |
| 392 | } | 390 | } |
| @@ -396,15 +394,16 @@ static void x25_asy_data_transmit(struct net_device *dev, struct sk_buff *skb) | |||
| 396 | /* | 394 | /* |
| 397 | * LAPB connection establish/down information. | 395 | * LAPB connection establish/down information. |
| 398 | */ | 396 | */ |
| 399 | 397 | ||
| 400 | static void x25_asy_connected(struct net_device *dev, int reason) | 398 | static void x25_asy_connected(struct net_device *dev, int reason) |
| 401 | { | 399 | { |
| 402 | struct x25_asy *sl = dev->priv; | 400 | struct x25_asy *sl = dev->priv; |
| 403 | struct sk_buff *skb; | 401 | struct sk_buff *skb; |
| 404 | unsigned char *ptr; | 402 | unsigned char *ptr; |
| 405 | 403 | ||
| 406 | if ((skb = dev_alloc_skb(1)) == NULL) { | 404 | skb = dev_alloc_skb(1); |
| 407 | printk(KERN_ERR "lapbeth: out of memory\n"); | 405 | if (skb == NULL) { |
| 406 | printk(KERN_ERR "x25_asy: out of memory\n"); | ||
| 408 | return; | 407 | return; |
| 409 | } | 408 | } |
| 410 | 409 | ||
| @@ -422,7 +421,8 @@ static void x25_asy_disconnected(struct net_device *dev, int reason) | |||
| 422 | struct sk_buff *skb; | 421 | struct sk_buff *skb; |
| 423 | unsigned char *ptr; | 422 | unsigned char *ptr; |
| 424 | 423 | ||
| 425 | if ((skb = dev_alloc_skb(1)) == NULL) { | 424 | skb = dev_alloc_skb(1); |
| 425 | if (skb == NULL) { | ||
| 426 | printk(KERN_ERR "x25_asy: out of memory\n"); | 426 | printk(KERN_ERR "x25_asy: out of memory\n"); |
| 427 | return; | 427 | return; |
| 428 | } | 428 | } |
| @@ -449,7 +449,7 @@ static struct lapb_register_struct x25_asy_callbacks = { | |||
| 449 | /* Open the low-level part of the X.25 channel. Easy! */ | 449 | /* Open the low-level part of the X.25 channel. Easy! */ |
| 450 | static int x25_asy_open(struct net_device *dev) | 450 | static int x25_asy_open(struct net_device *dev) |
| 451 | { | 451 | { |
| 452 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 452 | struct x25_asy *sl = dev->priv; |
| 453 | unsigned long len; | 453 | unsigned long len; |
| 454 | int err; | 454 | int err; |
| 455 | 455 | ||
| @@ -466,13 +466,11 @@ static int x25_asy_open(struct net_device *dev) | |||
| 466 | len = dev->mtu * 2; | 466 | len = dev->mtu * 2; |
| 467 | 467 | ||
| 468 | sl->rbuff = kmalloc(len + 4, GFP_KERNEL); | 468 | sl->rbuff = kmalloc(len + 4, GFP_KERNEL); |
| 469 | if (sl->rbuff == NULL) { | 469 | if (sl->rbuff == NULL) |
| 470 | goto norbuff; | 470 | goto norbuff; |
| 471 | } | ||
| 472 | sl->xbuff = kmalloc(len + 4, GFP_KERNEL); | 471 | sl->xbuff = kmalloc(len + 4, GFP_KERNEL); |
| 473 | if (sl->xbuff == NULL) { | 472 | if (sl->xbuff == NULL) |
| 474 | goto noxbuff; | 473 | goto noxbuff; |
| 475 | } | ||
| 476 | 474 | ||
| 477 | sl->buffsize = len; | 475 | sl->buffsize = len; |
| 478 | sl->rcount = 0; | 476 | sl->rcount = 0; |
| @@ -480,11 +478,12 @@ static int x25_asy_open(struct net_device *dev) | |||
| 480 | sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */ | 478 | sl->flags &= (1 << SLF_INUSE); /* Clear ESCAPE & ERROR flags */ |
| 481 | 479 | ||
| 482 | netif_start_queue(dev); | 480 | netif_start_queue(dev); |
| 483 | 481 | ||
| 484 | /* | 482 | /* |
| 485 | * Now attach LAPB | 483 | * Now attach LAPB |
| 486 | */ | 484 | */ |
| 487 | if((err=lapb_register(dev, &x25_asy_callbacks))==LAPB_OK) | 485 | err = lapb_register(dev, &x25_asy_callbacks); |
| 486 | if (err == LAPB_OK) | ||
| 488 | return 0; | 487 | return 0; |
| 489 | 488 | ||
| 490 | /* Cleanup */ | 489 | /* Cleanup */ |
| @@ -499,18 +498,20 @@ norbuff: | |||
| 499 | /* Close the low-level part of the X.25 channel. Easy! */ | 498 | /* Close the low-level part of the X.25 channel. Easy! */ |
| 500 | static int x25_asy_close(struct net_device *dev) | 499 | static int x25_asy_close(struct net_device *dev) |
| 501 | { | 500 | { |
| 502 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 501 | struct x25_asy *sl = dev->priv; |
| 503 | int err; | 502 | int err; |
| 504 | 503 | ||
| 505 | spin_lock(&sl->lock); | 504 | spin_lock(&sl->lock); |
| 506 | if (sl->tty) | 505 | if (sl->tty) |
| 507 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); | 506 | sl->tty->flags &= ~(1 << TTY_DO_WRITE_WAKEUP); |
| 508 | 507 | ||
| 509 | netif_stop_queue(dev); | 508 | netif_stop_queue(dev); |
| 510 | sl->rcount = 0; | 509 | sl->rcount = 0; |
| 511 | sl->xleft = 0; | 510 | sl->xleft = 0; |
| 512 | if((err=lapb_unregister(dev))!=LAPB_OK) | 511 | err = lapb_unregister(dev); |
| 513 | printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n",err); | 512 | if (err != LAPB_OK) |
| 513 | printk(KERN_ERR "x25_asy_close: lapb_unregister error -%d\n", | ||
| 514 | err); | ||
| 514 | spin_unlock(&sl->lock); | 515 | spin_unlock(&sl->lock); |
| 515 | return 0; | 516 | return 0; |
| 516 | } | 517 | } |
| @@ -521,8 +522,9 @@ static int x25_asy_close(struct net_device *dev) | |||
| 521 | * a block of X.25 data has been received, which can now be decapsulated | 522 | * a block of X.25 data has been received, which can now be decapsulated |
| 522 | * and sent on to some IP layer for further processing. | 523 | * and sent on to some IP layer for further processing. |
| 523 | */ | 524 | */ |
| 524 | 525 | ||
| 525 | static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, char *fp, int count) | 526 | static void x25_asy_receive_buf(struct tty_struct *tty, |
| 527 | const unsigned char *cp, char *fp, int count) | ||
| 526 | { | 528 | { |
| 527 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; | 529 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; |
| 528 | 530 | ||
| @@ -533,9 +535,8 @@ static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
| 533 | /* Read the characters out of the buffer */ | 535 | /* Read the characters out of the buffer */ |
| 534 | while (count--) { | 536 | while (count--) { |
| 535 | if (fp && *fp++) { | 537 | if (fp && *fp++) { |
| 536 | if (!test_and_set_bit(SLF_ERROR, &sl->flags)) { | 538 | if (!test_and_set_bit(SLF_ERROR, &sl->flags)) |
| 537 | sl->stats.rx_errors++; | 539 | sl->stats.rx_errors++; |
| 538 | } | ||
| 539 | cp++; | 540 | cp++; |
| 540 | continue; | 541 | continue; |
| 541 | } | 542 | } |
| @@ -556,31 +557,31 @@ static int x25_asy_open_tty(struct tty_struct *tty) | |||
| 556 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; | 557 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; |
| 557 | int err; | 558 | int err; |
| 558 | 559 | ||
| 560 | if (tty->ops->write == NULL) | ||
| 561 | return -EOPNOTSUPP; | ||
| 562 | |||
| 559 | /* First make sure we're not already connected. */ | 563 | /* First make sure we're not already connected. */ |
| 560 | if (sl && sl->magic == X25_ASY_MAGIC) { | 564 | if (sl && sl->magic == X25_ASY_MAGIC) |
| 561 | return -EEXIST; | 565 | return -EEXIST; |
| 562 | } | ||
| 563 | 566 | ||
| 564 | /* OK. Find a free X.25 channel to use. */ | 567 | /* OK. Find a free X.25 channel to use. */ |
| 565 | if ((sl = x25_asy_alloc()) == NULL) { | 568 | sl = x25_asy_alloc(); |
| 569 | if (sl == NULL) | ||
| 566 | return -ENFILE; | 570 | return -ENFILE; |
| 567 | } | ||
| 568 | 571 | ||
| 569 | sl->tty = tty; | 572 | sl->tty = tty; |
| 570 | tty->disc_data = sl; | 573 | tty->disc_data = sl; |
| 571 | tty->receive_room = 65536; | 574 | tty->receive_room = 65536; |
| 572 | if (tty->driver->flush_buffer) { | 575 | tty_driver_flush_buffer(tty); |
| 573 | tty->driver->flush_buffer(tty); | ||
| 574 | } | ||
| 575 | tty_ldisc_flush(tty); | 576 | tty_ldisc_flush(tty); |
| 576 | 577 | ||
| 577 | /* Restore default settings */ | 578 | /* Restore default settings */ |
| 578 | sl->dev->type = ARPHRD_X25; | 579 | sl->dev->type = ARPHRD_X25; |
| 579 | 580 | ||
| 580 | /* Perform the low-level X.25 async init */ | 581 | /* Perform the low-level X.25 async init */ |
| 581 | if ((err = x25_asy_open(sl->dev))) | 582 | err = x25_asy_open(sl->dev); |
| 583 | if (err) | ||
| 582 | return err; | 584 | return err; |
| 583 | |||
| 584 | /* Done. We have linked the TTY line to a channel. */ | 585 | /* Done. We have linked the TTY line to a channel. */ |
| 585 | return sl->dev->base_addr; | 586 | return sl->dev->base_addr; |
| 586 | } | 587 | } |
| @@ -601,9 +602,7 @@ static void x25_asy_close_tty(struct tty_struct *tty) | |||
| 601 | return; | 602 | return; |
| 602 | 603 | ||
| 603 | if (sl->dev->flags & IFF_UP) | 604 | if (sl->dev->flags & IFF_UP) |
| 604 | { | 605 | dev_close(sl->dev); |
| 605 | (void) dev_close(sl->dev); | ||
| 606 | } | ||
| 607 | 606 | ||
| 608 | tty->disc_data = NULL; | 607 | tty->disc_data = NULL; |
| 609 | sl->tty = NULL; | 608 | sl->tty = NULL; |
| @@ -613,8 +612,7 @@ static void x25_asy_close_tty(struct tty_struct *tty) | |||
| 613 | 612 | ||
| 614 | static struct net_device_stats *x25_asy_get_stats(struct net_device *dev) | 613 | static struct net_device_stats *x25_asy_get_stats(struct net_device *dev) |
| 615 | { | 614 | { |
| 616 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 615 | struct x25_asy *sl = dev->priv; |
| 617 | |||
| 618 | return &sl->stats; | 616 | return &sl->stats; |
| 619 | } | 617 | } |
| 620 | 618 | ||
| @@ -641,21 +639,19 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len) | |||
| 641 | * character sequence, according to the X.25 protocol. | 639 | * character sequence, according to the X.25 protocol. |
| 642 | */ | 640 | */ |
| 643 | 641 | ||
| 644 | while (len-- > 0) | 642 | while (len-- > 0) { |
| 645 | { | 643 | switch (c = *s++) { |
| 646 | switch(c = *s++) | 644 | case X25_END: |
| 647 | { | 645 | *ptr++ = X25_ESC; |
| 648 | case X25_END: | 646 | *ptr++ = X25_ESCAPE(X25_END); |
| 649 | *ptr++ = X25_ESC; | 647 | break; |
| 650 | *ptr++ = X25_ESCAPE(X25_END); | 648 | case X25_ESC: |
| 651 | break; | 649 | *ptr++ = X25_ESC; |
| 652 | case X25_ESC: | 650 | *ptr++ = X25_ESCAPE(X25_ESC); |
| 653 | *ptr++ = X25_ESC; | 651 | break; |
| 654 | *ptr++ = X25_ESCAPE(X25_ESC); | 652 | default: |
| 655 | break; | 653 | *ptr++ = c; |
| 656 | default: | 654 | break; |
| 657 | *ptr++ = c; | ||
| 658 | break; | ||
| 659 | } | 655 | } |
| 660 | } | 656 | } |
| 661 | *ptr++ = X25_END; | 657 | *ptr++ = X25_END; |
| @@ -665,31 +661,25 @@ int x25_asy_esc(unsigned char *s, unsigned char *d, int len) | |||
| 665 | static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) | 661 | static void x25_asy_unesc(struct x25_asy *sl, unsigned char s) |
| 666 | { | 662 | { |
| 667 | 663 | ||
| 668 | switch(s) | 664 | switch (s) { |
| 669 | { | 665 | case X25_END: |
| 670 | case X25_END: | 666 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) |
| 671 | if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && (sl->rcount > 2)) | 667 | && sl->rcount > 2) |
| 672 | { | 668 | x25_asy_bump(sl); |
| 673 | x25_asy_bump(sl); | 669 | clear_bit(SLF_ESCAPE, &sl->flags); |
| 674 | } | 670 | sl->rcount = 0; |
| 675 | clear_bit(SLF_ESCAPE, &sl->flags); | 671 | return; |
| 676 | sl->rcount = 0; | 672 | case X25_ESC: |
| 677 | return; | 673 | set_bit(SLF_ESCAPE, &sl->flags); |
| 678 | 674 | return; | |
| 679 | case X25_ESC: | 675 | case X25_ESCAPE(X25_ESC): |
| 680 | set_bit(SLF_ESCAPE, &sl->flags); | 676 | case X25_ESCAPE(X25_END): |
| 681 | return; | 677 | if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) |
| 682 | 678 | s = X25_UNESCAPE(s); | |
| 683 | case X25_ESCAPE(X25_ESC): | 679 | break; |
| 684 | case X25_ESCAPE(X25_END): | 680 | } |
| 685 | if (test_and_clear_bit(SLF_ESCAPE, &sl->flags)) | 681 | if (!test_bit(SLF_ERROR, &sl->flags)) { |
| 686 | s = X25_UNESCAPE(s); | 682 | if (sl->rcount < sl->buffsize) { |
| 687 | break; | ||
| 688 | } | ||
| 689 | if (!test_bit(SLF_ERROR, &sl->flags)) | ||
| 690 | { | ||
| 691 | if (sl->rcount < sl->buffsize) | ||
| 692 | { | ||
| 693 | sl->rbuff[sl->rcount++] = s; | 683 | sl->rbuff[sl->rcount++] = s; |
| 694 | return; | 684 | return; |
| 695 | } | 685 | } |
| @@ -709,7 +699,7 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 709 | if (!sl || sl->magic != X25_ASY_MAGIC) | 699 | if (!sl || sl->magic != X25_ASY_MAGIC) |
| 710 | return -EINVAL; | 700 | return -EINVAL; |
| 711 | 701 | ||
| 712 | switch(cmd) { | 702 | switch (cmd) { |
| 713 | case SIOCGIFNAME: | 703 | case SIOCGIFNAME: |
| 714 | if (copy_to_user((void __user *)arg, sl->dev->name, | 704 | if (copy_to_user((void __user *)arg, sl->dev->name, |
| 715 | strlen(sl->dev->name) + 1)) | 705 | strlen(sl->dev->name) + 1)) |
| @@ -724,8 +714,8 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file, | |||
| 724 | 714 | ||
| 725 | static int x25_asy_open_dev(struct net_device *dev) | 715 | static int x25_asy_open_dev(struct net_device *dev) |
| 726 | { | 716 | { |
| 727 | struct x25_asy *sl = (struct x25_asy*)(dev->priv); | 717 | struct x25_asy *sl = dev->priv; |
| 728 | if(sl->tty==NULL) | 718 | if (sl->tty == NULL) |
| 729 | return -ENODEV; | 719 | return -ENODEV; |
| 730 | return 0; | 720 | return 0; |
| 731 | } | 721 | } |
| @@ -741,9 +731,9 @@ static void x25_asy_setup(struct net_device *dev) | |||
| 741 | set_bit(SLF_INUSE, &sl->flags); | 731 | set_bit(SLF_INUSE, &sl->flags); |
| 742 | 732 | ||
| 743 | /* | 733 | /* |
| 744 | * Finish setting up the DEVICE info. | 734 | * Finish setting up the DEVICE info. |
| 745 | */ | 735 | */ |
| 746 | 736 | ||
| 747 | dev->mtu = SL_MTU; | 737 | dev->mtu = SL_MTU; |
| 748 | dev->hard_start_xmit = x25_asy_xmit; | 738 | dev->hard_start_xmit = x25_asy_xmit; |
| 749 | dev->tx_timeout = x25_asy_timeout; | 739 | dev->tx_timeout = x25_asy_timeout; |
| @@ -778,9 +768,10 @@ static int __init init_x25_asy(void) | |||
| 778 | x25_asy_maxdev = 4; /* Sanity */ | 768 | x25_asy_maxdev = 4; /* Sanity */ |
| 779 | 769 | ||
| 780 | printk(KERN_INFO "X.25 async: version 0.00 ALPHA " | 770 | printk(KERN_INFO "X.25 async: version 0.00 ALPHA " |
| 781 | "(dynamic channels, max=%d).\n", x25_asy_maxdev ); | 771 | "(dynamic channels, max=%d).\n", x25_asy_maxdev); |
| 782 | 772 | ||
| 783 | x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device*), GFP_KERNEL); | 773 | x25_asy_devs = kcalloc(x25_asy_maxdev, sizeof(struct net_device *), |
| 774 | GFP_KERNEL); | ||
| 784 | if (!x25_asy_devs) { | 775 | if (!x25_asy_devs) { |
| 785 | printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] " | 776 | printk(KERN_WARNING "X25 async: Can't allocate x25_asy_ctrls[] " |
| 786 | "array! Uaargh! (-> No X.25 available)\n"); | 777 | "array! Uaargh! (-> No X.25 available)\n"); |
| @@ -802,7 +793,7 @@ static void __exit exit_x25_asy(void) | |||
| 802 | struct x25_asy *sl = dev->priv; | 793 | struct x25_asy *sl = dev->priv; |
| 803 | 794 | ||
| 804 | spin_lock_bh(&sl->lock); | 795 | spin_lock_bh(&sl->lock); |
| 805 | if (sl->tty) | 796 | if (sl->tty) |
| 806 | tty_hangup(sl->tty); | 797 | tty_hangup(sl->tty); |
| 807 | 798 | ||
| 808 | spin_unlock_bh(&sl->lock); | 799 | spin_unlock_bh(&sl->lock); |
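The restyled x25_asy_esc()/x25_asy_unesc() switches above implement plain END/ESC byte stuffing for the async line. For reference, a minimal stand-alone sketch of that stuffing rule, assuming the conventional values (0x7e delimiter, 0x7d escape, XOR 0x20) rather than quoting x25_asy.h:

/* Sketch only: mirrors the escape loop shown in the hunk above.
 * The X25_END/X25_ESC values are assumptions, not taken from x25_asy.h. */
#include <stdio.h>

#define X25_END		0x7e	/* assumed frame delimiter */
#define X25_ESC		0x7d	/* assumed escape byte */
#define X25_ESCAPE(c)	((c) ^ 0x20)

/* Copy len bytes from s to d, stuffing END/ESC, and close with END. */
static int x25_stuff(const unsigned char *s, unsigned char *d, int len)
{
	unsigned char *ptr = d;

	while (len-- > 0) {
		unsigned char c = *s++;

		switch (c) {
		case X25_END:
		case X25_ESC:
			*ptr++ = X25_ESC;
			*ptr++ = X25_ESCAPE(c);
			break;
		default:
			*ptr++ = c;
			break;
		}
	}
	*ptr++ = X25_END;
	return ptr - d;
}

int main(void)
{
	unsigned char in[] = { 0x01, 0x7e, 0x02, 0x7d, 0x03 };
	unsigned char out[2 * sizeof(in) + 1];
	int i, n = x25_stuff(in, out, (int)sizeof(in));

	for (i = 0; i < n; i++)
		printf("%02x ", out[i]);	/* 01 7d 5e 02 7d 5d 03 7e */
	printf("\n");
	return 0;
}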
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig index 9a25f550fd16..d5b7a76fcaad 100644 --- a/drivers/net/wireless/iwlwifi/Kconfig +++ b/drivers/net/wireless/iwlwifi/Kconfig | |||
| @@ -6,6 +6,10 @@ config IWLCORE | |||
| 6 | tristate "Intel Wireless Wifi Core" | 6 | tristate "Intel Wireless Wifi Core" |
| 7 | depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL | 7 | depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL |
| 8 | select IWLWIFI | 8 | select IWLWIFI |
| 9 | select MAC80211_LEDS if IWLWIFI_LEDS | ||
| 10 | select LEDS_CLASS if IWLWIFI_LEDS | ||
| 11 | select RFKILL if IWLWIFI_RFKILL | ||
| 12 | select RFKILL_INPUT if IWLWIFI_RFKILL | ||
| 9 | 13 | ||
| 10 | config IWLWIFI_LEDS | 14 | config IWLWIFI_LEDS |
| 11 | bool | 15 | bool |
| @@ -14,8 +18,6 @@ config IWLWIFI_LEDS | |||
| 14 | config IWLWIFI_RFKILL | 18 | config IWLWIFI_RFKILL |
| 15 | boolean "IWLWIFI RF kill support" | 19 | boolean "IWLWIFI RF kill support" |
| 16 | depends on IWLCORE | 20 | depends on IWLCORE |
| 17 | select RFKILL | ||
| 18 | select RFKILL_INPUT | ||
| 19 | 21 | ||
| 20 | config IWL4965 | 22 | config IWL4965 |
| 21 | tristate "Intel Wireless WiFi 4965AGN" | 23 | tristate "Intel Wireless WiFi 4965AGN" |
| @@ -55,8 +57,6 @@ config IWL4965_HT | |||
| 55 | config IWL4965_LEDS | 57 | config IWL4965_LEDS |
| 56 | bool "Enable LEDS features in iwl4965 driver" | 58 | bool "Enable LEDS features in iwl4965 driver" |
| 57 | depends on IWL4965 | 59 | depends on IWL4965 |
| 58 | select MAC80211_LEDS | ||
| 59 | select LEDS_CLASS | ||
| 60 | select IWLWIFI_LEDS | 60 | select IWLWIFI_LEDS |
| 61 | ---help--- | 61 | ---help--- |
| 62 | This option enables LEDS for the iwlwifi drivers | 62 | This option enables LEDS for the iwlwifi drivers |
| @@ -112,6 +112,8 @@ config IWL3945 | |||
| 112 | depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL | 112 | depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL |
| 113 | select FW_LOADER | 113 | select FW_LOADER |
| 114 | select IWLWIFI | 114 | select IWLWIFI |
| 115 | select MAC80211_LEDS if IWL3945_LEDS | ||
| 116 | select LEDS_CLASS if IWL3945_LEDS | ||
| 115 | ---help--- | 117 | ---help--- |
| 116 | Select to build the driver supporting the: | 118 | Select to build the driver supporting the: |
| 117 | 119 | ||
| @@ -143,8 +145,6 @@ config IWL3945_SPECTRUM_MEASUREMENT | |||
| 143 | config IWL3945_LEDS | 145 | config IWL3945_LEDS |
| 144 | bool "Enable LEDS features in iwl3945 driver" | 146 | bool "Enable LEDS features in iwl3945 driver" |
| 145 | depends on IWL3945 | 147 | depends on IWL3945 |
| 146 | select MAC80211_LEDS | ||
| 147 | select LEDS_CLASS | ||
| 148 | ---help--- | 148 | ---help--- |
| 149 | This option enables LEDS for the iwl3945 driver. | 149 | This option enables LEDS for the iwl3945 driver. |
| 150 | 150 | ||
diff --git a/drivers/net/wireless/strip.c b/drivers/net/wireless/strip.c index bced3fe1cf8a..5dd23c93497d 100644 --- a/drivers/net/wireless/strip.c +++ b/drivers/net/wireless/strip.c | |||
| @@ -768,41 +768,17 @@ static __u8 *UnStuffData(__u8 * src, __u8 * end, __u8 * dst, | |||
| 768 | /* General routines for STRIP */ | 768 | /* General routines for STRIP */ |
| 769 | 769 | ||
| 770 | /* | 770 | /* |
| 771 | * get_baud returns the current baud rate, as one of the constants defined in | ||
| 772 | * termbits.h | ||
| 773 | * If the user has issued a baud rate override using the 'setserial' command | ||
| 774 | * and the logical current rate is set to 38.4, then the true baud rate | ||
| 775 | * currently in effect (57.6 or 115.2) is returned. | ||
| 776 | */ | ||
| 777 | static unsigned int get_baud(struct tty_struct *tty) | ||
| 778 | { | ||
| 779 | if (!tty || !tty->termios) | ||
| 780 | return (0); | ||
| 781 | if ((tty->termios->c_cflag & CBAUD) == B38400 && tty->driver_data) { | ||
| 782 | struct async_struct *info = | ||
| 783 | (struct async_struct *) tty->driver_data; | ||
| 784 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_HI) | ||
| 785 | return (B57600); | ||
| 786 | if ((info->flags & ASYNC_SPD_MASK) == ASYNC_SPD_VHI) | ||
| 787 | return (B115200); | ||
| 788 | } | ||
| 789 | return (tty->termios->c_cflag & CBAUD); | ||
| 790 | } | ||
| 791 | |||
| 792 | /* | ||
| 793 | * set_baud sets the baud rate to the rate defined by baudcode | 771 | * set_baud sets the baud rate to the rate defined by baudcode |
| 794 | * Note: The rate B38400 should be avoided, because the user may have | ||
| 795 | * issued a 'setserial' speed override to map that to a different speed. | ||
| 796 | * We could achieve a true rate of 38400 if we needed to by cancelling | ||
| 797 | * any user speed override that is in place, but that might annoy the | ||
| 798 | * user, so it is simplest to just avoid using 38400. | ||
| 799 | */ | 772 | */ |
| 800 | static void set_baud(struct tty_struct *tty, unsigned int baudcode) | 773 | static void set_baud(struct tty_struct *tty, speed_t baudrate) |
| 801 | { | 774 | { |
| 802 | struct ktermios old_termios = *(tty->termios); | 775 | struct ktermios old_termios; |
| 803 | tty->termios->c_cflag &= ~CBAUD; /* Clear the old baud setting */ | 776 | |
| 804 | tty->termios->c_cflag |= baudcode; /* Set the new baud setting */ | 777 | mutex_lock(&tty->termios_mutex); |
| 805 | tty->driver->set_termios(tty, &old_termios); | 778 | old_termios = *(tty->termios); |
| 779 | tty_encode_baud_rate(tty, baudrate, baudrate); | ||
| 780 | tty->ops->set_termios(tty, &old_termios); | ||
| 781 | mutex_unlock(&tty->termios_mutex); | ||
| 806 | } | 782 | } |
| 807 | 783 | ||
| 808 | /* | 784 | /* |
| @@ -1217,7 +1193,7 @@ static void ResetRadio(struct strip *strip_info) | |||
| 1217 | strip_info->watchdog_doreset = jiffies + 1 * HZ; | 1193 | strip_info->watchdog_doreset = jiffies + 1 * HZ; |
| 1218 | 1194 | ||
| 1219 | /* If the user has selected a baud rate above 38.4 see what magic we have to do */ | 1195 | /* If the user has selected a baud rate above 38.4 see what magic we have to do */ |
| 1220 | if (strip_info->user_baud > B38400) { | 1196 | if (strip_info->user_baud > 38400) { |
| 1221 | /* | 1197 | /* |
| 1222 | * Subtle stuff: Pay attention :-) | 1198 | * Subtle stuff: Pay attention :-) |
| 1223 | * If the serial port is currently at the user's selected (>38.4) rate, | 1199 | * If the serial port is currently at the user's selected (>38.4) rate, |
| @@ -1227,17 +1203,17 @@ static void ResetRadio(struct strip *strip_info) | |||
| 1227 | * issued the ATS304 command last time through, so this time we restore | 1203 | * issued the ATS304 command last time through, so this time we restore |
| 1228 | * the user's selected rate and issue the normal starmode reset string. | 1204 | * the user's selected rate and issue the normal starmode reset string. |
| 1229 | */ | 1205 | */ |
| 1230 | if (strip_info->user_baud == get_baud(tty)) { | 1206 | if (strip_info->user_baud == tty_get_baud_rate(tty)) { |
| 1231 | static const char b0[] = "ate0q1s304=57600\r"; | 1207 | static const char b0[] = "ate0q1s304=57600\r"; |
| 1232 | static const char b1[] = "ate0q1s304=115200\r"; | 1208 | static const char b1[] = "ate0q1s304=115200\r"; |
| 1233 | static const StringDescriptor baudstring[2] = | 1209 | static const StringDescriptor baudstring[2] = |
| 1234 | { {b0, sizeof(b0) - 1} | 1210 | { {b0, sizeof(b0) - 1} |
| 1235 | , {b1, sizeof(b1) - 1} | 1211 | , {b1, sizeof(b1) - 1} |
| 1236 | }; | 1212 | }; |
| 1237 | set_baud(tty, B19200); | 1213 | set_baud(tty, 19200); |
| 1238 | if (strip_info->user_baud == B57600) | 1214 | if (strip_info->user_baud == 57600) |
| 1239 | s = baudstring[0]; | 1215 | s = baudstring[0]; |
| 1240 | else if (strip_info->user_baud == B115200) | 1216 | else if (strip_info->user_baud == 115200) |
| 1241 | s = baudstring[1]; | 1217 | s = baudstring[1]; |
| 1242 | else | 1218 | else |
| 1243 | s = baudstring[1]; /* For now */ | 1219 | s = baudstring[1]; /* For now */ |
| @@ -1245,7 +1221,7 @@ static void ResetRadio(struct strip *strip_info) | |||
| 1245 | set_baud(tty, strip_info->user_baud); | 1221 | set_baud(tty, strip_info->user_baud); |
| 1246 | } | 1222 | } |
| 1247 | 1223 | ||
| 1248 | tty->driver->write(tty, s.string, s.length); | 1224 | tty->ops->write(tty, s.string, s.length); |
| 1249 | #ifdef EXT_COUNTERS | 1225 | #ifdef EXT_COUNTERS |
| 1250 | strip_info->tx_ebytes += s.length; | 1226 | strip_info->tx_ebytes += s.length; |
| 1251 | #endif | 1227 | #endif |
| @@ -1267,7 +1243,7 @@ static void strip_write_some_more(struct tty_struct *tty) | |||
| 1267 | 1243 | ||
| 1268 | if (strip_info->tx_left > 0) { | 1244 | if (strip_info->tx_left > 0) { |
| 1269 | int num_written = | 1245 | int num_written = |
| 1270 | tty->driver->write(tty, strip_info->tx_head, | 1246 | tty->ops->write(tty, strip_info->tx_head, |
| 1271 | strip_info->tx_left); | 1247 | strip_info->tx_left); |
| 1272 | strip_info->tx_left -= num_written; | 1248 | strip_info->tx_left -= num_written; |
| 1273 | strip_info->tx_head += num_written; | 1249 | strip_info->tx_head += num_written; |
| @@ -2457,7 +2433,7 @@ static int strip_open_low(struct net_device *dev) | |||
| 2457 | strip_info->working = FALSE; | 2433 | strip_info->working = FALSE; |
| 2458 | strip_info->firmware_level = NoStructure; | 2434 | strip_info->firmware_level = NoStructure; |
| 2459 | strip_info->next_command = CompatibilityCommand; | 2435 | strip_info->next_command = CompatibilityCommand; |
| 2460 | strip_info->user_baud = get_baud(strip_info->tty); | 2436 | strip_info->user_baud = tty_get_baud_rate(strip_info->tty); |
| 2461 | 2437 | ||
| 2462 | printk(KERN_INFO "%s: Initializing Radio.\n", | 2438 | printk(KERN_INFO "%s: Initializing Radio.\n", |
| 2463 | strip_info->dev->name); | 2439 | strip_info->dev->name); |
| @@ -2632,6 +2608,13 @@ static int strip_open(struct tty_struct *tty) | |||
| 2632 | return -EEXIST; | 2608 | return -EEXIST; |
| 2633 | 2609 | ||
| 2634 | /* | 2610 | /* |
| 2611 | * We need a write method. | ||
| 2612 | */ | ||
| 2613 | |||
| 2614 | if (tty->ops->write == NULL) | ||
| 2615 | return -EOPNOTSUPP; | ||
| 2616 | |||
| 2617 | /* | ||
| 2635 | * OK. Find a free STRIP channel to use. | 2618 | * OK. Find a free STRIP channel to use. |
| 2636 | */ | 2619 | */ |
| 2637 | if ((strip_info = strip_alloc()) == NULL) | 2620 | if ((strip_info = strip_alloc()) == NULL) |
| @@ -2652,8 +2635,7 @@ static int strip_open(struct tty_struct *tty) | |||
| 2652 | tty->disc_data = strip_info; | 2635 | tty->disc_data = strip_info; |
| 2653 | tty->receive_room = 65536; | 2636 | tty->receive_room = 65536; |
| 2654 | 2637 | ||
| 2655 | if (tty->driver->flush_buffer) | 2638 | tty_driver_flush_buffer(tty); |
| 2656 | tty->driver->flush_buffer(tty); | ||
| 2657 | 2639 | ||
| 2658 | /* | 2640 | /* |
| 2659 | * Restore default settings | 2641 | * Restore default settings |
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 54a6ef72906e..0338b0912674 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c | |||
| @@ -76,7 +76,7 @@ int parport_wait_event (struct parport *port, signed long timeout) | |||
| 76 | semaphore. */ | 76 | semaphore. */ |
| 77 | return 1; | 77 | return 1; |
| 78 | 78 | ||
| 79 | init_timer (&timer); | 79 | init_timer_on_stack(&timer); |
| 80 | timer.expires = jiffies + timeout; | 80 | timer.expires = jiffies + timeout; |
| 81 | timer.function = timeout_waiting_on_port; | 81 | timer.function = timeout_waiting_on_port; |
| 82 | port_from_cookie[port->number % PARPORT_MAX] = port; | 82 | port_from_cookie[port->number % PARPORT_MAX] = port; |
| @@ -88,6 +88,8 @@ int parport_wait_event (struct parport *port, signed long timeout) | |||
| 88 | /* Timed out. */ | 88 | /* Timed out. */ |
| 89 | ret = 1; | 89 | ret = 1; |
| 90 | 90 | ||
| 91 | destroy_timer_on_stack(&timer); | ||
| 92 | |||
| 91 | return ret; | 93 | return ret; |
| 92 | } | 94 | } |
| 93 | 95 | ||
diff --git a/drivers/parport/parport_gsc.c b/drivers/parport/parport_gsc.c index 0e77ae2b71a0..e6a7e847ee80 100644 --- a/drivers/parport/parport_gsc.c +++ b/drivers/parport/parport_gsc.c | |||
| @@ -365,11 +365,11 @@ static int __devinit parport_init_chip(struct parisc_device *dev) | |||
| 365 | if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) { | 365 | if (boot_cpu_data.cpu_type > pcxt && !pdc_add_valid(port+4)) { |
| 366 | 366 | ||
| 367 | /* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */ | 367 | /* Initialize bidirectional-mode (0x10) & data-transfer-mode #1 (0x20) */ |
| 368 | printk("%s: initialize bidirectional-mode.\n", __FUNCTION__); | 368 | printk("%s: initialize bidirectional-mode.\n", __func__); |
| 369 | parport_writeb ( (0x10 + 0x20), port + 4); | 369 | parport_writeb ( (0x10 + 0x20), port + 4); |
| 370 | 370 | ||
| 371 | } else { | 371 | } else { |
| 372 | printk("%s: enhanced parport-modes not supported.\n", __FUNCTION__); | 372 | printk("%s: enhanced parport-modes not supported.\n", __func__); |
| 373 | } | 373 | } |
| 374 | 374 | ||
| 375 | p = parport_gsc_probe_port(port, 0, dev->irq, | 375 | p = parport_gsc_probe_port(port, 0, dev->irq, |
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index e71092e80288..e0c2a4584ec6 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
| @@ -1415,7 +1415,7 @@ static void __devinit winbond_check(int io, int key) | |||
| 1415 | { | 1415 | { |
| 1416 | int devid,devrev,oldid,x_devid,x_devrev,x_oldid; | 1416 | int devid,devrev,oldid,x_devid,x_devrev,x_oldid; |
| 1417 | 1417 | ||
| 1418 | if (!request_region(io, 3, __FUNCTION__)) | 1418 | if (!request_region(io, 3, __func__)) |
| 1419 | return; | 1419 | return; |
| 1420 | 1420 | ||
| 1421 | /* First probe without key */ | 1421 | /* First probe without key */ |
| @@ -1449,7 +1449,7 @@ static void __devinit winbond_check2(int io,int key) | |||
| 1449 | { | 1449 | { |
| 1450 | int devid,devrev,oldid,x_devid,x_devrev,x_oldid; | 1450 | int devid,devrev,oldid,x_devid,x_devrev,x_oldid; |
| 1451 | 1451 | ||
| 1452 | if (!request_region(io, 3, __FUNCTION__)) | 1452 | if (!request_region(io, 3, __func__)) |
| 1453 | return; | 1453 | return; |
| 1454 | 1454 | ||
| 1455 | /* First probe without the key */ | 1455 | /* First probe without the key */ |
| @@ -1482,7 +1482,7 @@ static void __devinit smsc_check(int io, int key) | |||
| 1482 | { | 1482 | { |
| 1483 | int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev; | 1483 | int id,rev,oldid,oldrev,x_id,x_rev,x_oldid,x_oldrev; |
| 1484 | 1484 | ||
| 1485 | if (!request_region(io, 3, __FUNCTION__)) | 1485 | if (!request_region(io, 3, __func__)) |
| 1486 | return; | 1486 | return; |
| 1487 | 1487 | ||
| 1488 | /* First probe without the key */ | 1488 | /* First probe without the key */ |
| @@ -1547,7 +1547,7 @@ static void __devinit detect_and_report_it87(void) | |||
| 1547 | u8 r; | 1547 | u8 r; |
| 1548 | if (verbose_probing) | 1548 | if (verbose_probing) |
| 1549 | printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n"); | 1549 | printk(KERN_DEBUG "IT8705 Super-IO detection, now testing port 2E ...\n"); |
| 1550 | if (!request_region(0x2e, 1, __FUNCTION__)) | 1550 | if (!request_region(0x2e, 1, __func__)) |
| 1551 | return; | 1551 | return; |
| 1552 | outb(0x87, 0x2e); | 1552 | outb(0x87, 0x2e); |
| 1553 | outb(0x01, 0x2e); | 1553 | outb(0x01, 0x2e); |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 0e1f35c9ed9d..3e5653c92f4b 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
| @@ -982,15 +982,16 @@ tty3215_write(struct tty_struct * tty, | |||
| 982 | /* | 982 | /* |
| 983 | * Put character routine for 3215 ttys | 983 | * Put character routine for 3215 ttys |
| 984 | */ | 984 | */ |
| 985 | static void | 985 | static int |
| 986 | tty3215_put_char(struct tty_struct *tty, unsigned char ch) | 986 | tty3215_put_char(struct tty_struct *tty, unsigned char ch) |
| 987 | { | 987 | { |
| 988 | struct raw3215_info *raw; | 988 | struct raw3215_info *raw; |
| 989 | 989 | ||
| 990 | if (!tty) | 990 | if (!tty) |
| 991 | return; | 991 | return 0; |
| 992 | raw = (struct raw3215_info *) tty->driver_data; | 992 | raw = (struct raw3215_info *) tty->driver_data; |
| 993 | raw3215_putchar(raw, ch); | 993 | raw3215_putchar(raw, ch); |
| 994 | return 1; | ||
| 994 | } | 995 | } |
| 995 | 996 | ||
| 996 | static void | 997 | static void |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index b8f35bc52b7b..9e784d5f7f57 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | #include <linux/cpu.h> | 10 | #include <linux/cpu.h> |
| 11 | #include <linux/sysdev.h> | 11 | #include <linux/sysdev.h> |
| 12 | #include <linux/workqueue.h> | 12 | #include <linux/workqueue.h> |
| 13 | #include <asm/smp.h> | ||
| 13 | #include "sclp.h" | 14 | #include "sclp.h" |
| 14 | 15 | ||
| 15 | #define TAG "sclp_config: " | 16 | #define TAG "sclp_config: " |
| @@ -19,9 +20,11 @@ struct conf_mgm_data { | |||
| 19 | u8 ev_qualifier; | 20 | u8 ev_qualifier; |
| 20 | } __attribute__((packed)); | 21 | } __attribute__((packed)); |
| 21 | 22 | ||
| 23 | #define EV_QUAL_CPU_CHANGE 1 | ||
| 22 | #define EV_QUAL_CAP_CHANGE 3 | 24 | #define EV_QUAL_CAP_CHANGE 3 |
| 23 | 25 | ||
| 24 | static struct work_struct sclp_cpu_capability_work; | 26 | static struct work_struct sclp_cpu_capability_work; |
| 27 | static struct work_struct sclp_cpu_change_work; | ||
| 25 | 28 | ||
| 26 | static void sclp_cpu_capability_notify(struct work_struct *work) | 29 | static void sclp_cpu_capability_notify(struct work_struct *work) |
| 27 | { | 30 | { |
| @@ -37,13 +40,24 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
| 37 | put_online_cpus(); | 40 | put_online_cpus(); |
| 38 | } | 41 | } |
| 39 | 42 | ||
| 43 | static void sclp_cpu_change_notify(struct work_struct *work) | ||
| 44 | { | ||
| 45 | smp_rescan_cpus(); | ||
| 46 | } | ||
| 47 | |||
| 40 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) | 48 | static void sclp_conf_receiver_fn(struct evbuf_header *evbuf) |
| 41 | { | 49 | { |
| 42 | struct conf_mgm_data *cdata; | 50 | struct conf_mgm_data *cdata; |
| 43 | 51 | ||
| 44 | cdata = (struct conf_mgm_data *)(evbuf + 1); | 52 | cdata = (struct conf_mgm_data *)(evbuf + 1); |
| 45 | if (cdata->ev_qualifier == EV_QUAL_CAP_CHANGE) | 53 | switch (cdata->ev_qualifier) { |
| 54 | case EV_QUAL_CPU_CHANGE: | ||
| 55 | schedule_work(&sclp_cpu_change_work); | ||
| 56 | break; | ||
| 57 | case EV_QUAL_CAP_CHANGE: | ||
| 46 | schedule_work(&sclp_cpu_capability_work); | 58 | schedule_work(&sclp_cpu_capability_work); |
| 59 | break; | ||
| 60 | } | ||
| 47 | } | 61 | } |
| 48 | 62 | ||
| 49 | static struct sclp_register sclp_conf_register = | 63 | static struct sclp_register sclp_conf_register = |
| @@ -57,6 +71,7 @@ static int __init sclp_conf_init(void) | |||
| 57 | int rc; | 71 | int rc; |
| 58 | 72 | ||
| 59 | INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); | 73 | INIT_WORK(&sclp_cpu_capability_work, sclp_cpu_capability_notify); |
| 74 | INIT_WORK(&sclp_cpu_change_work, sclp_cpu_change_notify); | ||
| 60 | 75 | ||
| 61 | rc = sclp_register(&sclp_conf_register); | 76 | rc = sclp_register(&sclp_conf_register); |
| 62 | if (rc) { | 77 | if (rc) { |
diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index e3b3d390b4a3..40b11521cd20 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c | |||
| @@ -412,14 +412,14 @@ sclp_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
| 412 | * - including previous characters from sclp_tty_put_char() and strings from | 412 | * - including previous characters from sclp_tty_put_char() and strings from |
| 413 | * sclp_write() without final '\n' - will be written. | 413 | * sclp_write() without final '\n' - will be written. |
| 414 | */ | 414 | */ |
| 415 | static void | 415 | static int |
| 416 | sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) | 416 | sclp_tty_put_char(struct tty_struct *tty, unsigned char ch) |
| 417 | { | 417 | { |
| 418 | sclp_tty_chars[sclp_tty_chars_count++] = ch; | 418 | sclp_tty_chars[sclp_tty_chars_count++] = ch; |
| 419 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { | 419 | if (ch == '\n' || sclp_tty_chars_count >= SCLP_TTY_BUF_SIZE) { |
| 420 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); | 420 | sclp_tty_write_string(sclp_tty_chars, sclp_tty_chars_count); |
| 421 | sclp_tty_chars_count = 0; | 421 | sclp_tty_chars_count = 0; |
| 422 | } | 422 | } return 1; |
| 423 | } | 423 | } |
| 424 | 424 | ||
| 425 | /* | 425 | /* |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index ed507594e62b..35707c04e613 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
| @@ -524,11 +524,15 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp) | |||
| 524 | * NOTE: include/linux/tty_driver.h specifies that a character should be | 524 | * NOTE: include/linux/tty_driver.h specifies that a character should be |
| 525 | * ignored if there is no room in the queue. This driver implements a different | 525 | * ignored if there is no room in the queue. This driver implements a different |
| 526 | * semantic in that it will block when there is no more room left. | 526 | * semantic in that it will block when there is no more room left. |
| 527 | * | ||
| 528 | * FIXME: putchar can currently be called from BH and other non blocking | ||
| 529 | * handlers so this semantic isn't a good idea. | ||
| 527 | */ | 530 | */ |
| 528 | static void | 531 | static int |
| 529 | sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) | 532 | sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) |
| 530 | { | 533 | { |
| 531 | __sclp_vt220_write(&ch, 1, 0, 0, 1); | 534 | __sclp_vt220_write(&ch, 1, 0, 0, 1); |
| 535 | return 1; | ||
| 532 | } | 536 | } |
| 533 | 537 | ||
| 534 | /* | 538 | /* |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index 70b1980a08b6..c1f2adefad41 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
| @@ -965,7 +965,7 @@ tty3270_write_room(struct tty_struct *tty) | |||
| 965 | * Insert character into the screen at the current position with the | 965 | * Insert character into the screen at the current position with the |
| 966 | * current color and highlight. This function does NOT do cursor movement. | 966 | * current color and highlight. This function does NOT do cursor movement. |
| 967 | */ | 967 | */ |
| 968 | static void | 968 | static int |
| 969 | tty3270_put_character(struct tty3270 *tp, char ch) | 969 | tty3270_put_character(struct tty3270 *tp, char ch) |
| 970 | { | 970 | { |
| 971 | struct tty3270_line *line; | 971 | struct tty3270_line *line; |
| @@ -986,6 +986,7 @@ tty3270_put_character(struct tty3270 *tp, char ch) | |||
| 986 | cell->character = tp->view.ascebc[(unsigned int) ch]; | 986 | cell->character = tp->view.ascebc[(unsigned int) ch]; |
| 987 | cell->highlight = tp->highlight; | 987 | cell->highlight = tp->highlight; |
| 988 | cell->f_color = tp->f_color; | 988 | cell->f_color = tp->f_color; |
| 989 | return 1; | ||
| 989 | } | 990 | } |
| 990 | 991 | ||
| 991 | /* | 992 | /* |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index fe1ad1722158..26a930e832bd 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
| @@ -152,44 +152,89 @@ __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) | |||
| 152 | return 0; | 152 | return 0; |
| 153 | } | 153 | } |
| 154 | 154 | ||
| 155 | static int __get_next_bus_id(const char **buf, char *bus_id) | ||
| 156 | { | ||
| 157 | int rc, len; | ||
| 158 | char *start, *end; | ||
| 159 | |||
| 160 | start = (char *)*buf; | ||
| 161 | end = strchr(start, ','); | ||
| 162 | if (!end) { | ||
| 163 | /* Last entry. Strip trailing newline, if applicable. */ | ||
| 164 | end = strchr(start, '\n'); | ||
| 165 | if (end) | ||
| 166 | *end = '\0'; | ||
| 167 | len = strlen(start) + 1; | ||
| 168 | } else { | ||
| 169 | len = end - start + 1; | ||
| 170 | end++; | ||
| 171 | } | ||
| 172 | if (len < BUS_ID_SIZE) { | ||
| 173 | strlcpy(bus_id, start, len); | ||
| 174 | rc = 0; | ||
| 175 | } else | ||
| 176 | rc = -EINVAL; | ||
| 177 | *buf = end; | ||
| 178 | return rc; | ||
| 179 | } | ||
| 180 | |||
| 181 | static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE]) | ||
| 182 | { | ||
| 183 | int cssid, ssid, devno; | ||
| 184 | |||
| 185 | /* Must be of form %x.%x.%04x */ | ||
| 186 | if (sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) != 3) | ||
| 187 | return 0; | ||
| 188 | return 1; | ||
| 189 | } | ||
| 190 | |||
| 155 | /** | 191 | /** |
| 156 | * ccwgroup_create() - create and register a ccw group device | 192 | * ccwgroup_create_from_string() - create and register a ccw group device |
| 157 | * @root: parent device for the new device | 193 | * @root: parent device for the new device |
| 158 | * @creator_id: identifier of creating driver | 194 | * @creator_id: identifier of creating driver |
| 159 | * @cdrv: ccw driver of slave devices | 195 | * @cdrv: ccw driver of slave devices |
| 160 | * @argc: number of slave devices | 196 | * @num_devices: number of slave devices |
| 161 | * @argv: bus ids of slave devices | 197 | * @buf: buffer containing comma separated bus ids of slave devices |
| 162 | * | 198 | * |
| 163 | * Create and register a new ccw group device as a child of @root. Slave | 199 | * Create and register a new ccw group device as a child of @root. Slave |
| 164 | * devices are obtained from the list of bus ids given in @argv[] and must all | 200 | * devices are obtained from the list of bus ids given in @buf and must all |
| 165 | * belong to @cdrv. | 201 | * belong to @cdrv. |
| 166 | * Returns: | 202 | * Returns: |
| 167 | * %0 on success and an error code on failure. | 203 | * %0 on success and an error code on failure. |
| 168 | * Context: | 204 | * Context: |
| 169 | * non-atomic | 205 | * non-atomic |
| 170 | */ | 206 | */ |
| 171 | int ccwgroup_create(struct device *root, unsigned int creator_id, | 207 | int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, |
| 172 | struct ccw_driver *cdrv, int argc, char *argv[]) | 208 | struct ccw_driver *cdrv, int num_devices, |
| 209 | const char *buf) | ||
| 173 | { | 210 | { |
| 174 | struct ccwgroup_device *gdev; | 211 | struct ccwgroup_device *gdev; |
| 175 | int i; | 212 | int rc, i; |
| 176 | int rc; | 213 | char tmp_bus_id[BUS_ID_SIZE]; |
| 214 | const char *curr_buf; | ||
| 177 | 215 | ||
| 178 | if (argc > 256) /* disallow dumb users */ | 216 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), |
| 179 | return -EINVAL; | 217 | GFP_KERNEL); |
| 180 | |||
| 181 | gdev = kzalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); | ||
| 182 | if (!gdev) | 218 | if (!gdev) |
| 183 | return -ENOMEM; | 219 | return -ENOMEM; |
| 184 | 220 | ||
| 185 | atomic_set(&gdev->onoff, 0); | 221 | atomic_set(&gdev->onoff, 0); |
| 186 | mutex_init(&gdev->reg_mutex); | 222 | mutex_init(&gdev->reg_mutex); |
| 187 | mutex_lock(&gdev->reg_mutex); | 223 | mutex_lock(&gdev->reg_mutex); |
| 188 | for (i = 0; i < argc; i++) { | 224 | curr_buf = buf; |
| 189 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 225 | for (i = 0; i < num_devices && curr_buf; i++) { |
| 190 | 226 | rc = __get_next_bus_id(&curr_buf, tmp_bus_id); | |
| 191 | /* all devices have to be of the same type in | 227 | if (rc != 0) |
| 192 | * order to be grouped */ | 228 | goto error; |
| 229 | if (!__is_valid_bus_id(tmp_bus_id)) { | ||
| 230 | rc = -EINVAL; | ||
| 231 | goto error; | ||
| 232 | } | ||
| 233 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, tmp_bus_id); | ||
| 234 | /* | ||
| 235 | * All devices have to be of the same type in | ||
| 236 | * order to be grouped. | ||
| 237 | */ | ||
| 193 | if (!gdev->cdev[i] | 238 | if (!gdev->cdev[i] |
| 194 | || gdev->cdev[i]->id.driver_info != | 239 | || gdev->cdev[i]->id.driver_info != |
| 195 | gdev->cdev[0]->id.driver_info) { | 240 | gdev->cdev[0]->id.driver_info) { |
| @@ -203,9 +248,18 @@ int ccwgroup_create(struct device *root, unsigned int creator_id, | |||
| 203 | } | 248 | } |
| 204 | dev_set_drvdata(&gdev->cdev[i]->dev, gdev); | 249 | dev_set_drvdata(&gdev->cdev[i]->dev, gdev); |
| 205 | } | 250 | } |
| 206 | 251 | /* Check for sufficient number of bus ids. */ | |
| 252 | if (i < num_devices && !curr_buf) { | ||
| 253 | rc = -EINVAL; | ||
| 254 | goto error; | ||
| 255 | } | ||
| 256 | /* Check for trailing stuff. */ | ||
| 257 | if (i == num_devices && strlen(curr_buf) > 0) { | ||
| 258 | rc = -EINVAL; | ||
| 259 | goto error; | ||
| 260 | } | ||
| 207 | gdev->creator_id = creator_id; | 261 | gdev->creator_id = creator_id; |
| 208 | gdev->count = argc; | 262 | gdev->count = num_devices; |
| 209 | gdev->dev.bus = &ccwgroup_bus_type; | 263 | gdev->dev.bus = &ccwgroup_bus_type; |
| 210 | gdev->dev.parent = root; | 264 | gdev->dev.parent = root; |
| 211 | gdev->dev.release = ccwgroup_release; | 265 | gdev->dev.release = ccwgroup_release; |
| @@ -233,7 +287,7 @@ int ccwgroup_create(struct device *root, unsigned int creator_id, | |||
| 233 | device_remove_file(&gdev->dev, &dev_attr_ungroup); | 287 | device_remove_file(&gdev->dev, &dev_attr_ungroup); |
| 234 | device_unregister(&gdev->dev); | 288 | device_unregister(&gdev->dev); |
| 235 | error: | 289 | error: |
| 236 | for (i = 0; i < argc; i++) | 290 | for (i = 0; i < num_devices; i++) |
| 237 | if (gdev->cdev[i]) { | 291 | if (gdev->cdev[i]) { |
| 238 | if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) | 292 | if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev) |
| 239 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); | 293 | dev_set_drvdata(&gdev->cdev[i]->dev, NULL); |
| @@ -243,6 +297,7 @@ error: | |||
| 243 | put_device(&gdev->dev); | 297 | put_device(&gdev->dev); |
| 244 | return rc; | 298 | return rc; |
| 245 | } | 299 | } |
| 300 | EXPORT_SYMBOL(ccwgroup_create_from_string); | ||
| 246 | 301 | ||
| 247 | static int __init | 302 | static int __init |
| 248 | init_ccwgroup (void) | 303 | init_ccwgroup (void) |
| @@ -318,7 +373,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
| 318 | { | 373 | { |
| 319 | struct ccwgroup_device *gdev; | 374 | struct ccwgroup_device *gdev; |
| 320 | struct ccwgroup_driver *gdrv; | 375 | struct ccwgroup_driver *gdrv; |
| 321 | unsigned int value; | 376 | unsigned long value; |
| 322 | int ret; | 377 | int ret; |
| 323 | 378 | ||
| 324 | gdev = to_ccwgroupdev(dev); | 379 | gdev = to_ccwgroupdev(dev); |
| @@ -329,7 +384,9 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
| 329 | if (!try_module_get(gdrv->owner)) | 384 | if (!try_module_get(gdrv->owner)) |
| 330 | return -EINVAL; | 385 | return -EINVAL; |
| 331 | 386 | ||
| 332 | value = simple_strtoul(buf, NULL, 0); | 387 | ret = strict_strtoul(buf, 0, &value); |
| 388 | if (ret) | ||
| 389 | goto out; | ||
| 333 | ret = count; | 390 | ret = count; |
| 334 | if (value == 1) | 391 | if (value == 1) |
| 335 | ccwgroup_set_online(gdev); | 392 | ccwgroup_set_online(gdev); |
| @@ -337,6 +394,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
| 337 | ccwgroup_set_offline(gdev); | 394 | ccwgroup_set_offline(gdev); |
| 338 | else | 395 | else |
| 339 | ret = -EINVAL; | 396 | ret = -EINVAL; |
| 397 | out: | ||
| 340 | module_put(gdrv->owner); | 398 | module_put(gdrv->owner); |
| 341 | return ret; | 399 | return ret; |
| 342 | } | 400 | } |
| @@ -518,6 +576,5 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev) | |||
| 518 | MODULE_LICENSE("GPL"); | 576 | MODULE_LICENSE("GPL"); |
| 519 | EXPORT_SYMBOL(ccwgroup_driver_register); | 577 | EXPORT_SYMBOL(ccwgroup_driver_register); |
| 520 | EXPORT_SYMBOL(ccwgroup_driver_unregister); | 578 | EXPORT_SYMBOL(ccwgroup_driver_unregister); |
| 521 | EXPORT_SYMBOL(ccwgroup_create); | ||
| 522 | EXPORT_SYMBOL(ccwgroup_probe_ccwdev); | 579 | EXPORT_SYMBOL(ccwgroup_probe_ccwdev); |
| 523 | EXPORT_SYMBOL(ccwgroup_remove_ccwdev); | 580 | EXPORT_SYMBOL(ccwgroup_remove_ccwdev); |
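With sysfs grouping now funnelled through ccwgroup_create_from_string() above, the buffer written by callers such as the cu3088 group_write() further down is a comma-separated list of bus ids, optionally ending in a newline, e.g. "0.0.0600,0.0.0601\n" for a two-device group (the ids here are made up for illustration). A minimal user-space sketch of the same "%x.%1x.%04x" validity check used by __is_valid_bus_id():

/* Sketch only: mimics the sscanf pattern from __is_valid_bus_id() above;
 * the sample ids are invented for the example. */
#include <stdio.h>

static int is_valid_bus_id(const char *bus_id)
{
	int cssid, ssid, devno;

	/* Must be of form %x.%x.%04x, e.g. 0.0.0600 */
	return sscanf(bus_id, "%x.%1x.%04x", &cssid, &ssid, &devno) == 3;
}

int main(void)
{
	const char *samples[] = { "0.0.0600", "0.0.0601", "0600", "0.0.zzzz" };
	int i;

	for (i = 0; i < (int)(sizeof(samples) / sizeof(samples[0])); i++)
		printf("%-10s -> %s\n", samples[i],
		       is_valid_bus_id(samples[i]) ? "valid" : "invalid");
	return 0;
}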
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 23ffcc4768a7..08a578161306 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
| @@ -407,8 +407,7 @@ cio_modify (struct subchannel *sch) | |||
| 407 | /* | 407 | /* |
| 408 | * Enable subchannel. | 408 | * Enable subchannel. |
| 409 | */ | 409 | */ |
| 410 | int cio_enable_subchannel(struct subchannel *sch, unsigned int isc, | 410 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
| 411 | u32 intparm) | ||
| 412 | { | 411 | { |
| 413 | char dbf_txt[15]; | 412 | char dbf_txt[15]; |
| 414 | int ccode; | 413 | int ccode; |
| @@ -426,7 +425,7 @@ int cio_enable_subchannel(struct subchannel *sch, unsigned int isc, | |||
| 426 | 425 | ||
| 427 | for (retry = 5, ret = 0; retry > 0; retry--) { | 426 | for (retry = 5, ret = 0; retry > 0; retry--) { |
| 428 | sch->schib.pmcw.ena = 1; | 427 | sch->schib.pmcw.ena = 1; |
| 429 | sch->schib.pmcw.isc = isc; | 428 | sch->schib.pmcw.isc = sch->isc; |
| 430 | sch->schib.pmcw.intparm = intparm; | 429 | sch->schib.pmcw.intparm = intparm; |
| 431 | ret = cio_modify(sch); | 430 | ret = cio_modify(sch); |
| 432 | if (ret == -ENODEV) | 431 | if (ret == -ENODEV) |
| @@ -600,6 +599,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
| 600 | else | 599 | else |
| 601 | sch->opm = chp_get_sch_opm(sch); | 600 | sch->opm = chp_get_sch_opm(sch); |
| 602 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 601 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
| 602 | sch->isc = 3; | ||
| 603 | 603 | ||
| 604 | CIO_DEBUG(KERN_INFO, 0, | 604 | CIO_DEBUG(KERN_INFO, 0, |
| 605 | "Detected device %04x on subchannel 0.%x.%04X" | 605 | "Detected device %04x on subchannel 0.%x.%04X" |
| @@ -610,13 +610,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
| 610 | 610 | ||
| 611 | /* | 611 | /* |
| 612 | * We now have to initially ... | 612 | * We now have to initially ... |
| 613 | * ... set "interruption subclass" | ||
| 614 | * ... enable "concurrent sense" | 613 | * ... enable "concurrent sense" |
| 615 | * ... enable "multipath mode" if more than one | 614 | * ... enable "multipath mode" if more than one |
| 616 | * CHPID is available. This is done regardless | 615 | * CHPID is available. This is done regardless |
| 617 | * whether multiple paths are available for us. | 616 | * whether multiple paths are available for us. |
| 618 | */ | 617 | */ |
| 619 | sch->schib.pmcw.isc = 3; /* could be smth. else */ | ||
| 620 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | 618 | sch->schib.pmcw.csense = 1; /* concurrent sense */ |
| 621 | sch->schib.pmcw.ena = 0; | 619 | sch->schib.pmcw.ena = 0; |
| 622 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 620 | if ((sch->lpm & (sch->lpm - 1)) != 0) |
| @@ -812,6 +810,7 @@ cio_probe_console(void) | |||
| 812 | * enable console I/O-interrupt subclass 7 | 810 | * enable console I/O-interrupt subclass 7 |
| 813 | */ | 811 | */ |
| 814 | ctl_set_bit(6, 24); | 812 | ctl_set_bit(6, 24); |
| 813 | console_subchannel.isc = 7; | ||
| 815 | console_subchannel.schib.pmcw.isc = 7; | 814 | console_subchannel.schib.pmcw.isc = 7; |
| 816 | console_subchannel.schib.pmcw.intparm = | 815 | console_subchannel.schib.pmcw.intparm = |
| 817 | (u32)(addr_t)&console_subchannel; | 816 | (u32)(addr_t)&console_subchannel; |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 08f2235c5a6f..3c75412904dc 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
| @@ -74,6 +74,7 @@ struct subchannel { | |||
| 74 | __u8 lpm; /* logical path mask */ | 74 | __u8 lpm; /* logical path mask */ |
| 75 | __u8 opm; /* operational path mask */ | 75 | __u8 opm; /* operational path mask */ |
| 76 | struct schib schib; /* subchannel information block */ | 76 | struct schib schib; /* subchannel information block */ |
| 77 | int isc; /* desired interruption subclass */ | ||
| 77 | struct chsc_ssd_info ssd_info; /* subchannel description */ | 78 | struct chsc_ssd_info ssd_info; /* subchannel description */ |
| 78 | struct device dev; /* entry in device tree */ | 79 | struct device dev; /* entry in device tree */ |
| 79 | struct css_driver *driver; | 80 | struct css_driver *driver; |
| @@ -85,7 +86,7 @@ struct subchannel { | |||
| 85 | #define to_subchannel(n) container_of(n, struct subchannel, dev) | 86 | #define to_subchannel(n) container_of(n, struct subchannel, dev) |
| 86 | 87 | ||
| 87 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); | 88 | extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id); |
| 88 | extern int cio_enable_subchannel(struct subchannel *, unsigned int, u32); | 89 | extern int cio_enable_subchannel(struct subchannel *, u32); |
| 89 | extern int cio_disable_subchannel (struct subchannel *); | 90 | extern int cio_disable_subchannel (struct subchannel *); |
| 90 | extern int cio_cancel (struct subchannel *); | 91 | extern int cio_cancel (struct subchannel *); |
| 91 | extern int cio_clear (struct subchannel *); | 92 | extern int cio_clear (struct subchannel *); |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index f4c132ab39ed..2808b6833b9e 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
| @@ -1219,16 +1219,21 @@ static ssize_t cmb_enable_store(struct device *dev, | |||
| 1219 | { | 1219 | { |
| 1220 | struct ccw_device *cdev; | 1220 | struct ccw_device *cdev; |
| 1221 | int ret; | 1221 | int ret; |
| 1222 | unsigned long val; | ||
| 1223 | |||
| 1224 | ret = strict_strtoul(buf, 16, &val); | ||
| 1225 | if (ret) | ||
| 1226 | return ret; | ||
| 1222 | 1227 | ||
| 1223 | cdev = to_ccwdev(dev); | 1228 | cdev = to_ccwdev(dev); |
| 1224 | 1229 | ||
| 1225 | switch (buf[0]) { | 1230 | switch (val) { |
| 1226 | case '0': | 1231 | case 0: |
| 1227 | ret = disable_cmf(cdev); | 1232 | ret = disable_cmf(cdev); |
| 1228 | if (ret) | 1233 | if (ret) |
| 1229 | dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); | 1234 | dev_info(&cdev->dev, "disable_cmf failed (%d)\n", ret); |
| 1230 | break; | 1235 | break; |
| 1231 | case '1': | 1236 | case 1: |
| 1232 | ret = enable_cmf(cdev); | 1237 | ret = enable_cmf(cdev); |
| 1233 | if (ret && ret != -EBUSY) | 1238 | if (ret && ret != -EBUSY) |
| 1234 | dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); | 1239 | dev_info(&cdev->dev, "enable_cmf failed (%d)\n", ret); |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index c1afab5f72d6..595e327d2f76 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
| @@ -705,13 +705,17 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr, | |||
| 705 | { | 705 | { |
| 706 | struct channel_subsystem *css = to_css(dev); | 706 | struct channel_subsystem *css = to_css(dev); |
| 707 | int ret; | 707 | int ret; |
| 708 | unsigned long val; | ||
| 708 | 709 | ||
| 710 | ret = strict_strtoul(buf, 16, &val); | ||
| 711 | if (ret) | ||
| 712 | return ret; | ||
| 709 | mutex_lock(&css->mutex); | 713 | mutex_lock(&css->mutex); |
| 710 | switch (buf[0]) { | 714 | switch (val) { |
| 711 | case '0': | 715 | case 0: |
| 712 | ret = css->cm_enabled ? chsc_secm(css, 0) : 0; | 716 | ret = css->cm_enabled ? chsc_secm(css, 0) : 0; |
| 713 | break; | 717 | break; |
| 714 | case '1': | 718 | case 1: |
| 715 | ret = css->cm_enabled ? 0 : chsc_secm(css, 1); | 719 | ret = css->cm_enabled ? 0 : chsc_secm(css, 1); |
| 716 | break; | 720 | break; |
| 717 | default: | 721 | default: |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index e0c7adb8958e..abfd601d237a 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
| @@ -512,8 +512,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
| 512 | const char *buf, size_t count) | 512 | const char *buf, size_t count) |
| 513 | { | 513 | { |
| 514 | struct ccw_device *cdev = to_ccwdev(dev); | 514 | struct ccw_device *cdev = to_ccwdev(dev); |
| 515 | int i, force; | 515 | int force, ret; |
| 516 | char *tmp; | 516 | unsigned long i; |
| 517 | 517 | ||
| 518 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) | 518 | if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) |
| 519 | return -EAGAIN; | 519 | return -EAGAIN; |
| @@ -525,25 +525,30 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr, | |||
| 525 | if (!strncmp(buf, "force\n", count)) { | 525 | if (!strncmp(buf, "force\n", count)) { |
| 526 | force = 1; | 526 | force = 1; |
| 527 | i = 1; | 527 | i = 1; |
| 528 | ret = 0; | ||
| 528 | } else { | 529 | } else { |
| 529 | force = 0; | 530 | force = 0; |
| 530 | i = simple_strtoul(buf, &tmp, 16); | 531 | ret = strict_strtoul(buf, 16, &i); |
| 531 | } | 532 | } |
| 532 | 533 | if (ret) | |
| 534 | goto out; | ||
| 533 | switch (i) { | 535 | switch (i) { |
| 534 | case 0: | 536 | case 0: |
| 535 | online_store_handle_offline(cdev); | 537 | online_store_handle_offline(cdev); |
| 538 | ret = count; | ||
| 536 | break; | 539 | break; |
| 537 | case 1: | 540 | case 1: |
| 538 | online_store_handle_online(cdev, force); | 541 | online_store_handle_online(cdev, force); |
| 542 | ret = count; | ||
| 539 | break; | 543 | break; |
| 540 | default: | 544 | default: |
| 541 | count = -EINVAL; | 545 | ret = -EINVAL; |
| 542 | } | 546 | } |
| 547 | out: | ||
| 543 | if (cdev->drv) | 548 | if (cdev->drv) |
| 544 | module_put(cdev->drv->owner); | 549 | module_put(cdev->drv->owner); |
| 545 | atomic_set(&cdev->private->onoff, 0); | 550 | atomic_set(&cdev->private->onoff, 0); |
| 546 | return count; | 551 | return ret; |
| 547 | } | 552 | } |
| 548 | 553 | ||
| 549 | static ssize_t | 554 | static ssize_t |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 4b92c84fb438..99403b0a97a7 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
| @@ -555,8 +555,7 @@ ccw_device_recognition(struct ccw_device *cdev) | |||
| 555 | (cdev->private->state != DEV_STATE_BOXED)) | 555 | (cdev->private->state != DEV_STATE_BOXED)) |
| 556 | return -EINVAL; | 556 | return -EINVAL; |
| 557 | sch = to_subchannel(cdev->dev.parent); | 557 | sch = to_subchannel(cdev->dev.parent); |
| 558 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, | 558 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
| 559 | (u32)(addr_t)sch); | ||
| 560 | if (ret != 0) | 559 | if (ret != 0) |
| 561 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 560 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
| 562 | return ret; | 561 | return ret; |
| @@ -667,8 +666,7 @@ ccw_device_online(struct ccw_device *cdev) | |||
| 667 | sch = to_subchannel(cdev->dev.parent); | 666 | sch = to_subchannel(cdev->dev.parent); |
| 668 | if (css_init_done && !get_device(&cdev->dev)) | 667 | if (css_init_done && !get_device(&cdev->dev)) |
| 669 | return -ENODEV; | 668 | return -ENODEV; |
| 670 | ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc, | 669 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
| 671 | (u32)(addr_t)sch); | ||
| 672 | if (ret != 0) { | 670 | if (ret != 0) { |
| 673 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 671 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
| 674 | if (ret == -ENODEV) | 672 | if (ret == -ENODEV) |
| @@ -1048,8 +1046,7 @@ ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event) | |||
| 1048 | struct subchannel *sch; | 1046 | struct subchannel *sch; |
| 1049 | 1047 | ||
| 1050 | sch = to_subchannel(cdev->dev.parent); | 1048 | sch = to_subchannel(cdev->dev.parent); |
| 1051 | if (cio_enable_subchannel(sch, sch->schib.pmcw.isc, | 1049 | if (cio_enable_subchannel(sch, (u32)(addr_t)sch) != 0) |
| 1052 | (u32)(addr_t)sch) != 0) | ||
| 1053 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 1050 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
| 1054 | return; | 1051 | return; |
| 1055 | 1052 | ||
| @@ -1082,7 +1079,6 @@ device_trigger_reprobe(struct subchannel *sch) | |||
| 1082 | */ | 1079 | */ |
| 1083 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 1080 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
| 1084 | /* Re-set some bits in the pmcw that were lost. */ | 1081 | /* Re-set some bits in the pmcw that were lost. */ |
| 1085 | sch->schib.pmcw.isc = 3; | ||
| 1086 | sch->schib.pmcw.csense = 1; | 1082 | sch->schib.pmcw.csense = 1; |
| 1087 | sch->schib.pmcw.ena = 0; | 1083 | sch->schib.pmcw.ena = 0; |
| 1088 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1084 | if ((sch->lpm & (sch->lpm - 1)) != 0) |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index a1718a0aa539..f308ad55a6d5 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
| @@ -508,7 +508,7 @@ ccw_device_stlck(struct ccw_device *cdev) | |||
| 508 | return -ENOMEM; | 508 | return -ENOMEM; |
| 509 | } | 509 | } |
| 510 | spin_lock_irqsave(sch->lock, flags); | 510 | spin_lock_irqsave(sch->lock, flags); |
| 511 | ret = cio_enable_subchannel(sch, 3, (u32)(addr_t)sch); | 511 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
| 512 | if (ret) | 512 | if (ret) |
| 513 | goto out_unlock; | 513 | goto out_unlock; |
| 514 | /* | 514 | /* |
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 43876e287370..445cf364e461 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
| @@ -3663,11 +3663,11 @@ qdio_performance_stats_show(struct bus_type *bus, char *buf) | |||
| 3663 | static ssize_t | 3663 | static ssize_t |
| 3664 | qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) | 3664 | qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count) |
| 3665 | { | 3665 | { |
| 3666 | char *tmp; | 3666 | unsigned long i; |
| 3667 | int i; | 3667 | int ret; |
| 3668 | 3668 | ||
| 3669 | i = simple_strtoul(buf, &tmp, 16); | 3669 | ret = strict_strtoul(buf, 16, &i); |
| 3670 | if ((i == 0) || (i == 1)) { | 3670 | if (!ret && ((i == 0) || (i == 1))) { |
| 3671 | if (i == qdio_performance_stats) | 3671 | if (i == qdio_performance_stats) |
| 3672 | return count; | 3672 | return count; |
| 3673 | qdio_performance_stats = i; | 3673 | qdio_performance_stats = i; |
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index bbef3764fbf8..47a7e6200b26 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | #include <linux/virtio_config.h> | 17 | #include <linux/virtio_config.h> |
| 18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
| 19 | #include <linux/virtio_ring.h> | 19 | #include <linux/virtio_ring.h> |
| 20 | #include <linux/pfn.h> | ||
| 20 | #include <asm/io.h> | 21 | #include <asm/io.h> |
| 21 | #include <asm/kvm_para.h> | 22 | #include <asm/kvm_para.h> |
| 22 | #include <asm/kvm_virtio.h> | 23 | #include <asm/kvm_virtio.h> |
| @@ -180,11 +181,10 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | |||
| 180 | 181 | ||
| 181 | config = kvm_vq_config(kdev->desc)+index; | 182 | config = kvm_vq_config(kdev->desc)+index; |
| 182 | 183 | ||
| 183 | if (add_shared_memory(config->address, | 184 | err = vmem_add_mapping(config->address, |
| 184 | vring_size(config->num, PAGE_SIZE))) { | 185 | vring_size(config->num, PAGE_SIZE)); |
| 185 | err = -ENOMEM; | 186 | if (err) |
| 186 | goto out; | 187 | goto out; |
| 187 | } | ||
| 188 | 188 | ||
| 189 | vq = vring_new_virtqueue(config->num, vdev, (void *) config->address, | 189 | vq = vring_new_virtqueue(config->num, vdev, (void *) config->address, |
| 190 | kvm_notify, callback); | 190 | kvm_notify, callback); |
| @@ -202,8 +202,8 @@ static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, | |||
| 202 | vq->priv = config; | 202 | vq->priv = config; |
| 203 | return vq; | 203 | return vq; |
| 204 | unmap: | 204 | unmap: |
| 205 | remove_shared_memory(config->address, vring_size(config->num, | 205 | vmem_remove_mapping(config->address, |
| 206 | PAGE_SIZE)); | 206 | vring_size(config->num, PAGE_SIZE)); |
| 207 | out: | 207 | out: |
| 208 | return ERR_PTR(err); | 208 | return ERR_PTR(err); |
| 209 | } | 209 | } |
| @@ -213,8 +213,8 @@ static void kvm_del_vq(struct virtqueue *vq) | |||
| 213 | struct kvm_vqconfig *config = vq->priv; | 213 | struct kvm_vqconfig *config = vq->priv; |
| 214 | 214 | ||
| 215 | vring_del_virtqueue(vq); | 215 | vring_del_virtqueue(vq); |
| 216 | remove_shared_memory(config->address, | 216 | vmem_remove_mapping(config->address, |
| 217 | vring_size(config->num, PAGE_SIZE)); | 217 | vring_size(config->num, PAGE_SIZE)); |
| 218 | } | 218 | } |
| 219 | 219 | ||
| 220 | /* | 220 | /* |
| @@ -318,12 +318,13 @@ static int __init kvm_devices_init(void) | |||
| 318 | return rc; | 318 | return rc; |
| 319 | } | 319 | } |
| 320 | 320 | ||
| 321 | if (add_shared_memory((max_pfn) << PAGE_SHIFT, PAGE_SIZE)) { | 321 | rc = vmem_add_mapping(PFN_PHYS(max_pfn), PAGE_SIZE); |
| 322 | if (rc) { | ||
| 322 | device_unregister(&kvm_root); | 323 | device_unregister(&kvm_root); |
| 323 | return -ENOMEM; | 324 | return rc; |
| 324 | } | 325 | } |
| 325 | 326 | ||
| 326 | kvm_devices = (void *) (max_pfn << PAGE_SHIFT); | 327 | kvm_devices = (void *) PFN_PHYS(max_pfn); |
| 327 | 328 | ||
| 328 | ctl_set_bit(0, 9); | 329 | ctl_set_bit(0, 9); |
| 329 | register_external_interrupt(0x2603, kvm_extint_handler); | 330 | register_external_interrupt(0x2603, kvm_extint_handler); |
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c index 76728ae4b843..8e7697305a4c 100644 --- a/drivers/s390/net/cu3088.c +++ b/drivers/s390/net/cu3088.c | |||
| @@ -62,30 +62,14 @@ static struct device *cu3088_root_dev; | |||
| 62 | static ssize_t | 62 | static ssize_t |
| 63 | group_write(struct device_driver *drv, const char *buf, size_t count) | 63 | group_write(struct device_driver *drv, const char *buf, size_t count) |
| 64 | { | 64 | { |
| 65 | const char *start, *end; | ||
| 66 | char bus_ids[2][BUS_ID_SIZE], *argv[2]; | ||
| 67 | int i; | ||
| 68 | int ret; | 65 | int ret; |
| 69 | struct ccwgroup_driver *cdrv; | 66 | struct ccwgroup_driver *cdrv; |
| 70 | 67 | ||
| 71 | cdrv = to_ccwgroupdrv(drv); | 68 | cdrv = to_ccwgroupdrv(drv); |
| 72 | if (!cdrv) | 69 | if (!cdrv) |
| 73 | return -EINVAL; | 70 | return -EINVAL; |
| 74 | start = buf; | 71 | ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id, |
| 75 | for (i=0; i<2; i++) { | 72 | &cu3088_driver, 2, buf); |
| 76 | static const char delim[] = {',', '\n'}; | ||
| 77 | int len; | ||
| 78 | |||
| 79 | if (!(end = strchr(start, delim[i]))) | ||
| 80 | return -EINVAL; | ||
| 81 | len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start + 1); | ||
| 82 | strlcpy (bus_ids[i], start, len); | ||
| 83 | argv[i] = bus_ids[i]; | ||
| 84 | start = end + 1; | ||
| 85 | } | ||
| 86 | |||
| 87 | ret = ccwgroup_create(cu3088_root_dev, cdrv->driver_id, | ||
| 88 | &cu3088_driver, 2, argv); | ||
| 89 | 73 | ||
| 90 | return (ret == 0) ? count : ret; | 74 | return (ret == 0) ? count : ret; |
| 91 | } | 75 | } |
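The hunk above (and the matching three-ID version removed from qeth_core_main.c later in this diff) replaces open-coded parsing of the comma-separated bus-ID string with a single ccwgroup_create_from_string() call. As a rough sketch of what that helper is assumed to centralize, reconstructed only from the parsing loops being deleted here (the function name below is hypothetical; the real helper lives in the ccwgroup core and may differ in detail):

	/* Sketch: split "id0,id1[,id2]\n" into num bus IDs, then group them.
	 * Reconstructed from the removed code; not the actual helper. */
	static int sketch_create_from_string(struct device *root, unsigned long drv_id,
					     struct ccw_driver *cdrv, int num,
					     const char *buf)
	{
		char ids[3][BUS_ID_SIZE], *argv[3];	/* callers here pass num <= 3 */
		const char *start = buf, *end;
		int i;

		for (i = 0; i < num; i++) {
			/* the last ID ends at '\n', the others at ',' */
			end = strchr(start, (i == num - 1) ? '\n' : ',');
			if (!end || end - start >= BUS_ID_SIZE)
				return -EINVAL;
			memcpy(ids[i], start, end - start);
			ids[i][end - start] = '\0';
			argv[i] = ids[i];
			start = end + 1;
		}
		return ccwgroup_create(root, drv_id, cdrv, num, argv);
	}

Callers such as group_write() above now only forward buf; the count handling stays unchanged.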
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index f51ed9972587..dd22f4b37037 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
| @@ -1793,7 +1793,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
| 1793 | skb->protocol = card->lan_type_trans(skb, card->dev); | 1793 | skb->protocol = card->lan_type_trans(skb, card->dev); |
| 1794 | card->stats.rx_bytes += skb_len; | 1794 | card->stats.rx_bytes += skb_len; |
| 1795 | card->stats.rx_packets++; | 1795 | card->stats.rx_packets++; |
| 1796 | *((__u32 *)skb->cb) = ++card->pkt_seq; | 1796 | if (skb->protocol == htons(ETH_P_802_2)) |
| 1797 | *((__u32 *)skb->cb) = ++card->pkt_seq; | ||
| 1797 | netif_rx(skb); | 1798 | netif_rx(skb); |
| 1798 | } | 1799 | } |
| 1799 | 1800 | ||
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 8f876f6ab367..e4ba6a0372ac 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
| @@ -1313,8 +1313,6 @@ static int netiucv_tx(struct sk_buff *skb, struct net_device *dev) | |||
| 1313 | * and throw away packet. | 1313 | * and throw away packet. |
| 1314 | */ | 1314 | */ |
| 1315 | if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { | 1315 | if (fsm_getstate(privptr->fsm) != DEV_STATE_RUNNING) { |
| 1316 | if (!in_atomic()) | ||
| 1317 | fsm_event(privptr->fsm, DEV_EVENT_START, dev); | ||
| 1318 | dev_kfree_skb(skb); | 1316 | dev_kfree_skb(skb); |
| 1319 | privptr->stats.tx_dropped++; | 1317 | privptr->stats.tx_dropped++; |
| 1320 | privptr->stats.tx_errors++; | 1318 | privptr->stats.tx_errors++; |
| @@ -2147,6 +2145,7 @@ static int __init netiucv_init(void) | |||
| 2147 | if (rc) | 2145 | if (rc) |
| 2148 | goto out_dbf; | 2146 | goto out_dbf; |
| 2149 | IUCV_DBF_TEXT(trace, 3, __func__); | 2147 | IUCV_DBF_TEXT(trace, 3, __func__); |
| 2148 | netiucv_driver.groups = netiucv_drv_attr_groups; | ||
| 2150 | rc = driver_register(&netiucv_driver); | 2149 | rc = driver_register(&netiucv_driver); |
| 2151 | if (rc) { | 2150 | if (rc) { |
| 2152 | PRINT_ERR("NETIUCV: failed to register driver.\n"); | 2151 | PRINT_ERR("NETIUCV: failed to register driver.\n"); |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 66f4f12503c9..699ac11debd8 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
| @@ -72,22 +72,7 @@ struct qeth_dbf_info { | |||
| 72 | debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) | 72 | debug_sprintf_event(qeth_dbf[QETH_DBF_MSG].id, level, text) |
| 73 | 73 | ||
| 74 | #define QETH_DBF_TEXT_(name, level, text...) \ | 74 | #define QETH_DBF_TEXT_(name, level, text...) \ |
| 75 | do { \ | 75 | qeth_dbf_longtext(QETH_DBF_##name, level, text) |
| 76 | if (qeth_dbf_passes(qeth_dbf[QETH_DBF_##name].id, level)) { \ | ||
| 77 | char *dbf_txt_buf = \ | ||
| 78 | get_cpu_var(QETH_DBF_TXT_BUF); \ | ||
| 79 | sprintf(dbf_txt_buf, text); \ | ||
| 80 | debug_text_event(qeth_dbf[QETH_DBF_##name].id, \ | ||
| 81 | level, dbf_txt_buf); \ | ||
| 82 | put_cpu_var(QETH_DBF_TXT_BUF); \ | ||
| 83 | } \ | ||
| 84 | } while (0) | ||
| 85 | |||
| 86 | /* Allow to sort out low debug levels early to avoid wasted sprints */ | ||
| 87 | static inline int qeth_dbf_passes(debug_info_t *dbf_grp, int level) | ||
| 88 | { | ||
| 89 | return (level <= dbf_grp->level); | ||
| 90 | } | ||
| 91 | 76 | ||
| 92 | /** | 77 | /** |
| 93 | * some more debug stuff | 78 | * some more debug stuff |
| @@ -773,27 +758,6 @@ static inline int qeth_get_micros(void) | |||
| 773 | return (int) (get_clock() >> 12); | 758 | return (int) (get_clock() >> 12); |
| 774 | } | 759 | } |
| 775 | 760 | ||
| 776 | static inline void *qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, | ||
| 777 | int size) | ||
| 778 | { | ||
| 779 | void *hdr; | ||
| 780 | |||
| 781 | hdr = (void *) skb_push(skb, size); | ||
| 782 | /* | ||
| 783 | * sanity check, the Linux memory allocation scheme should | ||
| 784 | * never present us cases like this one (the qdio header size plus | ||
| 785 | * the first 40 bytes of the paket cross a 4k boundary) | ||
| 786 | */ | ||
| 787 | if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != | ||
| 788 | (((unsigned long) hdr + size + | ||
| 789 | QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { | ||
| 790 | PRINT_ERR("Misaligned packet on interface %s. Discarded.", | ||
| 791 | QETH_CARD_IFNAME(card)); | ||
| 792 | return NULL; | ||
| 793 | } | ||
| 794 | return hdr; | ||
| 795 | } | ||
| 796 | |||
| 797 | static inline int qeth_get_ip_version(struct sk_buff *skb) | 761 | static inline int qeth_get_ip_version(struct sk_buff *skb) |
| 798 | { | 762 | { |
| 799 | switch (skb->protocol) { | 763 | switch (skb->protocol) { |
| @@ -806,6 +770,12 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) | |||
| 806 | } | 770 | } |
| 807 | } | 771 | } |
| 808 | 772 | ||
| 773 | static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, | ||
| 774 | struct qeth_buffer_pool_entry *entry) | ||
| 775 | { | ||
| 776 | list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); | ||
| 777 | } | ||
| 778 | |||
| 809 | struct qeth_eddp_context; | 779 | struct qeth_eddp_context; |
| 810 | extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; | 780 | extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; |
| 811 | extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; | 781 | extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; |
| @@ -843,8 +813,6 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *, | |||
| 843 | int qeth_query_setadapterparms(struct qeth_card *); | 813 | int qeth_query_setadapterparms(struct qeth_card *); |
| 844 | int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, | 814 | int qeth_check_qdio_errors(struct qdio_buffer *, unsigned int, |
| 845 | unsigned int, const char *); | 815 | unsigned int, const char *); |
| 846 | void qeth_put_buffer_pool_entry(struct qeth_card *, | ||
| 847 | struct qeth_buffer_pool_entry *); | ||
| 848 | void qeth_queue_input_buffer(struct qeth_card *, int); | 816 | void qeth_queue_input_buffer(struct qeth_card *, int); |
| 849 | struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, | 817 | struct sk_buff *qeth_core_get_next_skb(struct qeth_card *, |
| 850 | struct qdio_buffer *, struct qdio_buffer_element **, int *, | 818 | struct qdio_buffer *, struct qdio_buffer_element **, int *, |
| @@ -880,8 +848,6 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, | |||
| 880 | void *reply_param); | 848 | void *reply_param); |
| 881 | int qeth_get_cast_type(struct qeth_card *, struct sk_buff *); | 849 | int qeth_get_cast_type(struct qeth_card *, struct sk_buff *); |
| 882 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); | 850 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); |
| 883 | struct sk_buff *qeth_prepare_skb(struct qeth_card *, struct sk_buff *, | ||
| 884 | struct qeth_hdr **); | ||
| 885 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); | 851 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); |
| 886 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, | 852 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, |
| 887 | struct sk_buff *, struct qeth_hdr *, int, | 853 | struct sk_buff *, struct qeth_hdr *, int, |
| @@ -894,6 +860,8 @@ void qeth_core_get_ethtool_stats(struct net_device *, | |||
| 894 | struct ethtool_stats *, u64 *); | 860 | struct ethtool_stats *, u64 *); |
| 895 | void qeth_core_get_strings(struct net_device *, u32, u8 *); | 861 | void qeth_core_get_strings(struct net_device *, u32, u8 *); |
| 896 | void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); | 862 | void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); |
| 863 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); | ||
| 864 | int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); | ||
| 897 | 865 | ||
| 898 | /* exports for OSN */ | 866 | /* exports for OSN */ |
| 899 | int qeth_osn_assist(struct net_device *, void *, int); | 867 | int qeth_osn_assist(struct net_device *, void *, int); |
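With the per-CPU dbf text buffer gone from qeth_core.h, QETH_DBF_TEXT_() now expands straight into the new qeth_dbf_longtext() helper added to qeth_core_main.c further down. For instance, the call visible later in this diff expands as follows:

	QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
	/* after token pasting of QETH_DBF_##name this becomes: */
	qeth_dbf_longtext(QETH_DBF_SETUP, 2, "2err%d", rc);

so the level check and the small format buffer are handled in one exported function instead of being open-coded at every call site.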
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 055f5c3e7b56..436bf1f6d4a6 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
| @@ -26,9 +26,6 @@ | |||
| 26 | #include "qeth_core.h" | 26 | #include "qeth_core.h" |
| 27 | #include "qeth_core_offl.h" | 27 | #include "qeth_core_offl.h" |
| 28 | 28 | ||
| 29 | static DEFINE_PER_CPU(char[256], qeth_core_dbf_txt_buf); | ||
| 30 | #define QETH_DBF_TXT_BUF qeth_core_dbf_txt_buf | ||
| 31 | |||
| 32 | struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { | 29 | struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = { |
| 33 | /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ | 30 | /* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */ |
| 34 | /* N P A M L V H */ | 31 | /* N P A M L V H */ |
| @@ -2255,14 +2252,6 @@ void qeth_print_status_message(struct qeth_card *card) | |||
| 2255 | } | 2252 | } |
| 2256 | EXPORT_SYMBOL_GPL(qeth_print_status_message); | 2253 | EXPORT_SYMBOL_GPL(qeth_print_status_message); |
| 2257 | 2254 | ||
| 2258 | void qeth_put_buffer_pool_entry(struct qeth_card *card, | ||
| 2259 | struct qeth_buffer_pool_entry *entry) | ||
| 2260 | { | ||
| 2261 | QETH_DBF_TEXT(TRACE, 6, "ptbfplen"); | ||
| 2262 | list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); | ||
| 2263 | } | ||
| 2264 | EXPORT_SYMBOL_GPL(qeth_put_buffer_pool_entry); | ||
| 2265 | |||
| 2266 | static void qeth_initialize_working_pool_list(struct qeth_card *card) | 2255 | static void qeth_initialize_working_pool_list(struct qeth_card *card) |
| 2267 | { | 2256 | { |
| 2268 | struct qeth_buffer_pool_entry *entry; | 2257 | struct qeth_buffer_pool_entry *entry; |
| @@ -2603,7 +2592,6 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
| 2603 | int rc; | 2592 | int rc; |
| 2604 | int newcount = 0; | 2593 | int newcount = 0; |
| 2605 | 2594 | ||
| 2606 | QETH_DBF_TEXT(TRACE, 6, "queinbuf"); | ||
| 2607 | count = (index < queue->next_buf_to_init)? | 2595 | count = (index < queue->next_buf_to_init)? |
| 2608 | card->qdio.in_buf_pool.buf_count - | 2596 | card->qdio.in_buf_pool.buf_count - |
| 2609 | (queue->next_buf_to_init - index) : | 2597 | (queue->next_buf_to_init - index) : |
| @@ -2792,8 +2780,6 @@ static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
| 2792 | int i; | 2780 | int i; |
| 2793 | unsigned int qdio_flags; | 2781 | unsigned int qdio_flags; |
| 2794 | 2782 | ||
| 2795 | QETH_DBF_TEXT(TRACE, 6, "flushbuf"); | ||
| 2796 | |||
| 2797 | for (i = index; i < index + count; ++i) { | 2783 | for (i = index; i < index + count; ++i) { |
| 2798 | buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; | 2784 | buf = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; |
| 2799 | buf->buffer->element[buf->next_element_to_fill - 1].flags |= | 2785 | buf->buffer->element[buf->next_element_to_fill - 1].flags |= |
| @@ -3037,49 +3023,6 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
| 3037 | } | 3023 | } |
| 3038 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | 3024 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); |
| 3039 | 3025 | ||
| 3040 | static void __qeth_free_new_skb(struct sk_buff *orig_skb, | ||
| 3041 | struct sk_buff *new_skb) | ||
| 3042 | { | ||
| 3043 | if (orig_skb != new_skb) | ||
| 3044 | dev_kfree_skb_any(new_skb); | ||
| 3045 | } | ||
| 3046 | |||
| 3047 | static inline struct sk_buff *qeth_realloc_headroom(struct qeth_card *card, | ||
| 3048 | struct sk_buff *skb, int size) | ||
| 3049 | { | ||
| 3050 | struct sk_buff *new_skb = skb; | ||
| 3051 | |||
| 3052 | if (skb_headroom(skb) >= size) | ||
| 3053 | return skb; | ||
| 3054 | new_skb = skb_realloc_headroom(skb, size); | ||
| 3055 | if (!new_skb) | ||
| 3056 | PRINT_ERR("Could not realloc headroom for qeth_hdr " | ||
| 3057 | "on interface %s", QETH_CARD_IFNAME(card)); | ||
| 3058 | return new_skb; | ||
| 3059 | } | ||
| 3060 | |||
| 3061 | struct sk_buff *qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | ||
| 3062 | struct qeth_hdr **hdr) | ||
| 3063 | { | ||
| 3064 | struct sk_buff *new_skb; | ||
| 3065 | |||
| 3066 | QETH_DBF_TEXT(TRACE, 6, "prepskb"); | ||
| 3067 | |||
| 3068 | new_skb = qeth_realloc_headroom(card, skb, | ||
| 3069 | sizeof(struct qeth_hdr)); | ||
| 3070 | if (!new_skb) | ||
| 3071 | return NULL; | ||
| 3072 | |||
| 3073 | *hdr = ((struct qeth_hdr *)qeth_push_skb(card, new_skb, | ||
| 3074 | sizeof(struct qeth_hdr))); | ||
| 3075 | if (*hdr == NULL) { | ||
| 3076 | __qeth_free_new_skb(skb, new_skb); | ||
| 3077 | return NULL; | ||
| 3078 | } | ||
| 3079 | return new_skb; | ||
| 3080 | } | ||
| 3081 | EXPORT_SYMBOL_GPL(qeth_prepare_skb); | ||
| 3082 | |||
| 3083 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, | 3026 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, |
| 3084 | struct sk_buff *skb, int elems) | 3027 | struct sk_buff *skb, int elems) |
| 3085 | { | 3028 | { |
| @@ -3100,8 +3043,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
| 3100 | } | 3043 | } |
| 3101 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); | 3044 | EXPORT_SYMBOL_GPL(qeth_get_elements_no); |
| 3102 | 3045 | ||
| 3103 | static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | 3046 | static inline void __qeth_fill_buffer(struct sk_buff *skb, |
| 3104 | int is_tso, int *next_element_to_fill) | 3047 | struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill) |
| 3105 | { | 3048 | { |
| 3106 | int length = skb->len; | 3049 | int length = skb->len; |
| 3107 | int length_here; | 3050 | int length_here; |
| @@ -3143,15 +3086,13 @@ static void __qeth_fill_buffer(struct sk_buff *skb, struct qdio_buffer *buffer, | |||
| 3143 | *next_element_to_fill = element; | 3086 | *next_element_to_fill = element; |
| 3144 | } | 3087 | } |
| 3145 | 3088 | ||
| 3146 | static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, | 3089 | static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, |
| 3147 | struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) | 3090 | struct qeth_qdio_out_buffer *buf, struct sk_buff *skb) |
| 3148 | { | 3091 | { |
| 3149 | struct qdio_buffer *buffer; | 3092 | struct qdio_buffer *buffer; |
| 3150 | struct qeth_hdr_tso *hdr; | 3093 | struct qeth_hdr_tso *hdr; |
| 3151 | int flush_cnt = 0, hdr_len, large_send = 0; | 3094 | int flush_cnt = 0, hdr_len, large_send = 0; |
| 3152 | 3095 | ||
| 3153 | QETH_DBF_TEXT(TRACE, 6, "qdfillbf"); | ||
| 3154 | |||
| 3155 | buffer = buf->buffer; | 3096 | buffer = buf->buffer; |
| 3156 | atomic_inc(&skb->users); | 3097 | atomic_inc(&skb->users); |
| 3157 | skb_queue_tail(&buf->skb_list, skb); | 3098 | skb_queue_tail(&buf->skb_list, skb); |
| @@ -3210,8 +3151,6 @@ int qeth_do_send_packet_fast(struct qeth_card *card, | |||
| 3210 | int flush_cnt = 0; | 3151 | int flush_cnt = 0; |
| 3211 | int index; | 3152 | int index; |
| 3212 | 3153 | ||
| 3213 | QETH_DBF_TEXT(TRACE, 6, "dosndpfa"); | ||
| 3214 | |||
| 3215 | /* spin until we get the queue ... */ | 3154 | /* spin until we get the queue ... */ |
| 3216 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, | 3155 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, |
| 3217 | QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); | 3156 | QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); |
| @@ -3263,8 +3202,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
| 3263 | int tmp; | 3202 | int tmp; |
| 3264 | int rc = 0; | 3203 | int rc = 0; |
| 3265 | 3204 | ||
| 3266 | QETH_DBF_TEXT(TRACE, 6, "dosndpkt"); | ||
| 3267 | |||
| 3268 | /* spin until we get the queue ... */ | 3205 | /* spin until we get the queue ... */ |
| 3269 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, | 3206 | while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED, |
| 3270 | QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); | 3207 | QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED); |
| @@ -3827,27 +3764,8 @@ static struct ccw_driver qeth_ccw_driver = { | |||
| 3827 | static int qeth_core_driver_group(const char *buf, struct device *root_dev, | 3764 | static int qeth_core_driver_group(const char *buf, struct device *root_dev, |
| 3828 | unsigned long driver_id) | 3765 | unsigned long driver_id) |
| 3829 | { | 3766 | { |
| 3830 | const char *start, *end; | 3767 | return ccwgroup_create_from_string(root_dev, driver_id, |
| 3831 | char bus_ids[3][BUS_ID_SIZE], *argv[3]; | 3768 | &qeth_ccw_driver, 3, buf); |
| 3832 | int i; | ||
| 3833 | |||
| 3834 | start = buf; | ||
| 3835 | for (i = 0; i < 3; i++) { | ||
| 3836 | static const char delim[] = { ',', ',', '\n' }; | ||
| 3837 | int len; | ||
| 3838 | |||
| 3839 | end = strchr(start, delim[i]); | ||
| 3840 | if (!end) | ||
| 3841 | return -EINVAL; | ||
| 3842 | len = min_t(ptrdiff_t, BUS_ID_SIZE, end - start); | ||
| 3843 | strncpy(bus_ids[i], start, len); | ||
| 3844 | bus_ids[i][len] = '\0'; | ||
| 3845 | start = end + 1; | ||
| 3846 | argv[i] = bus_ids[i]; | ||
| 3847 | } | ||
| 3848 | |||
| 3849 | return (ccwgroup_create(root_dev, driver_id, | ||
| 3850 | &qeth_ccw_driver, 3, argv)); | ||
| 3851 | } | 3769 | } |
| 3852 | 3770 | ||
| 3853 | int qeth_core_hardsetup_card(struct qeth_card *card) | 3771 | int qeth_core_hardsetup_card(struct qeth_card *card) |
| @@ -3885,8 +3803,9 @@ retry: | |||
| 3885 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 3803 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
| 3886 | return rc; | 3804 | return rc; |
| 3887 | } | 3805 | } |
| 3888 | 3806 | mpno = qdio_get_ssqd_pct(CARD_DDEV(card)); | |
| 3889 | mpno = QETH_MAX_PORTNO; | 3807 | if (mpno) |
| 3808 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | ||
| 3890 | if (card->info.portno > mpno) { | 3809 | if (card->info.portno > mpno) { |
| 3891 | PRINT_ERR("Device %s does not offer port number %d \n.", | 3810 | PRINT_ERR("Device %s does not offer port number %d \n.", |
| 3892 | CARD_BUS_ID(card), card->info.portno); | 3811 | CARD_BUS_ID(card), card->info.portno); |
| @@ -3980,7 +3899,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
| 3980 | int use_rx_sg = 0; | 3899 | int use_rx_sg = 0; |
| 3981 | int frag = 0; | 3900 | int frag = 0; |
| 3982 | 3901 | ||
| 3983 | QETH_DBF_TEXT(TRACE, 6, "nextskb"); | ||
| 3984 | /* qeth_hdr must not cross element boundaries */ | 3902 | /* qeth_hdr must not cross element boundaries */ |
| 3985 | if (element->length < offset + sizeof(struct qeth_hdr)) { | 3903 | if (element->length < offset + sizeof(struct qeth_hdr)) { |
| 3986 | if (qeth_is_last_sbale(element)) | 3904 | if (qeth_is_last_sbale(element)) |
| @@ -4086,6 +4004,18 @@ static void qeth_unregister_dbf_views(void) | |||
| 4086 | } | 4004 | } |
| 4087 | } | 4005 | } |
| 4088 | 4006 | ||
| 4007 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...) | ||
| 4008 | { | ||
| 4009 | char dbf_txt_buf[32]; | ||
| 4010 | |||
| 4011 | if (level > (qeth_dbf[dbf_nix].id)->level) | ||
| 4012 | return; | ||
| 4013 | snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text); | ||
| 4014 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); | ||
| 4015 | |||
| 4016 | } | ||
| 4017 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); | ||
| 4018 | |||
| 4089 | static int qeth_register_dbf_views(void) | 4019 | static int qeth_register_dbf_views(void) |
| 4090 | { | 4020 | { |
| 4091 | int ret; | 4021 | int ret; |
| @@ -4433,6 +4363,96 @@ void qeth_core_get_drvinfo(struct net_device *dev, | |||
| 4433 | } | 4363 | } |
| 4434 | EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); | 4364 | EXPORT_SYMBOL_GPL(qeth_core_get_drvinfo); |
| 4435 | 4365 | ||
| 4366 | int qeth_core_ethtool_get_settings(struct net_device *netdev, | ||
| 4367 | struct ethtool_cmd *ecmd) | ||
| 4368 | { | ||
| 4369 | struct qeth_card *card = netdev_priv(netdev); | ||
| 4370 | enum qeth_link_types link_type; | ||
| 4371 | |||
| 4372 | if ((card->info.type == QETH_CARD_TYPE_IQD) || (card->info.guestlan)) | ||
| 4373 | link_type = QETH_LINK_TYPE_10GBIT_ETH; | ||
| 4374 | else | ||
| 4375 | link_type = card->info.link_type; | ||
| 4376 | |||
| 4377 | ecmd->transceiver = XCVR_INTERNAL; | ||
| 4378 | ecmd->supported = SUPPORTED_Autoneg; | ||
| 4379 | ecmd->advertising = ADVERTISED_Autoneg; | ||
| 4380 | ecmd->duplex = DUPLEX_FULL; | ||
| 4381 | ecmd->autoneg = AUTONEG_ENABLE; | ||
| 4382 | |||
| 4383 | switch (link_type) { | ||
| 4384 | case QETH_LINK_TYPE_FAST_ETH: | ||
| 4385 | case QETH_LINK_TYPE_LANE_ETH100: | ||
| 4386 | ecmd->supported |= SUPPORTED_10baseT_Half | | ||
| 4387 | SUPPORTED_10baseT_Full | | ||
| 4388 | SUPPORTED_100baseT_Half | | ||
| 4389 | SUPPORTED_100baseT_Full | | ||
| 4390 | SUPPORTED_TP; | ||
| 4391 | ecmd->advertising |= ADVERTISED_10baseT_Half | | ||
| 4392 | ADVERTISED_10baseT_Full | | ||
| 4393 | ADVERTISED_100baseT_Half | | ||
| 4394 | ADVERTISED_100baseT_Full | | ||
| 4395 | ADVERTISED_TP; | ||
| 4396 | ecmd->speed = SPEED_100; | ||
| 4397 | ecmd->port = PORT_TP; | ||
| 4398 | break; | ||
| 4399 | |||
| 4400 | case QETH_LINK_TYPE_GBIT_ETH: | ||
| 4401 | case QETH_LINK_TYPE_LANE_ETH1000: | ||
| 4402 | ecmd->supported |= SUPPORTED_10baseT_Half | | ||
| 4403 | SUPPORTED_10baseT_Full | | ||
| 4404 | SUPPORTED_100baseT_Half | | ||
| 4405 | SUPPORTED_100baseT_Full | | ||
| 4406 | SUPPORTED_1000baseT_Half | | ||
| 4407 | SUPPORTED_1000baseT_Full | | ||
| 4408 | SUPPORTED_FIBRE; | ||
| 4409 | ecmd->advertising |= ADVERTISED_10baseT_Half | | ||
| 4410 | ADVERTISED_10baseT_Full | | ||
| 4411 | ADVERTISED_100baseT_Half | | ||
| 4412 | ADVERTISED_100baseT_Full | | ||
| 4413 | ADVERTISED_1000baseT_Half | | ||
| 4414 | ADVERTISED_1000baseT_Full | | ||
| 4415 | ADVERTISED_FIBRE; | ||
| 4416 | ecmd->speed = SPEED_1000; | ||
| 4417 | ecmd->port = PORT_FIBRE; | ||
| 4418 | break; | ||
| 4419 | |||
| 4420 | case QETH_LINK_TYPE_10GBIT_ETH: | ||
| 4421 | ecmd->supported |= SUPPORTED_10baseT_Half | | ||
| 4422 | SUPPORTED_10baseT_Full | | ||
| 4423 | SUPPORTED_100baseT_Half | | ||
| 4424 | SUPPORTED_100baseT_Full | | ||
| 4425 | SUPPORTED_1000baseT_Half | | ||
| 4426 | SUPPORTED_1000baseT_Full | | ||
| 4427 | SUPPORTED_10000baseT_Full | | ||
| 4428 | SUPPORTED_FIBRE; | ||
| 4429 | ecmd->advertising |= ADVERTISED_10baseT_Half | | ||
| 4430 | ADVERTISED_10baseT_Full | | ||
| 4431 | ADVERTISED_100baseT_Half | | ||
| 4432 | ADVERTISED_100baseT_Full | | ||
| 4433 | ADVERTISED_1000baseT_Half | | ||
| 4434 | ADVERTISED_1000baseT_Full | | ||
| 4435 | ADVERTISED_10000baseT_Full | | ||
| 4436 | ADVERTISED_FIBRE; | ||
| 4437 | ecmd->speed = SPEED_10000; | ||
| 4438 | ecmd->port = PORT_FIBRE; | ||
| 4439 | break; | ||
| 4440 | |||
| 4441 | default: | ||
| 4442 | ecmd->supported |= SUPPORTED_10baseT_Half | | ||
| 4443 | SUPPORTED_10baseT_Full | | ||
| 4444 | SUPPORTED_TP; | ||
| 4445 | ecmd->advertising |= ADVERTISED_10baseT_Half | | ||
| 4446 | ADVERTISED_10baseT_Full | | ||
| 4447 | ADVERTISED_TP; | ||
| 4448 | ecmd->speed = SPEED_10; | ||
| 4449 | ecmd->port = PORT_TP; | ||
| 4450 | } | ||
| 4451 | |||
| 4452 | return 0; | ||
| 4453 | } | ||
| 4454 | EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings); | ||
| 4455 | |||
| 4436 | static int __init qeth_core_init(void) | 4456 | static int __init qeth_core_init(void) |
| 4437 | { | 4457 | { |
| 4438 | int rc; | 4458 | int rc; |
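In qeth_core_hardsetup_card() above, the hard-coded QETH_MAX_PORTNO ceiling is replaced by querying the device via qdio_get_ssqd_pct(). Assuming that call returns the number of ports the adapter reports (only the call site is visible in this diff, so that reading is an assumption), the check behaves as in this illustrative sketch:

	/* Illustrative values only; the real error path is in the hunk above. */
	mpno = qdio_get_ssqd_pct(CARD_DDEV(card));	/* e.g. adapter reports 2 ports */
	if (mpno)
		mpno = min(mpno - 1, QETH_MAX_PORTNO);	/* highest usable index: 1 */
	if (card->info.portno > mpno)			/* portno 2 is now rejected */
		return -EINVAL;				/* return value not shown here; -EINVAL is illustrative */

A zero return leaves mpno at 0, in which case only port 0 is accepted.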
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 3921d1631a78..86ec50ddae13 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
| @@ -22,9 +22,6 @@ | |||
| 22 | #include "qeth_core.h" | 22 | #include "qeth_core.h" |
| 23 | #include "qeth_core_offl.h" | 23 | #include "qeth_core_offl.h" |
| 24 | 24 | ||
| 25 | #define QETH_DBF_TXT_BUF qeth_l2_dbf_txt_buf | ||
| 26 | static DEFINE_PER_CPU(char[256], qeth_l2_dbf_txt_buf); | ||
| 27 | |||
| 28 | static int qeth_l2_set_offline(struct ccwgroup_device *); | 25 | static int qeth_l2_set_offline(struct ccwgroup_device *); |
| 29 | static int qeth_l2_stop(struct net_device *); | 26 | static int qeth_l2_stop(struct net_device *); |
| 30 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); | 27 | static int qeth_l2_send_delmac(struct qeth_card *, __u8 *); |
| @@ -635,8 +632,6 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 635 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; | 632 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; |
| 636 | struct qeth_eddp_context *ctx = NULL; | 633 | struct qeth_eddp_context *ctx = NULL; |
| 637 | 634 | ||
| 638 | QETH_DBF_TEXT(TRACE, 6, "l2xmit"); | ||
| 639 | |||
| 640 | if ((card->state != CARD_STATE_UP) || !card->lan_online) { | 635 | if ((card->state != CARD_STATE_UP) || !card->lan_online) { |
| 641 | card->stats.tx_carrier_errors++; | 636 | card->stats.tx_carrier_errors++; |
| 642 | goto tx_drop; | 637 | goto tx_drop; |
| @@ -658,9 +653,12 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 658 | if (card->info.type == QETH_CARD_TYPE_OSN) | 653 | if (card->info.type == QETH_CARD_TYPE_OSN) |
| 659 | hdr = (struct qeth_hdr *)skb->data; | 654 | hdr = (struct qeth_hdr *)skb->data; |
| 660 | else { | 655 | else { |
| 661 | new_skb = qeth_prepare_skb(card, skb, &hdr); | 656 | /* create a clone with writeable headroom */ |
| 657 | new_skb = skb_realloc_headroom(skb, sizeof(struct qeth_hdr)); | ||
| 662 | if (!new_skb) | 658 | if (!new_skb) |
| 663 | goto tx_drop; | 659 | goto tx_drop; |
| 660 | hdr = (struct qeth_hdr *)skb_push(new_skb, | ||
| 661 | sizeof(struct qeth_hdr)); | ||
| 664 | qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); | 662 | qeth_l2_fill_header(card, hdr, new_skb, ipv, cast_type); |
| 665 | } | 663 | } |
| 666 | 664 | ||
| @@ -747,7 +745,6 @@ static void qeth_l2_qdio_input_handler(struct ccw_device *ccwdev, | |||
| 747 | int index; | 745 | int index; |
| 748 | int i; | 746 | int i; |
| 749 | 747 | ||
| 750 | QETH_DBF_TEXT(TRACE, 6, "qdinput"); | ||
| 751 | card = (struct qeth_card *) card_ptr; | 748 | card = (struct qeth_card *) card_ptr; |
| 752 | net_dev = card->dev; | 749 | net_dev = card->dev; |
| 753 | if (card->options.performance_stats) { | 750 | if (card->options.performance_stats) { |
| @@ -852,6 +849,22 @@ static void qeth_l2_remove_device(struct ccwgroup_device *cgdev) | |||
| 852 | return; | 849 | return; |
| 853 | } | 850 | } |
| 854 | 851 | ||
| 852 | static int qeth_l2_ethtool_set_tso(struct net_device *dev, u32 data) | ||
| 853 | { | ||
| 854 | struct qeth_card *card = netdev_priv(dev); | ||
| 855 | |||
| 856 | if (data) { | ||
| 857 | if (card->options.large_send == QETH_LARGE_SEND_NO) { | ||
| 858 | card->options.large_send = QETH_LARGE_SEND_EDDP; | ||
| 859 | dev->features |= NETIF_F_TSO; | ||
| 860 | } | ||
| 861 | } else { | ||
| 862 | dev->features &= ~NETIF_F_TSO; | ||
| 863 | card->options.large_send = QETH_LARGE_SEND_NO; | ||
| 864 | } | ||
| 865 | return 0; | ||
| 866 | } | ||
| 867 | |||
| 855 | static struct ethtool_ops qeth_l2_ethtool_ops = { | 868 | static struct ethtool_ops qeth_l2_ethtool_ops = { |
| 856 | .get_link = ethtool_op_get_link, | 869 | .get_link = ethtool_op_get_link, |
| 857 | .get_tx_csum = ethtool_op_get_tx_csum, | 870 | .get_tx_csum = ethtool_op_get_tx_csum, |
| @@ -859,11 +872,12 @@ static struct ethtool_ops qeth_l2_ethtool_ops = { | |||
| 859 | .get_sg = ethtool_op_get_sg, | 872 | .get_sg = ethtool_op_get_sg, |
| 860 | .set_sg = ethtool_op_set_sg, | 873 | .set_sg = ethtool_op_set_sg, |
| 861 | .get_tso = ethtool_op_get_tso, | 874 | .get_tso = ethtool_op_get_tso, |
| 862 | .set_tso = ethtool_op_set_tso, | 875 | .set_tso = qeth_l2_ethtool_set_tso, |
| 863 | .get_strings = qeth_core_get_strings, | 876 | .get_strings = qeth_core_get_strings, |
| 864 | .get_ethtool_stats = qeth_core_get_ethtool_stats, | 877 | .get_ethtool_stats = qeth_core_get_ethtool_stats, |
| 865 | .get_stats_count = qeth_core_get_stats_count, | 878 | .get_stats_count = qeth_core_get_stats_count, |
| 866 | .get_drvinfo = qeth_core_get_drvinfo, | 879 | .get_drvinfo = qeth_core_get_drvinfo, |
| 880 | .get_settings = qeth_core_ethtool_get_settings, | ||
| 867 | }; | 881 | }; |
| 868 | 882 | ||
| 869 | static struct ethtool_ops qeth_l2_osn_ops = { | 883 | static struct ethtool_ops qeth_l2_osn_ops = { |
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h index 1be353593a59..9f143c83bba3 100644 --- a/drivers/s390/net/qeth_l3.h +++ b/drivers/s390/net/qeth_l3.h | |||
| @@ -13,9 +13,6 @@ | |||
| 13 | 13 | ||
| 14 | #include "qeth_core.h" | 14 | #include "qeth_core.h" |
| 15 | 15 | ||
| 16 | #define QETH_DBF_TXT_BUF qeth_l3_dbf_txt_buf | ||
| 17 | DECLARE_PER_CPU(char[256], qeth_l3_dbf_txt_buf); | ||
| 18 | |||
| 19 | struct qeth_ipaddr { | 16 | struct qeth_ipaddr { |
| 20 | struct list_head entry; | 17 | struct list_head entry; |
| 21 | enum qeth_ip_types type; | 18 | enum qeth_ip_types type; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index e1bfe56087d6..94a8ead64ed4 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
| @@ -28,8 +28,6 @@ | |||
| 28 | #include "qeth_l3.h" | 28 | #include "qeth_l3.h" |
| 29 | #include "qeth_core_offl.h" | 29 | #include "qeth_core_offl.h" |
| 30 | 30 | ||
| 31 | DEFINE_PER_CPU(char[256], qeth_l3_dbf_txt_buf); | ||
| 32 | |||
| 33 | static int qeth_l3_set_offline(struct ccwgroup_device *); | 31 | static int qeth_l3_set_offline(struct ccwgroup_device *); |
| 34 | static int qeth_l3_recover(void *); | 32 | static int qeth_l3_recover(void *); |
| 35 | static int qeth_l3_stop(struct net_device *); | 33 | static int qeth_l3_stop(struct net_device *); |
| @@ -2093,6 +2091,11 @@ static int qeth_l3_stop_card(struct qeth_card *card, int recovery_mode) | |||
| 2093 | (card->state == CARD_STATE_UP)) { | 2091 | (card->state == CARD_STATE_UP)) { |
| 2094 | if (recovery_mode) | 2092 | if (recovery_mode) |
| 2095 | qeth_l3_stop(card->dev); | 2093 | qeth_l3_stop(card->dev); |
| 2094 | else { | ||
| 2095 | rtnl_lock(); | ||
| 2096 | dev_close(card->dev); | ||
| 2097 | rtnl_unlock(); | ||
| 2098 | } | ||
| 2096 | if (!card->use_hard_stop) { | 2099 | if (!card->use_hard_stop) { |
| 2097 | rc = qeth_send_stoplan(card); | 2100 | rc = qeth_send_stoplan(card); |
| 2098 | if (rc) | 2101 | if (rc) |
| @@ -2559,8 +2562,6 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) | |||
| 2559 | static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | 2562 | static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, |
| 2560 | struct sk_buff *skb, int ipv, int cast_type) | 2563 | struct sk_buff *skb, int ipv, int cast_type) |
| 2561 | { | 2564 | { |
| 2562 | QETH_DBF_TEXT(TRACE, 6, "fillhdr"); | ||
| 2563 | |||
| 2564 | memset(hdr, 0, sizeof(struct qeth_hdr)); | 2565 | memset(hdr, 0, sizeof(struct qeth_hdr)); |
| 2565 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; | 2566 | hdr->hdr.l3.id = QETH_HEADER_TYPE_LAYER3; |
| 2566 | hdr->hdr.l3.ext_flags = 0; | 2567 | hdr->hdr.l3.ext_flags = 0; |
| @@ -2570,9 +2571,10 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, | |||
| 2570 | * v6 uses passthrough, v4 sets the tag in the QDIO header. | 2571 | * v6 uses passthrough, v4 sets the tag in the QDIO header. |
| 2571 | */ | 2572 | */ |
| 2572 | if (card->vlangrp && vlan_tx_tag_present(skb)) { | 2573 | if (card->vlangrp && vlan_tx_tag_present(skb)) { |
| 2573 | hdr->hdr.l3.ext_flags = (ipv == 4) ? | 2574 | if ((ipv == 4) || (card->info.type == QETH_CARD_TYPE_IQD)) |
| 2574 | QETH_HDR_EXT_VLAN_FRAME : | 2575 | hdr->hdr.l3.ext_flags = QETH_HDR_EXT_VLAN_FRAME; |
| 2575 | QETH_HDR_EXT_INCLUDE_VLAN_TAG; | 2576 | else |
| 2577 | hdr->hdr.l3.ext_flags = QETH_HDR_EXT_INCLUDE_VLAN_TAG; | ||
| 2576 | hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); | 2578 | hdr->hdr.l3.vlan_id = vlan_tx_tag_get(skb); |
| 2577 | } | 2579 | } |
| 2578 | 2580 | ||
| @@ -2638,8 +2640,6 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
| 2638 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; | 2640 | enum qeth_large_send_types large_send = QETH_LARGE_SEND_NO; |
| 2639 | struct qeth_eddp_context *ctx = NULL; | 2641 | struct qeth_eddp_context *ctx = NULL; |
| 2640 | 2642 | ||
| 2641 | QETH_DBF_TEXT(TRACE, 6, "l3xmit"); | ||
| 2642 | |||
| 2643 | if ((card->info.type == QETH_CARD_TYPE_IQD) && | 2643 | if ((card->info.type == QETH_CARD_TYPE_IQD) && |
| 2644 | (skb->protocol != htons(ETH_P_IPV6)) && | 2644 | (skb->protocol != htons(ETH_P_IPV6)) && |
| 2645 | (skb->protocol != htons(ETH_P_IP))) | 2645 | (skb->protocol != htons(ETH_P_IP))) |
| @@ -2890,6 +2890,7 @@ static struct ethtool_ops qeth_l3_ethtool_ops = { | |||
| 2890 | .get_ethtool_stats = qeth_core_get_ethtool_stats, | 2890 | .get_ethtool_stats = qeth_core_get_ethtool_stats, |
| 2891 | .get_stats_count = qeth_core_get_stats_count, | 2891 | .get_stats_count = qeth_core_get_stats_count, |
| 2892 | .get_drvinfo = qeth_core_get_drvinfo, | 2892 | .get_drvinfo = qeth_core_get_drvinfo, |
| 2893 | .get_settings = qeth_core_ethtool_get_settings, | ||
| 2893 | }; | 2894 | }; |
| 2894 | 2895 | ||
| 2895 | /* | 2896 | /* |
| @@ -2982,7 +2983,6 @@ static void qeth_l3_qdio_input_handler(struct ccw_device *ccwdev, | |||
| 2982 | int index; | 2983 | int index; |
| 2983 | int i; | 2984 | int i; |
| 2984 | 2985 | ||
| 2985 | QETH_DBF_TEXT(TRACE, 6, "qdinput"); | ||
| 2986 | card = (struct qeth_card *) card_ptr; | 2986 | card = (struct qeth_card *) card_ptr; |
| 2987 | net_dev = card->dev; | 2987 | net_dev = card->dev; |
| 2988 | if (card->options.performance_stats) { | 2988 | if (card->options.performance_stats) { |
| @@ -3140,9 +3140,15 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
| 3140 | netif_carrier_on(card->dev); | 3140 | netif_carrier_on(card->dev); |
| 3141 | 3141 | ||
| 3142 | qeth_set_allowed_threads(card, 0xffffffff, 0); | 3142 | qeth_set_allowed_threads(card, 0xffffffff, 0); |
| 3143 | if ((recover_flag == CARD_STATE_RECOVER) && recovery_mode) { | 3143 | if (recover_flag == CARD_STATE_RECOVER) { |
| 3144 | if (recovery_mode) | ||
| 3144 | qeth_l3_open(card->dev); | 3145 | qeth_l3_open(card->dev); |
| 3145 | qeth_l3_set_multicast_list(card->dev); | 3146 | else { |
| 3147 | rtnl_lock(); | ||
| 3148 | dev_open(card->dev); | ||
| 3149 | rtnl_unlock(); | ||
| 3150 | } | ||
| 3151 | qeth_l3_set_multicast_list(card->dev); | ||
| 3146 | } | 3152 | } |
| 3147 | /* let user_space know that device is online */ | 3153 | /* let user_space know that device is online */ |
| 3148 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3154 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c index a4e758143665..235703414370 100644 --- a/drivers/sbus/char/cpwatchdog.c +++ b/drivers/sbus/char/cpwatchdog.c | |||
| @@ -637,7 +637,7 @@ static int wd_inittimer(int whichdog) | |||
| 637 | break; | 637 | break; |
| 638 | default: | 638 | default: |
| 639 | printk("%s: %s: invalid watchdog id: %i\n", | 639 | printk("%s: %s: invalid watchdog id: %i\n", |
| 640 | WD_OBPNAME, __FUNCTION__, whichdog); | 640 | WD_OBPNAME, __func__, whichdog); |
| 641 | return(1); | 641 | return(1); |
| 642 | } | 642 | } |
| 643 | if(0 != misc_register(whichmisc)) | 643 | if(0 != misc_register(whichmisc)) |
diff --git a/drivers/sbus/char/uctrl.c b/drivers/sbus/char/uctrl.c index 44d2ef906ac7..383f32c1d347 100644 --- a/drivers/sbus/char/uctrl.c +++ b/drivers/sbus/char/uctrl.c | |||
| @@ -393,13 +393,13 @@ static int __init ts102_uctrl_init(void) | |||
| 393 | err = request_irq(driver->irq, uctrl_interrupt, 0, "uctrl", driver); | 393 | err = request_irq(driver->irq, uctrl_interrupt, 0, "uctrl", driver); |
| 394 | if (err) { | 394 | if (err) { |
| 395 | printk("%s: unable to register irq %d\n", | 395 | printk("%s: unable to register irq %d\n", |
| 396 | __FUNCTION__, driver->irq); | 396 | __func__, driver->irq); |
| 397 | return err; | 397 | return err; |
| 398 | } | 398 | } |
| 399 | 399 | ||
| 400 | if (misc_register(&uctrl_dev)) { | 400 | if (misc_register(&uctrl_dev)) { |
| 401 | printk("%s: unable to get misc minor %d\n", | 401 | printk("%s: unable to get misc minor %d\n", |
| 402 | __FUNCTION__, uctrl_dev.minor); | 402 | __func__, uctrl_dev.minor); |
| 403 | free_irq(driver->irq, driver); | 403 | free_irq(driver->irq, driver); |
| 404 | return -ENODEV; | 404 | return -ENODEV; |
| 405 | } | 405 | } |
diff --git a/drivers/serial/68328serial.c b/drivers/serial/68328serial.c index 2b8a410e0959..bbf5bc5892c7 100644 --- a/drivers/serial/68328serial.c +++ b/drivers/serial/68328serial.c | |||
| @@ -200,7 +200,7 @@ static void rs_stop(struct tty_struct *tty) | |||
| 200 | local_irq_restore(flags); | 200 | local_irq_restore(flags); |
| 201 | } | 201 | } |
| 202 | 202 | ||
| 203 | static void rs_put_char(char ch) | 203 | static int rs_put_char(char ch) |
| 204 | { | 204 | { |
| 205 | int flags, loops = 0; | 205 | int flags, loops = 0; |
| 206 | 206 | ||
| @@ -214,6 +214,7 @@ static void rs_put_char(char ch) | |||
| 214 | UTX_TXDATA = ch; | 214 | UTX_TXDATA = ch; |
| 215 | udelay(5); | 215 | udelay(5); |
| 216 | local_irq_restore(flags); | 216 | local_irq_restore(flags); |
| 217 | return 1; | ||
| 217 | } | 218 | } |
| 218 | 219 | ||
| 219 | static void rs_start(struct tty_struct *tty) | 220 | static void rs_start(struct tty_struct *tty) |
| @@ -1017,18 +1018,6 @@ static int rs_ioctl(struct tty_struct *tty, struct file * file, | |||
| 1017 | tty_wait_until_sent(tty, 0); | 1018 | tty_wait_until_sent(tty, 0); |
| 1018 | send_break(info, arg ? arg*(100) : 250); | 1019 | send_break(info, arg ? arg*(100) : 250); |
| 1019 | return 0; | 1020 | return 0; |
| 1020 | case TIOCGSOFTCAR: | ||
| 1021 | error = put_user(C_CLOCAL(tty) ? 1 : 0, | ||
| 1022 | (unsigned long *) arg); | ||
| 1023 | if (error) | ||
| 1024 | return error; | ||
| 1025 | return 0; | ||
| 1026 | case TIOCSSOFTCAR: | ||
| 1027 | get_user(arg, (unsigned long *) arg); | ||
| 1028 | tty->termios->c_cflag = | ||
| 1029 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1030 | (arg ? CLOCAL : 0)); | ||
| 1031 | return 0; | ||
| 1032 | case TIOCGSERIAL: | 1021 | case TIOCGSERIAL: |
| 1033 | if (access_ok(VERIFY_WRITE, (void *) arg, | 1022 | if (access_ok(VERIFY_WRITE, (void *) arg, |
| 1034 | sizeof(struct serial_struct))) | 1023 | sizeof(struct serial_struct))) |
| @@ -1061,9 +1050,6 @@ static void rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 1061 | { | 1050 | { |
| 1062 | struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; | 1051 | struct m68k_serial *info = (struct m68k_serial *)tty->driver_data; |
| 1063 | 1052 | ||
| 1064 | if (tty->termios->c_cflag == old_termios->c_cflag) | ||
| 1065 | return; | ||
| 1066 | |||
| 1067 | change_speed(info); | 1053 | change_speed(info); |
| 1068 | 1054 | ||
| 1069 | if ((old_termios->c_cflag & CRTSCTS) && | 1055 | if ((old_termios->c_cflag & CRTSCTS) && |
| @@ -1140,8 +1126,7 @@ static void rs_close(struct tty_struct *tty, struct file * filp) | |||
| 1140 | uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK); | 1126 | uart->ustcnt &= ~(USTCNT_RXEN | USTCNT_RX_INTR_MASK); |
| 1141 | 1127 | ||
| 1142 | shutdown(info); | 1128 | shutdown(info); |
| 1143 | if (tty->driver->flush_buffer) | 1129 | rs_flush_buffer(tty); |
| 1144 | tty->driver->flush_buffer(tty); | ||
| 1145 | 1130 | ||
| 1146 | tty_ldisc_flush(tty); | 1131 | tty_ldisc_flush(tty); |
| 1147 | tty->closing = 0; | 1132 | tty->closing = 0; |
diff --git a/drivers/serial/68360serial.c b/drivers/serial/68360serial.c index f59463601874..d9d4e9552a4d 100644 --- a/drivers/serial/68360serial.c +++ b/drivers/serial/68360serial.c | |||
| @@ -995,10 +995,10 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 995 | volatile QUICC_BD *bdp; | 995 | volatile QUICC_BD *bdp; |
| 996 | 996 | ||
| 997 | if (serial_paranoia_check(info, tty->name, "rs_put_char")) | 997 | if (serial_paranoia_check(info, tty->name, "rs_put_char")) |
| 998 | return; | 998 | return 0; |
| 999 | 999 | ||
| 1000 | if (!tty) | 1000 | if (!tty) |
| 1001 | return; | 1001 | return 0; |
| 1002 | 1002 | ||
| 1003 | bdp = info->tx_cur; | 1003 | bdp = info->tx_cur; |
| 1004 | while (bdp->status & BD_SC_READY); | 1004 | while (bdp->status & BD_SC_READY); |
| @@ -1016,6 +1016,7 @@ static void rs_360_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 1016 | bdp++; | 1016 | bdp++; |
| 1017 | 1017 | ||
| 1018 | info->tx_cur = (QUICC_BD *)bdp; | 1018 | info->tx_cur = (QUICC_BD *)bdp; |
| 1019 | return 1; | ||
| 1019 | 1020 | ||
| 1020 | } | 1021 | } |
| 1021 | 1022 | ||
| @@ -1246,7 +1247,7 @@ static int rs_360_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 1246 | #ifdef modem_control | 1247 | #ifdef modem_control |
| 1247 | unsigned char control, status; | 1248 | unsigned char control, status; |
| 1248 | 1249 | ||
| 1249 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 1250 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 1250 | return -ENODEV; | 1251 | return -ENODEV; |
| 1251 | 1252 | ||
| 1252 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1253 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| @@ -1277,12 +1278,12 @@ static int rs_360_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 1277 | ser_info_t *info = (ser_info_t *)tty->driver_data; | 1278 | ser_info_t *info = (ser_info_t *)tty->driver_data; |
| 1278 | unsigned int arg; | 1279 | unsigned int arg; |
| 1279 | 1280 | ||
| 1280 | if (serial_paranoia_check(info, tty->name, __FUNCTION__)) | 1281 | if (serial_paranoia_check(info, tty->name, __func__)) |
| 1281 | return -ENODEV; | 1282 | return -ENODEV; |
| 1282 | 1283 | ||
| 1283 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1284 | if (tty->flags & (1 << TTY_IO_ERROR)) |
| 1284 | return -EIO; | 1285 | return -EIO; |
| 1285 | 1286 | /* FIXME: locking on info->mcr */ | |
| 1286 | if (set & TIOCM_RTS) | 1287 | if (set & TIOCM_RTS) |
| 1287 | info->mcr |= UART_MCR_RTS; | 1288 | info->mcr |= UART_MCR_RTS; |
| 1288 | if (set & TIOCM_DTR) | 1289 | if (set & TIOCM_DTR) |
| @@ -1436,18 +1437,6 @@ static int rs_360_ioctl(struct tty_struct *tty, struct file * file, | |||
| 1436 | return retval; | 1437 | return retval; |
| 1437 | end_break(info); | 1438 | end_break(info); |
| 1438 | return 0; | 1439 | return 0; |
| 1439 | case TIOCGSOFTCAR: | ||
| 1440 | /* return put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg); */ | ||
| 1441 | put_user(C_CLOCAL(tty) ? 1 : 0, (int *) arg); | ||
| 1442 | return 0; | ||
| 1443 | case TIOCSSOFTCAR: | ||
| 1444 | error = get_user(arg, (unsigned int *) arg); | ||
| 1445 | if (error) | ||
| 1446 | return error; | ||
| 1447 | tty->termios->c_cflag = | ||
| 1448 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1449 | (arg ? CLOCAL : 0)); | ||
| 1450 | return 0; | ||
| 1451 | #ifdef maybe | 1440 | #ifdef maybe |
| 1452 | case TIOCSERGETLSR: /* Get line status register */ | 1441 | case TIOCSERGETLSR: /* Get line status register */ |
| 1453 | return get_lsr_info(info, (unsigned int *) arg); | 1442 | return get_lsr_info(info, (unsigned int *) arg); |
| @@ -1665,8 +1654,7 @@ static void rs_360_close(struct tty_struct *tty, struct file * filp) | |||
| 1665 | rs_360_wait_until_sent(tty, info->timeout); | 1654 | rs_360_wait_until_sent(tty, info->timeout); |
| 1666 | } | 1655 | } |
| 1667 | shutdown(info); | 1656 | shutdown(info); |
| 1668 | if (tty->driver->flush_buffer) | 1657 | rs_360_flush_buffer(tty); |
| 1669 | tty->driver->flush_buffer(tty); | ||
| 1670 | tty_ldisc_flush(tty); | 1658 | tty_ldisc_flush(tty); |
| 1671 | tty->closing = 0; | 1659 | tty->closing = 0; |
| 1672 | info->event = 0; | 1660 | info->event = 0; |
| @@ -1717,6 +1705,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1717 | printk("jiff=%lu...", jiffies); | 1705 | printk("jiff=%lu...", jiffies); |
| 1718 | #endif | 1706 | #endif |
| 1719 | 1707 | ||
| 1708 | lock_kernel(); | ||
| 1720 | /* We go through the loop at least once because we can't tell | 1709 | /* We go through the loop at least once because we can't tell |
| 1721 | * exactly when the last character exits the shifter. There can | 1710 | * exactly when the last character exits the shifter. There can |
| 1722 | * be at least two characters waiting to be sent after the buffers | 1711 | * be at least two characters waiting to be sent after the buffers |
| @@ -1745,6 +1734,7 @@ static void rs_360_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1745 | bdp--; | 1734 | bdp--; |
| 1746 | } while (bdp->status & BD_SC_READY); | 1735 | } while (bdp->status & BD_SC_READY); |
| 1747 | current->state = TASK_RUNNING; | 1736 | current->state = TASK_RUNNING; |
| 1737 | unlock_kernel(); | ||
| 1748 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT | 1738 | #ifdef SERIAL_DEBUG_RS_WAIT_UNTIL_SENT |
| 1749 | printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); | 1739 | printk("lsr = %d (jiff=%lu)...done\n", lsr, jiffies); |
| 1750 | #endif | 1740 | #endif |
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c index 38776e8b064b..cd898704ba4f 100644 --- a/drivers/serial/8250_early.c +++ b/drivers/serial/8250_early.c | |||
| @@ -156,7 +156,7 @@ static int __init parse_options(struct early_serial8250_device *device, | |||
| 156 | port->membase = ioremap(port->mapbase, 64); | 156 | port->membase = ioremap(port->mapbase, 64); |
| 157 | if (!port->membase) { | 157 | if (!port->membase) { |
| 158 | printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n", | 158 | printk(KERN_ERR "%s: Couldn't ioremap 0x%llx\n", |
| 159 | __FUNCTION__, | 159 | __func__, |
| 160 | (unsigned long long)port->mapbase); | 160 | (unsigned long long)port->mapbase); |
| 161 | return -ENOMEM; | 161 | return -ENOMEM; |
| 162 | } | 162 | } |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 34b809e3b596..36acbcca2d48 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
| @@ -1355,4 +1355,47 @@ config SERIAL_SC26XX_CONSOLE | |||
| 1355 | help | 1355 | help |
| 1356 | Support for Console on SC2681/SC2692 serial ports. | 1356 | Support for Console on SC2681/SC2692 serial ports. |
| 1357 | 1357 | ||
| 1358 | config SERIAL_BFIN_SPORT | ||
| 1359 | tristate "Blackfin SPORT emulate UART (EXPERIMENTAL)" | ||
| 1360 | depends on BFIN && EXPERIMENTAL | ||
| 1361 | select SERIAL_CORE | ||
| 1362 | help | ||
| 1363 | Enable support for SPORT UART emulation on the Blackfin series. | ||
| 1364 | |||
| 1365 | To compile this driver as a module, choose M here: the | ||
| 1366 | module will be called bfin_sport_uart. | ||
| 1367 | |||
| 1368 | choice | ||
| 1369 | prompt "Baud rate for Blackfin SPORT UART" | ||
| 1370 | depends on SERIAL_BFIN_SPORT | ||
| 1371 | default SERIAL_SPORT_BAUD_RATE_57600 | ||
| 1372 | help | ||
| 1373 | Choose a baud rate for the SPORT UART; the other UART settings are | ||
| 1374 | 8 data bits, 1 stop bit, no parity, no flow control. | ||
| 1375 | |||
| 1376 | config SERIAL_SPORT_BAUD_RATE_115200 | ||
| 1377 | bool "115200" | ||
| 1378 | |||
| 1379 | config SERIAL_SPORT_BAUD_RATE_57600 | ||
| 1380 | bool "57600" | ||
| 1381 | |||
| 1382 | config SERIAL_SPORT_BAUD_RATE_38400 | ||
| 1383 | bool "38400" | ||
| 1384 | |||
| 1385 | config SERIAL_SPORT_BAUD_RATE_19200 | ||
| 1386 | bool "19200" | ||
| 1387 | |||
| 1388 | config SERIAL_SPORT_BAUD_RATE_9600 | ||
| 1389 | bool "9600" | ||
| 1390 | endchoice | ||
| 1391 | |||
| 1392 | config SPORT_BAUD_RATE | ||
| 1393 | int | ||
| 1394 | depends on SERIAL_BFIN_SPORT | ||
| 1395 | default 115200 if (SERIAL_SPORT_BAUD_RATE_115200) | ||
| 1396 | default 57600 if (SERIAL_SPORT_BAUD_RATE_57600) | ||
| 1397 | default 38400 if (SERIAL_SPORT_BAUD_RATE_38400) | ||
| 1398 | default 19200 if (SERIAL_SPORT_BAUD_RATE_19200) | ||
| 1399 | default 9600 if (SERIAL_SPORT_BAUD_RATE_9600) | ||
| 1400 | |||
| 1358 | endmenu | 1401 | endmenu |
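The new Kconfig choice funnels the selected baud rate into the single integer symbol SPORT_BAUD_RATE, so the driver can read one CONFIG value regardless of which bool was picked. A minimal, hypothetical .config fragment for a build with the emulated UART at 19200 baud (the unselected choice entries would simply appear as "not set"):

	CONFIG_SERIAL_BFIN_SPORT=y
	CONFIG_SERIAL_SPORT_BAUD_RATE_19200=y
	CONFIG_SPORT_BAUD_RATE=19200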
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile index f02ff9fad017..0d9c09b1e836 100644 --- a/drivers/serial/Makefile +++ b/drivers/serial/Makefile | |||
| @@ -27,6 +27,7 @@ obj-$(CONFIG_SERIAL_PXA) += pxa.o | |||
| 27 | obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o | 27 | obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o |
| 28 | obj-$(CONFIG_SERIAL_SA1100) += sa1100.o | 28 | obj-$(CONFIG_SERIAL_SA1100) += sa1100.o |
| 29 | obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o | 29 | obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o |
| 30 | obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o | ||
| 30 | obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o | 31 | obj-$(CONFIG_SERIAL_S3C2410) += s3c2410.o |
| 31 | obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o | 32 | obj-$(CONFIG_SERIAL_SUNCORE) += suncore.o |
| 32 | obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o | 33 | obj-$(CONFIG_SERIAL_SUNHV) += sunhv.o |
diff --git a/drivers/serial/bfin_5xx.c b/drivers/serial/bfin_5xx.c index 5f55534a290b..8a2f6a1baa74 100644 --- a/drivers/serial/bfin_5xx.c +++ b/drivers/serial/bfin_5xx.c | |||
| @@ -762,7 +762,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, | |||
| 762 | break; | 762 | break; |
| 763 | default: | 763 | default: |
| 764 | printk(KERN_ERR "%s: word lengh not supported\n", | 764 | printk(KERN_ERR "%s: word lengh not supported\n", |
| 765 | __FUNCTION__); | 765 | __func__); |
| 766 | } | 766 | } |
| 767 | 767 | ||
| 768 | if (termios->c_cflag & CSTOPB) | 768 | if (termios->c_cflag & CSTOPB) |
| @@ -1029,7 +1029,7 @@ bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, | |||
| 1029 | 1029 | ||
| 1030 | *baud = get_sclk() / (16*(dll | dlh << 8)); | 1030 | *baud = get_sclk() / (16*(dll | dlh << 8)); |
| 1031 | } | 1031 | } |
| 1032 | pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __FUNCTION__, *baud, *parity, *bits); | 1032 | pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits); |
| 1033 | } | 1033 | } |
| 1034 | #endif | 1034 | #endif |
| 1035 | 1035 | ||
diff --git a/drivers/serial/bfin_sport_uart.c b/drivers/serial/bfin_sport_uart.c new file mode 100644 index 000000000000..aca1240ad808 --- /dev/null +++ b/drivers/serial/bfin_sport_uart.c | |||
| @@ -0,0 +1,614 @@ | |||
| 1 | /* | ||
| 2 | * File: linux/drivers/serial/bfin_sport_uart.c | ||
| 3 | * | ||
| 4 | * Based on: drivers/serial/bfin_5xx.c by Aubrey Li. | ||
| 5 | * Author: Roy Huang <roy.huang@analog.com> | ||
| 6 | * | ||
| 7 | * Created: Nov 22, 2006 | ||
| 8 | * Copyright: (c) 2006-2007 Analog Devices Inc. | ||
| 9 | * Description: this driver enables the SPORTs on Blackfin to emulate a UART. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License as published by | ||
| 13 | * the Free Software Foundation; either version 2 of the License, or | ||
| 14 | * (at your option) any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License | ||
| 22 | * along with this program; if not, see the file COPYING, or write | ||
| 23 | * to the Free Software Foundation, Inc., | ||
| 24 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 25 | */ | ||
| 26 | |||
| 27 | /* | ||
| 28 | * This driver and the hardware it supports follow ADI application note EE-191. | ||
| 29 | * http://www.analog.com/UploadedFiles/Application_Notes/399447663EE191.pdf | ||
| 30 | * This application note describes how to implement a UART on a SHARC DSP, | ||
| 31 | * but this driver is implemented for the Blackfin processor. | ||
| 32 | */ | ||
| 33 | |||
| 34 | /* After reset, there is a prelude of a low-level pulse the first time data is | ||
| 35 | * transmitted. No additional pulse appears on subsequent transmits. | ||
| 36 | * According to the documentation: | ||
| 37 | * The SPORTs are ready to start transmitting or receiving data no later than | ||
| 38 | * three serial clock cycles after they are enabled in the SPORTx_TCR1 or | ||
| 39 | * SPORTx_RCR1 register. No serial clock cycles are lost from this point on. | ||
| 40 | * The first internal frame sync will occur one frame sync delay after the | ||
| 41 | * SPORTs are ready. External frame syncs can occur as soon as the SPORT is | ||
| 42 | * ready. | ||
| 43 | */ | ||
| 44 | |||
| 45 | /* Thanks to Axel Alatalo <axel@rubico.se> for fixing a sport rx bug. Sometimes | ||
| 46 | * sport receives data incorrectly. The following are Axel's words. | ||
| 47 | * As per EE-191, sport rx samples at 3 times the UART baud rate and takes the | ||
| 48 | * middle sample of every 3 samples as the data bit. For an 8-N-1 UART setting, | ||
| 49 | * 30 samples are required for a byte. If the transmitter sends a byte that is | ||
| 50 | * 1/3 bit too short due to baud rate drift, then the 30th sample of that byte, | ||
| 51 | * which is also the third sample of the stop bit, falls on the immediately | ||
| 52 | * following start bit, which is then thrown away and missed. Thus, since part | ||
| 53 | * of the start bit is missed, the receiver begins to drift, and the | ||
| 54 | * effect accumulates over time until synchronization is lost. | ||
| 55 | * If only 2 samples of the stop bit are required (29 samples in total), | ||
| 56 | * then a too-short byte as in the case above is tolerated. The 1/3-early | ||
| 57 | * start bit then triggers a framesync, since the last read is complete | ||
| 58 | * after only 2/3 of the stop bit and the framesync is active during the last | ||
| 59 | * 1/3, looking for a possible early start bit. */ | ||
| 60 | |||
| 61 | //#define DEBUG | ||
| 62 | |||
| 63 | #include <linux/module.h> | ||
| 64 | #include <linux/ioport.h> | ||
| 65 | #include <linux/init.h> | ||
| 66 | #include <linux/console.h> | ||
| 67 | #include <linux/sysrq.h> | ||
| 68 | #include <linux/platform_device.h> | ||
| 69 | #include <linux/tty.h> | ||
| 70 | #include <linux/tty_flip.h> | ||
| 71 | #include <linux/serial_core.h> | ||
| 72 | |||
| 73 | #include <asm/delay.h> | ||
| 74 | #include <asm/portmux.h> | ||
| 75 | |||
| 76 | #include "bfin_sport_uart.h" | ||
| 77 | |||
| 78 | unsigned short bfin_uart_pin_req_sport0[] = | ||
| 79 | {P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, \ | ||
| 80 | P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0}; | ||
| 81 | |||
| 82 | unsigned short bfin_uart_pin_req_sport1[] = | ||
| 83 | {P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, \ | ||
| 84 | P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0}; | ||
| 85 | |||
| 86 | #define DRV_NAME "bfin-sport-uart" | ||
| 87 | |||
| 88 | struct sport_uart_port { | ||
| 89 | struct uart_port port; | ||
| 90 | char *name; | ||
| 91 | |||
| 92 | int tx_irq; | ||
| 93 | int rx_irq; | ||
| 94 | int err_irq; | ||
| 95 | }; | ||
| 96 | |||
| 97 | static void sport_uart_tx_chars(struct sport_uart_port *up); | ||
| 98 | static void sport_stop_tx(struct uart_port *port); | ||
| 99 | |||
| 100 | static inline void tx_one_byte(struct sport_uart_port *up, unsigned int value) | ||
| 101 | { | ||
| 102 | pr_debug("%s value:%x\n", __FUNCTION__, value); | ||
| 103 | /* Place a Start and Stop bit */ | ||
| 104 | __asm__ volatile ( | ||
| 105 | "R2 = b#01111111100;\n\t" | ||
| 106 | "R3 = b#10000000001;\n\t" | ||
| 107 | "%0 <<= 2;\n\t" | ||
| 108 | "%0 = %0 & R2;\n\t" | ||
| 109 | "%0 = %0 | R3;\n\t" | ||
| 110 | :"=r"(value) | ||
| 111 | :"0"(value) | ||
| 112 | :"R2", "R3"); | ||
| 113 | pr_debug("%s value:%x\n", __FUNCTION__, value); | ||
| 114 | |||
| 115 | SPORT_PUT_TX(up, value); | ||
| 116 | } | ||
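For readers who do not speak Blackfin assembly, the constants above encode a UART frame built around the data byte. A hypothetical plain-C rendering of the same bit manipulation (my reading of the code, shown for illustration only, not something the driver compiles):

/* Frame a byte for LSB-first transmission through the SPORT:
 *   bit 0     = 1  (mark level before the start bit)
 *   bit 1     = 0  (start bit)
 *   bits 2..9 = the eight data bits
 *   bit 10    = 1  (stop bit)
 * which matches R2 = b#01111111100 (0x3fc) and R3 = b#10000000001 (0x401)
 * in the assembly above. */
static inline unsigned int sport_uart_frame_byte(unsigned int data)
{
	return ((data << 2) & 0x3fc) | 0x401;
}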
| 117 | |||
| 118 | static inline unsigned int rx_one_byte(struct sport_uart_port *up) | ||
| 119 | { | ||
| 120 | unsigned int value, extract; | ||
| 121 | |||
| 122 | value = SPORT_GET_RX32(up); | ||
| 123 | pr_debug("%s value:%x\n", __FUNCTION__, value); | ||
| 124 | |||
| 125 | /* Extract 8 bits data */ | ||
| 126 | __asm__ volatile ( | ||
| 127 | "R5 = 0;\n\t" | ||
| 128 | "P0 = 8;\n\t" | ||
| 129 | "R1 = 0x1801(Z);\n\t" | ||
| 130 | "R3 = 0x0300(Z);\n\t" | ||
| 131 | "R4 = 0;\n\t" | ||
| 132 | "LSETUP(loop_s, loop_e) LC0 = P0;\nloop_s:\t" | ||
| 133 | "R2 = extract(%1, R1.L)(Z);\n\t" | ||
| 134 | "R2 <<= R4;\n\t" | ||
| 135 | "R5 = R5 | R2;\n\t" | ||
| 136 | "R1 = R1 - R3;\nloop_e:\t" | ||
| 137 | "R4 += 1;\n\t" | ||
| 138 | "%0 = R5;\n\t" | ||
| 139 | :"=r"(extract) | ||
| 140 | :"r"(value) | ||
| 141 | :"P0", "R1", "R2","R3","R4", "R5"); | ||
| 142 | |||
| 143 | pr_debug(" extract:%x\n", extract); | ||
| 144 | return extract; | ||
| 145 | } | ||
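The receive side undoes the 3x oversampling: as far as the extract pattern can be read (field length 1, position decreasing by 3 each iteration), the loop picks one sample of each data bit at bit positions 24, 21, ..., 3 of the 29-bit receive word and packs them LSB first. A hypothetical C equivalent, included only to document those bit positions:

/* Editorial sketch of the extraction above; not used by the driver. */
static inline unsigned int sport_uart_extract_byte(unsigned int raw)
{
	unsigned int byte = 0;
	int i;

	for (i = 0; i < 8; i++)
		byte |= ((raw >> (24 - 3 * i)) & 1) << i;
	return byte;
}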
| 146 | |||
| 147 | static int sport_uart_setup(struct sport_uart_port *up, int sclk, int baud_rate) | ||
| 148 | { | ||
| 149 | int tclkdiv, tfsdiv, rclkdiv; | ||
| 150 | |||
| 151 | /* Set TCR1 and TCR2 */ | ||
| 152 | SPORT_PUT_TCR1(up, (LTFS | ITFS | TFSR | TLSBIT | ITCLK)); | ||
| 153 | SPORT_PUT_TCR2(up, 10); | ||
| 154 | pr_debug("%s TCR1:%x, TCR2:%x\n", __FUNCTION__, SPORT_GET_TCR1(up), SPORT_GET_TCR2(up)); | ||
| 155 | |||
| 156 | /* Set RCR1 and RCR2 */ | ||
| 157 | SPORT_PUT_RCR1(up, (RCKFE | LARFS | LRFS | RFSR | IRCLK)); | ||
| 158 | SPORT_PUT_RCR2(up, 28); | ||
| 159 | pr_debug("%s RCR1:%x, RCR2:%x\n", __FUNCTION__, SPORT_GET_RCR1(up), SPORT_GET_RCR2(up)); | ||
| 160 | |||
| 161 | tclkdiv = sclk/(2 * baud_rate) - 1; | ||
| 162 | tfsdiv = 12; | ||
| 163 | rclkdiv = sclk/(2 * baud_rate * 3) - 1; | ||
| 164 | SPORT_PUT_TCLKDIV(up, tclkdiv); | ||
| 165 | SPORT_PUT_TFSDIV(up, tfsdiv); | ||
| 166 | SPORT_PUT_RCLKDIV(up, rclkdiv); | ||
| 167 | SSYNC(); | ||
| 168 | pr_debug("%s sclk:%d, baud_rate:%d, tclkdiv:%d, tfsdiv:%d, rclkdiv:%d\n", | ||
| 169 | __FUNCTION__, sclk, baud_rate, tclkdiv, tfsdiv, rclkdiv); | ||
| 170 | |||
| 171 | return 0; | ||
| 172 | } | ||
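The divider arithmetic follows the SPORT clock relation serial clock = sclk / (2 * (div + 1)): the transmitter is clocked at roughly the baud rate (11-bit words, TCR2 = 10) while the receiver is clocked at roughly three times the baud rate to produce the 29-sample words described earlier (RCR2 = 28). A small worked example with made-up numbers, not taken from any particular board:

#include <stdio.h>

int main(void)
{
	/* Illustrative values only: sclk = 120 MHz, 57600 baud. */
	unsigned int sclk = 120000000, baud = 57600;
	unsigned int tclkdiv = sclk / (2 * baud) - 1;      /* 1040 */
	unsigned int rclkdiv = sclk / (2 * baud * 3) - 1;  /* 346  */

	printf("tclkdiv=%u -> tx clock %u Hz (~1 x baud)\n",
	       tclkdiv, sclk / (2 * (tclkdiv + 1)));
	printf("rclkdiv=%u -> rx clock %u Hz (~3 x baud)\n",
	       rclkdiv, sclk / (2 * (rclkdiv + 1)));
	return 0;
}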
| 173 | |||
| 174 | static irqreturn_t sport_uart_rx_irq(int irq, void *dev_id) | ||
| 175 | { | ||
| 176 | struct sport_uart_port *up = dev_id; | ||
| 177 | struct tty_struct *tty = up->port.info->tty; | ||
| 178 | unsigned int ch; | ||
| 179 | |||
| 180 | do { | ||
| 181 | ch = rx_one_byte(up); | ||
| 182 | up->port.icount.rx++; | ||
| 183 | |||
| 184 | if (uart_handle_sysrq_char(&up->port, ch)) | ||
| 185 | ; | ||
| 186 | else | ||
| 187 | tty_insert_flip_char(tty, ch, TTY_NORMAL); | ||
| 188 | } while (SPORT_GET_STAT(up) & RXNE); | ||
| 189 | tty_flip_buffer_push(tty); | ||
| 190 | |||
| 191 | return IRQ_HANDLED; | ||
| 192 | } | ||
| 193 | |||
| 194 | static irqreturn_t sport_uart_tx_irq(int irq, void *dev_id) | ||
| 195 | { | ||
| 196 | sport_uart_tx_chars(dev_id); | ||
| 197 | |||
| 198 | return IRQ_HANDLED; | ||
| 199 | } | ||
| 200 | |||
| 201 | static irqreturn_t sport_uart_err_irq(int irq, void *dev_id) | ||
| 202 | { | ||
| 203 | struct sport_uart_port *up = dev_id; | ||
| 204 | struct tty_struct *tty = up->port.info->tty; | ||
| 205 | unsigned int stat = SPORT_GET_STAT(up); | ||
| 206 | |||
| 207 | /* Overflow in RX FIFO */ | ||
| 208 | if (stat & ROVF) { | ||
| 209 | up->port.icount.overrun++; | ||
| 210 | tty_insert_flip_char(tty, 0, TTY_OVERRUN); | ||
| 211 | SPORT_PUT_STAT(up, ROVF); /* Clear ROVF bit */ | ||
| 212 | } | ||
| 213 | /* These should not happen */ | ||
| 214 | if (stat & (TOVF | TUVF | RUVF)) { | ||
| 215 | printk(KERN_ERR "SPORT Error:%s %s %s\n", | ||
| 216 | (stat & TOVF)?"TX overflow":"", | ||
| 217 | (stat & TUVF)?"TX underflow":"", | ||
| 218 | (stat & RUVF)?"RX underflow":""); | ||
| 219 | SPORT_PUT_TCR1(up, SPORT_GET_TCR1(up) & ~TSPEN); | ||
| 220 | SPORT_PUT_RCR1(up, SPORT_GET_RCR1(up) & ~RSPEN); | ||
| 221 | } | ||
| 222 | SSYNC(); | ||
| 223 | |||
| 224 | return IRQ_HANDLED; | ||
| 225 | } | ||
| 226 | |||
| 227 | /* Request IRQ, set up clock */ | ||
| 228 | static int sport_startup(struct uart_port *port) | ||
| 229 | { | ||
| 230 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 231 | char buffer[20]; | ||
| 232 | int retval; | ||
| 233 | |||
| 234 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 235 | memset(buffer, '\0', 20); | ||
| 236 | snprintf(buffer, 20, "%s rx", up->name); | ||
| 237 | retval = request_irq(up->rx_irq, sport_uart_rx_irq, IRQF_SAMPLE_RANDOM, buffer, up); | ||
| 238 | if (retval) { | ||
| 239 | printk(KERN_ERR "Unable to request interrupt %s\n", buffer); | ||
| 240 | return retval; | ||
| 241 | } | ||
| 242 | |||
| 243 | snprintf(buffer, 20, "%s tx", up->name); | ||
| 244 | retval = request_irq(up->tx_irq, sport_uart_tx_irq, IRQF_SAMPLE_RANDOM, buffer, up); | ||
| 245 | if (retval) { | ||
| 246 | printk(KERN_ERR "Unable to request interrupt %s\n", buffer); | ||
| 247 | goto fail1; | ||
| 248 | } | ||
| 249 | |||
| 250 | snprintf(buffer, 20, "%s err", up->name); | ||
| 251 | retval = request_irq(up->err_irq, sport_uart_err_irq, IRQF_SAMPLE_RANDOM, buffer, up); | ||
| 252 | if (retval) { | ||
| 253 | printk(KERN_ERR "Unable to request interrupt %s\n", buffer); | ||
| 254 | goto fail2; | ||
| 255 | } | ||
| 256 | |||
| 257 | if (port->line) { | ||
| 258 | if (peripheral_request_list(bfin_uart_pin_req_sport1, DRV_NAME)) | ||
| 259 | goto fail3; | ||
| 260 | } else { | ||
| 261 | if (peripheral_request_list(bfin_uart_pin_req_sport0, DRV_NAME)) | ||
| 262 | goto fail3; | ||
| 263 | } | ||
| 264 | |||
| 265 | sport_uart_setup(up, get_sclk(), port->uartclk); | ||
| 266 | |||
| 267 | /* Enable receive interrupt */ | ||
| 268 | SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) | RSPEN)); | ||
| 269 | SSYNC(); | ||
| 270 | |||
| 271 | return 0; | ||
| 272 | |||
| 273 | |||
| 274 | fail3: | ||
| 275 | printk(KERN_ERR DRV_NAME | ||
| 276 | ": Requesting Peripherals failed\n"); | ||
| 277 | |||
| 278 | free_irq(up->err_irq, up); | ||
| 279 | fail2: | ||
| 280 | free_irq(up->tx_irq, up); | ||
| 281 | fail1: | ||
| 282 | free_irq(up->rx_irq, up); | ||
| 283 | |||
| 284 | return retval; | ||
| 285 | |||
| 286 | } | ||
| 287 | |||
| 288 | static void sport_uart_tx_chars(struct sport_uart_port *up) | ||
| 289 | { | ||
| 290 | struct circ_buf *xmit = &up->port.info->xmit; | ||
| 291 | |||
| 292 | if (SPORT_GET_STAT(up) & TXF) | ||
| 293 | return; | ||
| 294 | |||
| 295 | if (up->port.x_char) { | ||
| 296 | tx_one_byte(up, up->port.x_char); | ||
| 297 | up->port.icount.tx++; | ||
| 298 | up->port.x_char = 0; | ||
| 299 | return; | ||
| 300 | } | ||
| 301 | |||
| 302 | if (uart_circ_empty(xmit) || uart_tx_stopped(&up->port)) { | ||
| 303 | sport_stop_tx(&up->port); | ||
| 304 | return; | ||
| 305 | } | ||
| 306 | |||
| 307 | while(!(SPORT_GET_STAT(up) & TXF) && !uart_circ_empty(xmit)) { | ||
| 308 | tx_one_byte(up, xmit->buf[xmit->tail]); | ||
| 309 | xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE -1); | ||
| 310 | up->port.icount.tx++; | ||
| 311 | } | ||
| 312 | |||
| 313 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
| 314 | uart_write_wakeup(&up->port); | ||
| 315 | } | ||
| 316 | |||
| 317 | static unsigned int sport_tx_empty(struct uart_port *port) | ||
| 318 | { | ||
| 319 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 320 | unsigned int stat; | ||
| 321 | |||
| 322 | stat = SPORT_GET_STAT(up); | ||
| 323 | pr_debug("%s stat:%04x\n", __FUNCTION__, stat); | ||
| 324 | if (stat & TXHRE) { | ||
| 325 | return TIOCSER_TEMT; | ||
| 326 | } else | ||
| 327 | return 0; | ||
| 328 | } | ||
| 329 | |||
| 330 | static unsigned int sport_get_mctrl(struct uart_port *port) | ||
| 331 | { | ||
| 332 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 333 | return (TIOCM_CTS | TIOCM_CD | TIOCM_DSR); | ||
| 334 | } | ||
| 335 | |||
| 336 | static void sport_set_mctrl(struct uart_port *port, unsigned int mctrl) | ||
| 337 | { | ||
| 338 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 339 | } | ||
| 340 | |||
| 341 | static void sport_stop_tx(struct uart_port *port) | ||
| 342 | { | ||
| 343 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 344 | unsigned int stat; | ||
| 345 | |||
| 346 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 347 | |||
| 348 | stat = SPORT_GET_STAT(up); | ||
| 349 | while(!(stat & TXHRE)) { | ||
| 350 | udelay(1); | ||
| 351 | stat = SPORT_GET_STAT(up); | ||
| 352 | } | ||
| 353 | /* Although the hold register is empty, the last byte is still in the | ||
| 354 | * shift register and has not been sent out yet. If the baud rate is | ||
| 355 | * lower than the default, the delay should be longer. For example, at | ||
| 356 | * 9600 baud the delay must be at least 2 ms in practice. */ | ||
| 357 | udelay(500); | ||
| 358 | |||
| 359 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); | ||
| 360 | SSYNC(); | ||
| 361 | |||
| 362 | return; | ||
| 363 | } | ||
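The fixed udelay(500) above is sized to the frame time of the shift register: each SPORT "UART" word is 11 bits (word length set by TCR2 = 10), so draining the last word takes roughly 11 / baud seconds, which fits in 500 us at the higher rates but is over a millisecond at 9600 baud, hence the 2 ms observation in the comment. A throwaway sketch of that arithmetic (illustrative only):

#include <stdio.h>

int main(void)
{
	const unsigned int frame_bits = 11;   /* word length set by TCR2 = 10 */
	const unsigned int bauds[] = { 9600, 57600, 115200 };
	unsigned int i;

	for (i = 0; i < sizeof(bauds) / sizeof(bauds[0]); i++)
		printf("%6u baud: ~%u us to drain one frame\n",
		       bauds[i], frame_bits * 1000000 / bauds[i]);
	return 0;
}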
| 364 | |||
| 365 | static void sport_start_tx(struct uart_port *port) | ||
| 366 | { | ||
| 367 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 368 | |||
| 369 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 370 | /* Write data into the SPORT FIFO before enabling the SPORT to transmit */ | ||
| 371 | sport_uart_tx_chars(up); | ||
| 372 | |||
| 373 | /* Enable transmit, then an interrupt will be generated */ | ||
| 374 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) | TSPEN)); | ||
| 375 | SSYNC(); | ||
| 376 | pr_debug("%s exit\n", __FUNCTION__); | ||
| 377 | } | ||
| 378 | |||
| 379 | static void sport_stop_rx(struct uart_port *port) | ||
| 380 | { | ||
| 381 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 382 | |||
| 383 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 384 | /* Disable sport to stop rx */ | ||
| 385 | SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); | ||
| 386 | SSYNC(); | ||
| 387 | } | ||
| 388 | |||
| 389 | static void sport_enable_ms(struct uart_port *port) | ||
| 390 | { | ||
| 391 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 392 | } | ||
| 393 | |||
| 394 | static void sport_break_ctl(struct uart_port *port, int break_state) | ||
| 395 | { | ||
| 396 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 397 | } | ||
| 398 | |||
| 399 | static void sport_shutdown(struct uart_port *port) | ||
| 400 | { | ||
| 401 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 402 | |||
| 403 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 404 | |||
| 405 | /* Disable sport */ | ||
| 406 | SPORT_PUT_TCR1(up, (SPORT_GET_TCR1(up) & ~TSPEN)); | ||
| 407 | SPORT_PUT_RCR1(up, (SPORT_GET_RCR1(up) & ~RSPEN)); | ||
| 408 | SSYNC(); | ||
| 409 | |||
| 410 | if (port->line) { | ||
| 411 | peripheral_free_list(bfin_uart_pin_req_sport1); | ||
| 412 | } else { | ||
| 413 | peripheral_free_list(bfin_uart_pin_req_sport0); | ||
| 414 | } | ||
| 415 | |||
| 416 | free_irq(up->rx_irq, up); | ||
| 417 | free_irq(up->tx_irq, up); | ||
| 418 | free_irq(up->err_irq, up); | ||
| 419 | } | ||
| 420 | |||
| 421 | static void sport_set_termios(struct uart_port *port, | ||
| 422 | struct termios *termios, struct termios *old) | ||
| 423 | { | ||
| 424 | pr_debug("%s enter, c_cflag:%08x\n", __FUNCTION__, termios->c_cflag); | ||
| 425 | uart_update_timeout(port, CS8, port->uartclk); | ||
| 426 | } | ||
| 427 | |||
| 428 | static const char *sport_type(struct uart_port *port) | ||
| 429 | { | ||
| 430 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 431 | |||
| 432 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 433 | return up->name; | ||
| 434 | } | ||
| 435 | |||
| 436 | static void sport_release_port(struct uart_port *port) | ||
| 437 | { | ||
| 438 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 439 | } | ||
| 440 | |||
| 441 | static int sport_request_port(struct uart_port *port) | ||
| 442 | { | ||
| 443 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 444 | return 0; | ||
| 445 | } | ||
| 446 | |||
| 447 | static void sport_config_port(struct uart_port *port, int flags) | ||
| 448 | { | ||
| 449 | struct sport_uart_port *up = (struct sport_uart_port *)port; | ||
| 450 | |||
| 451 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 452 | up->port.type = PORT_BFIN_SPORT; | ||
| 453 | } | ||
| 454 | |||
| 455 | static int sport_verify_port(struct uart_port *port, struct serial_struct *ser) | ||
| 456 | { | ||
| 457 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 458 | return 0; | ||
| 459 | } | ||
| 460 | |||
| 461 | struct uart_ops sport_uart_ops = { | ||
| 462 | .tx_empty = sport_tx_empty, | ||
| 463 | .set_mctrl = sport_set_mctrl, | ||
| 464 | .get_mctrl = sport_get_mctrl, | ||
| 465 | .stop_tx = sport_stop_tx, | ||
| 466 | .start_tx = sport_start_tx, | ||
| 467 | .stop_rx = sport_stop_rx, | ||
| 468 | .enable_ms = sport_enable_ms, | ||
| 469 | .break_ctl = sport_break_ctl, | ||
| 470 | .startup = sport_startup, | ||
| 471 | .shutdown = sport_shutdown, | ||
| 472 | .set_termios = sport_set_termios, | ||
| 473 | .type = sport_type, | ||
| 474 | .release_port = sport_release_port, | ||
| 475 | .request_port = sport_request_port, | ||
| 476 | .config_port = sport_config_port, | ||
| 477 | .verify_port = sport_verify_port, | ||
| 478 | }; | ||
| 479 | |||
| 480 | static struct sport_uart_port sport_uart_ports[] = { | ||
| 481 | { /* SPORT 0 */ | ||
| 482 | .name = "SPORT0", | ||
| 483 | .tx_irq = IRQ_SPORT0_TX, | ||
| 484 | .rx_irq = IRQ_SPORT0_RX, | ||
| 485 | .err_irq= IRQ_SPORT0_ERROR, | ||
| 486 | .port = { | ||
| 487 | .type = PORT_BFIN_SPORT, | ||
| 488 | .iotype = UPIO_MEM, | ||
| 489 | .membase = (void __iomem *)SPORT0_TCR1, | ||
| 490 | .mapbase = SPORT0_TCR1, | ||
| 491 | .irq = IRQ_SPORT0_RX, | ||
| 492 | .uartclk = CONFIG_SPORT_BAUD_RATE, | ||
| 493 | .fifosize = 8, | ||
| 494 | .ops = &sport_uart_ops, | ||
| 495 | .line = 0, | ||
| 496 | }, | ||
| 497 | }, { /* SPORT 1 */ | ||
| 498 | .name = "SPORT1", | ||
| 499 | .tx_irq = IRQ_SPORT1_TX, | ||
| 500 | .rx_irq = IRQ_SPORT1_RX, | ||
| 501 | .err_irq= IRQ_SPORT1_ERROR, | ||
| 502 | .port = { | ||
| 503 | .type = PORT_BFIN_SPORT, | ||
| 504 | .iotype = UPIO_MEM, | ||
| 505 | .membase = (void __iomem *)SPORT1_TCR1, | ||
| 506 | .mapbase = SPORT1_TCR1, | ||
| 507 | .irq = IRQ_SPORT1_RX, | ||
| 508 | .uartclk = CONFIG_SPORT_BAUD_RATE, | ||
| 509 | .fifosize = 8, | ||
| 510 | .ops = &sport_uart_ops, | ||
| 511 | .line = 1, | ||
| 512 | }, | ||
| 513 | } | ||
| 514 | }; | ||
| 515 | |||
| 516 | static struct uart_driver sport_uart_reg = { | ||
| 517 | .owner = THIS_MODULE, | ||
| 518 | .driver_name = "SPORT-UART", | ||
| 519 | .dev_name = "ttySS", | ||
| 520 | .major = 204, | ||
| 521 | .minor = 84, | ||
| 522 | .nr = ARRAY_SIZE(sport_uart_ports), | ||
| 523 | .cons = NULL, | ||
| 524 | }; | ||
| 525 | |||
| 526 | static int sport_uart_suspend(struct platform_device *dev, pm_message_t state) | ||
| 527 | { | ||
| 528 | struct sport_uart_port *sport = platform_get_drvdata(dev); | ||
| 529 | |||
| 530 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 531 | if (sport) | ||
| 532 | uart_suspend_port(&sport_uart_reg, &sport->port); | ||
| 533 | |||
| 534 | return 0; | ||
| 535 | } | ||
| 536 | |||
| 537 | static int sport_uart_resume(struct platform_device *dev) | ||
| 538 | { | ||
| 539 | struct sport_uart_port *sport = platform_get_drvdata(dev); | ||
| 540 | |||
| 541 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 542 | if (sport) | ||
| 543 | uart_resume_port(&sport_uart_reg, &sport->port); | ||
| 544 | |||
| 545 | return 0; | ||
| 546 | } | ||
| 547 | |||
| 548 | static int sport_uart_probe(struct platform_device *dev) | ||
| 549 | { | ||
| 550 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 551 | sport_uart_ports[dev->id].port.dev = &dev->dev; | ||
| 552 | uart_add_one_port(&sport_uart_reg, &sport_uart_ports[dev->id].port); | ||
| 553 | platform_set_drvdata(dev, &sport_uart_ports[dev->id]); | ||
| 554 | |||
| 555 | return 0; | ||
| 556 | } | ||
| 557 | |||
| 558 | static int sport_uart_remove(struct platform_device *dev) | ||
| 559 | { | ||
| 560 | struct sport_uart_port *sport = platform_get_drvdata(dev); | ||
| 561 | |||
| 562 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 563 | platform_set_drvdata(dev, NULL); | ||
| 564 | |||
| 565 | if (sport) | ||
| 566 | uart_remove_one_port(&sport_uart_reg, &sport->port); | ||
| 567 | |||
| 568 | return 0; | ||
| 569 | } | ||
| 570 | |||
| 571 | static struct platform_driver sport_uart_driver = { | ||
| 572 | .probe = sport_uart_probe, | ||
| 573 | .remove = sport_uart_remove, | ||
| 574 | .suspend = sport_uart_suspend, | ||
| 575 | .resume = sport_uart_resume, | ||
| 576 | .driver = { | ||
| 577 | .name = DRV_NAME, | ||
| 578 | }, | ||
| 579 | }; | ||
| 580 | |||
| 581 | static int __init sport_uart_init(void) | ||
| 582 | { | ||
| 583 | int ret; | ||
| 584 | |||
| 585 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 586 | ret = uart_register_driver(&sport_uart_reg); | ||
| 587 | if (ret != 0) { | ||
| 588 | printk(KERN_ERR "Failed to register %s:%d\n", | ||
| 589 | sport_uart_reg.driver_name, ret); | ||
| 590 | return ret; | ||
| 591 | } | ||
| 592 | |||
| 593 | ret = platform_driver_register(&sport_uart_driver); | ||
| 594 | if (ret != 0) { | ||
| 595 | printk(KERN_ERR "Failed to register sport uart driver:%d\n", ret); | ||
| 596 | uart_unregister_driver(&sport_uart_reg); | ||
| 597 | } | ||
| 598 | |||
| 599 | |||
| 600 | pr_debug("%s exit\n", __FUNCTION__); | ||
| 601 | return ret; | ||
| 602 | } | ||
| 603 | |||
| 604 | static void __exit sport_uart_exit(void) | ||
| 605 | { | ||
| 606 | pr_debug("%s enter\n", __FUNCTION__); | ||
| 607 | platform_driver_unregister(&sport_uart_driver); | ||
| 608 | uart_unregister_driver(&sport_uart_reg); | ||
| 609 | } | ||
| 610 | |||
| 611 | module_init(sport_uart_init); | ||
| 612 | module_exit(sport_uart_exit); | ||
| 613 | |||
| 614 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/serial/bfin_sport_uart.h b/drivers/serial/bfin_sport_uart.h new file mode 100644 index 000000000000..671d41cc1a3f --- /dev/null +++ b/drivers/serial/bfin_sport_uart.h | |||
| @@ -0,0 +1,63 @@ | |||
| 1 | /* | ||
| 2 | * File: linux/drivers/serial/bfin_sport_uart.h | ||
| 3 | * | ||
| 4 | * Based on: include/asm-blackfin/mach-533/bfin_serial_5xx.h | ||
| 5 | * Author: Roy Huang <roy.huang@analog.com> | ||
| 6 | * | ||
| 7 | * Created: Nov 22, 2006 | ||
| 8 | * Copyright: (C) Analog Devices Inc. | ||
| 9 | * Description: this driver enables the SPORTs on Blackfin to emulate UARTs. | ||
| 10 | * | ||
| 11 | * This program is free software; you can redistribute it and/or modify | ||
| 12 | * it under the terms of the GNU General Public License as published by | ||
| 13 | * the Free Software Foundation; either version 2 of the License, or | ||
| 14 | * (at your option) any later version. | ||
| 15 | * | ||
| 16 | * This program is distributed in the hope that it will be useful, | ||
| 17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 19 | * GNU General Public License for more details. | ||
| 20 | * | ||
| 21 | * You should have received a copy of the GNU General Public License | ||
| 22 | * along with this program; if not, see the file COPYING, or write | ||
| 23 | * to the Free Software Foundation, Inc., | ||
| 24 | * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | ||
| 25 | */ | ||
| 26 | |||
| 27 | |||
| 28 | #define OFFSET_TCR1 0x00 /* Transmit Configuration 1 Register */ | ||
| 29 | #define OFFSET_TCR2 0x04 /* Transmit Configuration 2 Register */ | ||
| 30 | #define OFFSET_TCLKDIV 0x08 /* Transmit Serial Clock Divider Register */ | ||
| 31 | #define OFFSET_TFSDIV 0x0C /* Transmit Frame Sync Divider Register */ | ||
| 32 | #define OFFSET_TX 0x10 /* Transmit Data Register */ | ||
| 33 | #define OFFSET_RX 0x18 /* Receive Data Register */ | ||
| 34 | #define OFFSET_RCR1 0x20 /* Receive Configuration 1 Register */ | ||
| 35 | #define OFFSET_RCR2 0x24 /* Receive Configuration 2 Register */ | ||
| 36 | #define OFFSET_RCLKDIV 0x28 /* Receive Serial Clock Divider Register */ | ||
| 37 | #define OFFSET_RFSDIV 0x2c /* Receive Frame Sync Divider Register */ | ||
| 38 | #define OFFSET_STAT 0x30 /* Status Register */ | ||
| 39 | |||
| 40 | #define SPORT_GET_TCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR1)) | ||
| 41 | #define SPORT_GET_TCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_TCR2)) | ||
| 42 | #define SPORT_GET_TCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TCLKDIV)) | ||
| 43 | #define SPORT_GET_TFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_TFSDIV)) | ||
| 44 | #define SPORT_GET_TX(sport) bfin_read16(((sport)->port.membase + OFFSET_TX)) | ||
| 45 | #define SPORT_GET_RX(sport) bfin_read16(((sport)->port.membase + OFFSET_RX)) | ||
| 46 | #define SPORT_GET_RX32(sport) bfin_read32(((sport)->port.membase + OFFSET_RX)) | ||
| 47 | #define SPORT_GET_RCR1(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR1)) | ||
| 48 | #define SPORT_GET_RCR2(sport) bfin_read16(((sport)->port.membase + OFFSET_RCR2)) | ||
| 49 | #define SPORT_GET_RCLKDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RCLKDIV)) | ||
| 50 | #define SPORT_GET_RFSDIV(sport) bfin_read16(((sport)->port.membase + OFFSET_RFSDIV)) | ||
| 51 | #define SPORT_GET_STAT(sport) bfin_read16(((sport)->port.membase + OFFSET_STAT)) | ||
| 52 | |||
| 53 | #define SPORT_PUT_TCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR1), v) | ||
| 54 | #define SPORT_PUT_TCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCR2), v) | ||
| 55 | #define SPORT_PUT_TCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TCLKDIV), v) | ||
| 56 | #define SPORT_PUT_TFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TFSDIV), v) | ||
| 57 | #define SPORT_PUT_TX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_TX), v) | ||
| 58 | #define SPORT_PUT_RX(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RX), v) | ||
| 59 | #define SPORT_PUT_RCR1(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR1), v) | ||
| 60 | #define SPORT_PUT_RCR2(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCR2), v) | ||
| 61 | #define SPORT_PUT_RCLKDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RCLKDIV), v) | ||
| 62 | #define SPORT_PUT_RFSDIV(sport, v) bfin_write16(((sport)->port.membase + OFFSET_RFSDIV), v) | ||
| 63 | #define SPORT_PUT_STAT(sport, v) bfin_write16(((sport)->port.membase + OFFSET_STAT), v) | ||
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c index a638ba0679ac..a19dc7ef8861 100644 --- a/drivers/serial/cpm_uart/cpm_uart_core.c +++ b/drivers/serial/cpm_uart/cpm_uart_core.c | |||
| @@ -1117,7 +1117,7 @@ int cpm_uart_drv_get_platform_data(struct platform_device *pdev, int is_con) | |||
| 1117 | 1117 | ||
| 1118 | line = cpm_uart_id2nr(idx); | 1118 | line = cpm_uart_id2nr(idx); |
| 1119 | if(line < 0) { | 1119 | if(line < 0) { |
| 1120 | printk(KERN_ERR"%s(): port %d is not registered", __FUNCTION__, idx); | 1120 | printk(KERN_ERR"%s(): port %d is not registered", __func__, idx); |
| 1121 | return -EINVAL; | 1121 | return -EINVAL; |
| 1122 | } | 1122 | } |
| 1123 | 1123 | ||
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index 88e7c1d5b919..f9fa237aa949 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c | |||
| @@ -1788,7 +1788,7 @@ static unsigned int handle_descr_data(struct e100_serial *info, | |||
| 1788 | 1788 | ||
| 1789 | if (info->recv_cnt + recvl > 65536) { | 1789 | if (info->recv_cnt + recvl > 65536) { |
| 1790 | printk(KERN_CRIT | 1790 | printk(KERN_CRIT |
| 1791 | "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __FUNCTION__, recvl); | 1791 | "%s: Too much pending incoming serial data! Dropping %u bytes.\n", __func__, recvl); |
| 1792 | return 0; | 1792 | return 0; |
| 1793 | } | 1793 | } |
| 1794 | 1794 | ||
| @@ -1801,7 +1801,7 @@ static unsigned int handle_descr_data(struct e100_serial *info, | |||
| 1801 | append_recv_buffer(info, buffer); | 1801 | append_recv_buffer(info, buffer); |
| 1802 | 1802 | ||
| 1803 | if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) | 1803 | if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) |
| 1804 | panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__); | 1804 | panic("%s: Failed to allocate memory for receive buffer!\n", __func__); |
| 1805 | 1805 | ||
| 1806 | descr->buf = virt_to_phys(buffer->buffer); | 1806 | descr->buf = virt_to_phys(buffer->buffer); |
| 1807 | 1807 | ||
| @@ -1925,7 +1925,7 @@ static int start_recv_dma(struct e100_serial *info) | |||
| 1925 | /* Set up the receiving descriptors */ | 1925 | /* Set up the receiving descriptors */ |
| 1926 | for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) { | 1926 | for (i = 0; i < SERIAL_RECV_DESCRIPTORS; i++) { |
| 1927 | if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) | 1927 | if (!(buffer = alloc_recv_buffer(SERIAL_DESCR_BUF_SIZE))) |
| 1928 | panic("%s: Failed to allocate memory for receive buffer!\n", __FUNCTION__); | 1928 | panic("%s: Failed to allocate memory for receive buffer!\n", __func__); |
| 1929 | 1929 | ||
| 1930 | descr[i].ctrl = d_int; | 1930 | descr[i].ctrl = d_int; |
| 1931 | descr[i].buf = virt_to_phys(buffer->buffer); | 1931 | descr[i].buf = virt_to_phys(buffer->buffer); |
| @@ -3581,8 +3581,9 @@ rs_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 3581 | unsigned int set, unsigned int clear) | 3581 | unsigned int set, unsigned int clear) |
| 3582 | { | 3582 | { |
| 3583 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; | 3583 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; |
| 3584 | unsigned long flags; | ||
| 3584 | 3585 | ||
| 3585 | lock_kernel(); | 3586 | local_irq_save(flags); |
| 3586 | 3587 | ||
| 3587 | if (clear & TIOCM_RTS) | 3588 | if (clear & TIOCM_RTS) |
| 3588 | e100_rts(info, 0); | 3589 | e100_rts(info, 0); |
| @@ -3604,7 +3605,7 @@ rs_tiocmset(struct tty_struct *tty, struct file *file, | |||
| 3604 | if (set & TIOCM_CD) | 3605 | if (set & TIOCM_CD) |
| 3605 | e100_cd_out(info, 1); | 3606 | e100_cd_out(info, 1); |
| 3606 | 3607 | ||
| 3607 | unlock_kernel(); | 3608 | local_irq_restore(flags); |
| 3608 | return 0; | 3609 | return 0; |
| 3609 | } | 3610 | } |
| 3610 | 3611 | ||
| @@ -3613,8 +3614,10 @@ rs_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 3613 | { | 3614 | { |
| 3614 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; | 3615 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; |
| 3615 | unsigned int result; | 3616 | unsigned int result; |
| 3617 | unsigned long flags; | ||
| 3618 | |||
| 3619 | local_irq_save(flags); | ||
| 3616 | 3620 | ||
| 3617 | lock_kernel(); | ||
| 3618 | result = | 3621 | result = |
| 3619 | (!E100_RTS_GET(info) ? TIOCM_RTS : 0) | 3622 | (!E100_RTS_GET(info) ? TIOCM_RTS : 0) |
| 3620 | | (!E100_DTR_GET(info) ? TIOCM_DTR : 0) | 3623 | | (!E100_DTR_GET(info) ? TIOCM_DTR : 0) |
| @@ -3623,7 +3626,7 @@ rs_tiocmget(struct tty_struct *tty, struct file *file) | |||
| 3623 | | (!E100_CD_GET(info) ? TIOCM_CAR : 0) | 3626 | | (!E100_CD_GET(info) ? TIOCM_CAR : 0) |
| 3624 | | (!E100_CTS_GET(info) ? TIOCM_CTS : 0); | 3627 | | (!E100_CTS_GET(info) ? TIOCM_CTS : 0); |
| 3625 | 3628 | ||
| 3626 | unlock_kernel(); | 3629 | local_irq_restore(flags); |
| 3627 | 3630 | ||
| 3628 | #ifdef SERIAL_DEBUG_IO | 3631 | #ifdef SERIAL_DEBUG_IO |
| 3629 | printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n", | 3632 | printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n", |
| @@ -3702,10 +3705,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | |||
| 3702 | { | 3705 | { |
| 3703 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; | 3706 | struct e100_serial *info = (struct e100_serial *)tty->driver_data; |
| 3704 | 3707 | ||
| 3705 | if (tty->termios->c_cflag == old_termios->c_cflag && | ||
| 3706 | tty->termios->c_iflag == old_termios->c_iflag) | ||
| 3707 | return; | ||
| 3708 | |||
| 3709 | change_speed(info); | 3708 | change_speed(info); |
| 3710 | 3709 | ||
| 3711 | /* Handle turning off CRTSCTS */ | 3710 | /* Handle turning off CRTSCTS */ |
| @@ -3808,10 +3807,8 @@ rs_close(struct tty_struct *tty, struct file * filp) | |||
| 3808 | #endif | 3807 | #endif |
| 3809 | 3808 | ||
| 3810 | shutdown(info); | 3809 | shutdown(info); |
| 3811 | if (tty->driver->flush_buffer) | 3810 | rs_flush_buffer(tty); |
| 3812 | tty->driver->flush_buffer(tty); | 3811 | tty_ldisc_flush_buffer(tty); |
| 3813 | if (tty->ldisc.flush_buffer) | ||
| 3814 | tty->ldisc.flush_buffer(tty); | ||
| 3815 | tty->closing = 0; | 3812 | tty->closing = 0; |
| 3816 | info->event = 0; | 3813 | info->event = 0; |
| 3817 | info->tty = 0; | 3814 | info->tty = 0; |
| @@ -3885,6 +3882,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 3885 | * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO | 3882 | * Check R_DMA_CHx_STATUS bit 0-6=number of available bytes in FIFO |
| 3886 | * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k) | 3883 | * R_DMA_CHx_HWSW bit 31-16=nbr of bytes left in DMA buffer (0=64k) |
| 3887 | */ | 3884 | */ |
| 3885 | lock_kernel(); | ||
| 3888 | orig_jiffies = jiffies; | 3886 | orig_jiffies = jiffies; |
| 3889 | while (info->xmit.head != info->xmit.tail || /* More in send queue */ | 3887 | while (info->xmit.head != info->xmit.tail || /* More in send queue */ |
| 3890 | (*info->ostatusadr & 0x007f) || /* more in FIFO */ | 3888 | (*info->ostatusadr & 0x007f) || /* more in FIFO */ |
| @@ -3901,6 +3899,7 @@ static void rs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 3901 | curr_time_usec - info->last_tx_active_usec; | 3899 | curr_time_usec - info->last_tx_active_usec; |
| 3902 | } | 3900 | } |
| 3903 | set_current_state(TASK_RUNNING); | 3901 | set_current_state(TASK_RUNNING); |
| 3902 | unlock_kernel(); | ||
| 3904 | } | 3903 | } |
| 3905 | 3904 | ||
| 3906 | /* | 3905 | /* |
| @@ -4520,7 +4519,7 @@ rs_init(void) | |||
| 4520 | 4519 | ||
| 4521 | if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, | 4520 | if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, |
| 4522 | IRQF_SHARED | IRQF_DISABLED, "serial ", driver)) | 4521 | IRQF_SHARED | IRQF_DISABLED, "serial ", driver)) |
| 4523 | panic("%s: Failed to request irq8", __FUNCTION__); | 4522 | panic("%s: Failed to request irq8", __func__); |
| 4524 | 4523 | ||
| 4525 | #endif | 4524 | #endif |
| 4526 | #endif /* CONFIG_SVINTO_SIM */ | 4525 | #endif /* CONFIG_SVINTO_SIM */ |
diff --git a/drivers/serial/ioc3_serial.c b/drivers/serial/ioc3_serial.c index 168073f12cec..4f1af71e9a1b 100644 --- a/drivers/serial/ioc3_serial.c +++ b/drivers/serial/ioc3_serial.c | |||
| @@ -52,7 +52,7 @@ static unsigned int Submodule_slot; | |||
| 52 | #define DPRINT_CONFIG(_x...) ; | 52 | #define DPRINT_CONFIG(_x...) ; |
| 53 | //#define DPRINT_CONFIG(_x...) printk _x | 53 | //#define DPRINT_CONFIG(_x...) printk _x |
| 54 | #define NOT_PROGRESS() ; | 54 | #define NOT_PROGRESS() ; |
| 55 | //#define NOT_PROGRESS() printk("%s : fails %d\n", __FUNCTION__, __LINE__) | 55 | //#define NOT_PROGRESS() printk("%s : fails %d\n", __func__, __LINE__) |
| 56 | 56 | ||
| 57 | /* number of characters we want to transmit to the lower level at a time */ | 57 | /* number of characters we want to transmit to the lower level at a time */ |
| 58 | #define MAX_CHARS 256 | 58 | #define MAX_CHARS 256 |
| @@ -445,7 +445,7 @@ static int inline port_init(struct ioc3_port *port) | |||
| 445 | sbbr_h = &idd->vma->sbbr_h; | 445 | sbbr_h = &idd->vma->sbbr_h; |
| 446 | ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; | 446 | ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; |
| 447 | DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n", | 447 | DPRINT_CONFIG(("%s: ring_pci_addr 0x%p\n", |
| 448 | __FUNCTION__, (void *)ring_pci_addr)); | 448 | __func__, (void *)ring_pci_addr)); |
| 449 | 449 | ||
| 450 | writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h); | 450 | writel((unsigned int)((uint64_t) ring_pci_addr >> 32), sbbr_h); |
| 451 | writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l); | 451 | writel((unsigned int)ring_pci_addr | BUF_SIZE_BIT, sbbr_l); |
| @@ -593,7 +593,7 @@ config_port(struct ioc3_port *port, | |||
| 593 | 593 | ||
| 594 | DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d " | 594 | DPRINT_CONFIG(("%s: line %d baud %d byte_size %d stop %d parenb %d " |
| 595 | "parodd %d\n", | 595 | "parodd %d\n", |
| 596 | __FUNCTION__, ((struct uart_port *)port->ip_port)->line, | 596 | __func__, ((struct uart_port *)port->ip_port)->line, |
| 597 | baud, byte_size, stop_bits, parenb, parodd)); | 597 | baud, byte_size, stop_bits, parenb, parodd)); |
| 598 | 598 | ||
| 599 | if (set_baud(port, baud)) | 599 | if (set_baud(port, baud)) |
| @@ -871,14 +871,14 @@ static int ioc3_set_proto(struct ioc3_port *port, int proto) | |||
| 871 | default: | 871 | default: |
| 872 | case PROTO_RS232: | 872 | case PROTO_RS232: |
| 873 | /* Clear the appropriate GIO pin */ | 873 | /* Clear the appropriate GIO pin */ |
| 874 | DPRINT_CONFIG(("%s: rs232\n", __FUNCTION__)); | 874 | DPRINT_CONFIG(("%s: rs232\n", __func__)); |
| 875 | writel(0, (&port->ip_idd->vma->gppr[0] | 875 | writel(0, (&port->ip_idd->vma->gppr[0] |
| 876 | + hooks->rs422_select_pin)); | 876 | + hooks->rs422_select_pin)); |
| 877 | break; | 877 | break; |
| 878 | 878 | ||
| 879 | case PROTO_RS422: | 879 | case PROTO_RS422: |
| 880 | /* Set the appropriate GIO pin */ | 880 | /* Set the appropriate GIO pin */ |
| 881 | DPRINT_CONFIG(("%s: rs422\n", __FUNCTION__)); | 881 | DPRINT_CONFIG(("%s: rs422\n", __func__)); |
| 882 | writel(1, (&port->ip_idd->vma->gppr[0] | 882 | writel(1, (&port->ip_idd->vma->gppr[0] |
| 883 | + hooks->rs422_select_pin)); | 883 | + hooks->rs422_select_pin)); |
| 884 | break; | 884 | break; |
| @@ -988,7 +988,7 @@ ioc3_change_speed(struct uart_port *the_port, | |||
| 988 | } | 988 | } |
| 989 | baud = uart_get_baud_rate(the_port, new_termios, old_termios, | 989 | baud = uart_get_baud_rate(the_port, new_termios, old_termios, |
| 990 | MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); | 990 | MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); |
| 991 | DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __FUNCTION__, baud, | 991 | DPRINT_CONFIG(("%s: returned baud %d for line %d\n", __func__, baud, |
| 992 | the_port->line)); | 992 | the_port->line)); |
| 993 | 993 | ||
| 994 | if (!the_port->fifosize) | 994 | if (!the_port->fifosize) |
| @@ -1026,7 +1026,7 @@ ioc3_change_speed(struct uart_port *the_port, | |||
| 1026 | DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o " | 1026 | DPRINT_CONFIG(("%s : port 0x%p line %d cflag 0%o " |
| 1027 | "config_port(baud %d data %d stop %d penable %d " | 1027 | "config_port(baud %d data %d stop %d penable %d " |
| 1028 | " parity %d), notification 0x%x\n", | 1028 | " parity %d), notification 0x%x\n", |
| 1029 | __FUNCTION__, (void *)port, the_port->line, cflag, baud, | 1029 | __func__, (void *)port, the_port->line, cflag, baud, |
| 1030 | new_data, new_stop, new_parity_enable, new_parity, | 1030 | new_data, new_stop, new_parity_enable, new_parity, |
| 1031 | the_port->ignore_status_mask)); | 1031 | the_port->ignore_status_mask)); |
| 1032 | 1032 | ||
| @@ -1919,7 +1919,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is, | |||
| 1919 | struct pci_dev *pdev = idd->pdev; | 1919 | struct pci_dev *pdev = idd->pdev; |
| 1920 | 1920 | ||
| 1921 | DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n", | 1921 | DPRINT_CONFIG(("%s: attach pdev 0x%p - card_ptr 0x%p\n", |
| 1922 | __FUNCTION__, pdev, (void *)card_ptr)); | 1922 | __func__, pdev, (void *)card_ptr)); |
| 1923 | 1923 | ||
| 1924 | if (!card_ptr) | 1924 | if (!card_ptr) |
| 1925 | return -ENODEV; | 1925 | return -ENODEV; |
| @@ -1933,7 +1933,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is, | |||
| 1933 | port->ip_port = the_port; | 1933 | port->ip_port = the_port; |
| 1934 | 1934 | ||
| 1935 | DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n", | 1935 | DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p [%d/%d]\n", |
| 1936 | __FUNCTION__, (void *)the_port, (void *)port, | 1936 | __func__, (void *)the_port, (void *)port, |
| 1937 | phys_port, ii)); | 1937 | phys_port, ii)); |
| 1938 | 1938 | ||
| 1939 | /* membase, iobase and mapbase just need to be non-0 */ | 1939 | /* membase, iobase and mapbase just need to be non-0 */ |
| @@ -1950,7 +1950,7 @@ static inline int ioc3_serial_core_attach( struct ioc3_submodule *is, | |||
| 1950 | if (uart_add_one_port(&ioc3_uart, the_port) < 0) { | 1950 | if (uart_add_one_port(&ioc3_uart, the_port) < 0) { |
| 1951 | printk(KERN_WARNING | 1951 | printk(KERN_WARNING |
| 1952 | "%s: unable to add port %d bus %d\n", | 1952 | "%s: unable to add port %d bus %d\n", |
| 1953 | __FUNCTION__, the_port->line, pdev->bus->number); | 1953 | __func__, the_port->line, pdev->bus->number); |
| 1954 | } else { | 1954 | } else { |
| 1955 | DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n", | 1955 | DPRINT_CONFIG(("IOC3 serial port %d irq %d bus %d\n", |
| 1956 | the_port->line, the_port->irq, pdev->bus->number)); | 1956 | the_port->line, the_port->irq, pdev->bus->number)); |
| @@ -2017,7 +2017,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2017 | struct ioc3_port *ports[PORTS_PER_CARD]; | 2017 | struct ioc3_port *ports[PORTS_PER_CARD]; |
| 2018 | int phys_port; | 2018 | int phys_port; |
| 2019 | 2019 | ||
| 2020 | DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, is, idd)); | 2020 | DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, is, idd)); |
| 2021 | 2021 | ||
| 2022 | card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL); | 2022 | card_ptr = kzalloc(sizeof(struct ioc3_card), GFP_KERNEL); |
| 2023 | if (!card_ptr) { | 2023 | if (!card_ptr) { |
| @@ -2067,7 +2067,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2067 | 2067 | ||
| 2068 | DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p " | 2068 | DPRINT_CONFIG(("%s : Port A ip_serial_regs 0x%p " |
| 2069 | "ip_uart_regs 0x%p\n", | 2069 | "ip_uart_regs 0x%p\n", |
| 2070 | __FUNCTION__, | 2070 | __func__, |
| 2071 | (void *)port->ip_serial_regs, | 2071 | (void *)port->ip_serial_regs, |
| 2072 | (void *)port->ip_uart_regs)); | 2072 | (void *)port->ip_uart_regs)); |
| 2073 | 2073 | ||
| @@ -2082,7 +2082,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2082 | DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p " | 2082 | DPRINT_CONFIG(("%s : Port A ip_cpu_ringbuf 0x%p " |
| 2083 | "ip_dma_ringbuf 0x%p, ip_inring 0x%p " | 2083 | "ip_dma_ringbuf 0x%p, ip_inring 0x%p " |
| 2084 | "ip_outring 0x%p\n", | 2084 | "ip_outring 0x%p\n", |
| 2085 | __FUNCTION__, | 2085 | __func__, |
| 2086 | (void *)port->ip_cpu_ringbuf, | 2086 | (void *)port->ip_cpu_ringbuf, |
| 2087 | (void *)port->ip_dma_ringbuf, | 2087 | (void *)port->ip_dma_ringbuf, |
| 2088 | (void *)port->ip_inring, | 2088 | (void *)port->ip_inring, |
| @@ -2094,7 +2094,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2094 | 2094 | ||
| 2095 | DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p " | 2095 | DPRINT_CONFIG(("%s : Port B ip_serial_regs 0x%p " |
| 2096 | "ip_uart_regs 0x%p\n", | 2096 | "ip_uart_regs 0x%p\n", |
| 2097 | __FUNCTION__, | 2097 | __func__, |
| 2098 | (void *)port->ip_serial_regs, | 2098 | (void *)port->ip_serial_regs, |
| 2099 | (void *)port->ip_uart_regs)); | 2099 | (void *)port->ip_uart_regs)); |
| 2100 | 2100 | ||
| @@ -2108,7 +2108,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2108 | DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p " | 2108 | DPRINT_CONFIG(("%s : Port B ip_cpu_ringbuf 0x%p " |
| 2109 | "ip_dma_ringbuf 0x%p, ip_inring 0x%p " | 2109 | "ip_dma_ringbuf 0x%p, ip_inring 0x%p " |
| 2110 | "ip_outring 0x%p\n", | 2110 | "ip_outring 0x%p\n", |
| 2111 | __FUNCTION__, | 2111 | __func__, |
| 2112 | (void *)port->ip_cpu_ringbuf, | 2112 | (void *)port->ip_cpu_ringbuf, |
| 2113 | (void *)port->ip_dma_ringbuf, | 2113 | (void *)port->ip_dma_ringbuf, |
| 2114 | (void *)port->ip_inring, | 2114 | (void *)port->ip_inring, |
| @@ -2116,7 +2116,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2116 | } | 2116 | } |
| 2117 | 2117 | ||
| 2118 | DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p", | 2118 | DPRINT_CONFIG(("%s : port %d [addr 0x%p] card_ptr 0x%p", |
| 2119 | __FUNCTION__, | 2119 | __func__, |
| 2120 | phys_port, (void *)port, (void *)card_ptr)); | 2120 | phys_port, (void *)port, (void *)card_ptr)); |
| 2121 | DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", | 2121 | DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", |
| 2122 | (void *)port->ip_serial_regs, | 2122 | (void *)port->ip_serial_regs, |
| @@ -2127,7 +2127,7 @@ ioc3uart_probe(struct ioc3_submodule *is, struct ioc3_driver_data *idd) | |||
| 2127 | 2127 | ||
| 2128 | DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p " | 2128 | DPRINT_CONFIG(("%s: phys_port %d port 0x%p inring 0x%p " |
| 2129 | "outring 0x%p\n", | 2129 | "outring 0x%p\n", |
| 2130 | __FUNCTION__, | 2130 | __func__, |
| 2131 | phys_port, (void *)port, | 2131 | phys_port, (void *)port, |
| 2132 | (void *)port->ip_inring, | 2132 | (void *)port->ip_inring, |
| 2133 | (void *)port->ip_outring)); | 2133 | (void *)port->ip_outring)); |
| @@ -2170,7 +2170,7 @@ static int __devinit ioc3uart_init(void) | |||
| 2170 | if ((ret = uart_register_driver(&ioc3_uart)) < 0) { | 2170 | if ((ret = uart_register_driver(&ioc3_uart)) < 0) { |
| 2171 | printk(KERN_WARNING | 2171 | printk(KERN_WARNING |
| 2172 | "%s: Couldn't register IOC3 uart serial driver\n", | 2172 | "%s: Couldn't register IOC3 uart serial driver\n", |
| 2173 | __FUNCTION__); | 2173 | __func__); |
| 2174 | return ret; | 2174 | return ret; |
| 2175 | } | 2175 | } |
| 2176 | ret = ioc3_register_submodule(&ioc3uart_submodule); | 2176 | ret = ioc3_register_submodule(&ioc3uart_submodule); |
diff --git a/drivers/serial/ioc4_serial.c b/drivers/serial/ioc4_serial.c index 0c179384fb0c..49b8a82b7b9f 100644 --- a/drivers/serial/ioc4_serial.c +++ b/drivers/serial/ioc4_serial.c | |||
| @@ -889,7 +889,7 @@ static int inline port_init(struct ioc4_port *port) | |||
| 889 | 889 | ||
| 890 | ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; | 890 | ring_pci_addr = (unsigned long __iomem)port->ip_dma_ringbuf; |
| 891 | DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n", | 891 | DPRINT_CONFIG(("%s: ring_pci_addr 0x%lx\n", |
| 892 | __FUNCTION__, ring_pci_addr)); | 892 | __func__, ring_pci_addr)); |
| 893 | 893 | ||
| 894 | writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h); | 894 | writel((unsigned int)((uint64_t)ring_pci_addr >> 32), sbbr_h); |
| 895 | writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l); | 895 | writel((unsigned int)ring_pci_addr | IOC4_BUF_SIZE_BIT, sbbr_l); |
| @@ -1028,7 +1028,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg) | |||
| 1028 | spin_lock_irqsave(&soft->is_ir_lock, flag); | 1028 | spin_lock_irqsave(&soft->is_ir_lock, flag); |
| 1029 | printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x " | 1029 | printk ("%s : %d : mem 0x%p sio_ir 0x%x sio_ies 0x%x " |
| 1030 | "other_ir 0x%x other_ies 0x%x mask 0x%x\n", | 1030 | "other_ir 0x%x other_ies 0x%x mask 0x%x\n", |
| 1031 | __FUNCTION__, __LINE__, | 1031 | __func__, __LINE__, |
| 1032 | (void *)mem, readl(&mem->sio_ir.raw), | 1032 | (void *)mem, readl(&mem->sio_ir.raw), |
| 1033 | readl(&mem->sio_ies.raw), | 1033 | readl(&mem->sio_ies.raw), |
| 1034 | readl(&mem->other_ir.raw), | 1034 | readl(&mem->other_ir.raw), |
| @@ -1155,14 +1155,14 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd) | |||
| 1155 | (TOTAL_RING_BUF_SIZE - 1)) == 0)); | 1155 | (TOTAL_RING_BUF_SIZE - 1)) == 0)); |
| 1156 | DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p " | 1156 | DPRINT_CONFIG(("%s : ip_cpu_ringbuf 0x%p " |
| 1157 | "ip_dma_ringbuf 0x%p\n", | 1157 | "ip_dma_ringbuf 0x%p\n", |
| 1158 | __FUNCTION__, | 1158 | __func__, |
| 1159 | (void *)port->ip_cpu_ringbuf, | 1159 | (void *)port->ip_cpu_ringbuf, |
| 1160 | (void *)port->ip_dma_ringbuf)); | 1160 | (void *)port->ip_dma_ringbuf)); |
| 1161 | port->ip_inring = RING(port, RX_0_OR_2); | 1161 | port->ip_inring = RING(port, RX_0_OR_2); |
| 1162 | port->ip_outring = RING(port, TX_0_OR_2); | 1162 | port->ip_outring = RING(port, TX_0_OR_2); |
| 1163 | } | 1163 | } |
| 1164 | DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p", | 1164 | DPRINT_CONFIG(("%s : port %d [addr 0x%p] control 0x%p", |
| 1165 | __FUNCTION__, | 1165 | __func__, |
| 1166 | port_number, (void *)port, (void *)control)); | 1166 | port_number, (void *)port, (void *)control)); |
| 1167 | DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", | 1167 | DPRINT_CONFIG((" ip_serial_regs 0x%p ip_uart_regs 0x%p\n", |
| 1168 | (void *)port->ip_serial_regs, | 1168 | (void *)port->ip_serial_regs, |
| @@ -1173,7 +1173,7 @@ static int inline ioc4_attach_local(struct ioc4_driver_data *idd) | |||
| 1173 | 1173 | ||
| 1174 | DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p " | 1174 | DPRINT_CONFIG(("%s: port_number %d port 0x%p inring 0x%p " |
| 1175 | "outring 0x%p\n", | 1175 | "outring 0x%p\n", |
| 1176 | __FUNCTION__, | 1176 | __func__, |
| 1177 | port_number, (void *)port, | 1177 | port_number, (void *)port, |
| 1178 | (void *)port->ip_inring, | 1178 | (void *)port->ip_inring, |
| 1179 | (void *)port->ip_outring)); | 1179 | (void *)port->ip_outring)); |
| @@ -1317,7 +1317,7 @@ config_port(struct ioc4_port *port, | |||
| 1317 | int spiniter = 0; | 1317 | int spiniter = 0; |
| 1318 | 1318 | ||
| 1319 | DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n", | 1319 | DPRINT_CONFIG(("%s: baud %d byte_size %d stop %d parenb %d parodd %d\n", |
| 1320 | __FUNCTION__, baud, byte_size, stop_bits, parenb, parodd)); | 1320 | __func__, baud, byte_size, stop_bits, parenb, parodd)); |
| 1321 | 1321 | ||
| 1322 | if (set_baud(port, baud)) | 1322 | if (set_baud(port, baud)) |
| 1323 | return 1; | 1323 | return 1; |
| @@ -1725,7 +1725,7 @@ ioc4_change_speed(struct uart_port *the_port, | |||
| 1725 | } | 1725 | } |
| 1726 | baud = uart_get_baud_rate(the_port, new_termios, old_termios, | 1726 | baud = uart_get_baud_rate(the_port, new_termios, old_termios, |
| 1727 | MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); | 1727 | MIN_BAUD_SUPPORTED, MAX_BAUD_SUPPORTED); |
| 1728 | DPRINT_CONFIG(("%s: returned baud %d\n", __FUNCTION__, baud)); | 1728 | DPRINT_CONFIG(("%s: returned baud %d\n", __func__, baud)); |
| 1729 | 1729 | ||
| 1730 | /* default is 9600 */ | 1730 | /* default is 9600 */ |
| 1731 | if (!baud) | 1731 | if (!baud) |
| @@ -1765,7 +1765,7 @@ ioc4_change_speed(struct uart_port *the_port, | |||
| 1765 | DPRINT_CONFIG(("%s : port 0x%p cflag 0%o " | 1765 | DPRINT_CONFIG(("%s : port 0x%p cflag 0%o " |
| 1766 | "config_port(baud %d data %d stop %d p enable %d parity %d)," | 1766 | "config_port(baud %d data %d stop %d p enable %d parity %d)," |
| 1767 | " notification 0x%x\n", | 1767 | " notification 0x%x\n", |
| 1768 | __FUNCTION__, (void *)port, cflag, baud, new_data, new_stop, | 1768 | __func__, (void *)port, cflag, baud, new_data, new_stop, |
| 1769 | new_parity_enable, new_parity, the_port->ignore_status_mask)); | 1769 | new_parity_enable, new_parity, the_port->ignore_status_mask)); |
| 1770 | 1770 | ||
| 1771 | if ((config_port(port, baud, /* baud */ | 1771 | if ((config_port(port, baud, /* baud */ |
| @@ -2715,7 +2715,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type) | |||
| 2715 | 2715 | ||
| 2716 | 2716 | ||
| 2717 | DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n", | 2717 | DPRINT_CONFIG(("%s: attach pdev 0x%p - control 0x%p\n", |
| 2718 | __FUNCTION__, pdev, (void *)control)); | 2718 | __func__, pdev, (void *)control)); |
| 2719 | 2719 | ||
| 2720 | if (!control) | 2720 | if (!control) |
| 2721 | return -ENODEV; | 2721 | return -ENODEV; |
| @@ -2734,7 +2734,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type) | |||
| 2734 | port->ip_all_ports[port_type_idx] = the_port; | 2734 | port->ip_all_ports[port_type_idx] = the_port; |
| 2735 | 2735 | ||
| 2736 | DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n", | 2736 | DPRINT_CONFIG(("%s: attach the_port 0x%p / port 0x%p : type %s\n", |
| 2737 | __FUNCTION__, (void *)the_port, | 2737 | __func__, (void *)the_port, |
| 2738 | (void *)port, | 2738 | (void *)port, |
| 2739 | port_type == PROTO_RS232 ? "rs232" : "rs422")); | 2739 | port_type == PROTO_RS232 ? "rs232" : "rs422")); |
| 2740 | 2740 | ||
| @@ -2752,7 +2752,7 @@ ioc4_serial_core_attach(struct pci_dev *pdev, int port_type) | |||
| 2752 | if (uart_add_one_port(u_driver, the_port) < 0) { | 2752 | if (uart_add_one_port(u_driver, the_port) < 0) { |
| 2753 | printk(KERN_WARNING | 2753 | printk(KERN_WARNING |
| 2754 | "%s: unable to add port %d bus %d\n", | 2754 | "%s: unable to add port %d bus %d\n", |
| 2755 | __FUNCTION__, the_port->line, pdev->bus->number); | 2755 | __func__, the_port->line, pdev->bus->number); |
| 2756 | } else { | 2756 | } else { |
| 2757 | DPRINT_CONFIG( | 2757 | DPRINT_CONFIG( |
| 2758 | ("IOC4 serial port %d irq = %d, bus %d\n", | 2758 | ("IOC4 serial port %d irq = %d, bus %d\n", |
| @@ -2777,7 +2777,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd) | |||
| 2777 | int ret = 0; | 2777 | int ret = 0; |
| 2778 | 2778 | ||
| 2779 | 2779 | ||
| 2780 | DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __FUNCTION__, idd->idd_pdev, | 2780 | DPRINT_CONFIG(("%s (0x%p, 0x%p)\n", __func__, idd->idd_pdev, |
| 2781 | idd->idd_pci_id)); | 2781 | idd->idd_pci_id)); |
| 2782 | 2782 | ||
| 2783 | /* PCI-RT does not bring out serial connections. | 2783 | /* PCI-RT does not bring out serial connections. |
| @@ -2806,7 +2806,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd) | |||
| 2806 | goto out2; | 2806 | goto out2; |
| 2807 | } | 2807 | } |
| 2808 | DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n", | 2808 | DPRINT_CONFIG(("%s : mem 0x%p, serial 0x%p\n", |
| 2809 | __FUNCTION__, (void *)idd->idd_misc_regs, | 2809 | __func__, (void *)idd->idd_misc_regs, |
| 2810 | (void *)serial)); | 2810 | (void *)serial)); |
| 2811 | 2811 | ||
| 2812 | /* Get memory for the new card */ | 2812 | /* Get memory for the new card */ |
| @@ -2858,7 +2858,7 @@ ioc4_serial_attach_one(struct ioc4_driver_data *idd) | |||
| 2858 | } else { | 2858 | } else { |
| 2859 | printk(KERN_WARNING | 2859 | printk(KERN_WARNING |
| 2860 | "%s : request_irq fails for IRQ 0x%x\n ", | 2860 | "%s : request_irq fails for IRQ 0x%x\n ", |
| 2861 | __FUNCTION__, idd->idd_pdev->irq); | 2861 | __func__, idd->idd_pdev->irq); |
| 2862 | } | 2862 | } |
| 2863 | ret = ioc4_attach_local(idd); | 2863 | ret = ioc4_attach_local(idd); |
| 2864 | if (ret) | 2864 | if (ret) |
| @@ -2911,13 +2911,13 @@ int ioc4_serial_init(void) | |||
| 2911 | if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) { | 2911 | if ((ret = uart_register_driver(&ioc4_uart_rs232)) < 0) { |
| 2912 | printk(KERN_WARNING | 2912 | printk(KERN_WARNING |
| 2913 | "%s: Couldn't register rs232 IOC4 serial driver\n", | 2913 | "%s: Couldn't register rs232 IOC4 serial driver\n", |
| 2914 | __FUNCTION__); | 2914 | __func__); |
| 2915 | return ret; | 2915 | return ret; |
| 2916 | } | 2916 | } |
| 2917 | if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) { | 2917 | if ((ret = uart_register_driver(&ioc4_uart_rs422)) < 0) { |
| 2918 | printk(KERN_WARNING | 2918 | printk(KERN_WARNING |
| 2919 | "%s: Couldn't register rs422 IOC4 serial driver\n", | 2919 | "%s: Couldn't register rs422 IOC4 serial driver\n", |
| 2920 | __FUNCTION__); | 2920 | __func__); |
| 2921 | return ret; | 2921 | return ret; |
| 2922 | } | 2922 | } |
| 2923 | 2923 | ||
diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c index 9cf03327386a..eadc1ab6bbce 100644 --- a/drivers/serial/kgdboc.c +++ b/drivers/serial/kgdboc.c | |||
| @@ -96,12 +96,14 @@ static void cleanup_kgdboc(void) | |||
| 96 | 96 | ||
| 97 | static int kgdboc_get_char(void) | 97 | static int kgdboc_get_char(void) |
| 98 | { | 98 | { |
| 99 | return kgdb_tty_driver->poll_get_char(kgdb_tty_driver, kgdb_tty_line); | 99 | return kgdb_tty_driver->ops->poll_get_char(kgdb_tty_driver, |
| 100 | kgdb_tty_line); | ||
| 100 | } | 101 | } |
| 101 | 102 | ||
| 102 | static void kgdboc_put_char(u8 chr) | 103 | static void kgdboc_put_char(u8 chr) |
| 103 | { | 104 | { |
| 104 | kgdb_tty_driver->poll_put_char(kgdb_tty_driver, kgdb_tty_line, chr); | 105 | kgdb_tty_driver->ops->poll_put_char(kgdb_tty_driver, |
| 106 | kgdb_tty_line, chr); | ||
| 105 | } | 107 | } |
| 106 | 108 | ||
| 107 | static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) | 109 | static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp) |
diff --git a/drivers/serial/mcfserial.c b/drivers/serial/mcfserial.c index ddd3aa50d4ad..43af40d59b8a 100644 --- a/drivers/serial/mcfserial.c +++ b/drivers/serial/mcfserial.c | |||
| @@ -1072,18 +1072,6 @@ static int mcfrs_ioctl(struct tty_struct *tty, struct file * file, | |||
| 1072 | tty_wait_until_sent(tty, 0); | 1072 | tty_wait_until_sent(tty, 0); |
| 1073 | send_break(info, arg ? arg*(HZ/10) : HZ/4); | 1073 | send_break(info, arg ? arg*(HZ/10) : HZ/4); |
| 1074 | return 0; | 1074 | return 0; |
| 1075 | case TIOCGSOFTCAR: | ||
| 1076 | error = put_user(C_CLOCAL(tty) ? 1 : 0, | ||
| 1077 | (unsigned long *) arg); | ||
| 1078 | if (error) | ||
| 1079 | return error; | ||
| 1080 | return 0; | ||
| 1081 | case TIOCSSOFTCAR: | ||
| 1082 | get_user(arg, (unsigned long *) arg); | ||
| 1083 | tty->termios->c_cflag = | ||
| 1084 | ((tty->termios->c_cflag & ~CLOCAL) | | ||
| 1085 | (arg ? CLOCAL : 0)); | ||
| 1086 | return 0; | ||
| 1087 | case TIOCGSERIAL: | 1075 | case TIOCGSERIAL: |
| 1088 | if (access_ok(VERIFY_WRITE, (void *) arg, | 1076 | if (access_ok(VERIFY_WRITE, (void *) arg, |
| 1089 | sizeof(struct serial_struct))) | 1077 | sizeof(struct serial_struct))) |
| @@ -1222,8 +1210,7 @@ static void mcfrs_close(struct tty_struct *tty, struct file * filp) | |||
| 1222 | } else | 1210 | } else |
| 1223 | #endif | 1211 | #endif |
| 1224 | shutdown(info); | 1212 | shutdown(info); |
| 1225 | if (tty->driver->flush_buffer) | 1213 | mcfrs_flush_buffer(tty); |
| 1226 | tty->driver->flush_buffer(tty); | ||
| 1227 | tty_ldisc_flush(tty); | 1214 | tty_ldisc_flush(tty); |
| 1228 | 1215 | ||
| 1229 | tty->closing = 0; | 1216 | tty->closing = 0; |
| @@ -1276,6 +1263,8 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1276 | * Note: we have to use pretty tight timings here to satisfy | 1263 | * Note: we have to use pretty tight timings here to satisfy |
| 1277 | * the NIST-PCTS. | 1264 | * the NIST-PCTS. |
| 1278 | */ | 1265 | */ |
| 1266 | lock_kernel(); | ||
| 1267 | |||
| 1279 | fifo_time = (MCF5272_FIFO_SIZE * HZ * 10) / info->baud; | 1268 | fifo_time = (MCF5272_FIFO_SIZE * HZ * 10) / info->baud; |
| 1280 | char_time = fifo_time / 5; | 1269 | char_time = fifo_time / 5; |
| 1281 | if (char_time == 0) | 1270 | if (char_time == 0) |
| @@ -1312,6 +1301,7 @@ mcfrs_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1312 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) | 1301 | if (timeout && time_after(jiffies, orig_jiffies + timeout)) |
| 1313 | break; | 1302 | break; |
| 1314 | } | 1303 | } |
| 1304 | unlock_kernel(); | ||
| 1315 | #else | 1305 | #else |
| 1316 | /* | 1306 | /* |
| 1317 | * For the other coldfire models, assume all data has been sent | 1307 | * For the other coldfire models, assume all data has been sent |
| @@ -1907,7 +1897,7 @@ static struct tty_driver *mcfrs_console_device(struct console *c, int *index) | |||
| 1907 | * This is used for console output. | 1897 | * This is used for console output. |
| 1908 | */ | 1898 | */ |
| 1909 | 1899 | ||
| 1910 | void mcfrs_put_char(char ch) | 1900 | int mcfrs_put_char(char ch) |
| 1911 | { | 1901 | { |
| 1912 | volatile unsigned char *uartp; | 1902 | volatile unsigned char *uartp; |
| 1913 | unsigned long flags; | 1903 | unsigned long flags; |
| @@ -1931,7 +1921,7 @@ void mcfrs_put_char(char ch) | |||
| 1931 | mcfrs_init_console(); /* try and get it back */ | 1921 | mcfrs_init_console(); /* try and get it back */ |
| 1932 | local_irq_restore(flags); | 1922 | local_irq_restore(flags); |
| 1933 | 1923 | ||
| 1934 | return; | 1924 | return 1; |
| 1935 | } | 1925 | } |
| 1936 | 1926 | ||
| 1937 | 1927 | ||
diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c index d93b3578c5e2..7a3625f52a03 100644 --- a/drivers/serial/mpc52xx_uart.c +++ b/drivers/serial/mpc52xx_uart.c | |||
| @@ -1221,8 +1221,8 @@ static struct of_device_id mpc52xx_uart_of_match[] = { | |||
| 1221 | #endif | 1221 | #endif |
| 1222 | #ifdef CONFIG_PPC_MPC512x | 1222 | #ifdef CONFIG_PPC_MPC512x |
| 1223 | { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, | 1223 | { .compatible = "fsl,mpc5121-psc-uart", .data = &mpc512x_psc_ops, }, |
| 1224 | {}, | ||
| 1225 | #endif | 1224 | #endif |
| 1225 | {}, | ||
| 1226 | }; | 1226 | }; |
| 1227 | 1227 | ||
| 1228 | static int __devinit | 1228 | static int __devinit |
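The mpc52xx hunk above moves the empty terminator outside the #ifdef: a kernel built without CONFIG_PPC_MPC512x would otherwise end the match table with a real entry, and the OF matching code would walk past the end. The pattern, reduced to a sketch (table name and config symbol are illustrative, not from this driver):

    #include <linux/mod_devicetable.h>

    static struct of_device_id demo_of_match[] = {
            { .compatible = "vendor,demo-psc-uart", },
    #ifdef CONFIG_DEMO_VARIANT
            { .compatible = "vendor,demo-variant-uart", },
    #endif
            {},     /* sentinel: must terminate the table in every configuration */
    };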
diff --git a/drivers/serial/netx-serial.c b/drivers/serial/netx-serial.c index 3123ffeac8ad..81ac9bb4f39b 100644 --- a/drivers/serial/netx-serial.c +++ b/drivers/serial/netx-serial.c | |||
| @@ -287,6 +287,7 @@ static void netx_set_mctrl(struct uart_port *port, unsigned int mctrl) | |||
| 287 | { | 287 | { |
| 288 | unsigned int val; | 288 | unsigned int val; |
| 289 | 289 | ||
| 290 | /* FIXME: Locking needed ? */ | ||
| 290 | if (mctrl & TIOCM_RTS) { | 291 | if (mctrl & TIOCM_RTS) { |
| 291 | val = readl(port->membase + UART_RTS_CR); | 292 | val = readl(port->membase + UART_RTS_CR); |
| 292 | writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); | 293 | writel(val | RTS_CR_RTS, port->membase + UART_RTS_CR); |
diff --git a/drivers/serial/s3c2410.c b/drivers/serial/s3c2410.c index da5a02cb4f63..2b6a013639e6 100644 --- a/drivers/serial/s3c2410.c +++ b/drivers/serial/s3c2410.c | |||
| @@ -1096,13 +1096,13 @@ static int s3c24xx_serial_probe(struct platform_device *dev, | |||
| 1096 | ourport = &s3c24xx_serial_ports[probe_index]; | 1096 | ourport = &s3c24xx_serial_ports[probe_index]; |
| 1097 | probe_index++; | 1097 | probe_index++; |
| 1098 | 1098 | ||
| 1099 | dbg("%s: initialising port %p...\n", __FUNCTION__, ourport); | 1099 | dbg("%s: initialising port %p...\n", __func__, ourport); |
| 1100 | 1100 | ||
| 1101 | ret = s3c24xx_serial_init_port(ourport, info, dev); | 1101 | ret = s3c24xx_serial_init_port(ourport, info, dev); |
| 1102 | if (ret < 0) | 1102 | if (ret < 0) |
| 1103 | goto probe_err; | 1103 | goto probe_err; |
| 1104 | 1104 | ||
| 1105 | dbg("%s: adding port\n", __FUNCTION__); | 1105 | dbg("%s: adding port\n", __func__); |
| 1106 | uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); | 1106 | uart_add_one_port(&s3c24xx_uart_drv, &ourport->port); |
| 1107 | platform_set_drvdata(dev, &ourport->port); | 1107 | platform_set_drvdata(dev, &ourport->port); |
| 1108 | 1108 | ||
| @@ -1587,7 +1587,7 @@ static int s3c2412_serial_resetport(struct uart_port *port, | |||
| 1587 | unsigned long ucon = rd_regl(port, S3C2410_UCON); | 1587 | unsigned long ucon = rd_regl(port, S3C2410_UCON); |
| 1588 | 1588 | ||
| 1589 | dbg("%s: port=%p (%08lx), cfg=%p\n", | 1589 | dbg("%s: port=%p (%08lx), cfg=%p\n", |
| 1590 | __FUNCTION__, port, port->mapbase, cfg); | 1590 | __func__, port, port->mapbase, cfg); |
| 1591 | 1591 | ||
| 1592 | /* ensure we don't change the clock settings... */ | 1592 | /* ensure we don't change the clock settings... */ |
| 1593 | 1593 | ||
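The __FUNCTION__ to __func__ conversions in this and the following drivers swap GCC's old spelling for the C99 predefined identifier; behaviour is unchanged. Trivial illustration (the function is hypothetical):

    #include <linux/kernel.h>

    /* __func__ is a predefined identifier holding the enclosing function's name */
    static void demo_probe(int index)
    {
            printk(KERN_DEBUG "%s: initialising port %d\n", __func__, index);
    }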
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c index 67b2338913c2..62b38582f5e9 100644 --- a/drivers/serial/sa1100.c +++ b/drivers/serial/sa1100.c | |||
| @@ -655,7 +655,7 @@ void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns) | |||
| 655 | void __init sa1100_register_uart(int idx, int port) | 655 | void __init sa1100_register_uart(int idx, int port) |
| 656 | { | 656 | { |
| 657 | if (idx >= NR_PORTS) { | 657 | if (idx >= NR_PORTS) { |
| 658 | printk(KERN_ERR "%s: bad index number %d\n", __FUNCTION__, idx); | 658 | printk(KERN_ERR "%s: bad index number %d\n", __func__, idx); |
| 659 | return; | 659 | return; |
| 660 | } | 660 | } |
| 661 | 661 | ||
| @@ -682,7 +682,7 @@ void __init sa1100_register_uart(int idx, int port) | |||
| 682 | break; | 682 | break; |
| 683 | 683 | ||
| 684 | default: | 684 | default: |
| 685 | printk(KERN_ERR "%s: bad port number %d\n", __FUNCTION__, port); | 685 | printk(KERN_ERR "%s: bad port number %d\n", __func__, port); |
| 686 | } | 686 | } |
| 687 | } | 687 | } |
| 688 | 688 | ||
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 977ce820ce30..1e2b9d826f69 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
| @@ -422,6 +422,7 @@ uart_get_divisor(struct uart_port *port, unsigned int baud) | |||
| 422 | 422 | ||
| 423 | EXPORT_SYMBOL(uart_get_divisor); | 423 | EXPORT_SYMBOL(uart_get_divisor); |
| 424 | 424 | ||
| 425 | /* FIXME: Consistent locking policy */ | ||
| 425 | static void | 426 | static void |
| 426 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) | 427 | uart_change_speed(struct uart_state *state, struct ktermios *old_termios) |
| 427 | { | 428 | { |
| @@ -454,27 +455,30 @@ uart_change_speed(struct uart_state *state, struct ktermios *old_termios) | |||
| 454 | port->ops->set_termios(port, termios, old_termios); | 455 | port->ops->set_termios(port, termios, old_termios); |
| 455 | } | 456 | } |
| 456 | 457 | ||
| 457 | static inline void | 458 | static inline int |
| 458 | __uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c) | 459 | __uart_put_char(struct uart_port *port, struct circ_buf *circ, unsigned char c) |
| 459 | { | 460 | { |
| 460 | unsigned long flags; | 461 | unsigned long flags; |
| 462 | int ret = 0; | ||
| 461 | 463 | ||
| 462 | if (!circ->buf) | 464 | if (!circ->buf) |
| 463 | return; | 465 | return 0; |
| 464 | 466 | ||
| 465 | spin_lock_irqsave(&port->lock, flags); | 467 | spin_lock_irqsave(&port->lock, flags); |
| 466 | if (uart_circ_chars_free(circ) != 0) { | 468 | if (uart_circ_chars_free(circ) != 0) { |
| 467 | circ->buf[circ->head] = c; | 469 | circ->buf[circ->head] = c; |
| 468 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); | 470 | circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1); |
| 471 | ret = 1; | ||
| 469 | } | 472 | } |
| 470 | spin_unlock_irqrestore(&port->lock, flags); | 473 | spin_unlock_irqrestore(&port->lock, flags); |
| 474 | return ret; | ||
| 471 | } | 475 | } |
| 472 | 476 | ||
| 473 | static void uart_put_char(struct tty_struct *tty, unsigned char ch) | 477 | static int uart_put_char(struct tty_struct *tty, unsigned char ch) |
| 474 | { | 478 | { |
| 475 | struct uart_state *state = tty->driver_data; | 479 | struct uart_state *state = tty->driver_data; |
| 476 | 480 | ||
| 477 | __uart_put_char(state->port, &state->info->xmit, ch); | 481 | return __uart_put_char(state->port, &state->info->xmit, ch); |
| 478 | } | 482 | } |
| 479 | 483 | ||
| 480 | static void uart_flush_chars(struct tty_struct *tty) | 484 | static void uart_flush_chars(struct tty_struct *tty) |
| @@ -528,15 +532,25 @@ uart_write(struct tty_struct *tty, const unsigned char *buf, int count) | |||
| 528 | static int uart_write_room(struct tty_struct *tty) | 532 | static int uart_write_room(struct tty_struct *tty) |
| 529 | { | 533 | { |
| 530 | struct uart_state *state = tty->driver_data; | 534 | struct uart_state *state = tty->driver_data; |
| 535 | unsigned long flags; | ||
| 536 | int ret; | ||
| 531 | 537 | ||
| 532 | return uart_circ_chars_free(&state->info->xmit); | 538 | spin_lock_irqsave(&state->port->lock, flags); |
| 539 | ret = uart_circ_chars_free(&state->info->xmit); | ||
| 540 | spin_unlock_irqrestore(&state->port->lock, flags); | ||
| 541 | return ret; | ||
| 533 | } | 542 | } |
| 534 | 543 | ||
| 535 | static int uart_chars_in_buffer(struct tty_struct *tty) | 544 | static int uart_chars_in_buffer(struct tty_struct *tty) |
| 536 | { | 545 | { |
| 537 | struct uart_state *state = tty->driver_data; | 546 | struct uart_state *state = tty->driver_data; |
| 547 | unsigned long flags; | ||
| 548 | int ret; | ||
| 538 | 549 | ||
| 539 | return uart_circ_chars_pending(&state->info->xmit); | 550 | spin_lock_irqsave(&state->port->lock, flags); |
| 551 | ret = uart_circ_chars_pending(&state->info->xmit); | ||
| 552 | spin_unlock_irqrestore(&state->port->lock, flags); | ||
| 553 | return ret; | ||
| 540 | } | 554 | } |
| 541 | 555 | ||
| 542 | static void uart_flush_buffer(struct tty_struct *tty) | 556 | static void uart_flush_buffer(struct tty_struct *tty) |
| @@ -618,6 +632,11 @@ static int uart_get_info(struct uart_state *state, | |||
| 618 | struct serial_struct tmp; | 632 | struct serial_struct tmp; |
| 619 | 633 | ||
| 620 | memset(&tmp, 0, sizeof(tmp)); | 634 | memset(&tmp, 0, sizeof(tmp)); |
| 635 | |||
| 636 | /* Ensure the state we copy is consistent and no hardware changes | ||
| 637 | occur as we go */ | ||
| 638 | mutex_lock(&state->mutex); | ||
| 639 | |||
| 621 | tmp.type = port->type; | 640 | tmp.type = port->type; |
| 622 | tmp.line = port->line; | 641 | tmp.line = port->line; |
| 623 | tmp.port = port->iobase; | 642 | tmp.port = port->iobase; |
| @@ -637,6 +656,8 @@ static int uart_get_info(struct uart_state *state, | |||
| 637 | tmp.iomem_reg_shift = port->regshift; | 656 | tmp.iomem_reg_shift = port->regshift; |
| 638 | tmp.iomem_base = (void *)(unsigned long)port->mapbase; | 657 | tmp.iomem_base = (void *)(unsigned long)port->mapbase; |
| 639 | 658 | ||
| 659 | mutex_unlock(&state->mutex); | ||
| 660 | |||
| 640 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) | 661 | if (copy_to_user(retinfo, &tmp, sizeof(*retinfo))) |
| 641 | return -EFAULT; | 662 | return -EFAULT; |
| 642 | return 0; | 663 | return 0; |
| @@ -914,8 +935,6 @@ static void uart_break_ctl(struct tty_struct *tty, int break_state) | |||
| 914 | struct uart_state *state = tty->driver_data; | 935 | struct uart_state *state = tty->driver_data; |
| 915 | struct uart_port *port = state->port; | 936 | struct uart_port *port = state->port; |
| 916 | 937 | ||
| 917 | BUG_ON(!kernel_locked()); | ||
| 918 | |||
| 919 | mutex_lock(&state->mutex); | 938 | mutex_lock(&state->mutex); |
| 920 | 939 | ||
| 921 | if (port->type != PORT_UNKNOWN) | 940 | if (port->type != PORT_UNKNOWN) |
| @@ -1059,7 +1078,7 @@ static int uart_get_count(struct uart_state *state, | |||
| 1059 | } | 1078 | } |
| 1060 | 1079 | ||
| 1061 | /* | 1080 | /* |
| 1062 | * Called via sys_ioctl under the BKL. We can use spin_lock_irq() here. | 1081 | * Called via sys_ioctl. We can use spin_lock_irq() here. |
| 1063 | */ | 1082 | */ |
| 1064 | static int | 1083 | static int |
| 1065 | uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, | 1084 | uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, |
| @@ -1069,7 +1088,6 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, | |||
| 1069 | void __user *uarg = (void __user *)arg; | 1088 | void __user *uarg = (void __user *)arg; |
| 1070 | int ret = -ENOIOCTLCMD; | 1089 | int ret = -ENOIOCTLCMD; |
| 1071 | 1090 | ||
| 1072 | BUG_ON(!kernel_locked()); | ||
| 1073 | 1091 | ||
| 1074 | /* | 1092 | /* |
| 1075 | * These ioctls don't rely on the hardware to be present. | 1093 | * These ioctls don't rely on the hardware to be present. |
| @@ -1140,9 +1158,9 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, | |||
| 1140 | break; | 1158 | break; |
| 1141 | } | 1159 | } |
| 1142 | } | 1160 | } |
| 1143 | out_up: | 1161 | out_up: |
| 1144 | mutex_unlock(&state->mutex); | 1162 | mutex_unlock(&state->mutex); |
| 1145 | out: | 1163 | out: |
| 1146 | return ret; | 1164 | return ret; |
| 1147 | } | 1165 | } |
| 1148 | 1166 | ||
| @@ -1153,7 +1171,6 @@ static void uart_set_termios(struct tty_struct *tty, | |||
| 1153 | unsigned long flags; | 1171 | unsigned long flags; |
| 1154 | unsigned int cflag = tty->termios->c_cflag; | 1172 | unsigned int cflag = tty->termios->c_cflag; |
| 1155 | 1173 | ||
| 1156 | BUG_ON(!kernel_locked()); | ||
| 1157 | 1174 | ||
| 1158 | /* | 1175 | /* |
| 1159 | * These are the bits that are used to setup various | 1176 | * These are the bits that are used to setup various |
| @@ -1165,8 +1182,9 @@ static void uart_set_termios(struct tty_struct *tty, | |||
| 1165 | if ((cflag ^ old_termios->c_cflag) == 0 && | 1182 | if ((cflag ^ old_termios->c_cflag) == 0 && |
| 1166 | tty->termios->c_ospeed == old_termios->c_ospeed && | 1183 | tty->termios->c_ospeed == old_termios->c_ospeed && |
| 1167 | tty->termios->c_ispeed == old_termios->c_ispeed && | 1184 | tty->termios->c_ispeed == old_termios->c_ispeed && |
| 1168 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) | 1185 | RELEVANT_IFLAG(tty->termios->c_iflag ^ old_termios->c_iflag) == 0) { |
| 1169 | return; | 1186 | return; |
| 1187 | } | ||
| 1170 | 1188 | ||
| 1171 | uart_change_speed(state, old_termios); | 1189 | uart_change_speed(state, old_termios); |
| 1172 | 1190 | ||
| @@ -1200,7 +1218,6 @@ static void uart_set_termios(struct tty_struct *tty, | |||
| 1200 | } | 1218 | } |
| 1201 | spin_unlock_irqrestore(&state->port->lock, flags); | 1219 | spin_unlock_irqrestore(&state->port->lock, flags); |
| 1202 | } | 1220 | } |
| 1203 | |||
| 1204 | #if 0 | 1221 | #if 0 |
| 1205 | /* | 1222 | /* |
| 1206 | * No need to wake up processes in open wait, since they | 1223 | * No need to wake up processes in open wait, since they |
| @@ -1316,11 +1333,11 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1316 | struct uart_port *port = state->port; | 1333 | struct uart_port *port = state->port; |
| 1317 | unsigned long char_time, expire; | 1334 | unsigned long char_time, expire; |
| 1318 | 1335 | ||
| 1319 | BUG_ON(!kernel_locked()); | ||
| 1320 | |||
| 1321 | if (port->type == PORT_UNKNOWN || port->fifosize == 0) | 1336 | if (port->type == PORT_UNKNOWN || port->fifosize == 0) |
| 1322 | return; | 1337 | return; |
| 1323 | 1338 | ||
| 1339 | lock_kernel(); | ||
| 1340 | |||
| 1324 | /* | 1341 | /* |
| 1325 | * Set the check interval to be 1/5 of the estimated time to | 1342 | * Set the check interval to be 1/5 of the estimated time to |
| 1326 | * send a single character, and make it at least 1. The check | 1343 | * send a single character, and make it at least 1. The check |
| @@ -1366,6 +1383,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | |||
| 1366 | break; | 1383 | break; |
| 1367 | } | 1384 | } |
| 1368 | set_current_state(TASK_RUNNING); /* might not be needed */ | 1385 | set_current_state(TASK_RUNNING); /* might not be needed */ |
| 1386 | unlock_kernel(); | ||
| 1369 | } | 1387 | } |
| 1370 | 1388 | ||
| 1371 | /* | 1389 | /* |
| @@ -2079,7 +2097,9 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *port) | |||
| 2079 | int ret; | 2097 | int ret; |
| 2080 | 2098 | ||
| 2081 | uart_change_pm(state, 0); | 2099 | uart_change_pm(state, 0); |
| 2100 | spin_lock_irq(&port->lock); | ||
| 2082 | ops->set_mctrl(port, 0); | 2101 | ops->set_mctrl(port, 0); |
| 2102 | spin_unlock_irq(&port->lock); | ||
| 2083 | ret = ops->startup(port); | 2103 | ret = ops->startup(port); |
| 2084 | if (ret == 0) { | 2104 | if (ret == 0) { |
| 2085 | uart_change_speed(state, NULL); | 2105 | uart_change_speed(state, NULL); |
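The serial_core changes above drop the BKL assertions and instead take port->lock around the circular-buffer accounting: both head and tail are read, so an unlocked reader on another CPU could observe a half-updated pair. A sketch of the pattern using the generic circ_buf macros (the wrapper function is illustrative, not part of serial_core):

    #include <linux/circ_buf.h>
    #include <linux/serial_core.h>

    static int demo_chars_pending(struct uart_port *port, struct circ_buf *xmit)
    {
            unsigned long flags;
            int count;

            spin_lock_irqsave(&port->lock, flags);
            count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE);
            spin_unlock_irqrestore(&port->lock, flags);
            return count;
    }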
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c index c2ea5d4df44a..969106187718 100644 --- a/drivers/serial/sh-sci.c +++ b/drivers/serial/sh-sci.c | |||
| @@ -855,7 +855,7 @@ static int sci_notifier(struct notifier_block *self, | |||
| 855 | 855 | ||
| 856 | printk(KERN_INFO "%s: got a postchange notification " | 856 | printk(KERN_INFO "%s: got a postchange notification " |
| 857 | "for cpu %d (old %d, new %d)\n", | 857 | "for cpu %d (old %d, new %d)\n", |
| 858 | __FUNCTION__, freqs->cpu, freqs->old, freqs->new); | 858 | __func__, freqs->cpu, freqs->old, freqs->new); |
| 859 | } | 859 | } |
| 860 | 860 | ||
| 861 | return NOTIFY_OK; | 861 | return NOTIFY_OK; |
diff --git a/drivers/serial/sn_console.c b/drivers/serial/sn_console.c index 41fc61264443..019da2e05f0b 100644 --- a/drivers/serial/sn_console.c +++ b/drivers/serial/sn_console.c | |||
| @@ -839,7 +839,7 @@ static int __init sn_sal_module_init(void) | |||
| 839 | 839 | ||
| 840 | if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) { | 840 | if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) { |
| 841 | /* error - not sure what I'd do - so I'll do nothing */ | 841 | /* error - not sure what I'd do - so I'll do nothing */ |
| 842 | printk(KERN_ERR "%s: unable to add port\n", __FUNCTION__); | 842 | printk(KERN_ERR "%s: unable to add port\n", __func__); |
| 843 | } | 843 | } |
| 844 | 844 | ||
| 845 | /* when this driver is compiled in, the console initialization | 845 | /* when this driver is compiled in, the console initialization |
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c index b565d5a37499..b51c24245be4 100644 --- a/drivers/serial/uartlite.c +++ b/drivers/serial/uartlite.c | |||
| @@ -584,7 +584,7 @@ ulite_of_probe(struct of_device *op, const struct of_device_id *match) | |||
| 584 | const unsigned int *id; | 584 | const unsigned int *id; |
| 585 | int irq, rc; | 585 | int irq, rc; |
| 586 | 586 | ||
| 587 | dev_dbg(&op->dev, "%s(%p, %p)\n", __FUNCTION__, op, match); | 587 | dev_dbg(&op->dev, "%s(%p, %p)\n", __func__, op, match); |
| 588 | 588 | ||
| 589 | rc = of_address_to_resource(op->node, 0, &res); | 589 | rc = of_address_to_resource(op->node, 0, &res); |
| 590 | if (rc) { | 590 | if (rc) { |
diff --git a/drivers/serial/ucc_uart.c b/drivers/serial/ucc_uart.c index 5e4310ccd591..01917c433f17 100644 --- a/drivers/serial/ucc_uart.c +++ b/drivers/serial/ucc_uart.c | |||
| @@ -215,7 +215,7 @@ static inline dma_addr_t cpu2qe_addr(void *addr, struct uart_qe_port *qe_port) | |||
| 215 | return qe_port->bd_dma_addr + (addr - qe_port->bd_virt); | 215 | return qe_port->bd_dma_addr + (addr - qe_port->bd_virt); |
| 216 | 216 | ||
| 217 | /* something nasty happened */ | 217 | /* something nasty happened */ |
| 218 | printk(KERN_ERR "%s: addr=%p\n", __FUNCTION__, addr); | 218 | printk(KERN_ERR "%s: addr=%p\n", __func__, addr); |
| 219 | BUG(); | 219 | BUG(); |
| 220 | return 0; | 220 | return 0; |
| 221 | } | 221 | } |
| @@ -234,7 +234,7 @@ static inline void *qe2cpu_addr(dma_addr_t addr, struct uart_qe_port *qe_port) | |||
| 234 | return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); | 234 | return qe_port->bd_virt + (addr - qe_port->bd_dma_addr); |
| 235 | 235 | ||
| 236 | /* something nasty happened */ | 236 | /* something nasty happened */ |
| 237 | printk(KERN_ERR "%s: addr=%x\n", __FUNCTION__, addr); | 237 | printk(KERN_ERR "%s: addr=%x\n", __func__, addr); |
| 238 | BUG(); | 238 | BUG(); |
| 239 | return NULL; | 239 | return NULL; |
| 240 | } | 240 | } |
diff --git a/drivers/spi/atmel_spi.c b/drivers/spi/atmel_spi.c index 02c8e305b14f..e81d59d78910 100644 --- a/drivers/spi/atmel_spi.c +++ b/drivers/spi/atmel_spi.c | |||
| @@ -497,7 +497,7 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
| 497 | struct atmel_spi *as; | 497 | struct atmel_spi *as; |
| 498 | u32 scbr, csr; | 498 | u32 scbr, csr; |
| 499 | unsigned int bits = spi->bits_per_word; | 499 | unsigned int bits = spi->bits_per_word; |
| 500 | unsigned long bus_hz, sck_hz; | 500 | unsigned long bus_hz; |
| 501 | unsigned int npcs_pin; | 501 | unsigned int npcs_pin; |
| 502 | int ret; | 502 | int ret; |
| 503 | 503 | ||
| @@ -536,14 +536,25 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
| 536 | return -EINVAL; | 536 | return -EINVAL; |
| 537 | } | 537 | } |
| 538 | 538 | ||
| 539 | /* speed zero convention is used by some upper layers */ | 539 | /* |
| 540 | * Pre-new_1 chips start out at half the peripheral | ||
| 541 | * bus speed. | ||
| 542 | */ | ||
| 540 | bus_hz = clk_get_rate(as->clk); | 543 | bus_hz = clk_get_rate(as->clk); |
| 544 | if (!as->new_1) | ||
| 545 | bus_hz /= 2; | ||
| 546 | |||
| 541 | if (spi->max_speed_hz) { | 547 | if (spi->max_speed_hz) { |
| 542 | /* assume div32/fdiv/mbz == 0 */ | 548 | /* |
| 543 | if (!as->new_1) | 549 | * Calculate the lowest divider that satisfies the |
| 544 | bus_hz /= 2; | 550 | * constraint, assuming div32/fdiv/mbz == 0. |
| 545 | scbr = ((bus_hz + spi->max_speed_hz - 1) | 551 | */ |
| 546 | / spi->max_speed_hz); | 552 | scbr = DIV_ROUND_UP(bus_hz, spi->max_speed_hz); |
| 553 | |||
| 554 | /* | ||
| 555 | * If the resulting divider doesn't fit into the | ||
| 556 | * register bitfield, we can't satisfy the constraint. | ||
| 557 | */ | ||
| 547 | if (scbr >= (1 << SPI_SCBR_SIZE)) { | 558 | if (scbr >= (1 << SPI_SCBR_SIZE)) { |
| 548 | dev_dbg(&spi->dev, | 559 | dev_dbg(&spi->dev, |
| 549 | "setup: %d Hz too slow, scbr %u; min %ld Hz\n", | 560 | "setup: %d Hz too slow, scbr %u; min %ld Hz\n", |
| @@ -551,8 +562,8 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
| 551 | return -EINVAL; | 562 | return -EINVAL; |
| 552 | } | 563 | } |
| 553 | } else | 564 | } else |
| 565 | /* speed zero means "as slow as possible" */ | ||
| 554 | scbr = 0xff; | 566 | scbr = 0xff; |
| 555 | sck_hz = bus_hz / scbr; | ||
| 556 | 567 | ||
| 557 | csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); | 568 | csr = SPI_BF(SCBR, scbr) | SPI_BF(BITS, bits - 8); |
| 558 | if (spi->mode & SPI_CPOL) | 569 | if (spi->mode & SPI_CPOL) |
| @@ -589,7 +600,7 @@ static int atmel_spi_setup(struct spi_device *spi) | |||
| 589 | 600 | ||
| 590 | dev_dbg(&spi->dev, | 601 | dev_dbg(&spi->dev, |
| 591 | "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", | 602 | "setup: %lu Hz bpw %u mode 0x%x -> csr%d %08x\n", |
| 592 | sck_hz, bits, spi->mode, spi->chip_select, csr); | 603 | bus_hz / scbr, bits, spi->mode, spi->chip_select, csr); |
| 593 | 604 | ||
| 594 | spi_writel(as, CSR0 + 4 * spi->chip_select, csr); | 605 | spi_writel(as, CSR0 + 4 * spi->chip_select, csr); |
| 595 | 606 | ||
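The atmel_spi hunk replaces the open-coded round-up division with DIV_ROUND_UP and halves bus_hz up front for pre-"new_1" silicon. A worked sketch of the divider selection (the clock figures in the comment are examples, not board data):

    #include <linux/kernel.h>       /* DIV_ROUND_UP() */

    /*
     * E.g. a 60 MHz peripheral clock on older silicon gives bus_hz = 30 MHz;
     * max_speed_hz = 10 MHz then yields scbr = DIV_ROUND_UP(30000000, 10000000) = 3.
     * The real driver additionally rejects scbr values that do not fit the
     * SCBR register bitfield.
     */
    static unsigned int demo_scbr(unsigned long bus_hz, u32 max_speed_hz,
                                  int halve_clock)
    {
            if (halve_clock)
                    bus_hz /= 2;
            if (!max_speed_hz)
                    return 0xff;    /* speed 0 means "as slow as possible" */
            return DIV_ROUND_UP(bus_hz, max_speed_hz);
    }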
diff --git a/drivers/usb/gadget/serial.c b/drivers/usb/gadget/serial.c index 433b3f44f42e..8d158e5640e3 100644 --- a/drivers/usb/gadget/serial.c +++ b/drivers/usb/gadget/serial.c | |||
| @@ -170,7 +170,7 @@ static int gs_open(struct tty_struct *tty, struct file *file); | |||
| 170 | static void gs_close(struct tty_struct *tty, struct file *file); | 170 | static void gs_close(struct tty_struct *tty, struct file *file); |
| 171 | static int gs_write(struct tty_struct *tty, | 171 | static int gs_write(struct tty_struct *tty, |
| 172 | const unsigned char *buf, int count); | 172 | const unsigned char *buf, int count); |
| 173 | static void gs_put_char(struct tty_struct *tty, unsigned char ch); | 173 | static int gs_put_char(struct tty_struct *tty, unsigned char ch); |
| 174 | static void gs_flush_chars(struct tty_struct *tty); | 174 | static void gs_flush_chars(struct tty_struct *tty); |
| 175 | static int gs_write_room(struct tty_struct *tty); | 175 | static int gs_write_room(struct tty_struct *tty); |
| 176 | static int gs_chars_in_buffer(struct tty_struct *tty); | 176 | static int gs_chars_in_buffer(struct tty_struct *tty); |
| @@ -883,14 +883,15 @@ exit: | |||
| 883 | /* | 883 | /* |
| 884 | * gs_put_char | 884 | * gs_put_char |
| 885 | */ | 885 | */ |
| 886 | static void gs_put_char(struct tty_struct *tty, unsigned char ch) | 886 | static int gs_put_char(struct tty_struct *tty, unsigned char ch) |
| 887 | { | 887 | { |
| 888 | unsigned long flags; | 888 | unsigned long flags; |
| 889 | struct gs_port *port = tty->driver_data; | 889 | struct gs_port *port = tty->driver_data; |
| 890 | int ret = 0; | ||
| 890 | 891 | ||
| 891 | if (port == NULL) { | 892 | if (port == NULL) { |
| 892 | pr_err("gs_put_char: NULL port pointer\n"); | 893 | pr_err("gs_put_char: NULL port pointer\n"); |
| 893 | return; | 894 | return 0; |
| 894 | } | 895 | } |
| 895 | 896 | ||
| 896 | gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n", | 897 | gs_debug("gs_put_char: (%d,%p) char=0x%x, called from %p\n", |
| @@ -910,10 +911,11 @@ static void gs_put_char(struct tty_struct *tty, unsigned char ch) | |||
| 910 | goto exit; | 911 | goto exit; |
| 911 | } | 912 | } |
| 912 | 913 | ||
| 913 | gs_buf_put(port->port_write_buf, &ch, 1); | 914 | ret = gs_buf_put(port->port_write_buf, &ch, 1); |
| 914 | 915 | ||
| 915 | exit: | 916 | exit: |
| 916 | spin_unlock_irqrestore(&port->port_lock, flags); | 917 | spin_unlock_irqrestore(&port->port_lock, flags); |
| 918 | return ret; | ||
| 917 | } | 919 | } |
| 918 | 920 | ||
| 919 | /* | 921 | /* |
diff --git a/drivers/usb/serial/digi_acceleport.c b/drivers/usb/serial/digi_acceleport.c index d17d1645714f..04a56f300ea6 100644 --- a/drivers/usb/serial/digi_acceleport.c +++ b/drivers/usb/serial/digi_acceleport.c | |||
| @@ -1421,8 +1421,7 @@ static void digi_close(struct usb_serial_port *port, struct file *filp) | |||
| 1421 | tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT); | 1421 | tty_wait_until_sent(tty, DIGI_CLOSE_TIMEOUT); |
| 1422 | 1422 | ||
| 1423 | /* flush driver and line discipline buffers */ | 1423 | /* flush driver and line discipline buffers */ |
| 1424 | if (tty->driver->flush_buffer) | 1424 | tty_driver_flush_buffer(tty); |
| 1425 | tty->driver->flush_buffer(tty); | ||
| 1426 | tty_ldisc_flush(tty); | 1425 | tty_ldisc_flush(tty); |
| 1427 | 1426 | ||
| 1428 | if (port->serial->dev) { | 1427 | if (port->serial->dev) { |
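Here, as in the whiteheat and mcfserial hunks, the open-coded "check the driver's flush_buffer op, then call it" sequence becomes a call to the new tty_driver_flush_buffer() helper, which centralizes the NULL check every caller used to repeat. Roughly the idea (a sketch, not the exact tty-core source):

    #include <linux/tty.h>

    /* Sketch only: the in-tree helper lives in the tty core. */
    static inline void demo_driver_flush_buffer(struct tty_struct *tty)
    {
            if (tty->driver->flush_buffer)
                    tty->driver->flush_buffer(tty);
    }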
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c index a9934a3f9845..0cb0d77dc429 100644 --- a/drivers/usb/serial/usb-serial.c +++ b/drivers/usb/serial/usb-serial.c | |||
| @@ -296,16 +296,14 @@ static int serial_write (struct tty_struct * tty, const unsigned char *buf, int | |||
| 296 | struct usb_serial_port *port = tty->driver_data; | 296 | struct usb_serial_port *port = tty->driver_data; |
| 297 | int retval = -ENODEV; | 297 | int retval = -ENODEV; |
| 298 | 298 | ||
| 299 | if (!port || port->serial->dev->state == USB_STATE_NOTATTACHED) | 299 | if (port->serial->dev->state == USB_STATE_NOTATTACHED) |
| 300 | goto exit; | 300 | goto exit; |
| 301 | 301 | ||
| 302 | dbg("%s - port %d, %d byte(s)", __func__, port->number, count); | 302 | dbg("%s - port %d, %d byte(s)", __func__, port->number, count); |
| 303 | 303 | ||
| 304 | if (!port->open_count) { | 304 | /* open_count is managed under the mutex lock for the tty so cannot |
| 305 | retval = -EINVAL; | 305 | drop to zero until after the last close completes */ |
| 306 | dbg("%s - port not opened", __func__); | 306 | WARN_ON(!port->open_count); |
| 307 | goto exit; | ||
| 308 | } | ||
| 309 | 307 | ||
| 310 | /* pass on to the driver specific version of this function */ | 308 | /* pass on to the driver specific version of this function */ |
| 311 | retval = port->serial->type->write(port, buf, count); | 309 | retval = port->serial->type->write(port, buf, count); |
| @@ -317,61 +315,28 @@ exit: | |||
| 317 | static int serial_write_room (struct tty_struct *tty) | 315 | static int serial_write_room (struct tty_struct *tty) |
| 318 | { | 316 | { |
| 319 | struct usb_serial_port *port = tty->driver_data; | 317 | struct usb_serial_port *port = tty->driver_data; |
| 320 | int retval = -ENODEV; | ||
| 321 | |||
| 322 | if (!port) | ||
| 323 | goto exit; | ||
| 324 | |||
| 325 | dbg("%s - port %d", __func__, port->number); | 318 | dbg("%s - port %d", __func__, port->number); |
| 326 | 319 | WARN_ON(!port->open_count); | |
| 327 | if (!port->open_count) { | ||
| 328 | dbg("%s - port not open", __func__); | ||
| 329 | goto exit; | ||
| 330 | } | ||
| 331 | |||
| 332 | /* pass on to the driver specific version of this function */ | 320 | /* pass on to the driver specific version of this function */ |
| 333 | retval = port->serial->type->write_room(port); | 321 | return port->serial->type->write_room(port); |
| 334 | |||
| 335 | exit: | ||
| 336 | return retval; | ||
| 337 | } | 322 | } |
| 338 | 323 | ||
| 339 | static int serial_chars_in_buffer (struct tty_struct *tty) | 324 | static int serial_chars_in_buffer (struct tty_struct *tty) |
| 340 | { | 325 | { |
| 341 | struct usb_serial_port *port = tty->driver_data; | 326 | struct usb_serial_port *port = tty->driver_data; |
| 342 | int retval = -ENODEV; | ||
| 343 | |||
| 344 | if (!port) | ||
| 345 | goto exit; | ||
| 346 | |||
| 347 | dbg("%s = port %d", __func__, port->number); | 327 | dbg("%s = port %d", __func__, port->number); |
| 348 | 328 | ||
| 349 | if (!port->open_count) { | 329 | WARN_ON(!port->open_count); |
| 350 | dbg("%s - port not open", __func__); | ||
| 351 | goto exit; | ||
| 352 | } | ||
| 353 | |||
| 354 | /* pass on to the driver specific version of this function */ | 330 | /* pass on to the driver specific version of this function */ |
| 355 | retval = port->serial->type->chars_in_buffer(port); | 331 | return port->serial->type->chars_in_buffer(port); |
| 356 | |||
| 357 | exit: | ||
| 358 | return retval; | ||
| 359 | } | 332 | } |
| 360 | 333 | ||
| 361 | static void serial_throttle (struct tty_struct * tty) | 334 | static void serial_throttle (struct tty_struct * tty) |
| 362 | { | 335 | { |
| 363 | struct usb_serial_port *port = tty->driver_data; | 336 | struct usb_serial_port *port = tty->driver_data; |
| 364 | |||
| 365 | if (!port) | ||
| 366 | return; | ||
| 367 | |||
| 368 | dbg("%s - port %d", __func__, port->number); | 337 | dbg("%s - port %d", __func__, port->number); |
| 369 | 338 | ||
| 370 | if (!port->open_count) { | 339 | WARN_ON(!port->open_count); |
| 371 | dbg ("%s - port not open", __func__); | ||
| 372 | return; | ||
| 373 | } | ||
| 374 | |||
| 375 | /* pass on to the driver specific version of this function */ | 340 | /* pass on to the driver specific version of this function */ |
| 376 | if (port->serial->type->throttle) | 341 | if (port->serial->type->throttle) |
| 377 | port->serial->type->throttle(port); | 342 | port->serial->type->throttle(port); |
| @@ -380,17 +345,9 @@ static void serial_throttle (struct tty_struct * tty) | |||
| 380 | static void serial_unthrottle (struct tty_struct * tty) | 345 | static void serial_unthrottle (struct tty_struct * tty) |
| 381 | { | 346 | { |
| 382 | struct usb_serial_port *port = tty->driver_data; | 347 | struct usb_serial_port *port = tty->driver_data; |
| 383 | |||
| 384 | if (!port) | ||
| 385 | return; | ||
| 386 | |||
| 387 | dbg("%s - port %d", __func__, port->number); | 348 | dbg("%s - port %d", __func__, port->number); |
| 388 | 349 | ||
| 389 | if (!port->open_count) { | 350 | WARN_ON(!port->open_count); |
| 390 | dbg("%s - port not open", __func__); | ||
| 391 | return; | ||
| 392 | } | ||
| 393 | |||
| 394 | /* pass on to the driver specific version of this function */ | 351 | /* pass on to the driver specific version of this function */ |
| 395 | if (port->serial->type->unthrottle) | 352 | if (port->serial->type->unthrottle) |
| 396 | port->serial->type->unthrottle(port); | 353 | port->serial->type->unthrottle(port); |
| @@ -401,42 +358,27 @@ static int serial_ioctl (struct tty_struct *tty, struct file * file, unsigned in | |||
| 401 | struct usb_serial_port *port = tty->driver_data; | 358 | struct usb_serial_port *port = tty->driver_data; |
| 402 | int retval = -ENODEV; | 359 | int retval = -ENODEV; |
| 403 | 360 | ||
| 404 | lock_kernel(); | ||
| 405 | if (!port) | ||
| 406 | goto exit; | ||
| 407 | |||
| 408 | dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd); | 361 | dbg("%s - port %d, cmd 0x%.4x", __func__, port->number, cmd); |
| 409 | 362 | ||
| 410 | /* Caution - port->open_count is BKL protected */ | 363 | WARN_ON(!port->open_count); |
| 411 | if (!port->open_count) { | ||
| 412 | dbg ("%s - port not open", __func__); | ||
| 413 | goto exit; | ||
| 414 | } | ||
| 415 | 364 | ||
| 416 | /* pass on to the driver specific version of this function if it is available */ | 365 | /* pass on to the driver specific version of this function if it is available */ |
| 417 | if (port->serial->type->ioctl) | 366 | if (port->serial->type->ioctl) { |
| 367 | lock_kernel(); | ||
| 418 | retval = port->serial->type->ioctl(port, file, cmd, arg); | 368 | retval = port->serial->type->ioctl(port, file, cmd, arg); |
| 369 | unlock_kernel(); | ||
| 370 | } | ||
| 419 | else | 371 | else |
| 420 | retval = -ENOIOCTLCMD; | 372 | retval = -ENOIOCTLCMD; |
| 421 | exit: | ||
| 422 | unlock_kernel(); | ||
| 423 | return retval; | 373 | return retval; |
| 424 | } | 374 | } |
| 425 | 375 | ||
| 426 | static void serial_set_termios (struct tty_struct *tty, struct ktermios * old) | 376 | static void serial_set_termios (struct tty_struct *tty, struct ktermios * old) |
| 427 | { | 377 | { |
| 428 | struct usb_serial_port *port = tty->driver_data; | 378 | struct usb_serial_port *port = tty->driver_data; |
| 429 | |||
| 430 | if (!port) | ||
| 431 | return; | ||
| 432 | |||
| 433 | dbg("%s - port %d", __func__, port->number); | 379 | dbg("%s - port %d", __func__, port->number); |
| 434 | 380 | ||
| 435 | if (!port->open_count) { | 381 | WARN_ON(!port->open_count); |
| 436 | dbg("%s - port not open", __func__); | ||
| 437 | return; | ||
| 438 | } | ||
| 439 | |||
| 440 | /* pass on to the driver specific version of this function if it is available */ | 382 | /* pass on to the driver specific version of this function if it is available */ |
| 441 | if (port->serial->type->set_termios) | 383 | if (port->serial->type->set_termios) |
| 442 | port->serial->type->set_termios(port, old); | 384 | port->serial->type->set_termios(port, old); |
| @@ -448,24 +390,15 @@ static void serial_break (struct tty_struct *tty, int break_state) | |||
| 448 | { | 390 | { |
| 449 | struct usb_serial_port *port = tty->driver_data; | 391 | struct usb_serial_port *port = tty->driver_data; |
| 450 | 392 | ||
| 451 | lock_kernel(); | ||
| 452 | if (!port) { | ||
| 453 | unlock_kernel(); | ||
| 454 | return; | ||
| 455 | } | ||
| 456 | |||
| 457 | dbg("%s - port %d", __func__, port->number); | 393 | dbg("%s - port %d", __func__, port->number); |
| 458 | 394 | ||
| 459 | if (!port->open_count) { | 395 | WARN_ON(!port->open_count); |
| 460 | dbg("%s - port not open", __func__); | ||
| 461 | unlock_kernel(); | ||
| 462 | return; | ||
| 463 | } | ||
| 464 | |||
| 465 | /* pass on to the driver specific version of this function if it is available */ | 396 | /* pass on to the driver specific version of this function if it is available */ |
| 466 | if (port->serial->type->break_ctl) | 397 | if (port->serial->type->break_ctl) { |
| 398 | lock_kernel(); | ||
| 467 | port->serial->type->break_ctl(port, break_state); | 399 | port->serial->type->break_ctl(port, break_state); |
| 468 | unlock_kernel(); | 400 | unlock_kernel(); |
| 401 | } | ||
| 469 | } | 402 | } |
| 470 | 403 | ||
| 471 | static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) | 404 | static int serial_read_proc (char *page, char **start, off_t off, int count, int *eof, void *data) |
| @@ -519,19 +452,11 @@ static int serial_tiocmget (struct tty_struct *tty, struct file *file) | |||
| 519 | { | 452 | { |
| 520 | struct usb_serial_port *port = tty->driver_data; | 453 | struct usb_serial_port *port = tty->driver_data; |
| 521 | 454 | ||
| 522 | if (!port) | ||
| 523 | return -ENODEV; | ||
| 524 | |||
| 525 | dbg("%s - port %d", __func__, port->number); | 455 | dbg("%s - port %d", __func__, port->number); |
| 526 | 456 | ||
| 527 | if (!port->open_count) { | 457 | WARN_ON(!port->open_count); |
| 528 | dbg("%s - port not open", __func__); | ||
| 529 | return -ENODEV; | ||
| 530 | } | ||
| 531 | |||
| 532 | if (port->serial->type->tiocmget) | 458 | if (port->serial->type->tiocmget) |
| 533 | return port->serial->type->tiocmget(port, file); | 459 | return port->serial->type->tiocmget(port, file); |
| 534 | |||
| 535 | return -EINVAL; | 460 | return -EINVAL; |
| 536 | } | 461 | } |
| 537 | 462 | ||
| @@ -540,19 +465,11 @@ static int serial_tiocmset (struct tty_struct *tty, struct file *file, | |||
| 540 | { | 465 | { |
| 541 | struct usb_serial_port *port = tty->driver_data; | 466 | struct usb_serial_port *port = tty->driver_data; |
| 542 | 467 | ||
| 543 | if (!port) | ||
| 544 | return -ENODEV; | ||
| 545 | |||
| 546 | dbg("%s - port %d", __func__, port->number); | 468 | dbg("%s - port %d", __func__, port->number); |
| 547 | 469 | ||
| 548 | if (!port->open_count) { | 470 | WARN_ON(!port->open_count); |
| 549 | dbg("%s - port not open", __func__); | ||
| 550 | return -ENODEV; | ||
| 551 | } | ||
| 552 | |||
| 553 | if (port->serial->type->tiocmset) | 471 | if (port->serial->type->tiocmset) |
| 554 | return port->serial->type->tiocmset(port, file, set, clear); | 472 | return port->serial->type->tiocmset(port, file, set, clear); |
| 555 | |||
| 556 | return -EINVAL; | 473 | return -EINVAL; |
| 557 | } | 474 | } |
| 558 | 475 | ||
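The usb-serial hunks above drop the defensive NULL and open_count checks in favour of WARN_ON(): once the tty core keeps driver_data valid for the lifetime of an open tty, a failed check indicates a core bug and should be reported loudly rather than hidden behind a silent return. A related idiom, for callers that still want to bail out after reporting, relies on WARN_ON() evaluating to the tested condition (sketch; the function body mirrors the driver but is illustrative):

    #include <linux/kernel.h>
    #include <linux/usb/serial.h>

    static int demo_tiocmget(struct usb_serial_port *port, struct file *file)
    {
            /* WARN_ON() returns the condition, so it can both report the
             * core bug and still guard the dereference below. */
            if (WARN_ON(!port->open_count))
                    return -ENODEV;

            if (port->serial->type->tiocmget)
                    return port->serial->type->tiocmget(port, file);
            return -EINVAL;
    }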
diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index e96bf8663ffc..f07e8a4c1f3d 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c | |||
| @@ -673,15 +673,13 @@ static void whiteheat_close(struct usb_serial_port *port, struct file * filp) | |||
| 673 | } | 673 | } |
| 674 | */ | 674 | */ |
| 675 | 675 | ||
| 676 | if (port->tty->driver->flush_buffer) | 676 | tty_driver_flush_buffer(port->tty); |
| 677 | port->tty->driver->flush_buffer(port->tty); | ||
| 678 | tty_ldisc_flush(port->tty); | 677 | tty_ldisc_flush(port->tty); |
| 679 | 678 | ||
| 680 | firm_report_tx_done(port); | 679 | firm_report_tx_done(port); |
| 681 | 680 | ||
| 682 | firm_close(port); | 681 | firm_close(port); |
| 683 | 682 | ||
| 684 | printk(KERN_ERR"Before processing rx_urbs_submitted.\n"); | ||
| 685 | /* shutdown our bulk reads and writes */ | 683 | /* shutdown our bulk reads and writes */ |
| 686 | mutex_lock(&info->deathwarrant); | 684 | mutex_lock(&info->deathwarrant); |
| 687 | spin_lock_irq(&info->lock); | 685 | spin_lock_irq(&info->lock); |
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index a576dc261732..bb1dadaa4a23 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig | |||
| @@ -1774,6 +1774,11 @@ config FB_PXA | |||
| 1774 | 1774 | ||
| 1775 | If unsure, say N. | 1775 | If unsure, say N. |
| 1776 | 1776 | ||
| 1777 | config FB_PXA_SMARTPANEL | ||
| 1778 | bool "PXA Smartpanel LCD support" | ||
| 1779 | default n | ||
| 1780 | depends on FB_PXA | ||
| 1781 | |||
| 1777 | config FB_PXA_PARAMETERS | 1782 | config FB_PXA_PARAMETERS |
| 1778 | bool "PXA LCD command line parameters" | 1783 | bool "PXA LCD command line parameters" |
| 1779 | default n | 1784 | default n |
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c index 757651954e6c..3ab6e3d973a1 100644 --- a/drivers/video/pxafb.c +++ b/drivers/video/pxafb.c | |||
| @@ -39,6 +39,9 @@ | |||
| 39 | #include <linux/dma-mapping.h> | 39 | #include <linux/dma-mapping.h> |
| 40 | #include <linux/clk.h> | 40 | #include <linux/clk.h> |
| 41 | #include <linux/err.h> | 41 | #include <linux/err.h> |
| 42 | #include <linux/completion.h> | ||
| 43 | #include <linux/kthread.h> | ||
| 44 | #include <linux/freezer.h> | ||
| 42 | 45 | ||
| 43 | #include <asm/hardware.h> | 46 | #include <asm/hardware.h> |
| 44 | #include <asm/io.h> | 47 | #include <asm/io.h> |
| @@ -57,19 +60,31 @@ | |||
| 57 | #include "pxafb.h" | 60 | #include "pxafb.h" |
| 58 | 61 | ||
| 59 | /* Bits which should not be set in machine configuration structures */ | 62 | /* Bits which should not be set in machine configuration structures */ |
| 60 | #define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM|LCCR0_BM|LCCR0_QDM|LCCR0_DIS|LCCR0_EFM|LCCR0_IUM|LCCR0_SFM|LCCR0_LDM|LCCR0_ENB) | 63 | #define LCCR0_INVALID_CONFIG_MASK (LCCR0_OUM | LCCR0_BM | LCCR0_QDM |\ |
| 61 | #define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP|LCCR3_VSP|LCCR3_PCD|LCCR3_BPP) | 64 | LCCR0_DIS | LCCR0_EFM | LCCR0_IUM |\ |
| 65 | LCCR0_SFM | LCCR0_LDM | LCCR0_ENB) | ||
| 66 | |||
| 67 | #define LCCR3_INVALID_CONFIG_MASK (LCCR3_HSP | LCCR3_VSP |\ | ||
| 68 | LCCR3_PCD | LCCR3_BPP) | ||
| 62 | 69 | ||
| 63 | static void (*pxafb_backlight_power)(int); | 70 | static void (*pxafb_backlight_power)(int); |
| 64 | static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); | 71 | static void (*pxafb_lcd_power)(int, struct fb_var_screeninfo *); |
| 65 | 72 | ||
| 66 | static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *); | 73 | static int pxafb_activate_var(struct fb_var_screeninfo *var, |
| 74 | struct pxafb_info *); | ||
| 67 | static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); | 75 | static void set_ctrlr_state(struct pxafb_info *fbi, u_int state); |
| 68 | 76 | ||
| 69 | #ifdef CONFIG_FB_PXA_PARAMETERS | 77 | static inline unsigned long |
| 70 | #define PXAFB_OPTIONS_SIZE 256 | 78 | lcd_readl(struct pxafb_info *fbi, unsigned int off) |
| 71 | static char g_options[PXAFB_OPTIONS_SIZE] __devinitdata = ""; | 79 | { |
| 72 | #endif | 80 | return __raw_readl(fbi->mmio_base + off); |
| 81 | } | ||
| 82 | |||
| 83 | static inline void | ||
| 84 | lcd_writel(struct pxafb_info *fbi, unsigned int off, unsigned long val) | ||
| 85 | { | ||
| 86 | __raw_writel(val, fbi->mmio_base + off); | ||
| 87 | } | ||
| 73 | 88 | ||
| 74 | static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) | 89 | static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) |
| 75 | { | 90 | { |
| @@ -79,10 +94,12 @@ static inline void pxafb_schedule_work(struct pxafb_info *fbi, u_int state) | |||
| 79 | /* | 94 | /* |
| 80 | * We need to handle two requests being made at the same time. | 95 | * We need to handle two requests being made at the same time. |
| 81 | * There are two important cases: | 96 | * There are two important cases: |
| 82 | * 1. When we are changing VT (C_REENABLE) while unblanking (C_ENABLE) | 97 | * 1. When we are changing VT (C_REENABLE) while unblanking |
| 83 | * We must perform the unblanking, which will do our REENABLE for us. | 98 | * (C_ENABLE) We must perform the unblanking, which will |
| 84 | * 2. When we are blanking, but immediately unblank before we have | 99 | * do our REENABLE for us. |
| 85 | * blanked. We do the "REENABLE" thing here as well, just to be sure. | 100 | * 2. When we are blanking, but immediately unblank before |
| 101 | * we have blanked. We do the "REENABLE" thing here as | ||
| 102 | * well, just to be sure. | ||
| 86 | */ | 103 | */ |
| 87 | if (fbi->task_state == C_ENABLE && state == C_REENABLE) | 104 | if (fbi->task_state == C_ENABLE && state == C_REENABLE) |
| 88 | state = (u_int) -1; | 105 | state = (u_int) -1; |
| @@ -129,13 +146,13 @@ pxafb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue, | |||
| 129 | val = ((red << 8) & 0x00f80000); | 146 | val = ((red << 8) & 0x00f80000); |
| 130 | val |= ((green >> 0) & 0x0000fc00); | 147 | val |= ((green >> 0) & 0x0000fc00); |
| 131 | val |= ((blue >> 8) & 0x000000f8); | 148 | val |= ((blue >> 8) & 0x000000f8); |
| 132 | ((u32*)(fbi->palette_cpu))[regno] = val; | 149 | ((u32 *)(fbi->palette_cpu))[regno] = val; |
| 133 | break; | 150 | break; |
| 134 | case LCCR4_PAL_FOR_2: | 151 | case LCCR4_PAL_FOR_2: |
| 135 | val = ((red << 8) & 0x00fc0000); | 152 | val = ((red << 8) & 0x00fc0000); |
| 136 | val |= ((green >> 0) & 0x0000fc00); | 153 | val |= ((green >> 0) & 0x0000fc00); |
| 137 | val |= ((blue >> 8) & 0x000000fc); | 154 | val |= ((blue >> 8) & 0x000000fc); |
| 138 | ((u32*)(fbi->palette_cpu))[regno] = val; | 155 | ((u32 *)(fbi->palette_cpu))[regno] = val; |
| 139 | break; | 156 | break; |
| 140 | } | 157 | } |
| 141 | 158 | ||
| @@ -203,15 +220,15 @@ pxafb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, | |||
| 203 | */ | 220 | */ |
| 204 | static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var) | 221 | static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var) |
| 205 | { | 222 | { |
| 206 | int ret = 0; | 223 | int ret = 0; |
| 207 | switch (var->bits_per_pixel) { | 224 | switch (var->bits_per_pixel) { |
| 208 | case 1: ret = LCCR3_1BPP; break; | 225 | case 1: ret = LCCR3_1BPP; break; |
| 209 | case 2: ret = LCCR3_2BPP; break; | 226 | case 2: ret = LCCR3_2BPP; break; |
| 210 | case 4: ret = LCCR3_4BPP; break; | 227 | case 4: ret = LCCR3_4BPP; break; |
| 211 | case 8: ret = LCCR3_8BPP; break; | 228 | case 8: ret = LCCR3_8BPP; break; |
| 212 | case 16: ret = LCCR3_16BPP; break; | 229 | case 16: ret = LCCR3_16BPP; break; |
| 213 | } | 230 | } |
| 214 | return ret; | 231 | return ret; |
| 215 | } | 232 | } |
| 216 | 233 | ||
| 217 | #ifdef CONFIG_CPU_FREQ | 234 | #ifdef CONFIG_CPU_FREQ |
| @@ -223,31 +240,32 @@ static int pxafb_bpp_to_lccr3(struct fb_var_screeninfo *var) | |||
| 223 | */ | 240 | */ |
| 224 | static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var) | 241 | static unsigned int pxafb_display_dma_period(struct fb_var_screeninfo *var) |
| 225 | { | 242 | { |
| 226 | /* | 243 | /* |
| 227 | * Period = pixclock * bits_per_byte * bytes_per_transfer | 244 | * Period = pixclock * bits_per_byte * bytes_per_transfer |
| 228 | * / memory_bits_per_pixel; | 245 | * / memory_bits_per_pixel; |
| 229 | */ | 246 | */ |
| 230 | return var->pixclock * 8 * 16 / var->bits_per_pixel; | 247 | return var->pixclock * 8 * 16 / var->bits_per_pixel; |
| 231 | } | 248 | } |
| 232 | |||
| 233 | extern unsigned int get_clk_frequency_khz(int info); | ||
| 234 | #endif | 249 | #endif |
| 235 | 250 | ||
| 236 | /* | 251 | /* |
| 237 | * Select the smallest mode that allows the desired resolution to be | 252 | * Select the smallest mode that allows the desired resolution to be |
| 238 | * displayed. If desired parameters can be rounded up. | 253 | * displayed. If desired parameters can be rounded up. |
| 239 | */ | 254 | */ |
| 240 | static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struct fb_var_screeninfo *var) | 255 | static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, |
| 256 | struct fb_var_screeninfo *var) | ||
| 241 | { | 257 | { |
| 242 | struct pxafb_mode_info *mode = NULL; | 258 | struct pxafb_mode_info *mode = NULL; |
| 243 | struct pxafb_mode_info *modelist = mach->modes; | 259 | struct pxafb_mode_info *modelist = mach->modes; |
| 244 | unsigned int best_x = 0xffffffff, best_y = 0xffffffff; | 260 | unsigned int best_x = 0xffffffff, best_y = 0xffffffff; |
| 245 | unsigned int i; | 261 | unsigned int i; |
| 246 | 262 | ||
| 247 | for (i = 0 ; i < mach->num_modes ; i++) { | 263 | for (i = 0; i < mach->num_modes; i++) { |
| 248 | if (modelist[i].xres >= var->xres && modelist[i].yres >= var->yres && | 264 | if (modelist[i].xres >= var->xres && |
| 249 | modelist[i].xres < best_x && modelist[i].yres < best_y && | 265 | modelist[i].yres >= var->yres && |
| 250 | modelist[i].bpp >= var->bits_per_pixel ) { | 266 | modelist[i].xres < best_x && |
| 267 | modelist[i].yres < best_y && | ||
| 268 | modelist[i].bpp >= var->bits_per_pixel) { | ||
| 251 | best_x = modelist[i].xres; | 269 | best_x = modelist[i].xres; |
| 252 | best_y = modelist[i].yres; | 270 | best_y = modelist[i].yres; |
| 253 | mode = &modelist[i]; | 271 | mode = &modelist[i]; |
| @@ -257,7 +275,8 @@ static struct pxafb_mode_info *pxafb_getmode(struct pxafb_mach_info *mach, struc | |||
| 257 | return mode; | 275 | return mode; |
| 258 | } | 276 | } |
| 259 | 277 | ||
| 260 | static void pxafb_setmode(struct fb_var_screeninfo *var, struct pxafb_mode_info *mode) | 278 | static void pxafb_setmode(struct fb_var_screeninfo *var, |
| 279 | struct pxafb_mode_info *mode) | ||
| 261 | { | 280 | { |
| 262 | var->xres = mode->xres; | 281 | var->xres = mode->xres; |
| 263 | var->yres = mode->yres; | 282 | var->yres = mode->yres; |
| @@ -315,19 +334,20 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
| 315 | var->yres_virtual = | 334 | var->yres_virtual = |
| 316 | max(var->yres_virtual, var->yres); | 335 | max(var->yres_virtual, var->yres); |
| 317 | 336 | ||
| 318 | /* | 337 | /* |
| 319 | * Setup the RGB parameters for this display. | 338 | * Setup the RGB parameters for this display. |
| 320 | * | 339 | * |
| 321 | * The pixel packing format is described on page 7-11 of the | 340 | * The pixel packing format is described on page 7-11 of the |
| 322 | * PXA2XX Developer's Manual. | 341 | * PXA2XX Developer's Manual. |
| 323 | */ | 342 | */ |
| 324 | if (var->bits_per_pixel == 16) { | 343 | if (var->bits_per_pixel == 16) { |
| 325 | var->red.offset = 11; var->red.length = 5; | 344 | var->red.offset = 11; var->red.length = 5; |
| 326 | var->green.offset = 5; var->green.length = 6; | 345 | var->green.offset = 5; var->green.length = 6; |
| 327 | var->blue.offset = 0; var->blue.length = 5; | 346 | var->blue.offset = 0; var->blue.length = 5; |
| 328 | var->transp.offset = var->transp.length = 0; | 347 | var->transp.offset = var->transp.length = 0; |
| 329 | } else { | 348 | } else { |
| 330 | var->red.offset = var->green.offset = var->blue.offset = var->transp.offset = 0; | 349 | var->red.offset = var->green.offset = 0; |
| 350 | var->blue.offset = var->transp.offset = 0; | ||
| 331 | var->red.length = 8; | 351 | var->red.length = 8; |
| 332 | var->green.length = 8; | 352 | var->green.length = 8; |
| 333 | var->blue.length = 8; | 353 | var->blue.length = 8; |
| @@ -345,8 +365,7 @@ static int pxafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
| 345 | 365 | ||
| 346 | static inline void pxafb_set_truecolor(u_int is_true_color) | 366 | static inline void pxafb_set_truecolor(u_int is_true_color) |
| 347 | { | 367 | { |
| 348 | pr_debug("pxafb: true_color = %d\n", is_true_color); | 368 | /* do your machine-specific setup if needed */ |
| 349 | // do your machine-specific setup if needed | ||
| 350 | } | 369 | } |
| 351 | 370 | ||
| 352 | /* | 371 | /* |
| @@ -357,9 +376,6 @@ static int pxafb_set_par(struct fb_info *info) | |||
| 357 | { | 376 | { |
| 358 | struct pxafb_info *fbi = (struct pxafb_info *)info; | 377 | struct pxafb_info *fbi = (struct pxafb_info *)info; |
| 359 | struct fb_var_screeninfo *var = &info->var; | 378 | struct fb_var_screeninfo *var = &info->var; |
| 360 | unsigned long palette_mem_size; | ||
| 361 | |||
| 362 | pr_debug("pxafb: set_par\n"); | ||
| 363 | 379 | ||
| 364 | if (var->bits_per_pixel == 16) | 380 | if (var->bits_per_pixel == 16) |
| 365 | fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; | 381 | fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR; |
| @@ -379,17 +395,10 @@ static int pxafb_set_par(struct fb_info *info) | |||
| 379 | if (var->bits_per_pixel == 16) | 395 | if (var->bits_per_pixel == 16) |
| 380 | fbi->palette_size = 0; | 396 | fbi->palette_size = 0; |
| 381 | else | 397 | else |
| 382 | fbi->palette_size = var->bits_per_pixel == 1 ? 4 : 1 << var->bits_per_pixel; | 398 | fbi->palette_size = var->bits_per_pixel == 1 ? |
| 383 | 399 | 4 : 1 << var->bits_per_pixel; | |
| 384 | if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) | ||
| 385 | palette_mem_size = fbi->palette_size * sizeof(u16); | ||
| 386 | else | ||
| 387 | palette_mem_size = fbi->palette_size * sizeof(u32); | ||
| 388 | |||
| 389 | pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size); | ||
| 390 | 400 | ||
| 391 | fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); | 401 | fbi->palette_cpu = (u16 *)&fbi->dma_buff->palette[0]; |
| 392 | fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; | ||
| 393 | 402 | ||
| 394 | /* | 403 | /* |
| 395 | * Set (any) board control register to handle new color depth | 404 | * Set (any) board control register to handle new color depth |
| @@ -407,36 +416,6 @@ static int pxafb_set_par(struct fb_info *info) | |||
| 407 | } | 416 | } |
| 408 | 417 | ||
| 409 | /* | 418 | /* |
| 410 | * Formal definition of the VESA spec: | ||
| 411 | * On | ||
| 412 | * This refers to the state of the display when it is in full operation | ||
| 413 | * Stand-By | ||
| 414 | * This defines an optional operating state of minimal power reduction with | ||
| 415 | * the shortest recovery time | ||
| 416 | * Suspend | ||
| 417 | * This refers to a level of power management in which substantial power | ||
| 418 | * reduction is achieved by the display. The display can have a longer | ||
| 419 | * recovery time from this state than from the Stand-by state | ||
| 420 | * Off | ||
| 421 | * This indicates that the display is consuming the lowest level of power | ||
| 422 | * and is non-operational. Recovery from this state may optionally require | ||
| 423 | * the user to manually power on the monitor | ||
| 424 | * | ||
| 425 | * Now, the fbdev driver adds an additional state, (blank), where they | ||
| 426 | * turn off the video (maybe by colormap tricks), but don't mess with the | ||
| 427 | * video itself: think of it semantically between on and Stand-By. | ||
| 428 | * | ||
| 429 | * So here's what we should do in our fbdev blank routine: | ||
| 430 | * | ||
| 431 | * VESA_NO_BLANKING (mode 0) Video on, front/back light on | ||
| 432 | * VESA_VSYNC_SUSPEND (mode 1) Video on, front/back light off | ||
| 433 | * VESA_HSYNC_SUSPEND (mode 2) Video on, front/back light off | ||
| 434 | * VESA_POWERDOWN (mode 3) Video off, front/back light off | ||
| 435 | * | ||
| 436 | * This will match the matrox implementation. | ||
| 437 | */ | ||
| 438 | |||
| 439 | /* | ||
| 440 | * pxafb_blank(): | 419 | * pxafb_blank(): |
| 441 | * Blank the display by setting all palette values to zero. Note, the | 420 | * Blank the display by setting all palette values to zero. Note, the |
| 442 | * 16 bpp mode does not really use the palette, so this will not | 421 | * 16 bpp mode does not really use the palette, so this will not |
| @@ -447,8 +426,6 @@ static int pxafb_blank(int blank, struct fb_info *info) | |||
| 447 | struct pxafb_info *fbi = (struct pxafb_info *)info; | 426 | struct pxafb_info *fbi = (struct pxafb_info *)info; |
| 448 | int i; | 427 | int i; |
| 449 | 428 | ||
| 450 | pr_debug("pxafb: blank=%d\n", blank); | ||
| 451 | |||
| 452 | switch (blank) { | 429 | switch (blank) { |
| 453 | case FB_BLANK_POWERDOWN: | 430 | case FB_BLANK_POWERDOWN: |
| 454 | case FB_BLANK_VSYNC_SUSPEND: | 431 | case FB_BLANK_VSYNC_SUSPEND: |
| @@ -460,11 +437,11 @@ static int pxafb_blank(int blank, struct fb_info *info) | |||
| 460 | pxafb_setpalettereg(i, 0, 0, 0, 0, info); | 437 | pxafb_setpalettereg(i, 0, 0, 0, 0, info); |
| 461 | 438 | ||
| 462 | pxafb_schedule_work(fbi, C_DISABLE); | 439 | pxafb_schedule_work(fbi, C_DISABLE); |
| 463 | //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); | 440 | /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ |
| 464 | break; | 441 | break; |
| 465 | 442 | ||
| 466 | case FB_BLANK_UNBLANK: | 443 | case FB_BLANK_UNBLANK: |
| 467 | //TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); | 444 | /* TODO if (pxafb_blank_helper) pxafb_blank_helper(blank); */ |
| 468 | if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || | 445 | if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || |
| 469 | fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) | 446 | fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) |
| 470 | fb_set_cmap(&fbi->fb.cmap, info); | 447 | fb_set_cmap(&fbi->fb.cmap, info); |
| @@ -480,7 +457,7 @@ static int pxafb_mmap(struct fb_info *info, | |||
| 480 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; | 457 | unsigned long off = vma->vm_pgoff << PAGE_SHIFT; |
| 481 | 458 | ||
| 482 | if (off < info->fix.smem_len) { | 459 | if (off < info->fix.smem_len) { |
| 483 | vma->vm_pgoff += 1; | 460 | vma->vm_pgoff += fbi->video_offset / PAGE_SIZE; |
| 484 | return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu, | 461 | return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu, |
| 485 | fbi->map_dma, fbi->map_size); | 462 | fbi->map_dma, fbi->map_size); |
| 486 | } | 463 | } |
| @@ -529,7 +506,8 @@ static struct fb_ops pxafb_ops = { | |||
| 529 | * | 506 | * |
| 530 | * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below. | 507 | * Factoring the 10^4 and 10^-12 out gives 10^-8 == 1 / 100000000 as used below. |
| 531 | */ | 508 | */ |
| 532 | static inline unsigned int get_pcd(struct pxafb_info *fbi, unsigned int pixclock) | 509 | static inline unsigned int get_pcd(struct pxafb_info *fbi, |
| 510 | unsigned int pixclock) | ||
| 533 | { | 511 | { |
| 534 | unsigned long long pcd; | 512 | unsigned long long pcd; |
| 535 | 513 | ||
| @@ -555,7 +533,7 @@ static inline void set_hsync_time(struct pxafb_info *fbi, unsigned int pcd) | |||
| 555 | unsigned long htime; | 533 | unsigned long htime; |
| 556 | 534 | ||
| 557 | if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) { | 535 | if ((pcd == 0) || (fbi->fb.var.hsync_len == 0)) { |
| 558 | fbi->hsync_time=0; | 536 | fbi->hsync_time = 0; |
| 559 | return; | 537 | return; |
| 560 | } | 538 | } |
| 561 | 539 | ||
| @@ -576,71 +554,231 @@ unsigned long pxafb_get_hsync_time(struct device *dev) | |||
| 576 | } | 554 | } |
| 577 | EXPORT_SYMBOL(pxafb_get_hsync_time); | 555 | EXPORT_SYMBOL(pxafb_get_hsync_time); |
| 578 | 556 | ||
| 579 | /* | 557 | static int setup_frame_dma(struct pxafb_info *fbi, int dma, int pal, |
| 580 | * pxafb_activate_var(): | 558 | unsigned int offset, size_t size) |
| 581 | * Configures LCD Controller based on entries in var parameter. Settings are | ||
| 582 | * only written to the controller if changes were made. | ||
| 583 | */ | ||
| 584 | static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info *fbi) | ||
| 585 | { | 559 | { |
| 586 | struct pxafb_lcd_reg new_regs; | 560 | struct pxafb_dma_descriptor *dma_desc, *pal_desc; |
| 587 | u_long flags; | 561 | unsigned int dma_desc_off, pal_desc_off; |
| 588 | u_int lines_per_panel, pcd = get_pcd(fbi, var->pixclock); | ||
| 589 | 562 | ||
| 590 | pr_debug("pxafb: Configuring PXA LCD\n"); | 563 | if (dma < 0 || dma >= DMA_MAX) |
| 564 | return -EINVAL; | ||
| 591 | 565 | ||
| 592 | pr_debug("var: xres=%d hslen=%d lm=%d rm=%d\n", | 566 | dma_desc = &fbi->dma_buff->dma_desc[dma]; |
| 593 | var->xres, var->hsync_len, | 567 | dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[dma]); |
| 594 | var->left_margin, var->right_margin); | ||
| 595 | pr_debug("var: yres=%d vslen=%d um=%d bm=%d\n", | ||
| 596 | var->yres, var->vsync_len, | ||
| 597 | var->upper_margin, var->lower_margin); | ||
| 598 | pr_debug("var: pixclock=%d pcd=%d\n", var->pixclock, pcd); | ||
| 599 | 568 | ||
| 600 | #if DEBUG_VAR | 569 | dma_desc->fsadr = fbi->screen_dma + offset; |
| 601 | if (var->xres < 16 || var->xres > 1024) | 570 | dma_desc->fidr = 0; |
| 602 | printk(KERN_ERR "%s: invalid xres %d\n", | 571 | dma_desc->ldcmd = size; |
| 603 | fbi->fb.fix.id, var->xres); | 572 | |
| 604 | switch(var->bits_per_pixel) { | 573 | if (pal < 0 || pal >= PAL_MAX) { |
| 605 | case 1: | 574 | dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; |
| 606 | case 2: | 575 | fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; |
| 607 | case 4: | 576 | } else { |
| 608 | case 8: | 577 | pal_desc = &fbi->dma_buff->pal_desc[dma]; |
| 609 | case 16: | 578 | pal_desc_off = offsetof(struct pxafb_dma_buff, pal_desc[dma]); |
| 610 | break; | 579 | |
| 611 | default: | 580 | pal_desc->fsadr = fbi->dma_buff_phys + pal * PALETTE_SIZE; |
| 612 | printk(KERN_ERR "%s: invalid bit depth %d\n", | 581 | pal_desc->fidr = 0; |
| 613 | fbi->fb.fix.id, var->bits_per_pixel); | 582 | |
| 614 | break; | 583 | if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) |
| 584 | pal_desc->ldcmd = fbi->palette_size * sizeof(u16); | ||
| 585 | else | ||
| 586 | pal_desc->ldcmd = fbi->palette_size * sizeof(u32); | ||
| 587 | |||
| 588 | pal_desc->ldcmd |= LDCMD_PAL; | ||
| 589 | |||
| 590 | /* flip back and forth between palette and frame buffer */ | ||
| 591 | pal_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; | ||
| 592 | dma_desc->fdadr = fbi->dma_buff_phys + pal_desc_off; | ||
| 593 | fbi->fdadr[dma] = fbi->dma_buff_phys + dma_desc_off; | ||
| 615 | } | 594 | } |
| 616 | if (var->hsync_len < 1 || var->hsync_len > 64) | ||
| 617 | printk(KERN_ERR "%s: invalid hsync_len %d\n", | ||
| 618 | fbi->fb.fix.id, var->hsync_len); | ||
| 619 | if (var->left_margin < 1 || var->left_margin > 255) | ||
| 620 | printk(KERN_ERR "%s: invalid left_margin %d\n", | ||
| 621 | fbi->fb.fix.id, var->left_margin); | ||
| 622 | if (var->right_margin < 1 || var->right_margin > 255) | ||
| 623 | printk(KERN_ERR "%s: invalid right_margin %d\n", | ||
| 624 | fbi->fb.fix.id, var->right_margin); | ||
| 625 | if (var->yres < 1 || var->yres > 1024) | ||
| 626 | printk(KERN_ERR "%s: invalid yres %d\n", | ||
| 627 | fbi->fb.fix.id, var->yres); | ||
| 628 | if (var->vsync_len < 1 || var->vsync_len > 64) | ||
| 629 | printk(KERN_ERR "%s: invalid vsync_len %d\n", | ||
| 630 | fbi->fb.fix.id, var->vsync_len); | ||
| 631 | if (var->upper_margin < 0 || var->upper_margin > 255) | ||
| 632 | printk(KERN_ERR "%s: invalid upper_margin %d\n", | ||
| 633 | fbi->fb.fix.id, var->upper_margin); | ||
| 634 | if (var->lower_margin < 0 || var->lower_margin > 255) | ||
| 635 | printk(KERN_ERR "%s: invalid lower_margin %d\n", | ||
| 636 | fbi->fb.fix.id, var->lower_margin); | ||
| 637 | #endif | ||
| 638 | 595 | ||
| 639 | new_regs.lccr0 = fbi->lccr0 | | 596 | return 0; |
| 640 | (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | | 597 | } |
| 641 | LCCR0_QDM | LCCR0_BM | LCCR0_OUM); | 598 | |
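
The palette branch of setup_frame_dma() above links two descriptors into a small ring, so the controller alternately reloads the palette and fetches the frame buffer. Below is a minimal standalone illustration of that shape; the struct only mimics the real pxafb_dma_descriptor and the addresses and sizes are made up.

    #include <stdio.h>
    #include <stdint.h>

    struct ex_desc {
        uint32_t fdadr; /* physical address of the next descriptor */
        uint32_t fsadr; /* physical address of the data to fetch */
        uint32_t fidr;
        uint32_t ldcmd; /* length of the transfer, plus flags */
    };

    int main(void)
    {
        uint32_t buff_phys = 0xa0000000u;  /* pretend DMA buffer address */
        uint32_t frame_desc_off = 0x00, pal_desc_off = 0x10;

        struct ex_desc frame = {
            .fdadr = buff_phys + pal_desc_off,   /* frame -> palette */
            .fsadr = 0xa0100000u,                /* frame buffer */
            .ldcmd = 640 * 480 * 2,
        };
        struct ex_desc pal = {
            .fdadr = buff_phys + frame_desc_off, /* palette -> frame */
            .fsadr = buff_phys + 0x400,          /* palette data */
            .ldcmd = 16 * sizeof(uint16_t),
        };

        printf("frame.fdadr=0x%08x pal.fdadr=0x%08x\n",
               (unsigned)frame.fdadr, (unsigned)pal.fdadr);
        return 0;
    }

The driver's real descriptors live inside the single DMA-coherent pxafb_dma_buff allocation, which is why their physical addresses are formed as dma_buff_phys plus an offsetof() into that struct.
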
| 599 | #ifdef CONFIG_FB_PXA_SMARTPANEL | ||
| 600 | static int setup_smart_dma(struct pxafb_info *fbi) | ||
| 601 | { | ||
| 602 | struct pxafb_dma_descriptor *dma_desc; | ||
| 603 | unsigned long dma_desc_off, cmd_buff_off; | ||
| 604 | |||
| 605 | dma_desc = &fbi->dma_buff->dma_desc[DMA_CMD]; | ||
| 606 | dma_desc_off = offsetof(struct pxafb_dma_buff, dma_desc[DMA_CMD]); | ||
| 607 | cmd_buff_off = offsetof(struct pxafb_dma_buff, cmd_buff); | ||
| 608 | |||
| 609 | dma_desc->fdadr = fbi->dma_buff_phys + dma_desc_off; | ||
| 610 | dma_desc->fsadr = fbi->dma_buff_phys + cmd_buff_off; | ||
| 611 | dma_desc->fidr = 0; | ||
| 612 | dma_desc->ldcmd = fbi->n_smart_cmds * sizeof(uint16_t); | ||
| 613 | |||
| 614 | fbi->fdadr[DMA_CMD] = dma_desc->fdadr; | ||
| 615 | return 0; | ||
| 616 | } | ||
| 617 | |||
| 618 | int pxafb_smart_flush(struct fb_info *info) | ||
| 619 | { | ||
| 620 | struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); | ||
| 621 | uint32_t prsr; | ||
| 622 | int ret = 0; | ||
| 623 | |||
| 624 | /* disable controller until all registers are set up */ | ||
| 625 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); | ||
| 626 | |||
| 627 | /* 1. make it an even number of commands to align on 32-bit boundary | ||
| 628 | * 2. add the interrupt command to the end of the chain so we can | ||
| 629 | * keep track of the end of the transfer | ||
| 630 | */ | ||
| 631 | |||
| 632 | while (fbi->n_smart_cmds & 1) | ||
| 633 | fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_NOOP; | ||
| 634 | |||
| 635 | fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_INTERRUPT; | ||
| 636 | fbi->smart_cmds[fbi->n_smart_cmds++] = SMART_CMD_WAIT_FOR_VSYNC; | ||
| 637 | setup_smart_dma(fbi); | ||
| 638 | |||
| 639 | /* continue to execute next command */ | ||
| 640 | prsr = lcd_readl(fbi, PRSR) | PRSR_ST_OK | PRSR_CON_NT; | ||
| 641 | lcd_writel(fbi, PRSR, prsr); | ||
| 642 | |||
| 643 | /* stop the processor in case it executed "wait for sync" cmd */ | ||
| 644 | lcd_writel(fbi, CMDCR, 0x0001); | ||
| 645 | |||
| 646 | /* don't send interrupts for fifo underruns on channel 6 */ | ||
| 647 | lcd_writel(fbi, LCCR5, LCCR5_IUM(6)); | ||
| 648 | |||
| 649 | lcd_writel(fbi, LCCR1, fbi->reg_lccr1); | ||
| 650 | lcd_writel(fbi, LCCR2, fbi->reg_lccr2); | ||
| 651 | lcd_writel(fbi, LCCR3, fbi->reg_lccr3); | ||
| 652 | lcd_writel(fbi, FDADR0, fbi->fdadr[0]); | ||
| 653 | lcd_writel(fbi, FDADR6, fbi->fdadr[6]); | ||
| 654 | |||
| 655 | /* begin sending */ | ||
| 656 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); | ||
| 657 | |||
| 658 | if (wait_for_completion_timeout(&fbi->command_done, HZ/2) == 0) { | ||
| 659 | pr_warning("%s: timeout waiting for command done\n", | ||
| 660 | __func__); | ||
| 661 | ret = -ETIMEDOUT; | ||
| 662 | } | ||
| 663 | |||
| 664 | /* quick disable */ | ||
| 665 | prsr = lcd_readl(fbi, PRSR) & ~(PRSR_ST_OK | PRSR_CON_NT); | ||
| 666 | lcd_writel(fbi, PRSR, prsr); | ||
| 667 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); | ||
| 668 | lcd_writel(fbi, FDADR6, 0); | ||
| 669 | fbi->n_smart_cmds = 0; | ||
| 670 | return ret; | ||
| 671 | } | ||
| 672 | |||
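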
| 673 | int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds) | ||
| 674 | { | ||
| 675 | int i; | ||
| 676 | struct pxafb_info *fbi = container_of(info, struct pxafb_info, fb); | ||
| 677 | |||
| 678 | /* leave 2 commands for INTERRUPT and WAIT_FOR_SYNC */ | ||
| 679 | for (i = 0; i < n_cmds; i++) { | ||
| 680 | if (fbi->n_smart_cmds == CMD_BUFF_SIZE - 8) | ||
| 681 | pxafb_smart_flush(info); | ||
| 682 | |||
| 683 | fbi->smart_cmds[fbi->n_smart_cmds++] = *cmds++; | ||
| 684 | } | ||
| 685 | |||
| 686 | return 0; | ||
| 687 | } | ||
| 688 | |||
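
A hedged usage sketch of the two helpers defined above, roughly how a board's smart_update() hook might batch panel command words before kicking one transfer. The function name and the command values are placeholders, and the prototypes are restated here only because the real header is outside this hunk.

    #include <linux/fb.h>
    #include <linux/kernel.h>
    #include <linux/types.h>

    /* prototypes match the definitions in pxafb.c above */
    int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds);
    int pxafb_smart_flush(struct fb_info *info);

    static int example_smart_update(struct fb_info *info)
    {
        uint16_t cmds[] = {
            0x0001, /* placeholder command words for a hypothetical panel */
            0x0002,
            0x0003,
        };

        /* buffer the words; queueing flushes automatically when nearly full */
        pxafb_smart_queue(info, cmds, ARRAY_SIZE(cmds));

        /* pad to an even count, append the terminators, start DMA and wait */
        return pxafb_smart_flush(info);
    }
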
| 689 | static unsigned int __smart_timing(unsigned time_ns, unsigned long lcd_clk) | ||
| 690 | { | ||
| 691 | unsigned int t = (time_ns * (lcd_clk / 1000000) / 1000); | ||
| 692 | return (t == 0) ? 1 : t; | ||
| 693 | } | ||
| 694 | |||
| 695 | static void setup_smart_timing(struct pxafb_info *fbi, | ||
| 696 | struct fb_var_screeninfo *var) | ||
| 697 | { | ||
| 698 | struct pxafb_mach_info *inf = fbi->dev->platform_data; | ||
| 699 | struct pxafb_mode_info *mode = &inf->modes[0]; | ||
| 700 | unsigned long lclk = clk_get_rate(fbi->clk); | ||
| 701 | unsigned t1, t2, t3, t4; | ||
| 702 | |||
| 703 | t1 = max(mode->a0csrd_set_hld, mode->a0cswr_set_hld); | ||
| 704 | t2 = max(mode->rd_pulse_width, mode->wr_pulse_width); | ||
| 705 | t3 = mode->op_hold_time; | ||
| 706 | t4 = mode->cmd_inh_time; | ||
| 707 | |||
| 708 | fbi->reg_lccr1 = | ||
| 709 | LCCR1_DisWdth(var->xres) | | ||
| 710 | LCCR1_BegLnDel(__smart_timing(t1, lclk)) | | ||
| 711 | LCCR1_EndLnDel(__smart_timing(t2, lclk)) | | ||
| 712 | LCCR1_HorSnchWdth(__smart_timing(t3, lclk)); | ||
| 713 | |||
| 714 | fbi->reg_lccr2 = LCCR2_DisHght(var->yres); | ||
| 715 | fbi->reg_lccr3 = LCCR3_PixClkDiv(__smart_timing(t4, lclk)); | ||
| 716 | |||
| 717 | /* FIXME: make this configurable */ | ||
| 718 | fbi->reg_cmdcr = 1; | ||
| 719 | } | ||
| 720 | |||
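
The conversion in __smart_timing() is worth a worked number: a time in nanoseconds times the LCD clock in MHz, divided by 1000, gives clock cycles, clamped to at least one. A standalone copy with illustrative inputs (104 MHz is just an example rate):

    #include <stdio.h>

    /* same conversion as __smart_timing() above */
    static unsigned int ns_to_lcd_clocks(unsigned int time_ns, unsigned long lcd_clk)
    {
        unsigned int t = time_ns * (lcd_clk / 1000000) / 1000;
        return t ? t : 1; /* never program a zero-length phase */
    }

    int main(void)
    {
        printf("%u clocks\n", ns_to_lcd_clocks(50, 104000000UL)); /* -> 5 */
        printf("%u clocks\n", ns_to_lcd_clocks(5, 104000000UL));  /* -> 1 (clamped) */
        return 0;
    }

So a 50 ns requirement becomes 5 cycles, while a 5 ns requirement truncates to 0 and is bumped to the minimum of 1, which is why the helper never returns zero.
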
| 721 | static int pxafb_smart_thread(void *arg) | ||
| 722 | { | ||
| 723 | struct pxafb_info *fbi = arg; | ||
| 724 | struct pxafb_mach_info *inf = fbi->dev->platform_data; | ||
| 725 | |||
| 726 | if (!fbi || !inf->smart_update) { | ||
| 727 | pr_err("%s: not properly initialized, thread terminated\n", | ||
| 728 | __func__); | ||
| 729 | return -EINVAL; | ||
| 730 | } | ||
| 642 | 731 | ||
| 643 | new_regs.lccr1 = | 732 | pr_debug("%s(): task starting\n", __func__); |
| 733 | |||
| 734 | set_freezable(); | ||
| 735 | while (!kthread_should_stop()) { | ||
| 736 | |||
| 737 | if (try_to_freeze()) | ||
| 738 | continue; | ||
| 739 | |||
| 740 | if (fbi->state == C_ENABLE) { | ||
| 741 | inf->smart_update(&fbi->fb); | ||
| 742 | complete(&fbi->refresh_done); | ||
| 743 | } | ||
| 744 | |||
| 745 | set_current_state(TASK_INTERRUPTIBLE); | ||
| 746 | schedule_timeout(30 * HZ / 1000); | ||
| 747 | } | ||
| 748 | |||
| 749 | pr_debug("%s(): task ending\n", __func__); | ||
| 750 | return 0; | ||
| 751 | } | ||
| 752 | |||
| 753 | static int pxafb_smart_init(struct pxafb_info *fbi) | ||
| 754 | { | ||
| 755 | fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, | ||
| 756 | "lcd_refresh"); | ||
| 757 | if (IS_ERR(fbi->smart_thread)) { | ||
| 758 | printk(KERN_ERR "%s: unable to create kernel thread\n", | ||
| 759 | __func__); | ||
| 760 | return PTR_ERR(fbi->smart_thread); | ||
| 761 | } | ||
| 762 | return 0; | ||
| 763 | } | ||
| 764 | #else | ||
| 765 | int pxafb_smart_queue(struct fb_info *info, uint16_t *cmds, int n_cmds) | ||
| 766 | { | ||
| 767 | return 0; | ||
| 768 | } | ||
| 769 | |||
| 770 | int pxafb_smart_flush(struct fb_info *info) | ||
| 771 | { | ||
| 772 | return 0; | ||
| 773 | } | ||
| 774 | #endif /* CONFIG_FB_PXA_SMARTPANEL */ | ||
| 775 | |||
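
A sketch of the other half of the refresh thread's lifecycle, which is not part of this hunk: the thread created with kthread_run() above is assumed to be reaped later with kthread_stop(), which is what makes kthread_should_stop() return true inside the loop. The function below is illustrative, not the driver's actual teardown path.

    #include <linux/err.h>
    #include <linux/kthread.h>

    static int example_start_then_stop(struct pxafb_info *fbi)
    {
        fbi->smart_thread = kthread_run(pxafb_smart_thread, fbi, "lcd_refresh");
        if (IS_ERR(fbi->smart_thread))
            return PTR_ERR(fbi->smart_thread);

        /* ... much later, e.g. on driver removal ... */
        kthread_stop(fbi->smart_thread); /* wakes the loop, waits for it to exit */
        return 0;
    }
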
| 776 | static void setup_parallel_timing(struct pxafb_info *fbi, | ||
| 777 | struct fb_var_screeninfo *var) | ||
| 778 | { | ||
| 779 | unsigned int lines_per_panel, pcd = get_pcd(fbi, var->pixclock); | ||
| 780 | |||
| 781 | fbi->reg_lccr1 = | ||
| 644 | LCCR1_DisWdth(var->xres) + | 782 | LCCR1_DisWdth(var->xres) + |
| 645 | LCCR1_HorSnchWdth(var->hsync_len) + | 783 | LCCR1_HorSnchWdth(var->hsync_len) + |
| 646 | LCCR1_BegLnDel(var->left_margin) + | 784 | LCCR1_BegLnDel(var->left_margin) + |
| @@ -654,110 +792,118 @@ static int pxafb_activate_var(struct fb_var_screeninfo *var, struct pxafb_info * | |||
| 654 | if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) | 792 | if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) |
| 655 | lines_per_panel /= 2; | 793 | lines_per_panel /= 2; |
| 656 | 794 | ||
| 657 | new_regs.lccr2 = | 795 | fbi->reg_lccr2 = |
| 658 | LCCR2_DisHght(lines_per_panel) + | 796 | LCCR2_DisHght(lines_per_panel) + |
| 659 | LCCR2_VrtSnchWdth(var->vsync_len) + | 797 | LCCR2_VrtSnchWdth(var->vsync_len) + |
| 660 | LCCR2_BegFrmDel(var->upper_margin) + | 798 | LCCR2_BegFrmDel(var->upper_margin) + |
| 661 | LCCR2_EndFrmDel(var->lower_margin); | 799 | LCCR2_EndFrmDel(var->lower_margin); |
| 662 | 800 | ||
| 663 | new_regs.lccr3 = fbi->lccr3 | | 801 | fbi->reg_lccr3 = fbi->lccr3 | |
| 664 | pxafb_bpp_to_lccr3(var) | | 802 | (var->sync & FB_SYNC_HOR_HIGH_ACT ? |
| 665 | (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | | 803 | LCCR3_HorSnchH : LCCR3_HorSnchL) | |
| 666 | (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL); | 804 | (var->sync & FB_SYNC_VERT_HIGH_ACT ? |
| 805 | LCCR3_VrtSnchH : LCCR3_VrtSnchL); | ||
| 806 | |||
| 807 | if (pcd) { | ||
| 808 | fbi->reg_lccr3 |= LCCR3_PixClkDiv(pcd); | ||
| 809 | set_hsync_time(fbi, pcd); | ||
| 810 | } | ||
| 811 | } | ||
| 667 | 812 | ||
| 668 | if (pcd) | 813 | /* |
| 669 | new_regs.lccr3 |= LCCR3_PixClkDiv(pcd); | 814 | * pxafb_activate_var(): |
| 815 | * Configures LCD Controller based on entries in var parameter. | ||
| 816 | * Settings are only written to the controller if changes were made. | ||
| 817 | */ | ||
| 818 | static int pxafb_activate_var(struct fb_var_screeninfo *var, | ||
| 819 | struct pxafb_info *fbi) | ||
| 820 | { | ||
| 821 | u_long flags; | ||
| 822 | size_t nbytes; | ||
| 670 | 823 | ||
| 671 | pr_debug("nlccr0 = 0x%08x\n", new_regs.lccr0); | 824 | #if DEBUG_VAR |
| 672 | pr_debug("nlccr1 = 0x%08x\n", new_regs.lccr1); | 825 | if (!(fbi->lccr0 & LCCR0_LCDT)) { |
| 673 | pr_debug("nlccr2 = 0x%08x\n", new_regs.lccr2); | 826 | if (var->xres < 16 || var->xres > 1024) |
| 674 | pr_debug("nlccr3 = 0x%08x\n", new_regs.lccr3); | 827 | printk(KERN_ERR "%s: invalid xres %d\n", |
| 828 | fbi->fb.fix.id, var->xres); | ||
| 829 | switch (var->bits_per_pixel) { | ||
| 830 | case 1: | ||
| 831 | case 2: | ||
| 832 | case 4: | ||
| 833 | case 8: | ||
| 834 | case 16: | ||
| 835 | break; | ||
| 836 | default: | ||
| 837 | printk(KERN_ERR "%s: invalid bit depth %d\n", | ||
| 838 | fbi->fb.fix.id, var->bits_per_pixel); | ||
| 839 | break; | ||
| 840 | } | ||
| 675 | 841 | ||
| 842 | if (var->hsync_len < 1 || var->hsync_len > 64) | ||
| 843 | printk(KERN_ERR "%s: invalid hsync_len %d\n", | ||
| 844 | fbi->fb.fix.id, var->hsync_len); | ||
| 845 | if (var->left_margin < 1 || var->left_margin > 255) | ||
| 846 | printk(KERN_ERR "%s: invalid left_margin %d\n", | ||
| 847 | fbi->fb.fix.id, var->left_margin); | ||
| 848 | if (var->right_margin < 1 || var->right_margin > 255) | ||
| 849 | printk(KERN_ERR "%s: invalid right_margin %d\n", | ||
| 850 | fbi->fb.fix.id, var->right_margin); | ||
| 851 | if (var->yres < 1 || var->yres > 1024) | ||
| 852 | printk(KERN_ERR "%s: invalid yres %d\n", | ||
| 853 | fbi->fb.fix.id, var->yres); | ||
| 854 | if (var->vsync_len < 1 || var->vsync_len > 64) | ||
| 855 | printk(KERN_ERR "%s: invalid vsync_len %d\n", | ||
| 856 | fbi->fb.fix.id, var->vsync_len); | ||
| 857 | if (var->upper_margin < 0 || var->upper_margin > 255) | ||
| 858 | printk(KERN_ERR "%s: invalid upper_margin %d\n", | ||
| 859 | fbi->fb.fix.id, var->upper_margin); | ||
| 860 | if (var->lower_margin < 0 || var->lower_margin > 255) | ||
| 861 | printk(KERN_ERR "%s: invalid lower_margin %d\n", | ||
| 862 | fbi->fb.fix.id, var->lower_margin); | ||
| 863 | } | ||
| 864 | #endif | ||
| 676 | /* Update shadow copy atomically */ | 865 | /* Update shadow copy atomically */ |
| 677 | local_irq_save(flags); | 866 | local_irq_save(flags); |
| 678 | 867 | ||
| 679 | /* setup dma descriptors */ | 868 | #ifdef CONFIG_FB_PXA_SMARTPANEL |
| 680 | fbi->dmadesc_fblow_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 3*16); | 869 | if (fbi->lccr0 & LCCR0_LCDT) |
| 681 | fbi->dmadesc_fbhigh_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 2*16); | 870 | setup_smart_timing(fbi, var); |
| 682 | fbi->dmadesc_palette_cpu = (struct pxafb_dma_descriptor *)((unsigned int)fbi->palette_cpu - 1*16); | 871 | else |
| 683 | 872 | #endif | |
| 684 | fbi->dmadesc_fblow_dma = fbi->palette_dma - 3*16; | 873 | setup_parallel_timing(fbi, var); |
| 685 | fbi->dmadesc_fbhigh_dma = fbi->palette_dma - 2*16; | ||
| 686 | fbi->dmadesc_palette_dma = fbi->palette_dma - 1*16; | ||
| 687 | |||
| 688 | #define BYTES_PER_PANEL (lines_per_panel * fbi->fb.fix.line_length) | ||
| 689 | |||
| 690 | /* populate descriptors */ | ||
| 691 | fbi->dmadesc_fblow_cpu->fdadr = fbi->dmadesc_fblow_dma; | ||
| 692 | fbi->dmadesc_fblow_cpu->fsadr = fbi->screen_dma + BYTES_PER_PANEL; | ||
| 693 | fbi->dmadesc_fblow_cpu->fidr = 0; | ||
| 694 | fbi->dmadesc_fblow_cpu->ldcmd = BYTES_PER_PANEL; | ||
| 695 | 874 | ||
| 696 | fbi->fdadr1 = fbi->dmadesc_fblow_dma; /* only used in dual-panel mode */ | 875 | fbi->reg_lccr0 = fbi->lccr0 | |
| 876 | (LCCR0_LDM | LCCR0_SFM | LCCR0_IUM | LCCR0_EFM | | ||
| 877 | LCCR0_QDM | LCCR0_BM | LCCR0_OUM); | ||
| 697 | 878 | ||
| 698 | fbi->dmadesc_fbhigh_cpu->fsadr = fbi->screen_dma; | 879 | fbi->reg_lccr3 |= pxafb_bpp_to_lccr3(var); |
| 699 | fbi->dmadesc_fbhigh_cpu->fidr = 0; | ||
| 700 | fbi->dmadesc_fbhigh_cpu->ldcmd = BYTES_PER_PANEL; | ||
| 701 | 880 | ||
| 702 | fbi->dmadesc_palette_cpu->fsadr = fbi->palette_dma; | 881 | nbytes = var->yres * fbi->fb.fix.line_length; |
| 703 | fbi->dmadesc_palette_cpu->fidr = 0; | ||
| 704 | if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) | ||
| 705 | fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size * | ||
| 706 | sizeof(u16); | ||
| 707 | else | ||
| 708 | fbi->dmadesc_palette_cpu->ldcmd = fbi->palette_size * | ||
| 709 | sizeof(u32); | ||
| 710 | fbi->dmadesc_palette_cpu->ldcmd |= LDCMD_PAL; | ||
| 711 | 882 | ||
| 712 | if (var->bits_per_pixel == 16) { | 883 | if ((fbi->lccr0 & LCCR0_SDS) == LCCR0_Dual) { |
| 713 | /* palette shouldn't be loaded in true-color mode */ | 884 | nbytes = nbytes / 2; |
| 714 | fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_fbhigh_dma; | 885 | setup_frame_dma(fbi, DMA_LOWER, PAL_NONE, nbytes, nbytes); |
| 715 | fbi->fdadr0 = fbi->dmadesc_fbhigh_dma; /* no pal just fbhigh */ | ||
| 716 | /* init it to something, even though we won't be using it */ | ||
| 717 | fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_palette_dma; | ||
| 718 | } else { | ||
| 719 | fbi->dmadesc_palette_cpu->fdadr = fbi->dmadesc_fbhigh_dma; | ||
| 720 | fbi->dmadesc_fbhigh_cpu->fdadr = fbi->dmadesc_palette_dma; | ||
| 721 | fbi->fdadr0 = fbi->dmadesc_palette_dma; /* flips back and forth between pal and fbhigh */ | ||
| 722 | } | 886 | } |
| 723 | 887 | ||
| 724 | #if 0 | 888 | if ((var->bits_per_pixel >= 16) || (fbi->lccr0 & LCCR0_LCDT)) |
| 725 | pr_debug("fbi->dmadesc_fblow_cpu = 0x%p\n", fbi->dmadesc_fblow_cpu); | 889 | setup_frame_dma(fbi, DMA_BASE, PAL_NONE, 0, nbytes); |
| 726 | pr_debug("fbi->dmadesc_fbhigh_cpu = 0x%p\n", fbi->dmadesc_fbhigh_cpu); | 890 | else |
| 727 | pr_debug("fbi->dmadesc_palette_cpu = 0x%p\n", fbi->dmadesc_palette_cpu); | 891 | setup_frame_dma(fbi, DMA_BASE, PAL_BASE, 0, nbytes); |
| 728 | pr_debug("fbi->dmadesc_fblow_dma = 0x%x\n", fbi->dmadesc_fblow_dma); | ||
| 729 | pr_debug("fbi->dmadesc_fbhigh_dma = 0x%x\n", fbi->dmadesc_fbhigh_dma); | ||
| 730 | pr_debug("fbi->dmadesc_palette_dma = 0x%x\n", fbi->dmadesc_palette_dma); | ||
| 731 | |||
| 732 | pr_debug("fbi->dmadesc_fblow_cpu->fdadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fdadr); | ||
| 733 | pr_debug("fbi->dmadesc_fbhigh_cpu->fdadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fdadr); | ||
| 734 | pr_debug("fbi->dmadesc_palette_cpu->fdadr = 0x%x\n", fbi->dmadesc_palette_cpu->fdadr); | ||
| 735 | |||
| 736 | pr_debug("fbi->dmadesc_fblow_cpu->fsadr = 0x%x\n", fbi->dmadesc_fblow_cpu->fsadr); | ||
| 737 | pr_debug("fbi->dmadesc_fbhigh_cpu->fsadr = 0x%x\n", fbi->dmadesc_fbhigh_cpu->fsadr); | ||
| 738 | pr_debug("fbi->dmadesc_palette_cpu->fsadr = 0x%x\n", fbi->dmadesc_palette_cpu->fsadr); | ||
| 739 | |||
| 740 | pr_debug("fbi->dmadesc_fblow_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fblow_cpu->ldcmd); | ||
| 741 | pr_debug("fbi->dmadesc_fbhigh_cpu->ldcmd = 0x%x\n", fbi->dmadesc_fbhigh_cpu->ldcmd); | ||
| 742 | pr_debug("fbi->dmadesc_palette_cpu->ldcmd = 0x%x\n", fbi->dmadesc_palette_cpu->ldcmd); | ||
| 743 | #endif | ||
| 744 | 892 | ||
| 745 | fbi->reg_lccr0 = new_regs.lccr0; | 893 | fbi->reg_lccr4 = lcd_readl(fbi, LCCR4) & ~LCCR4_PAL_FOR_MASK; |
| 746 | fbi->reg_lccr1 = new_regs.lccr1; | ||
| 747 | fbi->reg_lccr2 = new_regs.lccr2; | ||
| 748 | fbi->reg_lccr3 = new_regs.lccr3; | ||
| 749 | fbi->reg_lccr4 = LCCR4 & (~LCCR4_PAL_FOR_MASK); | ||
| 750 | fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); | 894 | fbi->reg_lccr4 |= (fbi->lccr4 & LCCR4_PAL_FOR_MASK); |
| 751 | set_hsync_time(fbi, pcd); | ||
| 752 | local_irq_restore(flags); | 895 | local_irq_restore(flags); |
| 753 | 896 | ||
| 754 | /* | 897 | /* |
| 755 | * Only update the registers if the controller is enabled | 898 | * Only update the registers if the controller is enabled |
| 756 | * and something has changed. | 899 | * and something has changed. |
| 757 | */ | 900 | */ |
| 758 | if ((LCCR0 != fbi->reg_lccr0) || (LCCR1 != fbi->reg_lccr1) || | 901 | if ((lcd_readl(fbi, LCCR0) != fbi->reg_lccr0) || |
| 759 | (LCCR2 != fbi->reg_lccr2) || (LCCR3 != fbi->reg_lccr3) || | 902 | (lcd_readl(fbi, LCCR1) != fbi->reg_lccr1) || |
| 760 | (FDADR0 != fbi->fdadr0) || (FDADR1 != fbi->fdadr1)) | 903 | (lcd_readl(fbi, LCCR2) != fbi->reg_lccr2) || |
| 904 | (lcd_readl(fbi, LCCR3) != fbi->reg_lccr3) || | ||
| 905 | (lcd_readl(fbi, FDADR0) != fbi->fdadr[0]) || | ||
| 906 | (lcd_readl(fbi, FDADR1) != fbi->fdadr[1])) | ||
| 761 | pxafb_schedule_work(fbi, C_REENABLE); | 907 | pxafb_schedule_work(fbi, C_REENABLE); |
| 762 | 908 | ||
| 763 | return 0; | 909 | return 0; |
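
The tail of pxafb_activate_var() above is the usual shadow-register pattern: the wanted values live in the fbi->reg_* fields, and the controller is only bounced (C_REENABLE) when the live registers no longer match them. A standalone toy version of that comparison, with a fake array standing in for reading back LCCR0..LCCR3:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t hw_regs[4]; /* stand-in for reading back LCCR0..LCCR3 */

    static bool needs_reenable(const uint32_t want[4])
    {
        int i;

        for (i = 0; i < 4; i++)
            if (hw_regs[i] != want[i])
                return true; /* hardware no longer matches the shadow copy */
        return false;
    }

    int main(void)
    {
        uint32_t shadow[4] = { 0x81, 0x1c00027f, 0x071305df, 0x04700008 };
        int i;

        printf("reenable? %d\n", needs_reenable(shadow)); /* 1: differs */
        for (i = 0; i < 4; i++)
            hw_regs[i] = shadow[i];
        printf("reenable? %d\n", needs_reenable(shadow)); /* 0: matches */
        return 0;
    }
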
| @@ -773,8 +919,8 @@ static inline void __pxafb_backlight_power(struct pxafb_info *fbi, int on) | |||
| 773 | { | 919 | { |
| 774 | pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); | 920 | pr_debug("pxafb: backlight o%s\n", on ? "n" : "ff"); |
| 775 | 921 | ||
| 776 | if (pxafb_backlight_power) | 922 | if (pxafb_backlight_power) |
| 777 | pxafb_backlight_power(on); | 923 | pxafb_backlight_power(on); |
| 778 | } | 924 | } |
| 779 | 925 | ||
| 780 | static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) | 926 | static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) |
| @@ -788,11 +934,11 @@ static inline void __pxafb_lcd_power(struct pxafb_info *fbi, int on) | |||
| 788 | static void pxafb_setup_gpio(struct pxafb_info *fbi) | 934 | static void pxafb_setup_gpio(struct pxafb_info *fbi) |
| 789 | { | 935 | { |
| 790 | int gpio, ldd_bits; | 936 | int gpio, ldd_bits; |
| 791 | unsigned int lccr0 = fbi->lccr0; | 937 | unsigned int lccr0 = fbi->lccr0; |
| 792 | 938 | ||
| 793 | /* | 939 | /* |
| 794 | * setup is based on type of panel supported | 940 | * setup is based on type of panel supported |
| 795 | */ | 941 | */ |
| 796 | 942 | ||
| 797 | /* 4 bit interface */ | 943 | /* 4 bit interface */ |
| 798 | if ((lccr0 & LCCR0_CMS) == LCCR0_Mono && | 944 | if ((lccr0 & LCCR0_CMS) == LCCR0_Mono && |
| @@ -801,21 +947,25 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi) | |||
| 801 | ldd_bits = 4; | 947 | ldd_bits = 4; |
| 802 | 948 | ||
| 803 | /* 8 bit interface */ | 949 | /* 8 bit interface */ |
| 804 | else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono && | 950 | else if (((lccr0 & LCCR0_CMS) == LCCR0_Mono && |
| 805 | ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) || | 951 | ((lccr0 & LCCR0_SDS) == LCCR0_Dual || |
| 806 | ((lccr0 & LCCR0_CMS) == LCCR0_Color && | 952 | (lccr0 & LCCR0_DPD) == LCCR0_8PixMono)) || |
| 807 | (lccr0 & LCCR0_PAS) == LCCR0_Pas && (lccr0 & LCCR0_SDS) == LCCR0_Sngl)) | 953 | ((lccr0 & LCCR0_CMS) == LCCR0_Color && |
| 954 | (lccr0 & LCCR0_PAS) == LCCR0_Pas && | ||
| 955 | (lccr0 & LCCR0_SDS) == LCCR0_Sngl)) | ||
| 808 | ldd_bits = 8; | 956 | ldd_bits = 8; |
| 809 | 957 | ||
| 810 | /* 16 bit interface */ | 958 | /* 16 bit interface */ |
| 811 | else if ((lccr0 & LCCR0_CMS) == LCCR0_Color && | 959 | else if ((lccr0 & LCCR0_CMS) == LCCR0_Color && |
| 812 | ((lccr0 & LCCR0_SDS) == LCCR0_Dual || (lccr0 & LCCR0_PAS) == LCCR0_Act)) | 960 | ((lccr0 & LCCR0_SDS) == LCCR0_Dual || |
| 961 | (lccr0 & LCCR0_PAS) == LCCR0_Act)) | ||
| 813 | ldd_bits = 16; | 962 | ldd_bits = 16; |
| 814 | 963 | ||
| 815 | else { | 964 | else { |
| 816 | printk(KERN_ERR "pxafb_setup_gpio: unable to determine bits per pixel\n"); | 965 | printk(KERN_ERR "pxafb_setup_gpio: unable to determine " |
| 966 | "bits per pixel\n"); | ||
| 817 | return; | 967 | return; |
| 818 | } | 968 | } |
| 819 | 969 | ||
| 820 | for (gpio = 58; ldd_bits; gpio++, ldd_bits--) | 970 | for (gpio = 58; ldd_bits; gpio++, ldd_bits--) |
| 821 | pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); | 971 | pxa_gpio_mode(gpio | GPIO_ALT_FN_2_OUT); |
| @@ -828,8 +978,8 @@ static void pxafb_setup_gpio(struct pxafb_info *fbi) | |||
| 828 | static void pxafb_enable_controller(struct pxafb_info *fbi) | 978 | static void pxafb_enable_controller(struct pxafb_info *fbi) |
| 829 | { | 979 | { |
| 830 | pr_debug("pxafb: Enabling LCD controller\n"); | 980 | pr_debug("pxafb: Enabling LCD controller\n"); |
| 831 | pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr0); | 981 | pr_debug("fdadr0 0x%08x\n", (unsigned int) fbi->fdadr[0]); |
| 832 | pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr1); | 982 | pr_debug("fdadr1 0x%08x\n", (unsigned int) fbi->fdadr[1]); |
| 833 | pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); | 983 | pr_debug("reg_lccr0 0x%08x\n", (unsigned int) fbi->reg_lccr0); |
| 834 | pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); | 984 | pr_debug("reg_lccr1 0x%08x\n", (unsigned int) fbi->reg_lccr1); |
| 835 | pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); | 985 | pr_debug("reg_lccr2 0x%08x\n", (unsigned int) fbi->reg_lccr2); |
| @@ -838,40 +988,40 @@ static void pxafb_enable_controller(struct pxafb_info *fbi) | |||
| 838 | /* enable LCD controller clock */ | 988 | /* enable LCD controller clock */ |
| 839 | clk_enable(fbi->clk); | 989 | clk_enable(fbi->clk); |
| 840 | 990 | ||
| 991 | if (fbi->lccr0 & LCCR0_LCDT) | ||
| 992 | return; | ||
| 993 | |||
| 841 | /* Sequence from 11.7.10 */ | 994 | /* Sequence from 11.7.10 */ |
| 842 | LCCR3 = fbi->reg_lccr3; | 995 | lcd_writel(fbi, LCCR3, fbi->reg_lccr3); |
| 843 | LCCR2 = fbi->reg_lccr2; | 996 | lcd_writel(fbi, LCCR2, fbi->reg_lccr2); |
| 844 | LCCR1 = fbi->reg_lccr1; | 997 | lcd_writel(fbi, LCCR1, fbi->reg_lccr1); |
| 845 | LCCR0 = fbi->reg_lccr0 & ~LCCR0_ENB; | 998 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 & ~LCCR0_ENB); |
| 846 | 999 | ||
| 847 | FDADR0 = fbi->fdadr0; | 1000 | lcd_writel(fbi, FDADR0, fbi->fdadr[0]); |
| 848 | FDADR1 = fbi->fdadr1; | 1001 | lcd_writel(fbi, FDADR1, fbi->fdadr[1]); |
| 849 | LCCR0 |= LCCR0_ENB; | 1002 | lcd_writel(fbi, LCCR0, fbi->reg_lccr0 | LCCR0_ENB); |
| 850 | |||
| 851 | pr_debug("FDADR0 0x%08x\n", (unsigned int) FDADR0); | ||
| 852 | pr_debug("FDADR1 0x%08x\n", (unsigned int) FDADR1); | ||
| 853 | pr_debug("LCCR0 0x%08x\n", (unsigned int) LCCR0); | ||
| 854 | pr_debug("LCCR1 0x%08x\n", (unsigned int) LCCR1); | ||
| 855 | pr_debug("LCCR2 0x%08x\n", (unsigned int) LCCR2); | ||
| 856 | pr_debug("LCCR3 0x%08x\n", (unsigned int) LCCR3); | ||
| 857 | pr_debug("LCCR4 0x%08x\n", (unsigned int) LCCR4); | ||
| 858 | } | 1003 | } |
| 859 | 1004 | ||
| 860 | static void pxafb_disable_controller(struct pxafb_info *fbi) | 1005 | static void pxafb_disable_controller(struct pxafb_info *fbi) |
| 861 | { | 1006 | { |
| 862 | DECLARE_WAITQUEUE(wait, current); | 1007 | uint32_t lccr0; |
| 863 | 1008 | ||
| 864 | pr_debug("pxafb: disabling LCD controller\n"); | 1009 | #ifdef CONFIG_FB_PXA_SMARTPANEL |
| 1010 | if (fbi->lccr0 & LCCR0_LCDT) { | ||
| 1011 | wait_for_completion_timeout(&fbi->refresh_done, | ||
| 1012 | 200 * HZ / 1000); | ||
| 1013 | return; | ||
| 1014 | } | ||
| 1015 | #endif | ||
| 865 | 1016 | ||
| 866 | set_current_state(TASK_UNINTERRUPTIBLE); | 1017 | /* Clear LCD Status Register */ |
| 867 | add_wait_queue(&fbi->ctrlr_wait, &wait); | 1018 | lcd_writel(fbi, LCSR, 0xffffffff); |
| 868 | 1019 | ||
| 869 | LCSR = 0xffffffff; /* Clear LCD Status Register */ | 1020 | lccr0 = lcd_readl(fbi, LCCR0) & ~LCCR0_LDM; |
| 870 | LCCR0 &= ~LCCR0_LDM; /* Enable LCD Disable Done Interrupt */ | 1021 | lcd_writel(fbi, LCCR0, lccr0); |
| 871 | LCCR0 |= LCCR0_DIS; /* Disable LCD Controller */ | 1022 | lcd_writel(fbi, LCCR0, lccr0 | LCCR0_DIS); |
| 872 | 1023 | ||
| 873 | schedule_timeout(200 * HZ / 1000); | 1024 | wait_for_completion_timeout(&fbi->disable_done, 200 * HZ / 1000); |
| 874 | remove_wait_queue(&fbi->ctrlr_wait, &wait); | ||
| 875 | 1025 | ||
| 876 | /* disable LCD controller clock */ | 1026 | /* disable LCD controller clock */ |
| 877 | clk_disable(fbi->clk); | 1027 | clk_disable(fbi->clk); |
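
The rework above replaces the open-coded wait queue with a completion: the disable path arms fbi->disable_done and waits with a timeout, and the interrupt handler signals it when the controller reports that it has shut down. A reduced sketch of that handshake with placeholder names; disable_done is assumed to have been set up with init_completion() at probe time, as the init code later in this patch does.

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>
    #include <linux/kernel.h>

    struct example_lcd {
        struct completion disable_done;
    };

    static void example_disable_and_wait(struct example_lcd *lcd)
    {
        /* ...unmask the "disable done" interrupt and set the DIS bit here... */
        if (!wait_for_completion_timeout(&lcd->disable_done,
                                         msecs_to_jiffies(200)))
            pr_warning("LCD disable-done interrupt never arrived\n");
    }

    static irqreturn_t example_lcd_irq(int irq, void *dev_id)
    {
        struct example_lcd *lcd = dev_id;

        complete(&lcd->disable_done); /* wakes example_disable_and_wait() */
        return IRQ_HANDLED;
    }
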
| @@ -883,14 +1033,20 @@ static void pxafb_disable_controller(struct pxafb_info *fbi) | |||
| 883 | static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) | 1033 | static irqreturn_t pxafb_handle_irq(int irq, void *dev_id) |
| 884 | { | 1034 | { |
| 885 | struct pxafb_info *fbi = dev_id; | 1035 | struct pxafb_info *fbi = dev_id; |
| 886 | unsigned int lcsr = LCSR; | 1036 | unsigned int lccr0, lcsr = lcd_readl(fbi, LCSR); |
| 887 | 1037 | ||
| 888 | if (lcsr & LCSR_LDD) { | 1038 | if (lcsr & LCSR_LDD) { |
| 889 | LCCR0 |= LCCR0_LDM; | 1039 | lccr0 = lcd_readl(fbi, LCCR0); |
| 890 | wake_up(&fbi->ctrlr_wait); | 1040 | lcd_writel(fbi, LCCR0, lccr0 | LCCR0_LDM); |
| 1041 | complete(&fbi->disable_done); | ||
| 891 | } | 1042 | } |
| 892 | 1043 | ||
| 893 | LCSR = lcsr; | 1044 | #ifdef CONFIG_FB_PXA_SMARTPANEL |
| 1045 | if (lcsr & LCSR_CMD_INT) | ||
| 1046 | complete(&fbi->command_done); | ||
| 1047 | #endif | ||
| 1048 | |||
| 1049 | lcd_writel(fbi, LCSR, lcsr); | ||
| 894 | return IRQ_HANDLED; | 1050 | return IRQ_HANDLED; |
| 895 | } | 1051 | } |
| 896 | 1052 | ||
| @@ -921,7 +1077,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) | |||
| 921 | */ | 1077 | */ |
| 922 | if (old_state != C_DISABLE && old_state != C_DISABLE_PM) { | 1078 | if (old_state != C_DISABLE && old_state != C_DISABLE_PM) { |
| 923 | fbi->state = state; | 1079 | fbi->state = state; |
| 924 | //TODO __pxafb_lcd_power(fbi, 0); | 1080 | /* TODO __pxafb_lcd_power(fbi, 0); */ |
| 925 | pxafb_disable_controller(fbi); | 1081 | pxafb_disable_controller(fbi); |
| 926 | } | 1082 | } |
| 927 | break; | 1083 | break; |
| @@ -948,7 +1104,7 @@ static void set_ctrlr_state(struct pxafb_info *fbi, u_int state) | |||
| 948 | if (old_state == C_DISABLE_CLKCHANGE) { | 1104 | if (old_state == C_DISABLE_CLKCHANGE) { |
| 949 | fbi->state = C_ENABLE; | 1105 | fbi->state = C_ENABLE; |
| 950 | pxafb_enable_controller(fbi); | 1106 | pxafb_enable_controller(fbi); |
| 951 | //TODO __pxafb_lcd_power(fbi, 1); | 1107 | /* TODO __pxafb_lcd_power(fbi, 1); */ |
| 952 | } | 1108 | } |
| 953 | break; | 1109 | break; |
| 954 | 1110 | ||
| @@ -1019,7 +1175,7 @@ static int | |||
| 1019 | pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) | 1175 | pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) |
| 1020 | { | 1176 | { |
| 1021 | struct pxafb_info *fbi = TO_INF(nb, freq_transition); | 1177 | struct pxafb_info *fbi = TO_INF(nb, freq_transition); |
| 1022 | //TODO struct cpufreq_freqs *f = data; | 1178 | /* TODO struct cpufreq_freqs *f = data; */ |
| 1023 | u_int pcd; | 1179 | u_int pcd; |
| 1024 | 1180 | ||
| 1025 | switch (val) { | 1181 | switch (val) { |
| @@ -1030,7 +1186,8 @@ pxafb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) | |||
| 1030 | case CPUFREQ_POSTCHANGE: | 1186 | case CPUFREQ_POSTCHANGE: |
| 1031 | pcd = get_pcd(fbi, fbi->fb.var.pixclock); | 1187 | pcd = get_pcd(fbi, fbi->fb.var.pixclock); |
| 1032 | set_hsync_time(fbi, pcd); | 1188 | set_hsync_time(fbi, pcd); |
| 1033 | fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); | 1189 | fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | |
| 1190 | LCCR3_PixClkDiv(pcd); | ||
| 1034 | set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); | 1191 | set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); |
| 1035 | break; | 1192 | break; |
| 1036 | } | 1193 | } |
| @@ -1050,18 +1207,8 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data) | |||
| 1050 | pr_debug("min dma period: %d ps, " | 1207 | pr_debug("min dma period: %d ps, " |
| 1051 | "new clock %d kHz\n", pxafb_display_dma_period(var), | 1208 | "new clock %d kHz\n", pxafb_display_dma_period(var), |
| 1052 | policy->max); | 1209 | policy->max); |
| 1053 | // TODO: fill in min/max values | 1210 | /* TODO: fill in min/max values */ |
| 1054 | break; | ||
| 1055 | #if 0 | ||
| 1056 | case CPUFREQ_NOTIFY: | ||
| 1057 | printk(KERN_ERR "%s: got CPUFREQ_NOTIFY\n", __FUNCTION__); | ||
| 1058 | do {} while(0); | ||
| 1059 | /* todo: panic if min/max values aren't fulfilled | ||
| 1060 | * [can't really happen unless there's a bug in the | ||
| 1061 | * CPU policy verification process * | ||
| 1062 | */ | ||
| 1063 | break; | 1211 | break; |
| 1064 | #endif | ||
| 1065 | } | 1212 | } |
| 1066 | return 0; | 1213 | return 0; |
| 1067 | } | 1214 | } |
| @@ -1102,21 +1249,21 @@ static int pxafb_resume(struct platform_device *dev) | |||
| 1102 | */ | 1249 | */ |
| 1103 | static int __init pxafb_map_video_memory(struct pxafb_info *fbi) | 1250 | static int __init pxafb_map_video_memory(struct pxafb_info *fbi) |
| 1104 | { | 1251 | { |
| 1105 | u_long palette_mem_size; | ||
| 1106 | |||
| 1107 | /* | 1252 | /* |
| 1108 | * We reserve one page for the palette, plus the size | 1253 | * We reserve one page for the palette, plus the size |
| 1109 | * of the framebuffer. | 1254 | * of the framebuffer. |
| 1110 | */ | 1255 | */ |
| 1111 | fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE); | 1256 | fbi->video_offset = PAGE_ALIGN(sizeof(struct pxafb_dma_buff)); |
| 1257 | fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + fbi->video_offset); | ||
| 1112 | fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size, | 1258 | fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size, |
| 1113 | &fbi->map_dma, GFP_KERNEL); | 1259 | &fbi->map_dma, GFP_KERNEL); |
| 1114 | 1260 | ||
| 1115 | if (fbi->map_cpu) { | 1261 | if (fbi->map_cpu) { |
| 1116 | /* prevent initial garbage on screen */ | 1262 | /* prevent initial garbage on screen */ |
| 1117 | memset(fbi->map_cpu, 0, fbi->map_size); | 1263 | memset(fbi->map_cpu, 0, fbi->map_size); |
| 1118 | fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE; | 1264 | fbi->fb.screen_base = fbi->map_cpu + fbi->video_offset; |
| 1119 | fbi->screen_dma = fbi->map_dma + PAGE_SIZE; | 1265 | fbi->screen_dma = fbi->map_dma + fbi->video_offset; |
| 1266 | |||
| 1120 | /* | 1267 | /* |
| 1121 | * FIXME: this is actually the wrong thing to place in | 1268 | * FIXME: this is actually the wrong thing to place in |
| 1122 | * smem_start. But fbdev suffers from the problem that | 1269 | * smem_start. But fbdev suffers from the problem that |
| @@ -1126,27 +1273,86 @@ static int __init pxafb_map_video_memory(struct pxafb_info *fbi) | |||
| 1126 | fbi->fb.fix.smem_start = fbi->screen_dma; | 1273 | fbi->fb.fix.smem_start = fbi->screen_dma; |
| 1127 | fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; | 1274 | fbi->palette_size = fbi->fb.var.bits_per_pixel == 8 ? 256 : 16; |
| 1128 | 1275 | ||
| 1129 | if ((fbi->lccr4 & LCCR4_PAL_FOR_MASK) == LCCR4_PAL_FOR_0) | 1276 | fbi->dma_buff = (void *) fbi->map_cpu; |
| 1130 | palette_mem_size = fbi->palette_size * sizeof(u16); | 1277 | fbi->dma_buff_phys = fbi->map_dma; |
| 1131 | else | 1278 | fbi->palette_cpu = (u16 *) fbi->dma_buff->palette; |
| 1132 | palette_mem_size = fbi->palette_size * sizeof(u32); | ||
| 1133 | 1279 | ||
| 1134 | pr_debug("pxafb: palette_mem_size = 0x%08lx\n", palette_mem_size); | 1280 | #ifdef CONFIG_FB_PXA_SMARTPANEL |
| 1135 | 1281 | fbi->smart_cmds = (uint16_t *) fbi->dma_buff->cmd_buff; | |
| 1136 | fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size); | 1282 | fbi->n_smart_cmds = 0; |
| 1137 | fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size; | 1283 | #endif |
| 1138 | } | 1284 | } |
| 1139 | 1285 | ||
| 1140 | return fbi->map_cpu ? 0 : -ENOMEM; | 1286 | return fbi->map_cpu ? 0 : -ENOMEM; |
| 1141 | } | 1287 | } |
| 1142 | 1288 | ||
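
The new layout above packs everything into one write-combined allocation: the pxafb_dma_buff (descriptors, palette and, for smart panels, the command buffer) sits at the start, and the visible frame buffer begins at the next page boundary. A standalone illustration of the offset arithmetic; the struct size and frame size are made-up stand-ins, and EX_PAGE_ALIGN mimics the kernel macro.

    #include <stdio.h>
    #include <stddef.h>

    #define EX_PAGE_SIZE     4096UL
    #define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

    int main(void)
    {
        size_t dma_buff_size = 3 * 1024;      /* pretend sizeof(struct pxafb_dma_buff) */
        size_t smem_len      = 640 * 480 * 2; /* 16bpp frame buffer */

        size_t video_offset = EX_PAGE_ALIGN(dma_buff_size);
        size_t map_size     = EX_PAGE_ALIGN(smem_len + video_offset);

        /* screen_base = map_cpu + video_offset, screen_dma = map_dma + video_offset */
        printf("video_offset=%zu map_size=%zu\n", video_offset, map_size);
        return 0;
    }

Both the CPU and bus views of the frame buffer are then just the mapping address plus that same video_offset, which is also what pxafb_mmap() now adds to vm_pgoff.
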
| 1289 | static void pxafb_decode_mode_info(struct pxafb_info *fbi, | ||
| 1290 | struct pxafb_mode_info *modes, | ||
| 1291 | unsigned int num_modes) | ||
| 1292 | { | ||
| 1293 | unsigned int i, smemlen; | ||
| 1294 | |||
| 1295 | pxafb_setmode(&fbi->fb.var, &modes[0]); | ||
| 1296 | |||
| 1297 | for (i = 0; i < num_modes; i++) { | ||
| 1298 | smemlen = modes[i].xres * modes[i].yres * modes[i].bpp / 8; | ||
| 1299 | if (smemlen > fbi->fb.fix.smem_len) | ||
| 1300 | fbi->fb.fix.smem_len = smemlen; | ||
| 1301 | } | ||
| 1302 | } | ||
| 1303 | |||
| 1304 | static int pxafb_decode_mach_info(struct pxafb_info *fbi, | ||
| 1305 | struct pxafb_mach_info *inf) | ||
| 1306 | { | ||
| 1307 | unsigned int lcd_conn = inf->lcd_conn; | ||
| 1308 | |||
| 1309 | fbi->cmap_inverse = inf->cmap_inverse; | ||
| 1310 | fbi->cmap_static = inf->cmap_static; | ||
| 1311 | |||
| 1312 | switch (lcd_conn & 0xf) { | ||
| 1313 | case LCD_TYPE_MONO_STN: | ||
| 1314 | fbi->lccr0 = LCCR0_CMS; | ||
| 1315 | break; | ||
| 1316 | case LCD_TYPE_MONO_DSTN: | ||
| 1317 | fbi->lccr0 = LCCR0_CMS | LCCR0_SDS; | ||
| 1318 | break; | ||
| 1319 | case LCD_TYPE_COLOR_STN: | ||
| 1320 | fbi->lccr0 = 0; | ||
| 1321 | break; | ||
| 1322 | case LCD_TYPE_COLOR_DSTN: | ||
| 1323 | fbi->lccr0 = LCCR0_SDS; | ||
| 1324 | break; | ||
| 1325 | case LCD_TYPE_COLOR_TFT: | ||
| 1326 | fbi->lccr0 = LCCR0_PAS; | ||
| 1327 | break; | ||
| 1328 | case LCD_TYPE_SMART_PANEL: | ||
| 1329 | fbi->lccr0 = LCCR0_LCDT | LCCR0_PAS; | ||
| 1330 | break; | ||
| 1331 | default: | ||
| 1332 | /* fall back to backward compatibility way */ | ||
| 1333 | fbi->lccr0 = inf->lccr0; | ||
| 1334 | fbi->lccr3 = inf->lccr3; | ||
| 1335 | fbi->lccr4 = inf->lccr4; | ||
| 1336 | return -EINVAL; | ||
| 1337 | } | ||
| 1338 | |||
| 1339 | if (lcd_conn == LCD_MONO_STN_8BPP) | ||
| 1340 | fbi->lccr0 |= LCCR0_DPD; | ||
| 1341 | |||
| 1342 | fbi->lccr3 = LCCR3_Acb((inf->lcd_conn >> 10) & 0xff); | ||
| 1343 | fbi->lccr3 |= (lcd_conn & LCD_BIAS_ACTIVE_LOW) ? LCCR3_OEP : 0; | ||
| 1344 | fbi->lccr3 |= (lcd_conn & LCD_PCLK_EDGE_FALL) ? LCCR3_PCP : 0; | ||
| 1345 | |||
| 1346 | pxafb_decode_mode_info(fbi, inf->modes, inf->num_modes); | ||
| 1347 | return 0; | ||
| 1348 | } | ||
| 1349 | |||
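
pxafb_decode_mach_info() above treats lcd_conn as a packed word: the low nibble selects the panel type and bits 10..17 carry the AC bias pin frequency, with separate flag bits for bias polarity and pixel clock edge. A standalone sketch of that unpacking; the mask values are placeholders, since the real LCD_* encodings live in the platform header rather than in this hunk.

    #include <stdio.h>

    #define EX_TYPE_MASK        0x0f       /* low nibble: panel type */
    #define EX_TYPE_COLOR_TFT   0x05       /* placeholder encoding */
    #define EX_BIAS_ACTIVE_LOW  (1u << 20) /* placeholder flag bit */

    int main(void)
    {
        unsigned int lcd_conn =
            EX_TYPE_COLOR_TFT | (0x10 << 10) | EX_BIAS_ACTIVE_LOW;

        unsigned int type = lcd_conn & EX_TYPE_MASK;
        unsigned int acb  = (lcd_conn >> 10) & 0xff; /* AC bias, as in the driver */
        int bias_low      = !!(lcd_conn & EX_BIAS_ACTIVE_LOW);

        printf("type=%u acb=%u bias_active_low=%d\n", type, acb, bias_low);
        return 0;
    }
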
| 1143 | static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) | 1350 | static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) |
| 1144 | { | 1351 | { |
| 1145 | struct pxafb_info *fbi; | 1352 | struct pxafb_info *fbi; |
| 1146 | void *addr; | 1353 | void *addr; |
| 1147 | struct pxafb_mach_info *inf = dev->platform_data; | 1354 | struct pxafb_mach_info *inf = dev->platform_data; |
| 1148 | struct pxafb_mode_info *mode = inf->modes; | 1355 | struct pxafb_mode_info *mode = inf->modes; |
| 1149 | int i, smemlen; | ||
| 1150 | 1356 | ||
| 1151 | /* Alloc the pxafb_info and pseudo_palette in one step */ | 1357 | /* Alloc the pxafb_info and pseudo_palette in one step */ |
| 1152 | fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); | 1358 | fbi = kmalloc(sizeof(struct pxafb_info) + sizeof(u32) * 16, GFP_KERNEL); |
| @@ -1186,187 +1392,233 @@ static struct pxafb_info * __init pxafb_init_fbinfo(struct device *dev) | |||
| 1186 | addr = addr + sizeof(struct pxafb_info); | 1392 | addr = addr + sizeof(struct pxafb_info); |
| 1187 | fbi->fb.pseudo_palette = addr; | 1393 | fbi->fb.pseudo_palette = addr; |
| 1188 | 1394 | ||
| 1189 | pxafb_setmode(&fbi->fb.var, mode); | 1395 | fbi->state = C_STARTUP; |
| 1396 | fbi->task_state = (u_char)-1; | ||
| 1190 | 1397 | ||
| 1191 | fbi->cmap_inverse = inf->cmap_inverse; | 1398 | pxafb_decode_mach_info(fbi, inf); |
| 1192 | fbi->cmap_static = inf->cmap_static; | ||
| 1193 | |||
| 1194 | fbi->lccr0 = inf->lccr0; | ||
| 1195 | fbi->lccr3 = inf->lccr3; | ||
| 1196 | fbi->lccr4 = inf->lccr4; | ||
| 1197 | fbi->state = C_STARTUP; | ||
| 1198 | fbi->task_state = (u_char)-1; | ||
| 1199 | |||
| 1200 | for (i = 0; i < inf->num_modes; i++) { | ||
| 1201 | smemlen = mode[i].xres * mode[i].yres * mode[i].bpp / 8; | ||
| 1202 | if (smemlen > fbi->fb.fix.smem_len) | ||
| 1203 | fbi->fb.fix.smem_len = smemlen; | ||
| 1204 | } | ||
| 1205 | 1399 | ||
| 1206 | init_waitqueue_head(&fbi->ctrlr_wait); | 1400 | init_waitqueue_head(&fbi->ctrlr_wait); |
| 1207 | INIT_WORK(&fbi->task, pxafb_task); | 1401 | INIT_WORK(&fbi->task, pxafb_task); |
| 1208 | init_MUTEX(&fbi->ctrlr_sem); | 1402 | init_MUTEX(&fbi->ctrlr_sem); |
| 1403 | init_completion(&fbi->disable_done); | ||
| 1404 | #ifdef CONFIG_FB_PXA_SMARTPANEL | ||
| 1405 | init_completion(&fbi->command_done); | ||
| 1406 | init_completion(&fbi->refresh_done); | ||
| 1407 | #endif | ||
| 1209 | 1408 | ||
| 1210 | return fbi; | 1409 | return fbi; |
| 1211 | } | 1410 | } |
| 1212 | 1411 | ||
| 1213 | #ifdef CONFIG_FB_PXA_PARAMETERS | 1412 | #ifdef CONFIG_FB_PXA_PARAMETERS |
| 1214 | static int __init pxafb_parse_options(struct device *dev, char *options) | 1413 | static int __init parse_opt_mode(struct device *dev, const char *this_opt) |
| 1414 | { | ||
| 1415 | struct pxafb_mach_info *inf = dev->platform_data; | ||
| 1416 | |||
| 1417 | const char *name = this_opt+5; | ||
| 1418 | unsigned int namelen = strlen(name); | ||
| 1419 | int res_specified = 0, bpp_specified = 0; | ||
| 1420 | unsigned int xres = 0, yres = 0, bpp = 0; | ||
| 1421 | int yres_specified = 0; | ||
| 1422 | int i; | ||
| 1423 | for (i = namelen-1; i >= 0; i--) { | ||
| 1424 | switch (name[i]) { | ||
| 1425 | case '-': | ||
| 1426 | namelen = i; | ||
| 1427 | if (!bpp_specified && !yres_specified) { | ||
| 1428 | bpp = simple_strtoul(&name[i+1], NULL, 0); | ||
| 1429 | bpp_specified = 1; | ||
| 1430 | } else | ||
| 1431 | goto done; | ||
| 1432 | break; | ||
| 1433 | case 'x': | ||
| 1434 | if (!yres_specified) { | ||
| 1435 | yres = simple_strtoul(&name[i+1], NULL, 0); | ||
| 1436 | yres_specified = 1; | ||
| 1437 | } else | ||
| 1438 | goto done; | ||
| 1439 | break; | ||
| 1440 | case '0' ... '9': | ||
| 1441 | break; | ||
| 1442 | default: | ||
| 1443 | goto done; | ||
| 1444 | } | ||
| 1445 | } | ||
| 1446 | if (i < 0 && yres_specified) { | ||
| 1447 | xres = simple_strtoul(name, NULL, 0); | ||
| 1448 | res_specified = 1; | ||
| 1449 | } | ||
| 1450 | done: | ||
| 1451 | if (res_specified) { | ||
| 1452 | dev_info(dev, "overriding resolution: %dx%d\n", xres, yres); | ||
| 1453 | inf->modes[0].xres = xres; inf->modes[0].yres = yres; | ||
| 1454 | } | ||
| 1455 | if (bpp_specified) | ||
| 1456 | switch (bpp) { | ||
| 1457 | case 1: | ||
| 1458 | case 2: | ||
| 1459 | case 4: | ||
| 1460 | case 8: | ||
| 1461 | case 16: | ||
| 1462 | inf->modes[0].bpp = bpp; | ||
| 1463 | dev_info(dev, "overriding bit depth: %d\n", bpp); | ||
| 1464 | break; | ||
| 1465 | default: | ||
| 1466 | dev_err(dev, "Depth %d is not valid\n", bpp); | ||
| 1467 | return -EINVAL; | ||
| 1468 | } | ||
| 1469 | return 0; | ||
| 1470 | } | ||
| 1471 | |||
| 1472 | static int __init parse_opt(struct device *dev, char *this_opt) | ||
| 1215 | { | 1473 | { |
| 1216 | struct pxafb_mach_info *inf = dev->platform_data; | 1474 | struct pxafb_mach_info *inf = dev->platform_data; |
| 1475 | struct pxafb_mode_info *mode = &inf->modes[0]; | ||
| 1476 | char s[64]; | ||
| 1477 | |||
| 1478 | s[0] = '\0'; | ||
| 1479 | |||
| 1480 | if (!strncmp(this_opt, "mode:", 5)) { | ||
| 1481 | return parse_opt_mode(dev, this_opt); | ||
| 1482 | } else if (!strncmp(this_opt, "pixclock:", 9)) { | ||
| 1483 | mode->pixclock = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1484 | sprintf(s, "pixclock: %ld\n", mode->pixclock); | ||
| 1485 | } else if (!strncmp(this_opt, "left:", 5)) { | ||
| 1486 | mode->left_margin = simple_strtoul(this_opt+5, NULL, 0); | ||
| 1487 | sprintf(s, "left: %u\n", mode->left_margin); | ||
| 1488 | } else if (!strncmp(this_opt, "right:", 6)) { | ||
| 1489 | mode->right_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1490 | sprintf(s, "right: %u\n", mode->right_margin); | ||
| 1491 | } else if (!strncmp(this_opt, "upper:", 6)) { | ||
| 1492 | mode->upper_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1493 | sprintf(s, "upper: %u\n", mode->upper_margin); | ||
| 1494 | } else if (!strncmp(this_opt, "lower:", 6)) { | ||
| 1495 | mode->lower_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1496 | sprintf(s, "lower: %u\n", mode->lower_margin); | ||
| 1497 | } else if (!strncmp(this_opt, "hsynclen:", 9)) { | ||
| 1498 | mode->hsync_len = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1499 | sprintf(s, "hsynclen: %u\n", mode->hsync_len); | ||
| 1500 | } else if (!strncmp(this_opt, "vsynclen:", 9)) { | ||
| 1501 | mode->vsync_len = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1502 | sprintf(s, "vsynclen: %u\n", mode->vsync_len); | ||
| 1503 | } else if (!strncmp(this_opt, "hsync:", 6)) { | ||
| 1504 | if (simple_strtoul(this_opt+6, NULL, 0) == 0) { | ||
| 1505 | sprintf(s, "hsync: Active Low\n"); | ||
| 1506 | mode->sync &= ~FB_SYNC_HOR_HIGH_ACT; | ||
| 1507 | } else { | ||
| 1508 | sprintf(s, "hsync: Active High\n"); | ||
| 1509 | mode->sync |= FB_SYNC_HOR_HIGH_ACT; | ||
| 1510 | } | ||
| 1511 | } else if (!strncmp(this_opt, "vsync:", 6)) { | ||
| 1512 | if (simple_strtoul(this_opt+6, NULL, 0) == 0) { | ||
| 1513 | sprintf(s, "vsync: Active Low\n"); | ||
| 1514 | mode->sync &= ~FB_SYNC_VERT_HIGH_ACT; | ||
| 1515 | } else { | ||
| 1516 | sprintf(s, "vsync: Active High\n"); | ||
| 1517 | mode->sync |= FB_SYNC_VERT_HIGH_ACT; | ||
| 1518 | } | ||
| 1519 | } else if (!strncmp(this_opt, "dpc:", 4)) { | ||
| 1520 | if (simple_strtoul(this_opt+4, NULL, 0) == 0) { | ||
| 1521 | sprintf(s, "double pixel clock: false\n"); | ||
| 1522 | inf->lccr3 &= ~LCCR3_DPC; | ||
| 1523 | } else { | ||
| 1524 | sprintf(s, "double pixel clock: true\n"); | ||
| 1525 | inf->lccr3 |= LCCR3_DPC; | ||
| 1526 | } | ||
| 1527 | } else if (!strncmp(this_opt, "outputen:", 9)) { | ||
| 1528 | if (simple_strtoul(this_opt+9, NULL, 0) == 0) { | ||
| 1529 | sprintf(s, "output enable: active low\n"); | ||
| 1530 | inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL; | ||
| 1531 | } else { | ||
| 1532 | sprintf(s, "output enable: active high\n"); | ||
| 1533 | inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH; | ||
| 1534 | } | ||
| 1535 | } else if (!strncmp(this_opt, "pixclockpol:", 12)) { | ||
| 1536 | if (simple_strtoul(this_opt+12, NULL, 0) == 0) { | ||
| 1537 | sprintf(s, "pixel clock polarity: falling edge\n"); | ||
| 1538 | inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg; | ||
| 1539 | } else { | ||
| 1540 | sprintf(s, "pixel clock polarity: rising edge\n"); | ||
| 1541 | inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg; | ||
| 1542 | } | ||
| 1543 | } else if (!strncmp(this_opt, "color", 5)) { | ||
| 1544 | inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color; | ||
| 1545 | } else if (!strncmp(this_opt, "mono", 4)) { | ||
| 1546 | inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono; | ||
| 1547 | } else if (!strncmp(this_opt, "active", 6)) { | ||
| 1548 | inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act; | ||
| 1549 | } else if (!strncmp(this_opt, "passive", 7)) { | ||
| 1550 | inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas; | ||
| 1551 | } else if (!strncmp(this_opt, "single", 6)) { | ||
| 1552 | inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl; | ||
| 1553 | } else if (!strncmp(this_opt, "dual", 4)) { | ||
| 1554 | inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual; | ||
| 1555 | } else if (!strncmp(this_opt, "4pix", 4)) { | ||
| 1556 | inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono; | ||
| 1557 | } else if (!strncmp(this_opt, "8pix", 4)) { | ||
| 1558 | inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono; | ||
| 1559 | } else { | ||
| 1560 | dev_err(dev, "unknown option: %s\n", this_opt); | ||
| 1561 | return -EINVAL; | ||
| 1562 | } | ||
| 1563 | |||
| 1564 | if (s[0] != '\0') | ||
| 1565 | dev_info(dev, "override %s", s); | ||
| 1566 | |||
| 1567 | return 0; | ||
| 1568 | } | ||
| 1569 | |||
| 1570 | static int __init pxafb_parse_options(struct device *dev, char *options) | ||
| 1571 | { | ||
| 1217 | char *this_opt; | 1572 | char *this_opt; |
| 1573 | int ret; | ||
| 1218 | 1574 | ||
| 1219 | if (!options || !*options) | 1575 | if (!options || !*options) |
| 1220 | return 0; | 1576 | return 0; |
| 1221 | 1577 | ||
| 1222 | dev_dbg(dev, "options are \"%s\"\n", options ? options : "null"); | 1578 | dev_dbg(dev, "options are \"%s\"\n", options ? options : "null"); |
| 1223 | 1579 | ||
| 1224 | /* could be made table driven or similar?... */ | 1580 | /* could be made table driven or similar?... */ |
| 1225 | while ((this_opt = strsep(&options, ",")) != NULL) { | 1581 | while ((this_opt = strsep(&options, ",")) != NULL) { |
| 1226 | if (!strncmp(this_opt, "mode:", 5)) { | 1582 | ret = parse_opt(dev, this_opt); |
| 1227 | const char *name = this_opt+5; | 1583 | if (ret) |
| 1228 | unsigned int namelen = strlen(name); | 1584 | return ret; |
| 1229 | int res_specified = 0, bpp_specified = 0; | 1585 | } |
| 1230 | unsigned int xres = 0, yres = 0, bpp = 0; | 1586 | return 0; |
| 1231 | int yres_specified = 0; | 1587 | } |
| 1232 | int i; | 1588 | |
| 1233 | for (i = namelen-1; i >= 0; i--) { | 1589 | static char g_options[256] __devinitdata = ""; |
| 1234 | switch (name[i]) { | ||
| 1235 | case '-': | ||
| 1236 | namelen = i; | ||
| 1237 | if (!bpp_specified && !yres_specified) { | ||
| 1238 | bpp = simple_strtoul(&name[i+1], NULL, 0); | ||
| 1239 | bpp_specified = 1; | ||
| 1240 | } else | ||
| 1241 | goto done; | ||
| 1242 | break; | ||
| 1243 | case 'x': | ||
| 1244 | if (!yres_specified) { | ||
| 1245 | yres = simple_strtoul(&name[i+1], NULL, 0); | ||
| 1246 | yres_specified = 1; | ||
| 1247 | } else | ||
| 1248 | goto done; | ||
| 1249 | break; | ||
| 1250 | case '0' ... '9': | ||
| 1251 | break; | ||
| 1252 | default: | ||
| 1253 | goto done; | ||
| 1254 | } | ||
| 1255 | } | ||
| 1256 | if (i < 0 && yres_specified) { | ||
| 1257 | xres = simple_strtoul(name, NULL, 0); | ||
| 1258 | res_specified = 1; | ||
| 1259 | } | ||
| 1260 | done: | ||
| 1261 | if (res_specified) { | ||
| 1262 | dev_info(dev, "overriding resolution: %dx%d\n", xres, yres); | ||
| 1263 | inf->modes[0].xres = xres; inf->modes[0].yres = yres; | ||
| 1264 | } | ||
| 1265 | if (bpp_specified) | ||
| 1266 | switch (bpp) { | ||
| 1267 | case 1: | ||
| 1268 | case 2: | ||
| 1269 | case 4: | ||
| 1270 | case 8: | ||
| 1271 | case 16: | ||
| 1272 | inf->modes[0].bpp = bpp; | ||
| 1273 | dev_info(dev, "overriding bit depth: %d\n", bpp); | ||
| 1274 | break; | ||
| 1275 | default: | ||
| 1276 | dev_err(dev, "Depth %d is not valid\n", bpp); | ||
| 1277 | } | ||
| 1278 | } else if (!strncmp(this_opt, "pixclock:", 9)) { | ||
| 1279 | inf->modes[0].pixclock = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1280 | dev_info(dev, "override pixclock: %ld\n", inf->modes[0].pixclock); | ||
| 1281 | } else if (!strncmp(this_opt, "left:", 5)) { | ||
| 1282 | inf->modes[0].left_margin = simple_strtoul(this_opt+5, NULL, 0); | ||
| 1283 | dev_info(dev, "override left: %u\n", inf->modes[0].left_margin); | ||
| 1284 | } else if (!strncmp(this_opt, "right:", 6)) { | ||
| 1285 | inf->modes[0].right_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1286 | dev_info(dev, "override right: %u\n", inf->modes[0].right_margin); | ||
| 1287 | } else if (!strncmp(this_opt, "upper:", 6)) { | ||
| 1288 | inf->modes[0].upper_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1289 | dev_info(dev, "override upper: %u\n", inf->modes[0].upper_margin); | ||
| 1290 | } else if (!strncmp(this_opt, "lower:", 6)) { | ||
| 1291 | inf->modes[0].lower_margin = simple_strtoul(this_opt+6, NULL, 0); | ||
| 1292 | dev_info(dev, "override lower: %u\n", inf->modes[0].lower_margin); | ||
| 1293 | } else if (!strncmp(this_opt, "hsynclen:", 9)) { | ||
| 1294 | inf->modes[0].hsync_len = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1295 | dev_info(dev, "override hsynclen: %u\n", inf->modes[0].hsync_len); | ||
| 1296 | } else if (!strncmp(this_opt, "vsynclen:", 9)) { | ||
| 1297 | inf->modes[0].vsync_len = simple_strtoul(this_opt+9, NULL, 0); | ||
| 1298 | dev_info(dev, "override vsynclen: %u\n", inf->modes[0].vsync_len); | ||
| 1299 | } else if (!strncmp(this_opt, "hsync:", 6)) { | ||
| 1300 | if (simple_strtoul(this_opt+6, NULL, 0) == 0) { | ||
| 1301 | dev_info(dev, "override hsync: Active Low\n"); | ||
| 1302 | inf->modes[0].sync &= ~FB_SYNC_HOR_HIGH_ACT; | ||
| 1303 | } else { | ||
| 1304 | dev_info(dev, "override hsync: Active High\n"); | ||
| 1305 | inf->modes[0].sync |= FB_SYNC_HOR_HIGH_ACT; | ||
| 1306 | } | ||
| 1307 | } else if (!strncmp(this_opt, "vsync:", 6)) { | ||
| 1308 | if (simple_strtoul(this_opt+6, NULL, 0) == 0) { | ||
| 1309 | dev_info(dev, "override vsync: Active Low\n"); | ||
| 1310 | inf->modes[0].sync &= ~FB_SYNC_VERT_HIGH_ACT; | ||
| 1311 | } else { | ||
| 1312 | dev_info(dev, "override vsync: Active High\n"); | ||
| 1313 | inf->modes[0].sync |= FB_SYNC_VERT_HIGH_ACT; | ||
| 1314 | } | ||
| 1315 | } else if (!strncmp(this_opt, "dpc:", 4)) { | ||
| 1316 | if (simple_strtoul(this_opt+4, NULL, 0) == 0) { | ||
| 1317 | dev_info(dev, "override double pixel clock: false\n"); | ||
| 1318 | inf->lccr3 &= ~LCCR3_DPC; | ||
| 1319 | } else { | ||
| 1320 | dev_info(dev, "override double pixel clock: true\n"); | ||
| 1321 | inf->lccr3 |= LCCR3_DPC; | ||
| 1322 | } | ||
| 1323 | } else if (!strncmp(this_opt, "outputen:", 9)) { | ||
| 1324 | if (simple_strtoul(this_opt+9, NULL, 0) == 0) { | ||
| 1325 | dev_info(dev, "override output enable: active low\n"); | ||
| 1326 | inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnL; | ||
| 1327 | } else { | ||
| 1328 | dev_info(dev, "override output enable: active high\n"); | ||
| 1329 | inf->lccr3 = (inf->lccr3 & ~LCCR3_OEP) | LCCR3_OutEnH; | ||
| 1330 | } | ||
| 1331 | } else if (!strncmp(this_opt, "pixclockpol:", 12)) { | ||
| 1332 | if (simple_strtoul(this_opt+12, NULL, 0) == 0) { | ||
| 1333 | dev_info(dev, "override pixel clock polarity: falling edge\n"); | ||
| 1334 | inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixFlEdg; | ||
| 1335 | } else { | ||
| 1336 | dev_info(dev, "override pixel clock polarity: rising edge\n"); | ||
| 1337 | inf->lccr3 = (inf->lccr3 & ~LCCR3_PCP) | LCCR3_PixRsEdg; | ||
| 1338 | } | ||
| 1339 | } else if (!strncmp(this_opt, "color", 5)) { | ||
| 1340 | inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Color; | ||
| 1341 | } else if (!strncmp(this_opt, "mono", 4)) { | ||
| 1342 | inf->lccr0 = (inf->lccr0 & ~LCCR0_CMS) | LCCR0_Mono; | ||
| 1343 | } else if (!strncmp(this_opt, "active", 6)) { | ||
| 1344 | inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Act; | ||
| 1345 | } else if (!strncmp(this_opt, "passive", 7)) { | ||
| 1346 | inf->lccr0 = (inf->lccr0 & ~LCCR0_PAS) | LCCR0_Pas; | ||
| 1347 | } else if (!strncmp(this_opt, "single", 6)) { | ||
| 1348 | inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Sngl; | ||
| 1349 | } else if (!strncmp(this_opt, "dual", 4)) { | ||
| 1350 | inf->lccr0 = (inf->lccr0 & ~LCCR0_SDS) | LCCR0_Dual; | ||
| 1351 | } else if (!strncmp(this_opt, "4pix", 4)) { | ||
| 1352 | inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_4PixMono; | ||
| 1353 | } else if (!strncmp(this_opt, "8pix", 4)) { | ||
| 1354 | inf->lccr0 = (inf->lccr0 & ~LCCR0_DPD) | LCCR0_8PixMono; | ||
| 1355 | } else { | ||
| 1356 | dev_err(dev, "unknown option: %s\n", this_opt); | ||
| 1357 | return -EINVAL; | ||
| 1358 | } | ||
| 1359 | } | ||
| 1360 | return 0; | ||
| 1361 | 1590 | ||
| 1591 | #ifndef CONFIG_MODULES | ||
| 1592 | static int __devinit pxafb_setup_options(void) | ||
| 1593 | { | ||
| 1594 | char *options = NULL; | ||
| 1595 | |||
| 1596 | if (fb_get_options("pxafb", &options)) | ||
| 1597 | return -ENODEV; | ||
| 1598 | |||
| 1599 | if (options) | ||
| 1600 | strlcpy(g_options, options, sizeof(g_options)); | ||
| 1601 | |||
| 1602 | return 0; | ||
| 1362 | } | 1603 | } |
| 1604 | #else | ||
| 1605 | #define pxafb_setup_options() (0) | ||
| 1606 | |||
| 1607 | module_param_string(options, g_options, sizeof(g_options), 0); | ||
| 1608 | MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)"); | ||
| 1609 | #endif | ||
| 1610 | |||
| 1611 | #else | ||
| 1612 | #define pxafb_parse_options(...) (0) | ||
| 1613 | #define pxafb_setup_options() (0) | ||
| 1363 | #endif | 1614 | #endif |
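
The "could be made table driven or similar?" note kept in pxafb_parse_options() above suggests one obvious refactoring: map option prefixes to the field they set instead of growing the if/else chain in parse_opt(). A standalone sketch of that shape for a few of the numeric options; it is an illustration, not a drop-in replacement for the driver code.

    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct mode_fields {
        unsigned long pixclock;
        unsigned int left, right, upper, lower;
    };

    struct opt_entry {
        const char *prefix;
        size_t offset;   /* where the parsed number is stored */
        int is_long;     /* unsigned long vs unsigned int field */
    };

    static const struct opt_entry opt_table[] = {
        { "pixclock:", offsetof(struct mode_fields, pixclock), 1 },
        { "left:",     offsetof(struct mode_fields, left),     0 },
        { "right:",    offsetof(struct mode_fields, right),    0 },
        { "upper:",    offsetof(struct mode_fields, upper),    0 },
        { "lower:",    offsetof(struct mode_fields, lower),    0 },
    };

    static int parse_one(struct mode_fields *m, const char *opt)
    {
        size_t i, n;

        for (i = 0; i < sizeof(opt_table) / sizeof(opt_table[0]); i++) {
            n = strlen(opt_table[i].prefix);
            if (strncmp(opt, opt_table[i].prefix, n))
                continue;
            if (opt_table[i].is_long)
                *(unsigned long *)((char *)m + opt_table[i].offset) =
                    strtoul(opt + n, NULL, 0);
            else
                *(unsigned int *)((char *)m + opt_table[i].offset) =
                    (unsigned int)strtoul(opt + n, NULL, 0);
            return 0;
        }
        return -1; /* unknown option */
    }

    int main(void)
    {
        struct mode_fields m = { 0 };

        parse_one(&m, "pixclock:171521");
        parse_one(&m, "left:20");
        printf("pixclock=%lu left=%u\n", m.pixclock, m.left);
        return 0;
    }
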
| 1364 | 1615 | ||
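
With the plumbing above, the strings parse_opt() accepts can reach the driver either from the kernel command line when built in (fb_get_options() picks up the video= argument, conventionally prefixed with the driver name) or through the "options" module parameter when built as a module. Assuming the usual fbdev prefix convention, something like:

    video=pxafb:mode:640x480-16,hsync:0,vsync:0,active

or, as a module, "modprobe pxafb options=mode:640x480-16,active".
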
| 1365 | static int __init pxafb_probe(struct platform_device *dev) | 1616 | static int __init pxafb_probe(struct platform_device *dev) |
| 1366 | { | 1617 | { |
| 1367 | struct pxafb_info *fbi; | 1618 | struct pxafb_info *fbi; |
| 1368 | struct pxafb_mach_info *inf; | 1619 | struct pxafb_mach_info *inf; |
| 1369 | int ret; | 1620 | struct resource *r; |
| 1621 | int irq, ret; | ||
| 1370 | 1622 | ||
| 1371 | dev_dbg(&dev->dev, "pxafb_probe\n"); | 1623 | dev_dbg(&dev->dev, "pxafb_probe\n"); |
| 1372 | 1624 | ||
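The hunk above folds the old option setup into a single pxafb_setup_options() helper: built-in kernels read the option string from the kernel command line through fb_get_options(), while modular builds expose the same buffer as a module parameter. A minimal sketch of that split (buffer name, helper name and the MODULE guard are illustrative; see the hunk for the driver's exact arrangement):

```c
/*
 * Sketch of the built-in vs. module option plumbing consolidated above.
 * Names and the MODULE guard are illustrative, not the driver's exact code.
 */
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>

static char g_options[256] = "";

#ifndef MODULE
/* Built in: pick up "video=pxafb:<options>" from the kernel command line. */
static int __init example_setup_options(void)
{
	char *options = NULL;

	if (fb_get_options("pxafb", &options))
		return -ENODEV;		/* "video=pxafb:off" disables the driver */

	if (options)
		strlcpy(g_options, options, sizeof(g_options));
	return 0;
}
#else
/* Module: the same buffer is filled from "options=<options>" at load time. */
#define example_setup_options()	(0)

module_param_string(options, g_options, sizeof(g_options), 0);
MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)");
#endif
```

Either way, the resulting string is handed to pxafb_parse_options() once during pxafb_probe(), which walks the option tokens with the strncmp() chain shown at the top of the hunk.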
| @@ -1376,38 +1628,45 @@ static int __init pxafb_probe(struct platform_device *dev) | |||
| 1376 | if (!inf) | 1628 | if (!inf) |
| 1377 | goto failed; | 1629 | goto failed; |
| 1378 | 1630 | ||
| 1379 | #ifdef CONFIG_FB_PXA_PARAMETERS | ||
| 1380 | ret = pxafb_parse_options(&dev->dev, g_options); | 1631 | ret = pxafb_parse_options(&dev->dev, g_options); |
| 1381 | if (ret < 0) | 1632 | if (ret < 0) |
| 1382 | goto failed; | 1633 | goto failed; |
| 1383 | #endif | ||
| 1384 | 1634 | ||
| 1385 | #ifdef DEBUG_VAR | 1635 | #ifdef DEBUG_VAR |
| 1386 | /* Check for various illegal bit-combinations. Currently only | 1636 | /* Check for various illegal bit-combinations. Currently only |
| 1387 | * a warning is given. */ | 1637 | * a warning is given. */ |
| 1388 | 1638 | ||
| 1389 | if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) | 1639 | if (inf->lccr0 & LCCR0_INVALID_CONFIG_MASK) |
| 1390 | dev_warn(&dev->dev, "machine LCCR0 setting contains illegal bits: %08x\n", | 1640 | dev_warn(&dev->dev, "machine LCCR0 setting contains " |
| 1391 | inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); | 1641 | "illegal bits: %08x\n", |
| 1392 | if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) | 1642 | inf->lccr0 & LCCR0_INVALID_CONFIG_MASK); |
| 1393 | dev_warn(&dev->dev, "machine LCCR3 setting contains illegal bits: %08x\n", | 1643 | if (inf->lccr3 & LCCR3_INVALID_CONFIG_MASK) |
| 1394 | inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); | 1644 | dev_warn(&dev->dev, "machine LCCR3 setting contains " |
| 1395 | if (inf->lccr0 & LCCR0_DPD && | 1645 | "illegal bits: %08x\n", |
| 1646 | inf->lccr3 & LCCR3_INVALID_CONFIG_MASK); | ||
| 1647 | if (inf->lccr0 & LCCR0_DPD && | ||
| 1396 | ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || | 1648 | ((inf->lccr0 & LCCR0_PAS) != LCCR0_Pas || |
| 1397 | (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || | 1649 | (inf->lccr0 & LCCR0_SDS) != LCCR0_Sngl || |
| 1398 | (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) | 1650 | (inf->lccr0 & LCCR0_CMS) != LCCR0_Mono)) |
| 1399 | dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is only valid in passive mono" | 1651 | dev_warn(&dev->dev, "Double Pixel Data (DPD) mode is " |
| 1400 | " single panel mode\n"); | 1652 | "only valid in passive mono" |
| 1401 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && | 1653 | " single panel mode\n"); |
| 1654 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Act && | ||
| 1402 | (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) | 1655 | (inf->lccr0 & LCCR0_SDS) == LCCR0_Dual) |
| 1403 | dev_warn(&dev->dev, "Dual panel only valid in passive mode\n"); | 1656 | dev_warn(&dev->dev, "Dual panel only valid in passive mode\n"); |
| 1404 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && | 1657 | if ((inf->lccr0 & LCCR0_PAS) == LCCR0_Pas && |
| 1405 | (inf->modes->upper_margin || inf->modes->lower_margin)) | 1658 | (inf->modes->upper_margin || inf->modes->lower_margin)) |
| 1406 | dev_warn(&dev->dev, "Upper and lower margins must be 0 in passive mode\n"); | 1659 | dev_warn(&dev->dev, "Upper and lower margins must be 0 in " |
| 1660 | "passive mode\n"); | ||
| 1407 | #endif | 1661 | #endif |
| 1408 | 1662 | ||
| 1409 | dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n",inf->modes->xres, inf->modes->yres, inf->modes->bpp); | 1663 | dev_dbg(&dev->dev, "got a %dx%dx%d LCD\n", |
| 1410 | if (inf->modes->xres == 0 || inf->modes->yres == 0 || inf->modes->bpp == 0) { | 1664 | inf->modes->xres, |
| 1665 | inf->modes->yres, | ||
| 1666 | inf->modes->bpp); | ||
| 1667 | if (inf->modes->xres == 0 || | ||
| 1668 | inf->modes->yres == 0 || | ||
| 1669 | inf->modes->bpp == 0) { | ||
| 1411 | dev_err(&dev->dev, "Invalid resolution or bit depth\n"); | 1670 | dev_err(&dev->dev, "Invalid resolution or bit depth\n"); |
| 1412 | ret = -EINVAL; | 1671 | ret = -EINVAL; |
| 1413 | goto failed; | 1672 | goto failed; |
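Besides reflowing the warning messages to fit in 80 columns, the hunk above keeps one hard failure: a mode with zero resolution or bit depth aborts the probe. Factored into a hypothetical helper (the mode structure comes from the PXA platform header; the helper name is made up), the check would look roughly like:

```c
/*
 * Hypothetical helper mirroring the hard check above; pxafb_mode_info
 * is taken from the PXA platform header, the helper name is made up.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <asm/arch/pxafb.h>	/* struct pxafb_mode_info (assumed path) */

static int example_check_mode(struct device *dev,
			      const struct pxafb_mode_info *mode)
{
	if (mode->xres == 0 || mode->yres == 0 || mode->bpp == 0) {
		dev_err(dev, "Invalid resolution or bit depth\n");
		return -EINVAL;
	}
	return 0;
}
```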
| @@ -1416,26 +1675,62 @@ static int __init pxafb_probe(struct platform_device *dev) | |||
| 1416 | pxafb_lcd_power = inf->pxafb_lcd_power; | 1675 | pxafb_lcd_power = inf->pxafb_lcd_power; |
| 1417 | fbi = pxafb_init_fbinfo(&dev->dev); | 1676 | fbi = pxafb_init_fbinfo(&dev->dev); |
| 1418 | if (!fbi) { | 1677 | if (!fbi) { |
| 1678 | /* only reason for pxafb_init_fbinfo to fail is kmalloc */ | ||
| 1419 | dev_err(&dev->dev, "Failed to initialize framebuffer device\n"); | 1679 | dev_err(&dev->dev, "Failed to initialize framebuffer device\n"); |
| 1420 | ret = -ENOMEM; // only reason for pxafb_init_fbinfo to fail is kmalloc | 1680 | ret = -ENOMEM; |
| 1421 | goto failed; | 1681 | goto failed; |
| 1422 | } | 1682 | } |
| 1423 | 1683 | ||
| 1684 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
| 1685 | if (r == NULL) { | ||
| 1686 | dev_err(&dev->dev, "no I/O memory resource defined\n"); | ||
| 1687 | ret = -ENODEV; | ||
| 1688 | goto failed; | ||
| 1689 | } | ||
| 1690 | |||
| 1691 | r = request_mem_region(r->start, r->end - r->start + 1, dev->name); | ||
| 1692 | if (r == NULL) { | ||
| 1693 | dev_err(&dev->dev, "failed to request I/O memory\n"); | ||
| 1694 | ret = -EBUSY; | ||
| 1695 | goto failed; | ||
| 1696 | } | ||
| 1697 | |||
| 1698 | fbi->mmio_base = ioremap(r->start, r->end - r->start + 1); | ||
| 1699 | if (fbi->mmio_base == NULL) { | ||
| 1700 | dev_err(&dev->dev, "failed to map I/O memory\n"); | ||
| 1701 | ret = -EBUSY; | ||
| 1702 | goto failed_free_res; | ||
| 1703 | } | ||
| 1704 | |||
| 1424 | /* Initialize video memory */ | 1705 | /* Initialize video memory */ |
| 1425 | ret = pxafb_map_video_memory(fbi); | 1706 | ret = pxafb_map_video_memory(fbi); |
| 1426 | if (ret) { | 1707 | if (ret) { |
| 1427 | dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); | 1708 | dev_err(&dev->dev, "Failed to allocate video RAM: %d\n", ret); |
| 1428 | ret = -ENOMEM; | 1709 | ret = -ENOMEM; |
| 1429 | goto failed; | 1710 | goto failed_free_io; |
| 1711 | } | ||
| 1712 | |||
| 1713 | irq = platform_get_irq(dev, 0); | ||
| 1714 | if (irq < 0) { | ||
| 1715 | dev_err(&dev->dev, "no IRQ defined\n"); | ||
| 1716 | ret = -ENODEV; | ||
| 1717 | goto failed_free_mem; | ||
| 1430 | } | 1718 | } |
| 1431 | 1719 | ||
| 1432 | ret = request_irq(IRQ_LCD, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi); | 1720 | ret = request_irq(irq, pxafb_handle_irq, IRQF_DISABLED, "LCD", fbi); |
| 1433 | if (ret) { | 1721 | if (ret) { |
| 1434 | dev_err(&dev->dev, "request_irq failed: %d\n", ret); | 1722 | dev_err(&dev->dev, "request_irq failed: %d\n", ret); |
| 1435 | ret = -EBUSY; | 1723 | ret = -EBUSY; |
| 1436 | goto failed; | 1724 | goto failed_free_mem; |
| 1437 | } | 1725 | } |
| 1438 | 1726 | ||
| 1727 | #ifdef CONFIG_FB_PXA_SMARTPANEL | ||
| 1728 | ret = pxafb_smart_init(fbi); | ||
| 1729 | if (ret) { | ||
| 1730 | dev_err(&dev->dev, "failed to initialize smartpanel\n"); | ||
| 1731 | goto failed_free_irq; | ||
| 1732 | } | ||
| 1733 | #endif | ||
| 1439 | /* | 1734 | /* |
| 1440 | * This makes sure that our colour bitfield | 1735 | * This makes sure that our colour bitfield |
| 1441 | * descriptors are correctly initialised. | 1736 | * descriptors are correctly initialised. |
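The bulk of the hunk above moves the probe away from hard-coded assumptions (a fixed IRQ_LCD, statically addressed registers) to resources described by the platform device: the register window is requested and ioremap()ed, and the interrupt number comes from platform_get_irq(). A condensed sketch of that acquire-then-unwind pattern (function and label names are illustrative, not the driver's):

```c
/*
 * Condensed sketch of the resource handling added above; each failure
 * path releases only what has already been acquired. Names are
 * illustrative, not the driver's.
 */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_probe(struct platform_device *pdev)
{
	struct resource *r;
	void __iomem *mmio;
	int irq, ret;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (r == NULL)
		return -ENODEV;

	if (!request_mem_region(r->start, r->end - r->start + 1, pdev->name))
		return -EBUSY;

	mmio = ioremap(r->start, r->end - r->start + 1);
	if (mmio == NULL) {
		ret = -EBUSY;
		goto err_free_res;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENODEV;
		goto err_unmap;
	}

	ret = request_irq(irq, example_irq, IRQF_DISABLED, "LCD", NULL);
	if (ret)
		goto err_unmap;

	return 0;

err_unmap:
	iounmap(mmio);
err_free_res:
	release_mem_region(r->start, r->end - r->start + 1);
	return ret;
}
```

The failed_free_* labels added later in this diff serve the same purpose for the driver's own probe path.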
| @@ -1447,19 +1742,18 @@ static int __init pxafb_probe(struct platform_device *dev) | |||
| 1447 | 1742 | ||
| 1448 | ret = register_framebuffer(&fbi->fb); | 1743 | ret = register_framebuffer(&fbi->fb); |
| 1449 | if (ret < 0) { | 1744 | if (ret < 0) { |
| 1450 | dev_err(&dev->dev, "Failed to register framebuffer device: %d\n", ret); | 1745 | dev_err(&dev->dev, |
| 1451 | goto failed; | 1746 | "Failed to register framebuffer device: %d\n", ret); |
| 1747 | goto failed_free_irq; | ||
| 1452 | } | 1748 | } |
| 1453 | 1749 | ||
| 1454 | #ifdef CONFIG_PM | ||
| 1455 | // TODO | ||
| 1456 | #endif | ||
| 1457 | |||
| 1458 | #ifdef CONFIG_CPU_FREQ | 1750 | #ifdef CONFIG_CPU_FREQ |
| 1459 | fbi->freq_transition.notifier_call = pxafb_freq_transition; | 1751 | fbi->freq_transition.notifier_call = pxafb_freq_transition; |
| 1460 | fbi->freq_policy.notifier_call = pxafb_freq_policy; | 1752 | fbi->freq_policy.notifier_call = pxafb_freq_policy; |
| 1461 | cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); | 1753 | cpufreq_register_notifier(&fbi->freq_transition, |
| 1462 | cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER); | 1754 | CPUFREQ_TRANSITION_NOTIFIER); |
| 1755 | cpufreq_register_notifier(&fbi->freq_policy, | ||
| 1756 | CPUFREQ_POLICY_NOTIFIER); | ||
| 1463 | #endif | 1757 | #endif |
| 1464 | 1758 | ||
| 1465 | /* | 1759 | /* |
| @@ -1469,6 +1763,15 @@ static int __init pxafb_probe(struct platform_device *dev) | |||
| 1469 | 1763 | ||
| 1470 | return 0; | 1764 | return 0; |
| 1471 | 1765 | ||
| 1766 | failed_free_irq: | ||
| 1767 | free_irq(irq, fbi); | ||
| 1768 | failed_free_res: | ||
| 1769 | release_mem_region(r->start, r->end - r->start + 1); | ||
| 1770 | failed_free_io: | ||
| 1771 | iounmap(fbi->mmio_base); | ||
| 1772 | failed_free_mem: | ||
| 1773 | dma_free_writecombine(&dev->dev, fbi->map_size, | ||
| 1774 | fbi->map_cpu, fbi->map_dma); | ||
| 1472 | failed: | 1775 | failed: |
| 1473 | platform_set_drvdata(dev, NULL); | 1776 | platform_set_drvdata(dev, NULL); |
| 1474 | kfree(fbi); | 1777 | kfree(fbi); |
| @@ -1477,40 +1780,18 @@ failed: | |||
| 1477 | 1780 | ||
| 1478 | static struct platform_driver pxafb_driver = { | 1781 | static struct platform_driver pxafb_driver = { |
| 1479 | .probe = pxafb_probe, | 1782 | .probe = pxafb_probe, |
| 1480 | #ifdef CONFIG_PM | ||
| 1481 | .suspend = pxafb_suspend, | 1783 | .suspend = pxafb_suspend, |
| 1482 | .resume = pxafb_resume, | 1784 | .resume = pxafb_resume, |
| 1483 | #endif | ||
| 1484 | .driver = { | 1785 | .driver = { |
| 1485 | .name = "pxa2xx-fb", | 1786 | .name = "pxa2xx-fb", |
| 1486 | }, | 1787 | }, |
| 1487 | }; | 1788 | }; |
| 1488 | 1789 | ||
| 1489 | #ifndef MODULE | ||
| 1490 | static int __devinit pxafb_setup(char *options) | ||
| 1491 | { | ||
| 1492 | # ifdef CONFIG_FB_PXA_PARAMETERS | ||
| 1493 | if (options) | ||
| 1494 | strlcpy(g_options, options, sizeof(g_options)); | ||
| 1495 | # endif | ||
| 1496 | return 0; | ||
| 1497 | } | ||
| 1498 | #else | ||
| 1499 | # ifdef CONFIG_FB_PXA_PARAMETERS | ||
| 1500 | module_param_string(options, g_options, sizeof(g_options), 0); | ||
| 1501 | MODULE_PARM_DESC(options, "LCD parameters (see Documentation/fb/pxafb.txt)"); | ||
| 1502 | # endif | ||
| 1503 | #endif | ||
| 1504 | |||
| 1505 | static int __devinit pxafb_init(void) | 1790 | static int __devinit pxafb_init(void) |
| 1506 | { | 1791 | { |
| 1507 | #ifndef MODULE | 1792 | if (pxafb_setup_options()) |
| 1508 | char *option = NULL; | 1793 | return -EINVAL; |
| 1509 | 1794 | ||
| 1510 | if (fb_get_options("pxafb", &option)) | ||
| 1511 | return -ENODEV; | ||
| 1512 | pxafb_setup(option); | ||
| 1513 | #endif | ||
| 1514 | return platform_driver_register(&pxafb_driver); | 1795 | return platform_driver_register(&pxafb_driver); |
| 1515 | } | 1796 | } |
| 1516 | 1797 | ||
diff --git a/drivers/video/pxafb.h b/drivers/video/pxafb.h index d920b8a14c35..8238dc826429 100644 --- a/drivers/video/pxafb.h +++ b/drivers/video/pxafb.h | |||
| @@ -21,14 +21,6 @@ | |||
| 21 | * for more details. | 21 | * for more details. |
| 22 | */ | 22 | */ |
| 23 | 23 | ||
| 24 | /* Shadows for LCD controller registers */ | ||
| 25 | struct pxafb_lcd_reg { | ||
| 26 | unsigned int lccr0; | ||
| 27 | unsigned int lccr1; | ||
| 28 | unsigned int lccr2; | ||
| 29 | unsigned int lccr3; | ||
| 30 | }; | ||
| 31 | |||
| 32 | /* PXA LCD DMA descriptor */ | 24 | /* PXA LCD DMA descriptor */ |
| 33 | struct pxafb_dma_descriptor { | 25 | struct pxafb_dma_descriptor { |
| 34 | unsigned int fdadr; | 26 | unsigned int fdadr; |
| @@ -37,11 +29,49 @@ struct pxafb_dma_descriptor { | |||
| 37 | unsigned int ldcmd; | 29 | unsigned int ldcmd; |
| 38 | }; | 30 | }; |
| 39 | 31 | ||
| 32 | enum { | ||
| 33 | PAL_NONE = -1, | ||
| 34 | PAL_BASE = 0, | ||
| 35 | PAL_OV1 = 1, | ||
| 36 | PAL_OV2 = 2, | ||
| 37 | PAL_MAX, | ||
| 38 | }; | ||
| 39 | |||
| 40 | enum { | ||
| 41 | DMA_BASE = 0, | ||
| 42 | DMA_UPPER = 0, | ||
| 43 | DMA_LOWER = 1, | ||
| 44 | DMA_OV1 = 1, | ||
| 45 | DMA_OV2_Y = 2, | ||
| 46 | DMA_OV2_Cb = 3, | ||
| 47 | DMA_OV2_Cr = 4, | ||
| 48 | DMA_CURSOR = 5, | ||
| 49 | DMA_CMD = 6, | ||
| 50 | DMA_MAX, | ||
| 51 | }; | ||
| 52 | |||
| 53 | /* maximum palette size - 256 entries, each 4 bytes long */ | ||
| 54 | #define PALETTE_SIZE (256 * 4) | ||
| 55 | #define CMD_BUFF_SIZE (1024 * 50) | ||
| 56 | |||
| 57 | struct pxafb_dma_buff { | ||
| 58 | unsigned char palette[PAL_MAX * PALETTE_SIZE]; | ||
| 59 | uint16_t cmd_buff[CMD_BUFF_SIZE]; | ||
| 60 | struct pxafb_dma_descriptor pal_desc[PAL_MAX]; | ||
| 61 | struct pxafb_dma_descriptor dma_desc[DMA_MAX]; | ||
| 62 | }; | ||
| 63 | |||
| 40 | struct pxafb_info { | 64 | struct pxafb_info { |
| 41 | struct fb_info fb; | 65 | struct fb_info fb; |
| 42 | struct device *dev; | 66 | struct device *dev; |
| 43 | struct clk *clk; | 67 | struct clk *clk; |
| 44 | 68 | ||
| 69 | void __iomem *mmio_base; | ||
| 70 | |||
| 71 | struct pxafb_dma_buff *dma_buff; | ||
| 72 | dma_addr_t dma_buff_phys; | ||
| 73 | dma_addr_t fdadr[DMA_MAX]; | ||
| 74 | |||
| 45 | /* | 75 | /* |
| 46 | * These are the addresses we mapped | 76 | * These are the addresses we mapped |
| 47 | * the framebuffer memory region to. | 77 | * the framebuffer memory region to. |
| @@ -55,19 +85,8 @@ struct pxafb_info { | |||
| 55 | u_char * screen_cpu; /* virtual address of frame buffer */ | 85 | u_char * screen_cpu; /* virtual address of frame buffer */ |
| 56 | dma_addr_t screen_dma; /* physical address of frame buffer */ | 86 | dma_addr_t screen_dma; /* physical address of frame buffer */ |
| 57 | u16 * palette_cpu; /* virtual address of palette memory */ | 87 | u16 * palette_cpu; /* virtual address of palette memory */ |
| 58 | dma_addr_t palette_dma; /* physical address of palette memory */ | ||
| 59 | u_int palette_size; | 88 | u_int palette_size; |
| 60 | 89 | ssize_t video_offset; | |
| 61 | /* DMA descriptors */ | ||
| 62 | struct pxafb_dma_descriptor * dmadesc_fblow_cpu; | ||
| 63 | dma_addr_t dmadesc_fblow_dma; | ||
| 64 | struct pxafb_dma_descriptor * dmadesc_fbhigh_cpu; | ||
| 65 | dma_addr_t dmadesc_fbhigh_dma; | ||
| 66 | struct pxafb_dma_descriptor * dmadesc_palette_cpu; | ||
| 67 | dma_addr_t dmadesc_palette_dma; | ||
| 68 | |||
| 69 | dma_addr_t fdadr0; | ||
| 70 | dma_addr_t fdadr1; | ||
| 71 | 90 | ||
| 72 | u_int lccr0; | 91 | u_int lccr0; |
| 73 | u_int lccr3; | 92 | u_int lccr3; |
| @@ -81,6 +100,7 @@ struct pxafb_info { | |||
| 81 | u_int reg_lccr2; | 100 | u_int reg_lccr2; |
| 82 | u_int reg_lccr3; | 101 | u_int reg_lccr3; |
| 83 | u_int reg_lccr4; | 102 | u_int reg_lccr4; |
| 103 | u_int reg_cmdcr; | ||
| 84 | 104 | ||
| 85 | unsigned long hsync_time; | 105 | unsigned long hsync_time; |
| 86 | 106 | ||
| @@ -90,6 +110,16 @@ struct pxafb_info { | |||
| 90 | wait_queue_head_t ctrlr_wait; | 110 | wait_queue_head_t ctrlr_wait; |
| 91 | struct work_struct task; | 111 | struct work_struct task; |
| 92 | 112 | ||
| 113 | struct completion disable_done; | ||
| 114 | |||
| 115 | #ifdef CONFIG_FB_PXA_SMARTPANEL | ||
| 116 | uint16_t *smart_cmds; | ||
| 117 | size_t n_smart_cmds; | ||
| 118 | struct completion command_done; | ||
| 119 | struct completion refresh_done; | ||
| 120 | struct task_struct *smart_thread; | ||
| 121 | #endif | ||
| 122 | |||
| 93 | #ifdef CONFIG_CPU_FREQ | 123 | #ifdef CONFIG_CPU_FREQ |
| 94 | struct notifier_block freq_transition; | 124 | struct notifier_block freq_transition; |
| 95 | struct notifier_block freq_policy; | 125 | struct notifier_block freq_policy; |
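The header rework above replaces the per-descriptor pointer/dma_addr_t pairs with one struct pxafb_dma_buff holding every palette, the smart-panel command buffer and all DMA descriptors, indexed through the PAL_*/DMA_* enums and addressed from a single dma_buff_phys base. Sub-buffer bus addresses then fall out of plain offset arithmetic, roughly as sketched below (helper names are made up; the driver's own setup code may derive these addresses differently):

```c
/*
 * Illustrative offset arithmetic over the single pxafb_dma_buff
 * allocation; helper names are made up and the driver's setup code
 * may compute these addresses differently.
 */
#include <linux/fb.h>
#include <linux/stddef.h>	/* offsetof */
#include <linux/types.h>
#include "pxafb.h"

static dma_addr_t example_palette_phys(struct pxafb_info *fbi, int pal)
{
	return fbi->dma_buff_phys +
	       offsetof(struct pxafb_dma_buff, palette) +
	       pal * PALETTE_SIZE;
}

static dma_addr_t example_dma_desc_phys(struct pxafb_info *fbi, int dma)
{
	return fbi->dma_buff_phys +
	       offsetof(struct pxafb_dma_buff, dma_desc) +
	       dma * sizeof(struct pxafb_dma_descriptor);
}
```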
diff --git a/drivers/w1/w1_log.h b/drivers/w1/w1_log.h index fe6bdf43380f..e6ab7cf08f88 100644 --- a/drivers/w1/w1_log.h +++ b/drivers/w1/w1_log.h | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | # define assert(expr) \ | 30 | # define assert(expr) \ |
| 31 | if(unlikely(!(expr))) { \ | 31 | if(unlikely(!(expr))) { \ |
| 32 | printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ | 32 | printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \ |
| 33 | #expr,__FILE__,__FUNCTION__,__LINE__); \ | 33 | #expr, __FILE__, __func__, __LINE__); \ |
| 34 | } | 34 | } |
| 35 | #endif | 35 | #endif |
| 36 | 36 | ||
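Finally, the w1 assert macro switches from the GCC-specific __FUNCTION__ to __func__, the identifier standardised by C99; the expansion is otherwise unchanged. In isolation (the call site below is hypothetical), the effect looks like this:

```c
/*
 * Effect of the change above, in isolation: __func__ is the C99
 * predefined identifier, __FUNCTION__ the older GCC-only spelling.
 * The assert() mirrors w1_log.h; the call site is hypothetical.
 */
#include <linux/kernel.h>

#define assert(expr) \
	if (unlikely(!(expr))) { \
		printk(KERN_ERR "Assertion failed! %s,%s,%s,line=%d\n", \
			#expr, __FILE__, __func__, __LINE__); \
	}

static void example_caller(void *sl)
{
	/* prints: Assertion failed! sl != NULL,<file>,example_caller,line=<n> */
	assert(sl != NULL);
}
```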
