Diffstat (limited to 'drivers')
-rw-r--r--  drivers/hid/Kconfig | 10
-rw-r--r--  drivers/hid/hid-core.c | 93
-rw-r--r--  drivers/hid/hid-debug.c | 15
-rw-r--r--  drivers/hid/hid-input.c | 125
-rw-r--r--  drivers/hid/usbhid/hid-core.c | 111
-rw-r--r--  drivers/hid/usbhid/hid-lgff.c | 10
-rw-r--r--  drivers/hid/usbhid/hid-pidff.c | 1
-rw-r--r--  drivers/hid/usbhid/hid-quirks.c | 185
-rw-r--r--  drivers/hid/usbhid/hid-tmff.c | 2
-rw-r--r--  drivers/hid/usbhid/hid-zpff.c | 8
-rw-r--r--  drivers/hid/usbhid/hiddev.c | 2
-rw-r--r--  drivers/hid/usbhid/usbkbd.c | 6
-rw-r--r--  drivers/ide/arm/icside.c | 16
-rw-r--r--  drivers/ide/cris/ide-cris.c | 2
-rw-r--r--  drivers/ide/ide-cd.c | 6
-rw-r--r--  drivers/ide/ide-cd.h | 2
-rw-r--r--  drivers/ide/ide-disk.c | 8
-rw-r--r--  drivers/ide/ide-dma.c | 110
-rw-r--r--  drivers/ide/ide-io.c | 4
-rw-r--r--  drivers/ide/ide-iops.c | 8
-rw-r--r--  drivers/ide/ide-probe.c | 10
-rw-r--r--  drivers/ide/ide-proc.c | 34
-rw-r--r--  drivers/ide/ide-timing.h | 56
-rw-r--r--  drivers/ide/ide.c | 33
-rw-r--r--  drivers/ide/legacy/hd.c | 2
-rw-r--r--  drivers/ide/legacy/macide.c | 14
-rw-r--r--  drivers/ide/mips/au1xxx-ide.c | 24
-rw-r--r--  drivers/ide/pci/aec62xx.c | 119
-rw-r--r--  drivers/ide/pci/alim15x3.c | 78
-rw-r--r--  drivers/ide/pci/amd74xx.c | 127
-rw-r--r--  drivers/ide/pci/atiixp.c | 5
-rw-r--r--  drivers/ide/pci/cmd64x.c | 130
-rw-r--r--  drivers/ide/pci/cs5535.c | 6
-rw-r--r--  drivers/ide/pci/hpt366.c | 170
-rw-r--r--  drivers/ide/pci/it8213.c | 8
-rw-r--r--  drivers/ide/pci/it821x.c | 9
-rw-r--r--  drivers/ide/pci/jmicron.c | 20
-rw-r--r--  drivers/ide/pci/pdc202xx_new.c | 9
-rw-r--r--  drivers/ide/pci/pdc202xx_old.c | 35
-rw-r--r--  drivers/ide/pci/piix.c | 45
-rw-r--r--  drivers/ide/pci/scc_pata.c | 2
-rw-r--r--  drivers/ide/pci/serverworks.c | 103
-rw-r--r--  drivers/ide/pci/sgiioc4.c | 20
-rw-r--r--  drivers/ide/pci/siimage.c | 18
-rw-r--r--  drivers/ide/pci/sis5513.c | 34
-rw-r--r--  drivers/ide/pci/sl82c105.c | 20
-rw-r--r--  drivers/ide/pci/slc90e66.c | 5
-rw-r--r--  drivers/ide/pci/tc86c001.c | 4
-rw-r--r--  drivers/ide/pci/via82cxxx.c | 175
-rw-r--r--  drivers/ide/ppc/pmac.c | 42
-rw-r--r--  drivers/misc/Kconfig | 6
-rw-r--r--  drivers/misc/Makefile | 1
-rw-r--r--  drivers/misc/eeprom_93cx6.c | 241
-rw-r--r--  drivers/net/8139cp.c | 11
-rw-r--r--  drivers/net/Kconfig | 172
-rw-r--r--  drivers/net/Makefile | 1
-rw-r--r--  drivers/net/acenic.c | 4
-rw-r--r--  drivers/net/arm/Kconfig | 12
-rw-r--r--  drivers/net/b44.c | 56
-rw-r--r--  drivers/net/b44.h | 2
-rw-r--r--  drivers/net/cxgb3/adapter.h | 38
-rw-r--r--  drivers/net/cxgb3/common.h | 28
-rw-r--r--  drivers/net/cxgb3/cxgb3_main.c | 43
-rw-r--r--  drivers/net/cxgb3/regs.h | 11
-rw-r--r--  drivers/net/cxgb3/sge.c | 423
-rw-r--r--  drivers/net/cxgb3/t3_hw.c | 128
-rw-r--r--  drivers/net/cxgb3/version.h | 2
-rw-r--r--  drivers/net/ehea/ehea.h | 14
-rw-r--r--  drivers/net/ehea/ehea_hw.h | 24
-rw-r--r--  drivers/net/ehea/ehea_main.c | 32
-rw-r--r--  drivers/net/ehea/ehea_qmr.c | 56
-rw-r--r--  drivers/net/fec_8xx/Kconfig | 2
-rw-r--r--  drivers/net/fs_enet/Kconfig | 2
-rw-r--r--  drivers/net/gianfar.c | 27
-rw-r--r--  drivers/net/gianfar.h | 6
-rw-r--r--  drivers/net/gianfar_mii.c | 55
-rw-r--r--  drivers/net/lasi_82596.c | 1460
-rw-r--r--  drivers/net/lib82596.c | 1434
-rw-r--r--  drivers/net/mlx4/qp.c | 3
-rw-r--r--  drivers/net/netxen/netxen_nic_niu.c | 6
-rw-r--r--  drivers/net/pasemi_mac.c | 2
-rw-r--r--  drivers/net/pcmcia/axnet_cs.c | 7
-rw-r--r--  drivers/net/pcmcia/fmvj18x_cs.c | 23
-rw-r--r--  drivers/net/pcmcia/pcnet_cs.c | 10
-rw-r--r--  drivers/net/phy/Kconfig | 5
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/icplus.c | 134
-rw-r--r--  drivers/net/phy/marvell.c | 16
-rwxr-xr-x  drivers/net/qla3xxx.c | 33
-rw-r--r--  drivers/net/r8169.c | 944
-rw-r--r--  drivers/net/s2io.c | 116
-rw-r--r--  drivers/net/s2io.h | 6
-rw-r--r--  drivers/net/sky2.c | 224
-rw-r--r--  drivers/net/sky2.h | 163
-rw-r--r--  drivers/net/sni_82596.c | 185
-rw-r--r--  drivers/net/spider_net.c | 184
-rw-r--r--  drivers/net/spider_net.h | 21
-rw-r--r--  drivers/net/tulip/Kconfig | 27
-rw-r--r--  drivers/net/tulip/de2104x.c | 1
-rw-r--r--  drivers/net/tulip/de4x5.c | 98
-rw-r--r--  drivers/net/tulip/de4x5.h | 9
-rw-r--r--  drivers/net/usb/usbnet.c | 76
-rw-r--r--  drivers/net/usb/usbnet.h | 10
-rw-r--r--  drivers/net/wireless/Kconfig | 12
-rw-r--r--  drivers/net/wireless/Makefile | 3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_phy.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_ap.c | 34
-rw-r--r--  drivers/net/wireless/hostap/hostap_config.h | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_cs.c | 4
-rw-r--r--  drivers/net/wireless/hostap/hostap_ioctl.c | 2
-rw-r--r--  drivers/net/wireless/hostap/hostap_main.c | 1
-rw-r--r--  drivers/net/wireless/hostap/hostap_pci.c | 5
-rw-r--r--  drivers/net/wireless/hostap/hostap_plx.c | 5
-rw-r--r--  drivers/net/wireless/rtl8187.h | 145
-rw-r--r--  drivers/net/wireless/rtl8187_dev.c | 731
-rw-r--r--  drivers/net/wireless/rtl8187_rtl8225.c | 745
-rw-r--r--  drivers/net/wireless/rtl8187_rtl8225.h | 44
-rw-r--r--  drivers/net/wireless/rtl818x.h | 226
-rw-r--r--  drivers/net/wireless/zd1211rw/Makefile | 2
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.c | 5
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_chip.h | 3
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.c | 21
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf.h | 28
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al2230.c | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_al7230b.c | 1
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_rf_uw2453.c | 534
-rw-r--r--  drivers/net/wireless/zd1211rw/zd_usb.c | 1
-rw-r--r--  drivers/serial/serial_cs.c | 4
128 files changed, 7590 insertions, 3647 deletions
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 8fbe9fdac128..3b63b0b78122 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -1,8 +1,12 @@
1# 1#
2# HID driver configuration 2# HID driver configuration
3# 3#
4menu "HID Devices" 4menuconfig HID_SUPPORT
5 bool "HID Devices"
5 depends on INPUT 6 depends on INPUT
7 default y
8
9if HID_SUPPORT
6 10
7config HID 11config HID
8 tristate "Generic HID support" 12 tristate "Generic HID support"
@@ -24,6 +28,7 @@ config HID
24 28
25config HID_DEBUG 29config HID_DEBUG
26 bool "HID debugging support" 30 bool "HID debugging support"
31 default y if !EMBEDDED
27 depends on HID 32 depends on HID
28 ---help--- 33 ---help---
29 This option lets the HID layer output diagnostics about its internal 34 This option lets the HID layer output diagnostics about its internal
@@ -38,5 +43,4 @@ config HID_DEBUG
38 43
39source "drivers/hid/usbhid/Kconfig" 44source "drivers/hid/usbhid/Kconfig"
40 45
41endmenu 46endif # HID_SUPPORT
42
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 6ec04e79f685..317cf8a7b63c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -40,6 +40,13 @@
40#define DRIVER_DESC "HID core driver" 40#define DRIVER_DESC "HID core driver"
41#define DRIVER_LICENSE "GPL" 41#define DRIVER_LICENSE "GPL"
42 42
43#ifdef CONFIG_HID_DEBUG
44int hid_debug = 0;
45module_param_named(debug, hid_debug, bool, 0600);
46MODULE_PARM_DESC(debug, "Turn HID debugging mode on and off");
47EXPORT_SYMBOL_GPL(hid_debug);
48#endif
49
43/* 50/*
44 * Register a new report for a device. 51 * Register a new report for a device.
45 */ 52 */
@@ -78,7 +85,7 @@ static struct hid_field *hid_register_field(struct hid_report *report, unsigned
78 struct hid_field *field; 85 struct hid_field *field;
79 86
80 if (report->maxfield == HID_MAX_FIELDS) { 87 if (report->maxfield == HID_MAX_FIELDS) {
81 dbg("too many fields in report"); 88 dbg_hid("too many fields in report\n");
82 return NULL; 89 return NULL;
83 } 90 }
84 91
@@ -106,7 +113,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
106 usage = parser->local.usage[0]; 113 usage = parser->local.usage[0];
107 114
108 if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) { 115 if (parser->collection_stack_ptr == HID_COLLECTION_STACK_SIZE) {
109 dbg("collection stack overflow"); 116 dbg_hid("collection stack overflow\n");
110 return -1; 117 return -1;
111 } 118 }
112 119
@@ -114,7 +121,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
114 collection = kmalloc(sizeof(struct hid_collection) * 121 collection = kmalloc(sizeof(struct hid_collection) *
115 parser->device->collection_size * 2, GFP_KERNEL); 122 parser->device->collection_size * 2, GFP_KERNEL);
116 if (collection == NULL) { 123 if (collection == NULL) {
117 dbg("failed to reallocate collection array"); 124 dbg_hid("failed to reallocate collection array\n");
118 return -1; 125 return -1;
119 } 126 }
120 memcpy(collection, parser->device->collection, 127 memcpy(collection, parser->device->collection,
@@ -150,7 +157,7 @@ static int open_collection(struct hid_parser *parser, unsigned type)
150static int close_collection(struct hid_parser *parser) 157static int close_collection(struct hid_parser *parser)
151{ 158{
152 if (!parser->collection_stack_ptr) { 159 if (!parser->collection_stack_ptr) {
153 dbg("collection stack underflow"); 160 dbg_hid("collection stack underflow\n");
154 return -1; 161 return -1;
155 } 162 }
156 parser->collection_stack_ptr--; 163 parser->collection_stack_ptr--;
@@ -178,7 +185,7 @@ static unsigned hid_lookup_collection(struct hid_parser *parser, unsigned type)
178static int hid_add_usage(struct hid_parser *parser, unsigned usage) 185static int hid_add_usage(struct hid_parser *parser, unsigned usage)
179{ 186{
180 if (parser->local.usage_index >= HID_MAX_USAGES) { 187 if (parser->local.usage_index >= HID_MAX_USAGES) {
181 dbg("usage index exceeded"); 188 dbg_hid("usage index exceeded\n");
182 return -1; 189 return -1;
183 } 190 }
184 parser->local.usage[parser->local.usage_index] = usage; 191 parser->local.usage[parser->local.usage_index] = usage;
@@ -202,12 +209,12 @@ static int hid_add_field(struct hid_parser *parser, unsigned report_type, unsign
202 int i; 209 int i;
203 210
204 if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) { 211 if (!(report = hid_register_report(parser->device, report_type, parser->global.report_id))) {
205 dbg("hid_register_report failed"); 212 dbg_hid("hid_register_report failed\n");
206 return -1; 213 return -1;
207 } 214 }
208 215
209 if (parser->global.logical_maximum < parser->global.logical_minimum) { 216 if (parser->global.logical_maximum < parser->global.logical_minimum) {
210 dbg("logical range invalid %d %d", parser->global.logical_minimum, parser->global.logical_maximum); 217 dbg_hid("logical range invalid %d %d\n", parser->global.logical_minimum, parser->global.logical_maximum);
211 return -1; 218 return -1;
212 } 219 }
213 220
@@ -287,7 +294,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
287 case HID_GLOBAL_ITEM_TAG_PUSH: 294 case HID_GLOBAL_ITEM_TAG_PUSH:
288 295
289 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) { 296 if (parser->global_stack_ptr == HID_GLOBAL_STACK_SIZE) {
290 dbg("global enviroment stack overflow"); 297 dbg_hid("global enviroment stack overflow\n");
291 return -1; 298 return -1;
292 } 299 }
293 300
@@ -298,7 +305,7 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
298 case HID_GLOBAL_ITEM_TAG_POP: 305 case HID_GLOBAL_ITEM_TAG_POP:
299 306
300 if (!parser->global_stack_ptr) { 307 if (!parser->global_stack_ptr) {
301 dbg("global enviroment stack underflow"); 308 dbg_hid("global enviroment stack underflow\n");
302 return -1; 309 return -1;
303 } 310 }
304 311
@@ -342,27 +349,27 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item)
342 349
343 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE: 350 case HID_GLOBAL_ITEM_TAG_REPORT_SIZE:
344 if ((parser->global.report_size = item_udata(item)) > 32) { 351 if ((parser->global.report_size = item_udata(item)) > 32) {
345 dbg("invalid report_size %d", parser->global.report_size); 352 dbg_hid("invalid report_size %d\n", parser->global.report_size);
346 return -1; 353 return -1;
347 } 354 }
348 return 0; 355 return 0;
349 356
350 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT: 357 case HID_GLOBAL_ITEM_TAG_REPORT_COUNT:
351 if ((parser->global.report_count = item_udata(item)) > HID_MAX_USAGES) { 358 if ((parser->global.report_count = item_udata(item)) > HID_MAX_USAGES) {
352 dbg("invalid report_count %d", parser->global.report_count); 359 dbg_hid("invalid report_count %d\n", parser->global.report_count);
353 return -1; 360 return -1;
354 } 361 }
355 return 0; 362 return 0;
356 363
357 case HID_GLOBAL_ITEM_TAG_REPORT_ID: 364 case HID_GLOBAL_ITEM_TAG_REPORT_ID:
358 if ((parser->global.report_id = item_udata(item)) == 0) { 365 if ((parser->global.report_id = item_udata(item)) == 0) {
359 dbg("report_id 0 is invalid"); 366 dbg_hid("report_id 0 is invalid\n");
360 return -1; 367 return -1;
361 } 368 }
362 return 0; 369 return 0;
363 370
364 default: 371 default:
365 dbg("unknown global tag 0x%x", item->tag); 372 dbg_hid("unknown global tag 0x%x\n", item->tag);
366 return -1; 373 return -1;
367 } 374 }
368} 375}
@@ -377,7 +384,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
377 unsigned n; 384 unsigned n;
378 385
379 if (item->size == 0) { 386 if (item->size == 0) {
380 dbg("item data expected for local item"); 387 dbg_hid("item data expected for local item\n");
381 return -1; 388 return -1;
382 } 389 }
383 390
@@ -395,14 +402,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
395 * items and the first delimiter set. 402 * items and the first delimiter set.
396 */ 403 */
397 if (parser->local.delimiter_depth != 0) { 404 if (parser->local.delimiter_depth != 0) {
398 dbg("nested delimiters"); 405 dbg_hid("nested delimiters\n");
399 return -1; 406 return -1;
400 } 407 }
401 parser->local.delimiter_depth++; 408 parser->local.delimiter_depth++;
402 parser->local.delimiter_branch++; 409 parser->local.delimiter_branch++;
403 } else { 410 } else {
404 if (parser->local.delimiter_depth < 1) { 411 if (parser->local.delimiter_depth < 1) {
405 dbg("bogus close delimiter"); 412 dbg_hid("bogus close delimiter\n");
406 return -1; 413 return -1;
407 } 414 }
408 parser->local.delimiter_depth--; 415 parser->local.delimiter_depth--;
@@ -412,7 +419,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
412 case HID_LOCAL_ITEM_TAG_USAGE: 419 case HID_LOCAL_ITEM_TAG_USAGE:
413 420
414 if (parser->local.delimiter_branch > 1) { 421 if (parser->local.delimiter_branch > 1) {
415 dbg("alternative usage ignored"); 422 dbg_hid("alternative usage ignored\n");
416 return 0; 423 return 0;
417 } 424 }
418 425
@@ -424,7 +431,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
424 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM: 431 case HID_LOCAL_ITEM_TAG_USAGE_MINIMUM:
425 432
426 if (parser->local.delimiter_branch > 1) { 433 if (parser->local.delimiter_branch > 1) {
427 dbg("alternative usage ignored"); 434 dbg_hid("alternative usage ignored\n");
428 return 0; 435 return 0;
429 } 436 }
430 437
@@ -437,7 +444,7 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
437 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM: 444 case HID_LOCAL_ITEM_TAG_USAGE_MAXIMUM:
438 445
439 if (parser->local.delimiter_branch > 1) { 446 if (parser->local.delimiter_branch > 1) {
440 dbg("alternative usage ignored"); 447 dbg_hid("alternative usage ignored\n");
441 return 0; 448 return 0;
442 } 449 }
443 450
@@ -446,14 +453,14 @@ static int hid_parser_local(struct hid_parser *parser, struct hid_item *item)
446 453
447 for (n = parser->local.usage_minimum; n <= data; n++) 454 for (n = parser->local.usage_minimum; n <= data; n++)
448 if (hid_add_usage(parser, n)) { 455 if (hid_add_usage(parser, n)) {
449 dbg("hid_add_usage failed\n"); 456 dbg_hid("hid_add_usage failed\n");
450 return -1; 457 return -1;
451 } 458 }
452 return 0; 459 return 0;
453 460
454 default: 461 default:
455 462
456 dbg("unknown local item tag 0x%x", item->tag); 463 dbg_hid("unknown local item tag 0x%x\n", item->tag);
457 return 0; 464 return 0;
458 } 465 }
459 return 0; 466 return 0;
@@ -487,7 +494,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
487 ret = hid_add_field(parser, HID_FEATURE_REPORT, data); 494 ret = hid_add_field(parser, HID_FEATURE_REPORT, data);
488 break; 495 break;
489 default: 496 default:
490 dbg("unknown main item tag 0x%x", item->tag); 497 dbg_hid("unknown main item tag 0x%x\n", item->tag);
491 ret = 0; 498 ret = 0;
492 } 499 }
493 500
@@ -502,7 +509,7 @@ static int hid_parser_main(struct hid_parser *parser, struct hid_item *item)
502 509
503static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item) 510static int hid_parser_reserved(struct hid_parser *parser, struct hid_item *item)
504{ 511{
505 dbg("reserved item type, tag 0x%x", item->tag); 512 dbg_hid("reserved item type, tag 0x%x\n", item->tag);
506 return 0; 513 return 0;
507} 514}
508 515
@@ -667,14 +674,14 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
667 while ((start = fetch_item(start, end, &item)) != NULL) { 674 while ((start = fetch_item(start, end, &item)) != NULL) {
668 675
669 if (item.format != HID_ITEM_FORMAT_SHORT) { 676 if (item.format != HID_ITEM_FORMAT_SHORT) {
670 dbg("unexpected long global item"); 677 dbg_hid("unexpected long global item\n");
671 hid_free_device(device); 678 hid_free_device(device);
672 vfree(parser); 679 vfree(parser);
673 return NULL; 680 return NULL;
674 } 681 }
675 682
676 if (dispatch_type[item.type](parser, &item)) { 683 if (dispatch_type[item.type](parser, &item)) {
677 dbg("item %u %u %u %u parsing failed\n", 684 dbg_hid("item %u %u %u %u parsing failed\n",
678 item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag); 685 item.format, (unsigned)item.size, (unsigned)item.type, (unsigned)item.tag);
679 hid_free_device(device); 686 hid_free_device(device);
680 vfree(parser); 687 vfree(parser);
@@ -683,13 +690,13 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
683 690
684 if (start == end) { 691 if (start == end) {
685 if (parser->collection_stack_ptr) { 692 if (parser->collection_stack_ptr) {
686 dbg("unbalanced collection at end of report description"); 693 dbg_hid("unbalanced collection at end of report description\n");
687 hid_free_device(device); 694 hid_free_device(device);
688 vfree(parser); 695 vfree(parser);
689 return NULL; 696 return NULL;
690 } 697 }
691 if (parser->local.delimiter_depth) { 698 if (parser->local.delimiter_depth) {
692 dbg("unbalanced delimiter at end of report description"); 699 dbg_hid("unbalanced delimiter at end of report description\n");
693 hid_free_device(device); 700 hid_free_device(device);
694 vfree(parser); 701 vfree(parser);
695 return NULL; 702 return NULL;
@@ -699,7 +706,7 @@ struct hid_device *hid_parse_report(__u8 *start, unsigned size)
699 } 706 }
700 } 707 }
701 708
702 dbg("item fetching failed at offset %d\n", (int)(end - start)); 709 dbg_hid("item fetching failed at offset %d\n", (int)(end - start));
703 hid_free_device(device); 710 hid_free_device(device);
704 vfree(parser); 711 vfree(parser);
705 return NULL; 712 return NULL;
@@ -915,13 +922,13 @@ int hid_set_field(struct hid_field *field, unsigned offset, __s32 value)
915 hid_dump_input(field->usage + offset, value); 922 hid_dump_input(field->usage + offset, value);
916 923
917 if (offset >= field->report_count) { 924 if (offset >= field->report_count) {
918 dbg("offset (%d) exceeds report_count (%d)", offset, field->report_count); 925 dbg_hid("offset (%d) exceeds report_count (%d)\n", offset, field->report_count);
919 hid_dump_field(field, 8); 926 hid_dump_field(field, 8);
920 return -1; 927 return -1;
921 } 928 }
922 if (field->logical_minimum < 0) { 929 if (field->logical_minimum < 0) {
923 if (value != snto32(s32ton(value, size), size)) { 930 if (value != snto32(s32ton(value, size), size)) {
924 dbg("value %d is out of range", value); 931 dbg_hid("value %d is out of range\n", value);
925 return -1; 932 return -1;
926 } 933 }
927 } 934 }
@@ -934,19 +941,17 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
934{ 941{
935 struct hid_report_enum *report_enum = hid->report_enum + type; 942 struct hid_report_enum *report_enum = hid->report_enum + type;
936 struct hid_report *report; 943 struct hid_report *report;
937 int n, rsize; 944 int n, rsize, i;
938 945
939 if (!hid) 946 if (!hid)
940 return -ENODEV; 947 return -ENODEV;
941 948
942 if (!size) { 949 if (!size) {
943 dbg("empty report"); 950 dbg_hid("empty report\n");
944 return -1; 951 return -1;
945 } 952 }
946 953
947#ifdef CONFIG_HID_DEBUG 954 dbg_hid("report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
948 printk(KERN_DEBUG __FILE__ ": report (size %u) (%snumbered)\n", size, report_enum->numbered ? "" : "un");
949#endif
950 955
951 n = 0; /* Normally report number is 0 */ 956 n = 0; /* Normally report number is 0 */
952 if (report_enum->numbered) { /* Device uses numbered reports, data[0] is report number */ 957 if (report_enum->numbered) { /* Device uses numbered reports, data[0] is report number */
@@ -954,25 +959,21 @@ int hid_input_report(struct hid_device *hid, int type, u8 *data, int size, int i
954 size--; 959 size--;
955 } 960 }
956 961
957#ifdef CONFIG_HID_DEBUG 962 /* dump the report descriptor */
958 { 963 dbg_hid("report %d (size %u) = ", n, size);
959 int i; 964 for (i = 0; i < size; i++)
960 printk(KERN_DEBUG __FILE__ ": report %d (size %u) = ", n, size); 965 dbg_hid_line(" %02x", data[i]);
961 for (i = 0; i < size; i++) 966 dbg_hid_line("\n");
962 printk(" %02x", data[i]);
963 printk("\n");
964 }
965#endif
966 967
967 if (!(report = report_enum->report_id_hash[n])) { 968 if (!(report = report_enum->report_id_hash[n])) {
968 dbg("undefined report_id %d received", n); 969 dbg_hid("undefined report_id %d received\n", n);
969 return -1; 970 return -1;
970 } 971 }
971 972
972 rsize = ((report->size - 1) >> 3) + 1; 973 rsize = ((report->size - 1) >> 3) + 1;
973 974
974 if (size < rsize) { 975 if (size < rsize) {
975 dbg("report %d is too short, (%d < %d)", report->id, size, rsize); 976 dbg_hid("report %d is too short, (%d < %d)\n", report->id, size, rsize);
976 memset(data + size, 0, rsize - size); 977 memset(data + size, 0, rsize - size);
977 } 978 }
978 979
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 83c4126b37c3..a13757b78980 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -347,6 +347,9 @@ static void resolv_usage_page(unsigned page) {
347void hid_resolv_usage(unsigned usage) { 347void hid_resolv_usage(unsigned usage) {
348 const struct hid_usage_entry *p; 348 const struct hid_usage_entry *p;
349 349
350 if (!hid_debug)
351 return;
352
350 resolv_usage_page(usage >> 16); 353 resolv_usage_page(usage >> 16);
351 printk("."); 354 printk(".");
352 for (p = hid_usage_table; p->description; p++) 355 for (p = hid_usage_table; p->description; p++)
@@ -369,6 +372,9 @@ __inline__ static void tab(int n) {
369void hid_dump_field(struct hid_field *field, int n) { 372void hid_dump_field(struct hid_field *field, int n) {
370 int j; 373 int j;
371 374
375 if (!hid_debug)
376 return;
377
372 if (field->physical) { 378 if (field->physical) {
373 tab(n); 379 tab(n);
374 printk("Physical("); 380 printk("Physical(");
@@ -466,6 +472,9 @@ void hid_dump_device(struct hid_device *device) {
466 unsigned i,k; 472 unsigned i,k;
467 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"}; 473 static char *table[] = {"INPUT", "OUTPUT", "FEATURE"};
468 474
475 if (!hid_debug)
476 return;
477
469 for (i = 0; i < HID_REPORT_TYPES; i++) { 478 for (i = 0; i < HID_REPORT_TYPES; i++) {
470 report_enum = device->report_enum + i; 479 report_enum = device->report_enum + i;
471 list = report_enum->report_list.next; 480 list = report_enum->report_list.next;
@@ -489,6 +498,9 @@ void hid_dump_device(struct hid_device *device) {
489EXPORT_SYMBOL_GPL(hid_dump_device); 498EXPORT_SYMBOL_GPL(hid_dump_device);
490 499
491void hid_dump_input(struct hid_usage *usage, __s32 value) { 500void hid_dump_input(struct hid_usage *usage, __s32 value) {
501 if (!hid_debug)
502 return;
503
492 printk("hid-debug: input "); 504 printk("hid-debug: input ");
493 hid_resolv_usage(usage->hid); 505 hid_resolv_usage(usage->hid);
494 printk(" = %d\n", value); 506 printk(" = %d\n", value);
@@ -758,6 +770,9 @@ static char **names[EV_MAX + 1] = {
758 770
759void hid_resolv_event(__u8 type, __u16 code) { 771void hid_resolv_event(__u8 type, __u16 code) {
760 772
773 if (!hid_debug)
774 return;
775
761 printk("%s.%s", events[type] ? events[type] : "?", 776 printk("%s.%s", events[type] ? events[type] : "?",
762 names[type] ? (names[type][code] ? names[type][code] : "?") : "?"); 777 names[type] ? (names[type][code] ? names[type][code] : "?") : "?");
763} 778}
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 7f817897b178..8edbd30cf795 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -60,6 +60,19 @@ static const unsigned char hid_keyboard[256] = {
60 150,158,159,128,136,177,178,176,142,152,173,140,unk,unk,unk,unk 60 150,158,159,128,136,177,178,176,142,152,173,140,unk,unk,unk,unk
61}; 61};
62 62
63/* extended mapping for certain Logitech hardware (Logitech cordless desktop LX500) */
64#define LOGITECH_EXPANDED_KEYMAP_SIZE 80
65static int logitech_expanded_keymap[LOGITECH_EXPANDED_KEYMAP_SIZE] = {
66 0,216, 0,213,175,156, 0, 0, 0, 0,
67 144, 0, 0, 0, 0, 0, 0, 0, 0,212,
68 174,167,152,161,112, 0, 0, 0,154, 0,
69 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
70 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
71 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
72 0, 0, 0, 0, 0,183,184,185,186,187,
73 188,189,190,191,192,193,194, 0, 0, 0
74};
75
63static const struct { 76static const struct {
64 __s32 x; 77 __s32 x;
65 __s32 y; 78 __s32 y;
@@ -308,9 +321,7 @@ static int hidinput_setkeycode(struct input_dev *dev, int scancode,
308 321
309 clear_bit(old_keycode, dev->keybit); 322 clear_bit(old_keycode, dev->keybit);
310 set_bit(usage->code, dev->keybit); 323 set_bit(usage->code, dev->keybit);
311#ifdef CONFIG_HID_DEBUG 324 dbg_hid(KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", keycode, scancode);
312 printk (KERN_DEBUG "Assigned keycode %d to HID usage code %x\n", keycode, scancode);
313#endif
314 /* Set the keybit for the old keycode if the old keycode is used 325 /* Set the keybit for the old keycode if the old keycode is used
315 * by another key */ 326 * by another key */
316 if (hidinput_find_key (hid, 0, old_keycode)) 327 if (hidinput_find_key (hid, 0, old_keycode))
@@ -333,11 +344,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
333 344
334 field->hidinput = hidinput; 345 field->hidinput = hidinput;
335 346
336#ifdef CONFIG_HID_DEBUG 347 dbg_hid("Mapping: ");
337 printk(KERN_DEBUG "Mapping: ");
338 hid_resolv_usage(usage->hid); 348 hid_resolv_usage(usage->hid);
339 printk(" ---> "); 349 dbg_hid_line(" ---> ");
340#endif
341 350
342 if (field->flags & HID_MAIN_ITEM_CONSTANT) 351 if (field->flags & HID_MAIN_ITEM_CONSTANT)
343 goto ignore; 352 goto ignore;
@@ -378,6 +387,21 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
378 } 387 }
379 } 388 }
380 389
390 /* Special handling for Logitech Cordless Desktop */
391 if (field->application != HID_GD_MOUSE) {
392 if (device->quirks & HID_QUIRK_LOGITECH_EXPANDED_KEYMAP) {
393 int hid = usage->hid & HID_USAGE;
394 if (hid < LOGITECH_EXPANDED_KEYMAP_SIZE && logitech_expanded_keymap[hid] != 0)
395 code = logitech_expanded_keymap[hid];
396 }
397 } else {
398 if (device->quirks & HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL) {
399 int hid = usage->hid & HID_USAGE;
400 if (hid == 7 || hid == 8)
401 goto ignore;
402 }
403 }
404
381 map_key(code); 405 map_key(code);
382 break; 406 break;
383 407
@@ -566,6 +590,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
566 case 0x0e5: map_key_clear(KEY_BASSBOOST); break; 590 case 0x0e5: map_key_clear(KEY_BASSBOOST); break;
567 case 0x0e9: map_key_clear(KEY_VOLUMEUP); break; 591 case 0x0e9: map_key_clear(KEY_VOLUMEUP); break;
568 case 0x0ea: map_key_clear(KEY_VOLUMEDOWN); break; 592 case 0x0ea: map_key_clear(KEY_VOLUMEDOWN); break;
593
594 /* reserved in HUT 1.12. Reported on Petalynx remote */
595 case 0x0f6: map_key_clear(KEY_NEXT); break;
596 case 0x0fa: map_key_clear(KEY_BACK); break;
597
569 case 0x183: map_key_clear(KEY_CONFIG); break; 598 case 0x183: map_key_clear(KEY_CONFIG); break;
570 case 0x184: map_key_clear(KEY_WORDPROCESSOR); break; 599 case 0x184: map_key_clear(KEY_WORDPROCESSOR); break;
571 case 0x185: map_key_clear(KEY_EDITOR); break; 600 case 0x185: map_key_clear(KEY_EDITOR); break;
@@ -598,7 +627,9 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
598 case 0x21b: map_key_clear(KEY_COPY); break; 627 case 0x21b: map_key_clear(KEY_COPY); break;
599 case 0x21c: map_key_clear(KEY_CUT); break; 628 case 0x21c: map_key_clear(KEY_CUT); break;
600 case 0x21d: map_key_clear(KEY_PASTE); break; 629 case 0x21d: map_key_clear(KEY_PASTE); break;
601 case 0x221: map_key_clear(KEY_FIND); break; 630 case 0x21f: map_key_clear(KEY_FIND); break;
631 case 0x221: map_key_clear(KEY_SEARCH); break;
632 case 0x222: map_key_clear(KEY_GOTO); break;
602 case 0x223: map_key_clear(KEY_HOMEPAGE); break; 633 case 0x223: map_key_clear(KEY_HOMEPAGE); break;
603 case 0x224: map_key_clear(KEY_BACK); break; 634 case 0x224: map_key_clear(KEY_BACK); break;
604 case 0x225: map_key_clear(KEY_FORWARD); break; 635 case 0x225: map_key_clear(KEY_FORWARD); break;
@@ -688,7 +719,28 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
688 break; 719 break;
689 720
690 case HID_UP_MSVENDOR: 721 case HID_UP_MSVENDOR:
691 goto ignore; 722
723 /* special case - Chicony Chicony KU-0418 tactical pad */
724 if (device->vendor == 0x04f2 && device->product == 0x0418) {
725 set_bit(EV_REP, input->evbit);
726 switch(usage->hid & HID_USAGE) {
727 case 0xff01: map_key_clear(BTN_1); break;
728 case 0xff02: map_key_clear(BTN_2); break;
729 case 0xff03: map_key_clear(BTN_3); break;
730 case 0xff04: map_key_clear(BTN_4); break;
731 case 0xff05: map_key_clear(BTN_5); break;
732 case 0xff06: map_key_clear(BTN_6); break;
733 case 0xff07: map_key_clear(BTN_7); break;
734 case 0xff08: map_key_clear(BTN_8); break;
735 case 0xff09: map_key_clear(BTN_9); break;
736 case 0xff0a: map_key_clear(BTN_A); break;
737 case 0xff0b: map_key_clear(BTN_B); break;
738 default: goto ignore;
739 }
740 } else {
741 goto ignore;
742 }
743 break;
692 744
693 case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */ 745 case HID_UP_CUSTOM: /* Reported on Logitech and Powerbook USB keyboards */
694 746
@@ -704,10 +756,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
704 } 756 }
705 break; 757 break;
706 758
707 case HID_UP_LOGIVENDOR: /* Reported on Logitech Ultra X Media Remote */ 759 case HID_UP_LOGIVENDOR:
708
709 set_bit(EV_REP, input->evbit); 760 set_bit(EV_REP, input->evbit);
710 switch(usage->hid & HID_USAGE) { 761 switch(usage->hid & HID_USAGE) {
762 /* Reported on Logitech Ultra X Media Remote */
711 case 0x004: map_key_clear(KEY_AGAIN); break; 763 case 0x004: map_key_clear(KEY_AGAIN); break;
712 case 0x00d: map_key_clear(KEY_HOME); break; 764 case 0x00d: map_key_clear(KEY_HOME); break;
713 case 0x024: map_key_clear(KEY_SHUFFLE); break; 765 case 0x024: map_key_clear(KEY_SHUFFLE); break;
@@ -725,6 +777,14 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
725 case 0x04d: map_key_clear(KEY_SUBTITLE); break; 777 case 0x04d: map_key_clear(KEY_SUBTITLE); break;
726 case 0x051: map_key_clear(KEY_RED); break; 778 case 0x051: map_key_clear(KEY_RED); break;
727 case 0x052: map_key_clear(KEY_CLOSE); break; 779 case 0x052: map_key_clear(KEY_CLOSE); break;
780
781 /* Reported on Petalynx Maxter remote */
782 case 0x05a: map_key_clear(KEY_TEXT); break;
783 case 0x05b: map_key_clear(KEY_RED); break;
784 case 0x05c: map_key_clear(KEY_GREEN); break;
785 case 0x05d: map_key_clear(KEY_YELLOW); break;
786 case 0x05e: map_key_clear(KEY_BLUE); break;
787
728 default: goto ignore; 788 default: goto ignore;
729 } 789 }
730 break; 790 break;
@@ -818,16 +878,24 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
818 field->dpad = usage->code; 878 field->dpad = usage->code;
819 } 879 }
820 880
881 /* for those devices which produce Consumer volume usage as relative,
882 * we emulate pressing volumeup/volumedown appropriate number of times
883 * in hidinput_hid_event()
884 */
885 if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
886 (usage->code == ABS_VOLUME)) {
887 set_bit(KEY_VOLUMEUP, input->keybit);
888 set_bit(KEY_VOLUMEDOWN, input->keybit);
889 }
890
821 hid_resolv_event(usage->type, usage->code); 891 hid_resolv_event(usage->type, usage->code);
822#ifdef CONFIG_HID_DEBUG 892
823 printk("\n"); 893 dbg_hid_line("\n");
824#endif 894
825 return; 895 return;
826 896
827ignore: 897ignore:
828#ifdef CONFIG_HID_DEBUG 898 dbg_hid_line("IGNORED\n");
829 printk("IGNORED\n");
830#endif
831 return; 899 return;
832} 900}
833 901
@@ -896,18 +964,33 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
896 } 964 }
897 965
898 if (usage->hid == (HID_UP_PID | 0x83UL)) { /* Simultaneous Effects Max */ 966 if (usage->hid == (HID_UP_PID | 0x83UL)) { /* Simultaneous Effects Max */
899 dbg("Maximum Effects - %d",value); 967 dbg_hid("Maximum Effects - %d\n",value);
900 return; 968 return;
901 } 969 }
902 970
903 if (usage->hid == (HID_UP_PID | 0x7fUL)) { 971 if (usage->hid == (HID_UP_PID | 0x7fUL)) {
904 dbg("PID Pool Report\n"); 972 dbg_hid("PID Pool Report\n");
905 return; 973 return;
906 } 974 }
907 975
908 if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */ 976 if ((usage->type == EV_KEY) && (usage->code == 0)) /* Key 0 is "unassigned", not KEY_UNKNOWN */
909 return; 977 return;
910 978
979 if ((usage->type == EV_ABS) && (field->flags & HID_MAIN_ITEM_RELATIVE) &&
980 (usage->code == ABS_VOLUME)) {
981 int count = abs(value);
982 int direction = value > 0 ? KEY_VOLUMEUP : KEY_VOLUMEDOWN;
983 int i;
984
985 for (i = 0; i < count; i++) {
986 input_event(input, EV_KEY, direction, 1);
987 input_sync(input);
988 input_event(input, EV_KEY, direction, 0);
989 input_sync(input);
990 }
991 return;
992 }
993
911 input_event(input, usage->type, usage->code, value); 994 input_event(input, usage->type, usage->code, value);
912 995
913 if ((field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->type == EV_KEY)) 996 if ((field->flags & HID_MAIN_ITEM_RELATIVE) && (usage->type == EV_KEY))
@@ -976,7 +1059,7 @@ int hidinput_connect(struct hid_device *hid)
976 if (IS_INPUT_APPLICATION(hid->collection[i].usage)) 1059 if (IS_INPUT_APPLICATION(hid->collection[i].usage))
977 break; 1060 break;
978 1061
979 if (i == hid->maxcollection) 1062 if (i == hid->maxcollection && (hid->quirks & HID_QUIRK_HIDINPUT) == 0)
980 return -1; 1063 return -1;
981 1064
982 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS) 1065 if (hid->quirks & HID_QUIRK_SKIP_OUTPUT_REPORTS)
@@ -994,7 +1077,7 @@ int hidinput_connect(struct hid_device *hid)
994 if (!hidinput || !input_dev) { 1077 if (!hidinput || !input_dev) {
995 kfree(hidinput); 1078 kfree(hidinput);
996 input_free_device(input_dev); 1079 input_free_device(input_dev);
997 err("Out of memory during hid input probe"); 1080 err_hid("Out of memory during hid input probe");
998 return -1; 1081 return -1;
999 } 1082 }
1000 1083
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index d91b9dac6dff..3afa4a5035b7 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -60,6 +60,12 @@ MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying "
60 " quirks=vendorID:productID:quirks" 60 " quirks=vendorID:productID:quirks"
61 " where vendorID, productID, and quirks are all in" 61 " where vendorID, productID, and quirks are all in"
62 " 0x-prefixed hex"); 62 " 0x-prefixed hex");
63static char *rdesc_quirks_param[MAX_USBHID_BOOT_QUIRKS] = { [ 0 ... (MAX_USBHID_BOOT_QUIRKS - 1) ] = NULL };
64module_param_array_named(rdesc_quirks, rdesc_quirks_param, charp, NULL, 0444);
65MODULE_PARM_DESC(rdesc_quirks, "Add/modify report descriptor quirks by specifying "
66 " rdesc_quirks=vendorID:productID:rdesc_quirks"
67 " where vendorID, productID, and rdesc_quirks are all in"
68 " 0x-prefixed hex");
63/* 69/*
64 * Input submission and I/O error handler. 70 * Input submission and I/O error handler.
65 */ 71 */
@@ -127,7 +133,7 @@ static void hid_reset(struct work_struct *work)
127 hid_io_error(hid); 133 hid_io_error(hid);
128 break; 134 break;
129 default: 135 default:
130 err("can't reset device, %s-%s/input%d, status %d", 136 err_hid("can't reset device, %s-%s/input%d, status %d",
131 hid_to_usb_dev(hid)->bus->bus_name, 137 hid_to_usb_dev(hid)->bus->bus_name,
132 hid_to_usb_dev(hid)->devpath, 138 hid_to_usb_dev(hid)->devpath,
133 usbhid->ifnum, rc); 139 usbhid->ifnum, rc);
@@ -220,7 +226,7 @@ static void hid_irq_in(struct urb *urb)
220 if (status) { 226 if (status) {
221 clear_bit(HID_IN_RUNNING, &usbhid->iofl); 227 clear_bit(HID_IN_RUNNING, &usbhid->iofl);
222 if (status != -EPERM) { 228 if (status != -EPERM) {
223 err("can't resubmit intr, %s-%s/input%d, status %d", 229 err_hid("can't resubmit intr, %s-%s/input%d, status %d",
224 hid_to_usb_dev(hid)->bus->bus_name, 230 hid_to_usb_dev(hid)->bus->bus_name,
225 hid_to_usb_dev(hid)->devpath, 231 hid_to_usb_dev(hid)->devpath,
226 usbhid->ifnum, status); 232 usbhid->ifnum, status);
@@ -240,10 +246,10 @@ static int hid_submit_out(struct hid_device *hid)
240 usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0); 246 usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) + 1 + (report->id > 0);
241 usbhid->urbout->dev = hid_to_usb_dev(hid); 247 usbhid->urbout->dev = hid_to_usb_dev(hid);
242 248
243 dbg("submitting out urb"); 249 dbg_hid("submitting out urb\n");
244 250
245 if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) { 251 if (usb_submit_urb(usbhid->urbout, GFP_ATOMIC)) {
246 err("usb_submit_urb(out) failed"); 252 err_hid("usb_submit_urb(out) failed");
247 return -1; 253 return -1;
248 } 254 }
249 255
@@ -287,12 +293,12 @@ static int hid_submit_ctrl(struct hid_device *hid)
287 usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum); 293 usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
288 usbhid->cr->wLength = cpu_to_le16(len); 294 usbhid->cr->wLength = cpu_to_le16(len);
289 295
290 dbg("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u", 296 dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
291 usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report", 297 usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" : "Get_Report",
292 usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength); 298 usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);
293 299
294 if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) { 300 if (usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC)) {
295 err("usb_submit_urb(ctrl) failed"); 301 err_hid("usb_submit_urb(ctrl) failed");
296 return -1; 302 return -1;
297 } 303 }
298 304
@@ -474,7 +480,7 @@ int usbhid_wait_io(struct hid_device *hid)
474 if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) && 480 if (!wait_event_timeout(hid->wait, (!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
475 !test_bit(HID_OUT_RUNNING, &usbhid->iofl)), 481 !test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
476 10*HZ)) { 482 10*HZ)) {
477 dbg("timeout waiting for ctrl or out queue to clear"); 483 dbg_hid("timeout waiting for ctrl or out queue to clear\n");
478 return -1; 484 return -1;
479 } 485 }
480 486
@@ -633,20 +639,6 @@ static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
633} 639}
634 640
635/* 641/*
636 * Cherry Cymotion keyboard have an invalid HID report descriptor,
637 * that needs fixing before we can parse it.
638 */
639
640static void hid_fixup_cymotion_descriptor(char *rdesc, int rsize)
641{
642 if (rsize >= 17 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
643 info("Fixing up Cherry Cymotion report descriptor");
644 rdesc[11] = rdesc[16] = 0xff;
645 rdesc[12] = rdesc[17] = 0x03;
646 }
647}
648
649/*
650 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller 642 * Sending HID_REQ_GET_REPORT changes the operation mode of the ps3 controller
651 * to "operational". Without this, the ps3 controller will not report any 643 * to "operational". Without this, the ps3 controller will not report any
652 * events. 644 * events.
@@ -667,51 +659,11 @@ static void hid_fixup_sony_ps3_controller(struct usb_device *dev, int ifnum)
667 USB_CTRL_GET_TIMEOUT); 659 USB_CTRL_GET_TIMEOUT);
668 660
669 if (result < 0) 661 if (result < 0)
670 err("%s failed: %d\n", __func__, result); 662 err_hid("%s failed: %d\n", __func__, result);
671 663
672 kfree(buf); 664 kfree(buf);
673} 665}
674 666
675/*
676 * Certain Logitech keyboards send in report #3 keys which are far
677 * above the logical maximum described in descriptor. This extends
678 * the original value of 0x28c of logical maximum to 0x104d
679 */
680static void hid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
681{
682 if (rsize >= 90 && rdesc[83] == 0x26
683 && rdesc[84] == 0x8c
684 && rdesc[85] == 0x02) {
685 info("Fixing up Logitech keyboard report descriptor");
686 rdesc[84] = rdesc[89] = 0x4d;
687 rdesc[85] = rdesc[90] = 0x10;
688 }
689}
690
691/*
692 * Some USB barcode readers from cypress have usage min and usage max in
693 * the wrong order
694 */
695static void hid_fixup_cypress_descriptor(unsigned char *rdesc, int rsize)
696{
697 short fixed = 0;
698 int i;
699
700 for (i = 0; i < rsize - 4; i++) {
701 if (rdesc[i] == 0x29 && rdesc [i+2] == 0x19) {
702 unsigned char tmp;
703
704 rdesc[i] = 0x19; rdesc[i+2] = 0x29;
705 tmp = rdesc[i+3];
706 rdesc[i+3] = rdesc[i+1];
707 rdesc[i+1] = tmp;
708 }
709 }
710
711 if (fixed)
712 info("Fixing up Cypress report descriptor");
713}
714
715static struct hid_device *usb_hid_configure(struct usb_interface *intf) 667static struct hid_device *usb_hid_configure(struct usb_interface *intf)
716{ 668{
717 struct usb_host_interface *interface = intf->cur_altsetting; 669 struct usb_host_interface *interface = intf->cur_altsetting;
@@ -746,7 +698,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
746 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) && 698 if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
747 (!interface->desc.bNumEndpoints || 699 (!interface->desc.bNumEndpoints ||
748 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) { 700 usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID, &hdesc))) {
749 dbg("class descriptor not present\n"); 701 dbg_hid("class descriptor not present\n");
750 return NULL; 702 return NULL;
751 } 703 }
752 704
@@ -755,41 +707,34 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
755 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength); 707 rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);
756 708
757 if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) { 709 if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
758 dbg("weird size of report descriptor (%u)", rsize); 710 dbg_hid("weird size of report descriptor (%u)\n", rsize);
759 return NULL; 711 return NULL;
760 } 712 }
761 713
762 if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) { 714 if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) {
763 dbg("couldn't allocate rdesc memory"); 715 dbg_hid("couldn't allocate rdesc memory\n");
764 return NULL; 716 return NULL;
765 } 717 }
766 718
767 hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0); 719 hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0);
768 720
769 if ((n = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, HID_DT_REPORT, rdesc, rsize)) < 0) { 721 if ((n = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber, HID_DT_REPORT, rdesc, rsize)) < 0) {
770 dbg("reading report descriptor failed"); 722 dbg_hid("reading report descriptor failed\n");
771 kfree(rdesc); 723 kfree(rdesc);
772 return NULL; 724 return NULL;
773 } 725 }
774 726
775 if ((quirks & HID_QUIRK_CYMOTION)) 727 usbhid_fixup_report_descriptor(le16_to_cpu(dev->descriptor.idVendor),
776 hid_fixup_cymotion_descriptor(rdesc, rsize); 728 le16_to_cpu(dev->descriptor.idProduct), rdesc,
729 rsize, rdesc_quirks_param);
777 730
778 if (quirks & HID_QUIRK_LOGITECH_DESCRIPTOR) 731 dbg_hid("report descriptor (size %u, read %d) = ", rsize, n);
779 hid_fixup_logitech_descriptor(rdesc, rsize);
780
781 if (quirks & HID_QUIRK_SWAPPED_MIN_MAX)
782 hid_fixup_cypress_descriptor(rdesc, rsize);
783
784#ifdef CONFIG_HID_DEBUG
785 printk(KERN_DEBUG __FILE__ ": report descriptor (size %u, read %d) = ", rsize, n);
786 for (n = 0; n < rsize; n++) 732 for (n = 0; n < rsize; n++)
787 printk(" %02x", (unsigned char) rdesc[n]); 733 dbg_hid_line(" %02x", (unsigned char) rdesc[n]);
788 printk("\n"); 734 dbg_hid_line("\n");
789#endif
790 735
791 if (!(hid = hid_parse_report(rdesc, n))) { 736 if (!(hid = hid_parse_report(rdesc, n))) {
792 dbg("parsing report descriptor failed"); 737 dbg_hid("parsing report descriptor failed\n");
793 kfree(rdesc); 738 kfree(rdesc);
794 return NULL; 739 return NULL;
795 } 740 }
@@ -861,7 +806,7 @@ static struct hid_device *usb_hid_configure(struct usb_interface *intf)
861 } 806 }
862 807
863 if (!usbhid->urbin) { 808 if (!usbhid->urbin) {
864 err("couldn't find an input interrupt endpoint"); 809 err_hid("couldn't find an input interrupt endpoint");
865 goto fail; 810 goto fail;
866 } 811 }
867 812
@@ -956,7 +901,7 @@ static void hid_disconnect(struct usb_interface *intf)
956 usb_kill_urb(usbhid->urbctrl); 901 usb_kill_urb(usbhid->urbctrl);
957 902
958 del_timer_sync(&usbhid->io_retry); 903 del_timer_sync(&usbhid->io_retry);
959 flush_scheduled_work(); 904 cancel_work_sync(&usbhid->reset_work);
960 905
961 if (hid->claimed & HID_CLAIMED_INPUT) 906 if (hid->claimed & HID_CLAIMED_INPUT)
962 hidinput_disconnect(hid); 907 hidinput_disconnect(hid);
@@ -978,7 +923,7 @@ static int hid_probe(struct usb_interface *intf, const struct usb_device_id *id)
978 int i; 923 int i;
979 char *c; 924 char *c;
980 925
981 dbg("HID probe called for ifnum %d", 926 dbg_hid("HID probe called for ifnum %d\n",
982 intf->altsetting->desc.bInterfaceNumber); 927 intf->altsetting->desc.bInterfaceNumber);
983 928
984 if (!(hid = usb_hid_configure(intf))) 929 if (!(hid = usb_hid_configure(intf)))
diff --git a/drivers/hid/usbhid/hid-lgff.c b/drivers/hid/usbhid/hid-lgff.c
index c5cd4107d6af..4b7ab6a46d93 100644
--- a/drivers/hid/usbhid/hid-lgff.c
+++ b/drivers/hid/usbhid/hid-lgff.c
@@ -78,7 +78,7 @@ static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *ef
78 report->field[0]->value[1] = 0x08; 78 report->field[0]->value[1] = 0x08;
79 report->field[0]->value[2] = x; 79 report->field[0]->value[2] = x;
80 report->field[0]->value[3] = y; 80 report->field[0]->value[3] = y;
81 dbg("(x, y)=(%04x, %04x)", x, y); 81 dbg_hid("(x, y)=(%04x, %04x)\n", x, y);
82 usbhid_submit_report(hid, report, USB_DIR_OUT); 82 usbhid_submit_report(hid, report, USB_DIR_OUT);
83 break; 83 break;
84 84
@@ -93,7 +93,7 @@ static int hid_lgff_play(struct input_dev *dev, void *data, struct ff_effect *ef
93 report->field[0]->value[1] = 0x00; 93 report->field[0]->value[1] = 0x00;
94 report->field[0]->value[2] = left; 94 report->field[0]->value[2] = left;
95 report->field[0]->value[3] = right; 95 report->field[0]->value[3] = right;
96 dbg("(left, right)=(%04x, %04x)", left, right); 96 dbg_hid("(left, right)=(%04x, %04x)\n", left, right);
97 usbhid_submit_report(hid, report, USB_DIR_OUT); 97 usbhid_submit_report(hid, report, USB_DIR_OUT);
98 break; 98 break;
99 } 99 }
@@ -113,20 +113,20 @@ int hid_lgff_init(struct hid_device* hid)
113 113
114 /* Find the report to use */ 114 /* Find the report to use */
115 if (list_empty(report_list)) { 115 if (list_empty(report_list)) {
116 err("No output report found"); 116 err_hid("No output report found");
117 return -1; 117 return -1;
118 } 118 }
119 119
120 /* Check that the report looks ok */ 120 /* Check that the report looks ok */
121 report = list_entry(report_list->next, struct hid_report, list); 121 report = list_entry(report_list->next, struct hid_report, list);
122 if (!report) { 122 if (!report) {
123 err("NULL output report"); 123 err_hid("NULL output report");
124 return -1; 124 return -1;
125 } 125 }
126 126
127 field = report->field[0]; 127 field = report->field[0];
128 if (!field) { 128 if (!field) {
129 err("NULL field"); 129 err_hid("NULL field");
130 return -1; 130 return -1;
131 } 131 }
132 132
diff --git a/drivers/hid/usbhid/hid-pidff.c b/drivers/hid/usbhid/hid-pidff.c
index f5a90e950e6b..011326178c06 100644
--- a/drivers/hid/usbhid/hid-pidff.c
+++ b/drivers/hid/usbhid/hid-pidff.c
@@ -738,6 +738,7 @@ static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude)
738 pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = 0; 738 pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = 0;
739 pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0; 739 pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0;
740 pidff_set(&pidff->set_effect[PID_GAIN], magnitude); 740 pidff_set(&pidff->set_effect[PID_GAIN], magnitude);
741 pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1;
741 pidff->set_effect[PID_START_DELAY].value[0] = 0; 742 pidff->set_effect[PID_START_DELAY].value[0] = 0;
742 743
743 usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_EFFECT], 744 usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_EFFECT],
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f6c4145dc202..775b9f3b8ce3 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -105,6 +105,9 @@
105#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f 105#define USB_VENDOR_ID_ESSENTIAL_REALITY 0x0d7f
106#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100 106#define USB_DEVICE_ID_ESSENTIAL_REALITY_P5 0x0100
107 107
108#define USB_VENDOR_ID_GAMERON 0x0810
109#define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001
110
108#define USB_VENDOR_ID_GLAB 0x06c2 111#define USB_VENDOR_ID_GLAB 0x06c2
109#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038 112#define USB_DEVICE_ID_4_PHIDGETSERVO_30 0x0038
110#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039 113#define USB_DEVICE_ID_1_PHIDGETSERVO_30 0x0039
@@ -196,8 +199,10 @@
196#define USB_VENDOR_ID_LOGITECH 0x046d 199#define USB_VENDOR_ID_LOGITECH 0x046d
197#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 200#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
198#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294 201#define USB_DEVICE_ID_LOGITECH_WHEEL 0xc294
202#define USB_DEVICE_ID_LOGITECH_KBD 0xc311
199#define USB_DEVICE_ID_S510_RECEIVER 0xc50c 203#define USB_DEVICE_ID_S510_RECEIVER 0xc50c
200#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517 204#define USB_DEVICE_ID_S510_RECEIVER_2 0xc517
205#define USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500 0xc512
201#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513 206#define USB_DEVICE_ID_MX3000_RECEIVER 0xc513
202#define USB_DEVICE_ID_DINOVO_EDGE 0xc714 207#define USB_DEVICE_ID_DINOVO_EDGE 0xc714
203 208
@@ -209,6 +214,13 @@
209#define USB_DEVICE_ID_MGE_UPS 0xffff 214#define USB_DEVICE_ID_MGE_UPS 0xffff
210#define USB_DEVICE_ID_MGE_UPS1 0x0001 215#define USB_DEVICE_ID_MGE_UPS1 0x0001
211 216
217#define USB_VENDOR_ID_MICROSOFT 0x045e
218#define USB_DEVICE_ID_SIDEWINDER_GV 0x003b
219
220#define USB_VENDOR_ID_NCR 0x0404
221#define USB_DEVICE_ID_NCR_FIRST 0x0300
222#define USB_DEVICE_ID_NCR_LAST 0x03ff
223
212#define USB_VENDOR_ID_NEC 0x073e 224#define USB_VENDOR_ID_NEC 0x073e
213#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301 225#define USB_DEVICE_ID_NEC_USB_GAME_PAD 0x0301
214 226
@@ -220,6 +232,9 @@
220#define USB_VENDOR_ID_PANTHERLORD 0x0810 232#define USB_VENDOR_ID_PANTHERLORD 0x0810
221#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001 233#define USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK 0x0001
222 234
235#define USB_VENDOR_ID_PETALYNX 0x18b1
236#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
237
223#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43 238#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
224#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003 239#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
225 240
@@ -278,6 +293,7 @@ static const struct hid_blacklist {
278 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD }, 293 { USB_VENDOR_ID_AASHIMA, USB_DEVICE_ID_AASHIMA_PREDATOR, HID_QUIRK_BADPAD },
279 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD }, 294 { USB_VENDOR_ID_ALPS, USB_DEVICE_ID_IBM_GAMEPAD, HID_QUIRK_BADPAD },
280 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD }, 295 { USB_VENDOR_ID_CHIC, USB_DEVICE_ID_CHIC_GAMEPAD, HID_QUIRK_BADPAD },
296 { USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR, HID_QUIRK_MULTI_INPUT },
281 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 297 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_DRIVING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
282 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 298 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FLYING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
283 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT }, 299 { USB_VENDOR_ID_HAPP, USB_DEVICE_ID_UGCI_FIGHTING, HID_QUIRK_BADPAD | HID_QUIRK_MULTI_INPUT },
@@ -285,11 +301,10 @@ static const struct hid_blacklist {
285 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD }, 301 { USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RUMBLEPAD, HID_QUIRK_BADPAD },
286 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD }, 302 { USB_VENDOR_ID_TOPMAX, USB_DEVICE_ID_TOPMAX_COBRAPAD, HID_QUIRK_BADPAD },
287 303
288 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_CYMOTION },
289
290 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES }, 304 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE, HID_QUIRK_DUPLICATE_USAGES },
291 305
292 { USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV }, 306 { USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM, HID_QUIRK_HIDDEV },
307 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV, HID_QUIRK_HIDINPUT },
293 308
294 { USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE }, 309 { USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_01, HID_QUIRK_IGNORE },
295 { USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE }, 310 { USB_VENDOR_ID_AIPTEK, USB_DEVICE_ID_AIPTEK_10, HID_QUIRK_IGNORE },
@@ -409,9 +424,7 @@ static const struct hid_blacklist {
409 { USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR, HID_QUIRK_IGNORE }, 424 { USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_FLAIR, HID_QUIRK_IGNORE },
410 { USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302, HID_QUIRK_IGNORE }, 425 { USB_VENDOR_ID_ACECAD, USB_DEVICE_ID_ACECAD_302, HID_QUIRK_IGNORE },
411 426
412 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_LOGITECH_DESCRIPTOR }, 427 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_CORDLESS_DESKTOP_LX500, HID_QUIRK_LOGITECH_IGNORE_DOUBLED_WHEEL | HID_QUIRK_LOGITECH_EXPANDED_KEYMAP },
413 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_LOGITECH_DESCRIPTOR },
414 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_LOGITECH_DESCRIPTOR },
415 428
416 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL }, 429 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE, HID_QUIRK_MIGHTYMOUSE | HID_QUIRK_INVERT_HWHEEL },
417 430
@@ -426,6 +439,7 @@ static const struct hid_blacklist {
426 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET }, 439 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
427 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET }, 440 { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
428 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET }, 441 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WHEEL, HID_QUIRK_NOGET },
442 { USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_NOGET },
429 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET }, 443 { USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
430 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET }, 444 { USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
431 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT }, 445 { USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
@@ -448,9 +462,28 @@ static const struct hid_blacklist {
448 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE }, 462 { USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY, HID_QUIRK_POWERBOOK_HAS_FN | HID_QUIRK_IGNORE_MOUSE },
449 463
450 { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS }, 464 { USB_VENDOR_ID_DELL, USB_DEVICE_ID_DELL_W7658, HID_QUIRK_RESET_LEDS },
465 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_KBD, HID_QUIRK_RESET_LEDS },
451 466
452 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_SWAPPED_MIN_MAX }, 467 { 0, 0 }
453 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_SWAPPED_MIN_MAX }, 468};
469
470/* Quirks for devices which require report descriptor fixup go here */
471static const struct hid_rdesc_blacklist {
472 __u16 idVendor;
473 __u16 idProduct;
474 __u32 quirks;
475} hid_rdesc_blacklist[] = {
476
477 { USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION, HID_QUIRK_RDESC_CYMOTION },
478
479 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
480 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER, HID_QUIRK_RDESC_LOGITECH },
481 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2, HID_QUIRK_RDESC_LOGITECH },
482
483 { USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE, HID_QUIRK_RDESC_PETALYNX },
484
485 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
486 { USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2, HID_QUIRK_RDESC_SWAPPED_MIN_MAX },
454 487
455 { 0, 0 } 488 { 0, 0 }
456}; 489};
@@ -493,7 +526,7 @@ static struct hid_blacklist *usbhid_exists_dquirk(const u16 idVendor,
493 } 526 }
494 527
495 if (bl_entry != NULL) 528 if (bl_entry != NULL)
496 dbg("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n", 529 dbg_hid("Found dynamic quirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
497 bl_entry->quirks, bl_entry->idVendor, 530 bl_entry->quirks, bl_entry->idVendor,
498 bl_entry->idProduct); 531 bl_entry->idProduct);
499 532
@@ -521,13 +554,13 @@ int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
521 int list_edited = 0; 554 int list_edited = 0;
522 555
523 if (!idVendor) { 556 if (!idVendor) {
524 dbg("Cannot add a quirk with idVendor = 0"); 557 dbg_hid("Cannot add a quirk with idVendor = 0\n");
525 return -EINVAL; 558 return -EINVAL;
526 } 559 }
527 560
528 q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL); 561 q_new = kmalloc(sizeof(struct quirks_list_struct), GFP_KERNEL);
529 if (!q_new) { 562 if (!q_new) {
530 dbg("Could not allocate quirks_list_struct"); 563 dbg_hid("Could not allocate quirks_list_struct\n");
531 return -ENOMEM; 564 return -ENOMEM;
532 } 565 }
533 566
@@ -559,7 +592,6 @@ int usbhid_modify_dquirk(const u16 idVendor, const u16 idProduct,
559 return 0; 592 return 0;
560} 593}
561 594
562
563/** 595/**
564 * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory 596 * usbhid_remove_all_dquirks: remove all runtime HID quirks from memory
565 * 597 *
@@ -643,7 +675,7 @@ static const struct hid_blacklist *usbhid_exists_squirk(const u16 idVendor,
643 bl_entry = &hid_blacklist[n]; 675 bl_entry = &hid_blacklist[n];
644 676
645 if (bl_entry != NULL) 677 if (bl_entry != NULL)
646 dbg("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n", 678 dbg_hid("Found squirk 0x%x for USB HID vendor 0x%hx prod 0x%hx\n",
647 bl_entry->quirks, bl_entry->idVendor, 679 bl_entry->quirks, bl_entry->idVendor,
648 bl_entry->idProduct); 680 bl_entry->idProduct);
649 return bl_entry; 681 return bl_entry;
@@ -675,6 +707,12 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
675 idProduct <= USB_DEVICE_ID_CODEMERCS_IOW_LAST) 707 idProduct <= USB_DEVICE_ID_CODEMERCS_IOW_LAST)
676 return HID_QUIRK_IGNORE; 708 return HID_QUIRK_IGNORE;
677 709
710 /* NCR devices must not be queried for reports */
711 if (idVendor == USB_VENDOR_ID_NCR &&
712 idProduct >= USB_DEVICE_ID_NCR_FIRST &&
713 idProduct <= USB_DEVICE_ID_NCR_LAST)
714 return HID_QUIRK_NOGET;
715
678 down_read(&dquirks_rwsem); 716 down_read(&dquirks_rwsem);
679 bl_entry = usbhid_exists_dquirk(idVendor, idProduct); 717 bl_entry = usbhid_exists_dquirk(idVendor, idProduct);
680 if (!bl_entry) 718 if (!bl_entry)
@@ -686,3 +724,126 @@ u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct)
686 return quirks; 724 return quirks;
687} 725}
688 726
727/*
728 * The Cherry Cymotion keyboard has an invalid HID report descriptor
729 * that needs fixing before we can parse it.
730 */
731static void usbhid_fixup_cymotion_descriptor(char *rdesc, int rsize)
732{
733 if (rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) {
734 printk(KERN_INFO "Fixing up Cherry Cymotion report descriptor\n");
735 rdesc[11] = rdesc[16] = 0xff;
736 rdesc[12] = rdesc[17] = 0x03;
737 }
738}
739
740
741/*
742 * Certain Logitech keyboards send in report #3 keys which are far
743 * above the logical maximum described in the descriptor. This fixup
744 * extends the logical maximum from its original value of 0x28c to 0x104d.
745 */
746static void usbhid_fixup_logitech_descriptor(unsigned char *rdesc, int rsize)
747{
748 if (rsize >= 91 && rdesc[83] == 0x26
749 && rdesc[84] == 0x8c
750 && rdesc[85] == 0x02) {
751 printk(KERN_INFO "Fixing up Logitech keyboard report descriptor\n");
752 rdesc[84] = rdesc[89] = 0x4d;
753 rdesc[85] = rdesc[90] = 0x10;
754 }
755}
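/*
 * (Editorial sketch, not part of the patch.) The byte pattern matched above
 * is the HID short item "Logical Maximum" (tag byte 0x26) followed by a
 * two-byte little-endian payload, so 0x26 0x8c 0x02 encodes 0x028c and the
 * fixup rewrites the payload bytes to 0x4d 0x10, i.e. 0x104d. A minimal
 * decode of such a payload, with a hypothetical helper name, looks like:
 */
static inline __u16 hid_short_item_u16(const unsigned char *item)
{
	/* item[0] is the tag byte, item[1]/item[2] are the little-endian data */
	return item[1] | (item[2] << 8);
}
/* hid_short_item_u16(&rdesc[83]) == 0x028c before the fixup, 0x104d after */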
756
757/* The Petalynx Maxter Remote has its consumer page maximum set too low */
758static void usbhid_fixup_petalynx_descriptor(unsigned char *rdesc, int rsize)
759{
760 if (rsize >= 62 && rdesc[39] == 0x2a
761 && rdesc[40] == 0xf5
762 && rdesc[41] == 0x00
763 && rdesc[59] == 0x26
764 && rdesc[60] == 0xf9
765 && rdesc[61] == 0x00) {
766 printk(KERN_INFO "Fixing up Petalynx Maxter Remote report descriptor\n");
767 rdesc[60] = 0xfa;
768 rdesc[40] = 0xfa;
769 }
770}
771
772/*
773 * Some USB barcode readers from Cypress have usage min and usage max in
774 * the wrong order
775 */
776static void usbhid_fixup_cypress_descriptor(unsigned char *rdesc, int rsize)
777{
778 short fixed = 0;
779 int i;
780
781 for (i = 0; i < rsize - 4; i++) {
782 if (rdesc[i] == 0x29 && rdesc[i+2] == 0x19) {
783 unsigned char tmp;
784
785 rdesc[i] = 0x19; rdesc[i+2] = 0x29;
786 tmp = rdesc[i+3];
787 rdesc[i+3] = rdesc[i+1];
788 rdesc[i+1] = tmp; fixed = 1; /* let the printk below report the fixup */
789 }
790 }
791
792 if (fixed)
793 printk(KERN_INFO "Fixing up Cypress report descriptor\n");
794}
795
796
797static void __usbhid_fixup_report_descriptor(__u32 quirks, char *rdesc, unsigned rsize)
798{
799 if (quirks & HID_QUIRK_RDESC_CYMOTION)
800 usbhid_fixup_cymotion_descriptor(rdesc, rsize);
801
802 if (quirks & HID_QUIRK_RDESC_LOGITECH)
803 usbhid_fixup_logitech_descriptor(rdesc, rsize);
804
805 if (quirks & HID_QUIRK_RDESC_SWAPPED_MIN_MAX)
806 usbhid_fixup_cypress_descriptor(rdesc, rsize);
807
808 if (quirks & HID_QUIRK_RDESC_PETALYNX)
809 usbhid_fixup_petalynx_descriptor(rdesc, rsize);
810}
811
812/**
813 * usbhid_fixup_report_descriptor: check if report descriptor needs fixup
814 *
815 * Description:
816 * Walks the hid_rdesc_blacklist[] array and checks whether the device
817 * is known to have a broken report descriptor that needs to be fixed up
818 * before it is handed to the HID parser
819 *
820 * Returns: nothing
821 */
822void usbhid_fixup_report_descriptor(const u16 idVendor, const u16 idProduct,
823 char *rdesc, unsigned rsize, char **quirks_param)
824{
825 int n, m;
826 u16 paramVendor, paramProduct;
827 u32 quirks;
828
829 /* static rdesc quirk entries */
830 for (n = 0; hid_rdesc_blacklist[n].idVendor; n++)
831 if (hid_rdesc_blacklist[n].idVendor == idVendor &&
832 hid_rdesc_blacklist[n].idProduct == idProduct)
833 __usbhid_fixup_report_descriptor(hid_rdesc_blacklist[n].quirks,
834 rdesc, rsize);
835
836 /* runtime rdesc quirk entries handling */
837 for (n = 0; n < MAX_USBHID_BOOT_QUIRKS && quirks_param[n]; n++) {
838 m = sscanf(quirks_param[n], "0x%hx:0x%hx:0x%x",
839 &paramVendor, &paramProduct, &quirks);
840
841 if (m != 3)
842 printk(KERN_WARNING
843 "Could not parse HID quirk module param %s\n",
844 quirks_param[n]);
845 else if (paramVendor == idVendor && paramProduct == idProduct)
846 __usbhid_fixup_report_descriptor(quirks, rdesc, rsize);
847 }
848
849}
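The runtime entries parsed above are plain strings of the form
"0x<idVendor>:0x<idProduct>:0x<quirks>". A minimal user-space sketch of that
parse, using made-up vendor/product IDs (how the strings reach quirks_param[],
e.g. via a module option, is outside this hunk):

#include <stdio.h>

int main(void)
{
	unsigned short vid, pid;
	unsigned int quirks;
	const char *s = "0x0001:0x0002:0x00000004";	/* hypothetical entry */

	/* same format string as usbhid_fixup_report_descriptor() above */
	if (sscanf(s, "0x%hx:0x%hx:0x%x", &vid, &pid, &quirks) == 3)
		printf("vendor 0x%04x product 0x%04x quirks 0x%08x\n",
		       vid, pid, quirks);
	return 0;
}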
diff --git a/drivers/hid/usbhid/hid-tmff.c b/drivers/hid/usbhid/hid-tmff.c
index ab5ba6ef891c..555bb48b4295 100644
--- a/drivers/hid/usbhid/hid-tmff.c
+++ b/drivers/hid/usbhid/hid-tmff.c
@@ -70,7 +70,7 @@ static int hid_tmff_play(struct input_dev *dev, void *data, struct ff_effect *ef
70 70
71 tmff->rumble->value[0] = left; 71 tmff->rumble->value[0] = left;
72 tmff->rumble->value[1] = right; 72 tmff->rumble->value[1] = right;
73 dbg("(left,right)=(%08x, %08x)", left, right); 73 dbg_hid("(left,right)=(%08x, %08x)\n", left, right);
74 usbhid_submit_report(hid, tmff->report, USB_DIR_OUT); 74 usbhid_submit_report(hid, tmff->report, USB_DIR_OUT);
75 75
76 return 0; 76 return 0;
diff --git a/drivers/hid/usbhid/hid-zpff.c b/drivers/hid/usbhid/hid-zpff.c
index a7fbffcdaf36..5a688274f6a3 100644
--- a/drivers/hid/usbhid/hid-zpff.c
+++ b/drivers/hid/usbhid/hid-zpff.c
@@ -21,10 +21,6 @@
21 */ 21 */
22 22
23 23
24/* #define DEBUG */
25
26#define debug(format, arg...) pr_debug("hid-zpff: " format "\n" , ## arg)
27
28#include <linux/input.h> 24#include <linux/input.h>
29#include <linux/usb.h> 25#include <linux/usb.h>
30#include <linux/hid.h> 26#include <linux/hid.h>
@@ -49,14 +45,14 @@ static int hid_zpff_play(struct input_dev *dev, void *data,
49 45
50 left = effect->u.rumble.strong_magnitude; 46 left = effect->u.rumble.strong_magnitude;
51 right = effect->u.rumble.weak_magnitude; 47 right = effect->u.rumble.weak_magnitude;
52 debug("called with 0x%04x 0x%04x", left, right); 48 dbg_hid("called with 0x%04x 0x%04x\n", left, right);
53 49
54 left = left * 0x7f / 0xffff; 50 left = left * 0x7f / 0xffff;
55 right = right * 0x7f / 0xffff; 51 right = right * 0x7f / 0xffff;
56 52
57 zpff->report->field[2]->value[0] = left; 53 zpff->report->field[2]->value[0] = left;
58 zpff->report->field[3]->value[0] = right; 54 zpff->report->field[3]->value[0] = right;
59 debug("running with 0x%02x 0x%02x", left, right); 55 dbg_hid("running with 0x%02x 0x%02x\n", left, right);
60 usbhid_submit_report(hid, zpff->report, USB_DIR_OUT); 56 usbhid_submit_report(hid, zpff->report, USB_DIR_OUT);
61 57
62 return 0; 58 return 0;
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index 488d61bdbf2c..e793127f971e 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -779,7 +779,7 @@ int hiddev_connect(struct hid_device *hid)
779 779
780 retval = usb_register_dev(usbhid->intf, &hiddev_class); 780 retval = usb_register_dev(usbhid->intf, &hiddev_class);
781 if (retval) { 781 if (retval) {
782 err("Not able to get a minor for this device."); 782 err_hid("Not able to get a minor for this device.");
783 kfree(hiddev); 783 kfree(hiddev);
784 return -1; 784 return -1;
785 } 785 }
diff --git a/drivers/hid/usbhid/usbkbd.c b/drivers/hid/usbhid/usbkbd.c
index 130978780713..b76b02f7b52d 100644
--- a/drivers/hid/usbhid/usbkbd.c
+++ b/drivers/hid/usbhid/usbkbd.c
@@ -125,7 +125,7 @@ static void usb_kbd_irq(struct urb *urb)
125resubmit: 125resubmit:
126 i = usb_submit_urb (urb, GFP_ATOMIC); 126 i = usb_submit_urb (urb, GFP_ATOMIC);
127 if (i) 127 if (i)
128 err ("can't resubmit intr, %s-%s/input0, status %d", 128 err_hid ("can't resubmit intr, %s-%s/input0, status %d",
129 kbd->usbdev->bus->bus_name, 129 kbd->usbdev->bus->bus_name,
130 kbd->usbdev->devpath, i); 130 kbd->usbdev->devpath, i);
131} 131}
@@ -151,7 +151,7 @@ static int usb_kbd_event(struct input_dev *dev, unsigned int type,
151 *(kbd->leds) = kbd->newleds; 151 *(kbd->leds) = kbd->newleds;
152 kbd->led->dev = kbd->usbdev; 152 kbd->led->dev = kbd->usbdev;
153 if (usb_submit_urb(kbd->led, GFP_ATOMIC)) 153 if (usb_submit_urb(kbd->led, GFP_ATOMIC))
154 err("usb_submit_urb(leds) failed"); 154 err_hid("usb_submit_urb(leds) failed");
155 155
156 return 0; 156 return 0;
157} 157}
@@ -169,7 +169,7 @@ static void usb_kbd_led(struct urb *urb)
169 *(kbd->leds) = kbd->newleds; 169 *(kbd->leds) = kbd->newleds;
170 kbd->led->dev = kbd->usbdev; 170 kbd->led->dev = kbd->usbdev;
171 if (usb_submit_urb(kbd->led, GFP_ATOMIC)) 171 if (usb_submit_urb(kbd->led, GFP_ATOMIC))
172 err("usb_submit_urb(leds) failed"); 172 err_hid("usb_submit_urb(leds) failed");
173} 173}
174 174
175static int usb_kbd_open(struct input_dev *dev) 175static int usb_kbd_open(struct input_dev *dev)
diff --git a/drivers/ide/arm/icside.c b/drivers/ide/arm/icside.c
index 66f826252aee..444a0b84f5bd 100644
--- a/drivers/ide/arm/icside.c
+++ b/drivers/ide/arm/icside.c
@@ -448,23 +448,21 @@ static int icside_dma_test_irq(ide_drive_t *drive)
448 ICS_ARCIN_V6_INTRSTAT_1)) & 1; 448 ICS_ARCIN_V6_INTRSTAT_1)) & 1;
449} 449}
450 450
451static int icside_dma_timeout(ide_drive_t *drive) 451static void icside_dma_timeout(ide_drive_t *drive)
452{ 452{
453 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 453 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
454 454
455 if (icside_dma_test_irq(drive)) 455 if (icside_dma_test_irq(drive))
456 return 0; 456 return;
457 457
458 ide_dump_status(drive, "DMA timeout", 458 ide_dump_status(drive, "DMA timeout", HWIF(drive)->INB(IDE_STATUS_REG));
459 HWIF(drive)->INB(IDE_STATUS_REG));
460 459
461 return icside_dma_end(drive); 460 icside_dma_end(drive);
462} 461}
463 462
464static int icside_dma_lostirq(ide_drive_t *drive) 463static void icside_dma_lost_irq(ide_drive_t *drive)
465{ 464{
466 printk(KERN_ERR "%s: IRQ lost\n", drive->name); 465 printk(KERN_ERR "%s: IRQ lost\n", drive->name);
467 return 1;
468} 466}
469 467
470static void icside_dma_init(ide_hwif_t *hwif) 468static void icside_dma_init(ide_hwif_t *hwif)
@@ -490,8 +488,8 @@ static void icside_dma_init(ide_hwif_t *hwif)
490 hwif->dma_start = icside_dma_start; 488 hwif->dma_start = icside_dma_start;
491 hwif->ide_dma_end = icside_dma_end; 489 hwif->ide_dma_end = icside_dma_end;
492 hwif->ide_dma_test_irq = icside_dma_test_irq; 490 hwif->ide_dma_test_irq = icside_dma_test_irq;
493 hwif->ide_dma_timeout = icside_dma_timeout; 491 hwif->dma_timeout = icside_dma_timeout;
494 hwif->ide_dma_lostirq = icside_dma_lostirq; 492 hwif->dma_lost_irq = icside_dma_lost_irq;
495 493
496 hwif->drives[0].autodma = hwif->autodma; 494 hwif->drives[0].autodma = hwif->autodma;
497 hwif->drives[1].autodma = hwif->autodma; 495 hwif->drives[1].autodma = hwif->autodma;
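The icside hunk above, like the au1xxx-ide and ide-dma hunks further down,
converts the lost-IRQ and timeout hooks from int-returning methods into void
ones, since the callers in ide-io.c now ignore the result. Inferred shape of
the renamed hwif hooks (an assumption -- the actual declarations live in
include/linux/ide.h, which is not part of this diff):

	/* inferred from the assignments in this patch, not quoted from ide.h */
	void (*dma_lost_irq)(ide_drive_t *drive);
	void (*dma_timeout)(ide_drive_t *drive);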
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c
index ca0341c05e55..886091bc7db0 100644
--- a/drivers/ide/cris/ide-cris.c
+++ b/drivers/ide/cris/ide-cris.c
@@ -819,7 +819,7 @@ init_e100_ide (void)
819 hwif->dma_host_off = &cris_dma_off; 819 hwif->dma_host_off = &cris_dma_off;
820 hwif->dma_host_on = &cris_dma_on; 820 hwif->dma_host_on = &cris_dma_on;
821 hwif->dma_off_quietly = &cris_dma_off; 821 hwif->dma_off_quietly = &cris_dma_off;
822 hwif->udma_four = 0; 822 hwif->cbl = ATA_CBL_PATA40;
823 hwif->ultra_mask = cris_ultra_mask; 823 hwif->ultra_mask = cris_ultra_mask;
824 hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */ 824 hwif->mwdma_mask = 0x07; /* Multiword DMA 0-2 */
825 hwif->autodma = 1; 825 hwif->autodma = 1;
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index 252ab8295edf..1486eb212ccc 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -481,7 +481,7 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
481 else 481 else
482 printk(" Unknown Error Type: "); 482 printk(" Unknown Error Type: ");
483 483
484 if (sense->sense_key < ARY_LEN(sense_key_texts)) 484 if (sense->sense_key < ARRAY_SIZE(sense_key_texts))
485 s = sense_key_texts[sense->sense_key]; 485 s = sense_key_texts[sense->sense_key];
486 486
487 printk("%s -- (Sense key=0x%02x)\n", s, sense->sense_key); 487 printk("%s -- (Sense key=0x%02x)\n", s, sense->sense_key);
@@ -491,7 +491,7 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
491 sense->ascq); 491 sense->ascq);
492 s = buf; 492 s = buf;
493 } else { 493 } else {
494 int lo = 0, mid, hi = ARY_LEN(sense_data_texts); 494 int lo = 0, mid, hi = ARRAY_SIZE(sense_data_texts);
495 unsigned long key = (sense->sense_key << 16); 495 unsigned long key = (sense->sense_key << 16);
496 key |= (sense->asc << 8); 496 key |= (sense->asc << 8);
497 if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd)) 497 if (!(sense->ascq >= 0x80 && sense->ascq <= 0xdd))
@@ -524,7 +524,7 @@ void cdrom_analyze_sense_data(ide_drive_t *drive,
524 524
525 if (failed_command != NULL) { 525 if (failed_command != NULL) {
526 526
527 int lo=0, mid, hi= ARY_LEN (packet_command_texts); 527 int lo=0, mid, hi= ARRAY_SIZE(packet_command_texts);
528 s = NULL; 528 s = NULL;
529 529
530 while (hi > lo) { 530 while (hi > lo) {
diff --git a/drivers/ide/ide-cd.h b/drivers/ide/ide-cd.h
index ad1f2ed14a37..228b29c5d2e4 100644
--- a/drivers/ide/ide-cd.h
+++ b/drivers/ide/ide-cd.h
@@ -498,8 +498,6 @@ struct cdrom_info {
498 * Descriptions of ATAPI error codes. 498 * Descriptions of ATAPI error codes.
499 */ 499 */
500 500
501#define ARY_LEN(a) ((sizeof(a) / sizeof(a[0])))
502
503/* This stuff should be in cdrom.h, since it is now generic... */ 501/* This stuff should be in cdrom.h, since it is now generic... */
504 502
505/* ATAPI sense keys (from table 140 of ATAPI 2.6) */ 503/* ATAPI sense keys (from table 140 of ATAPI 2.6) */
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c
index dc2175c81f5e..b1304a7f3e0a 100644
--- a/drivers/ide/ide-disk.c
+++ b/drivers/ide/ide-disk.c
@@ -1190,11 +1190,11 @@ static int idedisk_ioctl(struct inode *inode, struct file *file,
1190 return generic_ide_ioctl(drive, file, bdev, cmd, arg); 1190 return generic_ide_ioctl(drive, file, bdev, cmd, arg);
1191 1191
1192read_val: 1192read_val:
1193 down(&ide_setting_sem); 1193 mutex_lock(&ide_setting_mtx);
1194 spin_lock_irqsave(&ide_lock, flags); 1194 spin_lock_irqsave(&ide_lock, flags);
1195 err = *val; 1195 err = *val;
1196 spin_unlock_irqrestore(&ide_lock, flags); 1196 spin_unlock_irqrestore(&ide_lock, flags);
1197 up(&ide_setting_sem); 1197 mutex_unlock(&ide_setting_mtx);
1198 return err >= 0 ? put_user(err, (long __user *)arg) : err; 1198 return err >= 0 ? put_user(err, (long __user *)arg) : err;
1199 1199
1200set_val: 1200set_val:
@@ -1204,9 +1204,9 @@ set_val:
1204 if (!capable(CAP_SYS_ADMIN)) 1204 if (!capable(CAP_SYS_ADMIN))
1205 err = -EACCES; 1205 err = -EACCES;
1206 else { 1206 else {
1207 down(&ide_setting_sem); 1207 mutex_lock(&ide_setting_mtx);
1208 err = setfunc(drive, arg); 1208 err = setfunc(drive, arg);
1209 up(&ide_setting_sem); 1209 mutex_unlock(&ide_setting_mtx);
1210 } 1210 }
1211 } 1211 }
1212 return err; 1212 return err;
diff --git a/drivers/ide/ide-dma.c b/drivers/ide/ide-dma.c
index ead141e2db9e..5fe1d72ab451 100644
--- a/drivers/ide/ide-dma.c
+++ b/drivers/ide/ide-dma.c
@@ -91,45 +91,45 @@
91 91
92static const struct drive_list_entry drive_whitelist [] = { 92static const struct drive_list_entry drive_whitelist [] = {
93 93
94 { "Micropolis 2112A" , "ALL" }, 94 { "Micropolis 2112A" , NULL },
95 { "CONNER CTMA 4000" , "ALL" }, 95 { "CONNER CTMA 4000" , NULL },
96 { "CONNER CTT8000-A" , "ALL" }, 96 { "CONNER CTT8000-A" , NULL },
97 { "ST34342A" , "ALL" }, 97 { "ST34342A" , NULL },
98 { NULL , NULL } 98 { NULL , NULL }
99}; 99};
100 100
101static const struct drive_list_entry drive_blacklist [] = { 101static const struct drive_list_entry drive_blacklist [] = {
102 102
103 { "WDC AC11000H" , "ALL" }, 103 { "WDC AC11000H" , NULL },
104 { "WDC AC22100H" , "ALL" }, 104 { "WDC AC22100H" , NULL },
105 { "WDC AC32500H" , "ALL" }, 105 { "WDC AC32500H" , NULL },
106 { "WDC AC33100H" , "ALL" }, 106 { "WDC AC33100H" , NULL },
107 { "WDC AC31600H" , "ALL" }, 107 { "WDC AC31600H" , NULL },
108 { "WDC AC32100H" , "24.09P07" }, 108 { "WDC AC32100H" , "24.09P07" },
109 { "WDC AC23200L" , "21.10N21" }, 109 { "WDC AC23200L" , "21.10N21" },
110 { "Compaq CRD-8241B" , "ALL" }, 110 { "Compaq CRD-8241B" , NULL },
111 { "CRD-8400B" , "ALL" }, 111 { "CRD-8400B" , NULL },
112 { "CRD-8480B", "ALL" }, 112 { "CRD-8480B", NULL },
113 { "CRD-8482B", "ALL" }, 113 { "CRD-8482B", NULL },
114 { "CRD-84" , "ALL" }, 114 { "CRD-84" , NULL },
115 { "SanDisk SDP3B" , "ALL" }, 115 { "SanDisk SDP3B" , NULL },
116 { "SanDisk SDP3B-64" , "ALL" }, 116 { "SanDisk SDP3B-64" , NULL },
117 { "SANYO CD-ROM CRD" , "ALL" }, 117 { "SANYO CD-ROM CRD" , NULL },
118 { "HITACHI CDR-8" , "ALL" }, 118 { "HITACHI CDR-8" , NULL },
119 { "HITACHI CDR-8335" , "ALL" }, 119 { "HITACHI CDR-8335" , NULL },
120 { "HITACHI CDR-8435" , "ALL" }, 120 { "HITACHI CDR-8435" , NULL },
121 { "Toshiba CD-ROM XM-6202B" , "ALL" }, 121 { "Toshiba CD-ROM XM-6202B" , NULL },
122 { "TOSHIBA CD-ROM XM-1702BC", "ALL" }, 122 { "TOSHIBA CD-ROM XM-1702BC", NULL },
123 { "CD-532E-A" , "ALL" }, 123 { "CD-532E-A" , NULL },
124 { "E-IDE CD-ROM CR-840", "ALL" }, 124 { "E-IDE CD-ROM CR-840", NULL },
125 { "CD-ROM Drive/F5A", "ALL" }, 125 { "CD-ROM Drive/F5A", NULL },
126 { "WPI CDD-820", "ALL" }, 126 { "WPI CDD-820", NULL },
127 { "SAMSUNG CD-ROM SC-148C", "ALL" }, 127 { "SAMSUNG CD-ROM SC-148C", NULL },
128 { "SAMSUNG CD-ROM SC", "ALL" }, 128 { "SAMSUNG CD-ROM SC", NULL },
129 { "ATAPI CD-ROM DRIVE 40X MAXIMUM", "ALL" }, 129 { "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL },
130 { "_NEC DV5800A", "ALL" }, 130 { "_NEC DV5800A", NULL },
131 { "SAMSUNG CD-ROM SN-124", "N001" }, 131 { "SAMSUNG CD-ROM SN-124", "N001" },
132 { "Seagate STT20000A", "ALL" }, 132 { "Seagate STT20000A", NULL },
133 { NULL , NULL } 133 { NULL , NULL }
134 134
135}; 135};
@@ -147,8 +147,8 @@ int ide_in_drive_list(struct hd_driveid *id, const struct drive_list_entry *driv
147{ 147{
148 for ( ; drive_table->id_model ; drive_table++) 148 for ( ; drive_table->id_model ; drive_table++)
149 if ((!strcmp(drive_table->id_model, id->model)) && 149 if ((!strcmp(drive_table->id_model, id->model)) &&
150 ((strstr(id->fw_rev, drive_table->id_firmware)) || 150 (!drive_table->id_firmware ||
151 (!strcmp(drive_table->id_firmware, "ALL")))) 151 strstr(id->fw_rev, drive_table->id_firmware)))
152 return 1; 152 return 1;
153 return 0; 153 return 0;
154} 154}
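A small illustration of the new matching rule in ide_in_drive_list(): a NULL
id_firmware entry now matches every firmware revision of that model, replacing
the old "ALL" sentinel string (the model names below are made up):

static const struct drive_list_entry example_list[] = {
	{ "EXAMPLE MODEL A", NULL },	/* any firmware revision */
	{ "EXAMPLE MODEL B", "1.23" },	/* only firmware containing "1.23" */
	{ NULL, NULL }			/* table terminator */
};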
@@ -702,8 +702,22 @@ static unsigned int ide_get_mode_mask(ide_drive_t *drive, u8 base)
702 mask = id->dma_mword & hwif->mwdma_mask; 702 mask = id->dma_mword & hwif->mwdma_mask;
703 break; 703 break;
704 case XFER_SW_DMA_0: 704 case XFER_SW_DMA_0:
705 if (id->field_valid & 2) 705 if (id->field_valid & 2) {
706 mask = id->dma_1word & hwif->swdma_mask; 706 mask = id->dma_1word & hwif->swdma_mask;
707 } else if (id->tDMA) {
708 /*
709 * ide_fix_driveid() doesn't convert ->tDMA to the
710 * CPU endianness so we need to do it here
711 */
712 u8 mode = le16_to_cpu(id->tDMA);
713
714 /*
715 * if the mode is valid convert it to the mask
716 * (the maximum allowed mode is XFER_SW_DMA_2)
717 */
718 if (mode <= 2)
719 mask = ((2 << mode) - 1) & hwif->swdma_mask;
720 }
707 break; 721 break;
708 default: 722 default:
709 BUG(); 723 BUG();
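Worked example of the mask computation added in the XFER_SW_DMA_0 branch
above, before it is ANDed with hwif->swdma_mask:

	/* (2 << mode) - 1 expands the highest reported SWDMA mode into a bitmap */
	mode 0: (2 << 0) - 1 = 0x1	/* SWDMA0 only    */
	mode 1: (2 << 1) - 1 = 0x3	/* SWDMA0..SWDMA1 */
	mode 2: (2 << 2) - 1 = 0x7	/* SWDMA0..SWDMA2 */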
@@ -847,27 +861,27 @@ int ide_set_dma(ide_drive_t *drive)
847 return rc; 861 return rc;
848} 862}
849 863
850EXPORT_SYMBOL_GPL(ide_set_dma);
851
852#ifdef CONFIG_BLK_DEV_IDEDMA_PCI 864#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
853int __ide_dma_lostirq (ide_drive_t *drive) 865void ide_dma_lost_irq (ide_drive_t *drive)
854{ 866{
855 printk("%s: DMA interrupt recovery\n", drive->name); 867 printk("%s: DMA interrupt recovery\n", drive->name);
856 return 1;
857} 868}
858 869
859EXPORT_SYMBOL(__ide_dma_lostirq); 870EXPORT_SYMBOL(ide_dma_lost_irq);
860 871
861int __ide_dma_timeout (ide_drive_t *drive) 872void ide_dma_timeout (ide_drive_t *drive)
862{ 873{
874 ide_hwif_t *hwif = HWIF(drive);
875
863 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name); 876 printk(KERN_ERR "%s: timeout waiting for DMA\n", drive->name);
864 if (HWIF(drive)->ide_dma_test_irq(drive))
865 return 0;
866 877
867 return HWIF(drive)->ide_dma_end(drive); 878 if (hwif->ide_dma_test_irq(drive))
879 return;
880
881 hwif->ide_dma_end(drive);
868} 882}
869 883
870EXPORT_SYMBOL(__ide_dma_timeout); 884EXPORT_SYMBOL(ide_dma_timeout);
871 885
872/* 886/*
873 * Needed for allowing full modular support of ide-driver 887 * Needed for allowing full modular support of ide-driver
@@ -1018,10 +1032,10 @@ void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_p
1018 hwif->ide_dma_end = &__ide_dma_end; 1032 hwif->ide_dma_end = &__ide_dma_end;
1019 if (!hwif->ide_dma_test_irq) 1033 if (!hwif->ide_dma_test_irq)
1020 hwif->ide_dma_test_irq = &__ide_dma_test_irq; 1034 hwif->ide_dma_test_irq = &__ide_dma_test_irq;
1021 if (!hwif->ide_dma_timeout) 1035 if (!hwif->dma_timeout)
1022 hwif->ide_dma_timeout = &__ide_dma_timeout; 1036 hwif->dma_timeout = &ide_dma_timeout;
1023 if (!hwif->ide_dma_lostirq) 1037 if (!hwif->dma_lost_irq)
1024 hwif->ide_dma_lostirq = &__ide_dma_lostirq; 1038 hwif->dma_lost_irq = &ide_dma_lost_irq;
1025 1039
1026 if (hwif->chipset != ide_trm290) { 1040 if (hwif->chipset != ide_trm290) {
1027 u8 dma_stat = hwif->INB(hwif->dma_status); 1041 u8 dma_stat = hwif->INB(hwif->dma_status);
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index bfe8f1b712ba..c5b5011da56e 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1350,7 +1350,7 @@ static ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error)
1350 hwif->INB(IDE_STATUS_REG)); 1350 hwif->INB(IDE_STATUS_REG));
1351 } else { 1351 } else {
1352 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 1352 printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name);
1353 (void) hwif->ide_dma_timeout(drive); 1353 hwif->dma_timeout(drive);
1354 } 1354 }
1355 1355
1356 /* 1356 /*
@@ -1466,7 +1466,7 @@ void ide_timer_expiry (unsigned long data)
1466 startstop = handler(drive); 1466 startstop = handler(drive);
1467 } else if (drive_is_ready(drive)) { 1467 } else if (drive_is_ready(drive)) {
1468 if (drive->waiting_for_dma) 1468 if (drive->waiting_for_dma)
1469 (void) hwgroup->hwif->ide_dma_lostirq(drive); 1469 hwgroup->hwif->dma_lost_irq(drive);
1470 (void)ide_ack_intr(hwif); 1470 (void)ide_ack_intr(hwif);
1471 printk(KERN_WARNING "%s: lost interrupt\n", drive->name); 1471 printk(KERN_WARNING "%s: lost interrupt\n", drive->name);
1472 startstop = handler(drive); 1472 startstop = handler(drive);
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index f0be5f665a0e..92578b6832e9 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -574,7 +574,10 @@ u8 eighty_ninty_three (ide_drive_t *drive)
574 ide_hwif_t *hwif = drive->hwif; 574 ide_hwif_t *hwif = drive->hwif;
575 struct hd_driveid *id = drive->id; 575 struct hd_driveid *id = drive->id;
576 576
577 if (hwif->udma_four == 0) 577 if (hwif->cbl == ATA_CBL_PATA40_SHORT)
578 return 1;
579
580 if (hwif->cbl != ATA_CBL_PATA80)
578 goto no_80w; 581 goto no_80w;
579 582
580 /* Check for SATA but only if we are ATA5 or higher */ 583 /* Check for SATA but only if we are ATA5 or higher */
@@ -600,7 +603,8 @@ no_80w:
600 603
601 printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, " 604 printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
602 "limiting max speed to UDMA33\n", 605 "limiting max speed to UDMA33\n",
603 drive->name, hwif->udma_four ? "drive" : "host"); 606 drive->name,
607 hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
604 608
605 drive->udma33_warned = 1; 609 drive->udma33_warned = 1;
606 610
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index f5ce22c38f82..cc5801399467 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -144,7 +144,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
144 local_irq_enable(); 144 local_irq_enable();
145 ide_fix_driveid(id); 145 ide_fix_driveid(id);
146 146
147#if defined (CONFIG_SCSI_EATA_DMA) || defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA) 147#if defined (CONFIG_SCSI_EATA_PIO) || defined (CONFIG_SCSI_EATA)
148 /* 148 /*
149 * EATA SCSI controllers do a hardware ATA emulation: 149 * EATA SCSI controllers do a hardware ATA emulation:
150 * Ignore them if there is a driver for them available. 150 * Ignore them if there is a driver for them available.
@@ -154,7 +154,7 @@ static inline void do_identify (ide_drive_t *drive, u8 cmd)
154 printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model); 154 printk("%s: EATA SCSI HBA %.10s\n", drive->name, id->model);
155 goto err_misc; 155 goto err_misc;
156 } 156 }
157#endif /* CONFIG_SCSI_EATA_DMA || CONFIG_SCSI_EATA_PIO */ 157#endif /* CONFIG_SCSI_EATA || CONFIG_SCSI_EATA_PIO */
158 158
159 /* 159 /*
160 * WIN_IDENTIFY returns little-endian info, 160 * WIN_IDENTIFY returns little-endian info,
@@ -1025,7 +1025,7 @@ static int init_irq (ide_hwif_t *hwif)
1025 BUG_ON(irqs_disabled()); 1025 BUG_ON(irqs_disabled());
1026 BUG_ON(hwif == NULL); 1026 BUG_ON(hwif == NULL);
1027 1027
1028 down(&ide_cfg_sem); 1028 mutex_lock(&ide_cfg_mtx);
1029 hwif->hwgroup = NULL; 1029 hwif->hwgroup = NULL;
1030#if MAX_HWIFS > 1 1030#if MAX_HWIFS > 1
1031 /* 1031 /*
@@ -1154,7 +1154,7 @@ static int init_irq (ide_hwif_t *hwif)
1154 printk(" (%sed with %s)", 1154 printk(" (%sed with %s)",
1155 hwif->sharing_irq ? "shar" : "serializ", match->name); 1155 hwif->sharing_irq ? "shar" : "serializ", match->name);
1156 printk("\n"); 1156 printk("\n");
1157 up(&ide_cfg_sem); 1157 mutex_unlock(&ide_cfg_mtx);
1158 return 0; 1158 return 0;
1159out_unlink: 1159out_unlink:
1160 spin_lock_irq(&ide_lock); 1160 spin_lock_irq(&ide_lock);
@@ -1177,7 +1177,7 @@ out_unlink:
1177 } 1177 }
1178 spin_unlock_irq(&ide_lock); 1178 spin_unlock_irq(&ide_lock);
1179out_up: 1179out_up:
1180 up(&ide_cfg_sem); 1180 mutex_unlock(&ide_cfg_mtx);
1181 return 1; 1181 return 1;
1182} 1182}
1183 1183
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c
index ea94c9aa1220..fc1d8ae6a803 100644
--- a/drivers/ide/ide-proc.c
+++ b/drivers/ide/ide-proc.c
@@ -156,7 +156,7 @@ static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int d
156{ 156{
157 ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL; 157 ide_settings_t **p = (ide_settings_t **) &drive->settings, *setting = NULL;
158 158
159 down(&ide_setting_sem); 159 mutex_lock(&ide_setting_mtx);
160 while ((*p) && strcmp((*p)->name, name) < 0) 160 while ((*p) && strcmp((*p)->name, name) < 0)
161 p = &((*p)->next); 161 p = &((*p)->next);
162 if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL) 162 if ((setting = kzalloc(sizeof(*setting), GFP_KERNEL)) == NULL)
@@ -177,10 +177,10 @@ static int __ide_add_setting(ide_drive_t *drive, const char *name, int rw, int d
177 if (auto_remove) 177 if (auto_remove)
178 setting->auto_remove = 1; 178 setting->auto_remove = 1;
179 *p = setting; 179 *p = setting;
180 up(&ide_setting_sem); 180 mutex_unlock(&ide_setting_mtx);
181 return 0; 181 return 0;
182abort: 182abort:
183 up(&ide_setting_sem); 183 mutex_unlock(&ide_setting_mtx);
184 kfree(setting); 184 kfree(setting);
185 return -1; 185 return -1;
186} 186}
@@ -224,7 +224,7 @@ static void __ide_remove_setting (ide_drive_t *drive, char *name)
224 * 224 *
225 * Automatically remove all the driver specific settings for this 225 * Automatically remove all the driver specific settings for this
226 * drive. This function may not be called from IRQ context. The 226 * drive. This function may not be called from IRQ context. The
227 * caller must hold ide_setting_sem. 227 * caller must hold ide_setting_mtx.
228 */ 228 */
229 229
230static void auto_remove_settings (ide_drive_t *drive) 230static void auto_remove_settings (ide_drive_t *drive)
@@ -269,7 +269,7 @@ static ide_settings_t *ide_find_setting_by_name(ide_drive_t *drive, char *name)
269 * @setting: drive setting 269 * @setting: drive setting
270 * 270 *
271 * Read a drive setting and return the value. The caller 271 * Read a drive setting and return the value. The caller
272 * must hold the ide_setting_sem when making this call. 272 * must hold the ide_setting_mtx when making this call.
273 * 273 *
274 * BUGS: the data return and error are the same return value 274 * BUGS: the data return and error are the same return value
275 * so an error -EINVAL and true return of the same value cannot 275 * so an error -EINVAL and true return of the same value cannot
@@ -306,7 +306,7 @@ static int ide_read_setting(ide_drive_t *drive, ide_settings_t *setting)
306 * @val: value 306 * @val: value
307 * 307 *
308 * Write a drive setting if it is possible. The caller 308 * Write a drive setting if it is possible. The caller
309 * must hold the ide_setting_sem when making this call. 309 * must hold the ide_setting_mtx when making this call.
310 * 310 *
311 * BUGS: the data return and error are the same return value 311 * BUGS: the data return and error are the same return value
312 * so an error -EINVAL and true return of the same value cannot 312 * so an error -EINVAL and true return of the same value cannot
@@ -367,7 +367,7 @@ static int set_xfer_rate (ide_drive_t *drive, int arg)
367 * @drive: drive being configured 367 * @drive: drive being configured
368 * 368 *
369 * Add the generic parts of the system settings to the /proc files. 369 * Add the generic parts of the system settings to the /proc files.
370 * The caller must not be holding the ide_setting_sem. 370 * The caller must not be holding the ide_setting_mtx.
371 */ 371 */
372 372
373void ide_add_generic_settings (ide_drive_t *drive) 373void ide_add_generic_settings (ide_drive_t *drive)
@@ -408,7 +408,7 @@ static int proc_ide_read_settings
408 408
409 proc_ide_settings_warn(); 409 proc_ide_settings_warn();
410 410
411 down(&ide_setting_sem); 411 mutex_lock(&ide_setting_mtx);
412 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n"); 412 out += sprintf(out, "name\t\t\tvalue\t\tmin\t\tmax\t\tmode\n");
413 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n"); 413 out += sprintf(out, "----\t\t\t-----\t\t---\t\t---\t\t----\n");
414 while(setting) { 414 while(setting) {
@@ -428,7 +428,7 @@ static int proc_ide_read_settings
428 setting = setting->next; 428 setting = setting->next;
429 } 429 }
430 len = out - page; 430 len = out - page;
431 up(&ide_setting_sem); 431 mutex_unlock(&ide_setting_mtx);
432 PROC_IDE_READ_RETURN(page,start,off,count,eof,len); 432 PROC_IDE_READ_RETURN(page,start,off,count,eof,len);
433} 433}
434 434
@@ -508,16 +508,16 @@ static int proc_ide_write_settings(struct file *file, const char __user *buffer,
508 ++p; 508 ++p;
509 } 509 }
510 510
511 down(&ide_setting_sem); 511 mutex_lock(&ide_setting_mtx);
512 setting = ide_find_setting_by_name(drive, name); 512 setting = ide_find_setting_by_name(drive, name);
513 if (!setting) 513 if (!setting)
514 { 514 {
515 up(&ide_setting_sem); 515 mutex_unlock(&ide_setting_mtx);
516 goto parse_error; 516 goto parse_error;
517 } 517 }
518 if (for_real) 518 if (for_real)
519 ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor); 519 ide_write_setting(drive, setting, val * setting->div_factor / setting->mul_factor);
520 up(&ide_setting_sem); 520 mutex_unlock(&ide_setting_mtx);
521 } 521 }
522 } while (!for_real++); 522 } while (!for_real++);
523 free_page((unsigned long)buf); 523 free_page((unsigned long)buf);
@@ -705,7 +705,7 @@ EXPORT_SYMBOL(ide_proc_register_driver);
705 * Clean up the driver specific /proc files and IDE settings 705 * Clean up the driver specific /proc files and IDE settings
706 * for a given drive. 706 * for a given drive.
707 * 707 *
708 * Takes ide_setting_sem and ide_lock. 708 * Takes ide_setting_mtx and ide_lock.
709 * Caller must hold none of the locks. 709 * Caller must hold none of the locks.
710 */ 710 */
711 711
@@ -715,10 +715,10 @@ void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
715 715
716 ide_remove_proc_entries(drive->proc, driver->proc); 716 ide_remove_proc_entries(drive->proc, driver->proc);
717 717
718 down(&ide_setting_sem); 718 mutex_lock(&ide_setting_mtx);
719 spin_lock_irqsave(&ide_lock, flags); 719 spin_lock_irqsave(&ide_lock, flags);
720 /* 720 /*
721 * ide_setting_sem protects the settings list 721 * ide_setting_mtx protects the settings list
722 * ide_lock protects the use of settings 722 * ide_lock protects the use of settings
723 * 723 *
724 * so we need to hold both, ide_settings_sem because we want to 724 * so we need to hold both, ide_settings_sem because we want to
@@ -726,11 +726,11 @@ void ide_proc_unregister_driver(ide_drive_t *drive, ide_driver_t *driver)
726 * a setting out that is being used. 726 * a setting out that is being used.
727 * 727 *
728 * OTOH both ide_{read,write}_setting are only ever used under 728 * OTOH both ide_{read,write}_setting are only ever used under
729 * ide_setting_sem. 729 * ide_setting_mtx.
730 */ 730 */
731 auto_remove_settings(drive); 731 auto_remove_settings(drive);
732 spin_unlock_irqrestore(&ide_lock, flags); 732 spin_unlock_irqrestore(&ide_lock, flags);
733 up(&ide_setting_sem); 733 mutex_unlock(&ide_setting_mtx);
734} 734}
735 735
736EXPORT_SYMBOL(ide_proc_unregister_driver); 736EXPORT_SYMBOL(ide_proc_unregister_driver);
diff --git a/drivers/ide/ide-timing.h b/drivers/ide/ide-timing.h
index c0864b1e9228..e6cb8593b5ba 100644
--- a/drivers/ide/ide-timing.h
+++ b/drivers/ide/ide-timing.h
@@ -102,66 +102,16 @@ static struct ide_timing ide_timing[] = {
102#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) 102#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
103 103
104#define XFER_MODE 0xf0 104#define XFER_MODE 0xf0
105#define XFER_UDMA_133 0x48
106#define XFER_UDMA_100 0x44
107#define XFER_UDMA_66 0x42
108#define XFER_UDMA 0x40
109#define XFER_MWDMA 0x20 105#define XFER_MWDMA 0x20
110#define XFER_SWDMA 0x10
111#define XFER_EPIO 0x01 106#define XFER_EPIO 0x01
112#define XFER_PIO 0x00 107#define XFER_PIO 0x00
113 108
114static short ide_find_best_mode(ide_drive_t *drive, int map) 109static short ide_find_best_pio_mode(ide_drive_t *drive)
115{ 110{
116 struct hd_driveid *id = drive->id; 111 struct hd_driveid *id = drive->id;
117 short best = 0; 112 short best = 0;
118 113
119 if (!id) 114 if (id->field_valid & 2) { /* EIDE PIO modes */
120 return XFER_PIO_SLOW;
121
122 if ((map & XFER_UDMA) && (id->field_valid & 4)) { /* Want UDMA and UDMA bitmap valid */
123
124 if ((map & XFER_UDMA_133) == XFER_UDMA_133)
125 if ((best = (id->dma_ultra & 0x0040) ? XFER_UDMA_6 : 0)) return best;
126
127 if ((map & XFER_UDMA_100) == XFER_UDMA_100)
128 if ((best = (id->dma_ultra & 0x0020) ? XFER_UDMA_5 : 0)) return best;
129
130 if ((map & XFER_UDMA_66) == XFER_UDMA_66)
131 if ((best = (id->dma_ultra & 0x0010) ? XFER_UDMA_4 :
132 (id->dma_ultra & 0x0008) ? XFER_UDMA_3 : 0)) return best;
133
134 if ((best = (id->dma_ultra & 0x0004) ? XFER_UDMA_2 :
135 (id->dma_ultra & 0x0002) ? XFER_UDMA_1 :
136 (id->dma_ultra & 0x0001) ? XFER_UDMA_0 : 0)) return best;
137 }
138
139 if ((map & XFER_MWDMA) && (id->field_valid & 2)) { /* Want MWDMA and drive has EIDE fields */
140
141 if ((best = (id->dma_mword & 0x0004) ? XFER_MW_DMA_2 :
142 (id->dma_mword & 0x0002) ? XFER_MW_DMA_1 :
143 (id->dma_mword & 0x0001) ? XFER_MW_DMA_0 : 0)) return best;
144 }
145
146 if (map & XFER_SWDMA) { /* Want SWDMA */
147
148 if (id->field_valid & 2) { /* EIDE SWDMA */
149
150 if ((best = (id->dma_1word & 0x0004) ? XFER_SW_DMA_2 :
151 (id->dma_1word & 0x0002) ? XFER_SW_DMA_1 :
152 (id->dma_1word & 0x0001) ? XFER_SW_DMA_0 : 0)) return best;
153 }
154
155 if (id->capability & 1) { /* Pre-EIDE style SWDMA */
156
157 if ((best = (id->tDMA == 2) ? XFER_SW_DMA_2 :
158 (id->tDMA == 1) ? XFER_SW_DMA_1 :
159 (id->tDMA == 0) ? XFER_SW_DMA_0 : 0)) return best;
160 }
161 }
162
163
164 if ((map & XFER_EPIO) && (id->field_valid & 2)) { /* EIDE PIO modes */
165 115
166 if ((best = (drive->id->eide_pio_modes & 4) ? XFER_PIO_5 : 116 if ((best = (drive->id->eide_pio_modes & 4) ? XFER_PIO_5 :
167 (drive->id->eide_pio_modes & 2) ? XFER_PIO_4 : 117 (drive->id->eide_pio_modes & 2) ? XFER_PIO_4 :
@@ -262,7 +212,7 @@ static int ide_timing_compute(ide_drive_t *drive, short speed, struct ide_timing
262 */ 212 */
263 213
264 if ((speed & XFER_MODE) != XFER_PIO) { 214 if ((speed & XFER_MODE) != XFER_PIO) {
265 ide_timing_compute(drive, ide_find_best_mode(drive, XFER_PIO | XFER_EPIO), &p, T, UT); 215 ide_timing_compute(drive, ide_find_best_pio_mode(drive), &p, T, UT);
266 ide_timing_merge(&p, t, t, IDE_TIMING_ALL); 216 ide_timing_merge(&p, t, t, IDE_TIMING_ALL);
267 } 217 }
268 218
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 0cd76bf66833..c948a5c17a5d 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -169,7 +169,7 @@ static const u8 ide_hwif_to_major[] = { IDE0_MAJOR, IDE1_MAJOR,
169static int idebus_parameter; /* holds the "idebus=" parameter */ 169static int idebus_parameter; /* holds the "idebus=" parameter */
170static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */ 170static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
171 171
172DECLARE_MUTEX(ide_cfg_sem); 172DEFINE_MUTEX(ide_cfg_mtx);
173 __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock); 173 __cacheline_aligned_in_smp DEFINE_SPINLOCK(ide_lock);
174 174
175#ifdef CONFIG_IDEPCI_PCIBUS_ORDER 175#ifdef CONFIG_IDEPCI_PCIBUS_ORDER
@@ -460,6 +460,8 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
460 hwif->mwdma_mask = tmp_hwif->mwdma_mask; 460 hwif->mwdma_mask = tmp_hwif->mwdma_mask;
461 hwif->swdma_mask = tmp_hwif->swdma_mask; 461 hwif->swdma_mask = tmp_hwif->swdma_mask;
462 462
463 hwif->cbl = tmp_hwif->cbl;
464
463 hwif->chipset = tmp_hwif->chipset; 465 hwif->chipset = tmp_hwif->chipset;
464 hwif->hold = tmp_hwif->hold; 466 hwif->hold = tmp_hwif->hold;
465 467
@@ -496,8 +498,8 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
496 hwif->ide_dma_clear_irq = tmp_hwif->ide_dma_clear_irq; 498 hwif->ide_dma_clear_irq = tmp_hwif->ide_dma_clear_irq;
497 hwif->dma_host_on = tmp_hwif->dma_host_on; 499 hwif->dma_host_on = tmp_hwif->dma_host_on;
498 hwif->dma_host_off = tmp_hwif->dma_host_off; 500 hwif->dma_host_off = tmp_hwif->dma_host_off;
499 hwif->ide_dma_lostirq = tmp_hwif->ide_dma_lostirq; 501 hwif->dma_lost_irq = tmp_hwif->dma_lost_irq;
500 hwif->ide_dma_timeout = tmp_hwif->ide_dma_timeout; 502 hwif->dma_timeout = tmp_hwif->dma_timeout;
501 503
502 hwif->OUTB = tmp_hwif->OUTB; 504 hwif->OUTB = tmp_hwif->OUTB;
503 hwif->OUTBSYNC = tmp_hwif->OUTBSYNC; 505 hwif->OUTBSYNC = tmp_hwif->OUTBSYNC;
@@ -533,7 +535,6 @@ static void ide_hwif_restore(ide_hwif_t *hwif, ide_hwif_t *tmp_hwif)
533 hwif->extra_base = tmp_hwif->extra_base; 535 hwif->extra_base = tmp_hwif->extra_base;
534 hwif->extra_ports = tmp_hwif->extra_ports; 536 hwif->extra_ports = tmp_hwif->extra_ports;
535 hwif->autodma = tmp_hwif->autodma; 537 hwif->autodma = tmp_hwif->autodma;
536 hwif->udma_four = tmp_hwif->udma_four;
537 538
538 hwif->hwif_data = tmp_hwif->hwif_data; 539 hwif->hwif_data = tmp_hwif->hwif_data;
539} 540}
@@ -564,7 +565,7 @@ void ide_unregister(unsigned int index)
564{ 565{
565 ide_drive_t *drive; 566 ide_drive_t *drive;
566 ide_hwif_t *hwif, *g; 567 ide_hwif_t *hwif, *g;
567 static ide_hwif_t tmp_hwif; /* protected by ide_cfg_sem */ 568 static ide_hwif_t tmp_hwif; /* protected by ide_cfg_mtx */
568 ide_hwgroup_t *hwgroup; 569 ide_hwgroup_t *hwgroup;
569 int irq_count = 0, unit; 570 int irq_count = 0, unit;
570 571
@@ -572,7 +573,7 @@ void ide_unregister(unsigned int index)
572 573
573 BUG_ON(in_interrupt()); 574 BUG_ON(in_interrupt());
574 BUG_ON(irqs_disabled()); 575 BUG_ON(irqs_disabled());
575 down(&ide_cfg_sem); 576 mutex_lock(&ide_cfg_mtx);
576 spin_lock_irq(&ide_lock); 577 spin_lock_irq(&ide_lock);
577 hwif = &ide_hwifs[index]; 578 hwif = &ide_hwifs[index];
578 if (!hwif->present) 579 if (!hwif->present)
@@ -679,7 +680,7 @@ void ide_unregister(unsigned int index)
679 680
680abort: 681abort:
681 spin_unlock_irq(&ide_lock); 682 spin_unlock_irq(&ide_lock);
682 up(&ide_cfg_sem); 683 mutex_unlock(&ide_cfg_mtx);
683} 684}
684 685
685EXPORT_SYMBOL(ide_unregister); 686EXPORT_SYMBOL(ide_unregister);
@@ -817,9 +818,9 @@ EXPORT_SYMBOL(ide_register_hw);
817 * Locks for IDE setting functionality 818 * Locks for IDE setting functionality
818 */ 819 */
819 820
820DECLARE_MUTEX(ide_setting_sem); 821DEFINE_MUTEX(ide_setting_mtx);
821 822
822EXPORT_SYMBOL_GPL(ide_setting_sem); 823EXPORT_SYMBOL_GPL(ide_setting_mtx);
823 824
824/** 825/**
825 * ide_spin_wait_hwgroup - wait for group 826 * ide_spin_wait_hwgroup - wait for group
@@ -1192,11 +1193,11 @@ int generic_ide_ioctl(ide_drive_t *drive, struct file *file, struct block_device
1192 } 1193 }
1193 1194
1194read_val: 1195read_val:
1195 down(&ide_setting_sem); 1196 mutex_lock(&ide_setting_mtx);
1196 spin_lock_irqsave(&ide_lock, flags); 1197 spin_lock_irqsave(&ide_lock, flags);
1197 err = *val; 1198 err = *val;
1198 spin_unlock_irqrestore(&ide_lock, flags); 1199 spin_unlock_irqrestore(&ide_lock, flags);
1199 up(&ide_setting_sem); 1200 mutex_unlock(&ide_setting_mtx);
1200 return err >= 0 ? put_user(err, (long __user *)arg) : err; 1201 return err >= 0 ? put_user(err, (long __user *)arg) : err;
1201 1202
1202set_val: 1203set_val:
@@ -1206,9 +1207,9 @@ set_val:
1206 if (!capable(CAP_SYS_ADMIN)) 1207 if (!capable(CAP_SYS_ADMIN))
1207 err = -EACCES; 1208 err = -EACCES;
1208 else { 1209 else {
1209 down(&ide_setting_sem); 1210 mutex_lock(&ide_setting_mtx);
1210 err = setfunc(drive, arg); 1211 err = setfunc(drive, arg);
1211 up(&ide_setting_sem); 1212 mutex_unlock(&ide_setting_mtx);
1212 } 1213 }
1213 } 1214 }
1214 return err; 1215 return err;
@@ -1548,7 +1549,11 @@ static int __init ide_setup(char *s)
1548 goto bad_option; 1549 goto bad_option;
1549 case -7: /* ata66 */ 1550 case -7: /* ata66 */
1550#ifdef CONFIG_BLK_DEV_IDEPCI 1551#ifdef CONFIG_BLK_DEV_IDEPCI
1551 hwif->udma_four = 1; 1552 /*
1553 * Use ATA_CBL_PATA40_SHORT so that drive-side
1554 * cable detection is also overridden.
1555 */
1556 hwif->cbl = ATA_CBL_PATA40_SHORT;
1552 goto obsolete_option; 1557 goto obsolete_option;
1553#else 1558#else
1554 goto bad_hwif; 1559 goto bad_hwif;
diff --git a/drivers/ide/legacy/hd.c b/drivers/ide/legacy/hd.c
index 45ed03591cd8..661c12f6dda6 100644
--- a/drivers/ide/legacy/hd.c
+++ b/drivers/ide/legacy/hd.c
@@ -130,7 +130,7 @@ struct hd_i_struct {
130 130
131#ifdef HD_TYPE 131#ifdef HD_TYPE
132static struct hd_i_struct hd_info[] = { HD_TYPE }; 132static struct hd_i_struct hd_info[] = { HD_TYPE };
133static int NR_HD = ((sizeof (hd_info))/(sizeof (struct hd_i_struct))); 133static int NR_HD = ARRAY_SIZE(hd_info);
134#else 134#else
135static struct hd_i_struct hd_info[MAX_HD]; 135static struct hd_i_struct hd_info[MAX_HD];
136static int NR_HD; 136static int NR_HD;
diff --git a/drivers/ide/legacy/macide.c b/drivers/ide/legacy/macide.c
index c211fc78345d..b557c45a5a9d 100644
--- a/drivers/ide/legacy/macide.c
+++ b/drivers/ide/legacy/macide.c
@@ -77,15 +77,6 @@ int macide_ack_intr(ide_hwif_t* hwif)
77 return 0; 77 return 0;
78} 78}
79 79
80#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
81static void macide_mediabay_interrupt(int irq, void *dev_id)
82{
83 int state = baboon->mb_status & 0x04;
84
85 printk(KERN_INFO "macide: media bay %s detected\n", state? "removal":"insertion");
86}
87#endif
88
89/* 80/*
90 * Probe for a Macintosh IDE interface 81 * Probe for a Macintosh IDE interface
91 */ 82 */
@@ -128,11 +119,6 @@ void macide_init(void)
128 ide_drive_t *drive = &ide_hwifs[index].drives[0]; 119 ide_drive_t *drive = &ide_hwifs[index].drives[0];
129 drive->capacity64 = drive->cyl*drive->head*drive->sect; 120 drive->capacity64 = drive->cyl*drive->head*drive->sect;
130 121
131#ifdef CONFIG_BLK_DEV_MAC_MEDIABAY
132 request_irq(IRQ_BABOON_2, macide_mediabay_interrupt,
133 IRQ_FLG_FAST, "mediabay",
134 macide_mediabay_interrupt);
135#endif
136 } 122 }
137 break; 123 break;
138 124
diff --git a/drivers/ide/mips/au1xxx-ide.c b/drivers/ide/mips/au1xxx-ide.c
index ca95e990862e..2e7013a2a7f6 100644
--- a/drivers/ide/mips/au1xxx-ide.c
+++ b/drivers/ide/mips/au1xxx-ide.c
@@ -381,9 +381,7 @@ static int auide_dma_setup(ide_drive_t *drive)
381 381
382static int auide_dma_check(ide_drive_t *drive) 382static int auide_dma_check(ide_drive_t *drive)
383{ 383{
384 u8 speed; 384 u8 speed = ide_max_dma_mode(drive);
385
386#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
387 385
388 if( dbdma_init_done == 0 ){ 386 if( dbdma_init_done == 0 ){
389 auide_hwif.white_list = ide_in_drive_list(drive->id, 387 auide_hwif.white_list = ide_in_drive_list(drive->id,
@@ -394,7 +392,6 @@ static int auide_dma_check(ide_drive_t *drive)
394 auide_ddma_init(&auide_hwif); 392 auide_ddma_init(&auide_hwif);
395 dbdma_init_done = 1; 393 dbdma_init_done = 1;
396 } 394 }
397#endif
398 395
399 /* Is the drive in our DMA black list? */ 396 /* Is the drive in our DMA black list? */
400 397
@@ -409,8 +406,6 @@ static int auide_dma_check(ide_drive_t *drive)
409 else 406 else
410 drive->using_dma = 1; 407 drive->using_dma = 1;
411 408
412 speed = ide_find_best_mode(drive, XFER_PIO | XFER_MWDMA);
413
414 if (drive->autodma && (speed & XFER_MODE) != XFER_PIO) 409 if (drive->autodma && (speed & XFER_MODE) != XFER_PIO)
415 return 0; 410 return 0;
416 411
@@ -456,10 +451,9 @@ static void auide_dma_off_quietly(ide_drive_t *drive)
456 drive->using_dma = 0; 451 drive->using_dma = 0;
457} 452}
458 453
459static int auide_dma_lostirq(ide_drive_t *drive) 454static void auide_dma_lost_irq(ide_drive_t *drive)
460{ 455{
461 printk(KERN_ERR "%s: IRQ lost\n", drive->name); 456 printk(KERN_ERR "%s: IRQ lost\n", drive->name);
462 return 0;
463} 457}
464 458
465static void auide_ddma_tx_callback(int irq, void *param) 459static void auide_ddma_tx_callback(int irq, void *param)
@@ -489,16 +483,16 @@ static void auide_init_dbdma_dev(dbdev_tab_t *dev, u32 dev_id, u32 tsize, u32 de
489 483
490#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA) 484#if defined(CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA)
491 485
492static int auide_dma_timeout(ide_drive_t *drive) 486static void auide_dma_timeout(ide_drive_t *drive)
493{ 487{
494// printk("%s\n", __FUNCTION__); 488 ide_hwif_t *hwif = HWIF(drive);
495 489
496 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name); 490 printk(KERN_ERR "%s: DMA timeout occurred: ", drive->name);
497 491
498 if (HWIF(drive)->ide_dma_test_irq(drive)) 492 if (hwif->ide_dma_test_irq(drive))
499 return 0; 493 return;
500 494
501 return HWIF(drive)->ide_dma_end(drive); 495 hwif->ide_dma_end(drive);
502} 496}
503 497
504 498
@@ -721,7 +715,7 @@ static int au_ide_probe(struct device *dev)
721 715
722#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 716#ifdef CONFIG_BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
723 hwif->dma_off_quietly = &auide_dma_off_quietly; 717 hwif->dma_off_quietly = &auide_dma_off_quietly;
724 hwif->ide_dma_timeout = &auide_dma_timeout; 718 hwif->dma_timeout = &auide_dma_timeout;
725 719
726 hwif->ide_dma_check = &auide_dma_check; 720 hwif->ide_dma_check = &auide_dma_check;
727 hwif->dma_exec_cmd = &auide_dma_exec_cmd; 721 hwif->dma_exec_cmd = &auide_dma_exec_cmd;
@@ -731,7 +725,7 @@ static int au_ide_probe(struct device *dev)
731 hwif->ide_dma_test_irq = &auide_dma_test_irq; 725 hwif->ide_dma_test_irq = &auide_dma_test_irq;
732 hwif->dma_host_off = &auide_dma_host_off; 726 hwif->dma_host_off = &auide_dma_host_off;
733 hwif->dma_host_on = &auide_dma_host_on; 727 hwif->dma_host_on = &auide_dma_host_on;
734 hwif->ide_dma_lostirq = &auide_dma_lostirq; 728 hwif->dma_lost_irq = &auide_dma_lost_irq;
735 hwif->ide_dma_on = &auide_dma_on; 729 hwif->ide_dma_on = &auide_dma_on;
736 730
737 hwif->autodma = 1; 731 hwif->autodma = 1;
diff --git a/drivers/ide/pci/aec62xx.c b/drivers/ide/pci/aec62xx.c
index b173bc66ce1e..e5d09367627e 100644
--- a/drivers/ide/pci/aec62xx.c
+++ b/drivers/ide/pci/aec62xx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/aec62xx.c Version 0.21 Apr 21, 2007 2 * linux/drivers/ide/pci/aec62xx.c Version 0.24 May 24, 2007
3 * 3 *
4 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2002 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com> 5 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
@@ -140,25 +140,10 @@ static int aec6260_tune_chipset (ide_drive_t *drive, u8 xferspeed)
140 return(ide_config_drive_speed(drive, speed)); 140 return(ide_config_drive_speed(drive, speed));
141} 141}
142 142
143static int aec62xx_tune_chipset (ide_drive_t *drive, u8 speed)
144{
145 switch (HWIF(drive)->pci_dev->device) {
146 case PCI_DEVICE_ID_ARTOP_ATP865:
147 case PCI_DEVICE_ID_ARTOP_ATP865R:
148 case PCI_DEVICE_ID_ARTOP_ATP860:
149 case PCI_DEVICE_ID_ARTOP_ATP860R:
150 return ((int) aec6260_tune_chipset(drive, speed));
151 case PCI_DEVICE_ID_ARTOP_ATP850UF:
152 return ((int) aec6210_tune_chipset(drive, speed));
153 default:
154 return -1;
155 }
156}
157
158static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio) 143static void aec62xx_tune_drive (ide_drive_t *drive, u8 pio)
159{ 144{
160 pio = ide_get_best_pio_mode(drive, pio, 4, NULL); 145 pio = ide_get_best_pio_mode(drive, pio, 4, NULL);
161 (void) aec62xx_tune_chipset(drive, pio + XFER_PIO_0); 146 (void) HWIF(drive)->speedproc(drive, pio + XFER_PIO_0);
162} 147}
163 148
164static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive) 149static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
@@ -172,12 +157,9 @@ static int aec62xx_config_drive_xfer_rate (ide_drive_t *drive)
172 return -1; 157 return -1;
173} 158}
174 159
175static int aec62xx_irq_timeout (ide_drive_t *drive) 160static void aec62xx_dma_lost_irq (ide_drive_t *drive)
176{ 161{
177 ide_hwif_t *hwif = HWIF(drive); 162 switch (HWIF(drive)->pci_dev->device) {
178 struct pci_dev *dev = hwif->pci_dev;
179
180 switch(dev->device) {
181 case PCI_DEVICE_ID_ARTOP_ATP860: 163 case PCI_DEVICE_ID_ARTOP_ATP860:
182 case PCI_DEVICE_ID_ARTOP_ATP860R: 164 case PCI_DEVICE_ID_ARTOP_ATP860R:
183 case PCI_DEVICE_ID_ARTOP_ATP865: 165 case PCI_DEVICE_ID_ARTOP_ATP865:
@@ -186,7 +168,6 @@ static int aec62xx_irq_timeout (ide_drive_t *drive)
186 default: 168 default:
187 break; 169 break;
188 } 170 }
189 return 0;
190} 171}
191 172
192static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name) 173static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const char *name)
@@ -224,64 +205,46 @@ static unsigned int __devinit init_chipset_aec62xx(struct pci_dev *dev, const ch
224 205
225static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif) 206static void __devinit init_hwif_aec62xx(ide_hwif_t *hwif)
226{ 207{
227 struct pci_dev *dev = hwif->pci_dev; 208 struct pci_dev *dev = hwif->pci_dev;
209 u8 reg54 = 0, mask = hwif->channel ? 0xf0 : 0x0f;
210 unsigned long flags;
228 211
229 hwif->autodma = 0;
230 hwif->tuneproc = &aec62xx_tune_drive; 212 hwif->tuneproc = &aec62xx_tune_drive;
231 hwif->speedproc = &aec62xx_tune_chipset;
232 213
233 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) 214 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
234 hwif->serialized = hwif->channel; 215 if (hwif->mate)
235 216 hwif->mate->serialized = hwif->serialized = 1;
236 if (hwif->mate) 217 hwif->speedproc = &aec6210_tune_chipset;
237 hwif->mate->serialized = hwif->serialized; 218 } else
219 hwif->speedproc = &aec6260_tune_chipset;
238 220
239 if (!hwif->dma_base) { 221 if (!hwif->dma_base) {
240 hwif->drives[0].autotune = 1; 222 hwif->drives[0].autotune = hwif->drives[1].autotune = 1;
241 hwif->drives[1].autotune = 1;
242 return; 223 return;
243 } 224 }
244 225
245 hwif->ultra_mask = hwif->cds->udma_mask; 226 hwif->ultra_mask = hwif->cds->udma_mask;
246
247 /* atp865 and atp865r */
248 if (hwif->ultra_mask == 0x3f) {
249 /* check bit 0x10 of DMA status register */
250 if (inb(pci_resource_start(dev, 4) + 2) & 0x10)
251 hwif->ultra_mask = 0x7f; /* udma0-6 */
252 }
253
254 hwif->mwdma_mask = 0x07; 227 hwif->mwdma_mask = 0x07;
255 228
256 hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate; 229 hwif->ide_dma_check = &aec62xx_config_drive_xfer_rate;
257 hwif->ide_dma_lostirq = &aec62xx_irq_timeout; 230 hwif->dma_lost_irq = &aec62xx_dma_lost_irq;
258
259 if (!noautodma)
260 hwif->autodma = 1;
261 hwif->drives[0].autodma = hwif->autodma;
262 hwif->drives[1].autodma = hwif->autodma;
263}
264
265static void __devinit init_dma_aec62xx(ide_hwif_t *hwif, unsigned long dmabase)
266{
267 struct pci_dev *dev = hwif->pci_dev;
268 231
269 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) { 232 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP850UF) {
270 u8 reg54h = 0;
271 unsigned long flags;
272
273 spin_lock_irqsave(&ide_lock, flags); 233 spin_lock_irqsave(&ide_lock, flags);
274 pci_read_config_byte(dev, 0x54, &reg54h); 234 pci_read_config_byte (dev, 0x54, &reg54);
275 pci_write_config_byte(dev, 0x54, reg54h & ~(hwif->channel ? 0xF0 : 0x0F)); 235 pci_write_config_byte(dev, 0x54, (reg54 & ~mask));
276 spin_unlock_irqrestore(&ide_lock, flags); 236 spin_unlock_irqrestore(&ide_lock, flags);
277 } else { 237 } else if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
278 u8 ata66 = 0; 238 u8 ata66 = 0, mask = hwif->channel ? 0x02 : 0x01;
239
279 pci_read_config_byte(hwif->pci_dev, 0x49, &ata66); 240 pci_read_config_byte(hwif->pci_dev, 0x49, &ata66);
280 if (!(hwif->udma_four)) 241
281 hwif->udma_four = (ata66&(hwif->channel?0x02:0x01))?0:1; 242 hwif->cbl = (ata66 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
282 } 243 }
283 244
284 ide_setup_dma(hwif, dmabase, 8); 245 if (!noautodma)
246 hwif->autodma = 1;
247 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
285} 248}
286 249
287static int __devinit init_setup_aec62xx(struct pci_dev *dev, ide_pci_device_t *d) 250static int __devinit init_setup_aec62xx(struct pci_dev *dev, ide_pci_device_t *d)
@@ -291,16 +254,12 @@ static int __devinit init_setup_aec62xx(struct pci_dev *dev, ide_pci_device_t *d
291 254
292static int __devinit init_setup_aec6x80(struct pci_dev *dev, ide_pci_device_t *d) 255static int __devinit init_setup_aec6x80(struct pci_dev *dev, ide_pci_device_t *d)
293{ 256{
294 unsigned long bar4reg = pci_resource_start(dev, 4); 257 unsigned long dma_base = pci_resource_start(dev, 4);
295 258
296 if (inb(bar4reg+2) & 0x10) { 259 if (inb(dma_base + 2) & 0x10) {
297 strcpy(d->name, "AEC6880"); 260 d->name = (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R) ?
298 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R) 261 "AEC6880R" : "AEC6880";
299 strcpy(d->name, "AEC6880R"); 262 d->udma_mask = 0x7f; /* udma0-6 */
300 } else {
301 strcpy(d->name, "AEC6280");
302 if (dev->device == PCI_DEVICE_ID_ARTOP_ATP865R)
303 strcpy(d->name, "AEC6280R");
304 } 263 }
305 264
306 return ide_setup_pci_device(dev, d); 265 return ide_setup_pci_device(dev, d);
@@ -312,7 +271,6 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
312 .init_setup = init_setup_aec62xx, 271 .init_setup = init_setup_aec62xx,
313 .init_chipset = init_chipset_aec62xx, 272 .init_chipset = init_chipset_aec62xx,
314 .init_hwif = init_hwif_aec62xx, 273 .init_hwif = init_hwif_aec62xx,
315 .init_dma = init_dma_aec62xx,
316 .channels = 2, 274 .channels = 2,
317 .autodma = AUTODMA, 275 .autodma = AUTODMA,
318 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 276 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
@@ -323,7 +281,6 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
323 .init_setup = init_setup_aec62xx, 281 .init_setup = init_setup_aec62xx,
324 .init_chipset = init_chipset_aec62xx, 282 .init_chipset = init_chipset_aec62xx,
325 .init_hwif = init_hwif_aec62xx, 283 .init_hwif = init_hwif_aec62xx,
326 .init_dma = init_dma_aec62xx,
327 .channels = 2, 284 .channels = 2,
328 .autodma = NOAUTODMA, 285 .autodma = NOAUTODMA,
329 .bootable = OFF_BOARD, 286 .bootable = OFF_BOARD,
@@ -333,28 +290,25 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
333 .init_setup = init_setup_aec62xx, 290 .init_setup = init_setup_aec62xx,
334 .init_chipset = init_chipset_aec62xx, 291 .init_chipset = init_chipset_aec62xx,
335 .init_hwif = init_hwif_aec62xx, 292 .init_hwif = init_hwif_aec62xx,
336 .init_dma = init_dma_aec62xx,
337 .channels = 2, 293 .channels = 2,
338 .autodma = AUTODMA, 294 .autodma = AUTODMA,
339 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 295 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
340 .bootable = NEVER_BOARD, 296 .bootable = NEVER_BOARD,
341 .udma_mask = 0x1f, /* udma0-4 */ 297 .udma_mask = 0x1f, /* udma0-4 */
342 },{ /* 3 */ 298 },{ /* 3 */
343 .name = "AEC6X80", 299 .name = "AEC6280",
344 .init_setup = init_setup_aec6x80, 300 .init_setup = init_setup_aec6x80,
345 .init_chipset = init_chipset_aec62xx, 301 .init_chipset = init_chipset_aec62xx,
346 .init_hwif = init_hwif_aec62xx, 302 .init_hwif = init_hwif_aec62xx,
347 .init_dma = init_dma_aec62xx,
348 .channels = 2, 303 .channels = 2,
349 .autodma = AUTODMA, 304 .autodma = AUTODMA,
350 .bootable = OFF_BOARD, 305 .bootable = OFF_BOARD,
351 .udma_mask = 0x3f, /* udma0-5 */ 306 .udma_mask = 0x3f, /* udma0-5 */
352 },{ /* 4 */ 307 },{ /* 4 */
353 .name = "AEC6X80R", 308 .name = "AEC6280R",
354 .init_setup = init_setup_aec6x80, 309 .init_setup = init_setup_aec6x80,
355 .init_chipset = init_chipset_aec62xx, 310 .init_chipset = init_chipset_aec62xx,
356 .init_hwif = init_hwif_aec62xx, 311 .init_hwif = init_hwif_aec62xx,
357 .init_dma = init_dma_aec62xx,
358 .channels = 2, 312 .channels = 2,
359 .autodma = AUTODMA, 313 .autodma = AUTODMA,
360 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}}, 314 .enablebits = {{0x4a,0x02,0x02}, {0x4a,0x04,0x04}},
@@ -370,13 +324,16 @@ static ide_pci_device_t aec62xx_chipsets[] __devinitdata = {
370 * 324 *
371 * Called when the PCI registration layer (or the IDE initialization) 325 * Called when the PCI registration layer (or the IDE initialization)
372 * finds a device matching our IDE device tables. 326 * finds a device matching our IDE device tables.
327 *
328 * NOTE: since we're going to modify the 'name' field for AEC-6[26]80[R]
329 * chips, pass a local copy of 'struct pci_device_id' down the call chain.
373 */ 330 */
374 331
375static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id) 332static int __devinit aec62xx_init_one(struct pci_dev *dev, const struct pci_device_id *id)
376{ 333{
377 ide_pci_device_t *d = &aec62xx_chipsets[id->driver_data]; 334 ide_pci_device_t d = aec62xx_chipsets[id->driver_data];
378 335
379 return d->init_setup(dev, d); 336 return d.init_setup(dev, &d);
380} 337}
381 338
382static struct pci_device_id aec62xx_pci_tbl[] = { 339static struct pci_device_id aec62xx_pci_tbl[] = {
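The NOTE added in the aec62xx hunk above is why aec62xx_init_one() now copies the chipset descriptor by value: init_setup_aec6x80() rewrites d->name and d->udma_mask when the UDMA6 strap bit (bit 4 of the DMA status register) is set, and working on a local copy keeps the static aec62xx_chipsets[] template intact for the next probed device. A rough user-space sketch of that copy-the-template idea, with made-up names standing in for the kernel structures:

#include <stdio.h>

struct chip_desc {			/* stand-in for ide_pci_device_t */
	const char *name;
	unsigned char udma_mask;
};

static const struct chip_desc template = { "AEC6280", 0x3f };	/* udma0-5 */

static void probe_one(int udma6_strap)
{
	struct chip_desc d = template;	/* local copy, as in aec62xx_init_one() */

	if (udma6_strap) {		/* mirrors the inb(dma_base + 2) & 0x10 test */
		d.name = "AEC6880";
		d.udma_mask = 0x7f;	/* udma0-6 */
	}
	printf("%s: udma_mask 0x%02x\n", d.name, d.udma_mask);
}

int main(void)
{
	probe_one(1);	/* board strapped for UDMA6 */
	probe_one(0);	/* the static template is still untouched for this probe */
	return 0;
}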
diff --git a/drivers/ide/pci/alim15x3.c b/drivers/ide/pci/alim15x3.c
index 27525ec2e19a..8a6b27b3bcc3 100644
--- a/drivers/ide/pci/alim15x3.c
+++ b/drivers/ide/pci/alim15x3.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/alim15x3.c Version 0.21 2007/02/03 2 * linux/drivers/ide/pci/alim15x3.c Version 0.25 Jun 9 2007
3 * 3 *
4 * Copyright (C) 1998-2000 Michel Aubry, Maintainer 4 * Copyright (C) 1998-2000 Michel Aubry, Maintainer
5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer 5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer
@@ -10,6 +10,7 @@
10 * Copyright (C) 2002 Alan Cox <alan@redhat.com> 10 * Copyright (C) 2002 Alan Cox <alan@redhat.com>
11 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> 11 * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw>
12 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com> 12 * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com>
13 * Copyright (C) 2007 Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
13 * 14 *
14 * (U)DMA capable version of ali 1533/1543(C), 1535(D) 15 * (U)DMA capable version of ali 1533/1543(C), 1535(D)
15 * 16 *
@@ -36,6 +37,7 @@
36#include <linux/hdreg.h> 37#include <linux/hdreg.h>
37#include <linux/ide.h> 38#include <linux/ide.h>
38#include <linux/init.h> 39#include <linux/init.h>
40#include <linux/dmi.h>
39 41
40#include <asm/io.h> 42#include <asm/io.h>
41 43
@@ -583,6 +585,35 @@ out:
583 return 0; 585 return 0;
584} 586}
585 587
588/*
589 * Cable special cases
590 */
591
592static struct dmi_system_id cable_dmi_table[] = {
593 {
594 .ident = "HP Pavilion N5430",
595 .matches = {
596 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
597 DMI_MATCH(DMI_BOARD_NAME, "OmniBook N32N-736"),
598 },
599 },
600 { }
601};
602
603static int ali_cable_override(struct pci_dev *pdev)
604{
605 /* Fujitsu P2000 */
606 if (pdev->subsystem_vendor == 0x10CF &&
607 pdev->subsystem_device == 0x10AF)
608 return 1;
609
610 /* Systems by DMI */
611 if (dmi_check_system(cable_dmi_table))
612 return 1;
613
614 return 0;
615}
616
586/** 617/**
587 * ata66_ali15x3 - check for UDMA 66 support 618 * ata66_ali15x3 - check for UDMA 66 support
588 * @hwif: IDE interface 619 * @hwif: IDE interface
@@ -594,37 +625,31 @@ out:
594 * FIXME: frobs bits that are not defined on newer ALi devicea 625 * FIXME: frobs bits that are not defined on newer ALi devicea
595 */ 626 */
596 627
597static unsigned int __devinit ata66_ali15x3 (ide_hwif_t *hwif) 628static u8 __devinit ata66_ali15x3(ide_hwif_t *hwif)
598{ 629{
599 struct pci_dev *dev = hwif->pci_dev; 630 struct pci_dev *dev = hwif->pci_dev;
600 unsigned int ata66 = 0;
601 u8 cable_80_pin[2] = { 0, 0 };
602
603 unsigned long flags; 631 unsigned long flags;
604 u8 tmpbyte; 632 u8 cbl = ATA_CBL_PATA40, tmpbyte;
605 633
606 local_irq_save(flags); 634 local_irq_save(flags);
607 635
608 if (m5229_revision >= 0xC2) { 636 if (m5229_revision >= 0xC2) {
609 /* 637 /*
610 * Ultra66 cable detection (from Host View) 638 * m5229 80-pin cable detection (from Host View)
611 * m5229, 0x4a, bit0: primary, bit1: secondary 80 pin 639 *
612 */ 640 * 0x4a bit0 is 0 => primary channel has 80-pin
613 pci_read_config_byte(dev, 0x4a, &tmpbyte); 641 * 0x4a bit1 is 0 => secondary channel has 80-pin
614 /* 642 *
615 * 0x4a, bit0 is 0 => primary channel 643 * Certain laptops use short but suitable cables
616 * has 80-pin (from host view) 644 * and don't implement the detect logic.
617 */
618 if (!(tmpbyte & 0x01)) cable_80_pin[0] = 1;
619 /*
620 * 0x4a, bit1 is 0 => secondary channel
621 * has 80-pin (from host view)
622 */
623 if (!(tmpbyte & 0x02)) cable_80_pin[1] = 1;
624 /*
625 * Allow ata66 if cable of current channel has 80 pins
626 */ 645 */
627 ata66 = (hwif->channel)?cable_80_pin[1]:cable_80_pin[0]; 646 if (ali_cable_override(dev))
647 cbl = ATA_CBL_PATA40_SHORT;
648 else {
649 pci_read_config_byte(dev, 0x4a, &tmpbyte);
650 if ((tmpbyte & (1 << hwif->channel)) == 0)
651 cbl = ATA_CBL_PATA80;
652 }
628 } else { 653 } else {
629 /* 654 /*
630 * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010 655 * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010
@@ -657,7 +682,7 @@ static unsigned int __devinit ata66_ali15x3 (ide_hwif_t *hwif)
657 682
658 local_irq_restore(flags); 683 local_irq_restore(flags);
659 684
660 return(ata66); 685 return cbl;
661} 686}
662 687
663/** 688/**
@@ -708,8 +733,9 @@ static void __devinit init_hwif_common_ali15x3 (ide_hwif_t *hwif)
708 hwif->dma_setup = &ali15x3_dma_setup; 733 hwif->dma_setup = &ali15x3_dma_setup;
709 if (!noautodma) 734 if (!noautodma)
710 hwif->autodma = 1; 735 hwif->autodma = 1;
711 if (!(hwif->udma_four)) 736
712 hwif->udma_four = ata66_ali15x3(hwif); 737 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
738 hwif->cbl = ata66_ali15x3(hwif);
713 } 739 }
714 hwif->drives[0].autodma = hwif->autodma; 740 hwif->drives[0].autodma = hwif->autodma;
715 hwif->drives[1].autodma = hwif->autodma; 741 hwif->drives[1].autodma = hwif->autodma;
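The cable special cases added to alim15x3.c follow a pattern that recurs throughout this series: some laptops wire a short 40-pin cable that is still UDMA66-capable and provide no detect logic, so boards matched by subsystem ID or the DMI table skip the host-side check and keep ATA_CBL_PATA40_SHORT; otherwise a clear bit in register 0x4a (bit 0 for the primary channel, bit 1 for the secondary) means an 80-wire cable. A simplified stand-alone sketch of that decision (the enum values are placeholders, not the kernel constants):

#include <stdio.h>

enum cable { CBL_PATA40, CBL_PATA80, CBL_PATA40_SHORT };

static enum cable ali_cable(unsigned char reg4a, int channel, int overridden)
{
	if (overridden)				/* Fujitsu P2000, DMI-listed HP laptop */
		return CBL_PATA40_SHORT;
	/* bit clear => 80-pin cable as seen from the host side */
	return (reg4a & (1 << channel)) ? CBL_PATA40 : CBL_PATA80;
}

int main(void)
{
	printf("%d\n", ali_cable(0x02, 0, 0));	/* primary: bit0 clear -> CBL_PATA80 */
	printf("%d\n", ali_cable(0x02, 1, 0));	/* secondary: bit1 set -> CBL_PATA40 */
	printf("%d\n", ali_cable(0x03, 0, 1));	/* overridden laptop -> CBL_PATA40_SHORT */
	return 0;
}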
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index a2be65fcf89c..84ed30cdb324 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Version 2.16 2 * Version 2.20
3 * 3 *
4 * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04 4 * AMD 755/756/766/8111 and nVidia nForce/2/2s/3/3s/CK804/MCP04
5 * IDE driver for Linux. 5 * IDE driver for Linux.
6 * 6 *
7 * Copyright (c) 2000-2002 Vojtech Pavlik 7 * Copyright (c) 2000-2002 Vojtech Pavlik
8 * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
8 * 9 *
9 * Based on the work of: 10 * Based on the work of:
10 * Andre Hedrick 11 * Andre Hedrick
@@ -37,11 +38,6 @@
37#define AMD_ADDRESS_SETUP (0x0c + amd_config->base) 38#define AMD_ADDRESS_SETUP (0x0c + amd_config->base)
38#define AMD_UDMA_TIMING (0x10 + amd_config->base) 39#define AMD_UDMA_TIMING (0x10 + amd_config->base)
39 40
40#define AMD_UDMA 0x07
41#define AMD_UDMA_33 0x01
42#define AMD_UDMA_66 0x02
43#define AMD_UDMA_100 0x03
44#define AMD_UDMA_133 0x04
45#define AMD_CHECK_SWDMA 0x08 41#define AMD_CHECK_SWDMA 0x08
46#define AMD_BAD_SWDMA 0x10 42#define AMD_BAD_SWDMA 0x10
47#define AMD_BAD_FIFO 0x20 43#define AMD_BAD_FIFO 0x20
@@ -53,32 +49,33 @@
53 49
54static struct amd_ide_chip { 50static struct amd_ide_chip {
55 unsigned short id; 51 unsigned short id;
56 unsigned long base; 52 u8 base;
57 unsigned char flags; 53 u8 udma_mask;
54 u8 flags;
58} amd_ide_chips[] = { 55} amd_ide_chips[] = {
59 { PCI_DEVICE_ID_AMD_COBRA_7401, 0x40, AMD_UDMA_33 | AMD_BAD_SWDMA }, 56 { PCI_DEVICE_ID_AMD_COBRA_7401, 0x40, ATA_UDMA2, AMD_BAD_SWDMA },
60 { PCI_DEVICE_ID_AMD_VIPER_7409, 0x40, AMD_UDMA_66 | AMD_CHECK_SWDMA }, 57 { PCI_DEVICE_ID_AMD_VIPER_7409, 0x40, ATA_UDMA4, AMD_CHECK_SWDMA },
61 { PCI_DEVICE_ID_AMD_VIPER_7411, 0x40, AMD_UDMA_100 | AMD_BAD_FIFO }, 58 { PCI_DEVICE_ID_AMD_VIPER_7411, 0x40, ATA_UDMA5, AMD_BAD_FIFO },
62 { PCI_DEVICE_ID_AMD_OPUS_7441, 0x40, AMD_UDMA_100 }, 59 { PCI_DEVICE_ID_AMD_OPUS_7441, 0x40, ATA_UDMA5, },
63 { PCI_DEVICE_ID_AMD_8111_IDE, 0x40, AMD_UDMA_133 | AMD_CHECK_SERENADE }, 60 { PCI_DEVICE_ID_AMD_8111_IDE, 0x40, ATA_UDMA6, AMD_CHECK_SERENADE },
64 { PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, 0x50, AMD_UDMA_100 }, 61 { PCI_DEVICE_ID_NVIDIA_NFORCE_IDE, 0x50, ATA_UDMA5, },
65 { PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, 0x50, AMD_UDMA_133 }, 62 { PCI_DEVICE_ID_NVIDIA_NFORCE2_IDE, 0x50, ATA_UDMA6, },
66 { PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, 0x50, AMD_UDMA_133 }, 63 { PCI_DEVICE_ID_NVIDIA_NFORCE2S_IDE, 0x50, ATA_UDMA6, },
67 { PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, 0x50, AMD_UDMA_133 }, 64 { PCI_DEVICE_ID_NVIDIA_NFORCE2S_SATA, 0x50, ATA_UDMA6, },
68 { PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, 0x50, AMD_UDMA_133 }, 65 { PCI_DEVICE_ID_NVIDIA_NFORCE3_IDE, 0x50, ATA_UDMA6, },
69 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, 0x50, AMD_UDMA_133 }, 66 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_IDE, 0x50, ATA_UDMA6, },
70 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, 0x50, AMD_UDMA_133 }, 67 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA, 0x50, ATA_UDMA6, },
71 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, 0x50, AMD_UDMA_133 }, 68 { PCI_DEVICE_ID_NVIDIA_NFORCE3S_SATA2, 0x50, ATA_UDMA6, },
72 { PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, AMD_UDMA_133 }, 69 { PCI_DEVICE_ID_NVIDIA_NFORCE_CK804_IDE, 0x50, ATA_UDMA6, },
73 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 }, 70 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, ATA_UDMA6, },
74 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 }, 71 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, ATA_UDMA6, },
75 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 }, 72 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, ATA_UDMA6, },
76 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, AMD_UDMA_133 }, 73 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP61_IDE, 0x50, ATA_UDMA6, },
77 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, AMD_UDMA_133 }, 74 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP65_IDE, 0x50, ATA_UDMA6, },
78 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE, 0x50, AMD_UDMA_133 }, 75 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP67_IDE, 0x50, ATA_UDMA6, },
79 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE, 0x50, AMD_UDMA_133 }, 76 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP73_IDE, 0x50, ATA_UDMA6, },
80 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE, 0x50, AMD_UDMA_133 }, 77 { PCI_DEVICE_ID_NVIDIA_NFORCE_MCP77_IDE, 0x50, ATA_UDMA6, },
81 { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 }, 78 { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, ATA_UDMA5, },
82 { 0 } 79 { 0 }
83}; 80};
84 81
@@ -87,7 +84,7 @@ static ide_pci_device_t *amd_chipset;
87static unsigned int amd_80w; 84static unsigned int amd_80w;
88static unsigned int amd_clock; 85static unsigned int amd_clock;
89 86
90static char *amd_dma[] = { "MWDMA16", "UDMA33", "UDMA66", "UDMA100", "UDMA133" }; 87static char *amd_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
91static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 }; 88static unsigned char amd_cyc2udma[] = { 6, 6, 5, 4, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 3, 7 };
92 89
93/* 90/*
@@ -128,7 +125,7 @@ static int amd74xx_get_info(char *buffer, char **addr, off_t offset, int count)
128 125
129 pci_read_config_byte(dev, PCI_REVISION_ID, &t); 126 pci_read_config_byte(dev, PCI_REVISION_ID, &t);
130 amd_print("Revision: IDE %#x", t); 127 amd_print("Revision: IDE %#x", t);
131 amd_print("Highest DMA rate: %s", amd_dma[amd_config->flags & AMD_UDMA]); 128 amd_print("Highest DMA rate: UDMA%s", amd_dma[fls(amd_config->udma_mask) - 1]);
132 129
133 amd_print("BM-DMA base: %#lx", amd_base); 130 amd_print("BM-DMA base: %#lx", amd_base);
134 amd_print("PCI clock: %d.%dMHz", amd_clock / 1000, amd_clock / 100 % 10); 131 amd_print("PCI clock: %d.%dMHz", amd_clock / 1000, amd_clock / 100 % 10);
@@ -221,12 +218,12 @@ static void amd_set_speed(struct pci_dev *dev, unsigned char dn, struct ide_timi
221 pci_write_config_byte(dev, AMD_DRIVE_TIMING + (3 - dn), 218 pci_write_config_byte(dev, AMD_DRIVE_TIMING + (3 - dn),
222 ((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1)); 219 ((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1));
223 220
224 switch (amd_config->flags & AMD_UDMA) { 221 switch (amd_config->udma_mask) {
225 case AMD_UDMA_33: t = timing->udma ? (0xc0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break; 222 case ATA_UDMA2: t = timing->udma ? (0xc0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
226 case AMD_UDMA_66: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 2, 10)]) : 0x03; break; 223 case ATA_UDMA4: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 2, 10)]) : 0x03; break;
227 case AMD_UDMA_100: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 10)]) : 0x03; break; 224 case ATA_UDMA5: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 10)]) : 0x03; break;
228 case AMD_UDMA_133: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 15)]) : 0x03; break; 225 case ATA_UDMA6: t = timing->udma ? (0xc0 | amd_cyc2udma[FIT(timing->udma, 1, 15)]) : 0x03; break;
229 default: return; 226 default: return;
230 } 227 }
231 228
232 pci_write_config_byte(dev, AMD_UDMA_TIMING + (3 - dn), t); 229 pci_write_config_byte(dev, AMD_UDMA_TIMING + (3 - dn), t);
@@ -248,7 +245,7 @@ static int amd_set_drive(ide_drive_t *drive, u8 speed)
248 ide_config_drive_speed(drive, speed); 245 ide_config_drive_speed(drive, speed);
249 246
250 T = 1000000000 / amd_clock; 247 T = 1000000000 / amd_clock;
251 UT = T / min_t(int, max_t(int, amd_config->flags & AMD_UDMA, 1), 2); 248 UT = (amd_config->udma_mask == ATA_UDMA2) ? T : (T / 2);
252 249
253 ide_timing_compute(drive, speed, &t, T, UT); 250 ide_timing_compute(drive, speed, &t, T, UT);
254 251
@@ -277,29 +274,19 @@ static int amd_set_drive(ide_drive_t *drive, u8 speed)
277static void amd74xx_tune_drive(ide_drive_t *drive, u8 pio) 274static void amd74xx_tune_drive(ide_drive_t *drive, u8 pio)
278{ 275{
279 if (pio == 255) { 276 if (pio == 255) {
280 amd_set_drive(drive, ide_find_best_mode(drive, XFER_PIO | XFER_EPIO)); 277 amd_set_drive(drive, ide_find_best_pio_mode(drive));
281 return; 278 return;
282 } 279 }
283 280
284 amd_set_drive(drive, XFER_PIO_0 + min_t(byte, pio, 5)); 281 amd_set_drive(drive, XFER_PIO_0 + min_t(byte, pio, 5));
285} 282}
286 283
287/*
288 * amd74xx_dmaproc() is a callback from upper layers that can do
289 * a lot, but we use it for DMA/PIO tuning only, delegating everything
290 * else to the default ide_dmaproc().
291 */
292
293static int amd74xx_ide_dma_check(ide_drive_t *drive) 284static int amd74xx_ide_dma_check(ide_drive_t *drive)
294{ 285{
295 int w80 = HWIF(drive)->udma_four; 286 u8 speed = ide_max_dma_mode(drive);
296 287
297 u8 speed = ide_find_best_mode(drive, 288 if (speed == 0)
298 XFER_PIO | XFER_EPIO | XFER_MWDMA | XFER_UDMA | 289 speed = ide_find_best_pio_mode(drive);
299 ((amd_config->flags & AMD_BAD_SWDMA) ? 0 : XFER_SWDMA) |
300 (w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_66 ? XFER_UDMA_66 : 0) |
301 (w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_100 ? XFER_UDMA_100 : 0) |
302 (w80 && (amd_config->flags & AMD_UDMA) >= AMD_UDMA_133 ? XFER_UDMA_133 : 0));
303 290
304 amd_set_drive(drive, speed); 291 amd_set_drive(drive, speed);
305 292
@@ -334,10 +321,10 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
334 * Check 80-wire cable presence. 321 * Check 80-wire cable presence.
335 */ 322 */
336 323
337 switch (amd_config->flags & AMD_UDMA) { 324 switch (amd_config->udma_mask) {
338 325
339 case AMD_UDMA_133: 326 case ATA_UDMA6:
340 case AMD_UDMA_100: 327 case ATA_UDMA5:
341 pci_read_config_byte(dev, AMD_CABLE_DETECT, &t); 328 pci_read_config_byte(dev, AMD_CABLE_DETECT, &t);
342 pci_read_config_dword(dev, AMD_UDMA_TIMING, &u); 329 pci_read_config_dword(dev, AMD_UDMA_TIMING, &u);
343 amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0); 330 amd_80w = ((t & 0x3) ? 1 : 0) | ((t & 0xc) ? 2 : 0);
@@ -349,7 +336,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
349 } 336 }
350 break; 337 break;
351 338
352 case AMD_UDMA_66: 339 case ATA_UDMA4:
353 /* no host side cable detection */ 340 /* no host side cable detection */
354 amd_80w = 0x03; 341 amd_80w = 0x03;
355 break; 342 break;
@@ -370,7 +357,7 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
370 if ((amd_config->flags & AMD_CHECK_SERENADE) && 357 if ((amd_config->flags & AMD_CHECK_SERENADE) &&
371 dev->subsystem_vendor == PCI_VENDOR_ID_AMD && 358 dev->subsystem_vendor == PCI_VENDOR_ID_AMD &&
372 dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE) 359 dev->subsystem_device == PCI_DEVICE_ID_AMD_SERENADE)
373 amd_config->flags = AMD_UDMA_100; 360 amd_config->udma_mask = ATA_UDMA5;
374 361
375/* 362/*
376 * Determine the system bus clock. 363 * Determine the system bus clock.
@@ -395,8 +382,9 @@ static unsigned int __devinit init_chipset_amd74xx(struct pci_dev *dev, const ch
395 */ 382 */
396 383
397 pci_read_config_byte(dev, PCI_REVISION_ID, &t); 384 pci_read_config_byte(dev, PCI_REVISION_ID, &t);
398 printk(KERN_INFO "%s: %s (rev %02x) %s controller\n", 385 printk(KERN_INFO "%s: %s (rev %02x) UDMA%s controller\n",
399 amd_chipset->name, pci_name(dev), t, amd_dma[amd_config->flags & AMD_UDMA]); 386 amd_chipset->name, pci_name(dev), t,
387 amd_dma[fls(amd_config->udma_mask) - 1]);
400 388
401/* 389/*
402 * Register /proc/ide/amd74xx entry 390 * Register /proc/ide/amd74xx entry
@@ -437,12 +425,19 @@ static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif)
437 return; 425 return;
438 426
439 hwif->atapi_dma = 1; 427 hwif->atapi_dma = 1;
440 hwif->ultra_mask = 0x7f;
441 hwif->mwdma_mask = 0x07;
442 hwif->swdma_mask = 0x07;
443 428
444 if (!hwif->udma_four) 429 hwif->ultra_mask = amd_config->udma_mask;
445 hwif->udma_four = (amd_80w >> hwif->channel) & 1; 430 hwif->mwdma_mask = 0x07;
431 if ((amd_config->flags & AMD_BAD_SWDMA) == 0)
432 hwif->swdma_mask = 0x07;
433
434 if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
435 if ((amd_80w >> hwif->channel) & 1)
436 hwif->cbl = ATA_CBL_PATA80;
437 else
438 hwif->cbl = ATA_CBL_PATA40;
439 }
440
446 hwif->ide_dma_check = &amd74xx_ide_dma_check; 441 hwif->ide_dma_check = &amd74xx_ide_dma_check;
447 if (!noautodma) 442 if (!noautodma)
448 hwif->autodma = 1; 443 hwif->autodma = 1;
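With the per-chip udma_mask replacing the old AMD_UDMA_* flag values in amd74xx.c, the highest supported mode is now derived from the mask itself: fls(udma_mask) - 1 indexes the new amd_dma[] name table, and the UDMA cycle base UT is T for UDMA33-only chips and T/2 for everything faster. A small stand-alone check of that indexing (fls() reimplemented here only because this runs outside the kernel):

#include <stdio.h>

static const char *amd_dma[] = { "16", "25", "33", "44", "66", "100", "133" };

static int fls32(unsigned int v)	/* highest set bit, 1-based; 0 if v == 0 */
{
	int n = 0;
	while (v) { n++; v >>= 1; }
	return n;
}

int main(void)
{
	unsigned int ata_udma2 = 0x07;	/* e.g. COBRA_7401 */
	unsigned int ata_udma5 = 0x3f;	/* e.g. VIPER_7411, nForce */
	unsigned int ata_udma6 = 0x7f;	/* e.g. 8111, nForce2 and later */

	printf("UDMA%s\n", amd_dma[fls32(ata_udma2) - 1]);	/* UDMA33  */
	printf("UDMA%s\n", amd_dma[fls32(ata_udma5) - 1]);	/* UDMA100 */
	printf("UDMA%s\n", amd_dma[fls32(ata_udma6) - 1]);	/* UDMA133 */
	return 0;
}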
diff --git a/drivers/ide/pci/atiixp.c b/drivers/ide/pci/atiixp.c
index 8ab33faf6f76..2761510309b3 100644
--- a/drivers/ide/pci/atiixp.c
+++ b/drivers/ide/pci/atiixp.c
@@ -264,10 +264,11 @@ static void __devinit init_hwif_atiixp(ide_hwif_t *hwif)
264 hwif->swdma_mask = 0x04; 264 hwif->swdma_mask = 0x04;
265 265
266 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode); 266 pci_read_config_byte(pdev, ATIIXP_IDE_UDMA_MODE + ch, &udma_mode);
267
267 if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40) 268 if ((udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40)
268 hwif->udma_four = 1; 269 hwif->cbl = ATA_CBL_PATA80;
269 else 270 else
270 hwif->udma_four = 0; 271 hwif->cbl = ATA_CBL_PATA40;
271 272
272 hwif->dma_host_on = &atiixp_dma_host_on; 273 hwif->dma_host_on = &atiixp_dma_host_on;
273 hwif->dma_host_off = &atiixp_dma_host_off; 274 hwif->dma_host_off = &atiixp_dma_host_off;
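The atiixp hunk has no dedicated cable-detect register to read; it infers the cable from what the BIOS already programmed: a UDMA mode of 4 or higher on either drive of the channel only works over an 80-wire cable. Sketched as a predicate, assuming the mode register packs drive 0 in bits 0-2 and drive 1 in bits 4-6 (as the 0x07/0x70 masks suggest):

#include <stdio.h>

static int eighty_wire(unsigned char udma_mode)
{
	return (udma_mode & 0x07) >= 0x04 || (udma_mode & 0x70) >= 0x40;
}

int main(void)
{
	printf("%d\n", eighty_wire(0x22));	/* both drives at UDMA2 -> 0 (40-wire) */
	printf("%d\n", eighty_wire(0x52));	/* drive 1 at UDMA5   -> 1 (80-wire) */
	return 0;
}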
diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c
index 7c57dc696f52..8631b6c8aa15 100644
--- a/drivers/ide/pci/cmd64x.c
+++ b/drivers/ide/pci/cmd64x.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/cmd64x.c Version 1.47 Mar 19, 2007 2 * linux/drivers/ide/pci/cmd64x.c Version 1.50 May 10, 2007
3 * 3 *
4 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. 4 * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines.
5 * Due to massive hardware bugs, UltraDMA is only supported 5 * Due to massive hardware bugs, UltraDMA is only supported
@@ -52,9 +52,6 @@
52#define ARTTIM23_DIS_RA2 0x04 52#define ARTTIM23_DIS_RA2 0x04
53#define ARTTIM23_DIS_RA3 0x08 53#define ARTTIM23_DIS_RA3 0x08
54#define ARTTIM23_INTR_CH1 0x10 54#define ARTTIM23_INTR_CH1 0x10
55#define ARTTIM2 0x57
56#define ARTTIM3 0x57
57#define DRWTIM23 0x58
58#define DRWTIM2 0x58 55#define DRWTIM2 0x58
59#define BRST 0x59 56#define BRST 0x59
60#define DRWTIM3 0x5b 57#define DRWTIM3 0x5b
@@ -469,71 +466,43 @@ static int cmd646_1_ide_dma_end (ide_drive_t *drive)
469 466
470static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const char *name) 467static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const char *name)
471{ 468{
472 u32 class_rev = 0;
473 u8 mrdmode = 0; 469 u8 mrdmode = 0;
474 470
475 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev); 471 if (dev->device == PCI_DEVICE_ID_CMD_646) {
476 class_rev &= 0xff; 472 u8 rev = 0;
477 473
478 switch(dev->device) { 474 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
479 case PCI_DEVICE_ID_CMD_643: 475
480 break; 476 switch (rev) {
481 case PCI_DEVICE_ID_CMD_646: 477 case 0x07:
482 printk(KERN_INFO "%s: chipset revision 0x%02X, ", name, class_rev); 478 case 0x05:
483 switch(class_rev) { 479 printk("%s: UltraDMA capable", name);
484 case 0x07:
485 case 0x05:
486 printk("UltraDMA Capable");
487 break;
488 case 0x03:
489 printk("MultiWord DMA Force Limited");
490 break;
491 case 0x01:
492 default:
493 printk("MultiWord DMA Limited, IRQ workaround enabled");
494 break;
495 }
496 printk("\n");
497 break;
498 case PCI_DEVICE_ID_CMD_648:
499 case PCI_DEVICE_ID_CMD_649:
500 break; 480 break;
481 case 0x03:
501 default: 482 default:
483 printk("%s: MultiWord DMA force limited", name);
484 break;
485 case 0x01:
486 printk("%s: MultiWord DMA limited, "
487 "IRQ workaround enabled\n", name);
502 break; 488 break;
489 }
503 } 490 }
504 491
505 /* Set a good latency timer and cache line size value. */ 492 /* Set a good latency timer and cache line size value. */
506 (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64); 493 (void) pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
507 /* FIXME: pci_set_master() to ensure a good latency timer value */ 494 /* FIXME: pci_set_master() to ensure a good latency timer value */
508 495
509 /* Setup interrupts. */ 496 /*
510 (void) pci_read_config_byte(dev, MRDMODE, &mrdmode); 497 * Enable interrupts, select MEMORY READ LINE for reads.
511 mrdmode &= ~(0x30); 498 *
512 (void) pci_write_config_byte(dev, MRDMODE, mrdmode); 499 * NOTE: although not mentioned in the PCI0646U specs,
513 500 * bits 0-1 are write only and won't be read back as
514 /* Use MEMORY READ LINE for reads. 501 * set or not -- PCI0646U2 specs clarify this point.
515 * NOTE: Although not mentioned in the PCI0646U specs,
516 * these bits are write only and won't be read
517 * back as set or not. The PCI0646U2 specs clarify
518 * this point.
519 */ 502 */
520 (void) pci_write_config_byte(dev, MRDMODE, mrdmode | 0x02); 503 (void) pci_read_config_byte (dev, MRDMODE, &mrdmode);
521 504 mrdmode &= ~0x30;
522 /* Set reasonable active/recovery/address-setup values. */ 505 (void) pci_write_config_byte(dev, MRDMODE, (mrdmode | 0x02));
523 (void) pci_write_config_byte(dev, ARTTIM0, 0x40);
524 (void) pci_write_config_byte(dev, DRWTIM0, 0x3f);
525 (void) pci_write_config_byte(dev, ARTTIM1, 0x40);
526 (void) pci_write_config_byte(dev, DRWTIM1, 0x3f);
527#ifdef __i386__
528 (void) pci_write_config_byte(dev, ARTTIM23, 0x1c);
529#else
530 (void) pci_write_config_byte(dev, ARTTIM23, 0x5c);
531#endif
532 (void) pci_write_config_byte(dev, DRWTIM23, 0x3f);
533 (void) pci_write_config_byte(dev, DRWTIM3, 0x3f);
534#ifdef CONFIG_PPC
535 (void) pci_write_config_byte(dev, UDIDETCR0, 0xf0);
536#endif /* CONFIG_PPC */
537 506
538#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS) 507#if defined(DISPLAY_CMD64X_TIMINGS) && defined(CONFIG_IDE_PROC_FS)
539 508
@@ -548,29 +517,27 @@ static unsigned int __devinit init_chipset_cmd64x(struct pci_dev *dev, const cha
548 return 0; 517 return 0;
549} 518}
550 519
551static unsigned int __devinit ata66_cmd64x(ide_hwif_t *hwif) 520static u8 __devinit ata66_cmd64x(ide_hwif_t *hwif)
552{ 521{
553 u8 ata66 = 0, mask = (hwif->channel) ? 0x02 : 0x01; 522 struct pci_dev *dev = hwif->pci_dev;
523 u8 bmidecsr = 0, mask = hwif->channel ? 0x02 : 0x01;
554 524
555 switch(hwif->pci_dev->device) { 525 switch (dev->device) {
556 case PCI_DEVICE_ID_CMD_643: 526 case PCI_DEVICE_ID_CMD_648:
557 case PCI_DEVICE_ID_CMD_646: 527 case PCI_DEVICE_ID_CMD_649:
558 return ata66; 528 pci_read_config_byte(dev, BMIDECSR, &bmidecsr);
559 default: 529 return (bmidecsr & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
560 break; 530 default:
531 return ATA_CBL_PATA40;
561 } 532 }
562 pci_read_config_byte(hwif->pci_dev, BMIDECSR, &ata66);
563 return (ata66 & mask) ? 1 : 0;
564} 533}
565 534
566static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif) 535static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
567{ 536{
568 struct pci_dev *dev = hwif->pci_dev; 537 struct pci_dev *dev = hwif->pci_dev;
569 unsigned int class_rev; 538 u8 rev = 0;
570 539
571 hwif->autodma = 0; 540 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
572 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class_rev);
573 class_rev &= 0xff;
574 541
575 hwif->tuneproc = &cmd64x_tune_drive; 542 hwif->tuneproc = &cmd64x_tune_drive;
576 hwif->speedproc = &cmd64x_tune_chipset; 543 hwif->speedproc = &cmd64x_tune_chipset;
@@ -580,8 +547,8 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
580 if (!hwif->dma_base) 547 if (!hwif->dma_base)
581 return; 548 return;
582 549
583 hwif->atapi_dma = 1; 550 hwif->atapi_dma = 1;
584 551 hwif->mwdma_mask = 0x07;
585 hwif->ultra_mask = hwif->cds->udma_mask; 552 hwif->ultra_mask = hwif->cds->udma_mask;
586 553
587 /* 554 /*
@@ -596,16 +563,15 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
596 * 563 *
597 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets. 564 * So we only do UltraDMA on revision 0x05 and 0x07 chipsets.
598 */ 565 */
599 if (dev->device == PCI_DEVICE_ID_CMD_646 && class_rev < 5) 566 if (dev->device == PCI_DEVICE_ID_CMD_646 && rev < 5)
600 hwif->ultra_mask = 0x00; 567 hwif->ultra_mask = 0x00;
601 568
602 hwif->mwdma_mask = 0x07;
603
604 hwif->ide_dma_check = &cmd64x_config_drive_for_dma; 569 hwif->ide_dma_check = &cmd64x_config_drive_for_dma;
605 if (!(hwif->udma_four))
606 hwif->udma_four = ata66_cmd64x(hwif);
607 570
608 switch(dev->device) { 571 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
572 hwif->cbl = ata66_cmd64x(hwif);
573
574 switch (dev->device) {
609 case PCI_DEVICE_ID_CMD_648: 575 case PCI_DEVICE_ID_CMD_648:
610 case PCI_DEVICE_ID_CMD_649: 576 case PCI_DEVICE_ID_CMD_649:
611 alt_irq_bits: 577 alt_irq_bits:
@@ -614,10 +580,10 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
614 break; 580 break;
615 case PCI_DEVICE_ID_CMD_646: 581 case PCI_DEVICE_ID_CMD_646:
616 hwif->chipset = ide_cmd646; 582 hwif->chipset = ide_cmd646;
617 if (class_rev == 0x01) { 583 if (rev == 0x01) {
618 hwif->ide_dma_end = &cmd646_1_ide_dma_end; 584 hwif->ide_dma_end = &cmd646_1_ide_dma_end;
619 break; 585 break;
620 } else if (class_rev >= 0x03) 586 } else if (rev >= 0x03)
621 goto alt_irq_bits; 587 goto alt_irq_bits;
622 /* fall thru */ 588 /* fall thru */
623 default: 589 default:
@@ -626,11 +592,9 @@ static void __devinit init_hwif_cmd64x(ide_hwif_t *hwif)
626 break; 592 break;
627 } 593 }
628 594
629
630 if (!noautodma) 595 if (!noautodma)
631 hwif->autodma = 1; 596 hwif->autodma = 1;
632 hwif->drives[0].autodma = hwif->autodma; 597 hwif->drives[0].autodma = hwif->drives[1].autodma = hwif->autodma;
633 hwif->drives[1].autodma = hwif->autodma;
634} 598}
635 599
636static int __devinit init_setup_cmd64x(struct pci_dev *dev, ide_pci_device_t *d) 600static int __devinit init_setup_cmd64x(struct pci_dev *dev, ide_pci_device_t *d)
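Two things change in cmd64x.c above: the chip revision is now read from PCI_REVISION_ID instead of the PCI_CLASS_REVISION dword (UltraDMA stays disabled on CMD646 below revision 5), and the 80-wire check reads the per-channel bit in BMIDECSR only on CMD648/649, since the older chips have no cable detect at all. A compressed sketch of that cable decision (device IDs and return values written symbolically, not the kernel constants):

#include <stdio.h>

enum cable { CBL_PATA40, CBL_PATA80 };
enum chip  { CMD_643, CMD_646, CMD_648, CMD_649 };

static enum cable cmd64x_cable(enum chip device, unsigned char bmidecsr, int channel)
{
	unsigned char mask = channel ? 0x02 : 0x01;

	switch (device) {
	case CMD_648:
	case CMD_649:
		return (bmidecsr & mask) ? CBL_PATA80 : CBL_PATA40;
	default:		/* CMD643/646: no cable detect, assume 40-wire */
		return CBL_PATA40;
	}
}

int main(void)
{
	printf("%d\n", cmd64x_cable(CMD_649, 0x02, 1));	/* secondary on 649 -> 80-wire */
	printf("%d\n", cmd64x_cable(CMD_646, 0xff, 0));	/* 646 -> always 40-wire */
	return 0;
}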
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
index 41925c47ef05..10f61f38243c 100644
--- a/drivers/ide/pci/cs5535.c
+++ b/drivers/ide/pci/cs5535.c
@@ -187,7 +187,8 @@ static u8 __devinit cs5535_cable_detect(struct pci_dev *dev)
187 187
188 /* if a 80 wire cable was detected */ 188 /* if a 80 wire cable was detected */
189 pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit); 189 pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
190 return (bit & 1); 190
191 return (bit & 1) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
191} 192}
192 193
193/**** 194/****
@@ -212,8 +213,7 @@ static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
212 hwif->ultra_mask = 0x1F; 213 hwif->ultra_mask = 0x1F;
213 hwif->mwdma_mask = 0x07; 214 hwif->mwdma_mask = 0x07;
214 215
215 216 hwif->cbl = cs5535_cable_detect(hwif->pci_dev);
216 hwif->udma_four = cs5535_cable_detect(hwif->pci_dev);
217 217
218 if (!noautodma) 218 if (!noautodma)
219 hwif->autodma = 1; 219 hwif->autodma = 1;
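cs5535.c shows the minimal form of the conversion this series converges on: the detect helper returns ATA_CBL_PATA80 or ATA_CBL_PATA40 and the result is assigned to hwif->cbl, replacing the old udma_four flag. In most of the other drivers touched here the assignment is additionally guarded so a port already marked ATA_CBL_PATA40_SHORT (a short but 80-wire-capable cable, see the alim15x3.c special cases above) is left alone. Roughly, with placeholder names instead of the kernel types:

#include <stdio.h>

enum cable { CBL_PATA40, CBL_PATA80, CBL_PATA40_SHORT };

struct port {
	enum cable cbl;
};

/* stand-in for a chip-specific helper such as cs5535_cable_detect() */
static enum cable detect(int bit)
{
	return (bit & 1) ? CBL_PATA80 : CBL_PATA40;
}

static void init_port(struct port *p, int detect_bit)
{
	/* don't override a short-cable quirk set earlier by platform code */
	if (p->cbl != CBL_PATA40_SHORT)
		p->cbl = detect(detect_bit);
}

int main(void)
{
	struct port a = { CBL_PATA40 }, b = { CBL_PATA40_SHORT };

	init_port(&a, 1);
	init_port(&b, 1);
	printf("%d %d\n", a.cbl, b.cbl);	/* 1 (80-wire) and 2 (still short) */
	return 0;
}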
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index c33d0b0f11c9..4b6bae8eee82 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/hpt366.c Version 1.06 Jun 27, 2007 2 * linux/drivers/ide/pci/hpt366.c Version 1.10 Jun 29, 2007
3 * 3 *
4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
5 * Portions Copyright (C) 2001 Sun Microsystems, Inc. 5 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
@@ -77,7 +77,7 @@
77 * since they may tamper with its fields 77 * since they may tamper with its fields
78 * - prefix the driver startup messages with the real chip name 78 * - prefix the driver startup messages with the real chip name
79 * - claim the extra 240 bytes of I/O space for all chips 79 * - claim the extra 240 bytes of I/O space for all chips
80 * - optimize the rate masking/filtering and the drive list lookup code 80 * - optimize the UltraDMA filtering and the drive list lookup code
81 * - use pci_get_slot() to get to the function 1 of HPT36x/374 81 * - use pci_get_slot() to get to the function 1 of HPT36x/374
82 * - cache offset of the channel's misc. control registers (MCRs) being used 82 * - cache offset of the channel's misc. control registers (MCRs) being used
83 * throughout the driver 83 * throughout the driver
@@ -99,9 +99,9 @@
99 * stop duplicating it for each channel by storing the pointer in the pci_dev 99 * stop duplicating it for each channel by storing the pointer in the pci_dev
100 * structure: first, at the init_setup stage, point it to a static "template" 100 * structure: first, at the init_setup stage, point it to a static "template"
101 * with only the chip type and its specific base DPLL frequency, the highest 101 * with only the chip type and its specific base DPLL frequency, the highest
102 * supported DMA mode, and the chip settings table pointer filled, then, at 102 * UltraDMA mode, and the chip settings table pointer filled, then, at the
103 * the init_chipset stage, allocate per-chip instance and fill it with the 103 * init_chipset stage, allocate per-chip instance and fill it with the rest
104 * rest of the necessary information 104 * of the necessary information
105 * - get rid of the constant thresholds in the HPT37x PCI clock detection code, 105 * - get rid of the constant thresholds in the HPT37x PCI clock detection code,
106 * switch to calculating PCI clock frequency based on the chip's base DPLL 106 * switch to calculating PCI clock frequency based on the chip's base DPLL
107 * frequency 107 * frequency
@@ -112,6 +112,7 @@
112 * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips; 112 * also fixing the interchanged 25/40 MHz PCI clock cases for HPT36x chips;
113 * unify HPT36x/37x timing setup code and the speedproc handlers by joining 113 * unify HPT36x/37x timing setup code and the speedproc handlers by joining
114 * the register setting lists into the table indexed by the clock selected 114 * the register setting lists into the table indexed by the clock selected
115 * - set the correct hwif->ultra_mask for each individual chip
115 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com> 116 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
116 */ 117 */
117 118
@@ -391,7 +392,7 @@ enum ata_clock {
391 392
392struct hpt_info { 393struct hpt_info {
393 u8 chip_type; /* Chip type */ 394 u8 chip_type; /* Chip type */
394 u8 max_mode; /* Speeds allowed */ 395 u8 max_ultra; /* Max. UltraDMA mode allowed */
395 u8 dpll_clk; /* DPLL clock in MHz */ 396 u8 dpll_clk; /* DPLL clock in MHz */
396 u8 pci_clk; /* PCI clock in MHz */ 397 u8 pci_clk; /* PCI clock in MHz */
397 u32 **settings; /* Chipset settings table */ 398 u32 **settings; /* Chipset settings table */
@@ -430,77 +431,77 @@ static u32 *hpt37x_settings[NUM_ATA_CLOCKS] = {
430 431
431static struct hpt_info hpt36x __devinitdata = { 432static struct hpt_info hpt36x __devinitdata = {
432 .chip_type = HPT36x, 433 .chip_type = HPT36x,
433 .max_mode = (HPT366_ALLOW_ATA66_4 || HPT366_ALLOW_ATA66_3) ? 2 : 1, 434 .max_ultra = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? 4 : 3) : 2,
434 .dpll_clk = 0, /* no DPLL */ 435 .dpll_clk = 0, /* no DPLL */
435 .settings = hpt36x_settings 436 .settings = hpt36x_settings
436}; 437};
437 438
438static struct hpt_info hpt370 __devinitdata = { 439static struct hpt_info hpt370 __devinitdata = {
439 .chip_type = HPT370, 440 .chip_type = HPT370,
440 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2, 441 .max_ultra = HPT370_ALLOW_ATA100_5 ? 5 : 4,
441 .dpll_clk = 48, 442 .dpll_clk = 48,
442 .settings = hpt37x_settings 443 .settings = hpt37x_settings
443}; 444};
444 445
445static struct hpt_info hpt370a __devinitdata = { 446static struct hpt_info hpt370a __devinitdata = {
446 .chip_type = HPT370A, 447 .chip_type = HPT370A,
447 .max_mode = HPT370_ALLOW_ATA100_5 ? 3 : 2, 448 .max_ultra = HPT370_ALLOW_ATA100_5 ? 5 : 4,
448 .dpll_clk = 48, 449 .dpll_clk = 48,
449 .settings = hpt37x_settings 450 .settings = hpt37x_settings
450}; 451};
451 452
452static struct hpt_info hpt374 __devinitdata = { 453static struct hpt_info hpt374 __devinitdata = {
453 .chip_type = HPT374, 454 .chip_type = HPT374,
454 .max_mode = 3, 455 .max_ultra = 5,
455 .dpll_clk = 48, 456 .dpll_clk = 48,
456 .settings = hpt37x_settings 457 .settings = hpt37x_settings
457}; 458};
458 459
459static struct hpt_info hpt372 __devinitdata = { 460static struct hpt_info hpt372 __devinitdata = {
460 .chip_type = HPT372, 461 .chip_type = HPT372,
461 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3, 462 .max_ultra = HPT372_ALLOW_ATA133_6 ? 6 : 5,
462 .dpll_clk = 55, 463 .dpll_clk = 55,
463 .settings = hpt37x_settings 464 .settings = hpt37x_settings
464}; 465};
465 466
466static struct hpt_info hpt372a __devinitdata = { 467static struct hpt_info hpt372a __devinitdata = {
467 .chip_type = HPT372A, 468 .chip_type = HPT372A,
468 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3, 469 .max_ultra = HPT372_ALLOW_ATA133_6 ? 6 : 5,
469 .dpll_clk = 66, 470 .dpll_clk = 66,
470 .settings = hpt37x_settings 471 .settings = hpt37x_settings
471}; 472};
472 473
473static struct hpt_info hpt302 __devinitdata = { 474static struct hpt_info hpt302 __devinitdata = {
474 .chip_type = HPT302, 475 .chip_type = HPT302,
475 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3, 476 .max_ultra = HPT372_ALLOW_ATA133_6 ? 6 : 5,
476 .dpll_clk = 66, 477 .dpll_clk = 66,
477 .settings = hpt37x_settings 478 .settings = hpt37x_settings
478}; 479};
479 480
480static struct hpt_info hpt371 __devinitdata = { 481static struct hpt_info hpt371 __devinitdata = {
481 .chip_type = HPT371, 482 .chip_type = HPT371,
482 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3, 483 .max_ultra = HPT371_ALLOW_ATA133_6 ? 6 : 5,
483 .dpll_clk = 66, 484 .dpll_clk = 66,
484 .settings = hpt37x_settings 485 .settings = hpt37x_settings
485}; 486};
486 487
487static struct hpt_info hpt372n __devinitdata = { 488static struct hpt_info hpt372n __devinitdata = {
488 .chip_type = HPT372N, 489 .chip_type = HPT372N,
489 .max_mode = HPT372_ALLOW_ATA133_6 ? 4 : 3, 490 .max_ultra = HPT372_ALLOW_ATA133_6 ? 6 : 5,
490 .dpll_clk = 77, 491 .dpll_clk = 77,
491 .settings = hpt37x_settings 492 .settings = hpt37x_settings
492}; 493};
493 494
494static struct hpt_info hpt302n __devinitdata = { 495static struct hpt_info hpt302n __devinitdata = {
495 .chip_type = HPT302N, 496 .chip_type = HPT302N,
496 .max_mode = HPT302_ALLOW_ATA133_6 ? 4 : 3, 497 .max_ultra = HPT302_ALLOW_ATA133_6 ? 6 : 5,
497 .dpll_clk = 77, 498 .dpll_clk = 77,
498 .settings = hpt37x_settings 499 .settings = hpt37x_settings
499}; 500};
500 501
501static struct hpt_info hpt371n __devinitdata = { 502static struct hpt_info hpt371n __devinitdata = {
502 .chip_type = HPT371N, 503 .chip_type = HPT371N,
503 .max_mode = HPT371_ALLOW_ATA133_6 ? 4 : 3, 504 .max_ultra = HPT371_ALLOW_ATA133_6 ? 6 : 5,
504 .dpll_clk = 77, 505 .dpll_clk = 77,
505 .settings = hpt37x_settings 506 .settings = hpt37x_settings
506}; 507};
@@ -523,53 +524,38 @@ static int check_in_drive_list(ide_drive_t *drive, const char **list)
523static u8 hpt3xx_udma_filter(ide_drive_t *drive) 524static u8 hpt3xx_udma_filter(ide_drive_t *drive)
524{ 525{
525 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev); 526 struct hpt_info *info = pci_get_drvdata(HWIF(drive)->pci_dev);
526 u8 chip_type = info->chip_type;
527 u8 mode = info->max_mode;
528 u8 mask; 527 u8 mask;
529 528
530 switch (mode) { 529 switch (info->chip_type) {
531 case 0x04: 530 case HPT370A:
532 mask = 0x7f; 531 if (!HPT370_ALLOW_ATA100_5 ||
533 break; 532 check_in_drive_list(drive, bad_ata100_5))
534 case 0x03: 533 return 0x1f;
534 else
535 return 0x3f;
536 case HPT370:
537 if (!HPT370_ALLOW_ATA100_5 ||
538 check_in_drive_list(drive, bad_ata100_5))
539 mask = 0x1f;
540 else
535 mask = 0x3f; 541 mask = 0x3f;
536 if (chip_type >= HPT374) 542 break;
537 break; 543 case HPT36x:
538 if (!check_in_drive_list(drive, bad_ata100_5)) 544 if (!HPT366_ALLOW_ATA66_4 ||
539 goto check_bad_ata33; 545 check_in_drive_list(drive, bad_ata66_4))
540 /* fall thru */ 546 mask = 0x0f;
541 case 0x02: 547 else
542 mask = 0x1f; 548 mask = 0x1f;
543 549
544 /* 550 if (!HPT366_ALLOW_ATA66_3 ||
545 * CHECK ME, Does this need to be changed to HPT374 ?? 551 check_in_drive_list(drive, bad_ata66_3))
546 */
547 if (chip_type >= HPT370)
548 goto check_bad_ata33;
549 if (HPT366_ALLOW_ATA66_4 &&
550 !check_in_drive_list(drive, bad_ata66_4))
551 goto check_bad_ata33;
552
553 mask = 0x0f;
554 if (HPT366_ALLOW_ATA66_3 &&
555 !check_in_drive_list(drive, bad_ata66_3))
556 goto check_bad_ata33;
557 /* fall thru */
558 case 0x01:
559 mask = 0x07; 552 mask = 0x07;
560 553 break;
561 check_bad_ata33: 554 default:
562 if (chip_type >= HPT370A) 555 return 0x7f;
563 break;
564 if (!check_in_drive_list(drive, bad_ata33))
565 break;
566 /* fall thru */
567 case 0x00:
568 default:
569 mask = 0x00;
570 break;
571 } 556 }
572 return mask; 557
558 return check_in_drive_list(drive, bad_ata33) ? 0x00 : mask;
573} 559}
574 560
575static u32 get_speed_setting(u8 speed, struct hpt_info *info) 561static u32 get_speed_setting(u8 speed, struct hpt_info *info)
@@ -737,7 +723,7 @@ static int hpt366_config_drive_xfer_rate(ide_drive_t *drive)
737 * This is specific to the HPT366 UDMA chipset 723 * This is specific to the HPT366 UDMA chipset
738 * by HighPoint|Triones Technologies, Inc. 724 * by HighPoint|Triones Technologies, Inc.
739 */ 725 */
740static int hpt366_ide_dma_lostirq(ide_drive_t *drive) 726static void hpt366_dma_lost_irq(ide_drive_t *drive)
741{ 727{
742 struct pci_dev *dev = HWIF(drive)->pci_dev; 728 struct pci_dev *dev = HWIF(drive)->pci_dev;
743 u8 mcr1 = 0, mcr3 = 0, scr1 = 0; 729 u8 mcr1 = 0, mcr3 = 0, scr1 = 0;
@@ -749,7 +735,7 @@ static int hpt366_ide_dma_lostirq(ide_drive_t *drive)
749 drive->name, __FUNCTION__, mcr1, mcr3, scr1); 735 drive->name, __FUNCTION__, mcr1, mcr3, scr1);
750 if (scr1 & 0x10) 736 if (scr1 & 0x10)
751 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10); 737 pci_write_config_byte(dev, 0x5a, scr1 & ~0x10);
752 return __ide_dma_lostirq(drive); 738 ide_dma_lost_irq(drive);
753} 739}
754 740
755static void hpt370_clear_engine(ide_drive_t *drive) 741static void hpt370_clear_engine(ide_drive_t *drive)
@@ -799,10 +785,10 @@ static int hpt370_ide_dma_end(ide_drive_t *drive)
799 return __ide_dma_end(drive); 785 return __ide_dma_end(drive);
800} 786}
801 787
802static int hpt370_ide_dma_timeout(ide_drive_t *drive) 788static void hpt370_dma_timeout(ide_drive_t *drive)
803{ 789{
804 hpt370_irq_timeout(drive); 790 hpt370_irq_timeout(drive);
805 return __ide_dma_timeout(drive); 791 ide_dma_timeout(drive);
806} 792}
807 793
808/* returns 1 if DMA IRQ issued, 0 otherwise */ 794/* returns 1 if DMA IRQ issued, 0 otherwise */
@@ -1150,7 +1136,7 @@ static unsigned int __devinit init_chipset_hpt366(struct pci_dev *dev, const cha
1150 * Select 66 MHz DPLL clock only if UltraATA/133 mode is 1136 * Select 66 MHz DPLL clock only if UltraATA/133 mode is
1151 * supported/enabled, use 50 MHz DPLL clock otherwise... 1137 * supported/enabled, use 50 MHz DPLL clock otherwise...
1152 */ 1138 */
1153 if (info->max_mode == 0x04) { 1139 if (info->max_ultra == 6) {
1154 dpll_clk = 66; 1140 dpll_clk = 66;
1155 clock = ATA_CLOCK_66MHZ; 1141 clock = ATA_CLOCK_66MHZ;
1156 } else if (dpll_clk) { /* HPT36x chips don't have DPLL */ 1142 } else if (dpll_clk) { /* HPT36x chips don't have DPLL */
@@ -1243,7 +1229,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1243 struct pci_dev *dev = hwif->pci_dev; 1229 struct pci_dev *dev = hwif->pci_dev;
1244 struct hpt_info *info = pci_get_drvdata(dev); 1230 struct hpt_info *info = pci_get_drvdata(dev);
1245 int serialize = HPT_SERIALIZE_IO; 1231 int serialize = HPT_SERIALIZE_IO;
1246 u8 scr1 = 0, ata66 = (hwif->channel) ? 0x01 : 0x02; 1232 u8 scr1 = 0, ata66 = hwif->channel ? 0x01 : 0x02;
1247 u8 chip_type = info->chip_type; 1233 u8 chip_type = info->chip_type;
1248 u8 new_mcr, old_mcr = 0; 1234 u8 new_mcr, old_mcr = 0;
1249 1235
@@ -1256,7 +1242,9 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1256 hwif->intrproc = &hpt3xx_intrproc; 1242 hwif->intrproc = &hpt3xx_intrproc;
1257 hwif->maskproc = &hpt3xx_maskproc; 1243 hwif->maskproc = &hpt3xx_maskproc;
1258 hwif->busproc = &hpt3xx_busproc; 1244 hwif->busproc = &hpt3xx_busproc;
1259 hwif->udma_filter = &hpt3xx_udma_filter; 1245
1246 if (chip_type <= HPT370A)
1247 hwif->udma_filter = &hpt3xx_udma_filter;
1260 1248
1261 /* 1249 /*
1262 * HPT3xxN chips have some complications: 1250 * HPT3xxN chips have some complications:
@@ -1305,7 +1293,7 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1305 return; 1293 return;
1306 } 1294 }
1307 1295
1308 hwif->ultra_mask = 0x7f; 1296 hwif->ultra_mask = hwif->cds->udma_mask;
1309 hwif->mwdma_mask = 0x07; 1297 hwif->mwdma_mask = 0x07;
1310 1298
1311 /* 1299 /*
@@ -1342,8 +1330,8 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1342 } else 1330 } else
1343 pci_read_config_byte (dev, 0x5a, &scr1); 1331 pci_read_config_byte (dev, 0x5a, &scr1);
1344 1332
1345 if (!hwif->udma_four) 1333 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
1346 hwif->udma_four = (scr1 & ata66) ? 0 : 1; 1334 hwif->cbl = (scr1 & ata66) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
1347 1335
1348 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate; 1336 hwif->ide_dma_check = &hpt366_config_drive_xfer_rate;
1349 1337
@@ -1353,9 +1341,9 @@ static void __devinit init_hwif_hpt366(ide_hwif_t *hwif)
1353 } else if (chip_type >= HPT370) { 1341 } else if (chip_type >= HPT370) {
1354 hwif->dma_start = &hpt370_ide_dma_start; 1342 hwif->dma_start = &hpt370_ide_dma_start;
1355 hwif->ide_dma_end = &hpt370_ide_dma_end; 1343 hwif->ide_dma_end = &hpt370_ide_dma_end;
1356 hwif->ide_dma_timeout = &hpt370_ide_dma_timeout; 1344 hwif->dma_timeout = &hpt370_dma_timeout;
1357 } else 1345 } else
1358 hwif->ide_dma_lostirq = &hpt366_ide_dma_lostirq; 1346 hwif->dma_lost_irq = &hpt366_dma_lost_irq;
1359 1347
1360 if (!noautodma) 1348 if (!noautodma)
1361 hwif->autodma = 1; 1349 hwif->autodma = 1;
@@ -1503,9 +1491,35 @@ static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1503 1491
1504 pci_read_config_byte(dev, PCI_REVISION_ID, &rev); 1492 pci_read_config_byte(dev, PCI_REVISION_ID, &rev);
1505 1493
1506 if (rev > 6) 1494 switch (rev) {
1495 case 0:
1496 case 1:
1497 case 2:
1498 /*
1499 * HPT36x chips have one channel per function and have
1500 * both channel enable bits located differently and visible
1501 * to both functions -- really stupid design decision... :-(
1502 * Bit 4 is for the primary channel, bit 5 for the secondary.
1503 */
1504 d->channels = 1;
1505 d->enablebits[0].mask = d->enablebits[0].val = 0x10;
1506
1507 d->udma_mask = HPT366_ALLOW_ATA66_3 ?
1508 (HPT366_ALLOW_ATA66_4 ? 0x1f : 0x0f) : 0x07;
1509 break;
1510 case 3:
1511 case 4:
1512 d->udma_mask = HPT370_ALLOW_ATA100_5 ? 0x3f : 0x1f;
1513 break;
1514 default:
1507 rev = 6; 1515 rev = 6;
1508 1516 /* fall thru */
1517 case 5:
1518 case 6:
1519 d->udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f;
1520 break;
1521 }
1522
1509 d->name = chipset_names[rev]; 1523 d->name = chipset_names[rev];
1510 1524
1511 pci_set_drvdata(dev, info[rev]); 1525 pci_set_drvdata(dev, info[rev]);
@@ -1513,15 +1527,6 @@ static int __devinit init_setup_hpt366(struct pci_dev *dev, ide_pci_device_t *d)
1513 if (rev > 2) 1527 if (rev > 2)
1514 goto init_single; 1528 goto init_single;
1515 1529
1516 /*
1517 * HPT36x chips have one channel per function and have
1518 * both channel enable bits located differently and visible
1519 * to both functions -- really stupid design decision... :-(
1520 * Bit 4 is for the primary channel, bit 5 for the secondary.
1521 */
1522 d->channels = 1;
1523 d->enablebits[0].mask = d->enablebits[0].val = 0x10;
1524
1525 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) { 1530 if ((dev2 = pci_get_slot(dev->bus, dev->devfn + 1)) != NULL) {
1526 u8 mcr1 = 0, pin1 = 0, pin2 = 0; 1531 u8 mcr1 = 0, pin1 = 0, pin2 = 0;
1527 int ret; 1532 int ret;
@@ -1573,6 +1578,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1573 .channels = 2, 1578 .channels = 2,
1574 .autodma = AUTODMA, 1579 .autodma = AUTODMA,
1575 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1580 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1581 .udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
1576 .bootable = OFF_BOARD, 1582 .bootable = OFF_BOARD,
1577 .extra = 240 1583 .extra = 240
1578 },{ /* 2 */ 1584 },{ /* 2 */
@@ -1584,6 +1590,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1584 .channels = 2, 1590 .channels = 2,
1585 .autodma = AUTODMA, 1591 .autodma = AUTODMA,
1586 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1592 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1593 .udma_mask = HPT302_ALLOW_ATA133_6 ? 0x7f : 0x3f,
1587 .bootable = OFF_BOARD, 1594 .bootable = OFF_BOARD,
1588 .extra = 240 1595 .extra = 240
1589 },{ /* 3 */ 1596 },{ /* 3 */
@@ -1595,6 +1602,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1595 .channels = 2, 1602 .channels = 2,
1596 .autodma = AUTODMA, 1603 .autodma = AUTODMA,
1597 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1604 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1605 .udma_mask = HPT371_ALLOW_ATA133_6 ? 0x7f : 0x3f,
1598 .bootable = OFF_BOARD, 1606 .bootable = OFF_BOARD,
1599 .extra = 240 1607 .extra = 240
1600 },{ /* 4 */ 1608 },{ /* 4 */
@@ -1606,6 +1614,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1606 .channels = 2, /* 4 */ 1614 .channels = 2, /* 4 */
1607 .autodma = AUTODMA, 1615 .autodma = AUTODMA,
1608 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1616 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1617 .udma_mask = 0x3f,
1609 .bootable = OFF_BOARD, 1618 .bootable = OFF_BOARD,
1610 .extra = 240 1619 .extra = 240
1611 },{ /* 5 */ 1620 },{ /* 5 */
@@ -1617,6 +1626,7 @@ static ide_pci_device_t hpt366_chipsets[] __devinitdata = {
1617 .channels = 2, /* 4 */ 1626 .channels = 2, /* 4 */
1618 .autodma = AUTODMA, 1627 .autodma = AUTODMA,
1619 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}}, 1628 .enablebits = {{0x50,0x04,0x04}, {0x54,0x04,0x04}},
1629 .udma_mask = HPT372_ALLOW_ATA133_6 ? 0x7f : 0x3f,
1620 .bootable = OFF_BOARD, 1630 .bootable = OFF_BOARD,
1621 .extra = 240 1631 .extra = 240
1622 } 1632 }
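The rewritten hpt3xx_udma_filter() above replaces the old max_mode ladder with an explicit per-chip decision: HPT370/370A cap drives on the ATA100 blacklist at UDMA4 (mask 0x1f), HPT36x drops to the UDMA3 or UDMA2 masks for drives on the ATA66 blacklists, newer chips simply keep UDMA6 (0x7f), and a drive on the ATA33 blacklist ends up with an empty mask on the older chips. A restatement of that logic with the config macros and blacklist lookups reduced to booleans (illustrative only, not the kernel function):

#include <stdio.h>

/* chip generations, oldest to newest, mirroring hpt_info.chip_type */
enum chip { HPT36x, HPT370, HPT370A, HPT374_OR_LATER };

static unsigned char udma_mask(enum chip chip, int bad100_5, int bad66_4,
			       int bad66_3, int bad33)
{
	unsigned char mask;

	switch (chip) {
	case HPT370A:
		return bad100_5 ? 0x1f : 0x3f;	/* never checks the ATA33 list */
	case HPT370:
		mask = bad100_5 ? 0x1f : 0x3f;
		break;
	case HPT36x:
		mask = bad66_4 ? 0x0f : 0x1f;
		if (bad66_3)
			mask = 0x07;
		break;
	default:
		return 0x7f;			/* newer chips: no extra filtering */
	}
	return bad33 ? 0x00 : mask;		/* ATA33 blacklist disables UDMA entirely */
}

int main(void)
{
	printf("0x%02x\n", udma_mask(HPT370, 1, 0, 0, 0));	/* 0x1f */
	printf("0x%02x\n", udma_mask(HPT36x, 0, 0, 1, 0));	/* 0x07 */
	printf("0x%02x\n", udma_mask(HPT36x, 0, 0, 0, 1));	/* 0x00 */
	return 0;
}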
diff --git a/drivers/ide/pci/it8213.c b/drivers/ide/pci/it8213.c
index c04a02687b95..ff48c23e571e 100644
--- a/drivers/ide/pci/it8213.c
+++ b/drivers/ide/pci/it8213.c
@@ -231,7 +231,7 @@ static int it8213_config_drive_for_dma (ide_drive_t *drive)
231 231
232static void __devinit init_hwif_it8213(ide_hwif_t *hwif) 232static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
233{ 233{
234 u8 reg42h = 0, ata66 = 0; 234 u8 reg42h = 0;
235 235
236 hwif->speedproc = &it8213_tune_chipset; 236 hwif->speedproc = &it8213_tune_chipset;
237 hwif->tuneproc = &it8213_tuneproc; 237 hwif->tuneproc = &it8213_tuneproc;
@@ -250,11 +250,11 @@ static void __devinit init_hwif_it8213(ide_hwif_t *hwif)
250 hwif->swdma_mask = 0x04; 250 hwif->swdma_mask = 0x04;
251 251
252 pci_read_config_byte(hwif->pci_dev, 0x42, &reg42h); 252 pci_read_config_byte(hwif->pci_dev, 0x42, &reg42h);
253 ata66 = (reg42h & 0x02) ? 0 : 1;
254 253
255 hwif->ide_dma_check = &it8213_config_drive_for_dma; 254 hwif->ide_dma_check = &it8213_config_drive_for_dma;
256 if (!(hwif->udma_four)) 255
257 hwif->udma_four = ata66; 256 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
257 hwif->cbl = (reg42h & 0x02) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
258 258
259 /* 259 /*
260 * The BIOS often doesn't set up DMA on this controller 260 * The BIOS often doesn't set up DMA on this controller
diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c
index 3aeb7f1b7916..8197b653ba1e 100644
--- a/drivers/ide/pci/it821x.c
+++ b/drivers/ide/pci/it821x.c
@@ -491,10 +491,10 @@ static int it821x_config_drive_for_dma (ide_drive_t *drive)
491 * the needed logic onboard. 491 * the needed logic onboard.
492 */ 492 */
493 493
494static unsigned int __devinit ata66_it821x(ide_hwif_t *hwif) 494static u8 __devinit ata66_it821x(ide_hwif_t *hwif)
495{ 495{
496 /* The reference driver also only does disk side */ 496 /* The reference driver also only does disk side */
497 return 1; 497 return ATA_CBL_PATA80;
498} 498}
499 499
500/** 500/**
@@ -662,8 +662,9 @@ static void __devinit init_hwif_it821x(ide_hwif_t *hwif)
662 hwif->mwdma_mask = 0x07; 662 hwif->mwdma_mask = 0x07;
663 663
664 hwif->ide_dma_check = &it821x_config_drive_for_dma; 664 hwif->ide_dma_check = &it821x_config_drive_for_dma;
665 if (!(hwif->udma_four)) 665
666 hwif->udma_four = ata66_it821x(hwif); 666 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
667 hwif->cbl = ata66_it821x(hwif);
667 668
668 /* 669 /*
669 * The BIOS often doesn't set up DMA on this controller 670 * The BIOS often doesn't set up DMA on this controller
diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c
index 76ed25147229..a6008f63e71e 100644
--- a/drivers/ide/pci/jmicron.c
+++ b/drivers/ide/pci/jmicron.c
@@ -25,10 +25,10 @@ typedef enum {
25 * ata66_jmicron - Cable check 25 * ata66_jmicron - Cable check
26 * @hwif: IDE port 26 * @hwif: IDE port
27 * 27 *
28 * Return 1 if the cable is 80pin 28 * Returns the cable type.
29 */ 29 */
30 30
31static int __devinit ata66_jmicron(ide_hwif_t *hwif) 31static u8 __devinit ata66_jmicron(ide_hwif_t *hwif)
32{ 32{
33 struct pci_dev *pdev = hwif->pci_dev; 33 struct pci_dev *pdev = hwif->pci_dev;
34 34
@@ -70,16 +70,17 @@ static int __devinit ata66_jmicron(ide_hwif_t *hwif)
70 { 70 {
71 case PORT_PATA0: 71 case PORT_PATA0:
72 if (control & (1 << 3)) /* 40/80 pin primary */ 72 if (control & (1 << 3)) /* 40/80 pin primary */
73 return 0; 73 return ATA_CBL_PATA40;
74 return 1; 74 return ATA_CBL_PATA80;
75 case PORT_PATA1: 75 case PORT_PATA1:
76 if (control5 & (1 << 19)) /* 40/80 pin secondary */ 76 if (control5 & (1 << 19)) /* 40/80 pin secondary */
77 return 0; 77 return ATA_CBL_PATA40;
78 return 1; 78 return ATA_CBL_PATA80;
79 case PORT_SATA: 79 case PORT_SATA:
80 break; 80 break;
81 } 81 }
82 return 1; /* Avoid bogus "control reaches end of non-void function" */ 82 /* Avoid bogus "control reaches end of non-void function" */
83 return ATA_CBL_PATA80;
83} 84}
84 85
85static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted) 86static void jmicron_tuneproc (ide_drive_t *drive, byte mode_wanted)
@@ -159,8 +160,9 @@ static void __devinit init_hwif_jmicron(ide_hwif_t *hwif)
159 hwif->mwdma_mask = 0x07; 160 hwif->mwdma_mask = 0x07;
160 161
161 hwif->ide_dma_check = &jmicron_config_drive_for_dma; 162 hwif->ide_dma_check = &jmicron_config_drive_for_dma;
162 if (!(hwif->udma_four)) 163
163 hwif->udma_four = ata66_jmicron(hwif); 164 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
165 hwif->cbl = ata66_jmicron(hwif);
164 166
165 hwif->autodma = 1; 167 hwif->autodma = 1;
166 hwif->drives[0].autodma = hwif->autodma; 168 hwif->drives[0].autodma = hwif->autodma;
diff --git a/drivers/ide/pci/pdc202xx_new.c b/drivers/ide/pci/pdc202xx_new.c
index 0765dce6948e..ee5020df005d 100644
--- a/drivers/ide/pci/pdc202xx_new.c
+++ b/drivers/ide/pci/pdc202xx_new.c
@@ -225,7 +225,10 @@ static void pdcnew_tune_drive(ide_drive_t *drive, u8 pio)
225 225
226static u8 pdcnew_cable_detect(ide_hwif_t *hwif) 226static u8 pdcnew_cable_detect(ide_hwif_t *hwif)
227{ 227{
228 return get_indexed_reg(hwif, 0x0b) & 0x04; 228 if (get_indexed_reg(hwif, 0x0b) & 0x04)
229 return ATA_CBL_PATA40;
230 else
231 return ATA_CBL_PATA80;
229} 232}
230 233
231static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive) 234static int pdcnew_config_drive_xfer_rate(ide_drive_t *drive)
@@ -509,8 +512,8 @@ static void __devinit init_hwif_pdc202new(ide_hwif_t *hwif)
509 512
510 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate; 513 hwif->ide_dma_check = &pdcnew_config_drive_xfer_rate;
511 514
512 if (!hwif->udma_four) 515 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
513 hwif->udma_four = pdcnew_cable_detect(hwif) ? 0 : 1; 516 hwif->cbl = pdcnew_cable_detect(hwif);
514 517
515 if (!noautodma) 518 if (!noautodma)
516 hwif->autodma = 1; 519 hwif->autodma = 1;
diff --git a/drivers/ide/pci/pdc202xx_old.c b/drivers/ide/pci/pdc202xx_old.c
index 23844687deea..41ac4a94959f 100644
--- a/drivers/ide/pci/pdc202xx_old.c
+++ b/drivers/ide/pci/pdc202xx_old.c
@@ -152,8 +152,10 @@ static void pdc202xx_tune_drive(ide_drive_t *drive, u8 pio)
152static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif) 152static u8 pdc202xx_old_cable_detect (ide_hwif_t *hwif)
153{ 153{
154 u16 CIS = 0, mask = (hwif->channel) ? (1<<11) : (1<<10); 154 u16 CIS = 0, mask = (hwif->channel) ? (1<<11) : (1<<10);
155
155 pci_read_config_word(hwif->pci_dev, 0x50, &CIS); 156 pci_read_config_word(hwif->pci_dev, 0x50, &CIS);
156 return (CIS & mask) ? 1 : 0; 157
158 return (CIS & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
157} 159}
158 160
159/* 161/*
@@ -267,18 +269,24 @@ somebody_else:
267 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */ 269 return (dma_stat & 4) == 4; /* return 1 if INTR asserted */
268} 270}
269 271
270static int pdc202xx_ide_dma_lostirq(ide_drive_t *drive) 272static void pdc202xx_dma_lost_irq(ide_drive_t *drive)
271{ 273{
272 if (HWIF(drive)->resetproc != NULL) 274 ide_hwif_t *hwif = HWIF(drive);
273 HWIF(drive)->resetproc(drive); 275
274 return __ide_dma_lostirq(drive); 276 if (hwif->resetproc != NULL)
277 hwif->resetproc(drive);
278
279 ide_dma_lost_irq(drive);
275} 280}
276 281
277static int pdc202xx_ide_dma_timeout(ide_drive_t *drive) 282static void pdc202xx_dma_timeout(ide_drive_t *drive)
278{ 283{
279 if (HWIF(drive)->resetproc != NULL) 284 ide_hwif_t *hwif = HWIF(drive);
280 HWIF(drive)->resetproc(drive); 285
281 return __ide_dma_timeout(drive); 286 if (hwif->resetproc != NULL)
287 hwif->resetproc(drive);
288
289 ide_dma_timeout(drive);
282} 290}
283 291
284static void pdc202xx_reset_host (ide_hwif_t *hwif) 292static void pdc202xx_reset_host (ide_hwif_t *hwif)
@@ -347,12 +355,13 @@ static void __devinit init_hwif_pdc202xx(ide_hwif_t *hwif)
347 hwif->err_stops_fifo = 1; 355 hwif->err_stops_fifo = 1;
348 356
349 hwif->ide_dma_check = &pdc202xx_config_drive_xfer_rate; 357 hwif->ide_dma_check = &pdc202xx_config_drive_xfer_rate;
350 hwif->ide_dma_lostirq = &pdc202xx_ide_dma_lostirq; 358 hwif->dma_lost_irq = &pdc202xx_dma_lost_irq;
351 hwif->ide_dma_timeout = &pdc202xx_ide_dma_timeout; 359 hwif->dma_timeout = &pdc202xx_dma_timeout;
352 360
353 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) { 361 if (hwif->pci_dev->device != PCI_DEVICE_ID_PROMISE_20246) {
354 if (!(hwif->udma_four)) 362 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
355 hwif->udma_four = (pdc202xx_old_cable_detect(hwif)) ? 0 : 1; 363 hwif->cbl = pdc202xx_old_cable_detect(hwif);
364
356 hwif->dma_start = &pdc202xx_old_ide_dma_start; 365 hwif->dma_start = &pdc202xx_old_ide_dma_start;
357 hwif->ide_dma_end = &pdc202xx_old_ide_dma_end; 366 hwif->ide_dma_end = &pdc202xx_old_ide_dma_end;
358 } 367 }
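The same conversion recurs in several drivers below (sgiioc4, sl82c105, pmac): the int-returning ide_dma_lostirq/ide_dma_timeout methods become void dma_lost_irq/dma_timeout hooks that do any chip-specific recovery and then call the generic ide_dma_lost_irq()/ide_dma_timeout() helpers. A minimal template, assuming only what the hunks above already show (mydrv_ is a placeholder name):

static void mydrv_dma_lost_irq(ide_drive_t *drive)
{
	ide_hwif_t *hwif = HWIF(drive);

	if (hwif->resetproc != NULL)
		hwif->resetproc(drive);		/* chip-specific recovery first */

	ide_dma_lost_irq(drive);		/* then the generic handling */
}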
diff --git a/drivers/ide/pci/piix.c b/drivers/ide/pci/piix.c
index 8b219dd63024..2e0b29ef596a 100644
--- a/drivers/ide/pci/piix.c
+++ b/drivers/ide/pci/piix.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/piix.c Version 0.47 February 8, 2007 2 * linux/drivers/ide/pci/piix.c Version 0.50 Jun 10, 2007
3 * 3 *
4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer 4 * Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org> 5 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
@@ -394,14 +394,45 @@ static void piix_dma_clear_irq(ide_drive_t *drive)
394 hwif->OUTB(dma_stat, hwif->dma_status); 394 hwif->OUTB(dma_stat, hwif->dma_status);
395} 395}
396 396
397static int __devinit piix_cable_detect(ide_hwif_t *hwif) 397struct ich_laptop {
398 u16 device;
399 u16 subvendor;
400 u16 subdevice;
401};
402
403/*
404 * List of laptops that use short cables rather than 80-wire cables

405 */
406
407static const struct ich_laptop ich_laptop[] = {
408 /* devid, subvendor, subdev */
409 { 0x27DF, 0x0005, 0x0280 }, /* ICH7 on Acer 5602WLMi */
410 { 0x27DF, 0x1025, 0x0110 }, /* ICH7 on Acer 3682WLMi */
411 { 0x27DF, 0x1043, 0x1267 }, /* ICH7 on Asus W5F */
412 { 0x24CA, 0x1025, 0x0061 }, /* ICH4 on Acer Aspire 2023WLMi */
413 /* end marker */
414 { 0, }
415};
416
417static u8 __devinit piix_cable_detect(ide_hwif_t *hwif)
398{ 418{
399 struct pci_dev *dev = hwif->pci_dev; 419 struct pci_dev *pdev = hwif->pci_dev;
420 const struct ich_laptop *lap = &ich_laptop[0];
400 u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30; 421 u8 reg54h = 0, mask = hwif->channel ? 0xc0 : 0x30;
401 422
402 pci_read_config_byte(dev, 0x54, &reg54h); 423 /* check for specials */
424 while (lap->device) {
425 if (lap->device == pdev->device &&
426 lap->subvendor == pdev->subsystem_vendor &&
427 lap->subdevice == pdev->subsystem_device) {
428 return ATA_CBL_PATA40_SHORT;
429 }
430 lap++;
431 }
432
433 pci_read_config_byte(pdev, 0x54, &reg54h);
403 434
404 return (reg54h & mask) ? 1 : 0; 435 return (reg54h & mask) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
405} 436}
406 437
407/** 438/**
@@ -444,8 +475,8 @@ static void __devinit init_hwif_piix(ide_hwif_t *hwif)
444 hwif->swdma_mask = 0x04; 475 hwif->swdma_mask = 0x04;
445 476
446 if (hwif->ultra_mask & 0x78) { 477 if (hwif->ultra_mask & 0x78) {
447 if (!hwif->udma_four) 478 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
448 hwif->udma_four = piix_cable_detect(hwif); 479 hwif->cbl = piix_cable_detect(hwif);
449 } 480 }
450 481
451 if (no_piix_dma) 482 if (no_piix_dma)
diff --git a/drivers/ide/pci/scc_pata.c b/drivers/ide/pci/scc_pata.c
index 55bc0a32e34f..7b87488e3daa 100644
--- a/drivers/ide/pci/scc_pata.c
+++ b/drivers/ide/pci/scc_pata.c
@@ -716,7 +716,7 @@ static void __devinit init_hwif_scc(ide_hwif_t *hwif)
716 hwif->atapi_dma = 1; 716 hwif->atapi_dma = 1;
717 717
718 /* we support 80c cable only. */ 718 /* we support 80c cable only. */
719 hwif->udma_four = 1; 719 hwif->cbl = ATA_CBL_PATA80;
720 720
721 hwif->autodma = 0; 721 hwif->autodma = 0;
722 if (!noautodma) 722 if (!noautodma)
diff --git a/drivers/ide/pci/serverworks.c b/drivers/ide/pci/serverworks.c
index d9c4fd1ae996..1371b5bf6bf0 100644
--- a/drivers/ide/pci/serverworks.c
+++ b/drivers/ide/pci/serverworks.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/serverworks.c Version 0.11 Jun 2 2007 2 * linux/drivers/ide/pci/serverworks.c Version 0.20 Jun 3 2007
3 * 3 *
4 * Copyright (C) 1998-2000 Michel Aubry 4 * Copyright (C) 1998-2000 Michel Aubry
5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz 5 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
@@ -151,84 +151,11 @@ static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
151 if(dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4 && 151 if(dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4 &&
152 drive->media == ide_disk && speed >= XFER_UDMA_0) 152 drive->media == ide_disk && speed >= XFER_UDMA_0)
153 BUG(); 153 BUG();
154 154
155 pci_read_config_byte(dev, drive_pci[drive->dn], &pio_timing);
156 pci_read_config_byte(dev, drive_pci2[drive->dn], &dma_timing);
157 pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing); 155 pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
158 pci_read_config_word(dev, 0x4A, &csb5_pio); 156 pci_read_config_word(dev, 0x4A, &csb5_pio);
159 pci_read_config_byte(dev, 0x54, &ultra_enable); 157 pci_read_config_byte(dev, 0x54, &ultra_enable);
160 158
161 /* If we are in RAID mode (eg AMI MegaIDE) then we can't it
162 turns out trust the firmware configuration */
163
164 if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
165 goto oem_setup_failed;
166
167 /* Per Specified Design by OEM, and ASIC Architect */
168 if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
169 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
170 if (!drive->init_speed) {
171 u8 dma_stat = inb(hwif->dma_status);
172
173 if (((ultra_enable << (7-drive->dn) & 0x80) == 0x80) &&
174 ((dma_stat & (1<<(5+unit))) == (1<<(5+unit)))) {
175 drive->current_speed = drive->init_speed = XFER_UDMA_0 + udma_modes[(ultra_timing >> (4*unit)) & ~(0xF0)];
176 return 0;
177 } else if ((dma_timing) &&
178 ((dma_stat&(1<<(5+unit)))==(1<<(5+unit)))) {
179 u8 dmaspeed;
180
181 switch (dma_timing & 0x77) {
182 case 0x20:
183 dmaspeed = XFER_MW_DMA_2;
184 break;
185 case 0x21:
186 dmaspeed = XFER_MW_DMA_1;
187 break;
188 case 0x77:
189 dmaspeed = XFER_MW_DMA_0;
190 break;
191 default:
192 goto dma_pio;
193 }
194
195 drive->current_speed = drive->init_speed = dmaspeed;
196 return 0;
197 }
198dma_pio:
199 if (pio_timing) {
200 u8 piospeed;
201
202 switch (pio_timing & 0x7f) {
203 case 0x20:
204 piospeed = XFER_PIO_4;
205 break;
206 case 0x22:
207 piospeed = XFER_PIO_3;
208 break;
209 case 0x34:
210 piospeed = XFER_PIO_2;
211 break;
212 case 0x47:
213 piospeed = XFER_PIO_1;
214 break;
215 case 0x5d:
216 piospeed = XFER_PIO_0;
217 break;
218 default:
219 goto oem_setup_failed;
220 }
221
222 drive->current_speed = drive->init_speed = piospeed;
223 return 0;
224 }
225 }
226 }
227
228oem_setup_failed:
229
230 pio_timing = 0;
231 dma_timing = 0;
232 ultra_timing &= ~(0x0F << (4*unit)); 159 ultra_timing &= ~(0x0F << (4*unit));
233 ultra_enable &= ~(0x01 << drive->dn); 160 ultra_enable &= ~(0x01 << drive->dn);
234 csb5_pio &= ~(0x0F << (4*drive->dn)); 161 csb5_pio &= ~(0x0F << (4*drive->dn));
@@ -402,9 +329,9 @@ static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const cha
402 return dev->irq; 329 return dev->irq;
403} 330}
404 331
405static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif) 332static u8 __devinit ata66_svwks_svwks(ide_hwif_t *hwif)
406{ 333{
407 return 1; 334 return ATA_CBL_PATA80;
408} 335}
409 336
410/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits 337/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
@@ -414,7 +341,7 @@ static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
414 * Bit 14 clear = primary IDE channel does not have 80-pin cable. 341 * Bit 14 clear = primary IDE channel does not have 80-pin cable.
415 * Bit 14 set = primary IDE channel has 80-pin cable. 342 * Bit 14 set = primary IDE channel has 80-pin cable.
416 */ 343 */
417static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif) 344static u8 __devinit ata66_svwks_dell(ide_hwif_t *hwif)
418{ 345{
419 struct pci_dev *dev = hwif->pci_dev; 346 struct pci_dev *dev = hwif->pci_dev;
420 if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL && 347 if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
@@ -422,8 +349,8 @@ static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
422 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE || 349 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
423 dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE)) 350 dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
424 return ((1 << (hwif->channel + 14)) & 351 return ((1 << (hwif->channel + 14)) &
425 dev->subsystem_device) ? 1 : 0; 352 dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
426 return 0; 353 return ATA_CBL_PATA40;
427} 354}
428 355
429/* Sun Cobalt Alpine hardware avoids the 80-pin cable 356/* Sun Cobalt Alpine hardware avoids the 80-pin cable
@@ -432,18 +359,18 @@ static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
432 * 359 *
433 * WARNING: this only works on Alpine hardware! 360 * WARNING: this only works on Alpine hardware!
434 */ 361 */
435static unsigned int __devinit ata66_svwks_cobalt (ide_hwif_t *hwif) 362static u8 __devinit ata66_svwks_cobalt(ide_hwif_t *hwif)
436{ 363{
437 struct pci_dev *dev = hwif->pci_dev; 364 struct pci_dev *dev = hwif->pci_dev;
438 if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN && 365 if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
439 dev->vendor == PCI_VENDOR_ID_SERVERWORKS && 366 dev->vendor == PCI_VENDOR_ID_SERVERWORKS &&
440 dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) 367 dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
441 return ((1 << (hwif->channel + 14)) & 368 return ((1 << (hwif->channel + 14)) &
442 dev->subsystem_device) ? 1 : 0; 369 dev->subsystem_device) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
443 return 0; 370 return ATA_CBL_PATA40;
444} 371}
445 372
446static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif) 373static u8 __devinit ata66_svwks(ide_hwif_t *hwif)
447{ 374{
448 struct pci_dev *dev = hwif->pci_dev; 375 struct pci_dev *dev = hwif->pci_dev;
449 376
@@ -462,9 +389,9 @@ static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
462 /* Per Specified Design by OEM, and ASIC Architect */ 389 /* Per Specified Design by OEM, and ASIC Architect */
463 if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) || 390 if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
464 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) 391 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
465 return 1; 392 return ATA_CBL_PATA80;
466 393
467 return 0; 394 return ATA_CBL_PATA40;
468} 395}
469 396
470static void __devinit init_hwif_svwks (ide_hwif_t *hwif) 397static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
@@ -495,8 +422,8 @@ static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
495 422
496 hwif->ide_dma_check = &svwks_config_drive_xfer_rate; 423 hwif->ide_dma_check = &svwks_config_drive_xfer_rate;
497 if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) { 424 if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
498 if (!hwif->udma_four) 425 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
499 hwif->udma_four = ata66_svwks(hwif); 426 hwif->cbl = ata66_svwks(hwif);
500 } 427 }
501 if (!noautodma) 428 if (!noautodma)
502 hwif->autodma = 1; 429 hwif->autodma = 1;
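To make the Dell/Cobalt subsystem-ID checks above concrete: the mask is 1 << 14 = 0x4000 for the primary channel and 1 << 15 = 0x8000 for the secondary, so a hypothetical subsystem device ID of 0x4007 would yield ATA_CBL_PATA80 on the primary channel and ATA_CBL_PATA40 on the secondary, while 0xC007 would yield 80-wire on both (these IDs are illustrative, not real Dell or Cobalt values).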
diff --git a/drivers/ide/pci/sgiioc4.c b/drivers/ide/pci/sgiioc4.c
index d3185e29a38e..d396b2929ed8 100644
--- a/drivers/ide/pci/sgiioc4.c
+++ b/drivers/ide/pci/sgiioc4.c
@@ -316,14 +316,6 @@ static void sgiioc4_dma_host_off(ide_drive_t * drive)
316 sgiioc4_clearirq(drive); 316 sgiioc4_clearirq(drive);
317} 317}
318 318
319static int
320sgiioc4_ide_dma_lostirq(ide_drive_t * drive)
321{
322 HWIF(drive)->resetproc(drive);
323
324 return __ide_dma_lostirq(drive);
325}
326
327static void 319static void
328sgiioc4_resetproc(ide_drive_t * drive) 320sgiioc4_resetproc(ide_drive_t * drive)
329{ 321{
@@ -331,6 +323,14 @@ sgiioc4_resetproc(ide_drive_t * drive)
331 sgiioc4_clearirq(drive); 323 sgiioc4_clearirq(drive);
332} 324}
333 325
326static void
327sgiioc4_dma_lost_irq(ide_drive_t * drive)
328{
329 sgiioc4_resetproc(drive);
330
331 ide_dma_lost_irq(drive);
332}
333
334static u8 334static u8
335sgiioc4_INB(unsigned long port) 335sgiioc4_INB(unsigned long port)
336{ 336{
@@ -607,8 +607,8 @@ ide_init_sgiioc4(ide_hwif_t * hwif)
607 hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq; 607 hwif->ide_dma_test_irq = &sgiioc4_ide_dma_test_irq;
608 hwif->dma_host_on = &sgiioc4_dma_host_on; 608 hwif->dma_host_on = &sgiioc4_dma_host_on;
609 hwif->dma_host_off = &sgiioc4_dma_host_off; 609 hwif->dma_host_off = &sgiioc4_dma_host_off;
610 hwif->ide_dma_lostirq = &sgiioc4_ide_dma_lostirq; 610 hwif->dma_lost_irq = &sgiioc4_dma_lost_irq;
611 hwif->ide_dma_timeout = &__ide_dma_timeout; 611 hwif->dma_timeout = &ide_dma_timeout;
612 612
613 hwif->INB = &sgiioc4_INB; 613 hwif->INB = &sgiioc4_INB;
614} 614}
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 1a4444e7226a..1c3e35487893 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -933,16 +933,17 @@ static void __devinit init_iops_siimage(ide_hwif_t *hwif)
933 * interface. 933 * interface.
934 */ 934 */
935 935
936static unsigned int __devinit ata66_siimage(ide_hwif_t *hwif) 936static u8 __devinit ata66_siimage(ide_hwif_t *hwif)
937{ 937{
938 unsigned long addr = siimage_selreg(hwif, 0); 938 unsigned long addr = siimage_selreg(hwif, 0);
939 if (pci_get_drvdata(hwif->pci_dev) == NULL) { 939 u8 ata66 = 0;
940 u8 ata66 = 0; 940
941 if (pci_get_drvdata(hwif->pci_dev) == NULL)
941 pci_read_config_byte(hwif->pci_dev, addr, &ata66); 942 pci_read_config_byte(hwif->pci_dev, addr, &ata66);
942 return (ata66 & 0x01) ? 1 : 0; 943 else
943 } 944 ata66 = hwif->INB(addr);
944 945
945 return (hwif->INB(addr) & 0x01) ? 1 : 0; 946 return (ata66 & 0x01) ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
946} 947}
947 948
948/** 949/**
@@ -988,8 +989,9 @@ static void __devinit init_hwif_siimage(ide_hwif_t *hwif)
988 hwif->atapi_dma = 1; 989 hwif->atapi_dma = 1;
989 990
990 hwif->ide_dma_check = &siimage_config_drive_for_dma; 991 hwif->ide_dma_check = &siimage_config_drive_for_dma;
991 if (!(hwif->udma_four)) 992
992 hwif->udma_four = ata66_siimage(hwif); 993 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
994 hwif->cbl = ata66_siimage(hwif);
993 995
994 if (hwif->mmio) { 996 if (hwif->mmio) {
995 hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq; 997 hwif->ide_dma_test_irq = &siimage_mmio_ide_dma_test_irq;
diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c
index ec0adad9ef61..f875183ac8d9 100644
--- a/drivers/ide/pci/sis5513.c
+++ b/drivers/ide/pci/sis5513.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * linux/drivers/ide/pci/sis5513.c Version 0.20 Mar 4, 2007 2 * linux/drivers/ide/pci/sis5513.c Version 0.25 Jun 10, 2007
3 * 3 *
4 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org> 4 * Copyright (C) 1999-2000 Andre Hedrick <andre@linux-ide.org>
5 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer 5 * Copyright (C) 2002 Lionel Bouton <Lionel.Bouton@inet6.fr>, Maintainer
@@ -796,10 +796,33 @@ static unsigned int __devinit init_chipset_sis5513 (struct pci_dev *dev, const c
796 return 0; 796 return 0;
797} 797}
798 798
799static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif) 799struct sis_laptop {
800 u16 device;
801 u16 subvendor;
802 u16 subdevice;
803};
804
805static const struct sis_laptop sis_laptop[] = {
806 /* devid, subvendor, subdev */
807 { 0x5513, 0x1043, 0x1107 }, /* ASUS A6K */
808 /* end marker */
809 { 0, }
810};
811
812static u8 __devinit ata66_sis5513(ide_hwif_t *hwif)
800{ 813{
814 struct pci_dev *pdev = hwif->pci_dev;
815 const struct sis_laptop *lap = &sis_laptop[0];
801 u8 ata66 = 0; 816 u8 ata66 = 0;
802 817
818 while (lap->device) {
819 if (lap->device == pdev->device &&
820 lap->subvendor == pdev->subsystem_vendor &&
821 lap->subdevice == pdev->subsystem_device)
822 return ATA_CBL_PATA40_SHORT;
823 lap++;
824 }
825
803 if (chipset_family >= ATA_133) { 826 if (chipset_family >= ATA_133) {
804 u16 regw = 0; 827 u16 regw = 0;
805 u16 reg_addr = hwif->channel ? 0x52: 0x50; 828 u16 reg_addr = hwif->channel ? 0x52: 0x50;
@@ -811,7 +834,8 @@ static unsigned int __devinit ata66_sis5513 (ide_hwif_t *hwif)
811 pci_read_config_byte(hwif->pci_dev, 0x48, &reg48h); 834 pci_read_config_byte(hwif->pci_dev, 0x48, &reg48h);
812 ata66 = (reg48h & mask) ? 0 : 1; 835 ata66 = (reg48h & mask) ? 0 : 1;
813 } 836 }
814 return ata66; 837
838 return ata66 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
815} 839}
816 840
817static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif) 841static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
@@ -841,8 +865,8 @@ static void __devinit init_hwif_sis5513 (ide_hwif_t *hwif)
841 if (!chipset_family) 865 if (!chipset_family)
842 return; 866 return;
843 867
844 if (!(hwif->udma_four)) 868 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
845 hwif->udma_four = ata66_sis5513(hwif); 869 hwif->cbl = ata66_sis5513(hwif);
846 870
847 if (chipset_family > ATA_16) { 871 if (chipset_family > ATA_16) {
848 hwif->ide_dma_check = &sis5513_config_xfer_rate; 872 hwif->ide_dma_check = &sis5513_config_xfer_rate;
diff --git a/drivers/ide/pci/sl82c105.c b/drivers/ide/pci/sl82c105.c
index 7c383d9cc472..487879842af4 100644
--- a/drivers/ide/pci/sl82c105.c
+++ b/drivers/ide/pci/sl82c105.c
@@ -195,7 +195,7 @@ static inline void sl82c105_reset_host(struct pci_dev *dev)
195 * This function is called when the IDE timer expires, the drive 195 * This function is called when the IDE timer expires, the drive
196 * indicates that it is READY, and we were waiting for DMA to complete. 196 * indicates that it is READY, and we were waiting for DMA to complete.
197 */ 197 */
198static int sl82c105_ide_dma_lostirq(ide_drive_t *drive) 198static void sl82c105_dma_lost_irq(ide_drive_t *drive)
199{ 199{
200 ide_hwif_t *hwif = HWIF(drive); 200 ide_hwif_t *hwif = HWIF(drive);
201 struct pci_dev *dev = hwif->pci_dev; 201 struct pci_dev *dev = hwif->pci_dev;
@@ -222,9 +222,6 @@ static int sl82c105_ide_dma_lostirq(ide_drive_t *drive)
222 } 222 }
223 223
224 sl82c105_reset_host(dev); 224 sl82c105_reset_host(dev);
225
226 /* __ide_dma_lostirq would return 1, so we do as well */
227 return 1;
228} 225}
229 226
230/* 227/*
@@ -244,15 +241,12 @@ static void sl82c105_dma_start(ide_drive_t *drive)
244 ide_dma_start(drive); 241 ide_dma_start(drive);
245} 242}
246 243
247static int sl82c105_ide_dma_timeout(ide_drive_t *drive) 244static void sl82c105_dma_timeout(ide_drive_t *drive)
248{ 245{
249 ide_hwif_t *hwif = HWIF(drive); 246 DBG(("sl82c105_dma_timeout(drive:%s)\n", drive->name));
250 struct pci_dev *dev = hwif->pci_dev;
251 247
252 DBG(("sl82c105_ide_dma_timeout(drive:%s)\n", drive->name)); 248 sl82c105_reset_host(HWIF(drive)->pci_dev);
253 249 ide_dma_timeout(drive);
254 sl82c105_reset_host(dev);
255 return __ide_dma_timeout(drive);
256} 250}
257 251
258static int sl82c105_ide_dma_on(ide_drive_t *drive) 252static int sl82c105_ide_dma_on(ide_drive_t *drive)
@@ -441,9 +435,9 @@ static void __devinit init_hwif_sl82c105(ide_hwif_t *hwif)
441 hwif->ide_dma_check = &sl82c105_ide_dma_check; 435 hwif->ide_dma_check = &sl82c105_ide_dma_check;
442 hwif->ide_dma_on = &sl82c105_ide_dma_on; 436 hwif->ide_dma_on = &sl82c105_ide_dma_on;
443 hwif->dma_off_quietly = &sl82c105_dma_off_quietly; 437 hwif->dma_off_quietly = &sl82c105_dma_off_quietly;
444 hwif->ide_dma_lostirq = &sl82c105_ide_dma_lostirq; 438 hwif->dma_lost_irq = &sl82c105_dma_lost_irq;
445 hwif->dma_start = &sl82c105_dma_start; 439 hwif->dma_start = &sl82c105_dma_start;
446 hwif->ide_dma_timeout = &sl82c105_ide_dma_timeout; 440 hwif->dma_timeout = &sl82c105_dma_timeout;
447 441
448 if (!noautodma) 442 if (!noautodma)
449 hwif->autodma = 1; 443 hwif->autodma = 1;
diff --git a/drivers/ide/pci/slc90e66.c b/drivers/ide/pci/slc90e66.c
index c40f291f91e0..575dbbd8b482 100644
--- a/drivers/ide/pci/slc90e66.c
+++ b/drivers/ide/pci/slc90e66.c
@@ -199,10 +199,9 @@ static void __devinit init_hwif_slc90e66 (ide_hwif_t *hwif)
199 hwif->mwdma_mask = 0x06; 199 hwif->mwdma_mask = 0x06;
200 hwif->swdma_mask = 0x04; 200 hwif->swdma_mask = 0x04;
201 201
202 if (!hwif->udma_four) { 202 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
203 /* bit[0(1)]: 0:80, 1:40 */ 203 /* bit[0(1)]: 0:80, 1:40 */
204 hwif->udma_four = (reg47 & mask) ? 0 : 1; 204 hwif->cbl = (reg47 & mask) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
205 }
206 205
207 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate; 206 hwif->ide_dma_check = &slc90e66_config_drive_xfer_rate;
208 207
diff --git a/drivers/ide/pci/tc86c001.c b/drivers/ide/pci/tc86c001.c
index cee619bb2eaf..8de1f8e22494 100644
--- a/drivers/ide/pci/tc86c001.c
+++ b/drivers/ide/pci/tc86c001.c
@@ -220,13 +220,13 @@ static void __devinit init_hwif_tc86c001(ide_hwif_t *hwif)
220 hwif->ide_dma_check = &tc86c001_config_drive_xfer_rate; 220 hwif->ide_dma_check = &tc86c001_config_drive_xfer_rate;
221 hwif->dma_start = &tc86c001_dma_start; 221 hwif->dma_start = &tc86c001_dma_start;
222 222
223 if (!hwif->udma_four) { 223 if (hwif->cbl != ATA_CBL_PATA40_SHORT) {
224 /* 224 /*
225 * System Control 1 Register bit 13 (PDIAGN): 225 * System Control 1 Register bit 13 (PDIAGN):
226 * 0=80-pin cable, 1=40-pin cable 226 * 0=80-pin cable, 1=40-pin cable
227 */ 227 */
228 scr1 = hwif->INW(sc_base + 0x00); 228 scr1 = hwif->INW(sc_base + 0x00);
229 hwif->udma_four = (scr1 & 0x2000) ? 0 : 1; 229 hwif->cbl = (scr1 & 0x2000) ? ATA_CBL_PATA40 : ATA_CBL_PATA80;
230 } 230 }
231 231
232 if (!noautodma) 232 if (!noautodma)
diff --git a/drivers/ide/pci/via82cxxx.c b/drivers/ide/pci/via82cxxx.c
index a508550c4095..d21dd2e7eeb3 100644
--- a/drivers/ide/pci/via82cxxx.c
+++ b/drivers/ide/pci/via82cxxx.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * 2 *
3 * Version 3.38 3 * Version 3.45
4 * 4 *
5 * VIA IDE driver for Linux. Supported southbridges: 5 * VIA IDE driver for Linux. Supported southbridges:
6 * 6 *
@@ -9,6 +9,7 @@
9 * vt8235, vt8237, vt8237a 9 * vt8235, vt8237, vt8237a
10 * 10 *
11 * Copyright (c) 2000-2002 Vojtech Pavlik 11 * Copyright (c) 2000-2002 Vojtech Pavlik
12 * Copyright (c) 2007 Bartlomiej Zolnierkiewicz
12 * 13 *
13 * Based on the work of: 14 * Based on the work of:
14 * Michel Aubry 15 * Michel Aubry
@@ -33,6 +34,8 @@
33#include <linux/pci.h> 34#include <linux/pci.h>
34#include <linux/init.h> 35#include <linux/init.h>
35#include <linux/ide.h> 36#include <linux/ide.h>
37#include <linux/dmi.h>
38
36#include <asm/io.h> 39#include <asm/io.h>
37 40
38#ifdef CONFIG_PPC_CHRP 41#ifdef CONFIG_PPC_CHRP
@@ -41,8 +44,6 @@
41 44
42#include "ide-timing.h" 45#include "ide-timing.h"
43 46
44#define DISPLAY_VIA_TIMINGS
45
46#define VIA_IDE_ENABLE 0x40 47#define VIA_IDE_ENABLE 0x40
47#define VIA_IDE_CONFIG 0x41 48#define VIA_IDE_CONFIG 0x41
48#define VIA_FIFO_CONFIG 0x43 49#define VIA_FIFO_CONFIG 0x43
@@ -54,18 +55,12 @@
54#define VIA_ADDRESS_SETUP 0x4c 55#define VIA_ADDRESS_SETUP 0x4c
55#define VIA_UDMA_TIMING 0x50 56#define VIA_UDMA_TIMING 0x50
56 57
57#define VIA_UDMA 0x007 58#define VIA_BAD_PREQ 0x01 /* Crashes if PREQ# till DDACK# set */
58#define VIA_UDMA_NONE 0x000 59#define VIA_BAD_CLK66 0x02 /* 66 MHz clock doesn't work correctly */
59#define VIA_UDMA_33 0x001 60#define VIA_SET_FIFO 0x04 /* Needs to have FIFO split set */
60#define VIA_UDMA_66 0x002 61#define VIA_NO_UNMASK 0x08 /* Doesn't work with IRQ unmasking on */
61#define VIA_UDMA_100 0x003 62#define VIA_BAD_ID 0x10 /* Has wrong vendor ID (0x1107) */
62#define VIA_UDMA_133 0x004 63#define VIA_BAD_AST 0x20 /* Don't touch Address Setup Timing */
63#define VIA_BAD_PREQ 0x010 /* Crashes if PREQ# till DDACK# set */
64#define VIA_BAD_CLK66 0x020 /* 66 MHz clock doesn't work correctly */
65#define VIA_SET_FIFO 0x040 /* Needs to have FIFO split set */
66#define VIA_NO_UNMASK 0x080 /* Doesn't work with IRQ unmasking on */
67#define VIA_BAD_ID 0x100 /* Has wrong vendor ID (0x1107) */
68#define VIA_BAD_AST 0x200 /* Don't touch Address Setup Timing */
69 64
70/* 65/*
71 * VIA SouthBridge chips. 66 * VIA SouthBridge chips.
@@ -76,36 +71,37 @@ static struct via_isa_bridge {
76 u16 id; 71 u16 id;
77 u8 rev_min; 72 u8 rev_min;
78 u8 rev_max; 73 u8 rev_max;
79 u16 flags; 74 u8 udma_mask;
75 u8 flags;
80} via_isa_bridges[] = { 76} via_isa_bridges[] = {
81 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 77 { "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
82 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 78 { "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
83 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 79 { "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
84 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 80 { "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
85 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 81 { "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
86 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 82 { "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
87 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 83 { "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
88 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST }, 84 { "vt8233a", PCI_DEVICE_ID_VIA_8233A, 0x00, 0x2f, ATA_UDMA6, VIA_BAD_AST },
89 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, VIA_UDMA_100 }, 85 { "vt8233c", PCI_DEVICE_ID_VIA_8233C_0, 0x00, 0x2f, ATA_UDMA5, },
90 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, VIA_UDMA_100 }, 86 { "vt8233", PCI_DEVICE_ID_VIA_8233_0, 0x00, 0x2f, ATA_UDMA5, },
91 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, VIA_UDMA_100 }, 87 { "vt8231", PCI_DEVICE_ID_VIA_8231, 0x00, 0x2f, ATA_UDMA5, },
92 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, VIA_UDMA_100 }, 88 { "vt82c686b", PCI_DEVICE_ID_VIA_82C686, 0x40, 0x4f, ATA_UDMA5, },
93 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, VIA_UDMA_66 }, 89 { "vt82c686a", PCI_DEVICE_ID_VIA_82C686, 0x10, 0x2f, ATA_UDMA4, },
94 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 }, 90 { "vt82c686", PCI_DEVICE_ID_VIA_82C686, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
95 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, VIA_UDMA_66 }, 91 { "vt82c596b", PCI_DEVICE_ID_VIA_82C596, 0x10, 0x2f, ATA_UDMA4, },
96 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, VIA_UDMA_33 | VIA_BAD_CLK66 }, 92 { "vt82c596a", PCI_DEVICE_ID_VIA_82C596, 0x00, 0x0f, ATA_UDMA2, VIA_BAD_CLK66 },
97 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, VIA_UDMA_33 | VIA_SET_FIFO }, 93 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x47, 0x4f, ATA_UDMA2, VIA_SET_FIFO },
98 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, VIA_UDMA_33 | VIA_SET_FIFO | VIA_BAD_PREQ }, 94 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x40, 0x46, ATA_UDMA2, VIA_SET_FIFO | VIA_BAD_PREQ },
99 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, VIA_UDMA_33 | VIA_SET_FIFO }, 95 { "vt82c586b", PCI_DEVICE_ID_VIA_82C586_0, 0x30, 0x3f, ATA_UDMA2, VIA_SET_FIFO },
100 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, VIA_UDMA_33 | VIA_SET_FIFO }, 96 { "vt82c586a", PCI_DEVICE_ID_VIA_82C586_0, 0x20, 0x2f, ATA_UDMA2, VIA_SET_FIFO },
101 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, VIA_UDMA_NONE | VIA_SET_FIFO }, 97 { "vt82c586", PCI_DEVICE_ID_VIA_82C586_0, 0x00, 0x0f, 0x00, VIA_SET_FIFO },
102 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK }, 98 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK },
103 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, VIA_UDMA_NONE | VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID }, 99 { "vt82c576", PCI_DEVICE_ID_VIA_82C576, 0x00, 0x2f, 0x00, VIA_SET_FIFO | VIA_NO_UNMASK | VIA_BAD_ID },
104 { NULL } 100 { NULL }
105}; 101};
106 102
107static unsigned int via_clock; 103static unsigned int via_clock;
108static char *via_dma[] = { "MWDMA16", "UDMA33", "UDMA66", "UDMA100", "UDMA133" }; 104static char *via_dma[] = { "16", "25", "33", "44", "66", "100", "133" };
109 105
110struct via82cxxx_dev 106struct via82cxxx_dev
111{ 107{
@@ -140,12 +136,12 @@ static void via_set_speed(ide_hwif_t *hwif, u8 dn, struct ide_timing *timing)
140 pci_write_config_byte(dev, VIA_DRIVE_TIMING + (3 - dn), 136 pci_write_config_byte(dev, VIA_DRIVE_TIMING + (3 - dn),
141 ((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1)); 137 ((FIT(timing->active, 1, 16) - 1) << 4) | (FIT(timing->recover, 1, 16) - 1));
142 138
143 switch (vdev->via_config->flags & VIA_UDMA) { 139 switch (vdev->via_config->udma_mask) {
144 case VIA_UDMA_33: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break; 140 case ATA_UDMA2: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 5) - 2)) : 0x03; break;
145 case VIA_UDMA_66: t = timing->udma ? (0xe8 | (FIT(timing->udma, 2, 9) - 2)) : 0x0f; break; 141 case ATA_UDMA4: t = timing->udma ? (0xe8 | (FIT(timing->udma, 2, 9) - 2)) : 0x0f; break;
146 case VIA_UDMA_100: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break; 142 case ATA_UDMA5: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
147 case VIA_UDMA_133: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break; 143 case ATA_UDMA6: t = timing->udma ? (0xe0 | (FIT(timing->udma, 2, 9) - 2)) : 0x07; break;
148 default: return; 144 default: return;
149 } 145 }
150 146
151 pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t); 147 pci_write_config_byte(dev, VIA_UDMA_TIMING + (3 - dn), t);
@@ -173,12 +169,12 @@ static int via_set_drive(ide_drive_t *drive, u8 speed)
173 169
174 T = 1000000000 / via_clock; 170 T = 1000000000 / via_clock;
175 171
176 switch (vdev->via_config->flags & VIA_UDMA) { 172 switch (vdev->via_config->udma_mask) {
177 case VIA_UDMA_33: UT = T; break; 173 case ATA_UDMA2: UT = T; break;
178 case VIA_UDMA_66: UT = T/2; break; 174 case ATA_UDMA4: UT = T/2; break;
179 case VIA_UDMA_100: UT = T/3; break; 175 case ATA_UDMA5: UT = T/3; break;
180 case VIA_UDMA_133: UT = T/4; break; 176 case ATA_UDMA6: UT = T/4; break;
181 default: UT = T; 177 default: UT = T;
182 } 178 }
183 179
184 ide_timing_compute(drive, speed, &t, T, UT); 180 ide_timing_compute(drive, speed, &t, T, UT);
@@ -208,8 +204,7 @@ static int via_set_drive(ide_drive_t *drive, u8 speed)
208static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio) 204static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
209{ 205{
210 if (pio == 255) { 206 if (pio == 255) {
211 via_set_drive(drive, 207 via_set_drive(drive, ide_find_best_pio_mode(drive));
212 ide_find_best_mode(drive, XFER_PIO | XFER_EPIO));
213 return; 208 return;
214 } 209 }
215 210
@@ -226,16 +221,10 @@ static void via82cxxx_tune_drive(ide_drive_t *drive, u8 pio)
226 221
227static int via82cxxx_ide_dma_check (ide_drive_t *drive) 222static int via82cxxx_ide_dma_check (ide_drive_t *drive)
228{ 223{
229 ide_hwif_t *hwif = HWIF(drive); 224 u8 speed = ide_max_dma_mode(drive);
230 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
231 u16 w80 = hwif->udma_four;
232 225
233 u16 speed = ide_find_best_mode(drive, 226 if (speed == 0)
234 XFER_PIO | XFER_EPIO | XFER_SWDMA | XFER_MWDMA | 227 speed = ide_find_best_pio_mode(drive);
235 (vdev->via_config->flags & VIA_UDMA ? XFER_UDMA : 0) |
236 (w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_66 ? XFER_UDMA_66 : 0) |
237 (w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_100 ? XFER_UDMA_100 : 0) |
238 (w80 && (vdev->via_config->flags & VIA_UDMA) >= VIA_UDMA_133 ? XFER_UDMA_133 : 0));
239 228
240 via_set_drive(drive, speed); 229 via_set_drive(drive, speed);
241 230
@@ -272,8 +261,8 @@ static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
272{ 261{
273 int i; 262 int i;
274 263
275 switch (vdev->via_config->flags & VIA_UDMA) { 264 switch (vdev->via_config->udma_mask) {
276 case VIA_UDMA_66: 265 case ATA_UDMA4:
277 for (i = 24; i >= 0; i -= 8) 266 for (i = 24; i >= 0; i -= 8)
278 if (((u >> (i & 16)) & 8) && 267 if (((u >> (i & 16)) & 8) &&
279 ((u >> i) & 0x20) && 268 ((u >> i) & 0x20) &&
@@ -286,7 +275,7 @@ static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
286 } 275 }
287 break; 276 break;
288 277
289 case VIA_UDMA_100: 278 case ATA_UDMA5:
290 for (i = 24; i >= 0; i -= 8) 279 for (i = 24; i >= 0; i -= 8)
291 if (((u >> i) & 0x10) || 280 if (((u >> i) & 0x10) ||
292 (((u >> i) & 0x20) && 281 (((u >> i) & 0x20) &&
@@ -298,7 +287,7 @@ static void __devinit via_cable_detect(struct via82cxxx_dev *vdev, u32 u)
298 } 287 }
299 break; 288 break;
300 289
301 case VIA_UDMA_133: 290 case ATA_UDMA6:
302 for (i = 24; i >= 0; i -= 8) 291 for (i = 24; i >= 0; i -= 8)
303 if (((u >> i) & 0x10) || 292 if (((u >> i) & 0x10) ||
304 (((u >> i) & 0x20) && 293 (((u >> i) & 0x20) &&
@@ -353,7 +342,7 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
353 342
354 via_cable_detect(vdev, u); 343 via_cable_detect(vdev, u);
355 344
356 if ((via_config->flags & VIA_UDMA) == VIA_UDMA_66) { 345 if (via_config->udma_mask == ATA_UDMA4) {
357 /* Enable Clk66 */ 346 /* Enable Clk66 */
358 pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008); 347 pci_write_config_dword(dev, VIA_UDMA_TIMING, u|0x80008);
359 } else if (via_config->flags & VIA_BAD_CLK66) { 348 } else if (via_config->flags & VIA_BAD_CLK66) {
@@ -416,16 +405,54 @@ static unsigned int __devinit init_chipset_via82cxxx(struct pci_dev *dev, const
416 */ 405 */
417 406
418 pci_read_config_byte(isa, PCI_REVISION_ID, &t); 407 pci_read_config_byte(isa, PCI_REVISION_ID, &t);
419 printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %s " 408 printk(KERN_INFO "VP_IDE: VIA %s (rev %02x) IDE %sDMA%s "
420 "controller on pci%s\n", 409 "controller on pci%s\n",
421 via_config->name, t, 410 via_config->name, t,
422 via_dma[via_config->flags & VIA_UDMA], 411 via_config->udma_mask ? "U" : "MW",
412 via_dma[via_config->udma_mask ?
413 (fls(via_config->udma_mask) - 1) : 0],
423 pci_name(dev)); 414 pci_name(dev));
424 415
425 pci_dev_put(isa); 416 pci_dev_put(isa);
426 return 0; 417 return 0;
427} 418}
428 419
420/*
421 * Cable special cases
422 */
423
424static struct dmi_system_id cable_dmi_table[] = {
425 {
426 .ident = "Acer Ferrari 3400",
427 .matches = {
428 DMI_MATCH(DMI_BOARD_VENDOR, "Acer,Inc."),
429 DMI_MATCH(DMI_BOARD_NAME, "Ferrari 3400"),
430 },
431 },
432 { }
433};
434
435static int via_cable_override(void)
436{
437 /* Systems by DMI */
438 if (dmi_check_system(cable_dmi_table))
439 return 1;
440 return 0;
441}
442
443static u8 __devinit via82cxxx_cable_detect(ide_hwif_t *hwif)
444{
445 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
446
447 if (via_cable_override())
448 return ATA_CBL_PATA40_SHORT;
449
450 if ((vdev->via_80w >> hwif->channel) & 1)
451 return ATA_CBL_PATA80;
452 else
453 return ATA_CBL_PATA40;
454}
455
429static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif) 456static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
430{ 457{
431 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev); 458 struct via82cxxx_dev *vdev = pci_get_drvdata(hwif->pci_dev);
@@ -454,12 +481,14 @@ static void __devinit init_hwif_via82cxxx(ide_hwif_t *hwif)
454 return; 481 return;
455 482
456 hwif->atapi_dma = 1; 483 hwif->atapi_dma = 1;
457 hwif->ultra_mask = 0x7f; 484
485 hwif->ultra_mask = vdev->via_config->udma_mask;
458 hwif->mwdma_mask = 0x07; 486 hwif->mwdma_mask = 0x07;
459 hwif->swdma_mask = 0x07; 487 hwif->swdma_mask = 0x07;
460 488
461 if (!hwif->udma_four) 489 if (hwif->cbl != ATA_CBL_PATA40_SHORT)
462 hwif->udma_four = (vdev->via_80w >> hwif->channel) & 1; 490 hwif->cbl = via82cxxx_cable_detect(hwif);
491
463 hwif->ide_dma_check = &via82cxxx_ide_dma_check; 492 hwif->ide_dma_check = &via82cxxx_ide_dma_check;
464 if (!noautodma) 493 if (!noautodma)
465 hwif->autodma = 1; 494 hwif->autodma = 1;
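As a quick check of the reworked VP_IDE banner in this file, assuming the usual cumulative ATA_UDMA* mask values (0x07, 0x1f, 0x3f, 0x7f): fls(ATA_UDMA2) - 1 = 2 picks "33", fls(ATA_UDMA4) - 1 = 4 picks "66", fls(ATA_UDMA5) - 1 = 5 picks "100", and fls(ATA_UDMA6) - 1 = 6 picks "133" from the new via_dma[] table, while a zero udma_mask falls back to index 0 and the banner reads "MWDMA16".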
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c
index 45fc36f0f219..e46f47206542 100644
--- a/drivers/ide/ppc/pmac.c
+++ b/drivers/ide/ppc/pmac.c
@@ -942,8 +942,8 @@ pmac_ide_tune_chipset (ide_drive_t *drive, byte speed)
942 return 1; 942 return 1;
943 case XFER_UDMA_4: 943 case XFER_UDMA_4:
944 case XFER_UDMA_3: 944 case XFER_UDMA_3:
945 if (HWIF(drive)->udma_four == 0) 945 if (drive->hwif->cbl != ATA_CBL_PATA80)
946 return 1; 946 return 1;
947 case XFER_UDMA_2: 947 case XFER_UDMA_2:
948 case XFER_UDMA_1: 948 case XFER_UDMA_1:
949 case XFER_UDMA_0: 949 case XFER_UDMA_0:
@@ -1244,7 +1244,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
1244 hwif->chipset = ide_pmac; 1244 hwif->chipset = ide_pmac;
1245 hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay; 1245 hwif->noprobe = !hwif->io_ports[IDE_DATA_OFFSET] || pmif->mediabay;
1246 hwif->hold = pmif->mediabay; 1246 hwif->hold = pmif->mediabay;
1247 hwif->udma_four = pmif->cable_80; 1247 hwif->cbl = pmif->cable_80 ? ATA_CBL_PATA80 : ATA_CBL_PATA40;
1248 hwif->drives[0].unmask = 1; 1248 hwif->drives[0].unmask = 1;
1249 hwif->drives[1].unmask = 1; 1249 hwif->drives[1].unmask = 1;
1250 hwif->tuneproc = pmac_ide_tuneproc; 1250 hwif->tuneproc = pmac_ide_tuneproc;
@@ -1821,28 +1821,11 @@ pmac_ide_dma_check(ide_drive_t *drive)
1821 enable = 0; 1821 enable = 0;
1822 1822
1823 if (enable) { 1823 if (enable) {
1824 short mode; 1824 u8 mode = ide_max_dma_mode(drive);
1825 1825
1826 map = XFER_MWDMA; 1826 if (mode >= XFER_UDMA_0)
1827 if (pmif->kind == controller_kl_ata4
1828 || pmif->kind == controller_un_ata6
1829 || pmif->kind == controller_k2_ata6
1830 || pmif->kind == controller_sh_ata6) {
1831 map |= XFER_UDMA;
1832 if (pmif->cable_80) {
1833 map |= XFER_UDMA_66;
1834 if (pmif->kind == controller_un_ata6 ||
1835 pmif->kind == controller_k2_ata6 ||
1836 pmif->kind == controller_sh_ata6)
1837 map |= XFER_UDMA_100;
1838 if (pmif->kind == controller_sh_ata6)
1839 map |= XFER_UDMA_133;
1840 }
1841 }
1842 mode = ide_find_best_mode(drive, map);
1843 if (mode & XFER_UDMA)
1844 drive->using_dma = pmac_ide_udma_enable(drive, mode); 1827 drive->using_dma = pmac_ide_udma_enable(drive, mode);
1845 else if (mode & XFER_MWDMA) 1828 else if (mode >= XFER_MW_DMA_0)
1846 drive->using_dma = pmac_ide_mdma_enable(drive, mode); 1829 drive->using_dma = pmac_ide_mdma_enable(drive, mode);
1847 hwif->OUTB(0, IDE_CONTROL_REG); 1830 hwif->OUTB(0, IDE_CONTROL_REG);
1848 /* Apply settings to controller */ 1831 /* Apply settings to controller */
@@ -2004,20 +1987,19 @@ static void pmac_ide_dma_host_on(ide_drive_t *drive)
2004{ 1987{
2005} 1988}
2006 1989
2007static int 1990static void
2008pmac_ide_dma_lostirq (ide_drive_t *drive) 1991pmac_ide_dma_lost_irq (ide_drive_t *drive)
2009{ 1992{
2010 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data; 1993 pmac_ide_hwif_t* pmif = (pmac_ide_hwif_t *)HWIF(drive)->hwif_data;
2011 volatile struct dbdma_regs __iomem *dma; 1994 volatile struct dbdma_regs __iomem *dma;
2012 unsigned long status; 1995 unsigned long status;
2013 1996
2014 if (pmif == NULL) 1997 if (pmif == NULL)
2015 return 0; 1998 return;
2016 dma = pmif->dma_regs; 1999 dma = pmif->dma_regs;
2017 2000
2018 status = readl(&dma->status); 2001 status = readl(&dma->status);
2019 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status); 2002 printk(KERN_ERR "ide-pmac lost interrupt, dma status: %lx\n", status);
2020 return 0;
2021} 2003}
2022 2004
2023/* 2005/*
@@ -2057,8 +2039,8 @@ pmac_ide_setup_dma(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif)
2057 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq; 2039 hwif->ide_dma_test_irq = &pmac_ide_dma_test_irq;
2058 hwif->dma_host_off = &pmac_ide_dma_host_off; 2040 hwif->dma_host_off = &pmac_ide_dma_host_off;
2059 hwif->dma_host_on = &pmac_ide_dma_host_on; 2041 hwif->dma_host_on = &pmac_ide_dma_host_on;
2060 hwif->ide_dma_timeout = &__ide_dma_timeout; 2042 hwif->dma_timeout = &ide_dma_timeout;
2061 hwif->ide_dma_lostirq = &pmac_ide_dma_lostirq; 2043 hwif->dma_lost_irq = &pmac_ide_dma_lost_irq;
2062 2044
2063 hwif->atapi_dma = 1; 2045 hwif->atapi_dma = 1;
2064 switch(pmif->kind) { 2046 switch(pmif->kind) {
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index 616eee9c04f1..bd601efa7bd1 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -34,6 +34,11 @@ config PHANTOM
34 If you choose to build module, its name will be phantom. If unsure, 34 If you choose to build module, its name will be phantom. If unsure,
35 say N here. 35 say N here.
36 36
37config EEPROM_93CX6
38 tristate "EEPROM 93CX6 support"
39 ---help---
40 This is a driver for the EEPROM chipsets 93c46 and 93c66.
 41 The driver supports both read and write commands.
37 42
38 If unsure, say N. 43 If unsure, say N.
39 44
@@ -187,5 +192,4 @@ config THINKPAD_ACPI_BAY
187 192
188 If you are not sure, say Y here. 193 If you are not sure, say Y here.
189 194
190
191endmenu 195endmenu
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8abbf2f07a65..b5ce0e3dba86 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_PHANTOM) += phantom.o
14obj-$(CONFIG_SGI_IOC4) += ioc4.o 14obj-$(CONFIG_SGI_IOC4) += ioc4.o
15obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o 15obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
16obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o 16obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
17obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
diff --git a/drivers/misc/eeprom_93cx6.c b/drivers/misc/eeprom_93cx6.c
new file mode 100644
index 000000000000..ac515b0ef67c
--- /dev/null
+++ b/drivers/misc/eeprom_93cx6.c
@@ -0,0 +1,241 @@
1/*
2 Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: eeprom_93cx6
23 Abstract: EEPROM reader routines for 93cx6 chipsets.
24 Supported chipsets: 93c46 & 93c66.
25 */
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/delay.h>
31#include <linux/eeprom_93cx6.h>
32
33MODULE_AUTHOR("http://rt2x00.serialmonkey.com");
34MODULE_VERSION("1.0");
35MODULE_DESCRIPTION("EEPROM 93cx6 chip driver");
36MODULE_LICENSE("GPL");
37
38static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom)
39{
40 eeprom->reg_data_clock = 1;
41 eeprom->register_write(eeprom);
42
43 /*
44 * Add a short delay for the pulse to work.
45 * According to the specifications the "maximum minimum"
46 * time should be 450ns.
47 */
48 ndelay(450);
49}
50
51static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
52{
53 eeprom->reg_data_clock = 0;
54 eeprom->register_write(eeprom);
55
56 /*
57 * Add a short delay for the pulse to work.
58 * According to the specifications the minimal time
59 * should be 450ns so a 1us delay is sufficient.
60 */
61 udelay(1);
62}
63
64static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
65{
66 /*
67 * Clear all flags, and enable chip select.
68 */
69 eeprom->register_read(eeprom);
70 eeprom->reg_data_in = 0;
71 eeprom->reg_data_out = 0;
72 eeprom->reg_data_clock = 0;
73 eeprom->reg_chip_select = 1;
74 eeprom->register_write(eeprom);
75
76 /*
77 * kick a pulse.
78 */
79 eeprom_93cx6_pulse_high(eeprom);
80 eeprom_93cx6_pulse_low(eeprom);
81}
82
83static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom)
84{
85 /*
86 * Clear chip_select and data_in flags.
87 */
88 eeprom->register_read(eeprom);
89 eeprom->reg_data_in = 0;
90 eeprom->reg_chip_select = 0;
91 eeprom->register_write(eeprom);
92
93 /*
94 * kick a pulse.
95 */
96 eeprom_93cx6_pulse_high(eeprom);
97 eeprom_93cx6_pulse_low(eeprom);
98}
99
100static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
101 const u16 data, const u16 count)
102{
103 unsigned int i;
104
105 eeprom->register_read(eeprom);
106
107 /*
108 * Clear data flags.
109 */
110 eeprom->reg_data_in = 0;
111 eeprom->reg_data_out = 0;
112
113 /*
114 * Start writing all bits.
115 */
116 for (i = count; i > 0; i--) {
117 /*
118 * Check if this bit needs to be set.
119 */
120 eeprom->reg_data_in = !!(data & (1 << (i - 1)));
121
122 /*
123 * Write the bit to the eeprom register.
124 */
125 eeprom->register_write(eeprom);
126
127 /*
128 * Kick a pulse.
129 */
130 eeprom_93cx6_pulse_high(eeprom);
131 eeprom_93cx6_pulse_low(eeprom);
132 }
133
134 eeprom->reg_data_in = 0;
135 eeprom->register_write(eeprom);
136}
137
138static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
139 u16 *data, const u16 count)
140{
141 unsigned int i;
142 u16 buf = 0;
143
144 eeprom->register_read(eeprom);
145
146 /*
147 * Clear data flags.
148 */
149 eeprom->reg_data_in = 0;
150 eeprom->reg_data_out = 0;
151
152 /*
153 * Start reading all bits.
154 */
155 for (i = count; i > 0; i--) {
156 eeprom_93cx6_pulse_high(eeprom);
157
158 eeprom->register_read(eeprom);
159
160 /*
161 * Clear data_in flag.
162 */
163 eeprom->reg_data_in = 0;
164
165 /*
166 * Read if the bit has been set.
167 */
168 if (eeprom->reg_data_out)
169 buf |= (1 << (i - 1));
170
171 eeprom_93cx6_pulse_low(eeprom);
172 }
173
174 *data = buf;
175}
176
177/**
178 * eeprom_93cx6_read - Read a word from the eeprom
179 * @eeprom: Pointer to eeprom structure
180 * @word: Word index of the word to be read
181 * @data: target pointer where the read word will be stored
182 *
183 * This function will read the requested eeprom word as a
184 * host-endian value into the given data pointer.
185 */
186void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
187 u16 *data)
188{
189 u16 command;
190
191 /*
192 * Initialize the eeprom register
193 */
194 eeprom_93cx6_startup(eeprom);
195
196 /*
197 * Select the read opcode and the word to be read.
198 */
199 command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word;
200 eeprom_93cx6_write_bits(eeprom, command,
201 PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
202
203 /*
204 * Read the requested 16 bits.
205 */
206 eeprom_93cx6_read_bits(eeprom, data, 16);
207
208 /*
209 * Cleanup eeprom register.
210 */
211 eeprom_93cx6_cleanup(eeprom);
212}
213EXPORT_SYMBOL_GPL(eeprom_93cx6_read);
214
215/**
216 * eeprom_93cx6_multiread - Read multiple words from eeprom
217 * @eeprom: Pointer to eeprom structure
218 * @word: Word index from where we should start reading
219 * @data: target pointer where the information will have to be stored
220 * @words: Number of words that should be read.
221 *
222 * This function will read all requested words from the eeprom,
223 * this is done by calling eeprom_93cx6_read() multiple times.
224 * But with the additional change that while the eeprom_93cx6_read
225 * will return host ordered bytes, this method will return little
226 * endian words.
227 */
228void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
229 __le16 *data, const u16 words)
230{
231 unsigned int i;
232 u16 tmp;
233
234 for (i = 0; i < words; i++) {
235 tmp = 0;
236 eeprom_93cx6_read(eeprom, word + i, &tmp);
237 data[i] = cpu_to_le16(tmp);
238 }
239}
240EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
241
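The helpers in this new file expect the caller to supply GPIO-style register accessors and the address width. A rough usage sketch follows; it is not part of this patch, every mydev_*/MYDEV_* name is invented, and the struct eeprom_93cx6 field layout (including a driver-private data pointer) plus the PCI_EEPROM_WIDTH_93C46 constant are assumed to come from <linux/eeprom_93cx6.h>. The rt2x00 drivers this code was split out from follow essentially the same pattern.

#include <linux/types.h>
#include <linux/eeprom_93cx6.h>

struct mydev_priv;					/* invented driver-private data */
static u32 mydev_read32(struct mydev_priv *priv, unsigned int csr);
static void mydev_write32(struct mydev_priv *priv, unsigned int csr, u32 val);

#define MYDEV_EEPROM_CSR		0x00e4		/* invented register offset */
#define MYDEV_EEPROM_DATA_IN		0x0001		/* invented bit layout */
#define MYDEV_EEPROM_DATA_OUT		0x0002
#define MYDEV_EEPROM_DATA_CLOCK		0x0004
#define MYDEV_EEPROM_CHIP_SELECT	0x0008

/* Mirror the hardware pin state into the eeprom_93cx6 flags. */
static void mydev_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
	struct mydev_priv *priv = eeprom->data;
	u32 reg = mydev_read32(priv, MYDEV_EEPROM_CSR);

	eeprom->reg_data_in = !!(reg & MYDEV_EEPROM_DATA_IN);
	eeprom->reg_data_out = !!(reg & MYDEV_EEPROM_DATA_OUT);
	eeprom->reg_data_clock = !!(reg & MYDEV_EEPROM_DATA_CLOCK);
	eeprom->reg_chip_select = !!(reg & MYDEV_EEPROM_CHIP_SELECT);
}

/* Push the eeprom_93cx6 flags back out to the hardware pins. */
static void mydev_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
	struct mydev_priv *priv = eeprom->data;
	u32 reg = 0;

	if (eeprom->reg_data_in)
		reg |= MYDEV_EEPROM_DATA_IN;
	if (eeprom->reg_data_out)
		reg |= MYDEV_EEPROM_DATA_OUT;
	if (eeprom->reg_data_clock)
		reg |= MYDEV_EEPROM_DATA_CLOCK;
	if (eeprom->reg_chip_select)
		reg |= MYDEV_EEPROM_CHIP_SELECT;

	mydev_write32(priv, MYDEV_EEPROM_CSR, reg);
}

/* Read three little-endian words (e.g. a MAC address) starting at word 0. */
static void mydev_fetch_mac(struct mydev_priv *priv, __le16 *mac_words)
{
	struct eeprom_93cx6 eeprom;

	eeprom.data = priv;
	eeprom.register_read = mydev_eeprom_register_read;
	eeprom.register_write = mydev_eeprom_register_write;
	eeprom.width = PCI_EEPROM_WIDTH_93C46;	/* 93c46: 6 address bits */

	eeprom_93cx6_multiread(&eeprom, 0, mac_words, 3);
}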
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index a804965e6542..58bbc3e6d0de 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -107,11 +107,6 @@ MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered mu
107 107
108#define PFX DRV_NAME ": " 108#define PFX DRV_NAME ": "
109 109
110#ifndef TRUE
111#define FALSE 0
112#define TRUE (!FALSE)
113#endif
114
115#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \ 110#define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
116 NETIF_MSG_PROBE | \ 111 NETIF_MSG_PROBE | \
117 NETIF_MSG_LINK) 112 NETIF_MSG_LINK)
@@ -661,7 +656,7 @@ static irqreturn_t cp_interrupt (int irq, void *dev_instance)
661 if (status & (TxOK | TxErr | TxEmpty | SWInt)) 656 if (status & (TxOK | TxErr | TxEmpty | SWInt))
662 cp_tx(cp); 657 cp_tx(cp);
663 if (status & LinkChg) 658 if (status & LinkChg)
664 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE); 659 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
665 660
666 spin_unlock(&cp->lock); 661 spin_unlock(&cp->lock);
667 662
@@ -1188,7 +1183,7 @@ static int cp_open (struct net_device *dev)
1188 goto err_out_hw; 1183 goto err_out_hw;
1189 1184
1190 netif_carrier_off(dev); 1185 netif_carrier_off(dev);
1191 mii_check_media(&cp->mii_if, netif_msg_link(cp), TRUE); 1186 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1192 netif_start_queue(dev); 1187 netif_start_queue(dev);
1193 1188
1194 return 0; 1189 return 0;
@@ -2050,7 +2045,7 @@ static int cp_resume (struct pci_dev *pdev)
2050 2045
2051 spin_lock_irqsave (&cp->lock, flags); 2046 spin_lock_irqsave (&cp->lock, flags);
2052 2047
2053 mii_check_media(&cp->mii_if, netif_msg_link(cp), FALSE); 2048 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2054 2049
2055 spin_unlock_irqrestore (&cp->lock, flags); 2050 spin_unlock_irqrestore (&cp->lock, flags);
2056 2051
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index b49375abb5f4..5cc3d517e39b 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -3,10 +3,7 @@
3# Network device configuration 3# Network device configuration
4# 4#
5 5
6menu "Network device support" 6menuconfig NETDEVICES
7 depends on NET
8
9config NETDEVICES
10 default y if UML 7 default y if UML
11 bool "Network device support" 8 bool "Network device support"
12 ---help--- 9 ---help---
@@ -151,11 +148,9 @@ source "drivers/net/phy/Kconfig"
151# Ethernet 148# Ethernet
152# 149#
153 150
154menu "Ethernet (10 or 100Mbit)" 151menuconfig NET_ETHERNET
155 depends on !UML
156
157config NET_ETHERNET
158 bool "Ethernet (10 or 100Mbit)" 152 bool "Ethernet (10 or 100Mbit)"
153 depends on !UML
159 ---help--- 154 ---help---
160 Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common 155 Ethernet (also called IEEE 802.3 or ISO 8802-2) is the most common
161 type of Local Area Network (LAN) in universities and companies. 156 type of Local Area Network (LAN) in universities and companies.
@@ -180,9 +175,10 @@ config NET_ETHERNET
180 kernel: saying N will just cause the configurator to skip all 175 kernel: saying N will just cause the configurator to skip all
181 the questions about Ethernet network cards. If unsure, say N. 176 the questions about Ethernet network cards. If unsure, say N.
182 177
178if NET_ETHERNET
179
183config MII 180config MII
184 tristate "Generic Media Independent Interface device support" 181 tristate "Generic Media Independent Interface device support"
185 depends on NET_ETHERNET
186 help 182 help
187 Most ethernet controllers have MII transceiver either as an external 183 Most ethernet controllers have MII transceiver either as an external
188 or internal device. It is safe to say Y or M here even if your 184 or internal device. It is safe to say Y or M here even if your
@@ -190,7 +186,7 @@ config MII
190 186
191config MACB 187config MACB
192 tristate "Atmel MACB support" 188 tristate "Atmel MACB support"
193 depends on NET_ETHERNET && (AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263) 189 depends on AVR32 || ARCH_AT91SAM9260 || ARCH_AT91SAM9263
194 select MII 190 select MII
195 help 191 help
196 The Atmel MACB ethernet interface is found on many AT32 and AT91 192 The Atmel MACB ethernet interface is found on many AT32 and AT91
@@ -203,7 +199,7 @@ source "drivers/net/arm/Kconfig"
203 199
204config MACE 200config MACE
205 tristate "MACE (Power Mac ethernet) support" 201 tristate "MACE (Power Mac ethernet) support"
206 depends on NET_ETHERNET && PPC_PMAC && PPC32 202 depends on PPC_PMAC && PPC32
207 select CRC32 203 select CRC32
208 help 204 help
209 Power Macintoshes and clones with Ethernet built-in on the 205 Power Macintoshes and clones with Ethernet built-in on the
@@ -226,7 +222,7 @@ config MACE_AAUI_PORT
226 222
227config BMAC 223config BMAC
228 tristate "BMAC (G3 ethernet) support" 224 tristate "BMAC (G3 ethernet) support"
229 depends on NET_ETHERNET && PPC_PMAC && PPC32 225 depends on PPC_PMAC && PPC32
230 select CRC32 226 select CRC32
231 help 227 help
232 Say Y for support of BMAC Ethernet interfaces. These are used on G3 228 Say Y for support of BMAC Ethernet interfaces. These are used on G3
@@ -237,7 +233,7 @@ config BMAC
237 233
238config ARIADNE 234config ARIADNE
239 tristate "Ariadne support" 235 tristate "Ariadne support"
240 depends on NET_ETHERNET && ZORRO 236 depends on ZORRO
241 help 237 help
242 If you have a Village Tronic Ariadne Ethernet adapter, say Y. 238 If you have a Village Tronic Ariadne Ethernet adapter, say Y.
243 Otherwise, say N. 239 Otherwise, say N.
@@ -247,7 +243,7 @@ config ARIADNE
247 243
248config A2065 244config A2065
249 tristate "A2065 support" 245 tristate "A2065 support"
250 depends on NET_ETHERNET && ZORRO 246 depends on ZORRO
251 select CRC32 247 select CRC32
252 help 248 help
253 If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise, 249 If you have a Commodore A2065 Ethernet adapter, say Y. Otherwise,
@@ -258,7 +254,7 @@ config A2065
258 254
259config HYDRA 255config HYDRA
260 tristate "Hydra support" 256 tristate "Hydra support"
261 depends on NET_ETHERNET && ZORRO 257 depends on ZORRO
262 select CRC32 258 select CRC32
263 help 259 help
264 If you have a Hydra Ethernet adapter, say Y. Otherwise, say N. 260 If you have a Hydra Ethernet adapter, say Y. Otherwise, say N.
@@ -268,7 +264,7 @@ config HYDRA
268 264
269config ZORRO8390 265config ZORRO8390
270 tristate "Zorro NS8390-based Ethernet support" 266 tristate "Zorro NS8390-based Ethernet support"
271 depends on NET_ETHERNET && ZORRO 267 depends on ZORRO
272 select CRC32 268 select CRC32
273 help 269 help
274 This driver is for Zorro Ethernet cards using an NS8390-compatible 270 This driver is for Zorro Ethernet cards using an NS8390-compatible
@@ -281,7 +277,7 @@ config ZORRO8390
281 277
282config APNE 278config APNE
283 tristate "PCMCIA NE2000 support" 279 tristate "PCMCIA NE2000 support"
284 depends on NET_ETHERNET && AMIGA_PCMCIA 280 depends on AMIGA_PCMCIA
285 select CRC32 281 select CRC32
286 help 282 help
287 If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise, 283 If you have a PCMCIA NE2000 compatible adapter, say Y. Otherwise,
@@ -292,7 +288,7 @@ config APNE
292 288
293config APOLLO_ELPLUS 289config APOLLO_ELPLUS
294 tristate "Apollo 3c505 support" 290 tristate "Apollo 3c505 support"
295 depends on NET_ETHERNET && APOLLO 291 depends on APOLLO
296 help 292 help
297 Say Y or M here if your Apollo has a 3Com 3c505 ISA Ethernet card. 293 Say Y or M here if your Apollo has a 3Com 3c505 ISA Ethernet card.
298 If you don't have one made for Apollos, you can use one from a PC, 294 If you don't have one made for Apollos, you can use one from a PC,
@@ -301,7 +297,7 @@ config APOLLO_ELPLUS
301 297
302config MAC8390 298config MAC8390
303 bool "Macintosh NS 8390 based ethernet cards" 299 bool "Macintosh NS 8390 based ethernet cards"
304 depends on NET_ETHERNET && MAC 300 depends on MAC
305 select CRC32 301 select CRC32
306 help 302 help
307 If you want to include a driver to support Nubus or LC-PDS 303 If you want to include a driver to support Nubus or LC-PDS
@@ -311,7 +307,7 @@ config MAC8390
311 307
312config MAC89x0 308config MAC89x0
313 tristate "Macintosh CS89x0 based ethernet cards" 309 tristate "Macintosh CS89x0 based ethernet cards"
314 depends on NET_ETHERNET && MAC 310 depends on MAC
315 ---help--- 311 ---help---
316 Support for CS89x0 chipset based Ethernet cards. If you have a 312 Support for CS89x0 chipset based Ethernet cards. If you have a
317 Nubus or LC-PDS network (Ethernet) card of this type, say Y and 313 Nubus or LC-PDS network (Ethernet) card of this type, say Y and
@@ -324,7 +320,7 @@ config MAC89x0
324 320
325config MACSONIC 321config MACSONIC
326 tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" 322 tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)"
327 depends on NET_ETHERNET && MAC 323 depends on MAC
328 ---help--- 324 ---help---
329 Support for NatSemi SONIC based Ethernet devices. This includes 325 Support for NatSemi SONIC based Ethernet devices. This includes
330 the onboard Ethernet in many Quadras as well as some LC-PDS, 326 the onboard Ethernet in many Quadras as well as some LC-PDS,
@@ -338,7 +334,7 @@ config MACSONIC
338 334
339config MACMACE 335config MACMACE
340 bool "Macintosh (AV) onboard MACE ethernet" 336 bool "Macintosh (AV) onboard MACE ethernet"
341 depends on NET_ETHERNET && MAC 337 depends on MAC
342 select CRC32 338 select CRC32
343 help 339 help
344 Support for the onboard AMD 79C940 MACE Ethernet controller used in 340 Support for the onboard AMD 79C940 MACE Ethernet controller used in
@@ -348,7 +344,7 @@ config MACMACE
348 344
349config MVME147_NET 345config MVME147_NET
350 tristate "MVME147 (Lance) Ethernet support" 346 tristate "MVME147 (Lance) Ethernet support"
351 depends on NET_ETHERNET && MVME147 347 depends on MVME147
352 select CRC32 348 select CRC32
353 help 349 help
354 Support for the on-board Ethernet interface on the Motorola MVME147 350 Support for the on-board Ethernet interface on the Motorola MVME147
@@ -358,7 +354,7 @@ config MVME147_NET
358 354
359config MVME16x_NET 355config MVME16x_NET
360 tristate "MVME16x Ethernet support" 356 tristate "MVME16x Ethernet support"
361 depends on NET_ETHERNET && MVME16x 357 depends on MVME16x
362 help 358 help
363 This is the driver for the Ethernet interface on the Motorola 359 This is the driver for the Ethernet interface on the Motorola
364 MVME162, 166, 167, 172 and 177 boards. Say Y here to include the 360 MVME162, 166, 167, 172 and 177 boards. Say Y here to include the
@@ -367,7 +363,7 @@ config MVME16x_NET
367 363
368config BVME6000_NET 364config BVME6000_NET
369 tristate "BVME6000 Ethernet support" 365 tristate "BVME6000 Ethernet support"
370 depends on NET_ETHERNET && BVME6000 366 depends on BVME6000
371 help 367 help
372 This is the driver for the Ethernet interface on BVME4000 and 368 This is the driver for the Ethernet interface on BVME4000 and
373 BVME6000 VME boards. Say Y here to include the driver for this chip 369 BVME6000 VME boards. Say Y here to include the driver for this chip
@@ -376,7 +372,7 @@ config BVME6000_NET
376 372
377config ATARILANCE 373config ATARILANCE
378 tristate "Atari Lance support" 374 tristate "Atari Lance support"
379 depends on NET_ETHERNET && ATARI 375 depends on ATARI
380 help 376 help
381 Say Y to include support for several Atari Ethernet adapters based 377 Say Y to include support for several Atari Ethernet adapters based
382 on the AMD Lance chipset: RieblCard (with or without battery), or 378 on the AMD Lance chipset: RieblCard (with or without battery), or
@@ -384,7 +380,7 @@ config ATARILANCE
384 380
385config ATARI_BIONET 381config ATARI_BIONET
386 tristate "BioNet-100 support" 382 tristate "BioNet-100 support"
387 depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN 383 depends on ATARI && ATARI_ACSI && BROKEN
388 help 384 help
389 Say Y to include support for BioData's BioNet-100 Ethernet adapter 385 Say Y to include support for BioData's BioNet-100 Ethernet adapter
390 for the ACSI port. The driver works (has to work...) with a polled 386 for the ACSI port. The driver works (has to work...) with a polled
@@ -392,7 +388,7 @@ config ATARI_BIONET
392 388
393config ATARI_PAMSNET 389config ATARI_PAMSNET
394 tristate "PAMsNet support" 390 tristate "PAMsNet support"
395 depends on NET_ETHERNET && ATARI && ATARI_ACSI && BROKEN 391 depends on ATARI && ATARI_ACSI && BROKEN
396 help 392 help
397 Say Y to include support for the PAMsNet Ethernet adapter for the 393 Say Y to include support for the PAMsNet Ethernet adapter for the
398 ACSI port ("ACSI node"). The driver works (has to work...) with a 394 ACSI port ("ACSI node"). The driver works (has to work...) with a
@@ -400,7 +396,7 @@ config ATARI_PAMSNET
400 396
401config SUN3LANCE 397config SUN3LANCE
402 tristate "Sun3/Sun3x on-board LANCE support" 398 tristate "Sun3/Sun3x on-board LANCE support"
403 depends on NET_ETHERNET && (SUN3 || SUN3X) 399 depends on SUN3 || SUN3X
404 help 400 help
405 Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80) 401 Most Sun3 and Sun3x motherboards (including the 3/50, 3/60 and 3/80)
406 featured an AMD Lance 10Mbit Ethernet controller on board; say Y 402 featured an AMD Lance 10Mbit Ethernet controller on board; say Y
@@ -413,7 +409,7 @@ config SUN3LANCE
413 409
414config SUN3_82586 410config SUN3_82586
415 bool "Sun3 on-board Intel 82586 support" 411 bool "Sun3 on-board Intel 82586 support"
416 depends on NET_ETHERNET && SUN3 412 depends on SUN3
417 help 413 help
418 This driver enables support for the on-board Intel 82586 based 414 This driver enables support for the on-board Intel 82586 based
419 Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note 415 Ethernet adapter found on Sun 3/1xx and 3/2xx motherboards. Note
@@ -422,7 +418,7 @@ config SUN3_82586
422 418
423config HPLANCE 419config HPLANCE
424 bool "HP on-board LANCE support" 420 bool "HP on-board LANCE support"
425 depends on NET_ETHERNET && DIO 421 depends on DIO
426 select CRC32 422 select CRC32
427 help 423 help
428 If you want to use the builtin "LANCE" Ethernet controller on an 424 If you want to use the builtin "LANCE" Ethernet controller on an
@@ -430,21 +426,28 @@ config HPLANCE
430 426
431config LASI_82596 427config LASI_82596
432 tristate "Lasi ethernet" 428 tristate "Lasi ethernet"
433 depends on NET_ETHERNET && GSC 429 depends on GSC
434 help 430 help
435 Say Y here to support the builtin Intel 82596 ethernet controller 431 Say Y here to support the builtin Intel 82596 ethernet controller
436 found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet. 432 found in Hewlett-Packard PA-RISC machines with 10Mbit ethernet.
437 433
434config SNI_82596
435 tristate "SNI RM ethernet"
436 depends on NET_ETHERNET && SNI_RM
437 help
438 Say Y here to support the on-board Intel 82596 ethernet controller
439 built into SNI RM machines.
440
438config MIPS_JAZZ_SONIC 441config MIPS_JAZZ_SONIC
439 tristate "MIPS JAZZ onboard SONIC Ethernet support" 442 tristate "MIPS JAZZ onboard SONIC Ethernet support"
440 depends on NET_ETHERNET && MACH_JAZZ 443 depends on MACH_JAZZ
441 help 444 help
442 This is the driver for the onboard card of MIPS Magnum 4000, 445 This is the driver for the onboard card of MIPS Magnum 4000,
443 Acer PICA, Olivetti M700-10 and a few other identical OEM systems. 446 Acer PICA, Olivetti M700-10 and a few other identical OEM systems.
444 447
445config MIPS_AU1X00_ENET 448config MIPS_AU1X00_ENET
446 bool "MIPS AU1000 Ethernet support" 449 bool "MIPS AU1000 Ethernet support"
447 depends on NET_ETHERNET && SOC_AU1X00 450 depends on SOC_AU1X00
448 select PHYLIB 451 select PHYLIB
449 select CRC32 452 select CRC32
450 help 453 help
@@ -453,11 +456,11 @@ config MIPS_AU1X00_ENET
453 456
454config NET_SB1250_MAC 457config NET_SB1250_MAC
455 tristate "SB1250 Ethernet support" 458 tristate "SB1250 Ethernet support"
456 depends on NET_ETHERNET && SIBYTE_SB1xxx_SOC 459 depends on SIBYTE_SB1xxx_SOC
457 460
458config SGI_IOC3_ETH 461config SGI_IOC3_ETH
459 bool "SGI IOC3 Ethernet" 462 bool "SGI IOC3 Ethernet"
460 depends on NET_ETHERNET && PCI && SGI_IP27 463 depends on PCI && SGI_IP27
461 select CRC32 464 select CRC32
462 select MII 465 select MII
463 help 466 help
@@ -487,7 +490,7 @@ config SGI_IOC3_ETH_HW_TX_CSUM
487 490
488config MIPS_SIM_NET 491config MIPS_SIM_NET
489 tristate "MIPS simulator Network device" 492 tristate "MIPS simulator Network device"
490 depends on NET_ETHERNET && MIPS_SIM 493 depends on MIPS_SIM
491 help 494 help
492 The MIPSNET device is a simple Ethernet network device which is 495 The MIPSNET device is a simple Ethernet network device which is
493 emulated by the MIPS Simulator. 496 emulated by the MIPS Simulator.
@@ -495,11 +498,11 @@ config MIPS_SIM_NET
495 498
496config SGI_O2MACE_ETH 499config SGI_O2MACE_ETH
497 tristate "SGI O2 MACE Fast Ethernet support" 500 tristate "SGI O2 MACE Fast Ethernet support"
498 depends on NET_ETHERNET && SGI_IP32=y 501 depends on SGI_IP32=y
499 502
500config STNIC 503config STNIC
501 tristate "National DP83902AV support" 504 tristate "National DP83902AV support"
502 depends on NET_ETHERNET && SUPERH 505 depends on SUPERH
503 select CRC32 506 select CRC32
504 help 507 help
505 Support for cards based on the National Semiconductor DP83902AV 508 Support for cards based on the National Semiconductor DP83902AV
@@ -511,7 +514,7 @@ config STNIC
511 514
512config SUNLANCE 515config SUNLANCE
513 tristate "Sun LANCE support" 516 tristate "Sun LANCE support"
514 depends on NET_ETHERNET && SBUS 517 depends on SBUS
515 select CRC32 518 select CRC32
516 help 519 help
517 This driver supports the "le" interface present on all 32-bit Sparc 520 This driver supports the "le" interface present on all 32-bit Sparc
@@ -524,7 +527,7 @@ config SUNLANCE
524 527
525config HAPPYMEAL 528config HAPPYMEAL
526 tristate "Sun Happy Meal 10/100baseT support" 529 tristate "Sun Happy Meal 10/100baseT support"
527 depends on NET_ETHERNET && (SBUS || PCI) 530 depends on SBUS || PCI
528 select CRC32 531 select CRC32
529 help 532 help
530 This driver supports the "hme" interface present on most Ultra 533 This driver supports the "hme" interface present on most Ultra
@@ -537,7 +540,7 @@ config HAPPYMEAL
537 540
538config SUNBMAC 541config SUNBMAC
539 tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)" 542 tristate "Sun BigMAC 10/100baseT support (EXPERIMENTAL)"
540 depends on NET_ETHERNET && SBUS && EXPERIMENTAL 543 depends on SBUS && EXPERIMENTAL
541 select CRC32 544 select CRC32
542 help 545 help
543 This driver supports the "be" interface available as an Sbus option. 546 This driver supports the "be" interface available as an Sbus option.
@@ -548,7 +551,7 @@ config SUNBMAC
548 551
549config SUNQE 552config SUNQE
550 tristate "Sun QuadEthernet support" 553 tristate "Sun QuadEthernet support"
551 depends on NET_ETHERNET && SBUS 554 depends on SBUS
552 select CRC32 555 select CRC32
553 help 556 help
554 This driver supports the "qe" 10baseT Ethernet device, available as 557 This driver supports the "qe" 10baseT Ethernet device, available as
@@ -560,7 +563,7 @@ config SUNQE
560 563
561config SUNGEM 564config SUNGEM
562 tristate "Sun GEM support" 565 tristate "Sun GEM support"
563 depends on NET_ETHERNET && PCI 566 depends on PCI
564 select CRC32 567 select CRC32
565 help 568 help
566 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also 569 Support for the Sun GEM chip, aka Sun GigabitEthernet/P 2.0. See also
@@ -568,7 +571,7 @@ config SUNGEM
568 571
569config CASSINI 572config CASSINI
570 tristate "Sun Cassini support" 573 tristate "Sun Cassini support"
571 depends on NET_ETHERNET && PCI 574 depends on PCI
572 select CRC32 575 select CRC32
573 help 576 help
574 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also 577 Support for the Sun Cassini chip, aka Sun GigaSwift Ethernet. See also
@@ -576,7 +579,7 @@ config CASSINI
576 579
577config NET_VENDOR_3COM 580config NET_VENDOR_3COM
578 bool "3COM cards" 581 bool "3COM cards"
579 depends on NET_ETHERNET && (ISA || EISA || MCA || PCI) 582 depends on ISA || EISA || MCA || PCI
580 help 583 help
581 If you have a network (Ethernet) card belonging to this class, say Y 584 If you have a network (Ethernet) card belonging to this class, say Y
582 and read the Ethernet-HOWTO, available from 585 and read the Ethernet-HOWTO, available from
@@ -736,7 +739,7 @@ config TYPHOON
736 739
737config LANCE 740config LANCE
738 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" 741 tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
739 depends on NET_ETHERNET && ISA && ISA_DMA_API 742 depends on ISA && ISA_DMA_API
740 help 743 help
741 If you have a network (Ethernet) card of this type, say Y and read 744 If you have a network (Ethernet) card of this type, say Y and read
742 the Ethernet-HOWTO, available from 745 the Ethernet-HOWTO, available from
@@ -748,7 +751,7 @@ config LANCE
748 751
749config NET_VENDOR_SMC 752config NET_VENDOR_SMC
750 bool "Western Digital/SMC cards" 753 bool "Western Digital/SMC cards"
751 depends on NET_ETHERNET && (ISA || MCA || EISA || MAC) 754 depends on ISA || MCA || EISA || MAC
752 help 755 help
753 If you have a network (Ethernet) card belonging to this class, say Y 756 If you have a network (Ethernet) card belonging to this class, say Y
754 and read the Ethernet-HOWTO, available from 757 and read the Ethernet-HOWTO, available from
@@ -818,11 +821,27 @@ config ULTRA32
818 <file:Documentation/networking/net-modules.txt>. The module 821 <file:Documentation/networking/net-modules.txt>. The module
819 will be called smc-ultra32. 822 will be called smc-ultra32.
820 823
824config SMC9194
825 tristate "SMC 9194 support"
826 depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
827 select CRC32
828 ---help---
829 This is support for the SMC9xxx based Ethernet cards. Choose this
830 option if you have a DELL laptop with the docking station, or
831 another SMC9192/9194 based chipset. Say Y if you want it compiled
832 into the kernel, and read the file
833 <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
834 available from <http://www.tldp.org/docs.html#howto>.
835
836 To compile this driver as a module, choose M here and read
837 <file:Documentation/networking/net-modules.txt>. The module
838 will be called smc9194.
839
821config SMC91X 840config SMC91X
822 tristate "SMC 91C9x/91C1xxx support" 841 tristate "SMC 91C9x/91C1xxx support"
823 select CRC32 842 select CRC32
824 select MII 843 select MII
825 depends on NET_ETHERNET && (ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN) 844 depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BFIN
826 help 845 help
827 This is a driver for SMC's 91x series of Ethernet chipsets, 846 This is a driver for SMC's 91x series of Ethernet chipsets,
828 including the SMC91C94 and the SMC91C111. Say Y if you want it 847 including the SMC91C94 and the SMC91C111. Say Y if you want it
@@ -836,26 +855,10 @@ config SMC91X
836 module, say M here and read <file:Documentation/kbuild/modules.txt> 855 module, say M here and read <file:Documentation/kbuild/modules.txt>
837 as well as <file:Documentation/networking/net-modules.txt>. 856 as well as <file:Documentation/networking/net-modules.txt>.
838 857
839config SMC9194
840 tristate "SMC 9194 support"
841 depends on NET_VENDOR_SMC && (ISA || MAC && BROKEN)
842 select CRC32
843 ---help---
844 This is support for the SMC9xxx based Ethernet cards. Choose this
845 option if you have a DELL laptop with the docking station, or
846 another SMC9192/9194 based chipset. Say Y if you want it compiled
847 into the kernel, and read the file
848 <file:Documentation/networking/smc9.txt> and the Ethernet-HOWTO,
849 available from <http://www.tldp.org/docs.html#howto>.
850
851 To compile this driver as a module, choose M here and read
852 <file:Documentation/networking/net-modules.txt>. The module
853 will be called smc9194.
854
855config NET_NETX 858config NET_NETX
856 tristate "NetX Ethernet support" 859 tristate "NetX Ethernet support"
857 select MII 860 select MII
858 depends on NET_ETHERNET && ARCH_NETX 861 depends on ARCH_NETX
859 help 862 help
860 This is support for the Hilscher netX builtin Ethernet ports 863 This is support for the Hilscher netX builtin Ethernet ports
861 864
@@ -865,7 +868,7 @@ config NET_NETX
865 868
866config DM9000 869config DM9000
867 tristate "DM9000 support" 870 tristate "DM9000 support"
868 depends on (ARM || MIPS) && NET_ETHERNET 871 depends on ARM || MIPS
869 select CRC32 872 select CRC32
870 select MII 873 select MII
871 ---help--- 874 ---help---
@@ -879,7 +882,7 @@ config SMC911X
879 tristate "SMSC LAN911[5678] support" 882 tristate "SMSC LAN911[5678] support"
880 select CRC32 883 select CRC32
881 select MII 884 select MII
882 depends on NET_ETHERNET && ARCH_PXA 885 depends on ARCH_PXA
883 help 886 help
884 This is a driver for SMSC's LAN911x series of Ethernet chipsets 887 This is a driver for SMSC's LAN911x series of Ethernet chipsets
885 including the new LAN9115, LAN9116, LAN9117, and LAN9118. 888 including the new LAN9115, LAN9116, LAN9117, and LAN9118.
@@ -893,7 +896,7 @@ config SMC911X
893 896
894config NET_VENDOR_RACAL 897config NET_VENDOR_RACAL
895 bool "Racal-Interlan (Micom) NI cards" 898 bool "Racal-Interlan (Micom) NI cards"
896 depends on NET_ETHERNET && ISA 899 depends on ISA
897 help 900 help
898 If you have a network (Ethernet) card belonging to this class, such 901 If you have a network (Ethernet) card belonging to this class, such
899 as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO, 902 as the NI5010, NI5210 or NI6210, say Y and read the Ethernet-HOWTO,
@@ -945,7 +948,7 @@ source "drivers/net/tulip/Kconfig"
945 948
946config AT1700 949config AT1700
947 tristate "AT1700/1720 support (EXPERIMENTAL)" 950 tristate "AT1700/1720 support (EXPERIMENTAL)"
948 depends on NET_ETHERNET && (ISA || MCA_LEGACY) && EXPERIMENTAL 951 depends on (ISA || MCA_LEGACY) && EXPERIMENTAL
949 select CRC32 952 select CRC32
950 ---help--- 953 ---help---
951 If you have a network (Ethernet) card of this type, say Y and read 954 If you have a network (Ethernet) card of this type, say Y and read
@@ -958,7 +961,7 @@ config AT1700
958 961
959config DEPCA 962config DEPCA
960 tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support" 963 tristate "DEPCA, DE10x, DE200, DE201, DE202, DE422 support"
961 depends on NET_ETHERNET && (ISA || EISA || MCA) 964 depends on ISA || EISA || MCA
962 select CRC32 965 select CRC32
963 ---help--- 966 ---help---
964 If you have a network (Ethernet) card of this type, say Y and read 967 If you have a network (Ethernet) card of this type, say Y and read
@@ -972,7 +975,7 @@ config DEPCA
972 975
973config HP100 976config HP100
974 tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support" 977 tristate "HP 10/100VG PCLAN (ISA, EISA, PCI) support"
975 depends on NET_ETHERNET && (ISA || EISA || PCI) 978 depends on ISA || EISA || PCI
976 help 979 help
977 If you have a network (Ethernet) card of this type, say Y and read 980 If you have a network (Ethernet) card of this type, say Y and read
978 the Ethernet-HOWTO, available from 981 the Ethernet-HOWTO, available from
@@ -984,7 +987,7 @@ config HP100
984 987
985config NET_ISA 988config NET_ISA
986 bool "Other ISA cards" 989 bool "Other ISA cards"
987 depends on NET_ETHERNET && ISA 990 depends on ISA
988 ---help--- 991 ---help---
989 If your network (Ethernet) card hasn't been mentioned yet and its 992 If your network (Ethernet) card hasn't been mentioned yet and its
990 bus system (that's the way the cards talks to the other components 993 bus system (that's the way the cards talks to the other components
@@ -1147,7 +1150,7 @@ config SEEQ8005
1147 1150
1148config NE2_MCA 1151config NE2_MCA
1149 tristate "NE/2 (ne2000 MCA version) support" 1152 tristate "NE/2 (ne2000 MCA version) support"
1150 depends on NET_ETHERNET && MCA_LEGACY 1153 depends on MCA_LEGACY
1151 select CRC32 1154 select CRC32
1152 help 1155 help
1153 If you have a network (Ethernet) card of this type, say Y and read 1156 If you have a network (Ethernet) card of this type, say Y and read
@@ -1160,7 +1163,7 @@ config NE2_MCA
1160 1163
1161config IBMLANA 1164config IBMLANA
1162 tristate "IBM LAN Adapter/A support" 1165 tristate "IBM LAN Adapter/A support"
1163 depends on NET_ETHERNET && MCA && MCA_LEGACY 1166 depends on MCA && MCA_LEGACY
1164 ---help--- 1167 ---help---
1165 This is a Micro Channel Ethernet adapter. You need to set 1168 This is a Micro Channel Ethernet adapter. You need to set
1166 CONFIG_MCA to use this driver. It is both available as an in-kernel 1169 CONFIG_MCA to use this driver. It is both available as an in-kernel
@@ -1176,7 +1179,7 @@ config IBMLANA
1176 1179
1177config IBMVETH 1180config IBMVETH
1178 tristate "IBM LAN Virtual Ethernet support" 1181 tristate "IBM LAN Virtual Ethernet support"
1179 depends on NET_ETHERNET && PPC_PSERIES 1182 depends on PPC_PSERIES
1180 ---help--- 1183 ---help---
1181 This driver supports virtual ethernet adapters on newer IBM iSeries 1184 This driver supports virtual ethernet adapters on newer IBM iSeries
1182 and pSeries systems. 1185 and pSeries systems.
@@ -1257,7 +1260,7 @@ config IBM_EMAC_TAH
1257 1260
1258config NET_PCI 1261config NET_PCI
1259 bool "EISA, VLB, PCI and on board controllers" 1262 bool "EISA, VLB, PCI and on board controllers"
1260 depends on NET_ETHERNET && (ISA || EISA || PCI) 1263 depends on ISA || EISA || PCI
1261 help 1264 help
1262 This is another class of network cards which attach directly to the 1265 This is another class of network cards which attach directly to the
1263 bus. If you have one of those, say Y and read the Ethernet-HOWTO, 1266 bus. If you have one of those, say Y and read the Ethernet-HOWTO,
@@ -1313,6 +1316,7 @@ config AMD8111_ETH
1313 To compile this driver as a module, choose M here and read 1316 To compile this driver as a module, choose M here and read
1314 <file:Documentation/networking/net-modules.txt>. The module 1317 <file:Documentation/networking/net-modules.txt>. The module
1315 will be called amd8111e. 1318 will be called amd8111e.
1319
1316config AMD8111E_NAPI 1320config AMD8111E_NAPI
1317 bool "Enable NAPI support" 1321 bool "Enable NAPI support"
1318 depends on AMD8111_ETH 1322 depends on AMD8111_ETH
@@ -1778,7 +1782,7 @@ config SC92031
1778 1782
1779config NET_POCKET 1783config NET_POCKET
1780 bool "Pocket and portable adapters" 1784 bool "Pocket and portable adapters"
1781 depends on NET_ETHERNET && PARPORT 1785 depends on PARPORT
1782 ---help--- 1786 ---help---
1783 Cute little network (Ethernet) devices which attach to the parallel 1787 Cute little network (Ethernet) devices which attach to the parallel
1784 port ("pocket adapters"), commonly used with laptops. If you have 1788 port ("pocket adapters"), commonly used with laptops. If you have
@@ -1847,14 +1851,14 @@ config DE620
1847 1851
1848config SGISEEQ 1852config SGISEEQ
1849 tristate "SGI Seeq ethernet controller support" 1853 tristate "SGI Seeq ethernet controller support"
1850 depends on NET_ETHERNET && SGI_IP22 1854 depends on SGI_IP22
1851 help 1855 help
1852 Say Y here if you have an Seeq based Ethernet network card. This is 1856 Say Y here if you have an Seeq based Ethernet network card. This is
1853 used in many Silicon Graphics machines. 1857 used in many Silicon Graphics machines.
1854 1858
1855config DECLANCE 1859config DECLANCE
1856 tristate "DEC LANCE ethernet controller support" 1860 tristate "DEC LANCE ethernet controller support"
1857 depends on NET_ETHERNET && MACH_DECSTATION 1861 depends on MACH_DECSTATION
1858 select CRC32 1862 select CRC32
1859 help 1863 help
1860 This driver is for the series of Ethernet controllers produced by 1864 This driver is for the series of Ethernet controllers produced by
@@ -1884,7 +1888,7 @@ config FEC2
1884 1888
1885config NE_H8300 1889config NE_H8300
1886 tristate "NE2000 compatible support for H8/300" 1890 tristate "NE2000 compatible support for H8/300"
1887 depends on H8300 && NET_ETHERNET 1891 depends on H8300
1888 help 1892 help
1889 Say Y here if you want to use the NE2000 compatible 1893 Say Y here if you want to use the NE2000 compatible
1890 controller on the Renesas H8/300 processor. 1894 controller on the Renesas H8/300 processor.
@@ -1892,7 +1896,7 @@ config NE_H8300
1892source "drivers/net/fec_8xx/Kconfig" 1896source "drivers/net/fec_8xx/Kconfig"
1893source "drivers/net/fs_enet/Kconfig" 1897source "drivers/net/fs_enet/Kconfig"
1894 1898
1895endmenu 1899endif # NET_ETHERNET
1896 1900
1897# 1901#
1898# Gigabit Ethernet 1902# Gigabit Ethernet
@@ -2948,8 +2952,6 @@ config NETCONSOLE
2948 If you want to log kernel messages over the network, enable this. 2952 If you want to log kernel messages over the network, enable this.
2949 See <file:Documentation/networking/netconsole.txt> for details. 2953 See <file:Documentation/networking/netconsole.txt> for details.
2950 2954
2951endif #NETDEVICES
2952
2953config NETPOLL 2955config NETPOLL
2954 def_bool NETCONSOLE 2956 def_bool NETCONSOLE
2955 2957
@@ -2961,4 +2963,4 @@ config NETPOLL_TRAP
2961config NET_POLL_CONTROLLER 2963config NET_POLL_CONTROLLER
2962 def_bool NETPOLL 2964 def_bool NETPOLL
2963 2965
2964endmenu 2966endif # NETDEVICES
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index a77affa4f6e6..eb62fb48e4b7 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -157,6 +157,7 @@ obj-$(CONFIG_ELPLUS) += 3c505.o
157obj-$(CONFIG_AC3200) += ac3200.o 8390.o 157obj-$(CONFIG_AC3200) += ac3200.o 8390.o
158obj-$(CONFIG_APRICOT) += 82596.o 158obj-$(CONFIG_APRICOT) += 82596.o
159obj-$(CONFIG_LASI_82596) += lasi_82596.o 159obj-$(CONFIG_LASI_82596) += lasi_82596.o
160obj-$(CONFIG_SNI_82596) += sni_82596.o
160obj-$(CONFIG_MVME16x_NET) += 82596.o 161obj-$(CONFIG_MVME16x_NET) += 82596.o
161obj-$(CONFIG_BVME6000_NET) += 82596.o 162obj-$(CONFIG_BVME6000_NET) += 82596.o
162obj-$(CONFIG_SC92031) += sc92031.o 163obj-$(CONFIG_SC92031) += sc92031.o
diff --git a/drivers/net/acenic.c b/drivers/net/acenic.c
index 04382f979c99..b78a4e5ceeb2 100644
--- a/drivers/net/acenic.c
+++ b/drivers/net/acenic.c
@@ -159,10 +159,6 @@ static struct pci_device_id acenic_pci_tbl[] = {
159}; 159};
160MODULE_DEVICE_TABLE(pci, acenic_pci_tbl); 160MODULE_DEVICE_TABLE(pci, acenic_pci_tbl);
161 161
162#ifndef SET_NETDEV_DEV
163#define SET_NETDEV_DEV(net, pdev) do{} while(0)
164#endif
165
166#define ace_sync_irq(irq) synchronize_irq(irq) 162#define ace_sync_irq(irq) synchronize_irq(irq)
167 163
168#ifndef offset_in_page 164#ifndef offset_in_page
diff --git a/drivers/net/arm/Kconfig b/drivers/net/arm/Kconfig
index 678e4f48d36b..5bf2d33887ac 100644
--- a/drivers/net/arm/Kconfig
+++ b/drivers/net/arm/Kconfig
@@ -4,7 +4,7 @@
4# 4#
5config ARM_AM79C961A 5config ARM_AM79C961A
6 bool "ARM EBSA110 AM79C961A support" 6 bool "ARM EBSA110 AM79C961A support"
7 depends on NET_ETHERNET && ARM && ARCH_EBSA110 7 depends on ARM && ARCH_EBSA110
8 select CRC32 8 select CRC32
9 help 9 help
10 If you wish to compile a kernel for the EBSA-110, then you should 10 If you wish to compile a kernel for the EBSA-110, then you should
@@ -12,21 +12,21 @@ config ARM_AM79C961A
12 12
13config ARM_ETHER1 13config ARM_ETHER1
14 tristate "Acorn Ether1 support" 14 tristate "Acorn Ether1 support"
15 depends on NET_ETHERNET && ARM && ARCH_ACORN 15 depends on ARM && ARCH_ACORN
16 help 16 help
17 If you have an Acorn system with one of these (AKA25) network cards, 17 If you have an Acorn system with one of these (AKA25) network cards,
18 you should say Y to this option if you wish to use it with Linux. 18 you should say Y to this option if you wish to use it with Linux.
19 19
20config ARM_ETHER3 20config ARM_ETHER3
21 tristate "Acorn/ANT Ether3 support" 21 tristate "Acorn/ANT Ether3 support"
22 depends on NET_ETHERNET && ARM && ARCH_ACORN 22 depends on ARM && ARCH_ACORN
23 help 23 help
24 If you have an Acorn system with one of these network cards, you 24 If you have an Acorn system with one of these network cards, you
25 should say Y to this option if you wish to use it with Linux. 25 should say Y to this option if you wish to use it with Linux.
26 26
27config ARM_ETHERH 27config ARM_ETHERH
28 tristate "I-cubed EtherH/ANT EtherM support" 28 tristate "I-cubed EtherH/ANT EtherM support"
29 depends on NET_ETHERNET && ARM && ARCH_ACORN 29 depends on ARM && ARCH_ACORN
30 select CRC32 30 select CRC32
31 help 31 help
32 If you have an Acorn system with one of these network cards, you 32 If you have an Acorn system with one of these network cards, you
@@ -34,7 +34,7 @@ config ARM_ETHERH
34 34
35config ARM_AT91_ETHER 35config ARM_AT91_ETHER
36 tristate "AT91RM9200 Ethernet support" 36 tristate "AT91RM9200 Ethernet support"
37 depends on NET_ETHERNET && ARM && ARCH_AT91RM9200 37 depends on ARM && ARCH_AT91RM9200
38 select MII 38 select MII
39 help 39 help
40 If you wish to compile a kernel for the AT91RM9200 and enable 40 If you wish to compile a kernel for the AT91RM9200 and enable
@@ -42,7 +42,7 @@ config ARM_AT91_ETHER
42 42
43config EP93XX_ETH 43config EP93XX_ETH
44 tristate "EP93xx Ethernet support" 44 tristate "EP93xx Ethernet support"
45 depends on NET_ETHERNET && ARM && ARCH_EP93XX 45 depends on ARM && ARCH_EP93XX
46 help 46 help
47 This is a driver for the ethernet hardware included in EP93xx CPUs. 47 This is a driver for the ethernet hardware included in EP93xx CPUs.
48 Say Y if you are building a kernel for EP93xx based devices. 48 Say Y if you are building a kernel for EP93xx based devices.
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 879a2fff474e..96fb0ec905a7 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -15,6 +15,7 @@
15#include <linux/ethtool.h> 15#include <linux/ethtool.h>
16#include <linux/mii.h> 16#include <linux/mii.h>
17#include <linux/if_ether.h> 17#include <linux/if_ether.h>
18#include <linux/if_vlan.h>
18#include <linux/etherdevice.h> 19#include <linux/etherdevice.h>
19#include <linux/pci.h> 20#include <linux/pci.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
@@ -68,8 +69,8 @@
68 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP)) 69 (BP)->tx_cons - (BP)->tx_prod - TX_RING_GAP(BP))
69#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1)) 70#define NEXT_TX(N) (((N) + 1) & (B44_TX_RING_SIZE - 1))
70 71
71#define RX_PKT_BUF_SZ (1536 + bp->rx_offset + 64) 72#define RX_PKT_OFFSET 30
72#define TX_PKT_BUF_SZ (B44_MAX_MTU + ETH_HLEN + 8) 73#define RX_PKT_BUF_SZ (1536 + RX_PKT_OFFSET + 64)
73 74
74/* minimum number of free TX descriptors required to wake up TX process */ 75/* minimum number of free TX descriptors required to wake up TX process */
75#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4) 76#define B44_TX_WAKEUP_THRESH (B44_TX_RING_SIZE / 4)
@@ -599,8 +600,7 @@ static void b44_timer(unsigned long __opaque)
599 600
600 spin_unlock_irq(&bp->lock); 601 spin_unlock_irq(&bp->lock);
601 602
602 bp->timer.expires = jiffies + HZ; 603 mod_timer(&bp->timer, round_jiffies(jiffies + HZ));
603 add_timer(&bp->timer);
604} 604}
605 605
606static void b44_tx(struct b44 *bp) 606static void b44_tx(struct b44 *bp)
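
The rearm at the end of b44_timer() is worth a note: rather than setting ->expires and calling add_timer() by hand, the new code uses mod_timer() with round_jiffies(), which nudges this one-second statistics timer onto a whole-second boundary so periodic timers from many drivers tend to expire together and wake the CPU less often. A conceptual, userspace-only sketch of that rounding (not the kernel's round_jiffies() implementation; HZ is just an illustrative value):

    #include <stdio.h>

    #define HZ 250    /* illustrative tick rate, not the kernel's config */

    /* Push an expiry up to the next whole-second boundary. */
    static unsigned long round_to_next_second(unsigned long j)
    {
        return j + (HZ - (j % HZ));
    }

    int main(void)
    {
        unsigned long expiry = 1484;    /* some jiffies value */

        printf("%lu -> %lu\n", expiry, round_to_next_second(expiry));
        return 0;
    }
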
@@ -653,7 +653,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
653 src_map = &bp->rx_buffers[src_idx]; 653 src_map = &bp->rx_buffers[src_idx];
654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1); 654 dest_idx = dest_idx_unmasked & (B44_RX_RING_SIZE - 1);
655 map = &bp->rx_buffers[dest_idx]; 655 map = &bp->rx_buffers[dest_idx];
656 skb = dev_alloc_skb(RX_PKT_BUF_SZ); 656 skb = netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ);
657 if (skb == NULL) 657 if (skb == NULL)
658 return -ENOMEM; 658 return -ENOMEM;
659 659
@@ -669,7 +669,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
669 if (!dma_mapping_error(mapping)) 669 if (!dma_mapping_error(mapping))
670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 670 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
671 dev_kfree_skb_any(skb); 671 dev_kfree_skb_any(skb);
672 skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); 672 skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
673 if (skb == NULL) 673 if (skb == NULL)
674 return -ENOMEM; 674 return -ENOMEM;
675 mapping = pci_map_single(bp->pdev, skb->data, 675 mapping = pci_map_single(bp->pdev, skb->data,
@@ -684,11 +684,9 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
684 } 684 }
685 } 685 }
686 686
687 skb->dev = bp->dev; 687 rh = (struct rx_header *) skb->data;
688 skb_reserve(skb, bp->rx_offset); 688 skb_reserve(skb, RX_PKT_OFFSET);
689 689
690 rh = (struct rx_header *)
691 (skb->data - bp->rx_offset);
692 rh->len = 0; 690 rh->len = 0;
693 rh->flags = 0; 691 rh->flags = 0;
694 692
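
The RX_PKT_OFFSET constant introduced above replaces the old per-adapter rx_offset field with the fixed value 30 the driver always programmed. The hardware writes its rx_header at the start of the DMA buffer, the driver reserves RX_PKT_OFFSET bytes before the packet data, and with a standard 14-byte Ethernet header that places the IP header at offset 44, which is 4-byte aligned. A tiny standalone check of that arithmetic (ETH_HLEN = 14 is assumed here, not taken from the patch):

    #include <stdio.h>

    #define RX_PKT_OFFSET 30    /* from the hunk above */
    #define ETH_HLEN      14    /* assumed standard Ethernet header size */

    int main(void)
    {
        unsigned int ip_off = RX_PKT_OFFSET + ETH_HLEN;

        printf("IP header at offset %u, 4-byte aligned: %s\n",
               ip_off, (ip_off % 4 == 0) ? "yes" : "no");
        return 0;
    }
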
@@ -698,13 +696,13 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
698 if (src_map != NULL) 696 if (src_map != NULL)
699 src_map->skb = NULL; 697 src_map->skb = NULL;
700 698
701 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - bp->rx_offset)); 699 ctrl = (DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET));
702 if (dest_idx == (B44_RX_RING_SIZE - 1)) 700 if (dest_idx == (B44_RX_RING_SIZE - 1))
703 ctrl |= DESC_CTRL_EOT; 701 ctrl |= DESC_CTRL_EOT;
704 702
705 dp = &bp->rx_ring[dest_idx]; 703 dp = &bp->rx_ring[dest_idx];
706 dp->ctrl = cpu_to_le32(ctrl); 704 dp->ctrl = cpu_to_le32(ctrl);
707 dp->addr = cpu_to_le32((u32) mapping + bp->rx_offset + bp->dma_offset); 705 dp->addr = cpu_to_le32((u32) mapping + RX_PKT_OFFSET + bp->dma_offset);
708 706
709 if (bp->flags & B44_FLAG_RX_RING_HACK) 707 if (bp->flags & B44_FLAG_RX_RING_HACK)
710 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma, 708 b44_sync_dma_desc_for_device(bp->pdev, bp->rx_ring_dma,
@@ -783,7 +781,7 @@ static int b44_rx(struct b44 *bp, int budget)
783 PCI_DMA_FROMDEVICE); 781 PCI_DMA_FROMDEVICE);
784 rh = (struct rx_header *) skb->data; 782 rh = (struct rx_header *) skb->data;
785 len = le16_to_cpu(rh->len); 783 len = le16_to_cpu(rh->len);
786 if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) || 784 if ((len > (RX_PKT_BUF_SZ - RX_PKT_OFFSET)) ||
787 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) { 785 (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
788 drop_it: 786 drop_it:
789 b44_recycle_rx(bp, cons, bp->rx_prod); 787 b44_recycle_rx(bp, cons, bp->rx_prod);
@@ -815,8 +813,8 @@ static int b44_rx(struct b44 *bp, int budget)
815 pci_unmap_single(bp->pdev, map, 813 pci_unmap_single(bp->pdev, map,
816 skb_size, PCI_DMA_FROMDEVICE); 814 skb_size, PCI_DMA_FROMDEVICE);
817 /* Leave out rx_header */ 815 /* Leave out rx_header */
818 skb_put(skb, len+bp->rx_offset); 816 skb_put(skb, len + RX_PKT_OFFSET);
819 skb_pull(skb,bp->rx_offset); 817 skb_pull(skb, RX_PKT_OFFSET);
820 } else { 818 } else {
821 struct sk_buff *copy_skb; 819 struct sk_buff *copy_skb;
822 820
@@ -828,7 +826,7 @@ static int b44_rx(struct b44 *bp, int budget)
828 skb_reserve(copy_skb, 2); 826 skb_reserve(copy_skb, 2);
829 skb_put(copy_skb, len); 827 skb_put(copy_skb, len);
830 /* DMA sync done above, copy just the actual packet */ 828 /* DMA sync done above, copy just the actual packet */
831 skb_copy_from_linear_data_offset(skb, bp->rx_offset, 829 skb_copy_from_linear_data_offset(skb, RX_PKT_OFFSET,
832 copy_skb->data, len); 830 copy_skb->data, len);
833 skb = copy_skb; 831 skb = copy_skb;
834 } 832 }
@@ -969,7 +967,6 @@ static void b44_tx_timeout(struct net_device *dev)
969static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) 967static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
970{ 968{
971 struct b44 *bp = netdev_priv(dev); 969 struct b44 *bp = netdev_priv(dev);
972 struct sk_buff *bounce_skb;
973 int rc = NETDEV_TX_OK; 970 int rc = NETDEV_TX_OK;
974 dma_addr_t mapping; 971 dma_addr_t mapping;
975 u32 len, entry, ctrl; 972 u32 len, entry, ctrl;
@@ -987,12 +984,13 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
987 984
988 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 985 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
989 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { 986 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
987 struct sk_buff *bounce_skb;
988
990 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 989 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
991 if (!dma_mapping_error(mapping)) 990 if (!dma_mapping_error(mapping))
992 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 991 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
993 992
994 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, 993 bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
995 GFP_ATOMIC|GFP_DMA);
996 if (!bounce_skb) 994 if (!bounce_skb)
997 goto err_out; 995 goto err_out;
998 996
@@ -1001,13 +999,12 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1001 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) { 999 if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
1002 if (!dma_mapping_error(mapping)) 1000 if (!dma_mapping_error(mapping))
1003 pci_unmap_single(bp->pdev, mapping, 1001 pci_unmap_single(bp->pdev, mapping,
1004 len, PCI_DMA_TODEVICE); 1002 len, PCI_DMA_TODEVICE);
1005 dev_kfree_skb_any(bounce_skb); 1003 dev_kfree_skb_any(bounce_skb);
1006 goto err_out; 1004 goto err_out;
1007 } 1005 }
1008 1006
1009 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), 1007 skb_copy_from_linear_data(skb, skb_put(bounce_skb, len), len);
1010 skb->len);
1011 dev_kfree_skb_any(skb); 1008 dev_kfree_skb_any(skb);
1012 skb = bounce_skb; 1009 skb = bounce_skb;
1013 } 1010 }
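
The transmit path keeps its bounce-buffer handling but tightens it: the B44 core can only DMA within the low 1GB, so when pci_map_single() fails or the mapping crosses DMA_30BIT_MASK the data is copied into a freshly allocated GFP_DMA skb, now sized to the actual packet length instead of a fixed TX_PKT_BUF_SZ. A toy version of just the address test, with an assumed value for the 30-bit mask rather than the kernel's definition:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DMA_30BIT_MASK 0x3fffffffULL    /* assumed: low 1GB only */

    /* Bounce when the mapping failed (modelled as 0 here) or when any
     * byte of the buffer would land above the 30-bit limit. */
    static bool needs_bounce(uint64_t mapping, uint64_t len)
    {
        return mapping == 0 || mapping + len > DMA_30BIT_MASK;
    }

    int main(void)
    {
        printf("%d\n", needs_bounce(0x3ffffff0ULL, 64));    /* 1: crosses 1GB */
        printf("%d\n", needs_bounce(0x10000000ULL, 1514));  /* 0: fits */
        return 0;
    }
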
@@ -1396,12 +1393,12 @@ static void b44_init_hw(struct b44 *bp, int reset_kind)
1396 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */ 1393 bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
1397 if (reset_kind == B44_PARTIAL_RESET) { 1394 if (reset_kind == B44_PARTIAL_RESET) {
1398 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | 1395 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1399 (bp->rx_offset << DMARX_CTRL_ROSHIFT))); 1396 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1400 } else { 1397 } else {
1401 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE); 1398 bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
1402 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset); 1399 bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
1403 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE | 1400 bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
1404 (bp->rx_offset << DMARX_CTRL_ROSHIFT))); 1401 (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT)));
1405 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset); 1402 bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);
1406 1403
1407 bw32(bp, B44_DMARX_PTR, bp->rx_pending); 1404 bw32(bp, B44_DMARX_PTR, bp->rx_pending);
@@ -2093,11 +2090,6 @@ static int __devinit b44_get_invariants(struct b44 *bp)
2093 2090
2094 bp->phy_addr = eeprom[90] & 0x1f; 2091 bp->phy_addr = eeprom[90] & 0x1f;
2095 2092
2096 /* With this, plus the rx_header prepended to the data by the
2097 * hardware, we'll land the ethernet header on a 2-byte boundary.
2098 */
2099 bp->rx_offset = 30;
2100
2101 bp->imask = IMASK_DEF; 2093 bp->imask = IMASK_DEF;
2102 2094
2103 bp->core_unit = ssb_core_unit(bp); 2095 bp->core_unit = ssb_core_unit(bp);
@@ -2348,11 +2340,11 @@ static int b44_resume(struct pci_dev *pdev)
2348 netif_device_attach(bp->dev); 2340 netif_device_attach(bp->dev);
2349 spin_unlock_irq(&bp->lock); 2341 spin_unlock_irq(&bp->lock);
2350 2342
2351 bp->timer.expires = jiffies + HZ;
2352 add_timer(&bp->timer);
2353
2354 b44_enable_ints(bp); 2343 b44_enable_ints(bp);
2355 netif_wake_queue(dev); 2344 netif_wake_queue(dev);
2345
2346 mod_timer(&bp->timer, jiffies + 1);
2347
2356 return 0; 2348 return 0;
2357} 2349}
2358 2350
diff --git a/drivers/net/b44.h b/drivers/net/b44.h
index 18fc13336628..e537e63f292e 100644
--- a/drivers/net/b44.h
+++ b/drivers/net/b44.h
@@ -443,8 +443,6 @@ struct b44 {
443#define B44_FLAG_TX_RING_HACK 0x40000000 443#define B44_FLAG_TX_RING_HACK 0x40000000
444#define B44_FLAG_WOL_ENABLE 0x80000000 444#define B44_FLAG_WOL_ENABLE 0x80000000
445 445
446 u32 rx_offset;
447
448 u32 msg_enable; 446 u32 msg_enable;
449 447
450 struct timer_list timer; 448 struct timer_list timer;
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 80c3d8f268a7..ab72563b81ee 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -71,27 +71,29 @@ enum { /* adapter flags */
71 QUEUES_BOUND = (1 << 3), 71 QUEUES_BOUND = (1 << 3),
72}; 72};
73 73
74struct fl_pg_chunk {
75 struct page *page;
76 void *va;
77 unsigned int offset;
78};
79
74struct rx_desc; 80struct rx_desc;
75struct rx_sw_desc; 81struct rx_sw_desc;
76 82
77struct sge_fl_page { 83struct sge_fl { /* SGE per free-buffer list state */
78 struct skb_frag_struct frag; 84 unsigned int buf_size; /* size of each Rx buffer */
79 unsigned char *va; 85 unsigned int credits; /* # of available Rx buffers */
80}; 86 unsigned int size; /* capacity of free list */
81 87 unsigned int cidx; /* consumer index */
82struct sge_fl { /* SGE per free-buffer list state */ 88 unsigned int pidx; /* producer index */
83 unsigned int buf_size; /* size of each Rx buffer */ 89 unsigned int gen; /* free list generation */
84 unsigned int credits; /* # of available Rx buffers */ 90 struct fl_pg_chunk pg_chunk;/* page chunk cache */
85 unsigned int size; /* capacity of free list */ 91 unsigned int use_pages; /* whether FL uses pages or sk_buffs */
86 unsigned int cidx; /* consumer index */ 92 struct rx_desc *desc; /* address of HW Rx descriptor ring */
87 unsigned int pidx; /* producer index */ 93 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
88 unsigned int gen; /* free list generation */ 94 dma_addr_t phys_addr; /* physical address of HW ring start */
89 unsigned int cntxt_id; /* SGE context id for the free list */ 95 unsigned int cntxt_id; /* SGE context id for the free list */
90 struct sge_fl_page page; 96 unsigned long empty; /* # of times queue ran out of buffers */
91 struct rx_desc *desc; /* address of HW Rx descriptor ring */
92 struct rx_sw_desc *sdesc; /* address of SW Rx descriptor ring */
93 dma_addr_t phys_addr; /* physical address of HW ring start */
94 unsigned long empty; /* # of times queue ran out of buffers */
95 unsigned long alloc_failed; /* # of times buffer allocation failed */ 97 unsigned long alloc_failed; /* # of times buffer allocation failed */
96}; 98};
97 99
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 8d1379633698..16378004507a 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -101,6 +101,7 @@ enum {
101 TCB_SIZE = 128, /* TCB size */ 101 TCB_SIZE = 128, /* TCB size */
102 NMTUS = 16, /* size of MTU table */ 102 NMTUS = 16, /* size of MTU table */
103 NCCTRL_WIN = 32, /* # of congestion control windows */ 103 NCCTRL_WIN = 32, /* # of congestion control windows */
104 PROTO_SRAM_LINES = 128, /* size of TP sram */
104}; 105};
105 106
106#define MAX_RX_COALESCING_LEN 16224U 107#define MAX_RX_COALESCING_LEN 16224U
@@ -124,6 +125,30 @@ enum { /* adapter interrupt-maintained statistics */
124}; 125};
125 126
126enum { 127enum {
128 TP_VERSION_MAJOR = 1,
129 TP_VERSION_MINOR = 0,
130 TP_VERSION_MICRO = 44
131};
132
133#define S_TP_VERSION_MAJOR 16
134#define M_TP_VERSION_MAJOR 0xFF
135#define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
136#define G_TP_VERSION_MAJOR(x) \
137 (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)
138
139#define S_TP_VERSION_MINOR 8
140#define M_TP_VERSION_MINOR 0xFF
141#define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
142#define G_TP_VERSION_MINOR(x) \
143 (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)
144
145#define S_TP_VERSION_MICRO 0
146#define M_TP_VERSION_MICRO 0xFF
147#define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)
148#define G_TP_VERSION_MICRO(x) \
149 (((x) >> S_TP_VERSION_MICRO) & M_TP_VERSION_MICRO)
150
151enum {
127 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */ 152 SGE_QSETS = 8, /* # of SGE Tx/Rx/RspQ sets */
128 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */ 153 SGE_RXQ_PER_SET = 2, /* # of Rx queues per set */
129 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */ 154 SGE_TXQ_PER_SET = 3 /* # of Tx queues per set */
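
The TP_VERSION_* constants and their S_/M_/V_/G_ helpers follow the usual cxgb3 shift/mask convention: V_*() places a field, G_*() extracts it, so the 1.0.44 protocol-SRAM version packs into one 32-bit word. A short standalone round-trip check using a subset of the macros as added above:

    #include <stdint.h>
    #include <stdio.h>

    #define S_TP_VERSION_MAJOR 16
    #define M_TP_VERSION_MAJOR 0xFF
    #define V_TP_VERSION_MAJOR(x) ((x) << S_TP_VERSION_MAJOR)
    #define G_TP_VERSION_MAJOR(x) \
        (((x) >> S_TP_VERSION_MAJOR) & M_TP_VERSION_MAJOR)

    #define S_TP_VERSION_MINOR 8
    #define M_TP_VERSION_MINOR 0xFF
    #define V_TP_VERSION_MINOR(x) ((x) << S_TP_VERSION_MINOR)
    #define G_TP_VERSION_MINOR(x) \
        (((x) >> S_TP_VERSION_MINOR) & M_TP_VERSION_MINOR)

    #define S_TP_VERSION_MICRO 0
    #define V_TP_VERSION_MICRO(x) ((x) << S_TP_VERSION_MICRO)

    int main(void)
    {
        uint32_t v = V_TP_VERSION_MAJOR(1) | V_TP_VERSION_MINOR(0) |
                     V_TP_VERSION_MICRO(44);

        /* prints 0x0001002c -> 1.0.44 */
        printf("0x%08x -> %u.%u.%u\n", (unsigned)v,
               (unsigned)G_TP_VERSION_MAJOR(v),
               (unsigned)G_TP_VERSION_MINOR(v), (unsigned)(v & 0xFF));
        return 0;
    }
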
@@ -654,6 +679,9 @@ const struct adapter_info *t3_get_adapter_info(unsigned int board_id);
654int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); 679int t3_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
655int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data); 680int t3_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
656int t3_seeprom_wp(struct adapter *adapter, int enable); 681int t3_seeprom_wp(struct adapter *adapter, int enable);
682int t3_check_tpsram_version(struct adapter *adapter);
683int t3_check_tpsram(struct adapter *adapter, u8 *tp_ram, unsigned int size);
684int t3_set_proto_sram(struct adapter *adap, u8 *data);
657int t3_read_flash(struct adapter *adapter, unsigned int addr, 685int t3_read_flash(struct adapter *adapter, unsigned int addr,
658 unsigned int nwords, u32 *data, int byte_oriented); 686 unsigned int nwords, u32 *data, int byte_oriented);
659int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size); 687int t3_load_fw(struct adapter *adapter, const u8 * fw_data, unsigned int size);
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index d8a1f5452c51..15defe4c4f05 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -2088,6 +2088,42 @@ static void cxgb_netpoll(struct net_device *dev)
2088} 2088}
2089#endif 2089#endif
2090 2090
2091#define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"
2092int update_tpsram(struct adapter *adap)
2093{
2094 const struct firmware *tpsram;
2095 char buf[64];
2096 struct device *dev = &adap->pdev->dev;
2097 int ret;
2098 char rev;
2099
2100 rev = adap->params.rev == T3_REV_B2 ? 'b' : 'a';
2101
2102 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev,
2103 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
2104
2105 ret = request_firmware(&tpsram, buf, dev);
2106 if (ret < 0) {
2107 dev_err(dev, "could not load TP SRAM: unable to load %s\n",
2108 buf);
2109 return ret;
2110 }
2111
2112 ret = t3_check_tpsram(adap, tpsram->data, tpsram->size);
2113 if (ret)
2114 goto release_tpsram;
2115
2116 ret = t3_set_proto_sram(adap, tpsram->data);
2117 if (ret)
2118 dev_err(dev, "loading protocol SRAM failed\n");
2119
2120release_tpsram:
2121 release_firmware(tpsram);
2122
2123 return ret;
2124}
2125
2126
2091/* 2127/*
2092 * Periodic accumulation of MAC statistics. 2128 * Periodic accumulation of MAC statistics.
2093 */ 2129 */
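
update_tpsram() assembles the firmware file name from TPSRAM_NAME, a revision character ('b' for T3_REV_B2 silicon, otherwise 'a') and the TP_VERSION_* numbers from common.h, then passes it to request_firmware(). A quick standalone check of what that snprintf() produces for a rev-B2 part with the versions added in this patch:

    #include <stdio.h>

    #define TPSRAM_NAME "t3%c_protocol_sram-%d.%d.%d.bin"

    int main(void)
    {
        char buf[64];

        snprintf(buf, sizeof(buf), TPSRAM_NAME, 'b', 1, 0, 44);
        printf("%s\n", buf);    /* t3b_protocol_sram-1.0.44.bin */
        return 0;
    }
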
@@ -2437,6 +2473,13 @@ static int __devinit init_one(struct pci_dev *pdev,
2437 goto out_free_dev; 2473 goto out_free_dev;
2438 } 2474 }
2439 2475
2476 err = t3_check_tpsram_version(adapter);
2477 if (err == -EINVAL)
2478 err = update_tpsram(adapter);
2479
2480 if (err)
2481 goto out_free_dev;
2482
2440 /* 2483 /*
2441 * The card is now ready to go. If any errors occur during device 2484 * The card is now ready to go. If any errors occur during device
2442 * registration we do not fail the whole card but rather proceed only 2485 * registration we do not fail the whole card but rather proceed only
diff --git a/drivers/net/cxgb3/regs.h b/drivers/net/cxgb3/regs.h
index 020859c855d7..aa80313c922e 100644
--- a/drivers/net/cxgb3/regs.h
+++ b/drivers/net/cxgb3/regs.h
@@ -1160,6 +1160,8 @@
1160 1160
1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434 1161#define A_TP_MOD_CHANNEL_WEIGHT 0x434
1162 1162
1163#define A_TP_MOD_RATE_LIMIT 0x438
1164
1163#define A_TP_PIO_ADDR 0x440 1165#define A_TP_PIO_ADDR 0x440
1164 1166
1165#define A_TP_PIO_DATA 0x444 1167#define A_TP_PIO_DATA 0x444
@@ -1214,6 +1216,15 @@
1214#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \ 1216#define G_TXDROPCNTCH0RCVD(x) (((x) >> S_TXDROPCNTCH0RCVD) & \
1215 M_TXDROPCNTCH0RCVD) 1217 M_TXDROPCNTCH0RCVD)
1216 1218
1219#define A_TP_PROXY_FLOW_CNTL 0x4b0
1220
1221#define A_TP_EMBED_OP_FIELD0 0x4e8
1222#define A_TP_EMBED_OP_FIELD1 0x4ec
1223#define A_TP_EMBED_OP_FIELD2 0x4f0
1224#define A_TP_EMBED_OP_FIELD3 0x4f4
1225#define A_TP_EMBED_OP_FIELD4 0x4f8
1226#define A_TP_EMBED_OP_FIELD5 0x4fc
1227
1217#define A_ULPRX_CTL 0x500 1228#define A_ULPRX_CTL 0x500
1218 1229
1219#define S_ROUND_ROBIN 4 1230#define S_ROUND_ROBIN 4
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a60ec4d4707c..a2cfd68ac757 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -46,23 +46,16 @@
46 46
47#define SGE_RX_SM_BUF_SIZE 1536 47#define SGE_RX_SM_BUF_SIZE 1536
48 48
49/*
50 * If USE_RX_PAGE is defined, the small freelist populated with (partial)
51 * pages instead of skbs. Pages are carved up into RX_PAGE_SIZE chunks (must
52 * be a multiple of the host page size).
53 */
54#define USE_RX_PAGE
55#define RX_PAGE_SIZE 2048
56
57/*
58 * skb freelist packets are copied into a new skb (and the freelist one is
59 * reused) if their len is <=
60 */
61#define SGE_RX_COPY_THRES 256 49#define SGE_RX_COPY_THRES 256
50#define SGE_RX_PULL_LEN 128
62 51
63/* 52/*
64 * Minimum number of freelist entries before we start dropping TUNNEL frames. 53 * Page chunk size for FL0 buffers if FL0 is to be populated with page chunks.
54 * It must be a divisor of PAGE_SIZE. If set to 0 FL0 will use sk_buffs
55 * directly.
65 */ 56 */
57#define FL0_PG_CHUNK_SIZE 2048
58
66#define SGE_RX_DROP_THRES 16 59#define SGE_RX_DROP_THRES 16
67 60
68/* 61/*
@@ -100,12 +93,12 @@ struct tx_sw_desc { /* SW state per Tx descriptor */
100 struct sk_buff *skb; 93 struct sk_buff *skb;
101}; 94};
102 95
103struct rx_sw_desc { /* SW state per Rx descriptor */ 96struct rx_sw_desc { /* SW state per Rx descriptor */
104 union { 97 union {
105 struct sk_buff *skb; 98 struct sk_buff *skb;
106 struct sge_fl_page page; 99 struct fl_pg_chunk pg_chunk;
107 } t; 100 };
108 DECLARE_PCI_UNMAP_ADDR(dma_addr); 101 DECLARE_PCI_UNMAP_ADDR(dma_addr);
109}; 102};
110 103
111struct rsp_desc { /* response queue descriptor */ 104struct rsp_desc { /* response queue descriptor */
@@ -351,27 +344,26 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
351 344
352 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr), 345 pci_unmap_single(pdev, pci_unmap_addr(d, dma_addr),
353 q->buf_size, PCI_DMA_FROMDEVICE); 346 q->buf_size, PCI_DMA_FROMDEVICE);
354 347 if (q->use_pages) {
355 if (q->buf_size != RX_PAGE_SIZE) { 348 put_page(d->pg_chunk.page);
356 kfree_skb(d->t.skb); 349 d->pg_chunk.page = NULL;
357 d->t.skb = NULL;
358 } else { 350 } else {
359 if (d->t.page.frag.page) 351 kfree_skb(d->skb);
360 put_page(d->t.page.frag.page); 352 d->skb = NULL;
361 d->t.page.frag.page = NULL;
362 } 353 }
363 if (++cidx == q->size) 354 if (++cidx == q->size)
364 cidx = 0; 355 cidx = 0;
365 } 356 }
366 357
367 if (q->page.frag.page) 358 if (q->pg_chunk.page) {
368 put_page(q->page.frag.page); 359 __free_page(q->pg_chunk.page);
369 q->page.frag.page = NULL; 360 q->pg_chunk.page = NULL;
361 }
370} 362}
371 363
372/** 364/**
373 * add_one_rx_buf - add a packet buffer to a free-buffer list 365 * add_one_rx_buf - add a packet buffer to a free-buffer list
374 * @va: va of the buffer to add 366 * @va: buffer start VA
375 * @len: the buffer length 367 * @len: the buffer length
376 * @d: the HW Rx descriptor to write 368 * @d: the HW Rx descriptor to write
377 * @sd: the SW Rx descriptor to write 369 * @sd: the SW Rx descriptor to write
@@ -381,7 +373,7 @@ static void free_rx_bufs(struct pci_dev *pdev, struct sge_fl *q)
381 * Add a buffer of the given length to the supplied HW and SW Rx 373 * Add a buffer of the given length to the supplied HW and SW Rx
382 * descriptors. 374 * descriptors.
383 */ 375 */
384static inline void add_one_rx_buf(unsigned char *va, unsigned int len, 376static inline void add_one_rx_buf(void *va, unsigned int len,
385 struct rx_desc *d, struct rx_sw_desc *sd, 377 struct rx_desc *d, struct rx_sw_desc *sd,
386 unsigned int gen, struct pci_dev *pdev) 378 unsigned int gen, struct pci_dev *pdev)
387{ 379{
@@ -397,6 +389,27 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
397 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen)); 389 d->gen2 = cpu_to_be32(V_FLD_GEN2(gen));
398} 390}
399 391
392static int alloc_pg_chunk(struct sge_fl *q, struct rx_sw_desc *sd, gfp_t gfp)
393{
394 if (!q->pg_chunk.page) {
395 q->pg_chunk.page = alloc_page(gfp);
396 if (unlikely(!q->pg_chunk.page))
397 return -ENOMEM;
398 q->pg_chunk.va = page_address(q->pg_chunk.page);
399 q->pg_chunk.offset = 0;
400 }
401 sd->pg_chunk = q->pg_chunk;
402
403 q->pg_chunk.offset += q->buf_size;
404 if (q->pg_chunk.offset == PAGE_SIZE)
405 q->pg_chunk.page = NULL;
406 else {
407 q->pg_chunk.va += q->buf_size;
408 get_page(q->pg_chunk.page);
409 }
410 return 0;
411}
412
400/** 413/**
401 * refill_fl - refill an SGE free-buffer list 414 * refill_fl - refill an SGE free-buffer list
402 * @adapter: the adapter 415 * @adapter: the adapter
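
alloc_pg_chunk() is the core of the new page-chunk scheme: the free list caches one page, hands out buf_size-sized chunks from it, takes an extra page reference for every chunk except the last, and forgets the page once the offset reaches PAGE_SIZE, so the page is finally freed only when every outstanding chunk has been put back. A userspace sketch of that bookkeeping, with malloc() and a plain counter standing in for struct page and get_page()/put_page():

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE         4096    /* illustrative */
    #define FL0_PG_CHUNK_SIZE 2048

    struct pg_cache {
        char *page;           /* cached backing allocation */
        unsigned int offset;  /* next free chunk */
        int refs;             /* stand-in for the page refcount */
    };

    static void *alloc_chunk(struct pg_cache *q)
    {
        void *chunk;

        if (!q->page) {
            q->page = malloc(PAGE_SIZE);
            if (!q->page)
                return NULL;
            q->offset = 0;
            q->refs = 1;
        }
        chunk = q->page + q->offset;

        q->offset += FL0_PG_CHUNK_SIZE;
        if (q->offset == PAGE_SIZE)
            q->page = NULL;     /* last chunk: stop caching the page */
        else
            q->refs++;          /* another chunk will still be handed out */
        return chunk;
    }

    int main(void)
    {
        struct pg_cache q = { 0 };

        alloc_chunk(&q);
        alloc_chunk(&q);
        printf("references held after two chunks: %d\n", q.refs);  /* 2 */
        return 0;
    }
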
@@ -410,49 +423,29 @@ static inline void add_one_rx_buf(unsigned char *va, unsigned int len,
410 */ 423 */
411static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp) 424static void refill_fl(struct adapter *adap, struct sge_fl *q, int n, gfp_t gfp)
412{ 425{
426 void *buf_start;
413 struct rx_sw_desc *sd = &q->sdesc[q->pidx]; 427 struct rx_sw_desc *sd = &q->sdesc[q->pidx];
414 struct rx_desc *d = &q->desc[q->pidx]; 428 struct rx_desc *d = &q->desc[q->pidx];
415 struct sge_fl_page *p = &q->page;
416 429
417 while (n--) { 430 while (n--) {
418 unsigned char *va; 431 if (q->use_pages) {
419 432 if (unlikely(alloc_pg_chunk(q, sd, gfp))) {
420 if (unlikely(q->buf_size != RX_PAGE_SIZE)) { 433nomem: q->alloc_failed++;
421 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
422
423 if (!skb) {
424 q->alloc_failed++;
425 break; 434 break;
426 } 435 }
427 va = skb->data; 436 buf_start = sd->pg_chunk.va;
428 sd->t.skb = skb;
429 } else { 437 } else {
430 if (!p->frag.page) { 438 struct sk_buff *skb = alloc_skb(q->buf_size, gfp);
431 p->frag.page = alloc_pages(gfp, 0);
432 if (unlikely(!p->frag.page)) {
433 q->alloc_failed++;
434 break;
435 } else {
436 p->frag.size = RX_PAGE_SIZE;
437 p->frag.page_offset = 0;
438 p->va = page_address(p->frag.page);
439 }
440 }
441 439
442 memcpy(&sd->t, p, sizeof(*p)); 440 if (!skb)
443 va = p->va; 441 goto nomem;
444 442
445 p->frag.page_offset += RX_PAGE_SIZE; 443 sd->skb = skb;
446 BUG_ON(p->frag.page_offset > PAGE_SIZE); 444 buf_start = skb->data;
447 p->va += RX_PAGE_SIZE;
448 if (p->frag.page_offset == PAGE_SIZE)
449 p->frag.page = NULL;
450 else
451 get_page(p->frag.page);
452 } 445 }
453 446
454 add_one_rx_buf(va, q->buf_size, d, sd, q->gen, adap->pdev); 447 add_one_rx_buf(buf_start, q->buf_size, d, sd, q->gen,
455 448 adap->pdev);
456 d++; 449 d++;
457 sd++; 450 sd++;
458 if (++q->pidx == q->size) { 451 if (++q->pidx == q->size) {
@@ -487,7 +480,7 @@ static void recycle_rx_buf(struct adapter *adap, struct sge_fl *q,
487 struct rx_desc *from = &q->desc[idx]; 480 struct rx_desc *from = &q->desc[idx];
488 struct rx_desc *to = &q->desc[q->pidx]; 481 struct rx_desc *to = &q->desc[q->pidx];
489 482
490 memcpy(&q->sdesc[q->pidx], &q->sdesc[idx], sizeof(struct rx_sw_desc)); 483 q->sdesc[q->pidx] = q->sdesc[idx];
491 to->addr_lo = from->addr_lo; /* already big endian */ 484 to->addr_lo = from->addr_lo; /* already big endian */
492 to->addr_hi = from->addr_hi; /* likewise */ 485 to->addr_hi = from->addr_hi; /* likewise */
493 wmb(); 486 wmb();
@@ -650,6 +643,132 @@ static inline unsigned int flits_to_desc(unsigned int n)
650} 643}
651 644
652/** 645/**
646 * get_packet - return the next ingress packet buffer from a free list
647 * @adap: the adapter that received the packet
648 * @fl: the SGE free list holding the packet
649 * @len: the packet length including any SGE padding
650 * @drop_thres: # of remaining buffers before we start dropping packets
651 *
652 * Get the next packet from a free list and complete setup of the
653 * sk_buff. If the packet is small we make a copy and recycle the
654 * original buffer, otherwise we use the original buffer itself. If a
655 * positive drop threshold is supplied packets are dropped and their
656 * buffers recycled if (a) the number of remaining buffers is under the
657 * threshold and the packet is too big to copy, or (b) the packet should
658 * be copied but there is no memory for the copy.
659 */
660static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
661 unsigned int len, unsigned int drop_thres)
662{
663 struct sk_buff *skb = NULL;
664 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
665
666 prefetch(sd->skb->data);
667 fl->credits--;
668
669 if (len <= SGE_RX_COPY_THRES) {
670 skb = alloc_skb(len, GFP_ATOMIC);
671 if (likely(skb != NULL)) {
672 __skb_put(skb, len);
673 pci_dma_sync_single_for_cpu(adap->pdev,
674 pci_unmap_addr(sd, dma_addr), len,
675 PCI_DMA_FROMDEVICE);
676 memcpy(skb->data, sd->skb->data, len);
677 pci_dma_sync_single_for_device(adap->pdev,
678 pci_unmap_addr(sd, dma_addr), len,
679 PCI_DMA_FROMDEVICE);
680 } else if (!drop_thres)
681 goto use_orig_buf;
682recycle:
683 recycle_rx_buf(adap, fl, fl->cidx);
684 return skb;
685 }
686
687 if (unlikely(fl->credits < drop_thres))
688 goto recycle;
689
690use_orig_buf:
691 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
692 fl->buf_size, PCI_DMA_FROMDEVICE);
693 skb = sd->skb;
694 skb_put(skb, len);
695 __refill_fl(adap, fl);
696 return skb;
697}
698
699/**
700 * get_packet_pg - return the next ingress packet buffer from a free list
701 * @adap: the adapter that received the packet
702 * @fl: the SGE free list holding the packet
703 * @len: the packet length including any SGE padding
704 * @drop_thres: # of remaining buffers before we start dropping packets
705 *
706 * Get the next packet from a free list populated with page chunks.
707 * If the packet is small we make a copy and recycle the original buffer,
708 * otherwise we attach the original buffer as a page fragment to a fresh
709 * sk_buff. If a positive drop threshold is supplied packets are dropped
710 * and their buffers recycled if (a) the number of remaining buffers is
711 * under the threshold and the packet is too big to copy, or (b) there's
712 * no system memory.
713 *
714 * Note: this function is similar to @get_packet but deals with Rx buffers
715 * that are page chunks rather than sk_buffs.
716 */
717static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl,
718 unsigned int len, unsigned int drop_thres)
719{
720 struct sk_buff *skb = NULL;
721 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
722
723 if (len <= SGE_RX_COPY_THRES) {
724 skb = alloc_skb(len, GFP_ATOMIC);
725 if (likely(skb != NULL)) {
726 __skb_put(skb, len);
727 pci_dma_sync_single_for_cpu(adap->pdev,
728 pci_unmap_addr(sd, dma_addr), len,
729 PCI_DMA_FROMDEVICE);
730 memcpy(skb->data, sd->pg_chunk.va, len);
731 pci_dma_sync_single_for_device(adap->pdev,
732 pci_unmap_addr(sd, dma_addr), len,
733 PCI_DMA_FROMDEVICE);
734 } else if (!drop_thres)
735 return NULL;
736recycle:
737 fl->credits--;
738 recycle_rx_buf(adap, fl, fl->cidx);
739 return skb;
740 }
741
742 if (unlikely(fl->credits <= drop_thres))
743 goto recycle;
744
745 skb = alloc_skb(SGE_RX_PULL_LEN, GFP_ATOMIC);
746 if (unlikely(!skb)) {
747 if (!drop_thres)
748 return NULL;
749 goto recycle;
750 }
751
752 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
753 fl->buf_size, PCI_DMA_FROMDEVICE);
754 __skb_put(skb, SGE_RX_PULL_LEN);
755 memcpy(skb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN);
756 skb_fill_page_desc(skb, 0, sd->pg_chunk.page,
757 sd->pg_chunk.offset + SGE_RX_PULL_LEN,
758 len - SGE_RX_PULL_LEN);
759 skb->len = len;
760 skb->data_len = len - SGE_RX_PULL_LEN;
761 skb->truesize += skb->data_len;
762
763 fl->credits--;
764 /*
765 * We do not refill FLs here, we let the caller do it to overlap a
766 * prefetch.
767 */
768 return skb;
769}
770
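get_packet() and get_packet_pg() above share the copy-versus-consume decision spelled out in their comments: small packets are copied into a fresh skb so the Rx buffer can be recycled, large packets consume the buffer (or, for page chunks, are attached as a page fragment), and packets are dropped with the buffer recycled when the free list is nearly empty or the copy cannot be allocated. A condensed, standalone version of that decision is sketched below; the enum and function are invented for the illustration, and get_packet_pg() differs slightly in that it compares credits with <= and never falls back to the original buffer when the copy allocation fails.

#include <stdio.h>

enum rx_action { RX_COPY, RX_USE_ORIG, RX_RECYCLE_DROP };

static enum rx_action classify_rx(unsigned int len, unsigned int credits,
				  unsigned int copy_thres,
				  unsigned int drop_thres, int copy_alloc_ok)
{
	if (len <= copy_thres) {
		/* Small packet: copy into a fresh skb and recycle the buffer.
		 * If the copy cannot be allocated, either drop (when a drop
		 * threshold was given) or fall back to the original buffer. */
		if (copy_alloc_ok)
			return RX_COPY;
		return drop_thres ? RX_RECYCLE_DROP : RX_USE_ORIG;
	}
	if (credits < drop_thres)
		return RX_RECYCLE_DROP;  /* free list nearly empty: drop */
	return RX_USE_ORIG;              /* hand the original buffer up the stack */
}

int main(void)
{
	printf("%d\n", classify_rx(60, 100, 256, 16, 1));   /* RX_COPY (0) */
	printf("%d\n", classify_rx(1500, 8, 256, 16, 1));   /* RX_RECYCLE_DROP (2) */
	printf("%d\n", classify_rx(1500, 100, 256, 16, 1)); /* RX_USE_ORIG (1) */
	return 0;
}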
771/**
653 * get_imm_packet - return the next ingress packet buffer from a response 772 * get_imm_packet - return the next ingress packet buffer from a response
654 * @resp: the response descriptor containing the packet data 773 * @resp: the response descriptor containing the packet data
655 * 774 *
@@ -1715,85 +1834,6 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
1715 netif_rx(skb); 1834 netif_rx(skb);
1716} 1835}
1717 1836
1718#define SKB_DATA_SIZE 128
1719
1720static void skb_data_init(struct sk_buff *skb, struct sge_fl_page *p,
1721 unsigned int len)
1722{
1723 skb->len = len;
1724 if (len <= SKB_DATA_SIZE) {
1725 skb_copy_to_linear_data(skb, p->va, len);
1726 skb->tail += len;
1727 put_page(p->frag.page);
1728 } else {
1729 skb_copy_to_linear_data(skb, p->va, SKB_DATA_SIZE);
1730 skb_shinfo(skb)->frags[0].page = p->frag.page;
1731 skb_shinfo(skb)->frags[0].page_offset =
1732 p->frag.page_offset + SKB_DATA_SIZE;
1733 skb_shinfo(skb)->frags[0].size = len - SKB_DATA_SIZE;
1734 skb_shinfo(skb)->nr_frags = 1;
1735 skb->data_len = len - SKB_DATA_SIZE;
1736 skb->tail += SKB_DATA_SIZE;
1737 skb->truesize += skb->data_len;
1738 }
1739}
1740
1741/**
1742* get_packet - return the next ingress packet buffer from a free list
1743* @adap: the adapter that received the packet
1744* @fl: the SGE free list holding the packet
1745* @len: the packet length including any SGE padding
1746* @drop_thres: # of remaining buffers before we start dropping packets
1747*
1748* Get the next packet from a free list and complete setup of the
1749* sk_buff. If the packet is small we make a copy and recycle the
1750* original buffer, otherwise we use the original buffer itself. If a
1751* positive drop threshold is supplied packets are dropped and their
1752* buffers recycled if (a) the number of remaining buffers is under the
1753* threshold and the packet is too big to copy, or (b) the packet should
1754* be copied but there is no memory for the copy.
1755*/
1756static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl,
1757 unsigned int len, unsigned int drop_thres)
1758{
1759 struct sk_buff *skb = NULL;
1760 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx];
1761
1762 prefetch(sd->t.skb->data);
1763
1764 if (len <= SGE_RX_COPY_THRES) {
1765 skb = alloc_skb(len, GFP_ATOMIC);
1766 if (likely(skb != NULL)) {
1767 struct rx_desc *d = &fl->desc[fl->cidx];
1768 dma_addr_t mapping =
1769 (dma_addr_t)((u64) be32_to_cpu(d->addr_hi) << 32 |
1770 be32_to_cpu(d->addr_lo));
1771
1772 __skb_put(skb, len);
1773 pci_dma_sync_single_for_cpu(adap->pdev, mapping, len,
1774 PCI_DMA_FROMDEVICE);
1775 skb_copy_from_linear_data(sd->t.skb, skb->data, len);
1776 pci_dma_sync_single_for_device(adap->pdev, mapping, len,
1777 PCI_DMA_FROMDEVICE);
1778 } else if (!drop_thres)
1779 goto use_orig_buf;
1780recycle:
1781 recycle_rx_buf(adap, fl, fl->cidx);
1782 return skb;
1783 }
1784
1785 if (unlikely(fl->credits < drop_thres))
1786 goto recycle;
1787
1788use_orig_buf:
1789 pci_unmap_single(adap->pdev, pci_unmap_addr(sd, dma_addr),
1790 fl->buf_size, PCI_DMA_FROMDEVICE);
1791 skb = sd->t.skb;
1792 skb_put(skb, len);
1793 __refill_fl(adap, fl);
1794 return skb;
1795}
1796
1797/** 1837/**
1798 * handle_rsp_cntrl_info - handles control information in a response 1838 * handle_rsp_cntrl_info - handles control information in a response
1799 * @qs: the queue set corresponding to the response 1839 * @qs: the queue set corresponding to the response
@@ -1935,7 +1975,7 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1935 } else if (flags & F_RSPD_IMM_DATA_VALID) { 1975 } else if (flags & F_RSPD_IMM_DATA_VALID) {
1936 skb = get_imm_packet(r); 1976 skb = get_imm_packet(r);
1937 if (unlikely(!skb)) { 1977 if (unlikely(!skb)) {
1938 no_mem: 1978no_mem:
1939 q->next_holdoff = NOMEM_INTR_DELAY; 1979 q->next_holdoff = NOMEM_INTR_DELAY;
1940 q->nomem++; 1980 q->nomem++;
1941 /* consume one credit since we tried */ 1981 /* consume one credit since we tried */
@@ -1945,53 +1985,29 @@ static int process_responses(struct adapter *adap, struct sge_qset *qs,
1945 q->imm_data++; 1985 q->imm_data++;
1946 ethpad = 0; 1986 ethpad = 0;
1947 } else if ((len = ntohl(r->len_cq)) != 0) { 1987 } else if ((len = ntohl(r->len_cq)) != 0) {
1948 struct sge_fl *fl = 1988 struct sge_fl *fl;
1949 (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1950 1989
1951 if (fl->buf_size == RX_PAGE_SIZE) { 1990 fl = (len & F_RSPD_FLQ) ? &qs->fl[1] : &qs->fl[0];
1952 struct rx_sw_desc *sd = &fl->sdesc[fl->cidx]; 1991 if (fl->use_pages) {
1953 struct sge_fl_page *p = &sd->t.page; 1992 void *addr = fl->sdesc[fl->cidx].pg_chunk.va;
1954
1955 prefetch(p->va);
1956 prefetch(p->va + L1_CACHE_BYTES);
1957 1993
1994 prefetch(addr);
1995#if L1_CACHE_BYTES < 128
1996 prefetch(addr + L1_CACHE_BYTES);
1997#endif
1958 __refill_fl(adap, fl); 1998 __refill_fl(adap, fl);
1959 1999
1960 pci_unmap_single(adap->pdev, 2000 skb = get_packet_pg(adap, fl, G_RSPD_LEN(len),
1961 pci_unmap_addr(sd, dma_addr), 2001 eth ? SGE_RX_DROP_THRES : 0);
1962 fl->buf_size, 2002 } else
1963 PCI_DMA_FROMDEVICE);
1964
1965 if (eth) {
1966 if (unlikely(fl->credits <
1967 SGE_RX_DROP_THRES))
1968 goto eth_recycle;
1969
1970 skb = alloc_skb(SKB_DATA_SIZE,
1971 GFP_ATOMIC);
1972 if (unlikely(!skb)) {
1973eth_recycle:
1974 q->rx_drops++;
1975 recycle_rx_buf(adap, fl,
1976 fl->cidx);
1977 goto eth_done;
1978 }
1979 } else {
1980 skb = alloc_skb(SKB_DATA_SIZE,
1981 GFP_ATOMIC);
1982 if (unlikely(!skb))
1983 goto no_mem;
1984 }
1985
1986 skb_data_init(skb, p, G_RSPD_LEN(len));
1987eth_done:
1988 fl->credits--;
1989 q->eth_pkts++;
1990 } else {
1991 fl->credits--;
1992 skb = get_packet(adap, fl, G_RSPD_LEN(len), 2003 skb = get_packet(adap, fl, G_RSPD_LEN(len),
1993 eth ? SGE_RX_DROP_THRES : 0); 2004 eth ? SGE_RX_DROP_THRES : 0);
1994 } 2005 if (unlikely(!skb)) {
2006 if (!eth)
2007 goto no_mem;
2008 q->rx_drops++;
2009 } else if (unlikely(r->rss_hdr.opcode == CPL_TRACE_PKT))
2010 __skb_pull(skb, 2);
1995 2011
1996 if (++fl->cidx == fl->size) 2012 if (++fl->cidx == fl->size)
1997 fl->cidx = 0; 2013 fl->cidx = 0;
@@ -2016,20 +2032,15 @@ eth_done:
2016 q->credits = 0; 2032 q->credits = 0;
2017 } 2033 }
2018 2034
2019 if (skb) { 2035 if (likely(skb != NULL)) {
2020 /* Preserve the RSS info in csum & priority */
2021 skb->csum = rss_hi;
2022 skb->priority = rss_lo;
2023
2024 if (eth) 2036 if (eth)
2025 rx_eth(adap, q, skb, ethpad); 2037 rx_eth(adap, q, skb, ethpad);
2026 else { 2038 else {
2027 if (unlikely(r->rss_hdr.opcode == 2039 /* Preserve the RSS info in csum & priority */
2028 CPL_TRACE_PKT)) 2040 skb->csum = rss_hi;
2029 __skb_pull(skb, ethpad); 2041 skb->priority = rss_lo;
2030 2042 ngathered = rx_offload(&adap->tdev, q, skb,
2031 ngathered = rx_offload(&adap->tdev, q, 2043 offload_skbs,
2032 skb, offload_skbs,
2033 ngathered); 2044 ngathered);
2034 } 2045 }
2035 } 2046 }
@@ -2635,25 +2646,15 @@ int t3_sge_alloc_qset(struct adapter *adapter, unsigned int id, int nports,
2635 q->txq[TXQ_ETH].stop_thres = nports * 2646 q->txq[TXQ_ETH].stop_thres = nports *
2636 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3); 2647 flits_to_desc(sgl_len(MAX_SKB_FRAGS + 1) + 3);
2637 2648
2638 if (!is_offload(adapter)) { 2649#if FL0_PG_CHUNK_SIZE > 0
2639#ifdef USE_RX_PAGE 2650 q->fl[0].buf_size = FL0_PG_CHUNK_SIZE;
2640 q->fl[0].buf_size = RX_PAGE_SIZE;
2641#else 2651#else
2642 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + 2 + 2652 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE + sizeof(struct cpl_rx_data);
2643 sizeof(struct cpl_rx_pkt);
2644#endif 2653#endif
2645 q->fl[1].buf_size = MAX_FRAME_SIZE + 2 + 2654 q->fl[0].use_pages = FL0_PG_CHUNK_SIZE > 0;
2646 sizeof(struct cpl_rx_pkt); 2655 q->fl[1].buf_size = is_offload(adapter) ?
2647 } else { 2656 (16 * 1024) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) :
2648#ifdef USE_RX_PAGE 2657 MAX_FRAME_SIZE + 2 + sizeof(struct cpl_rx_pkt);
2649 q->fl[0].buf_size = RX_PAGE_SIZE;
2650#else
2651 q->fl[0].buf_size = SGE_RX_SM_BUF_SIZE +
2652 sizeof(struct cpl_rx_data);
2653#endif
2654 q->fl[1].buf_size = (16 * 1024) -
2655 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2656 }
2657 2658
2658 spin_lock(&adapter->sge.reg_lock); 2659 spin_lock(&adapter->sge.reg_lock);
2659 2660
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index fb485d0a43d8..dd3149d94ba8 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -847,6 +847,64 @@ static int t3_write_flash(struct adapter *adapter, unsigned int addr,
847 return 0; 847 return 0;
848} 848}
849 849
850/**
851 * t3_check_tpsram_version - read the tp sram version
852 * @adapter: the adapter
853 *
854 * Reads the version of the protocol SRAM image currently loaded in the adapter.
855 */
856int t3_check_tpsram_version(struct adapter *adapter)
857{
858 int ret;
859 u32 vers;
860 unsigned int major, minor;
861
862 /* Get version loaded in SRAM */
863 t3_write_reg(adapter, A_TP_EMBED_OP_FIELD0, 0);
864 ret = t3_wait_op_done(adapter, A_TP_EMBED_OP_FIELD0,
865 1, 1, 5, 1);
866 if (ret)
867 return ret;
868
869 vers = t3_read_reg(adapter, A_TP_EMBED_OP_FIELD1);
870
871 major = G_TP_VERSION_MAJOR(vers);
872 minor = G_TP_VERSION_MINOR(vers);
873
874 if (major == TP_VERSION_MAJOR && minor == TP_VERSION_MINOR)
875 return 0;
876
877 return -EINVAL;
878}
879
880/**
881 * t3_check_tpsram - check if provided protocol SRAM
882 * is compatible with this driver
883 * @adapter: the adapter
884 * @tp_sram: the firmware image to write
885 * @size: image size
886 *
887 * Checks if an adapter's tp sram is compatible with the driver.
888 * Returns 0 if the versions are compatible, a negative error otherwise.
889 */
890int t3_check_tpsram(struct adapter *adapter, u8 *tp_sram, unsigned int size)
891{
892 u32 csum;
893 unsigned int i;
894 const u32 *p = (const u32 *)tp_sram;
895
896 /* Verify checksum */
897 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
898 csum += ntohl(p[i]);
899 if (csum != 0xffffffff) {
900 CH_ERR(adapter, "corrupted protocol SRAM image, checksum %u\n",
901 csum);
902 return -EINVAL;
903 }
904
905 return 0;
906}
907
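The checksum loop in t3_check_tpsram() encodes the image-validity rule: the 32-bit big-endian words of the protocol SRAM image must sum to 0xffffffff modulo 2^32. The standalone sketch below demonstrates only that convention; the sample image contents are made up, with the last word chosen so the sum comes out right.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/htonl() */

/* Accept an image only if its 32-bit big-endian words sum to 0xffffffff. */
static int tpsram_ok(const void *img, size_t size)
{
	const uint32_t *p = img;
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i < size / sizeof(csum); i++)
		csum += ntohl(p[i]);
	return csum == 0xffffffff;
}

int main(void)
{
	uint32_t img[4];

	img[0] = htonl(0x12345678);
	img[1] = htonl(0x9abcdef0);
	img[2] = htonl(0x00000042);
	/* last word chosen so the running sum lands exactly on 0xffffffff */
	img[3] = htonl(0xffffffffu - (0x12345678u + 0x9abcdef0u + 0x42u));

	printf("%s\n", tpsram_ok(img, sizeof(img)) ? "image ok" : "image corrupted");
	return 0;
}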
850enum fw_version_type { 908enum fw_version_type {
851 FW_VERSION_N3, 909 FW_VERSION_N3,
852 FW_VERSION_T3 910 FW_VERSION_T3
@@ -921,7 +979,7 @@ static int t3_flash_erase_sectors(struct adapter *adapter, int start, int end)
921/* 979/*
922 * t3_load_fw - download firmware 980 * t3_load_fw - download firmware
923 * @adapter: the adapter 981 * @adapter: the adapter
924 * @fw_data: the firrware image to write 982 * @fw_data: the firmware image to write
925 * @size: image size 983 * @size: image size
926 * 984 *
927 * Write the supplied firmware image to the card's serial flash. 985 * Write the supplied firmware image to the card's serial flash.
@@ -2362,7 +2420,7 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
2362 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64)); 2420 F_TCPCHECKSUMOFFLOAD | V_IPTTL(64));
2363 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) | 2421 t3_write_reg(adap, A_TP_TCP_OPTIONS, V_MTUDEFAULT(576) |
2364 F_MTUENABLE | V_WINDOWSCALEMODE(1) | 2422 F_MTUENABLE | V_WINDOWSCALEMODE(1) |
2365 V_TIMESTAMPSMODE(1) | V_SACKMODE(1) | V_SACKRX(1)); 2423 V_TIMESTAMPSMODE(0) | V_SACKMODE(1) | V_SACKRX(1));
2366 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) | 2424 t3_write_reg(adap, A_TP_DACK_CONFIG, V_AUTOSTATE3(1) |
2367 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) | 2425 V_AUTOSTATE2(1) | V_AUTOSTATE1(0) |
2368 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) | 2426 V_BYTETHRESHOLD(16384) | V_MSSTHRESHOLD(2) |
@@ -2371,16 +2429,18 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
2371 F_IPV6ENABLE | F_NICMODE); 2429 F_IPV6ENABLE | F_NICMODE);
2372 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814); 2430 t3_write_reg(adap, A_TP_TX_RESOURCE_LIMIT, 0x18141814);
2373 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105); 2431 t3_write_reg(adap, A_TP_PARA_REG4, 0x5050105);
2374 t3_set_reg_field(adap, A_TP_PARA_REG6, 2432 t3_set_reg_field(adap, A_TP_PARA_REG6, 0,
2375 adap->params.rev > 0 ? F_ENABLEESND : F_T3A_ENABLEESND, 2433 adap->params.rev > 0 ? F_ENABLEESND :
2376 0); 2434 F_T3A_ENABLEESND);
2377 2435
2378 t3_set_reg_field(adap, A_TP_PC_CONFIG, 2436 t3_set_reg_field(adap, A_TP_PC_CONFIG,
2379 F_ENABLEEPCMDAFULL | F_ENABLEOCSPIFULL, 2437 F_ENABLEEPCMDAFULL,
2380 F_TXDEFERENABLE | F_HEARBEATDACK | F_TXCONGESTIONMODE | 2438 F_ENABLEOCSPIFULL | F_TXDEFERENABLE | F_HEARBEATDACK |
2381 F_RXCONGESTIONMODE); 2439 F_TXCONGESTIONMODE | F_RXCONGESTIONMODE);
2382 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0); 2440 t3_set_reg_field(adap, A_TP_PC_CONFIG2, F_CHDRAFULL, 0);
2383 2441 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1080);
2442 t3_write_reg(adap, A_TP_PROXY_FLOW_CNTL, 1000);
2443
2384 if (adap->params.rev > 0) { 2444 if (adap->params.rev > 0) {
2385 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE); 2445 tp_wr_indirect(adap, A_TP_EGRESS_CONFIG, F_REWRITEFORCETOSIZE);
2386 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO, 2446 t3_set_reg_field(adap, A_TP_PARA_REG3, F_TXPACEAUTO,
@@ -2390,9 +2450,10 @@ static void tp_config(struct adapter *adap, const struct tp_params *p)
2390 } else 2450 } else
2391 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED); 2451 t3_set_reg_field(adap, A_TP_PARA_REG3, 0, F_TXPACEFIXED);
2392 2452
2393 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0x12121212); 2453 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT1, 0);
2394 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0x12121212); 2454 t3_write_reg(adap, A_TP_TX_MOD_QUEUE_WEIGHT0, 0);
2395 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0x1212); 2455 t3_write_reg(adap, A_TP_MOD_CHANNEL_WEIGHT, 0);
2456 t3_write_reg(adap, A_TP_MOD_RATE_LIMIT, 0xf2200000);
2396} 2457}
2397 2458
2398/* Desired TP timer resolution in usec */ 2459/* Desired TP timer resolution in usec */
@@ -2468,6 +2529,7 @@ int t3_tp_set_coalescing_size(struct adapter *adap, unsigned int size, int psh)
2468 val |= F_RXCOALESCEENABLE; 2529 val |= F_RXCOALESCEENABLE;
2469 if (psh) 2530 if (psh)
2470 val |= F_RXCOALESCEPSHEN; 2531 val |= F_RXCOALESCEPSHEN;
2532 size = min(MAX_RX_COALESCING_LEN, size);
2471 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) | 2533 t3_write_reg(adap, A_TP_PARA_REG2, V_RXCOALESCESIZE(size) |
2472 V_MAXRXDATA(MAX_RX_COALESCING_LEN)); 2534 V_MAXRXDATA(MAX_RX_COALESCING_LEN));
2473 } 2535 }
@@ -2496,11 +2558,11 @@ static void __devinit init_mtus(unsigned short mtus[])
2496 * it can accommodate max size TCP/IP headers when SACK and timestamps 2558 * it can accommodate max size TCP/IP headers when SACK and timestamps
2497 * are enabled and still have at least 8 bytes of payload. 2559 * are enabled and still have at least 8 bytes of payload.
2498 */ 2560 */
2499 mtus[0] = 88; 2561 mtus[0] = 88;
2500 mtus[1] = 256; 2562 mtus[1] = 88;
2501 mtus[2] = 512; 2563 mtus[2] = 256;
2502 mtus[3] = 576; 2564 mtus[3] = 512;
2503 mtus[4] = 808; 2565 mtus[4] = 576;
2504 mtus[5] = 1024; 2566 mtus[5] = 1024;
2505 mtus[6] = 1280; 2567 mtus[6] = 1280;
2506 mtus[7] = 1492; 2568 mtus[7] = 1492;
@@ -2682,6 +2744,34 @@ static void ulp_config(struct adapter *adap, const struct tp_params *p)
2682 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff); 2744 t3_write_reg(adap, A_ULPRX_TDDP_TAGMASK, 0xffffffff);
2683} 2745}
2684 2746
2747/**
2748 * t3_set_proto_sram - set the contents of the protocol sram
2749 * @adapter: the adapter
2750 * @data: the protocol image
2751 *
2752 * Write the contents of the protocol SRAM.
2753 */
2754int t3_set_proto_sram(struct adapter *adap, u8 *data)
2755{
2756 int i;
2757 u32 *buf = (u32 *)data;
2758
2759 for (i = 0; i < PROTO_SRAM_LINES; i++) {
2760 t3_write_reg(adap, A_TP_EMBED_OP_FIELD5, cpu_to_be32(*buf++));
2761 t3_write_reg(adap, A_TP_EMBED_OP_FIELD4, cpu_to_be32(*buf++));
2762 t3_write_reg(adap, A_TP_EMBED_OP_FIELD3, cpu_to_be32(*buf++));
2763 t3_write_reg(adap, A_TP_EMBED_OP_FIELD2, cpu_to_be32(*buf++));
2764 t3_write_reg(adap, A_TP_EMBED_OP_FIELD1, cpu_to_be32(*buf++));
2765
2766 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, i << 1 | 1 << 31);
2767 if (t3_wait_op_done(adap, A_TP_EMBED_OP_FIELD0, 1, 1, 5, 1))
2768 return -EIO;
2769 }
2770 t3_write_reg(adap, A_TP_EMBED_OP_FIELD0, 0);
2771
2772 return 0;
2773}
2774
2685void t3_config_trace_filter(struct adapter *adapter, 2775void t3_config_trace_filter(struct adapter *adapter,
2686 const struct trace_params *tp, int filter_index, 2776 const struct trace_params *tp, int filter_index,
2687 int invert, int enable) 2777 int invert, int enable)
@@ -2802,7 +2892,7 @@ static void init_hw_for_avail_ports(struct adapter *adap, int nports)
2802 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0); 2892 t3_set_reg_field(adap, A_ULPTX_CONFIG, F_CFG_RR_ARB, 0);
2803 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN | 2893 t3_write_reg(adap, A_MPS_CFG, F_TPRXPORTEN | F_TPTXPORT0EN |
2804 F_PORT0ACTIVE | F_ENFORCEPKT); 2894 F_PORT0ACTIVE | F_ENFORCEPKT);
2805 t3_write_reg(adap, A_PM1_TX_CFG, 0xc000c000); 2895 t3_write_reg(adap, A_PM1_TX_CFG, 0xffffffff);
2806 } else { 2896 } else {
2807 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN); 2897 t3_set_reg_field(adap, A_ULPRX_CTL, 0, F_ROUND_ROBIN);
2808 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB); 2898 t3_set_reg_field(adap, A_ULPTX_CONFIG, 0, F_CFG_RR_ARB);
@@ -3097,7 +3187,7 @@ int t3_init_hw(struct adapter *adapter, u32 fw_params)
3097 else 3187 else
3098 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN); 3188 t3_set_reg_field(adapter, A_PCIX_CFG, 0, F_CLIDECEN);
3099 3189
3100 t3_write_reg(adapter, A_PM1_RX_CFG, 0xf000f000); 3190 t3_write_reg(adapter, A_PM1_RX_CFG, 0xffffffff);
3101 init_hw_for_avail_ports(adapter, adapter->params.nports); 3191 init_hw_for_avail_ports(adapter, adapter->params.nports);
3102 t3_sge_init(adapter, &adapter->params.sge); 3192 t3_sge_init(adapter, &adapter->params.sge);
3103 3193
diff --git a/drivers/net/cxgb3/version.h b/drivers/net/cxgb3/version.h
index b112317f033e..8eddd23a3a51 100644
--- a/drivers/net/cxgb3/version.h
+++ b/drivers/net/cxgb3/version.h
@@ -39,6 +39,6 @@
39 39
40/* Firmware version */ 40/* Firmware version */
41#define FW_VERSION_MAJOR 4 41#define FW_VERSION_MAJOR 4
42#define FW_VERSION_MINOR 0 42#define FW_VERSION_MINOR 1
43#define FW_VERSION_MICRO 0 43#define FW_VERSION_MICRO 0
44#endif /* __CHELSIO_VERSION_H */ 44#endif /* __CHELSIO_VERSION_H */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index c0f81b5a30fb..abaf3ac94936 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -39,7 +39,7 @@
39#include <asm/io.h> 39#include <asm/io.h>
40 40
41#define DRV_NAME "ehea" 41#define DRV_NAME "ehea"
42#define DRV_VERSION "EHEA_0064" 42#define DRV_VERSION "EHEA_0065"
43 43
44#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \ 44#define EHEA_MSG_DEFAULT (NETIF_MSG_LINK | NETIF_MSG_TIMER \
45 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR) 45 | NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
@@ -136,10 +136,10 @@ void ehea_dump(void *adr, int len, char *msg);
136 (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff)) 136 (0xffffffffffffffffULL >> ((64 - (mask)) & 0xffff))
137 137
138#define EHEA_BMASK_SET(mask, value) \ 138#define EHEA_BMASK_SET(mask, value) \
139 ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask)) 139 ((EHEA_BMASK_MASK(mask) & ((u64)(value))) << EHEA_BMASK_SHIFTPOS(mask))
140 140
141#define EHEA_BMASK_GET(mask, value) \ 141#define EHEA_BMASK_GET(mask, value) \
142 (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask))) 142 (EHEA_BMASK_MASK(mask) & (((u64)(value)) >> EHEA_BMASK_SHIFTPOS(mask)))
143 143
144/* 144/*
145 * Generic ehea page 145 * Generic ehea page
@@ -190,7 +190,7 @@ struct ehea_av;
190 * Queue attributes passed to ehea_create_qp() 190 * Queue attributes passed to ehea_create_qp()
191 */ 191 */
192struct ehea_qp_init_attr { 192struct ehea_qp_init_attr {
193 /* input parameter */ 193 /* input parameter */
194 u32 qp_token; /* queue token */ 194 u32 qp_token; /* queue token */
195 u8 low_lat_rq1; 195 u8 low_lat_rq1;
196 u8 signalingtype; /* cqe generation flag */ 196 u8 signalingtype; /* cqe generation flag */
@@ -212,7 +212,7 @@ struct ehea_qp_init_attr {
212 u64 recv_cq_handle; 212 u64 recv_cq_handle;
213 u64 aff_eq_handle; 213 u64 aff_eq_handle;
214 214
215 /* output parameter */ 215 /* output parameter */
216 u32 qp_nr; 216 u32 qp_nr;
217 u16 act_nr_send_wqes; 217 u16 act_nr_send_wqes;
218 u16 act_nr_rwqes_rq1; 218 u16 act_nr_rwqes_rq1;
@@ -279,12 +279,12 @@ struct ehea_qp {
279 * Completion Queue attributes 279 * Completion Queue attributes
280 */ 280 */
281struct ehea_cq_attr { 281struct ehea_cq_attr {
282 /* input parameter */ 282 /* input parameter */
283 u32 max_nr_of_cqes; 283 u32 max_nr_of_cqes;
284 u32 cq_token; 284 u32 cq_token;
285 u64 eq_handle; 285 u64 eq_handle;
286 286
287 /* output parameter */ 287 /* output parameter */
288 u32 act_nr_of_cqes; 288 u32 act_nr_of_cqes;
289 u32 nr_pages; 289 u32 nr_pages;
290}; 290};
diff --git a/drivers/net/ehea/ehea_hw.h b/drivers/net/ehea/ehea_hw.h
index 1246757f2c22..1af7ca499ec5 100644
--- a/drivers/net/ehea/ehea_hw.h
+++ b/drivers/net/ehea/ehea_hw.h
@@ -211,34 +211,34 @@ static inline void epa_store_acc(struct h_epa epa, u32 offset, u64 value)
211} 211}
212 212
213#define epa_store_eq(epa, offset, value)\ 213#define epa_store_eq(epa, offset, value)\
214 epa_store(epa, EQTEMM_OFFSET(offset), value) 214 epa_store(epa, EQTEMM_OFFSET(offset), value)
215#define epa_load_eq(epa, offset)\ 215#define epa_load_eq(epa, offset)\
216 epa_load(epa, EQTEMM_OFFSET(offset)) 216 epa_load(epa, EQTEMM_OFFSET(offset))
217 217
218#define epa_store_cq(epa, offset, value)\ 218#define epa_store_cq(epa, offset, value)\
219 epa_store(epa, CQTEMM_OFFSET(offset), value) 219 epa_store(epa, CQTEMM_OFFSET(offset), value)
220#define epa_load_cq(epa, offset)\ 220#define epa_load_cq(epa, offset)\
221 epa_load(epa, CQTEMM_OFFSET(offset)) 221 epa_load(epa, CQTEMM_OFFSET(offset))
222 222
223#define epa_store_qp(epa, offset, value)\ 223#define epa_store_qp(epa, offset, value)\
224 epa_store(epa, QPTEMM_OFFSET(offset), value) 224 epa_store(epa, QPTEMM_OFFSET(offset), value)
225#define epa_load_qp(epa, offset)\ 225#define epa_load_qp(epa, offset)\
226 epa_load(epa, QPTEMM_OFFSET(offset)) 226 epa_load(epa, QPTEMM_OFFSET(offset))
227 227
228#define epa_store_qped(epa, offset, value)\ 228#define epa_store_qped(epa, offset, value)\
229 epa_store(epa, QPEDMM_OFFSET(offset), value) 229 epa_store(epa, QPEDMM_OFFSET(offset), value)
230#define epa_load_qped(epa, offset)\ 230#define epa_load_qped(epa, offset)\
231 epa_load(epa, QPEDMM_OFFSET(offset)) 231 epa_load(epa, QPEDMM_OFFSET(offset))
232 232
233#define epa_store_mrmw(epa, offset, value)\ 233#define epa_store_mrmw(epa, offset, value)\
234 epa_store(epa, MRMWMM_OFFSET(offset), value) 234 epa_store(epa, MRMWMM_OFFSET(offset), value)
235#define epa_load_mrmw(epa, offset)\ 235#define epa_load_mrmw(epa, offset)\
236 epa_load(epa, MRMWMM_OFFSET(offset)) 236 epa_load(epa, MRMWMM_OFFSET(offset))
237 237
238#define epa_store_base(epa, offset, value)\ 238#define epa_store_base(epa, offset, value)\
239 epa_store(epa, HCAGR_OFFSET(offset), value) 239 epa_store(epa, HCAGR_OFFSET(offset), value)
240#define epa_load_base(epa, offset)\ 240#define epa_load_base(epa, offset)\
241 epa_load(epa, HCAGR_OFFSET(offset)) 241 epa_load(epa, HCAGR_OFFSET(offset))
242 242
243static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes) 243static inline void ehea_update_sqa(struct ehea_qp *qp, u16 nr_wqes)
244{ 244{
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 9e13433a268a..bdb52419dbf5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -81,7 +81,7 @@ MODULE_PARM_DESC(use_mcs, " 0:NAPI, 1:Multiple receive queues, Default = 1 ");
81static int port_name_cnt = 0; 81static int port_name_cnt = 0;
82 82
83static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev, 83static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
84 const struct of_device_id *id); 84 const struct of_device_id *id);
85 85
86static int __devexit ehea_remove(struct ibmebus_dev *dev); 86static int __devexit ehea_remove(struct ibmebus_dev *dev);
87 87
@@ -236,7 +236,7 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
236 236
237 rwqe = ehea_get_next_rwqe(qp, rq_nr); 237 rwqe = ehea_get_next_rwqe(qp, rq_nr);
238 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type) 238 rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
239 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index); 239 | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
240 rwqe->sg_list[0].l_key = pr->recv_mr.lkey; 240 rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
241 rwqe->sg_list[0].vaddr = (u64)skb->data; 241 rwqe->sg_list[0].vaddr = (u64)skb->data;
242 rwqe->sg_list[0].len = packet_size; 242 rwqe->sg_list[0].len = packet_size;
@@ -427,7 +427,7 @@ static struct ehea_cqe *ehea_proc_rwqes(struct net_device *dev,
427 break; 427 break;
428 } 428 }
429 skb_copy_to_linear_data(skb, ((char*)cqe) + 64, 429 skb_copy_to_linear_data(skb, ((char*)cqe) + 64,
430 cqe->num_bytes_transfered - 4); 430 cqe->num_bytes_transfered - 4);
431 ehea_fill_skb(port->netdev, skb, cqe); 431 ehea_fill_skb(port->netdev, skb, cqe);
432 } else if (rq == 2) { /* RQ2 */ 432 } else if (rq == 2) { /* RQ2 */
433 skb = get_skb_by_index(skb_arr_rq2, 433 skb = get_skb_by_index(skb_arr_rq2,
@@ -618,7 +618,7 @@ static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
618 618
619 for (i = 0; i < EHEA_MAX_PORTS; i++) 619 for (i = 0; i < EHEA_MAX_PORTS; i++)
620 if (adapter->port[i]) 620 if (adapter->port[i])
621 if (adapter->port[i]->logical_port_id == logical_port) 621 if (adapter->port[i]->logical_port_id == logical_port)
622 return adapter->port[i]; 622 return adapter->port[i];
623 return NULL; 623 return NULL;
624} 624}
@@ -1695,6 +1695,7 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1695{ 1695{
1696 if (skb->protocol == htons(ETH_P_IP)) { 1696 if (skb->protocol == htons(ETH_P_IP)) {
1697 const struct iphdr *iph = ip_hdr(skb); 1697 const struct iphdr *iph = ip_hdr(skb);
1698
1698 /* IPv4 */ 1699 /* IPv4 */
1699 swqe->tx_control |= EHEA_SWQE_CRC 1700 swqe->tx_control |= EHEA_SWQE_CRC
1700 | EHEA_SWQE_IP_CHECKSUM 1701 | EHEA_SWQE_IP_CHECKSUM
@@ -1705,13 +1706,12 @@ static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
1705 write_ip_start_end(swqe, skb); 1706 write_ip_start_end(swqe, skb);
1706 1707
1707 if (iph->protocol == IPPROTO_UDP) { 1708 if (iph->protocol == IPPROTO_UDP) {
1708 if ((iph->frag_off & IP_MF) || 1709 if ((iph->frag_off & IP_MF)
1709 (iph->frag_off & IP_OFFSET)) 1710 || (iph->frag_off & IP_OFFSET))
1710 /* IP fragment, so don't change cs */ 1711 /* IP fragment, so don't change cs */
1711 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM; 1712 swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
1712 else 1713 else
1713 write_udp_offset_end(swqe, skb); 1714 write_udp_offset_end(swqe, skb);
1714
1715 } else if (iph->protocol == IPPROTO_TCP) { 1715 } else if (iph->protocol == IPPROTO_TCP) {
1716 write_tcp_offset_end(swqe, skb); 1716 write_tcp_offset_end(swqe, skb);
1717 } 1717 }
@@ -1739,6 +1739,7 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1739 1739
1740 if (skb->protocol == htons(ETH_P_IP)) { 1740 if (skb->protocol == htons(ETH_P_IP)) {
1741 const struct iphdr *iph = ip_hdr(skb); 1741 const struct iphdr *iph = ip_hdr(skb);
1742
1742 /* IPv4 */ 1743 /* IPv4 */
1743 write_ip_start_end(swqe, skb); 1744 write_ip_start_end(swqe, skb);
1744 1745
@@ -1751,8 +1752,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
1751 write_tcp_offset_end(swqe, skb); 1752 write_tcp_offset_end(swqe, skb);
1752 1753
1753 } else if (iph->protocol == IPPROTO_UDP) { 1754 } else if (iph->protocol == IPPROTO_UDP) {
1754 if ((iph->frag_off & IP_MF) || 1755 if ((iph->frag_off & IP_MF)
1755 (iph->frag_off & IP_OFFSET)) 1756 || (iph->frag_off & IP_OFFSET))
1756 /* IP fragment, so don't change cs */ 1757 /* IP fragment, so don't change cs */
1757 swqe->tx_control |= EHEA_SWQE_CRC 1758 swqe->tx_control |= EHEA_SWQE_CRC
1758 | EHEA_SWQE_IMM_DATA_PRESENT; 1759 | EHEA_SWQE_IMM_DATA_PRESENT;
@@ -2407,7 +2408,7 @@ static void __devinit logical_port_release(struct device *dev)
2407} 2408}
2408 2409
2409static int ehea_driver_sysfs_add(struct device *dev, 2410static int ehea_driver_sysfs_add(struct device *dev,
2410 struct device_driver *driver) 2411 struct device_driver *driver)
2411{ 2412{
2412 int ret; 2413 int ret;
2413 2414
@@ -2424,7 +2425,7 @@ static int ehea_driver_sysfs_add(struct device *dev,
2424} 2425}
2425 2426
2426static void ehea_driver_sysfs_remove(struct device *dev, 2427static void ehea_driver_sysfs_remove(struct device *dev,
2427 struct device_driver *driver) 2428 struct device_driver *driver)
2428{ 2429{
2429 struct device_driver *drv = driver; 2430 struct device_driver *drv = driver;
2430 2431
@@ -2453,7 +2454,7 @@ static struct device *ehea_register_port(struct ehea_port *port,
2453 } 2454 }
2454 2455
2455 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 2456 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
2456 if (ret) { 2457 if (ret) {
2457 ehea_error("failed to register attributes, ret=%d", ret); 2458 ehea_error("failed to register attributes, ret=%d", ret);
2458 goto out_unreg_of_dev; 2459 goto out_unreg_of_dev;
2459 } 2460 }
@@ -2601,6 +2602,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
2601{ 2602{
2602 struct device_node *lhea_dn; 2603 struct device_node *lhea_dn;
2603 struct device_node *eth_dn = NULL; 2604 struct device_node *eth_dn = NULL;
2605
2604 const u32 *dn_log_port_id; 2606 const u32 *dn_log_port_id;
2605 int i = 0; 2607 int i = 0;
2606 2608
@@ -2608,7 +2610,7 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
2608 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 2610 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2609 2611
2610 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 2612 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
2611 NULL); 2613 NULL);
2612 if (!dn_log_port_id) { 2614 if (!dn_log_port_id) {
2613 ehea_error("bad device node: eth_dn name=%s", 2615 ehea_error("bad device node: eth_dn name=%s",
2614 eth_dn->full_name); 2616 eth_dn->full_name);
@@ -2648,7 +2650,7 @@ static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
2648 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) { 2650 while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {
2649 2651
2650 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 2652 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
2651 NULL); 2653 NULL);
2652 if (dn_log_port_id) 2654 if (dn_log_port_id)
2653 if (*dn_log_port_id == logical_port_id) 2655 if (*dn_log_port_id == logical_port_id)
2654 return eth_dn; 2656 return eth_dn;
@@ -2789,7 +2791,7 @@ static int __devinit ehea_probe_adapter(struct ibmebus_dev *dev,
2789 adapter->ebus_dev = dev; 2791 adapter->ebus_dev = dev;
2790 2792
2791 adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle", 2793 adapter_handle = of_get_property(dev->ofdev.node, "ibm,hea-handle",
2792 NULL); 2794 NULL);
2793 if (adapter_handle) 2795 if (adapter_handle)
2794 adapter->handle = *adapter_handle; 2796 adapter->handle = *adapter_handle;
2795 2797
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index f24a8862977d..29eaa46948b0 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -211,7 +211,7 @@ u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
211 u64 hret; 211 u64 hret;
212 u64 adapter_handle = cq->adapter->handle; 212 u64 adapter_handle = cq->adapter->handle;
213 213
214 /* deregister all previous registered pages */ 214 /* deregister all previous registered pages */
215 hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force); 215 hret = ehea_h_free_resource(adapter_handle, cq->fw_handle, force);
216 if (hret != H_SUCCESS) 216 if (hret != H_SUCCESS)
217 return hret; 217 return hret;
@@ -362,7 +362,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
362 if (hret != H_SUCCESS) { 362 if (hret != H_SUCCESS) {
363 ehea_error("destroy EQ failed"); 363 ehea_error("destroy EQ failed");
364 return -EIO; 364 return -EIO;
365 } 365 }
366 366
367 return 0; 367 return 0;
368} 368}
@@ -507,44 +507,44 @@ out_freemem:
507 507
508u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force) 508u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
509{ 509{
510 u64 hret; 510 u64 hret;
511 struct ehea_qp_init_attr *qp_attr = &qp->init_attr; 511 struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
512 512
513 513
514 ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle); 514 ehea_h_disable_and_get_hea(qp->adapter->handle, qp->fw_handle);
515 hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force); 515 hret = ehea_h_free_resource(qp->adapter->handle, qp->fw_handle, force);
516 if (hret != H_SUCCESS) 516 if (hret != H_SUCCESS)
517 return hret; 517 return hret;
518 518
519 hw_queue_dtor(&qp->hw_squeue); 519 hw_queue_dtor(&qp->hw_squeue);
520 hw_queue_dtor(&qp->hw_rqueue1); 520 hw_queue_dtor(&qp->hw_rqueue1);
521 521
522 if (qp_attr->rq_count > 1) 522 if (qp_attr->rq_count > 1)
523 hw_queue_dtor(&qp->hw_rqueue2); 523 hw_queue_dtor(&qp->hw_rqueue2);
524 if (qp_attr->rq_count > 2) 524 if (qp_attr->rq_count > 2)
525 hw_queue_dtor(&qp->hw_rqueue3); 525 hw_queue_dtor(&qp->hw_rqueue3);
526 kfree(qp); 526 kfree(qp);
527 527
528 return hret; 528 return hret;
529} 529}
530 530
531int ehea_destroy_qp(struct ehea_qp *qp) 531int ehea_destroy_qp(struct ehea_qp *qp)
532{ 532{
533 u64 hret; 533 u64 hret;
534 if (!qp) 534 if (!qp)
535 return 0; 535 return 0;
536 536
537 if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) { 537 if ((hret = ehea_destroy_qp_res(qp, NORMAL_FREE)) == H_R_STATE) {
538 ehea_error_data(qp->adapter, qp->fw_handle); 538 ehea_error_data(qp->adapter, qp->fw_handle);
539 hret = ehea_destroy_qp_res(qp, FORCE_FREE); 539 hret = ehea_destroy_qp_res(qp, FORCE_FREE);
540 } 540 }
541 541
542 if (hret != H_SUCCESS) { 542 if (hret != H_SUCCESS) {
543 ehea_error("destroy QP failed"); 543 ehea_error("destroy QP failed");
544 return -EIO; 544 return -EIO;
545 } 545 }
546 546
547 return 0; 547 return 0;
548} 548}
549 549
550int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr) 550int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
diff --git a/drivers/net/fec_8xx/Kconfig b/drivers/net/fec_8xx/Kconfig
index a84c232395e3..afb34ded26ee 100644
--- a/drivers/net/fec_8xx/Kconfig
+++ b/drivers/net/fec_8xx/Kconfig
@@ -1,6 +1,6 @@
1config FEC_8XX 1config FEC_8XX
2 tristate "Motorola 8xx FEC driver" 2 tristate "Motorola 8xx FEC driver"
3 depends on NET_ETHERNET && 8xx 3 depends on 8XX
4 select MII 4 select MII
5 5
6config FEC_8XX_GENERIC_PHY 6config FEC_8XX_GENERIC_PHY
diff --git a/drivers/net/fs_enet/Kconfig b/drivers/net/fs_enet/Kconfig
index 6aaee67dd4b7..e27ee210b605 100644
--- a/drivers/net/fs_enet/Kconfig
+++ b/drivers/net/fs_enet/Kconfig
@@ -1,6 +1,6 @@
1config FS_ENET 1config FS_ENET
2 tristate "Freescale Ethernet Driver" 2 tristate "Freescale Ethernet Driver"
3 depends on NET_ETHERNET && (CPM1 || CPM2) 3 depends on CPM1 || CPM2
4 select MII 4 select MII
5 5
6config FS_ENET_HAS_SCC 6config FS_ENET_HAS_SCC
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 1b854bf07b09..d7a1a58de766 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -130,6 +130,9 @@ static int gfar_remove(struct platform_device *pdev);
130static void free_skb_resources(struct gfar_private *priv); 130static void free_skb_resources(struct gfar_private *priv);
131static void gfar_set_multi(struct net_device *dev); 131static void gfar_set_multi(struct net_device *dev);
132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr); 132static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr);
133static void gfar_configure_serdes(struct net_device *dev);
134extern int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id, int regnum, u16 value);
135extern int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum);
133#ifdef CONFIG_GFAR_NAPI 136#ifdef CONFIG_GFAR_NAPI
134static int gfar_poll(struct net_device *dev, int *budget); 137static int gfar_poll(struct net_device *dev, int *budget);
135#endif 138#endif
@@ -451,6 +454,9 @@ static int init_phy(struct net_device *dev)
451 454
452 phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface); 455 phydev = phy_connect(dev, phy_id, &adjust_link, 0, interface);
453 456
457 if (interface == PHY_INTERFACE_MODE_SGMII)
458 gfar_configure_serdes(dev);
459
454 if (IS_ERR(phydev)) { 460 if (IS_ERR(phydev)) {
455 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 461 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
456 return PTR_ERR(phydev); 462 return PTR_ERR(phydev);
@@ -465,6 +471,27 @@ static int init_phy(struct net_device *dev)
465 return 0; 471 return 0;
466} 472}
467 473
474static void gfar_configure_serdes(struct net_device *dev)
475{
476 struct gfar_private *priv = netdev_priv(dev);
477 struct gfar_mii __iomem *regs =
478 (void __iomem *)&priv->regs->gfar_mii_regs;
479
480 /* Initialise TBI i/f to communicate with serdes (lynx phy) */
481
482 /* Single clk mode, mii mode off (for serdes communication) */
483 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_TBICON, TBICON_CLK_SELECT);
484
485 /* Supported pause and full-duplex, no half-duplex */
486 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_ADVERTISE,
487 ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
488 ADVERTISE_1000XPSE_ASYM);
489
490 /* ANEG enable, restart ANEG, full duplex mode, speed[1] set */
491 gfar_local_mdio_write(regs, TBIPA_VALUE, MII_BMCR, BMCR_ANENABLE |
492 BMCR_ANRESTART | BMCR_FULLDPLX | BMCR_SPEED1000);
493}
494
468static void init_registers(struct net_device *dev) 495static void init_registers(struct net_device *dev)
469{ 496{
470 struct gfar_private *priv = netdev_priv(dev); 497 struct gfar_private *priv = netdev_priv(dev);
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 39e9e321fcbc..d8e779c102fa 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -136,6 +136,12 @@ extern const char gfar_driver_version[];
136#define MIIMCFG_RESET 0x80000000 136#define MIIMCFG_RESET 0x80000000
137#define MIIMIND_BUSY 0x00000001 137#define MIIMIND_BUSY 0x00000001
138 138
139/* TBI register addresses */
140#define MII_TBICON 0x11
141
142/* TBICON register bit fields */
143#define TBICON_CLK_SELECT 0x0020
144
139/* MAC register bits */ 145/* MAC register bits */
140#define MACCFG1_SOFT_RESET 0x80000000 146#define MACCFG1_SOFT_RESET 0x80000000
141#define MACCFG1_RESET_RX_MC 0x00080000 147#define MACCFG1_RESET_RX_MC 0x00080000
diff --git a/drivers/net/gianfar_mii.c b/drivers/net/gianfar_mii.c
index bcc6b82f4a33..5dd34a1a7b89 100644
--- a/drivers/net/gianfar_mii.c
+++ b/drivers/net/gianfar_mii.c
@@ -43,13 +43,18 @@
43#include "gianfar.h" 43#include "gianfar.h"
44#include "gianfar_mii.h" 44#include "gianfar_mii.h"
45 45
46/* Write value to the PHY at mii_id at register regnum, 46/*
47 * on the bus, waiting until the write is done before returning. 47 * Write value to the PHY at mii_id at register regnum,
48 * All PHY configuration is done through the TSEC1 MIIM regs */ 48 * on the bus attached to the local interface, which may be different from the
49int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) 49 * generic mdio bus (tied to a single interface), waiting until the write is
50 * done before returning. This is helpful in programming interfaces like
51 * the TBI which control interfaces like onchip SERDES and are always tied to
52 * the local mdio pins, which may not be the same as system mdio bus, used for
53 * controlling the external PHYs, for example.
54 */
55int gfar_local_mdio_write(struct gfar_mii *regs, int mii_id,
56 int regnum, u16 value)
50{ 57{
51 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
52
53 /* Set the PHY address and the register address we want to write */ 58 /* Set the PHY address and the register address we want to write */
54 gfar_write(&regs->miimadd, (mii_id << 8) | regnum); 59 gfar_write(&regs->miimadd, (mii_id << 8) | regnum);
55 60
@@ -63,12 +68,19 @@ int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
63 return 0; 68 return 0;
64} 69}
65 70
66/* Read the bus for PHY at addr mii_id, register regnum, and 71/*
67 * return the value. Clears miimcom first. All PHY 72 * Read the bus for PHY at addr mii_id, register regnum, and
68 * configuration has to be done through the TSEC1 MIIM regs */ 73 * return the value. Clears miimcom first. All PHY operation
69int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 74 * done on the bus attached to the local interface,
75 * which may be different from the generic mdio bus.
76 * This is helpful in programming interfaces like
77 * the TBI which, in turn, control interfaces like onchip SERDES
78 * and are always tied to the local mdio pins, which may not be the
79 * same as the system mdio bus used for controlling the external PHYs, for example.
80 */
81int gfar_local_mdio_read(struct gfar_mii *regs, int mii_id, int regnum)
82
70{ 83{
71 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
72 u16 value; 84 u16 value;
73 85
74 /* Set the PHY address and the register address we want to read */ 86 /* Set the PHY address and the register address we want to read */
@@ -88,6 +100,27 @@ int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
88 return value; 100 return value;
89} 101}
90 102
103/* Write value to the PHY at mii_id at register regnum,
104 * on the bus, waiting until the write is done before returning.
105 * All PHY configuration is done through the TSEC1 MIIM regs */
106int gfar_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
107{
108 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
109
110 /* Write to the local MII regs */
111 return(gfar_local_mdio_write(regs, mii_id, regnum, value));
112}
113
114/* Read the bus for PHY at addr mii_id, register regnum, and
115 * return the value. Clears miimcom first. All PHY
116 * configuration has to be done through the TSEC1 MIIM regs */
117int gfar_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
118{
119 struct gfar_mii __iomem *regs = (void __iomem *)bus->priv;
120
121 /* Read the local MII regs */
122 return(gfar_local_mdio_read(regs, mii_id, regnum));
123}
91 124
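The comments above explain why the register-level gfar_local_mdio_read()/write() helpers were split out of the mii_bus callbacks: the callbacks now just forward to them, while code such as gfar_configure_serdes() can call the same helpers directly against the TBI register block instead of the bus's own one. A small userspace sketch of that layering follows; every type, name and register value in it is invented for the illustration.

#include <stdio.h>
#include <stdint.h>

struct mii_regs {                /* stand-in for struct gfar_mii */
	uint32_t miimadd;
	uint32_t miimcon;
};

struct mii_bus_sketch {          /* stand-in for struct mii_bus */
	void *priv;              /* the bus's own register block */
};

/* register-level helper: works on whatever register block it is handed */
static int local_mdio_write(struct mii_regs *regs, int phy_id, int regnum,
			    uint16_t value)
{
	regs->miimadd = (uint32_t)(phy_id << 8) | (uint32_t)regnum;
	regs->miimcon = value;
	return 0;
}

/* generic bus callback: forwards to the helper with the bus's registers */
static int mdio_write(struct mii_bus_sketch *bus, int phy_id, int regnum,
		      uint16_t value)
{
	return local_mdio_write(bus->priv, phy_id, regnum, value);
}

int main(void)
{
	struct mii_regs ext_phy_regs = { 0, 0 }, tbi_regs = { 0, 0 };
	struct mii_bus_sketch bus = { &ext_phy_regs };

	mdio_write(&bus, 1, 0x00, 0x1200);               /* external PHY, via the bus */
	local_mdio_write(&tbi_regs, 0x1f, 0x11, 0x0020); /* TBI block, called directly */

	printf("%x %x\n", (unsigned)ext_phy_regs.miimcon,
	       (unsigned)tbi_regs.miimcon);
	return 0;
}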
92/* Reset the MIIM registers, and wait for the bus to free */ 125/* Reset the MIIM registers, and wait for the bus to free */
93int gfar_mdio_reset(struct mii_bus *bus) 126int gfar_mdio_reset(struct mii_bus *bus)
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 741780e14b2c..efbae4b8398e 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -86,93 +86,36 @@
86#include <linux/dma-mapping.h> 86#include <linux/dma-mapping.h>
87 87
88#include <asm/io.h> 88#include <asm/io.h>
89#include <asm/pgtable.h>
90#include <asm/irq.h> 89#include <asm/irq.h>
91#include <asm/pdc.h> 90#include <asm/pdc.h>
92#include <asm/cache.h>
93#include <asm/parisc-device.h> 91#include <asm/parisc-device.h>
94 92
95#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30" 93#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
96 94
97/* DEBUG flags
98 */
99
100#define DEB_INIT 0x0001
101#define DEB_PROBE 0x0002
102#define DEB_SERIOUS 0x0004
103#define DEB_ERRORS 0x0008
104#define DEB_MULTI 0x0010
105#define DEB_TDR 0x0020
106#define DEB_OPEN 0x0040
107#define DEB_RESET 0x0080
108#define DEB_ADDCMD 0x0100
109#define DEB_STATUS 0x0200
110#define DEB_STARTTX 0x0400
111#define DEB_RXADDR 0x0800
112#define DEB_TXADDR 0x1000
113#define DEB_RXFRAME 0x2000
114#define DEB_INTS 0x4000
115#define DEB_STRUCT 0x8000
116#define DEB_ANY 0xffff
117
118
119#define DEB(x,y) if (i596_debug & (x)) { y; }
120
121
122#define CHECK_WBACK(priv, addr,len) \
123 do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_TO_DEVICE); } while (0)
124
125#define CHECK_INV(priv, addr,len) \
126 do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_FROM_DEVICE); } while(0)
127
128#define CHECK_WBACK_INV(priv, addr,len) \
129 do { dma_cache_sync((priv)->dev, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
130
131
132#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/ 95#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
133#define PA_CPU_PORT_L_ACCESS 4 96#define PA_CPU_PORT_L_ACCESS 4
134#define PA_CHANNEL_ATTENTION 8 97#define PA_CHANNEL_ATTENTION 8
135 98
99#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
136 100
137/* 101#define DMA_ALLOC dma_alloc_noncoherent
138 * Define various macros for Channel Attention, word swapping etc., dependent 102#define DMA_FREE dma_free_noncoherent
139 * on architecture. MVME and BVME are 680x0 based, otherwise it is Intel. 103#define DMA_WBACK(ndev, addr, len) \
140 */ 104 do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
141 105
142#ifdef __BIG_ENDIAN 106#define DMA_INV(ndev, addr, len) \
143#define WSWAPrfd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16)) 107 do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
144#define WSWAPrbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
145#define WSWAPiscp(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
146#define WSWAPscb(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
147#define WSWAPcmd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
148#define WSWAPtbd(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
149#define WSWAPchar(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
150#define ISCP_BUSY 0x00010000
151#define MACH_IS_APRICOT 0
152#else
153#define WSWAPrfd(x) ((struct i596_rfd *)(x))
154#define WSWAPrbd(x) ((struct i596_rbd *)(x))
155#define WSWAPiscp(x) ((struct i596_iscp *)(x))
156#define WSWAPscb(x) ((struct i596_scb *)(x))
157#define WSWAPcmd(x) ((struct i596_cmd *)(x))
158#define WSWAPtbd(x) ((struct i596_tbd *)(x))
159#define WSWAPchar(x) ((char *)(x))
160#define ISCP_BUSY 0x0001
161#define MACH_IS_APRICOT 1
162#endif
163 108
164/* 109#define DMA_WBACK_INV(ndev, addr, len) \
165 * The MPU_PORT command allows direct access to the 82596. With PORT access 110 do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
166 * the following commands are available (p5-18). The 32-bit port command 111
167 * must be word-swapped with the most significant word written first. 112#define SYSBUS 0x0000006c;
168 * This only applies to VME boards. 113
169 */ 114/* big endian CPU, 82596 "big" endian mode */
170#define PORT_RESET 0x00 /* reset 82596 */ 115#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
171#define PORT_SELFTEST 0x01 /* selftest */ 116#define SWAP16(x) (x)
172#define PORT_ALTSCP 0x02 /* alternate SCB address */
173#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
174 117
175static int i596_debug = (DEB_SERIOUS|DEB_PROBE); 118#include "lib82596.c"
176 119
177MODULE_AUTHOR("Richard Hirst"); 120MODULE_AUTHOR("Richard Hirst");
178MODULE_DESCRIPTION("i82596 driver"); 121MODULE_DESCRIPTION("i82596 driver");
@@ -180,255 +123,15 @@ MODULE_LICENSE("GPL");
180module_param(i596_debug, int, 0); 123module_param(i596_debug, int, 0);
181MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask"); 124MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
182 125
183/* Copy frames shorter than rx_copybreak, otherwise pass on up in 126static inline void ca(struct net_device *dev)
184 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
185 */
186static int rx_copybreak = 100;
187
188#define MAX_DRIVERS 4 /* max count of drivers */
189
190#define PKT_BUF_SZ 1536
191#define MAX_MC_CNT 64
192
193#define I596_NULL ((u32)0xffffffff)
194
195#define CMD_EOL 0x8000 /* The last command of the list, stop. */
196#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
197#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
198
199#define CMD_FLEX 0x0008 /* Enable flexible memory model */
200
201enum commands {
202 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
203 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
204};
205
206#define STAT_C 0x8000 /* Set to 0 after execution */
207#define STAT_B 0x4000 /* Command being executed */
208#define STAT_OK 0x2000 /* Command executed ok */
209#define STAT_A 0x1000 /* Command aborted */
210
211#define CUC_START 0x0100
212#define CUC_RESUME 0x0200
213#define CUC_SUSPEND 0x0300
214#define CUC_ABORT 0x0400
215#define RX_START 0x0010
216#define RX_RESUME 0x0020
217#define RX_SUSPEND 0x0030
218#define RX_ABORT 0x0040
219
220#define TX_TIMEOUT 5
221
222#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
223
224
225struct i596_reg {
226 unsigned short porthi;
227 unsigned short portlo;
228 u32 ca;
229};
230
231#define EOF 0x8000
232#define SIZE_MASK 0x3fff
233
234struct i596_tbd {
235 unsigned short size;
236 unsigned short pad;
237 dma_addr_t next;
238 dma_addr_t data;
239 u32 cache_pad[5]; /* Total 32 bytes... */
240};
241
242/* The command structure has two 'next' pointers; v_next is the address of
243 * the next command as seen by the CPU, b_next is the address of the next
244 * command as seen by the 82596. The b_next pointer, as used by the 82596
245 * always references the status field of the next command, rather than the
246 * v_next field, because the 82596 is unaware of v_next. It may seem more
247 * logical to put v_next at the end of the structure, but we cannot do that
248 * because the 82596 expects other fields to be there, depending on command
249 * type.
250 */
251
252struct i596_cmd {
253 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
254 unsigned short status;
255 unsigned short command;
256 dma_addr_t b_next; /* Address from i596 viewpoint */
257};
258
259struct tx_cmd {
260 struct i596_cmd cmd;
261 dma_addr_t tbd;
262 unsigned short size;
263 unsigned short pad;
264 struct sk_buff *skb; /* So we can free it after tx */
265 dma_addr_t dma_addr;
266#ifdef __LP64__
267 u32 cache_pad[6]; /* Total 64 bytes... */
268#else
269 u32 cache_pad[1]; /* Total 32 bytes... */
270#endif
271};
272
273struct tdr_cmd {
274 struct i596_cmd cmd;
275 unsigned short status;
276 unsigned short pad;
277};
278
279struct mc_cmd {
280 struct i596_cmd cmd;
281 short mc_cnt;
282 char mc_addrs[MAX_MC_CNT*6];
283};
284
285struct sa_cmd {
286 struct i596_cmd cmd;
287 char eth_addr[8];
288};
289
290struct cf_cmd {
291 struct i596_cmd cmd;
292 char i596_config[16];
293};
294
295struct i596_rfd {
296 unsigned short stat;
297 unsigned short cmd;
298 dma_addr_t b_next; /* Address from i596 viewpoint */
299 dma_addr_t rbd;
300 unsigned short count;
301 unsigned short size;
302 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
303 struct i596_rfd *v_prev;
304#ifndef __LP64__
305 u32 cache_pad[2]; /* Total 32 bytes... */
306#endif
307};
308
309struct i596_rbd {
310 /* hardware data */
311 unsigned short count;
312 unsigned short zero1;
313 dma_addr_t b_next;
314 dma_addr_t b_data; /* Address from i596 viewpoint */
315 unsigned short size;
316 unsigned short zero2;
317 /* driver data */
318 struct sk_buff *skb;
319 struct i596_rbd *v_next;
320 dma_addr_t b_addr; /* This rbd addr from i596 view */
321 unsigned char *v_data; /* Address from CPUs viewpoint */
322 /* Total 32 bytes... */
323#ifdef __LP64__
324 u32 cache_pad[4];
325#endif
326};
327
328/* These values are chosen so struct i596_private fits in one page... */
329
330#define TX_RING_SIZE 32
331#define RX_RING_SIZE 16
332
333struct i596_scb {
334 unsigned short status;
335 unsigned short command;
336 dma_addr_t cmd;
337 dma_addr_t rfd;
338 u32 crc_err;
339 u32 align_err;
340 u32 resource_err;
341 u32 over_err;
342 u32 rcvdt_err;
343 u32 short_err;
344 unsigned short t_on;
345 unsigned short t_off;
346};
347
348struct i596_iscp {
349 u32 stat;
350 dma_addr_t scb;
351};
352
353struct i596_scp {
354 u32 sysbus;
355 u32 pad;
356 dma_addr_t iscp;
357};
358
359struct i596_private {
360 volatile struct i596_scp scp __attribute__((aligned(32)));
361 volatile struct i596_iscp iscp __attribute__((aligned(32)));
362 volatile struct i596_scb scb __attribute__((aligned(32)));
363 struct sa_cmd sa_cmd __attribute__((aligned(32)));
364 struct cf_cmd cf_cmd __attribute__((aligned(32)));
365 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
366 struct mc_cmd mc_cmd __attribute__((aligned(32)));
367 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
368 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
369 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
370 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
371 u32 stat;
372 int last_restart;
373 struct i596_rfd *rfd_head;
374 struct i596_rbd *rbd_head;
375 struct i596_cmd *cmd_tail;
376 struct i596_cmd *cmd_head;
377 int cmd_backlog;
378 u32 last_cmd;
379 struct net_device_stats stats;
380 int next_tx_cmd;
381 int options;
382 spinlock_t lock;
383 dma_addr_t dma_addr;
384 struct device *dev;
385};
386
387static const char init_setup[] =
388{
389 0x8E, /* length, prefetch on */
390 0xC8, /* fifo to 8, monitor off */
391 0x80, /* don't save bad frames */
392 0x2E, /* No source address insertion, 8 byte preamble */
393 0x00, /* priority and backoff defaults */
394 0x60, /* interframe spacing */
395 0x00, /* slot time LSB */
396 0xf2, /* slot time and retries */
397 0x00, /* promiscuous mode */
398 0x00, /* collision detect */
399 0x40, /* minimum frame length */
400 0xff,
401 0x00,
402 0x7f /* *multi IA */ };
403
404static int i596_open(struct net_device *dev);
405static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
406static irqreturn_t i596_interrupt(int irq, void *dev_id);
407static int i596_close(struct net_device *dev);
408static struct net_device_stats *i596_get_stats(struct net_device *dev);
409static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
410static void i596_tx_timeout (struct net_device *dev);
411static void print_eth(unsigned char *buf, char *str);
412static void set_multicast_list(struct net_device *dev);
413
414static int rx_ring_size = RX_RING_SIZE;
415static int ticks_limit = 100;
416static int max_cmd_backlog = TX_RING_SIZE-1;
417
418#ifdef CONFIG_NET_POLL_CONTROLLER
419static void i596_poll_controller(struct net_device *dev);
420#endif
421
422
423static inline void CA(struct net_device *dev)
424{ 127{
425 gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION); 128 gsc_writel(0, dev->base_addr + PA_CHANNEL_ATTENTION);
426} 129}
427 130
428 131
429static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
132static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
430{ 133{
431	struct i596_private *lp = dev->priv;
134	struct i596_private *lp = netdev_priv(dev);
432 135
433 u32 v = (u32) (c) | (u32) (x); 136 u32 v = (u32) (c) | (u32) (x);
434 u16 a, b; 137 u16 a, b;
@@ -446,1078 +149,15 @@ static inline void MPU_PORT(struct net_device *dev, int c, dma_addr_t x)
446 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS); 149 gsc_writel(b, dev->base_addr + PA_CPU_PORT_L_ACCESS);
447} 150}
448 151
449
450static inline int wait_istat(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
451{
452 CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
453 while (--delcnt && lp->iscp.stat) {
454 udelay(10);
455 CHECK_INV(lp, &(lp->iscp), sizeof(struct i596_iscp));
456 }
457 if (!delcnt) {
458 printk("%s: %s, iscp.stat %04x, didn't clear\n",
459 dev->name, str, lp->iscp.stat);
460 return -1;
461 }
462 else
463 return 0;
464}
465
466
467static inline int wait_cmd(struct net_device *dev, struct i596_private *lp, int delcnt, char *str)
468{
469 CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
470 while (--delcnt && lp->scb.command) {
471 udelay(10);
472 CHECK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
473 }
474 if (!delcnt) {
475 printk("%s: %s, status %4.4x, cmd %4.4x.\n",
476 dev->name, str, lp->scb.status, lp->scb.command);
477 return -1;
478 }
479 else
480 return 0;
481}
482
483
484static void i596_display_data(struct net_device *dev)
485{
486 struct i596_private *lp = dev->priv;
487 struct i596_cmd *cmd;
488 struct i596_rfd *rfd;
489 struct i596_rbd *rbd;
490
491 printk("lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
492 &lp->scp, lp->scp.sysbus, lp->scp.iscp);
493 printk("iscp at %p, iscp.stat = %08x, .scb = %08x\n",
494 &lp->iscp, lp->iscp.stat, lp->iscp.scb);
495 printk("scb at %p, scb.status = %04x, .command = %04x,"
496 " .cmd = %08x, .rfd = %08x\n",
497 &lp->scb, lp->scb.status, lp->scb.command,
498 lp->scb.cmd, lp->scb.rfd);
499 printk(" errors: crc %x, align %x, resource %x,"
500 " over %x, rcvdt %x, short %x\n",
501 lp->scb.crc_err, lp->scb.align_err, lp->scb.resource_err,
502 lp->scb.over_err, lp->scb.rcvdt_err, lp->scb.short_err);
503 cmd = lp->cmd_head;
504 while (cmd != NULL) {
505 printk("cmd at %p, .status = %04x, .command = %04x, .b_next = %08x\n",
506 cmd, cmd->status, cmd->command, cmd->b_next);
507 cmd = cmd->v_next;
508 }
509 rfd = lp->rfd_head;
510 printk("rfd_head = %p\n", rfd);
511 do {
512 printk(" %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
513 " count %04x\n",
514 rfd, rfd->stat, rfd->cmd, rfd->b_next, rfd->rbd,
515 rfd->count);
516 rfd = rfd->v_next;
517 } while (rfd != lp->rfd_head);
518 rbd = lp->rbd_head;
519 printk("rbd_head = %p\n", rbd);
520 do {
521 printk(" %p .count %04x, b_next %08x, b_data %08x, size %04x\n",
522 rbd, rbd->count, rbd->b_next, rbd->b_data, rbd->size);
523 rbd = rbd->v_next;
524 } while (rbd != lp->rbd_head);
525 CHECK_INV(lp, lp, sizeof(struct i596_private));
526}
527
528
529#if defined(ENABLE_MVME16x_NET) || defined(ENABLE_BVME6000_NET)
530static void i596_error(int irq, void *dev_id)
531{
532 struct net_device *dev = dev_id;
533 volatile unsigned char *pcc2 = (unsigned char *) 0xfff42000;
534
535 pcc2[0x28] = 1;
536 pcc2[0x2b] = 0x1d;
537 printk("%s: Error interrupt\n", dev->name);
538 i596_display_data(dev);
539}
540#endif
541
542#define virt_to_dma(lp,v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)(lp)))
543
544static inline void init_rx_bufs(struct net_device *dev)
545{
546 struct i596_private *lp = dev->priv;
547 int i;
548 struct i596_rfd *rfd;
549 struct i596_rbd *rbd;
550
551 /* First build the Receive Buffer Descriptor List */
552
553 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
554 dma_addr_t dma_addr;
555 struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 4);
556
557 if (skb == NULL)
558 panic("%s: alloc_skb() failed", __FILE__);
559 skb_reserve(skb, 2);
560 dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
561 DMA_FROM_DEVICE);
562 skb->dev = dev;
563 rbd->v_next = rbd+1;
564 rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
565 rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
566 rbd->skb = skb;
567 rbd->v_data = skb->data;
568 rbd->b_data = WSWAPchar(dma_addr);
569 rbd->size = PKT_BUF_SZ;
570 }
571 lp->rbd_head = lp->rbds;
572 rbd = lp->rbds + rx_ring_size - 1;
573 rbd->v_next = lp->rbds;
574 rbd->b_next = WSWAPrbd(virt_to_dma(lp,lp->rbds));
575
576 /* Now build the Receive Frame Descriptor List */
577
578 for (i = 0, rfd = lp->rfds; i < rx_ring_size; i++, rfd++) {
579 rfd->rbd = I596_NULL;
580 rfd->v_next = rfd+1;
581 rfd->v_prev = rfd-1;
582 rfd->b_next = WSWAPrfd(virt_to_dma(lp,rfd+1));
583 rfd->cmd = CMD_FLEX;
584 }
585 lp->rfd_head = lp->rfds;
586 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
587 rfd = lp->rfds;
588 rfd->rbd = WSWAPrbd(virt_to_dma(lp,lp->rbd_head));
589 rfd->v_prev = lp->rfds + rx_ring_size - 1;
590 rfd = lp->rfds + rx_ring_size - 1;
591 rfd->v_next = lp->rfds;
592 rfd->b_next = WSWAPrfd(virt_to_dma(lp,lp->rfds));
593 rfd->cmd = CMD_EOL|CMD_FLEX;
594
595 CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
596}
597
598static inline void remove_rx_bufs(struct net_device *dev)
599{
600 struct i596_private *lp = dev->priv;
601 struct i596_rbd *rbd;
602 int i;
603
604 for (i = 0, rbd = lp->rbds; i < rx_ring_size; i++, rbd++) {
605 if (rbd->skb == NULL)
606 break;
607 dma_unmap_single(lp->dev,
608 (dma_addr_t)WSWAPchar(rbd->b_data),
609 PKT_BUF_SZ, DMA_FROM_DEVICE);
610 dev_kfree_skb(rbd->skb);
611 }
612}
613
614
615static void rebuild_rx_bufs(struct net_device *dev)
616{
617 struct i596_private *lp = dev->priv;
618 int i;
619
620 /* Ensure rx frame/buffer descriptors are tidy */
621
622 for (i = 0; i < rx_ring_size; i++) {
623 lp->rfds[i].rbd = I596_NULL;
624 lp->rfds[i].cmd = CMD_FLEX;
625 }
626 lp->rfds[rx_ring_size-1].cmd = CMD_EOL|CMD_FLEX;
627 lp->rfd_head = lp->rfds;
628 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
629 lp->rbd_head = lp->rbds;
630 lp->rfds[0].rbd = WSWAPrbd(virt_to_dma(lp,lp->rbds));
631
632 CHECK_WBACK_INV(lp, lp, sizeof(struct i596_private));
633}
634
635
636static int init_i596_mem(struct net_device *dev)
637{
638 struct i596_private *lp = dev->priv;
639 unsigned long flags;
640
641 disable_irq(dev->irq); /* disable IRQs from LAN */
642 DEB(DEB_INIT,
643 printk("RESET 82596 port: %lx (with IRQ %d disabled)\n",
644 (dev->base_addr + PA_I82596_RESET),
645 dev->irq));
646
647 gsc_writel(0, (dev->base_addr + PA_I82596_RESET)); /* Hard Reset */
648 udelay(100); /* Wait 100us - seems to help */
649
650 /* change the scp address */
651
652 lp->last_cmd = jiffies;
653
654
655 lp->scp.sysbus = 0x0000006c;
656 lp->scp.iscp = WSWAPiscp(virt_to_dma(lp,&(lp->iscp)));
657 lp->iscp.scb = WSWAPscb(virt_to_dma(lp,&(lp->scb)));
658 lp->iscp.stat = ISCP_BUSY;
659 lp->cmd_backlog = 0;
660
661 lp->cmd_head = NULL;
662 lp->scb.cmd = I596_NULL;
663
664 DEB(DEB_INIT, printk("%s: starting i82596.\n", dev->name));
665
666 CHECK_WBACK(lp, &(lp->scp), sizeof(struct i596_scp));
667 CHECK_WBACK(lp, &(lp->iscp), sizeof(struct i596_iscp));
668
669 MPU_PORT(dev, PORT_ALTSCP, virt_to_dma(lp,&lp->scp));
670
671 CA(dev);
672
673 if (wait_istat(dev, lp, 1000, "initialization timed out"))
674 goto failed;
675 DEB(DEB_INIT, printk("%s: i82596 initialization successful\n", dev->name));
676
677 /* Ensure rx frame/buffer descriptors are tidy */
678 rebuild_rx_bufs(dev);
679
680 lp->scb.command = 0;
681 CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
682
683 enable_irq(dev->irq); /* enable IRQs from LAN */
684
685 DEB(DEB_INIT, printk("%s: queuing CmdConfigure\n", dev->name));
686 memcpy(lp->cf_cmd.i596_config, init_setup, sizeof(init_setup));
687 lp->cf_cmd.cmd.command = CmdConfigure;
688 CHECK_WBACK(lp, &(lp->cf_cmd), sizeof(struct cf_cmd));
689 i596_add_cmd(dev, &lp->cf_cmd.cmd);
690
691 DEB(DEB_INIT, printk("%s: queuing CmdSASetup\n", dev->name));
692 memcpy(lp->sa_cmd.eth_addr, dev->dev_addr, 6);
693 lp->sa_cmd.cmd.command = CmdSASetup;
694 CHECK_WBACK(lp, &(lp->sa_cmd), sizeof(struct sa_cmd));
695 i596_add_cmd(dev, &lp->sa_cmd.cmd);
696
697 DEB(DEB_INIT, printk("%s: queuing CmdTDR\n", dev->name));
698 lp->tdr_cmd.cmd.command = CmdTDR;
699 CHECK_WBACK(lp, &(lp->tdr_cmd), sizeof(struct tdr_cmd));
700 i596_add_cmd(dev, &lp->tdr_cmd.cmd);
701
702 spin_lock_irqsave (&lp->lock, flags);
703
704 if (wait_cmd(dev, lp, 1000, "timed out waiting to issue RX_START")) {
705 spin_unlock_irqrestore (&lp->lock, flags);
706 goto failed;
707 }
708 DEB(DEB_INIT, printk("%s: Issuing RX_START\n", dev->name));
709 lp->scb.command = RX_START;
710 lp->scb.rfd = WSWAPrfd(virt_to_dma(lp,lp->rfds));
711 CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
712
713 CA(dev);
714
715 spin_unlock_irqrestore (&lp->lock, flags);
716
717 if (wait_cmd(dev, lp, 1000, "RX_START not processed"))
718 goto failed;
719 DEB(DEB_INIT, printk("%s: Receive unit started OK\n", dev->name));
720
721 return 0;
722
723failed:
724 printk("%s: Failed to initialise 82596\n", dev->name);
725 MPU_PORT(dev, PORT_RESET, 0);
726 return -1;
727}
728
729
730static inline int i596_rx(struct net_device *dev)
731{
732 struct i596_private *lp = dev->priv;
733 struct i596_rfd *rfd;
734 struct i596_rbd *rbd;
735 int frames = 0;
736
737 DEB(DEB_RXFRAME, printk("i596_rx(), rfd_head %p, rbd_head %p\n",
738 lp->rfd_head, lp->rbd_head));
739
740
741 rfd = lp->rfd_head; /* Ref next frame to check */
742
743 CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
744 while ((rfd->stat) & STAT_C) { /* Loop while complete frames */
745 if (rfd->rbd == I596_NULL)
746 rbd = NULL;
747 else if (rfd->rbd == lp->rbd_head->b_addr) {
748 rbd = lp->rbd_head;
749 CHECK_INV(lp, rbd, sizeof(struct i596_rbd));
750 }
751 else {
752 printk("%s: rbd chain broken!\n", dev->name);
753 /* XXX Now what? */
754 rbd = NULL;
755 }
756 DEB(DEB_RXFRAME, printk(" rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
757 rfd, rfd->rbd, rfd->stat));
758
759 if (rbd != NULL && ((rfd->stat) & STAT_OK)) {
760 /* a good frame */
761 int pkt_len = rbd->count & 0x3fff;
762 struct sk_buff *skb = rbd->skb;
763 int rx_in_place = 0;
764
765 DEB(DEB_RXADDR,print_eth(rbd->v_data, "received"));
766 frames++;
767
768 /* Check if the packet is long enough to just accept
769 * without copying to a properly sized skbuff.
770 */
771
772 if (pkt_len > rx_copybreak) {
773 struct sk_buff *newskb;
774 dma_addr_t dma_addr;
775
776 dma_unmap_single(lp->dev,(dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
777 /* Get fresh skbuff to replace filled one. */
778 newskb = dev_alloc_skb(PKT_BUF_SZ + 4);
779 if (newskb == NULL) {
780 skb = NULL; /* drop pkt */
781 goto memory_squeeze;
782 }
783 skb_reserve(newskb, 2);
784
785 /* Pass up the skb already on the Rx ring. */
786 skb_put(skb, pkt_len);
787 rx_in_place = 1;
788 rbd->skb = newskb;
789 newskb->dev = dev;
790 dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
791 rbd->v_data = newskb->data;
792 rbd->b_data = WSWAPchar(dma_addr);
793 CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
794 }
795 else
796 skb = dev_alloc_skb(pkt_len + 2);
797memory_squeeze:
798 if (skb == NULL) {
799 /* XXX tulip.c can defer packets here!! */
800 printk("%s: i596_rx Memory squeeze, dropping packet.\n", dev->name);
801 lp->stats.rx_dropped++;
802 }
803 else {
804 if (!rx_in_place) {
805 /* 16 byte align the data fields */
806 dma_sync_single_for_cpu(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
807 skb_reserve(skb, 2);
808 memcpy(skb_put(skb,pkt_len), rbd->v_data, pkt_len);
809 dma_sync_single_for_device(lp->dev, (dma_addr_t)WSWAPchar(rbd->b_data), PKT_BUF_SZ, DMA_FROM_DEVICE);
810 }
811 skb->len = pkt_len;
812 skb->protocol=eth_type_trans(skb,dev);
813 netif_rx(skb);
814 dev->last_rx = jiffies;
815 lp->stats.rx_packets++;
816 lp->stats.rx_bytes+=pkt_len;
817 }
818 }
819 else {
820 DEB(DEB_ERRORS, printk("%s: Error, rfd.stat = 0x%04x\n",
821 dev->name, rfd->stat));
822 lp->stats.rx_errors++;
823 if ((rfd->stat) & 0x0001)
824 lp->stats.collisions++;
825 if ((rfd->stat) & 0x0080)
826 lp->stats.rx_length_errors++;
827 if ((rfd->stat) & 0x0100)
828 lp->stats.rx_over_errors++;
829 if ((rfd->stat) & 0x0200)
830 lp->stats.rx_fifo_errors++;
831 if ((rfd->stat) & 0x0400)
832 lp->stats.rx_frame_errors++;
833 if ((rfd->stat) & 0x0800)
834 lp->stats.rx_crc_errors++;
835 if ((rfd->stat) & 0x1000)
836 lp->stats.rx_length_errors++;
837 }
838
839 /* Clear the buffer descriptor count and EOF + F flags */
840
841 if (rbd != NULL && (rbd->count & 0x4000)) {
842 rbd->count = 0;
843 lp->rbd_head = rbd->v_next;
844 CHECK_WBACK_INV(lp, rbd, sizeof(struct i596_rbd));
845 }
846
847 /* Tidy the frame descriptor, marking it as end of list */
848
849 rfd->rbd = I596_NULL;
850 rfd->stat = 0;
851 rfd->cmd = CMD_EOL|CMD_FLEX;
852 rfd->count = 0;
853
854 /* Remove end-of-list from old end descriptor */
855
856 rfd->v_prev->cmd = CMD_FLEX;
857
858 /* Update record of next frame descriptor to process */
859
860 lp->scb.rfd = rfd->b_next;
861 lp->rfd_head = rfd->v_next;
862 CHECK_WBACK_INV(lp, rfd->v_prev, sizeof(struct i596_rfd));
863 CHECK_WBACK_INV(lp, rfd, sizeof(struct i596_rfd));
864 rfd = lp->rfd_head;
865 CHECK_INV(lp, rfd, sizeof(struct i596_rfd));
866 }
867
868 DEB(DEB_RXFRAME, printk("frames %d\n", frames));
869
870 return 0;
871}
872
873
874static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
875{
876 struct i596_cmd *ptr;
877
878 while (lp->cmd_head != NULL) {
879 ptr = lp->cmd_head;
880 lp->cmd_head = ptr->v_next;
881 lp->cmd_backlog--;
882
883 switch ((ptr->command) & 0x7) {
884 case CmdTx:
885 {
886 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
887 struct sk_buff *skb = tx_cmd->skb;
888 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
889
890 dev_kfree_skb(skb);
891
892 lp->stats.tx_errors++;
893 lp->stats.tx_aborted_errors++;
894
895 ptr->v_next = NULL;
896 ptr->b_next = I596_NULL;
897 tx_cmd->cmd.command = 0; /* Mark as free */
898 break;
899 }
900 default:
901 ptr->v_next = NULL;
902 ptr->b_next = I596_NULL;
903 }
904 CHECK_WBACK_INV(lp, ptr, sizeof(struct i596_cmd));
905 }
906
907 wait_cmd(dev, lp, 100, "i596_cleanup_cmd timed out");
908 lp->scb.cmd = I596_NULL;
909 CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
910}
911
912
913static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
914{
915 unsigned long flags;
916
917 DEB(DEB_RESET, printk("i596_reset\n"));
918
919 spin_lock_irqsave (&lp->lock, flags);
920
921 wait_cmd(dev, lp, 100, "i596_reset timed out");
922
923 netif_stop_queue(dev);
924
925 /* FIXME: this command might cause an lpmc */
926 lp->scb.command = CUC_ABORT | RX_ABORT;
927 CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
928 CA(dev);
929
930 /* wait for shutdown */
931 wait_cmd(dev, lp, 1000, "i596_reset 2 timed out");
932 spin_unlock_irqrestore (&lp->lock, flags);
933
934 i596_cleanup_cmd(dev,lp);
935 i596_rx(dev);
936
937 netif_start_queue(dev);
938 init_i596_mem(dev);
939}
940
941
942static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
943{
944 struct i596_private *lp = dev->priv;
945 unsigned long flags;
946
947 DEB(DEB_ADDCMD, printk("i596_add_cmd cmd_head %p\n", lp->cmd_head));
948
949 cmd->status = 0;
950 cmd->command |= (CMD_EOL | CMD_INTR);
951 cmd->v_next = NULL;
952 cmd->b_next = I596_NULL;
953 CHECK_WBACK(lp, cmd, sizeof(struct i596_cmd));
954
955 spin_lock_irqsave (&lp->lock, flags);
956
957 if (lp->cmd_head != NULL) {
958 lp->cmd_tail->v_next = cmd;
959 lp->cmd_tail->b_next = WSWAPcmd(virt_to_dma(lp,&cmd->status));
960 CHECK_WBACK(lp, lp->cmd_tail, sizeof(struct i596_cmd));
961 } else {
962 lp->cmd_head = cmd;
963 wait_cmd(dev, lp, 100, "i596_add_cmd timed out");
964 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&cmd->status));
965 lp->scb.command = CUC_START;
966 CHECK_WBACK(lp, &(lp->scb), sizeof(struct i596_scb));
967 CA(dev);
968 }
969 lp->cmd_tail = cmd;
970 lp->cmd_backlog++;
971
972 spin_unlock_irqrestore (&lp->lock, flags);
973
974 if (lp->cmd_backlog > max_cmd_backlog) {
975 unsigned long tickssofar = jiffies - lp->last_cmd;
976
977 if (tickssofar < ticks_limit)
978 return;
979
980 printk("%s: command unit timed out, status resetting.\n", dev->name);
981#if 1
982 i596_reset(dev, lp);
983#endif
984 }
985}
986
987#if 0
988/* this function makes a perfectly adequate probe... but we have a
989 device list */
990static int i596_test(struct net_device *dev)
991{
992 struct i596_private *lp = dev->priv;
993 volatile int *tint;
994 u32 data;
995
996 tint = (volatile int *)(&(lp->scp));
997 data = virt_to_dma(lp,tint);
998
999 tint[1] = -1;
1000 CHECK_WBACK(lp, tint, PAGE_SIZE);
1001
1002 MPU_PORT(dev, 1, data);
1003
1004 for(data = 1000000; data; data--) {
1005 CHECK_INV(lp, tint, PAGE_SIZE);
1006 if(tint[1] != -1)
1007 break;
1008
1009 }
1010
1011 printk("i596_test result %d\n", tint[1]);
1012
1013}
1014#endif
1015
1016
1017static int i596_open(struct net_device *dev)
1018{
1019 DEB(DEB_OPEN, printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
1020
1021 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
1022 printk("%s: IRQ %d not free\n", dev->name, dev->irq);
1023 goto out;
1024 }
1025
1026 init_rx_bufs(dev);
1027
1028 if (init_i596_mem(dev)) {
1029 printk("%s: Failed to init memory\n", dev->name);
1030 goto out_remove_rx_bufs;
1031 }
1032
1033 netif_start_queue(dev);
1034
1035 return 0;
1036
1037out_remove_rx_bufs:
1038 remove_rx_bufs(dev);
1039 free_irq(dev->irq, dev);
1040out:
1041 return -EAGAIN;
1042}
1043
1044static void i596_tx_timeout (struct net_device *dev)
1045{
1046 struct i596_private *lp = dev->priv;
1047
1048 /* Transmitter timeout, serious problems. */
1049 DEB(DEB_ERRORS, printk("%s: transmit timed out, status resetting.\n",
1050 dev->name));
1051
1052 lp->stats.tx_errors++;
1053
1054 /* Try to restart the adaptor */
1055 if (lp->last_restart == lp->stats.tx_packets) {
1056 DEB(DEB_ERRORS, printk("Resetting board.\n"));
1057 /* Shutdown and restart */
1058 i596_reset (dev, lp);
1059 } else {
1060 /* Issue a channel attention signal */
1061 DEB(DEB_ERRORS, printk("Kicking board.\n"));
1062 lp->scb.command = CUC_START | RX_START;
1063 CHECK_WBACK_INV(lp, &(lp->scb), sizeof(struct i596_scb));
1064 CA (dev);
1065 lp->last_restart = lp->stats.tx_packets;
1066 }
1067
1068 dev->trans_start = jiffies;
1069 netif_wake_queue (dev);
1070}
1071
1072
1073static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
1074{
1075 struct i596_private *lp = dev->priv;
1076 struct tx_cmd *tx_cmd;
1077 struct i596_tbd *tbd;
1078 short length = skb->len;
1079 dev->trans_start = jiffies;
1080
1081 DEB(DEB_STARTTX, printk("%s: i596_start_xmit(%x,%p) called\n", dev->name,
1082 skb->len, skb->data));
1083
1084 if (length < ETH_ZLEN) {
1085 if (skb_padto(skb, ETH_ZLEN))
1086 return 0;
1087 length = ETH_ZLEN;
1088 }
1089
1090 netif_stop_queue(dev);
1091
1092 tx_cmd = lp->tx_cmds + lp->next_tx_cmd;
1093 tbd = lp->tbds + lp->next_tx_cmd;
1094
1095 if (tx_cmd->cmd.command) {
1096 DEB(DEB_ERRORS, printk("%s: xmit ring full, dropping packet.\n",
1097 dev->name));
1098 lp->stats.tx_dropped++;
1099
1100 dev_kfree_skb(skb);
1101 } else {
1102 if (++lp->next_tx_cmd == TX_RING_SIZE)
1103 lp->next_tx_cmd = 0;
1104 tx_cmd->tbd = WSWAPtbd(virt_to_dma(lp,tbd));
1105 tbd->next = I596_NULL;
1106
1107 tx_cmd->cmd.command = CMD_FLEX | CmdTx;
1108 tx_cmd->skb = skb;
1109
1110 tx_cmd->pad = 0;
1111 tx_cmd->size = 0;
1112 tbd->pad = 0;
1113 tbd->size = EOF | length;
1114
1115 tx_cmd->dma_addr = dma_map_single(lp->dev, skb->data, skb->len,
1116 DMA_TO_DEVICE);
1117 tbd->data = WSWAPchar(tx_cmd->dma_addr);
1118
1119 DEB(DEB_TXADDR,print_eth(skb->data, "tx-queued"));
1120 CHECK_WBACK_INV(lp, tx_cmd, sizeof(struct tx_cmd));
1121 CHECK_WBACK_INV(lp, tbd, sizeof(struct i596_tbd));
1122 i596_add_cmd(dev, &tx_cmd->cmd);
1123
1124 lp->stats.tx_packets++;
1125 lp->stats.tx_bytes += length;
1126 }
1127
1128 netif_start_queue(dev);
1129
1130 return 0;
1131}
1132
1133static void print_eth(unsigned char *add, char *str)
1134{
1135 int i;
1136
1137 printk("i596 0x%p, ", add);
1138 for (i = 0; i < 6; i++)
1139 printk(" %02X", add[i + 6]);
1140 printk(" -->");
1141 for (i = 0; i < 6; i++)
1142 printk(" %02X", add[i]);
1143 printk(" %02X%02X, %s\n", add[12], add[13], str);
1144}
1145
1146
1147#define LAN_PROM_ADDR 0xF0810000 152#define LAN_PROM_ADDR 0xF0810000
1148 153
1149static int __devinit i82596_probe(struct net_device *dev,
1150 struct device *gen_dev)
1151{
1152 int i;
1153 struct i596_private *lp;
1154 char eth_addr[6];
1155 dma_addr_t dma_addr;
1156
1157	/* This lot ensures things have been cache line aligned. */
1158 BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1159 BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
1160 BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
1161 BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1162#ifndef __LP64__
1163 BUILD_BUG_ON(sizeof(struct i596_private) > 4096);
1164#endif
1165
1166 if (!dev->base_addr || !dev->irq)
1167 return -ENODEV;
1168
1169 if (pdc_lan_station_id(eth_addr, dev->base_addr)) {
1170 for (i=0; i < 6; i++) {
1171 eth_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
1172 }
1173 printk(KERN_INFO "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
1174 }
1175
1176 dev->mem_start = (unsigned long) dma_alloc_noncoherent(gen_dev,
1177 sizeof(struct i596_private), &dma_addr, GFP_KERNEL);
1178 if (!dev->mem_start) {
1179 printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1180 return -ENOMEM;
1181 }
1182
1183 for (i = 0; i < 6; i++)
1184 dev->dev_addr[i] = eth_addr[i];
1185
1186 /* The 82596-specific entries in the device structure. */
1187 dev->open = i596_open;
1188 dev->stop = i596_close;
1189 dev->hard_start_xmit = i596_start_xmit;
1190 dev->get_stats = i596_get_stats;
1191 dev->set_multicast_list = set_multicast_list;
1192 dev->tx_timeout = i596_tx_timeout;
1193 dev->watchdog_timeo = TX_TIMEOUT;
1194#ifdef CONFIG_NET_POLL_CONTROLLER
1195 dev->poll_controller = i596_poll_controller;
1196#endif
1197
1198 dev->priv = (void *)(dev->mem_start);
1199
1200 lp = dev->priv;
1201 memset(lp, 0, sizeof(struct i596_private));
1202
1203 lp->scb.command = 0;
1204 lp->scb.cmd = I596_NULL;
1205 lp->scb.rfd = I596_NULL;
1206 spin_lock_init(&lp->lock);
1207 lp->dma_addr = dma_addr;
1208 lp->dev = gen_dev;
1209
1210 CHECK_WBACK_INV(lp, dev->mem_start, sizeof(struct i596_private));
1211
1212 i = register_netdev(dev);
1213 if (i) {
1214 lp = dev->priv;
1215 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1216 (void *)dev->mem_start, lp->dma_addr);
1217 return i;
1218 };
1219
1220 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,", dev->name, dev->base_addr));
1221 for (i = 0; i < 6; i++)
1222 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1223 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1224 DEB(DEB_INIT, printk(KERN_INFO "%s: lp at 0x%p (%d bytes), lp->scb at 0x%p\n",
1225 dev->name, lp, (int)sizeof(struct i596_private), &lp->scb));
1226
1227 return 0;
1228}
1229
1230#ifdef CONFIG_NET_POLL_CONTROLLER
1231static void i596_poll_controller(struct net_device *dev)
1232{
1233 disable_irq(dev->irq);
1234 i596_interrupt(dev->irq, dev);
1235 enable_irq(dev->irq);
1236}
1237#endif
1238
1239static irqreturn_t i596_interrupt(int irq, void *dev_id)
1240{
1241 struct net_device *dev = dev_id;
1242 struct i596_private *lp;
1243 unsigned short status, ack_cmd = 0;
1244
1245 if (dev == NULL) {
1246 printk("%s: irq %d for unknown device.\n", __FUNCTION__, irq);
1247 return IRQ_NONE;
1248 }
1249
1250 lp = dev->priv;
1251
1252 spin_lock (&lp->lock);
1253
1254 wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
1255 status = lp->scb.status;
1256
1257 DEB(DEB_INTS, printk("%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1258 dev->name, irq, status));
1259
1260 ack_cmd = status & 0xf000;
1261
1262 if (!ack_cmd) {
1263 DEB(DEB_ERRORS, printk("%s: interrupt with no events\n", dev->name));
1264 spin_unlock (&lp->lock);
1265 return IRQ_NONE;
1266 }
1267
1268 if ((status & 0x8000) || (status & 0x2000)) {
1269 struct i596_cmd *ptr;
1270
1271 if ((status & 0x8000))
1272 DEB(DEB_INTS, printk("%s: i596 interrupt completed command.\n", dev->name));
1273 if ((status & 0x2000))
1274 DEB(DEB_INTS, printk("%s: i596 interrupt command unit inactive %x.\n", dev->name, status & 0x0700));
1275
1276 while (lp->cmd_head != NULL) {
1277 CHECK_INV(lp, lp->cmd_head, sizeof(struct i596_cmd));
1278 if (!(lp->cmd_head->status & STAT_C))
1279 break;
1280
1281 ptr = lp->cmd_head;
1282
1283 DEB(DEB_STATUS, printk("cmd_head->status = %04x, ->command = %04x\n",
1284 lp->cmd_head->status, lp->cmd_head->command));
1285 lp->cmd_head = ptr->v_next;
1286 lp->cmd_backlog--;
1287
1288 switch ((ptr->command) & 0x7) {
1289 case CmdTx:
1290 {
1291 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1292 struct sk_buff *skb = tx_cmd->skb;
1293
1294 if ((ptr->status) & STAT_OK) {
1295 DEB(DEB_TXADDR, print_eth(skb->data, "tx-done"));
1296 } else {
1297 lp->stats.tx_errors++;
1298 if ((ptr->status) & 0x0020)
1299 lp->stats.collisions++;
1300 if (!((ptr->status) & 0x0040))
1301 lp->stats.tx_heartbeat_errors++;
1302 if ((ptr->status) & 0x0400)
1303 lp->stats.tx_carrier_errors++;
1304 if ((ptr->status) & 0x0800)
1305 lp->stats.collisions++;
1306 if ((ptr->status) & 0x1000)
1307 lp->stats.tx_aborted_errors++;
1308 }
1309 dma_unmap_single(lp->dev, tx_cmd->dma_addr, skb->len, DMA_TO_DEVICE);
1310 dev_kfree_skb_irq(skb);
1311
1312 tx_cmd->cmd.command = 0; /* Mark free */
1313 break;
1314 }
1315 case CmdTDR:
1316 {
1317 unsigned short status = ((struct tdr_cmd *)ptr)->status;
1318
1319 if (status & 0x8000) {
1320 DEB(DEB_ANY, printk("%s: link ok.\n", dev->name));
1321 } else {
1322 if (status & 0x4000)
1323 printk("%s: Transceiver problem.\n", dev->name);
1324 if (status & 0x2000)
1325 printk("%s: Termination problem.\n", dev->name);
1326 if (status & 0x1000)
1327 printk("%s: Short circuit.\n", dev->name);
1328
1329 DEB(DEB_TDR, printk("%s: Time %d.\n", dev->name, status & 0x07ff));
1330 }
1331 break;
1332 }
1333 case CmdConfigure:
1334 /* Zap command so set_multicast_list() knows it is free */
1335 ptr->command = 0;
1336 break;
1337 }
1338 ptr->v_next = NULL;
1339 ptr->b_next = I596_NULL;
1340 CHECK_WBACK(lp, ptr, sizeof(struct i596_cmd));
1341 lp->last_cmd = jiffies;
1342 }
1343
1344 /* This mess is arranging that only the last of any outstanding
1345 * commands has the interrupt bit set. Should probably really
1346 * only add to the cmd queue when the CU is stopped.
1347 */
1348 ptr = lp->cmd_head;
1349 while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1350 struct i596_cmd *prev = ptr;
1351
1352 ptr->command &= 0x1fff;
1353 ptr = ptr->v_next;
1354 CHECK_WBACK_INV(lp, prev, sizeof(struct i596_cmd));
1355 }
1356
1357 if ((lp->cmd_head != NULL))
1358 ack_cmd |= CUC_START;
1359 lp->scb.cmd = WSWAPcmd(virt_to_dma(lp,&lp->cmd_head->status));
1360 CHECK_WBACK_INV(lp, &lp->scb, sizeof(struct i596_scb));
1361 }
1362 if ((status & 0x1000) || (status & 0x4000)) {
1363 if ((status & 0x4000))
1364 DEB(DEB_INTS, printk("%s: i596 interrupt received a frame.\n", dev->name));
1365 i596_rx(dev);
1366 /* Only RX_START if stopped - RGH 07-07-96 */
1367 if (status & 0x1000) {
1368 if (netif_running(dev)) {
1369 DEB(DEB_ERRORS, printk("%s: i596 interrupt receive unit inactive, status 0x%x\n", dev->name, status));
1370 ack_cmd |= RX_START;
1371 lp->stats.rx_errors++;
1372 lp->stats.rx_fifo_errors++;
1373 rebuild_rx_bufs(dev);
1374 }
1375 }
1376 }
1377 wait_cmd(dev, lp, 100, "i596 interrupt, timeout");
1378 lp->scb.command = ack_cmd;
1379 CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
1380
1381 /* DANGER: I suspect that some kind of interrupt
1382 acknowledgement aside from acking the 82596 might be needed
1383 here... but it's running acceptably without */
1384
1385 CA(dev);
1386
1387 wait_cmd(dev, lp, 100, "i596 interrupt, exit timeout");
1388 DEB(DEB_INTS, printk("%s: exiting interrupt.\n", dev->name));
1389
1390 spin_unlock (&lp->lock);
1391 return IRQ_HANDLED;
1392}
1393
1394static int i596_close(struct net_device *dev)
1395{
1396 struct i596_private *lp = dev->priv;
1397 unsigned long flags;
1398
1399 netif_stop_queue(dev);
1400
1401 DEB(DEB_INIT, printk("%s: Shutting down ethercard, status was %4.4x.\n",
1402 dev->name, lp->scb.status));
1403
1404 spin_lock_irqsave(&lp->lock, flags);
1405
1406 wait_cmd(dev, lp, 100, "close1 timed out");
1407 lp->scb.command = CUC_ABORT | RX_ABORT;
1408 CHECK_WBACK(lp, &lp->scb, sizeof(struct i596_scb));
1409
1410 CA(dev);
1411
1412 wait_cmd(dev, lp, 100, "close2 timed out");
1413 spin_unlock_irqrestore(&lp->lock, flags);
1414 DEB(DEB_STRUCT,i596_display_data(dev));
1415 i596_cleanup_cmd(dev,lp);
1416
1417 disable_irq(dev->irq);
1418
1419 free_irq(dev->irq, dev);
1420 remove_rx_bufs(dev);
1421
1422 return 0;
1423}
1424
1425static struct net_device_stats *
1426 i596_get_stats(struct net_device *dev)
1427{
1428 struct i596_private *lp = dev->priv;
1429
1430 return &lp->stats;
1431}
1432
1433/*
1434 * Set or clear the multicast filter for this adaptor.
1435 */
1436
1437static void set_multicast_list(struct net_device *dev)
1438{
1439 struct i596_private *lp = dev->priv;
1440 int config = 0, cnt;
1441
1442 DEB(DEB_MULTI, printk("%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1443 dev->name, dev->mc_count, dev->flags & IFF_PROMISC ? "ON" : "OFF",
1444 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1445
1446 if ((dev->flags & IFF_PROMISC) && !(lp->cf_cmd.i596_config[8] & 0x01)) {
1447 lp->cf_cmd.i596_config[8] |= 0x01;
1448 config = 1;
1449 }
1450 if (!(dev->flags & IFF_PROMISC) && (lp->cf_cmd.i596_config[8] & 0x01)) {
1451 lp->cf_cmd.i596_config[8] &= ~0x01;
1452 config = 1;
1453 }
1454 if ((dev->flags & IFF_ALLMULTI) && (lp->cf_cmd.i596_config[11] & 0x20)) {
1455 lp->cf_cmd.i596_config[11] &= ~0x20;
1456 config = 1;
1457 }
1458 if (!(dev->flags & IFF_ALLMULTI) && !(lp->cf_cmd.i596_config[11] & 0x20)) {
1459 lp->cf_cmd.i596_config[11] |= 0x20;
1460 config = 1;
1461 }
1462 if (config) {
1463 if (lp->cf_cmd.cmd.command)
1464 printk("%s: config change request already queued\n",
1465 dev->name);
1466 else {
1467 lp->cf_cmd.cmd.command = CmdConfigure;
1468 CHECK_WBACK_INV(lp, &lp->cf_cmd, sizeof(struct cf_cmd));
1469 i596_add_cmd(dev, &lp->cf_cmd.cmd);
1470 }
1471 }
1472
1473 cnt = dev->mc_count;
1474 if (cnt > MAX_MC_CNT)
1475 {
1476 cnt = MAX_MC_CNT;
1477 printk("%s: Only %d multicast addresses supported",
1478 dev->name, cnt);
1479 }
1480
1481 if (dev->mc_count > 0) {
1482 struct dev_mc_list *dmi;
1483 unsigned char *cp;
1484 struct mc_cmd *cmd;
1485
1486 cmd = &lp->mc_cmd;
1487 cmd->cmd.command = CmdMulticastList;
1488 cmd->mc_cnt = dev->mc_count * 6;
1489 cp = cmd->mc_addrs;
1490 for (dmi = dev->mc_list; cnt && dmi != NULL; dmi = dmi->next, cnt--, cp += 6) {
1491 memcpy(cp, dmi->dmi_addr, 6);
1492 if (i596_debug > 1)
1493 DEB(DEB_MULTI, printk("%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1494 dev->name, cp[0],cp[1],cp[2],cp[3],cp[4],cp[5]));
1495 }
1496 CHECK_WBACK_INV(lp, &lp->mc_cmd, sizeof(struct mc_cmd));
1497 i596_add_cmd(dev, &cmd->cmd);
1498 }
1499}
1500
1501static int debug = -1;
1502module_param(debug, int, 0);
1503MODULE_PARM_DESC(debug, "lasi_82596 debug mask");
1504
1505static int num_drivers;
1506static struct net_device *netdevs[MAX_DRIVERS];
1507
1508static int __devinit 154static int __devinit
1509lan_init_chip(struct parisc_device *dev) 155lan_init_chip(struct parisc_device *dev)
1510{ 156{
1511 struct net_device *netdevice; 157 struct net_device *netdevice;
158 struct i596_private *lp;
1512 int retval; 159 int retval;
1513
160	int i;
1514 if (num_drivers >= MAX_DRIVERS) {
1515 /* max count of possible i82596 drivers reached */
1516 return -ENOMEM;
1517 }
1518
1519 if (num_drivers == 0)
1520 printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
1521 161
1522 if (!dev->irq) { 162 if (!dev->irq) {
1523 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n", 163 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
@@ -1528,28 +168,45 @@ lan_init_chip(struct parisc_device *dev)
1528 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start, 168 printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n", dev->hpa.start,
1529 dev->irq); 169 dev->irq);
1530 170
1531	netdevice = alloc_etherdev(0);
171	netdevice = alloc_etherdev(sizeof(struct i596_private));
1532 if (!netdevice) 172 if (!netdevice)
1533 return -ENOMEM; 173 return -ENOMEM;
174 SET_NETDEV_DEV(netdevice, &dev->dev);
175 parisc_set_drvdata (dev, netdevice);
1534 176
1535 netdevice->base_addr = dev->hpa.start; 177 netdevice->base_addr = dev->hpa.start;
1536 netdevice->irq = dev->irq; 178 netdevice->irq = dev->irq;
1537 179
1538	retval = i82596_probe(netdevice, &dev->dev);
180	if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
181 for (i = 0; i < 6; i++) {
182 netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
183 }
184 printk(KERN_INFO
185 "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
186 }
187
188 lp = netdev_priv(netdevice);
189 lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;
190
191 retval = i82596_probe(netdevice);
1539 if (retval) { 192 if (retval) {
1540 free_netdev(netdevice); 193 free_netdev(netdevice);
1541 return -ENODEV; 194 return -ENODEV;
1542 } 195 }
1543
1544 if (dev->id.sversion == 0x72) {
1545 ((struct i596_private *)netdevice->priv)->options = OPT_SWAP_PORT;
1546 }
1547
1548 netdevs[num_drivers++] = netdevice;
1549
1550 return retval; 196 return retval;
1551} 197}
1552 198
199static int __devexit lan_remove_chip (struct parisc_device *pdev)
200{
201 struct net_device *dev = parisc_get_drvdata(pdev);
202 struct i596_private *lp = netdev_priv(dev);
203
204 unregister_netdev (dev);
205 DMA_FREE(&pdev->dev, sizeof(struct i596_private),
206 (void *)lp->dma, lp->dma_addr);
207 free_netdev (dev);
208 return 0;
209}
1553 210
1554static struct parisc_device_id lan_tbl[] = { 211static struct parisc_device_id lan_tbl[] = {
1555 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a }, 212 { HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
@@ -1563,12 +220,12 @@ static struct parisc_driver lan_driver = {
1563 .name = "lasi_82596", 220 .name = "lasi_82596",
1564 .id_table = lan_tbl, 221 .id_table = lan_tbl,
1565 .probe = lan_init_chip, 222 .probe = lan_init_chip,
223 .remove = __devexit_p(lan_remove_chip),
1566}; 224};
1567 225
1568static int __devinit lasi_82596_init(void) 226static int __devinit lasi_82596_init(void)
1569{ 227{
1570	if (debug >= 0)
228	printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
1571 i596_debug = debug;
1572 return register_parisc_driver(&lan_driver); 229 return register_parisc_driver(&lan_driver);
1573} 230}
1574 231
@@ -1576,25 +233,6 @@ module_init(lasi_82596_init);
1576 233
1577static void __exit lasi_82596_exit(void) 234static void __exit lasi_82596_exit(void)
1578{ 235{
1579 int i;
1580
1581 for (i=0; i<MAX_DRIVERS; i++) {
1582 struct i596_private *lp;
1583 struct net_device *netdevice;
1584
1585 netdevice = netdevs[i];
1586 if (!netdevice)
1587 continue;
1588
1589 unregister_netdev(netdevice);
1590
1591 lp = netdevice->priv;
1592 dma_free_noncoherent(lp->dev, sizeof(struct i596_private),
1593 (void *)netdevice->mem_start, lp->dma_addr);
1594 free_netdev(netdevice);
1595 }
1596 num_drivers = 0;
1597
1598 unregister_parisc_driver(&lan_driver); 236 unregister_parisc_driver(&lan_driver);
1599} 237}
1600 238
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
new file mode 100644
index 000000000000..5884f5bd04a4
--- /dev/null
+++ b/drivers/net/lib82596.c
@@ -0,0 +1,1434 @@
1/* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
2   munged into HPPA boxen.
3
4 This driver is based upon 82596.c, original credits are below...
5 but there were too many hoops which HP wants jumped through to
6 keep this code in there in a sane manner.
7
8 3 primary sources of the mess --
9 1) hppa needs *lots* of cacheline flushing to keep this kind of
10 MMIO running.
11
12 2) The 82596 needs to see all of its pointers as their physical
13 address. Thus virt_to_bus/bus_to_virt are *everywhere*.
14
15 3) The implementation HP is using seems to be significantly pickier
16   about when and how the command and RX units are started. Some
17 command ordering was changed.
18
19 Examination of the mach driver leads one to believe that there
20 might be a saner way to pull this off... anyone who feels like a
21 full rewrite can be my guest.
22
23 Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
24
25 02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
26 03/02/2000 changes for better/correct(?) cache-flushing (deller)
27*/
28
29/* 82596.c: A generic 82596 ethernet driver for linux. */
30/*
31 Based on Apricot.c
32 Written 1994 by Mark Evans.
33 This driver is for the Apricot 82596 bus-master interface
34
35 Modularised 12/94 Mark Evans
36
37
38 Modified to support the 82596 ethernet chips on 680x0 VME boards.
39 by Richard Hirst <richard@sleepie.demon.co.uk>
40 Renamed to be 82596.c
41
42   980825:  Changed to receive directly into sk_buffs which are
43 allocated at open() time. Eliminates copy on incoming frames
44 (small ones are still copied). Shared data now held in a
45 non-cached page, so we can run on 68060 in copyback mode.
46
47 TBD:
48 * look at deferring rx frames rather than discarding (as per tulip)
49 * handle tx ring full as per tulip
50   * performance test to tune rx_copybreak
51
52 Most of my modifications relate to the braindead big-endian
53 implementation by Intel. When the i596 is operating in
54 'big-endian' mode, it thinks a 32 bit value of 0x12345678
55 should be stored as 0x56781234. This is a real pain, when
56 you have linked lists which are shared by the 680x0 and the
57 i596.
58
59 Driver skeleton
60 Written 1993 by Donald Becker.
61 Copyright 1993 United States Government as represented by the Director,
62 National Security Agency. This software may only be used and distributed
63 according to the terms of the GNU General Public License as modified by SRC,
64 incorporated herein by reference.
65
66 The author may be reached as becker@scyld.com, or C/O
67 Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
68
69 */
70
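/*
 * Editor's note -- a minimal sketch, not part of the original file: the
 * SWAP16()/SWAP32() helpers used throughout this library are assumed to be
 * supplied by the including board wrapper (lasi_82596.c here).  On a
 * big-endian host talking to an 82596 in the word-swapped mode described in
 * the header comment above, plausible definitions would be:
 */
#if 0	/* illustration only, not the wrapper's literal code */
#define SWAP32(x)	(((u32)(x) << 16) | ((u32)(x) >> 16))	/* 0x12345678 -> 0x56781234 */
#define SWAP16(x)	(x)					/* 16-bit quantities need no swap here */
#endif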
71#include <linux/module.h>
72#include <linux/kernel.h>
73#include <linux/string.h>
74#include <linux/errno.h>
75#include <linux/ioport.h>
76#include <linux/slab.h>
77#include <linux/interrupt.h>
78#include <linux/delay.h>
79#include <linux/netdevice.h>
80#include <linux/etherdevice.h>
81#include <linux/skbuff.h>
82#include <linux/init.h>
83#include <linux/types.h>
84#include <linux/bitops.h>
85#include <linux/dma-mapping.h>
86#include <linux/io.h>
87#include <linux/irq.h>
88
89/* DEBUG flags
90 */
91
92#define DEB_INIT 0x0001
93#define DEB_PROBE 0x0002
94#define DEB_SERIOUS 0x0004
95#define DEB_ERRORS 0x0008
96#define DEB_MULTI 0x0010
97#define DEB_TDR 0x0020
98#define DEB_OPEN 0x0040
99#define DEB_RESET 0x0080
100#define DEB_ADDCMD 0x0100
101#define DEB_STATUS 0x0200
102#define DEB_STARTTX 0x0400
103#define DEB_RXADDR 0x0800
104#define DEB_TXADDR 0x1000
105#define DEB_RXFRAME 0x2000
106#define DEB_INTS 0x4000
107#define DEB_STRUCT 0x8000
108#define DEB_ANY 0xffff
109
110
111#define DEB(x, y) if (i596_debug & (x)) { y; }
112
113
114/*
115 * The MPU_PORT command allows direct access to the 82596. With PORT access
116 * the following commands are available (p5-18). The 32-bit port command
117 * must be word-swapped with the most significant word written first.
118 * This only applies to VME boards.
119 */
120#define PORT_RESET 0x00 /* reset 82596 */
121#define PORT_SELFTEST 0x01 /* selftest */
122#define PORT_ALTSCP 0x02 /* alternate SCB address */
123#define PORT_ALTDUMP 0x03 /* Alternate DUMP address */
124
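/*
 * Editor's illustration (hypothetical, not taken from this file): a PORT
 * command is formed by OR-ing the command code into the suitably aligned
 * physical address and writing the 32-bit result as two halfwords, most
 * significant word first, e.g. roughly:
 *
 *	u32 v = (u32)PORT_ALTSCP | (u32)scp_bus_addr;
 *	write_port_halfword(v >> 16);		// high halfword first
 *	write_port_halfword(v & 0xffff);	// then low halfword
 *
 * write_port_halfword() is a stand-in name; the real write primitive is
 * board specific -- see mpu_port() in the PA-RISC wrapper earlier in this
 * patch.
 */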
125static int i596_debug = (DEB_SERIOUS|DEB_PROBE);
126
127/* Copy frames shorter than rx_copybreak, otherwise pass on up in
128 * a full sized sk_buff. Value of 100 stolen from tulip.c (!alpha).
129 */
130static int rx_copybreak = 100;
131
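/*
 * Editor's summary of how i596_rx() later in the file applies this
 * threshold (paraphrased, not verbatim code):
 *
 *	if (pkt_len > rx_copybreak)
 *		// hand the full-sized ring skb to the stack, map a fresh one
 *	else
 *		// allocate a small skb and memcpy() the frame into it
 *
 * Small frames cost one copy but leave the large pre-mapped buffer on the
 * receive ring.
 */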
132#define PKT_BUF_SZ 1536
133#define MAX_MC_CNT 64
134
135#define ISCP_BUSY 0x0001
136
137#define I596_NULL ((u32)0xffffffff)
138
139#define CMD_EOL 0x8000 /* The last command of the list, stop. */
140#define CMD_SUSP 0x4000 /* Suspend after doing cmd. */
141#define CMD_INTR 0x2000 /* Interrupt after doing cmd. */
142
143#define CMD_FLEX 0x0008 /* Enable flexible memory model */
144
145enum commands {
146 CmdNOp = 0, CmdSASetup = 1, CmdConfigure = 2, CmdMulticastList = 3,
147 CmdTx = 4, CmdTDR = 5, CmdDump = 6, CmdDiagnose = 7
148};
149
150#define STAT_C 0x8000 /* Set to 0 after execution */
151#define STAT_B 0x4000 /* Command being executed */
152#define STAT_OK 0x2000 /* Command executed ok */
153#define STAT_A 0x1000 /* Command aborted */
154
155#define CUC_START 0x0100
156#define CUC_RESUME 0x0200
157#define CUC_SUSPEND 0x0300
158#define CUC_ABORT 0x0400
159#define RX_START 0x0010
160#define RX_RESUME 0x0020
161#define RX_SUSPEND 0x0030
162#define RX_ABORT 0x0040
163
164#define TX_TIMEOUT 5
165
166
167struct i596_reg {
168 unsigned short porthi;
169 unsigned short portlo;
170 u32 ca;
171};
172
173#define EOF 0x8000
174#define SIZE_MASK 0x3fff
175
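/*
 * Editor's note: the low 14 bits of a buffer descriptor's size/count field
 * carry the byte count (SIZE_MASK) and bit 15 marks the final buffer of a
 * frame (EOF).  The transmit path therefore ends up doing, in effect,
 *
 *	tbd->size = SWAP16(EOF | length);
 *
 * while the receive path recovers the length with "count & SIZE_MASK".
 * (Illustrative summary only; the actual statements appear further down.)
 */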
176struct i596_tbd {
177 unsigned short size;
178 unsigned short pad;
179 dma_addr_t next;
180 dma_addr_t data;
181 u32 cache_pad[5]; /* Total 32 bytes... */
182};
183
184/* The command structure has two 'next' pointers; v_next is the address of
185 * the next command as seen by the CPU, b_next is the address of the next
186 * command as seen by the 82596. The b_next pointer, as used by the 82596,
187 * always references the status field of the next command, rather than the
188 * v_next field, because the 82596 is unaware of v_next. It may seem more
189 * logical to put v_next at the end of the structure, but we cannot do that
190 * because the 82596 expects other fields to be there, depending on command
191 * type.
192 */
193
194struct i596_cmd {
195 struct i596_cmd *v_next; /* Address from CPUs viewpoint */
196 unsigned short status;
197 unsigned short command;
198 dma_addr_t b_next; /* Address from i596 viewpoint */
199};
200
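/*
 * Editor's note -- a short illustration of the two-pointer scheme described
 * above, modelled on i596_add_cmd() later in this file (the field names are
 * the driver's own, the snippet itself is not verbatim):
 *
 *	cmd->v_next = NULL;			// CPU-side list ends here
 *	cmd->b_next = I596_NULL;		// chip-side list ends here
 *	lp->cmd_tail->v_next = cmd;		// CPU walks the struct pointers
 *	lp->cmd_tail->b_next =
 *		SWAP32(virt_to_dma(lp, &cmd->status));	// chip jumps to the status word
 */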
201struct tx_cmd {
202 struct i596_cmd cmd;
203 dma_addr_t tbd;
204 unsigned short size;
205 unsigned short pad;
206 struct sk_buff *skb; /* So we can free it after tx */
207 dma_addr_t dma_addr;
208#ifdef __LP64__
209 u32 cache_pad[6]; /* Total 64 bytes... */
210#else
211 u32 cache_pad[1]; /* Total 32 bytes... */
212#endif
213};
214
215struct tdr_cmd {
216 struct i596_cmd cmd;
217 unsigned short status;
218 unsigned short pad;
219};
220
221struct mc_cmd {
222 struct i596_cmd cmd;
223 short mc_cnt;
224 char mc_addrs[MAX_MC_CNT*6];
225};
226
227struct sa_cmd {
228 struct i596_cmd cmd;
229 char eth_addr[8];
230};
231
232struct cf_cmd {
233 struct i596_cmd cmd;
234 char i596_config[16];
235};
236
237struct i596_rfd {
238 unsigned short stat;
239 unsigned short cmd;
240 dma_addr_t b_next; /* Address from i596 viewpoint */
241 dma_addr_t rbd;
242 unsigned short count;
243 unsigned short size;
244 struct i596_rfd *v_next; /* Address from CPUs viewpoint */
245 struct i596_rfd *v_prev;
246#ifndef __LP64__
247 u32 cache_pad[2]; /* Total 32 bytes... */
248#endif
249};
250
251struct i596_rbd {
252 /* hardware data */
253 unsigned short count;
254 unsigned short zero1;
255 dma_addr_t b_next;
256 dma_addr_t b_data; /* Address from i596 viewpoint */
257 unsigned short size;
258 unsigned short zero2;
259 /* driver data */
260 struct sk_buff *skb;
261 struct i596_rbd *v_next;
262 dma_addr_t b_addr; /* This rbd addr from i596 view */
263 unsigned char *v_data; /* Address from CPUs viewpoint */
264 /* Total 32 bytes... */
265#ifdef __LP64__
266 u32 cache_pad[4];
267#endif
268};
269
270/* These values are chosen so struct i596_dma fits in one page... */
271
272#define TX_RING_SIZE 32
273#define RX_RING_SIZE 16
274
275struct i596_scb {
276 unsigned short status;
277 unsigned short command;
278 dma_addr_t cmd;
279 dma_addr_t rfd;
280 u32 crc_err;
281 u32 align_err;
282 u32 resource_err;
283 u32 over_err;
284 u32 rcvdt_err;
285 u32 short_err;
286 unsigned short t_on;
287 unsigned short t_off;
288};
289
290struct i596_iscp {
291 u32 stat;
292 dma_addr_t scb;
293};
294
295struct i596_scp {
296 u32 sysbus;
297 u32 pad;
298 dma_addr_t iscp;
299};
300
301struct i596_dma {
302 struct i596_scp scp __attribute__((aligned(32)));
303 volatile struct i596_iscp iscp __attribute__((aligned(32)));
304 volatile struct i596_scb scb __attribute__((aligned(32)));
305 struct sa_cmd sa_cmd __attribute__((aligned(32)));
306 struct cf_cmd cf_cmd __attribute__((aligned(32)));
307 struct tdr_cmd tdr_cmd __attribute__((aligned(32)));
308 struct mc_cmd mc_cmd __attribute__((aligned(32)));
309 struct i596_rfd rfds[RX_RING_SIZE] __attribute__((aligned(32)));
310 struct i596_rbd rbds[RX_RING_SIZE] __attribute__((aligned(32)));
311 struct tx_cmd tx_cmds[TX_RING_SIZE] __attribute__((aligned(32)));
312 struct i596_tbd tbds[TX_RING_SIZE] __attribute__((aligned(32)));
313};
314
315struct i596_private {
316 struct i596_dma *dma;
317 u32 stat;
318 int last_restart;
319 struct i596_rfd *rfd_head;
320 struct i596_rbd *rbd_head;
321 struct i596_cmd *cmd_tail;
322 struct i596_cmd *cmd_head;
323 int cmd_backlog;
324 u32 last_cmd;
325 struct net_device_stats stats;
326 int next_tx_cmd;
327 int options;
328 spinlock_t lock; /* serialize access to chip */
329 dma_addr_t dma_addr;
330 void __iomem *mpu_port;
331 void __iomem *ca;
332};
333
334static const char init_setup[] =
335{
336 0x8E, /* length, prefetch on */
337 0xC8, /* fifo to 8, monitor off */
338 0x80, /* don't save bad frames */
339 0x2E, /* No source address insertion, 8 byte preamble */
340 0x00, /* priority and backoff defaults */
341 0x60, /* interframe spacing */
342 0x00, /* slot time LSB */
343 0xf2, /* slot time and retries */
344 0x00, /* promiscuous mode */
345 0x00, /* collision detect */
346 0x40, /* minimum frame length */
347 0xff,
348 0x00,
349 0x7f /* *multi IA */ };
350
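/*
 * Editor's note: these 14 bytes form the i82596 CONFIGURE parameter block.
 * init_i596_mem() below copies them into cf_cmd.i596_config and queues a
 * CmdConfigure; set_multicast_list() later patches byte 8 (promiscuous bit
 * 0x01) and byte 11 (bit 0x20, all-multicast control) in place before
 * re-issuing the command.
 */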
351static int i596_open(struct net_device *dev);
352static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
353static irqreturn_t i596_interrupt(int irq, void *dev_id);
354static int i596_close(struct net_device *dev);
355static struct net_device_stats *i596_get_stats(struct net_device *dev);
356static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);
357static void i596_tx_timeout (struct net_device *dev);
358static void print_eth(unsigned char *buf, char *str);
359static void set_multicast_list(struct net_device *dev);
360static inline void ca(struct net_device *dev);
361static void mpu_port(struct net_device *dev, int c, dma_addr_t x);
362
363static int rx_ring_size = RX_RING_SIZE;
364static int ticks_limit = 100;
365static int max_cmd_backlog = TX_RING_SIZE-1;
366
367#ifdef CONFIG_NET_POLL_CONTROLLER
368static void i596_poll_controller(struct net_device *dev);
369#endif
370
371
372static inline int wait_istat(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
373{
374 DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
375 while (--delcnt && dma->iscp.stat) {
376 udelay(10);
377 DMA_INV(dev, &(dma->iscp), sizeof(struct i596_iscp));
378 }
379 if (!delcnt) {
380 printk(KERN_ERR "%s: %s, iscp.stat %04x, didn't clear\n",
381 dev->name, str, SWAP16(dma->iscp.stat));
382 return -1;
383 } else
384 return 0;
385}
386
387
388static inline int wait_cmd(struct net_device *dev, struct i596_dma *dma, int delcnt, char *str)
389{
390 DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
391 while (--delcnt && dma->scb.command) {
392 udelay(10);
393 DMA_INV(dev, &(dma->scb), sizeof(struct i596_scb));
394 }
395 if (!delcnt) {
396 printk(KERN_ERR "%s: %s, status %4.4x, cmd %4.4x.\n",
397 dev->name, str,
398 SWAP16(dma->scb.status),
399 SWAP16(dma->scb.command));
400 return -1;
401 } else
402 return 0;
403}
404
405
406static void i596_display_data(struct net_device *dev)
407{
408 struct i596_private *lp = netdev_priv(dev);
409 struct i596_dma *dma = lp->dma;
410 struct i596_cmd *cmd;
411 struct i596_rfd *rfd;
412 struct i596_rbd *rbd;
413
414 printk(KERN_DEBUG "lp and scp at %p, .sysbus = %08x, .iscp = %08x\n",
415 &dma->scp, dma->scp.sysbus, SWAP32(dma->scp.iscp));
416 printk(KERN_DEBUG "iscp at %p, iscp.stat = %08x, .scb = %08x\n",
417 &dma->iscp, SWAP32(dma->iscp.stat), SWAP32(dma->iscp.scb));
418 printk(KERN_DEBUG "scb at %p, scb.status = %04x, .command = %04x,"
419 " .cmd = %08x, .rfd = %08x\n",
420 &dma->scb, SWAP16(dma->scb.status), SWAP16(dma->scb.command),
421 SWAP16(dma->scb.cmd), SWAP32(dma->scb.rfd));
422 printk(KERN_DEBUG " errors: crc %x, align %x, resource %x,"
423 " over %x, rcvdt %x, short %x\n",
424 SWAP32(dma->scb.crc_err), SWAP32(dma->scb.align_err),
425 SWAP32(dma->scb.resource_err), SWAP32(dma->scb.over_err),
426 SWAP32(dma->scb.rcvdt_err), SWAP32(dma->scb.short_err));
427 cmd = lp->cmd_head;
428 while (cmd != NULL) {
429 printk(KERN_DEBUG
430 "cmd at %p, .status = %04x, .command = %04x,"
431 " .b_next = %08x\n",
432 cmd, SWAP16(cmd->status), SWAP16(cmd->command),
433 SWAP32(cmd->b_next));
434 cmd = cmd->v_next;
435 }
436 rfd = lp->rfd_head;
437 printk(KERN_DEBUG "rfd_head = %p\n", rfd);
438 do {
439 printk(KERN_DEBUG
440 " %p .stat %04x, .cmd %04x, b_next %08x, rbd %08x,"
441 " count %04x\n",
442 rfd, SWAP16(rfd->stat), SWAP16(rfd->cmd),
443 SWAP32(rfd->b_next), SWAP32(rfd->rbd),
444 SWAP16(rfd->count));
445 rfd = rfd->v_next;
446 } while (rfd != lp->rfd_head);
447 rbd = lp->rbd_head;
448 printk(KERN_DEBUG "rbd_head = %p\n", rbd);
449 do {
450 printk(KERN_DEBUG
451 " %p .count %04x, b_next %08x, b_data %08x,"
452 " size %04x\n",
453 rbd, SWAP16(rbd->count), SWAP32(rbd->b_next),
454 SWAP32(rbd->b_data), SWAP16(rbd->size));
455 rbd = rbd->v_next;
456 } while (rbd != lp->rbd_head);
457 DMA_INV(dev, dma, sizeof(struct i596_dma));
458}
459
460
461#define virt_to_dma(lp, v) ((lp)->dma_addr + (dma_addr_t)((unsigned long)(v)-(unsigned long)((lp)->dma)))
462
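/*
 * Editor's note: virt_to_dma() translates a CPU pointer into the shared
 * i596_dma block into the bus address the chip must be handed, e.g. as used
 * a little further down:
 *
 *	dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
 *
 * It is a plain offset from lp->dma_addr, so it is only valid for pointers
 * that lie inside lp->dma.
 */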
463static inline int init_rx_bufs(struct net_device *dev)
464{
465 struct i596_private *lp = netdev_priv(dev);
466 struct i596_dma *dma = lp->dma;
467 int i;
468 struct i596_rfd *rfd;
469 struct i596_rbd *rbd;
470
471 /* First build the Receive Buffer Descriptor List */
472
473 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
474 dma_addr_t dma_addr;
475 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
476
477 if (skb == NULL)
478 return -1;
479 skb_reserve(skb, 2);
480 dma_addr = dma_map_single(dev->dev.parent, skb->data,
481 PKT_BUF_SZ, DMA_FROM_DEVICE);
482 rbd->v_next = rbd+1;
483 rbd->b_next = SWAP32(virt_to_dma(lp, rbd+1));
484 rbd->b_addr = SWAP32(virt_to_dma(lp, rbd));
485 rbd->skb = skb;
486 rbd->v_data = skb->data;
487 rbd->b_data = SWAP32(dma_addr);
488 rbd->size = SWAP16(PKT_BUF_SZ);
489 }
490 lp->rbd_head = dma->rbds;
491 rbd = dma->rbds + rx_ring_size - 1;
492 rbd->v_next = dma->rbds;
493 rbd->b_next = SWAP32(virt_to_dma(lp, dma->rbds));
494
495 /* Now build the Receive Frame Descriptor List */
496
497 for (i = 0, rfd = dma->rfds; i < rx_ring_size; i++, rfd++) {
498 rfd->rbd = I596_NULL;
499 rfd->v_next = rfd+1;
500 rfd->v_prev = rfd-1;
501 rfd->b_next = SWAP32(virt_to_dma(lp, rfd+1));
502 rfd->cmd = SWAP16(CMD_FLEX);
503 }
504 lp->rfd_head = dma->rfds;
505 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
506 rfd = dma->rfds;
507 rfd->rbd = SWAP32(virt_to_dma(lp, lp->rbd_head));
508 rfd->v_prev = dma->rfds + rx_ring_size - 1;
509 rfd = dma->rfds + rx_ring_size - 1;
510 rfd->v_next = dma->rfds;
511 rfd->b_next = SWAP32(virt_to_dma(lp, dma->rfds));
512 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
513
514 DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
515 return 0;
516}
517
518static inline void remove_rx_bufs(struct net_device *dev)
519{
520 struct i596_private *lp = netdev_priv(dev);
521 struct i596_rbd *rbd;
522 int i;
523
524 for (i = 0, rbd = lp->dma->rbds; i < rx_ring_size; i++, rbd++) {
525 if (rbd->skb == NULL)
526 break;
527 dma_unmap_single(dev->dev.parent,
528 (dma_addr_t)SWAP32(rbd->b_data),
529 PKT_BUF_SZ, DMA_FROM_DEVICE);
530 dev_kfree_skb(rbd->skb);
531 }
532}
533
534
535static void rebuild_rx_bufs(struct net_device *dev)
536{
537 struct i596_private *lp = netdev_priv(dev);
538 struct i596_dma *dma = lp->dma;
539 int i;
540
541 /* Ensure rx frame/buffer descriptors are tidy */
542
543 for (i = 0; i < rx_ring_size; i++) {
544 dma->rfds[i].rbd = I596_NULL;
545 dma->rfds[i].cmd = SWAP16(CMD_FLEX);
546 }
547 dma->rfds[rx_ring_size-1].cmd = SWAP16(CMD_EOL|CMD_FLEX);
548 lp->rfd_head = dma->rfds;
549 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
550 lp->rbd_head = dma->rbds;
551 dma->rfds[0].rbd = SWAP32(virt_to_dma(lp, dma->rbds));
552
553 DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
554}
555
556
557static int init_i596_mem(struct net_device *dev)
558{
559 struct i596_private *lp = netdev_priv(dev);
560 struct i596_dma *dma = lp->dma;
561 unsigned long flags;
562
563 mpu_port(dev, PORT_RESET, 0);
564 udelay(100); /* Wait 100us - seems to help */
565
566 /* change the scp address */
567
568 lp->last_cmd = jiffies;
569
570 dma->scp.sysbus = SYSBUS;
571 dma->scp.iscp = SWAP32(virt_to_dma(lp, &(dma->iscp)));
572 dma->iscp.scb = SWAP32(virt_to_dma(lp, &(dma->scb)));
573 dma->iscp.stat = SWAP32(ISCP_BUSY);
574 lp->cmd_backlog = 0;
575
576 lp->cmd_head = NULL;
577 dma->scb.cmd = I596_NULL;
578
579 DEB(DEB_INIT, printk(KERN_DEBUG "%s: starting i82596.\n", dev->name));
580
581 DMA_WBACK(dev, &(dma->scp), sizeof(struct i596_scp));
582 DMA_WBACK(dev, &(dma->iscp), sizeof(struct i596_iscp));
583 DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
584
585 mpu_port(dev, PORT_ALTSCP, virt_to_dma(lp, &dma->scp));
586 ca(dev);
587 if (wait_istat(dev, dma, 1000, "initialization timed out"))
588 goto failed;
589 DEB(DEB_INIT, printk(KERN_DEBUG
590 "%s: i82596 initialization successful\n",
591 dev->name));
592
593 if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
594 printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
595 goto failed;
596 }
597
598 /* Ensure rx frame/buffer descriptors are tidy */
599 rebuild_rx_bufs(dev);
600
601 dma->scb.command = 0;
602 DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
603
604 DEB(DEB_INIT, printk(KERN_DEBUG
605 "%s: queuing CmdConfigure\n", dev->name));
606 memcpy(dma->cf_cmd.i596_config, init_setup, 14);
607 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
608 DMA_WBACK(dev, &(dma->cf_cmd), sizeof(struct cf_cmd));
609 i596_add_cmd(dev, &dma->cf_cmd.cmd);
610
611 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdSASetup\n", dev->name));
612 memcpy(dma->sa_cmd.eth_addr, dev->dev_addr, 6);
613 dma->sa_cmd.cmd.command = SWAP16(CmdSASetup);
614 DMA_WBACK(dev, &(dma->sa_cmd), sizeof(struct sa_cmd));
615 i596_add_cmd(dev, &dma->sa_cmd.cmd);
616
617 DEB(DEB_INIT, printk(KERN_DEBUG "%s: queuing CmdTDR\n", dev->name));
618 dma->tdr_cmd.cmd.command = SWAP16(CmdTDR);
619 DMA_WBACK(dev, &(dma->tdr_cmd), sizeof(struct tdr_cmd));
620 i596_add_cmd(dev, &dma->tdr_cmd.cmd);
621
622 spin_lock_irqsave (&lp->lock, flags);
623
624 if (wait_cmd(dev, dma, 1000, "timed out waiting to issue RX_START")) {
625 spin_unlock_irqrestore (&lp->lock, flags);
626 goto failed_free_irq;
627 }
628 DEB(DEB_INIT, printk(KERN_DEBUG "%s: Issuing RX_START\n", dev->name));
629 dma->scb.command = SWAP16(RX_START);
630 dma->scb.rfd = SWAP32(virt_to_dma(lp, dma->rfds));
631 DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
632
633 ca(dev);
634
635 spin_unlock_irqrestore (&lp->lock, flags);
636 if (wait_cmd(dev, dma, 1000, "RX_START not processed"))
637 goto failed_free_irq;
638 DEB(DEB_INIT, printk(KERN_DEBUG
639 "%s: Receive unit started OK\n", dev->name));
640 return 0;
641
642failed_free_irq:
643 free_irq(dev->irq, dev);
644failed:
645 printk(KERN_ERR "%s: Failed to initialise 82596\n", dev->name);
646 mpu_port(dev, PORT_RESET, 0);
647 return -1;
648}
649
650
651static inline int i596_rx(struct net_device *dev)
652{
653 struct i596_private *lp = netdev_priv(dev);
654 struct i596_rfd *rfd;
655 struct i596_rbd *rbd;
656 int frames = 0;
657
658 DEB(DEB_RXFRAME, printk(KERN_DEBUG
659 "i596_rx(), rfd_head %p, rbd_head %p\n",
660 lp->rfd_head, lp->rbd_head));
661
662
663 rfd = lp->rfd_head; /* Ref next frame to check */
664
665 DMA_INV(dev, rfd, sizeof(struct i596_rfd));
666 while (rfd->stat & SWAP16(STAT_C)) { /* Loop while complete frames */
667 if (rfd->rbd == I596_NULL)
668 rbd = NULL;
669 else if (rfd->rbd == lp->rbd_head->b_addr) {
670 rbd = lp->rbd_head;
671 DMA_INV(dev, rbd, sizeof(struct i596_rbd));
672 } else {
673 printk(KERN_ERR "%s: rbd chain broken!\n", dev->name);
674 /* XXX Now what? */
675 rbd = NULL;
676 }
677 DEB(DEB_RXFRAME, printk(KERN_DEBUG
678 " rfd %p, rfd.rbd %08x, rfd.stat %04x\n",
679 rfd, rfd->rbd, rfd->stat));
680
681 if (rbd != NULL && (rfd->stat & SWAP16(STAT_OK))) {
682 /* a good frame */
683 int pkt_len = SWAP16(rbd->count) & 0x3fff;
684 struct sk_buff *skb = rbd->skb;
685 int rx_in_place = 0;
686
687 DEB(DEB_RXADDR, print_eth(rbd->v_data, "received"));
688 frames++;
689
690 /* Check if the packet is long enough to just accept
691 * without copying to a properly sized skbuff.
692 */
693
694 if (pkt_len > rx_copybreak) {
695 struct sk_buff *newskb;
696 dma_addr_t dma_addr;
697
698 dma_unmap_single(dev->dev.parent,
699 (dma_addr_t)SWAP32(rbd->b_data),
700 PKT_BUF_SZ, DMA_FROM_DEVICE);
701 /* Get fresh skbuff to replace filled one. */
702 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4);
703 if (newskb == NULL) {
704 skb = NULL; /* drop pkt */
705 goto memory_squeeze;
706 }
707 skb_reserve(newskb, 2);
708
709 /* Pass up the skb already on the Rx ring. */
710 skb_put(skb, pkt_len);
711 rx_in_place = 1;
712 rbd->skb = newskb;
713 dma_addr = dma_map_single(dev->dev.parent,
714 newskb->data,
715 PKT_BUF_SZ,
716 DMA_FROM_DEVICE);
717 rbd->v_data = newskb->data;
718 rbd->b_data = SWAP32(dma_addr);
719 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
720 } else
721 skb = netdev_alloc_skb(dev, pkt_len + 2);
722memory_squeeze:
723 if (skb == NULL) {
724 /* XXX tulip.c can defer packets here!! */
725 printk(KERN_ERR
726 "%s: i596_rx Memory squeeze, dropping packet.\n",
727 dev->name);
728 lp->stats.rx_dropped++;
729 } else {
730 if (!rx_in_place) {
731 /* 16 byte align the data fields */
732 dma_sync_single_for_cpu(dev->dev.parent,
733 (dma_addr_t)SWAP32(rbd->b_data),
734 PKT_BUF_SZ, DMA_FROM_DEVICE);
735 skb_reserve(skb, 2);
736 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
737 dma_sync_single_for_device(dev->dev.parent,
738 (dma_addr_t)SWAP32(rbd->b_data),
739 PKT_BUF_SZ, DMA_FROM_DEVICE);
740 }
741 skb->len = pkt_len;
742 skb->protocol = eth_type_trans(skb, dev);
743 netif_rx(skb);
744 dev->last_rx = jiffies;
745 lp->stats.rx_packets++;
746 lp->stats.rx_bytes += pkt_len;
747 }
748 } else {
749 DEB(DEB_ERRORS, printk(KERN_DEBUG
750 "%s: Error, rfd.stat = 0x%04x\n",
751 dev->name, rfd->stat));
752 lp->stats.rx_errors++;
753 if (rfd->stat & SWAP16(0x0100))
754 lp->stats.collisions++;
755 if (rfd->stat & SWAP16(0x8000))
756 lp->stats.rx_length_errors++;
757 if (rfd->stat & SWAP16(0x0001))
758 lp->stats.rx_over_errors++;
759 if (rfd->stat & SWAP16(0x0002))
760 lp->stats.rx_fifo_errors++;
761 if (rfd->stat & SWAP16(0x0004))
762 lp->stats.rx_frame_errors++;
763 if (rfd->stat & SWAP16(0x0008))
764 lp->stats.rx_crc_errors++;
765 if (rfd->stat & SWAP16(0x0010))
766 lp->stats.rx_length_errors++;
767 }
768
769 /* Clear the buffer descriptor count and EOF + F flags */
770
771 if (rbd != NULL && (rbd->count & SWAP16(0x4000))) {
772 rbd->count = 0;
773 lp->rbd_head = rbd->v_next;
774 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
775 }
776
777 /* Tidy the frame descriptor, marking it as end of list */
778
779 rfd->rbd = I596_NULL;
780 rfd->stat = 0;
781 rfd->cmd = SWAP16(CMD_EOL|CMD_FLEX);
782 rfd->count = 0;
783
784 /* Update record of next frame descriptor to process */
785
786 lp->dma->scb.rfd = rfd->b_next;
787 lp->rfd_head = rfd->v_next;
788 DMA_WBACK_INV(dev, rfd, sizeof(struct i596_rfd));
789
790 /* Remove end-of-list from old end descriptor */
791
792 rfd->v_prev->cmd = SWAP16(CMD_FLEX);
793 DMA_WBACK_INV(dev, rfd->v_prev, sizeof(struct i596_rfd));
794 rfd = lp->rfd_head;
795 DMA_INV(dev, rfd, sizeof(struct i596_rfd));
796 }
797
798 DEB(DEB_RXFRAME, printk(KERN_DEBUG "frames %d\n", frames));
799
800 return 0;
801}
802
803
804static inline void i596_cleanup_cmd(struct net_device *dev, struct i596_private *lp)
805{
806 struct i596_cmd *ptr;
807
808 while (lp->cmd_head != NULL) {
809 ptr = lp->cmd_head;
810 lp->cmd_head = ptr->v_next;
811 lp->cmd_backlog--;
812
813 switch (SWAP16(ptr->command) & 0x7) {
814 case CmdTx:
815 {
816 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
817 struct sk_buff *skb = tx_cmd->skb;
818 dma_unmap_single(dev->dev.parent,
819 tx_cmd->dma_addr,
820 skb->len, DMA_TO_DEVICE);
821
822 dev_kfree_skb(skb);
823
824 lp->stats.tx_errors++;
825 lp->stats.tx_aborted_errors++;
826
827 ptr->v_next = NULL;
828 ptr->b_next = I596_NULL;
829 tx_cmd->cmd.command = 0; /* Mark as free */
830 break;
831 }
832 default:
833 ptr->v_next = NULL;
834 ptr->b_next = I596_NULL;
835 }
836 DMA_WBACK_INV(dev, ptr, sizeof(struct i596_cmd));
837 }
838
839 wait_cmd(dev, lp->dma, 100, "i596_cleanup_cmd timed out");
840 lp->dma->scb.cmd = I596_NULL;
841 DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
842}
843
844
845static inline void i596_reset(struct net_device *dev, struct i596_private *lp)
846{
847 unsigned long flags;
848
849 DEB(DEB_RESET, printk(KERN_DEBUG "i596_reset\n"));
850
851 spin_lock_irqsave (&lp->lock, flags);
852
853 wait_cmd(dev, lp->dma, 100, "i596_reset timed out");
854
855 netif_stop_queue(dev);
856
857 /* FIXME: this command might cause an lpmc */
858 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
859 DMA_WBACK(dev, &(lp->dma->scb), sizeof(struct i596_scb));
860 ca(dev);
861
862 /* wait for shutdown */
863 wait_cmd(dev, lp->dma, 1000, "i596_reset 2 timed out");
864 spin_unlock_irqrestore (&lp->lock, flags);
865
866 i596_cleanup_cmd(dev, lp);
867 i596_rx(dev);
868
869 netif_start_queue(dev);
870 init_i596_mem(dev);
871}
872
873
874static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd)
875{
876 struct i596_private *lp = netdev_priv(dev);
877 struct i596_dma *dma = lp->dma;
878 unsigned long flags;
879
880 DEB(DEB_ADDCMD, printk(KERN_DEBUG "i596_add_cmd cmd_head %p\n",
881 lp->cmd_head));
882
883 cmd->status = 0;
884 cmd->command |= SWAP16(CMD_EOL | CMD_INTR);
885 cmd->v_next = NULL;
886 cmd->b_next = I596_NULL;
887 DMA_WBACK(dev, cmd, sizeof(struct i596_cmd));
888
889 spin_lock_irqsave (&lp->lock, flags);
890
891 if (lp->cmd_head != NULL) {
892 lp->cmd_tail->v_next = cmd;
893 lp->cmd_tail->b_next = SWAP32(virt_to_dma(lp, &cmd->status));
894 DMA_WBACK(dev, lp->cmd_tail, sizeof(struct i596_cmd));
895 } else {
896 lp->cmd_head = cmd;
897 wait_cmd(dev, dma, 100, "i596_add_cmd timed out");
898 dma->scb.cmd = SWAP32(virt_to_dma(lp, &cmd->status));
899 dma->scb.command = SWAP16(CUC_START);
900 DMA_WBACK(dev, &(dma->scb), sizeof(struct i596_scb));
901 ca(dev);
902 }
903 lp->cmd_tail = cmd;
904 lp->cmd_backlog++;
905
906 spin_unlock_irqrestore (&lp->lock, flags);
907
908 if (lp->cmd_backlog > max_cmd_backlog) {
909 unsigned long tickssofar = jiffies - lp->last_cmd;
910
911 if (tickssofar < ticks_limit)
912 return;
913
914 printk(KERN_ERR
915 "%s: command unit timed out, status resetting.\n",
916 dev->name);
917#if 1
918 i596_reset(dev, lp);
919#endif
920 }
921}
922
923static int i596_open(struct net_device *dev)
924{
925 DEB(DEB_OPEN, printk(KERN_DEBUG
926 "%s: i596_open() irq %d.\n", dev->name, dev->irq));
927
928 if (init_rx_bufs(dev)) {
929 printk(KERN_ERR "%s: Failed to init rx bufs\n", dev->name);
930 return -EAGAIN;
931 }
932 if (init_i596_mem(dev)) {
933 printk(KERN_ERR "%s: Failed to init memory\n", dev->name);
934 goto out_remove_rx_bufs;
935 }
936 netif_start_queue(dev);
937
938 return 0;
939
940out_remove_rx_bufs:
941 remove_rx_bufs(dev);
942 return -EAGAIN;
943}
944
945static void i596_tx_timeout (struct net_device *dev)
946{
947 struct i596_private *lp = netdev_priv(dev);
948
949 /* Transmitter timeout, serious problems. */
950 DEB(DEB_ERRORS, printk(KERN_DEBUG
951 "%s: transmit timed out, status resetting.\n",
952 dev->name));
953
954 lp->stats.tx_errors++;
955
956 /* Try to restart the adaptor */
957 if (lp->last_restart == lp->stats.tx_packets) {
958 DEB(DEB_ERRORS, printk(KERN_DEBUG "Resetting board.\n"));
959 /* Shutdown and restart */
960 i596_reset (dev, lp);
961 } else {
962 /* Issue a channel attention signal */
963 DEB(DEB_ERRORS, printk(KERN_DEBUG "Kicking board.\n"));
964 lp->dma->scb.command = SWAP16(CUC_START | RX_START);
965 DMA_WBACK_INV(dev, &(lp->dma->scb), sizeof(struct i596_scb));
966 ca (dev);
967 lp->last_restart = lp->stats.tx_packets;
968 }
969
970 dev->trans_start = jiffies;
971 netif_wake_queue (dev);
972}
973
974
975static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
976{
977 struct i596_private *lp = netdev_priv(dev);
978 struct tx_cmd *tx_cmd;
979 struct i596_tbd *tbd;
980 short length = skb->len;
981 dev->trans_start = jiffies;
982
983 DEB(DEB_STARTTX, printk(KERN_DEBUG
984 "%s: i596_start_xmit(%x,%p) called\n",
985 dev->name, skb->len, skb->data));
986
987 if (length < ETH_ZLEN) {
988 if (skb_padto(skb, ETH_ZLEN))
989 return 0;
990 length = ETH_ZLEN;
991 }
992
993 netif_stop_queue(dev);
994
995 tx_cmd = lp->dma->tx_cmds + lp->next_tx_cmd;
996 tbd = lp->dma->tbds + lp->next_tx_cmd;
997
998 if (tx_cmd->cmd.command) {
999 DEB(DEB_ERRORS, printk(KERN_DEBUG
1000 "%s: xmit ring full, dropping packet.\n",
1001 dev->name));
1002 lp->stats.tx_dropped++;
1003
1004 dev_kfree_skb(skb);
1005 } else {
1006 if (++lp->next_tx_cmd == TX_RING_SIZE)
1007 lp->next_tx_cmd = 0;
1008 tx_cmd->tbd = SWAP32(virt_to_dma(lp, tbd));
1009 tbd->next = I596_NULL;
1010
1011 tx_cmd->cmd.command = SWAP16(CMD_FLEX | CmdTx);
1012 tx_cmd->skb = skb;
1013
1014 tx_cmd->pad = 0;
1015 tx_cmd->size = 0;
1016 tbd->pad = 0;
1017 tbd->size = SWAP16(EOF | length);
1018
1019 tx_cmd->dma_addr = dma_map_single(dev->dev.parent, skb->data,
1020 skb->len, DMA_TO_DEVICE);
1021 tbd->data = SWAP32(tx_cmd->dma_addr);
1022
1023 DEB(DEB_TXADDR, print_eth(skb->data, "tx-queued"));
1024 DMA_WBACK_INV(dev, tx_cmd, sizeof(struct tx_cmd));
1025 DMA_WBACK_INV(dev, tbd, sizeof(struct i596_tbd));
1026 i596_add_cmd(dev, &tx_cmd->cmd);
1027
1028 lp->stats.tx_packets++;
1029 lp->stats.tx_bytes += length;
1030 }
1031
1032 netif_start_queue(dev);
1033
1034 return 0;
1035}
1036
1037static void print_eth(unsigned char *add, char *str)
1038{
1039 int i;
1040
1041 printk(KERN_DEBUG "i596 0x%p, ", add);
1042 for (i = 0; i < 6; i++)
1043 printk(" %02X", add[i + 6]);
1044 printk(" -->");
1045 for (i = 0; i < 6; i++)
1046 printk(" %02X", add[i]);
1047 printk(" %02X%02X, %s\n", add[12], add[13], str);
1048}
1049
1050static int __devinit i82596_probe(struct net_device *dev)
1051{
1052 int i;
1053 struct i596_private *lp = netdev_priv(dev);
1054 struct i596_dma *dma;
1055
 1056	/* This lot is to ensure things have been cache line aligned. */
1057 BUILD_BUG_ON(sizeof(struct i596_rfd) != 32);
1058 BUILD_BUG_ON(sizeof(struct i596_rbd) & 31);
1059 BUILD_BUG_ON(sizeof(struct tx_cmd) & 31);
1060 BUILD_BUG_ON(sizeof(struct i596_tbd) != 32);
1061#ifndef __LP64__
1062 BUILD_BUG_ON(sizeof(struct i596_dma) > 4096);
1063#endif
1064
1065 if (!dev->base_addr || !dev->irq)
1066 return -ENODEV;
1067
1068 dma = (struct i596_dma *) DMA_ALLOC(dev->dev.parent,
1069 sizeof(struct i596_dma), &lp->dma_addr, GFP_KERNEL);
1070 if (!dma) {
1071 printk(KERN_ERR "%s: Couldn't get shared memory\n", __FILE__);
1072 return -ENOMEM;
1073 }
1074
1075 /* The 82596-specific entries in the device structure. */
1076 dev->open = i596_open;
1077 dev->stop = i596_close;
1078 dev->hard_start_xmit = i596_start_xmit;
1079 dev->get_stats = i596_get_stats;
1080 dev->set_multicast_list = set_multicast_list;
1081 dev->tx_timeout = i596_tx_timeout;
1082 dev->watchdog_timeo = TX_TIMEOUT;
1083#ifdef CONFIG_NET_POLL_CONTROLLER
1084 dev->poll_controller = i596_poll_controller;
1085#endif
1086
1087 memset(dma, 0, sizeof(struct i596_dma));
1088 lp->dma = dma;
1089
1090 dma->scb.command = 0;
1091 dma->scb.cmd = I596_NULL;
1092 dma->scb.rfd = I596_NULL;
1093 spin_lock_init(&lp->lock);
1094
1095 DMA_WBACK_INV(dev, dma, sizeof(struct i596_dma));
1096
1097 i = register_netdev(dev);
1098 if (i) {
1099 DMA_FREE(dev->dev.parent, sizeof(struct i596_dma),
1100 (void *)dma, lp->dma_addr);
1101 return i;
1102 };
1103
1104 DEB(DEB_PROBE, printk(KERN_INFO "%s: 82596 at %#3lx,",
1105 dev->name, dev->base_addr));
1106 for (i = 0; i < 6; i++)
1107 DEB(DEB_PROBE, printk(" %2.2X", dev->dev_addr[i]));
1108 DEB(DEB_PROBE, printk(" IRQ %d.\n", dev->irq));
1109 DEB(DEB_INIT, printk(KERN_INFO
1110 "%s: dma at 0x%p (%d bytes), lp->scb at 0x%p\n",
1111 dev->name, dma, (int)sizeof(struct i596_dma),
1112 &dma->scb));
1113
1114 return 0;
1115}
1116
1117#ifdef CONFIG_NET_POLL_CONTROLLER
1118static void i596_poll_controller(struct net_device *dev)
1119{
1120 disable_irq(dev->irq);
1121 i596_interrupt(dev->irq, dev);
1122 enable_irq(dev->irq);
1123}
1124#endif
1125
1126static irqreturn_t i596_interrupt(int irq, void *dev_id)
1127{
1128 struct net_device *dev = dev_id;
1129 struct i596_private *lp;
1130 struct i596_dma *dma;
1131 unsigned short status, ack_cmd = 0;
1132
1133 if (dev == NULL) {
1134 printk(KERN_WARNING "%s: irq %d for unknown device.\n",
1135 __FUNCTION__, irq);
1136 return IRQ_NONE;
1137 }
1138
1139 lp = netdev_priv(dev);
1140 dma = lp->dma;
1141
1142 spin_lock (&lp->lock);
1143
1144 wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1145 status = SWAP16(dma->scb.status);
1146
1147 DEB(DEB_INTS, printk(KERN_DEBUG
1148 "%s: i596 interrupt, IRQ %d, status %4.4x.\n",
1149 dev->name, irq, status));
1150
1151 ack_cmd = status & 0xf000;
1152
1153 if (!ack_cmd) {
1154 DEB(DEB_ERRORS, printk(KERN_DEBUG
1155 "%s: interrupt with no events\n",
1156 dev->name));
1157 spin_unlock (&lp->lock);
1158 return IRQ_NONE;
1159 }
1160
1161 if ((status & 0x8000) || (status & 0x2000)) {
1162 struct i596_cmd *ptr;
1163
1164 if ((status & 0x8000))
1165 DEB(DEB_INTS,
1166 printk(KERN_DEBUG
1167 "%s: i596 interrupt completed command.\n",
1168 dev->name));
1169 if ((status & 0x2000))
1170 DEB(DEB_INTS,
1171 printk(KERN_DEBUG
1172 "%s: i596 interrupt command unit inactive %x.\n",
1173 dev->name, status & 0x0700));
1174
1175 while (lp->cmd_head != NULL) {
1176 DMA_INV(dev, lp->cmd_head, sizeof(struct i596_cmd));
1177 if (!(lp->cmd_head->status & SWAP16(STAT_C)))
1178 break;
1179
1180 ptr = lp->cmd_head;
1181
1182 DEB(DEB_STATUS,
1183 printk(KERN_DEBUG
1184 "cmd_head->status = %04x, ->command = %04x\n",
1185 SWAP16(lp->cmd_head->status),
1186 SWAP16(lp->cmd_head->command)));
1187 lp->cmd_head = ptr->v_next;
1188 lp->cmd_backlog--;
1189
1190 switch (SWAP16(ptr->command) & 0x7) {
1191 case CmdTx:
1192 {
1193 struct tx_cmd *tx_cmd = (struct tx_cmd *) ptr;
1194 struct sk_buff *skb = tx_cmd->skb;
1195
1196 if (ptr->status & SWAP16(STAT_OK)) {
1197 DEB(DEB_TXADDR,
1198 print_eth(skb->data, "tx-done"));
1199 } else {
1200 lp->stats.tx_errors++;
1201 if (ptr->status & SWAP16(0x0020))
1202 lp->stats.collisions++;
1203 if (!(ptr->status & SWAP16(0x0040)))
1204 lp->stats.tx_heartbeat_errors++;
1205 if (ptr->status & SWAP16(0x0400))
1206 lp->stats.tx_carrier_errors++;
1207 if (ptr->status & SWAP16(0x0800))
1208 lp->stats.collisions++;
1209 if (ptr->status & SWAP16(0x1000))
1210 lp->stats.tx_aborted_errors++;
1211 }
1212 dma_unmap_single(dev->dev.parent,
1213 tx_cmd->dma_addr,
1214 skb->len, DMA_TO_DEVICE);
1215 dev_kfree_skb_irq(skb);
1216
1217 tx_cmd->cmd.command = 0; /* Mark free */
1218 break;
1219 }
1220 case CmdTDR:
1221 {
1222 unsigned short status = SWAP16(((struct tdr_cmd *)ptr)->status);
1223
1224 if (status & 0x8000) {
1225 DEB(DEB_ANY,
1226 printk(KERN_DEBUG "%s: link ok.\n",
1227 dev->name));
1228 } else {
1229 if (status & 0x4000)
1230 printk(KERN_ERR
1231 "%s: Transceiver problem.\n",
1232 dev->name);
1233 if (status & 0x2000)
1234 printk(KERN_ERR
1235 "%s: Termination problem.\n",
1236 dev->name);
1237 if (status & 0x1000)
1238 printk(KERN_ERR
1239 "%s: Short circuit.\n",
1240 dev->name);
1241
1242 DEB(DEB_TDR,
1243 printk(KERN_DEBUG "%s: Time %d.\n",
1244 dev->name, status & 0x07ff));
1245 }
1246 break;
1247 }
1248 case CmdConfigure:
1249 /*
 1250			 * Zap command so set_multicast_list() knows
1251 * it is free
1252 */
1253 ptr->command = 0;
1254 break;
1255 }
1256 ptr->v_next = NULL;
1257 ptr->b_next = I596_NULL;
1258 DMA_WBACK(dev, ptr, sizeof(struct i596_cmd));
1259 lp->last_cmd = jiffies;
1260 }
1261
 1262		/* This mess arranges that only the last of any outstanding
 1263		 * commands has the interrupt bit set. We should probably really
1264 * only add to the cmd queue when the CU is stopped.
1265 */
1266 ptr = lp->cmd_head;
1267 while ((ptr != NULL) && (ptr != lp->cmd_tail)) {
1268 struct i596_cmd *prev = ptr;
1269
1270 ptr->command &= SWAP16(0x1fff);
1271 ptr = ptr->v_next;
1272 DMA_WBACK_INV(dev, prev, sizeof(struct i596_cmd));
1273 }
1274
1275 if (lp->cmd_head != NULL)
1276 ack_cmd |= CUC_START;
1277 dma->scb.cmd = SWAP32(virt_to_dma(lp, &lp->cmd_head->status));
1278 DMA_WBACK_INV(dev, &dma->scb, sizeof(struct i596_scb));
1279 }
1280 if ((status & 0x1000) || (status & 0x4000)) {
1281 if ((status & 0x4000))
1282 DEB(DEB_INTS,
1283 printk(KERN_DEBUG
1284 "%s: i596 interrupt received a frame.\n",
1285 dev->name));
1286 i596_rx(dev);
1287 /* Only RX_START if stopped - RGH 07-07-96 */
1288 if (status & 0x1000) {
1289 if (netif_running(dev)) {
1290 DEB(DEB_ERRORS,
1291 printk(KERN_DEBUG
1292 "%s: i596 interrupt receive unit inactive, status 0x%x\n",
1293 dev->name, status));
1294 ack_cmd |= RX_START;
1295 lp->stats.rx_errors++;
1296 lp->stats.rx_fifo_errors++;
1297 rebuild_rx_bufs(dev);
1298 }
1299 }
1300 }
1301 wait_cmd(dev, dma, 100, "i596 interrupt, timeout");
1302 dma->scb.command = SWAP16(ack_cmd);
1303 DMA_WBACK(dev, &dma->scb, sizeof(struct i596_scb));
1304
1305 /* DANGER: I suspect that some kind of interrupt
1306 acknowledgement aside from acking the 82596 might be needed
1307 here... but it's running acceptably without */
1308
1309 ca(dev);
1310
1311 wait_cmd(dev, dma, 100, "i596 interrupt, exit timeout");
1312 DEB(DEB_INTS, printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name));
1313
1314 spin_unlock (&lp->lock);
1315 return IRQ_HANDLED;
1316}
1317
1318static int i596_close(struct net_device *dev)
1319{
1320 struct i596_private *lp = netdev_priv(dev);
1321 unsigned long flags;
1322
1323 netif_stop_queue(dev);
1324
1325 DEB(DEB_INIT,
1326 printk(KERN_DEBUG
1327 "%s: Shutting down ethercard, status was %4.4x.\n",
1328 dev->name, SWAP16(lp->dma->scb.status)));
1329
1330 spin_lock_irqsave(&lp->lock, flags);
1331
1332 wait_cmd(dev, lp->dma, 100, "close1 timed out");
1333 lp->dma->scb.command = SWAP16(CUC_ABORT | RX_ABORT);
1334 DMA_WBACK(dev, &lp->dma->scb, sizeof(struct i596_scb));
1335
1336 ca(dev);
1337
1338 wait_cmd(dev, lp->dma, 100, "close2 timed out");
1339 spin_unlock_irqrestore(&lp->lock, flags);
1340 DEB(DEB_STRUCT, i596_display_data(dev));
1341 i596_cleanup_cmd(dev, lp);
1342
1343 free_irq(dev->irq, dev);
1344 remove_rx_bufs(dev);
1345
1346 return 0;
1347}
1348
1349static struct net_device_stats *i596_get_stats(struct net_device *dev)
1350{
1351 struct i596_private *lp = netdev_priv(dev);
1352
1353 return &lp->stats;
1354}
1355
1356/*
1357 * Set or clear the multicast filter for this adaptor.
1358 */
1359
1360static void set_multicast_list(struct net_device *dev)
1361{
1362 struct i596_private *lp = netdev_priv(dev);
1363 struct i596_dma *dma = lp->dma;
1364 int config = 0, cnt;
1365
1366 DEB(DEB_MULTI,
1367 printk(KERN_DEBUG
1368 "%s: set multicast list, %d entries, promisc %s, allmulti %s\n",
1369 dev->name, dev->mc_count,
1370 dev->flags & IFF_PROMISC ? "ON" : "OFF",
1371 dev->flags & IFF_ALLMULTI ? "ON" : "OFF"));
1372
1373 if ((dev->flags & IFF_PROMISC) &&
1374 !(dma->cf_cmd.i596_config[8] & 0x01)) {
1375 dma->cf_cmd.i596_config[8] |= 0x01;
1376 config = 1;
1377 }
1378 if (!(dev->flags & IFF_PROMISC) &&
1379 (dma->cf_cmd.i596_config[8] & 0x01)) {
1380 dma->cf_cmd.i596_config[8] &= ~0x01;
1381 config = 1;
1382 }
1383 if ((dev->flags & IFF_ALLMULTI) &&
1384 (dma->cf_cmd.i596_config[11] & 0x20)) {
1385 dma->cf_cmd.i596_config[11] &= ~0x20;
1386 config = 1;
1387 }
1388 if (!(dev->flags & IFF_ALLMULTI) &&
1389 !(dma->cf_cmd.i596_config[11] & 0x20)) {
1390 dma->cf_cmd.i596_config[11] |= 0x20;
1391 config = 1;
1392 }
1393 if (config) {
1394 if (dma->cf_cmd.cmd.command)
1395 printk(KERN_INFO
1396 "%s: config change request already queued\n",
1397 dev->name);
1398 else {
1399 dma->cf_cmd.cmd.command = SWAP16(CmdConfigure);
1400 DMA_WBACK_INV(dev, &dma->cf_cmd, sizeof(struct cf_cmd));
1401 i596_add_cmd(dev, &dma->cf_cmd.cmd);
1402 }
1403 }
1404
1405 cnt = dev->mc_count;
1406 if (cnt > MAX_MC_CNT) {
1407 cnt = MAX_MC_CNT;
 1408		printk(KERN_NOTICE "%s: Only %d multicast addresses supported\n",
1409 dev->name, cnt);
1410 }
1411
1412 if (dev->mc_count > 0) {
1413 struct dev_mc_list *dmi;
1414 unsigned char *cp;
1415 struct mc_cmd *cmd;
1416
1417 cmd = &dma->mc_cmd;
1418 cmd->cmd.command = SWAP16(CmdMulticastList);
 1419		cmd->mc_cnt = SWAP16(cnt * 6);	/* cnt was clamped to MAX_MC_CNT above */
1420 cp = cmd->mc_addrs;
1421 for (dmi = dev->mc_list;
1422 cnt && dmi != NULL;
1423 dmi = dmi->next, cnt--, cp += 6) {
1424 memcpy(cp, dmi->dmi_addr, 6);
1425 if (i596_debug > 1)
1426 DEB(DEB_MULTI,
1427 printk(KERN_DEBUG
1428 "%s: Adding address %02x:%02x:%02x:%02x:%02x:%02x\n",
1429 dev->name, cp[0], cp[1], cp[2], cp[3], cp[4], cp[5]));
1430 }
1431 DMA_WBACK_INV(dev, &dma->mc_cmd, sizeof(struct mc_cmd));
1432 i596_add_cmd(dev, &cmd->cmd);
1433 }
1434}
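The receive path in i596_rx() above uses a copy-break heuristic: frames no longer than rx_copybreak are copied into a right-sized skb so the large ring buffer stays in place and is reused immediately, while longer frames are handed up as-is and the ring slot is refilled with a freshly allocated, freshly mapped buffer. The sketch below is a minimal userspace model of that decision only, not driver code; BUF_SZ and copybreak stand in for PKT_BUF_SZ and rx_copybreak, and ring_slot, rx_one and deliver are invented names.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BUF_SZ 1600                 /* stands in for PKT_BUF_SZ */
static const int copybreak = 100;   /* stands in for rx_copybreak */

struct ring_slot { unsigned char *data; };

static void deliver(unsigned char *frame, int len)
{
	/* stand-in for eth_type_trans()/netif_rx(): consume and free */
	printf("delivered %d bytes (first byte 0x%02x)\n", len, frame[0]);
	free(frame);
}

static int rx_one(struct ring_slot *slot, int frame_len)
{
	if (frame_len > copybreak) {
		/* long frame: pass the filled buffer up, refill the slot */
		unsigned char *fresh = malloc(BUF_SZ);
		if (!fresh)
			return -1;      /* "memory squeeze": keep old buffer */
		deliver(slot->data, frame_len);
		slot->data = fresh;
	} else {
		/* short frame: copy into a right-sized buffer, reuse slot */
		unsigned char *copy = malloc(frame_len);
		if (!copy)
			return -1;
		memcpy(copy, slot->data, frame_len);
		deliver(copy, frame_len);
	}
	return 0;
}

int main(void)
{
	struct ring_slot slot = { malloc(BUF_SZ) };

	if (!slot.data)
		return 1;
	memset(slot.data, 0xab, BUF_SZ);
	rx_one(&slot, 60);      /* copied, slot buffer kept */
	rx_one(&slot, 1400);    /* handed up, slot refilled */
	free(slot.data);
	return 0;
}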
diff --git a/drivers/net/mlx4/qp.c b/drivers/net/mlx4/qp.c
index 7f8b7d55b6e1..492cfaaaa75c 100644
--- a/drivers/net/mlx4/qp.c
+++ b/drivers/net/mlx4/qp.c
@@ -113,8 +113,7 @@ int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
113 struct mlx4_cmd_mailbox *mailbox; 113 struct mlx4_cmd_mailbox *mailbox;
114 int ret = 0; 114 int ret = 0;
115 115
 116 	if (cur_state < 0 || cur_state >= MLX4_QP_NUM_STATE ||	 116 	if (cur_state >= MLX4_QP_NUM_STATE || new_state >= MLX4_QP_NUM_STATE ||
 117 	    new_state < 0 || new_state >= MLX4_QP_NUM_STATE ||
118 !op[cur_state][new_state]) 117 !op[cur_state][new_state])
119 return -EINVAL; 118 return -EINVAL;
120 119
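This hunk and the netxen changes below drop lower-bound tests: port is visibly declared u32 in the netxen functions, and the QP state arguments are enum constants that start at zero, so a "< 0" comparison can never be true there and only the upper-bound check actually guards the op[][] lookup or the port index. A self-contained illustration (invented names lookup, op, NUM_STATE; not code from either driver):

#include <stdio.h>

enum state { S0, S1, NUM_STATE };

static int lookup(unsigned int cur, unsigned int next)
{
	static const int op[NUM_STATE][NUM_STATE] = { { 1, 2 }, { 3, 4 } };

	/* "cur < 0" would be vacuously false here: cur is unsigned.   */
	/* The upper-bound tests are what keep the table access legal. */
	if (cur >= NUM_STATE || next >= NUM_STATE)
		return -1;
	return op[cur][next];
}

int main(void)
{
	printf("%d %d\n", lookup(S0, S1), lookup(5, 0));	/* prints "2 -1" */
	return 0;
}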
diff --git a/drivers/net/netxen/netxen_nic_niu.c b/drivers/net/netxen/netxen_nic_niu.c
index 75102d30730f..05e0577a0e10 100644
--- a/drivers/net/netxen/netxen_nic_niu.c
+++ b/drivers/net/netxen/netxen_nic_niu.c
@@ -724,7 +724,7 @@ int netxen_niu_disable_gbe_port(struct netxen_adapter *adapter)
724 __u32 mac_cfg0; 724 __u32 mac_cfg0;
725 u32 port = physical_port[adapter->portnum]; 725 u32 port = physical_port[adapter->portnum];
726 726
727 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 727 if (port > NETXEN_NIU_MAX_GBE_PORTS)
728 return -EINVAL; 728 return -EINVAL;
729 mac_cfg0 = 0; 729 mac_cfg0 = 0;
730 netxen_gb_soft_reset(mac_cfg0); 730 netxen_gb_soft_reset(mac_cfg0);
@@ -757,7 +757,7 @@ int netxen_niu_set_promiscuous_mode(struct netxen_adapter *adapter,
757 __u32 reg; 757 __u32 reg;
758 u32 port = physical_port[adapter->portnum]; 758 u32 port = physical_port[adapter->portnum];
759 759
760 if ((port < 0) || (port > NETXEN_NIU_MAX_GBE_PORTS)) 760 if (port > NETXEN_NIU_MAX_GBE_PORTS)
761 return -EINVAL; 761 return -EINVAL;
762 762
763 /* save previous contents */ 763 /* save previous contents */
@@ -894,7 +894,7 @@ int netxen_niu_xg_set_promiscuous_mode(struct netxen_adapter *adapter,
894 __u32 reg; 894 __u32 reg;
895 u32 port = physical_port[adapter->portnum]; 895 u32 port = physical_port[adapter->portnum];
896 896
897 if ((port < 0) || (port > NETXEN_NIU_MAX_XG_PORTS)) 897 if (port > NETXEN_NIU_MAX_XG_PORTS)
898 return -EINVAL; 898 return -EINVAL;
899 899
900 if (netxen_nic_hw_read_wx(adapter, 900 if (netxen_nic_hw_read_wx(adapter,
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 8d38425e46c3..0b3066a6fe40 100644
--- a/drivers/net/pasemi_mac.c
+++ b/drivers/net/pasemi_mac.c
@@ -755,7 +755,7 @@ static int pasemi_mac_open(struct net_device *dev)
755 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G; 755 flags |= PAS_MAC_CFG_PCFG_TSR_1G | PAS_MAC_CFG_PCFG_SPD_1G;
756 756
757 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch), 757 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_RXCH_CFG(mac->dma_rxch),
758 PAS_IOB_DMA_RXCH_CFG_CNTTH(1)); 758 PAS_IOB_DMA_RXCH_CFG_CNTTH(0));
759 759
760 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch), 760 pci_write_config_dword(mac->iob_pdev, PAS_IOB_DMA_TXCH_CFG(mac->dma_txch),
761 PAS_IOB_DMA_TXCH_CFG_CNTTH(32)); 761 PAS_IOB_DMA_TXCH_CFG_CNTTH(32));
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 808fae1577e0..50dff1b81d34 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -521,6 +521,7 @@ static void mdio_write(kio_addr_t addr, int phy_id, int loc, int value)
521 521
522static int axnet_open(struct net_device *dev) 522static int axnet_open(struct net_device *dev)
523{ 523{
524 int ret;
524 axnet_dev_t *info = PRIV(dev); 525 axnet_dev_t *info = PRIV(dev);
525 struct pcmcia_device *link = info->p_dev; 526 struct pcmcia_device *link = info->p_dev;
526 527
@@ -529,9 +530,11 @@ static int axnet_open(struct net_device *dev)
529 if (!pcmcia_dev_present(link)) 530 if (!pcmcia_dev_present(link))
530 return -ENODEV; 531 return -ENODEV;
531 532
532 link->open++; 533 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev);
534 if (ret)
535 return ret;
533 536
534 request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, "axnet_cs", dev); 537 link->open++;
535 538
536 info->link_status = 0x00; 539 info->link_status = 0x00;
537 init_timer(&info->watchdog); 540 init_timer(&info->watchdog);
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c
index 3f93d4933235..85d5f2ca4bb5 100644
--- a/drivers/net/pcmcia/fmvj18x_cs.c
+++ b/drivers/net/pcmcia/fmvj18x_cs.c
@@ -109,7 +109,7 @@ static const struct ethtool_ops netdev_ethtool_ops;
109 card type 109 card type
110 */ 110 */
111typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN, 111typedef enum { MBH10302, MBH10304, TDK, CONTEC, LA501, UNGERMANN,
112 XXX10304 112 XXX10304, NEC, KME
113} cardtype_t; 113} cardtype_t;
114 114
115/* 115/*
@@ -374,6 +374,18 @@ static int fmvj18x_config(struct pcmcia_device *link)
374 link->io.NumPorts2 = 8; 374 link->io.NumPorts2 = 8;
375 } 375 }
376 break; 376 break;
377 case MANFID_NEC:
378 cardtype = NEC; /* MultiFunction Card */
379 link->conf.ConfigBase = 0x800;
380 link->conf.ConfigIndex = 0x47;
381 link->io.NumPorts2 = 8;
382 break;
383 case MANFID_KME:
384 cardtype = KME; /* MultiFunction Card */
385 link->conf.ConfigBase = 0x800;
386 link->conf.ConfigIndex = 0x47;
387 link->io.NumPorts2 = 8;
388 break;
377 case MANFID_CONTEC: 389 case MANFID_CONTEC:
378 cardtype = CONTEC; 390 cardtype = CONTEC;
379 break; 391 break;
@@ -450,6 +462,8 @@ static int fmvj18x_config(struct pcmcia_device *link)
450 case TDK: 462 case TDK:
451 case LA501: 463 case LA501:
452 case CONTEC: 464 case CONTEC:
465 case NEC:
466 case KME:
453 tuple.DesiredTuple = CISTPL_FUNCE; 467 tuple.DesiredTuple = CISTPL_FUNCE;
454 tuple.TupleOffset = 0; 468 tuple.TupleOffset = 0;
455 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple)); 469 CS_CHECK(GetFirstTuple, pcmcia_get_first_tuple(link, &tuple));
@@ -469,6 +483,10 @@ static int fmvj18x_config(struct pcmcia_device *link)
469 card_name = "TDK LAK-CD021"; 483 card_name = "TDK LAK-CD021";
470 } else if( cardtype == LA501 ) { 484 } else if( cardtype == LA501 ) {
471 card_name = "LA501"; 485 card_name = "LA501";
486 } else if( cardtype == NEC ) {
487 card_name = "PK-UG-J001";
488 } else if( cardtype == KME ) {
489 card_name = "Panasonic";
472 } else { 490 } else {
473 card_name = "C-NET(PC)C"; 491 card_name = "C-NET(PC)C";
474 } 492 }
@@ -678,8 +696,11 @@ static struct pcmcia_device_id fmvj18x_ids[] = {
678 PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da), 696 PCMCIA_DEVICE_PROD_ID1("PCMCIA MBH10302", 0x8f4005da),
679 PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080), 697 PCMCIA_DEVICE_PROD_ID1("UBKK,V2.0", 0x90888080),
680 PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed), 698 PCMCIA_PFC_DEVICE_PROD_ID12(0, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
699 PCMCIA_PFC_DEVICE_PROD_ID12(0, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
681 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a), 700 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0d0a),
682 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), 701 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a),
702 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05),
703 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101),
683 PCMCIA_DEVICE_NULL, 704 PCMCIA_DEVICE_NULL,
684}; 705};
685MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids); 706MODULE_DEVICE_TABLE(pcmcia, fmvj18x_ids);
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d88e9b2e93cf..f2613c29b008 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -960,6 +960,7 @@ static void mii_phy_probe(struct net_device *dev)
960 960
961static int pcnet_open(struct net_device *dev) 961static int pcnet_open(struct net_device *dev)
962{ 962{
963 int ret;
963 pcnet_dev_t *info = PRIV(dev); 964 pcnet_dev_t *info = PRIV(dev);
964 struct pcmcia_device *link = info->p_dev; 965 struct pcmcia_device *link = info->p_dev;
965 966
@@ -968,10 +969,12 @@ static int pcnet_open(struct net_device *dev)
968 if (!pcmcia_dev_present(link)) 969 if (!pcmcia_dev_present(link))
969 return -ENODEV; 970 return -ENODEV;
970 971
971 link->open++;
972
973 set_misc_reg(dev); 972 set_misc_reg(dev);
974 request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev); 973 ret = request_irq(dev->irq, ei_irq_wrapper, IRQF_SHARED, dev_info, dev);
974 if (ret)
975 return ret;
976
977 link->open++;
975 978
976 info->phy_id = info->eth_phy; 979 info->phy_id = info->eth_phy;
977 info->link_status = 0x00; 980 info->link_status = 0x00;
@@ -1552,6 +1555,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1552 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae), 1555 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
1553 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033), 1556 PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
1554 PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58), 1557 PCMCIA_PFC_DEVICE_PROD_ID12(0, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
1558 PCMCIA_PFC_DEVICE_PROD_ID12(0, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
1555 PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc), 1559 PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
1556 PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f), 1560 PCMCIA_PFC_DEVICE_PROD_ID12(0, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
1557 PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c), 1561 PCMCIA_MFC_DEVICE_PROD_ID12(0, "IBM", "Home and Away 28.8 PC Card ", 0xb569a6e5, 0x5bd4ff2c),
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 09b6f259eb92..dd09011c7ee5 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -55,6 +55,11 @@ config BROADCOM_PHY
55 ---help--- 55 ---help---
56 Currently supports the BCM5411, BCM5421 and BCM5461 PHYs. 56 Currently supports the BCM5411, BCM5421 and BCM5461 PHYs.
57 57
58config ICPLUS_PHY
59 tristate "Drivers for ICPlus PHYs"
60 ---help---
61 Currently supports the IP175C PHY.
62
58config FIXED_PHY 63config FIXED_PHY
59 tristate "Drivers for PHY emulation on fixed speed/link" 64 tristate "Drivers for PHY emulation on fixed speed/link"
60 ---help--- 65 ---help---
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index bcd1efbd2a18..8885650647ff 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -11,4 +11,5 @@ obj-$(CONFIG_QSEMI_PHY) += qsemi.o
11obj-$(CONFIG_SMSC_PHY) += smsc.o 11obj-$(CONFIG_SMSC_PHY) += smsc.o
12obj-$(CONFIG_VITESSE_PHY) += vitesse.o 12obj-$(CONFIG_VITESSE_PHY) += vitesse.o
13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o 13obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
14obj-$(CONFIG_ICPLUS_PHY) += icplus.o
14obj-$(CONFIG_FIXED_PHY) += fixed.o 15obj-$(CONFIG_FIXED_PHY) += fixed.o
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
new file mode 100644
index 000000000000..af3f1f2a9f87
--- /dev/null
+++ b/drivers/net/phy/icplus.c
@@ -0,0 +1,134 @@
1/*
2 * Driver for ICPlus PHYs
3 *
4 * Copyright (c) 2007 Freescale Semiconductor, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 */
12#include <linux/kernel.h>
13#include <linux/string.h>
14#include <linux/errno.h>
15#include <linux/unistd.h>
16#include <linux/slab.h>
17#include <linux/interrupt.h>
18#include <linux/init.h>
19#include <linux/delay.h>
20#include <linux/netdevice.h>
21#include <linux/etherdevice.h>
22#include <linux/skbuff.h>
23#include <linux/spinlock.h>
24#include <linux/mm.h>
25#include <linux/module.h>
26#include <linux/mii.h>
27#include <linux/ethtool.h>
28#include <linux/phy.h>
29
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <asm/uaccess.h>
33
34MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
35MODULE_AUTHOR("Michael Barkowski");
36MODULE_LICENSE("GPL");
37
38static int ip175c_config_init(struct phy_device *phydev)
39{
40 int err, i;
41 static int full_reset_performed = 0;
42
43 if (full_reset_performed == 0) {
44
45 /* master reset */
46 err = phydev->bus->write(phydev->bus, 30, 0, 0x175c);
47 if (err < 0)
48 return err;
49
50 /* ensure no bus delays overlap reset period */
51 err = phydev->bus->read(phydev->bus, 30, 0);
52
53 /* data sheet specifies reset period is 2 msec */
54 mdelay(2);
55
56 /* enable IP175C mode */
57 err = phydev->bus->write(phydev->bus, 29, 31, 0x175c);
58 if (err < 0)
59 return err;
60
61 /* Set MII0 speed and duplex (in PHY mode) */
62 err = phydev->bus->write(phydev->bus, 29, 22, 0x420);
63 if (err < 0)
64 return err;
65
66 /* reset switch ports */
67 for (i = 0; i < 5; i++) {
68 err = phydev->bus->write(phydev->bus, i,
69 MII_BMCR, BMCR_RESET);
70 if (err < 0)
71 return err;
72 }
73
74 for (i = 0; i < 5; i++)
75 err = phydev->bus->read(phydev->bus, i, MII_BMCR);
76
77 mdelay(2);
78
79 full_reset_performed = 1;
80 }
81
82 if (phydev->addr != 4) {
83 phydev->state = PHY_RUNNING;
84 phydev->speed = SPEED_100;
85 phydev->duplex = DUPLEX_FULL;
86 phydev->link = 1;
87 netif_carrier_on(phydev->attached_dev);
88 }
89
90 return 0;
91}
92
93static int ip175c_read_status(struct phy_device *phydev)
94{
95 if (phydev->addr == 4) /* WAN port */
96 genphy_read_status(phydev);
97 else
98 /* Don't need to read status for switch ports */
99 phydev->irq = PHY_IGNORE_INTERRUPT;
100
101 return 0;
102}
103
104static int ip175c_config_aneg(struct phy_device *phydev)
105{
106 if (phydev->addr == 4) /* WAN port */
107 genphy_config_aneg(phydev);
108
109 return 0;
110}
111
112static struct phy_driver ip175c_driver = {
113 .phy_id = 0x02430d80,
114 .name = "ICPlus IP175C",
115 .phy_id_mask = 0x0ffffff0,
116 .features = PHY_BASIC_FEATURES,
117 .config_init = &ip175c_config_init,
118 .config_aneg = &ip175c_config_aneg,
119 .read_status = &ip175c_read_status,
120 .driver = { .owner = THIS_MODULE,},
121};
122
123static int __init ip175c_init(void)
124{
125 return phy_driver_register(&ip175c_driver);
126}
127
128static void __exit ip175c_exit(void)
129{
130 phy_driver_unregister(&ip175c_driver);
131}
132
133module_init(ip175c_init);
134module_exit(ip175c_exit);
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b87f8d2a888b..fbe1104e9a07 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -60,6 +60,7 @@
60#define MII_M1111_PHY_EXT_SR 0x1b 60#define MII_M1111_PHY_EXT_SR 0x1b
61#define MII_M1111_HWCFG_MODE_MASK 0xf 61#define MII_M1111_HWCFG_MODE_MASK 0xf
62#define MII_M1111_HWCFG_MODE_RGMII 0xb 62#define MII_M1111_HWCFG_MODE_RGMII 0xb
63#define MII_M1111_HWCFG_MODE_SGMII_NO_CLK 0x4
63 64
64MODULE_DESCRIPTION("Marvell PHY driver"); 65MODULE_DESCRIPTION("Marvell PHY driver");
65MODULE_AUTHOR("Andy Fleming"); 66MODULE_AUTHOR("Andy Fleming");
@@ -169,6 +170,21 @@ static int m88e1111_config_init(struct phy_device *phydev)
169 return err; 170 return err;
170 } 171 }
171 172
173 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
174 int temp;
175
176 temp = phy_read(phydev, MII_M1111_PHY_EXT_SR);
177 if (temp < 0)
178 return temp;
179
180 temp &= ~(MII_M1111_HWCFG_MODE_MASK);
181 temp |= MII_M1111_HWCFG_MODE_SGMII_NO_CLK;
182
183 err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp);
184 if (err < 0)
185 return err;
186 }
187
172 err = phy_write(phydev, MII_BMCR, BMCR_RESET); 188 err = phy_write(phydev, MII_BMCR, BMCR_RESET);
173 if (err < 0) 189 if (err < 0)
174 return err; 190 return err;
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 585be044ebbb..8be8be451ada 100755
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2433,37 +2433,22 @@ static int ql_get_seg_count(struct ql3_adapter *qdev,
2433 return -1; 2433 return -1;
2434} 2434}
2435 2435
2436static void ql_hw_csum_setup(struct sk_buff *skb, 2436static void ql_hw_csum_setup(const struct sk_buff *skb,
2437 struct ob_mac_iocb_req *mac_iocb_ptr) 2437 struct ob_mac_iocb_req *mac_iocb_ptr)
2438{ 2438{
2439 struct ethhdr *eth; 2439 const struct iphdr *ip = ip_hdr(skb);
2440 struct iphdr *ip = NULL;
2441 u8 offset = ETH_HLEN;
2442 2440
2443 eth = (struct ethhdr *)(skb->data); 2441 mac_iocb_ptr->ip_hdr_off = skb_network_offset(skb);
2442 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2444 2443
2445 if (eth->h_proto == __constant_htons(ETH_P_IP)) { 2444 if (ip->protocol == IPPROTO_TCP) {
2446 ip = (struct iphdr *)&skb->data[ETH_HLEN]; 2445 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2447 } else if (eth->h_proto == htons(ETH_P_8021Q) &&
2448 ((struct vlan_ethhdr *)skb->data)->
2449 h_vlan_encapsulated_proto == __constant_htons(ETH_P_IP)) {
2450 ip = (struct iphdr *)&skb->data[VLAN_ETH_HLEN];
2451 offset = VLAN_ETH_HLEN;
2452 }
2453
2454 if (ip) {
2455 if (ip->protocol == IPPROTO_TCP) {
2456 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_TC |
2457 OB_3032MAC_IOCB_REQ_IC; 2446 OB_3032MAC_IOCB_REQ_IC;
2458 mac_iocb_ptr->ip_hdr_off = offset; 2447 } else {
2459 mac_iocb_ptr->ip_hdr_len = ip->ihl; 2448 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2460 } else if (ip->protocol == IPPROTO_UDP) {
2461 mac_iocb_ptr->flags1 |= OB_3032MAC_IOCB_REQ_UC |
2462 OB_3032MAC_IOCB_REQ_IC; 2449 OB_3032MAC_IOCB_REQ_IC;
2463 mac_iocb_ptr->ip_hdr_off = offset;
2464 mac_iocb_ptr->ip_hdr_len = ip->ihl;
2465 }
2466 } 2450 }
2451
2467} 2452}
2468 2453
2469/* 2454/*
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 5ec7752caa48..982a9010c7a9 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1,53 +1,11 @@
1/* 1/*
2========================================================================= 2 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
3 r8169.c: A RealTek RTL-8169 Gigabit Ethernet driver for Linux kernel 2.4.x. 3 *
4 -------------------------------------------------------------------- 4 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
5 5 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
6 History: 6 * Copyright (c) a lot of people too. Please respect their work.
7 Feb 4 2002 - created initially by ShuChen <shuchen@realtek.com.tw>. 7 *
8 May 20 2002 - Add link status force-mode and TBI mode support. 8 * See MAINTAINERS file for support contact information.
9 2004 - Massive updates. See kernel SCM system for details.
10=========================================================================
11 1. [DEPRECATED: use ethtool instead] The media can be forced in 5 modes.
12 Command: 'insmod r8169 media = SET_MEDIA'
13 Ex: 'insmod r8169 media = 0x04' will force PHY to operate in 100Mpbs Half-duplex.
14
15 SET_MEDIA can be:
16 _10_Half = 0x01
17 _10_Full = 0x02
18 _100_Half = 0x04
19 _100_Full = 0x08
20 _1000_Full = 0x10
21
22 2. Support TBI mode.
23=========================================================================
24VERSION 1.1 <2002/10/4>
25
26 The bit4:0 of MII register 4 is called "selector field", and have to be
27 00001b to indicate support of IEEE std 802.3 during NWay process of
28 exchanging Link Code Word (FLP).
29
30VERSION 1.2 <2002/11/30>
31
32 - Large style cleanup
33 - Use ether_crc in stock kernel (linux/crc32.h)
34 - Copy mc_filter setup code from 8139cp
35 (includes an optimization, and avoids set_bit use)
36
37VERSION 1.6LK <2004/04/14>
38
39 - Merge of Realtek's version 1.6
40 - Conversion to DMA API
41 - Suspend/resume
42 - Endianness
43 - Misc Rx/Tx bugs
44
45VERSION 2.2LK <2005/01/25>
46
47 - RX csum, TX csum/SG, TSO
48 - VLAN
49 - baby (< 7200) Jumbo frames support
50 - Merge of Realtek's version 2.2 (new phy)
51 */ 9 */
52 10
53#include <linux/module.h> 11#include <linux/module.h>
@@ -108,11 +66,6 @@ VERSION 2.2LK <2005/01/25>
108#define rtl8169_rx_quota(count, quota) count 66#define rtl8169_rx_quota(count, quota) count
109#endif 67#endif
110 68
111/* media options */
112#define MAX_UNITS 8
113static int media[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1 };
114static int num_media = 0;
115
116/* Maximum events (Rx packets, etc.) to handle at each interrupt. */ 69/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
117static const int max_interrupt_work = 20; 70static const int max_interrupt_work = 20;
118 71
@@ -126,7 +79,7 @@ static const int multicast_filter_limit = 32;
126#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ 79#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
127#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 80#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
128#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 81#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
129#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */ 82#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
130#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */ 83#define RxPacketMaxSize 0x3FE8 /* 16K - 1 - ETH_HLEN - VLAN - CRC... */
131#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 84#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
132#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 85#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
@@ -151,16 +104,17 @@ static const int multicast_filter_limit = 32;
151#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg))) 104#define RTL_R32(reg) ((unsigned long) readl (ioaddr + (reg)))
152 105
153enum mac_version { 106enum mac_version {
154 RTL_GIGA_MAC_VER_01 = 0x00, 107 RTL_GIGA_MAC_VER_01 = 0x01, // 8169
155 RTL_GIGA_MAC_VER_02 = 0x01, 108 RTL_GIGA_MAC_VER_02 = 0x02, // 8169S
156 RTL_GIGA_MAC_VER_03 = 0x02, 109 RTL_GIGA_MAC_VER_03 = 0x03, // 8110S
157 RTL_GIGA_MAC_VER_04 = 0x03, 110 RTL_GIGA_MAC_VER_04 = 0x04, // 8169SB
158 RTL_GIGA_MAC_VER_05 = 0x04, 111 RTL_GIGA_MAC_VER_05 = 0x05, // 8110SCd
159 RTL_GIGA_MAC_VER_11 = 0x0b, 112 RTL_GIGA_MAC_VER_06 = 0x06, // 8110SCe
160 RTL_GIGA_MAC_VER_12 = 0x0c, 113 RTL_GIGA_MAC_VER_11 = 0x0b, // 8168Bb
161 RTL_GIGA_MAC_VER_13 = 0x0d, 114 RTL_GIGA_MAC_VER_12 = 0x0c, // 8168Be 8168Bf
162 RTL_GIGA_MAC_VER_14 = 0x0e, 115 RTL_GIGA_MAC_VER_13 = 0x0d, // 8101Eb 8101Ec
163 RTL_GIGA_MAC_VER_15 = 0x0f 116 RTL_GIGA_MAC_VER_14 = 0x0e, // 8101
117 RTL_GIGA_MAC_VER_15 = 0x0f // 8101
164}; 118};
165 119
166enum phy_version { 120enum phy_version {
@@ -180,11 +134,12 @@ static const struct {
180 u8 mac_version; 134 u8 mac_version;
181 u32 RxConfigMask; /* Clears the bits supported by this chip */ 135 u32 RxConfigMask; /* Clears the bits supported by this chip */
182} rtl_chip_info[] = { 136} rtl_chip_info[] = {
183 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), 137 _R("RTL8169", RTL_GIGA_MAC_VER_01, 0xff7e1880), // 8169
184 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_02, 0xff7e1880), 138 _R("RTL8169s", RTL_GIGA_MAC_VER_02, 0xff7e1880), // 8169S
185 _R("RTL8169s/8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), 139 _R("RTL8110s", RTL_GIGA_MAC_VER_03, 0xff7e1880), // 8110S
186 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), 140 _R("RTL8169sb/8110sb", RTL_GIGA_MAC_VER_04, 0xff7e1880), // 8169SB
187 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), 141 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_05, 0xff7e1880), // 8110SCd
142 _R("RTL8169sc/8110sc", RTL_GIGA_MAC_VER_06, 0xff7e1880), // 8110SCe
188 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E 143 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_11, 0xff7e1880), // PCI-E
189 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E 144 _R("RTL8168b/8111b", RTL_GIGA_MAC_VER_12, 0xff7e1880), // PCI-E
190 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139 145 _R("RTL8101e", RTL_GIGA_MAC_VER_13, 0xff7e1880), // PCI-E 8139
@@ -199,20 +154,15 @@ enum cfg_version {
199 RTL_CFG_2 154 RTL_CFG_2
200}; 155};
201 156
202static const struct { 157static void rtl_hw_start_8169(struct net_device *);
203 unsigned int region; 158static void rtl_hw_start_8168(struct net_device *);
204 unsigned int align; 159static void rtl_hw_start_8101(struct net_device *);
205} rtl_cfg_info[] = {
206 [RTL_CFG_0] = { 1, NET_IP_ALIGN },
207 [RTL_CFG_1] = { 2, NET_IP_ALIGN },
208 [RTL_CFG_2] = { 2, 8 }
209};
210 160
211static struct pci_device_id rtl8169_pci_tbl[] = { 161static struct pci_device_id rtl8169_pci_tbl[] = {
212 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, 162 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
213 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, 163 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
214 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, 164 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
215 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_2 }, 165 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
216 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, 166 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
217 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, 167 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
218 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 }, 168 { PCI_DEVICE(0x1259, 0xc107), 0, 0, RTL_CFG_0 },
@@ -230,62 +180,63 @@ static struct {
230 u32 msg_enable; 180 u32 msg_enable;
231} debug = { -1 }; 181} debug = { -1 };
232 182
233enum RTL8169_registers { 183enum rtl_registers {
234 MAC0 = 0, /* Ethernet hardware address. */ 184 MAC0 = 0, /* Ethernet hardware address. */
235 MAR0 = 8, /* Multicast filter. */ 185 MAC4 = 4,
236 CounterAddrLow = 0x10, 186 MAR0 = 8, /* Multicast filter. */
237 CounterAddrHigh = 0x14, 187 CounterAddrLow = 0x10,
238 TxDescStartAddrLow = 0x20, 188 CounterAddrHigh = 0x14,
239 TxDescStartAddrHigh = 0x24, 189 TxDescStartAddrLow = 0x20,
240 TxHDescStartAddrLow = 0x28, 190 TxDescStartAddrHigh = 0x24,
241 TxHDescStartAddrHigh = 0x2c, 191 TxHDescStartAddrLow = 0x28,
242 FLASH = 0x30, 192 TxHDescStartAddrHigh = 0x2c,
243 ERSR = 0x36, 193 FLASH = 0x30,
244 ChipCmd = 0x37, 194 ERSR = 0x36,
245 TxPoll = 0x38, 195 ChipCmd = 0x37,
246 IntrMask = 0x3C, 196 TxPoll = 0x38,
247 IntrStatus = 0x3E, 197 IntrMask = 0x3c,
248 TxConfig = 0x40, 198 IntrStatus = 0x3e,
249 RxConfig = 0x44, 199 TxConfig = 0x40,
250 RxMissed = 0x4C, 200 RxConfig = 0x44,
251 Cfg9346 = 0x50, 201 RxMissed = 0x4c,
252 Config0 = 0x51, 202 Cfg9346 = 0x50,
253 Config1 = 0x52, 203 Config0 = 0x51,
254 Config2 = 0x53, 204 Config1 = 0x52,
255 Config3 = 0x54, 205 Config2 = 0x53,
256 Config4 = 0x55, 206 Config3 = 0x54,
257 Config5 = 0x56, 207 Config4 = 0x55,
258 MultiIntr = 0x5C, 208 Config5 = 0x56,
259 PHYAR = 0x60, 209 MultiIntr = 0x5c,
260 TBICSR = 0x64, 210 PHYAR = 0x60,
261 TBI_ANAR = 0x68, 211 TBICSR = 0x64,
262 TBI_LPAR = 0x6A, 212 TBI_ANAR = 0x68,
263 PHYstatus = 0x6C, 213 TBI_LPAR = 0x6a,
264 RxMaxSize = 0xDA, 214 PHYstatus = 0x6c,
265 CPlusCmd = 0xE0, 215 RxMaxSize = 0xda,
266 IntrMitigate = 0xE2, 216 CPlusCmd = 0xe0,
267 RxDescAddrLow = 0xE4, 217 IntrMitigate = 0xe2,
268 RxDescAddrHigh = 0xE8, 218 RxDescAddrLow = 0xe4,
269 EarlyTxThres = 0xEC, 219 RxDescAddrHigh = 0xe8,
270 FuncEvent = 0xF0, 220 EarlyTxThres = 0xec,
271 FuncEventMask = 0xF4, 221 FuncEvent = 0xf0,
272 FuncPresetState = 0xF8, 222 FuncEventMask = 0xf4,
273 FuncForceEvent = 0xFC, 223 FuncPresetState = 0xf8,
224 FuncForceEvent = 0xfc,
274}; 225};
275 226
276enum RTL8169_register_content { 227enum rtl_register_content {
277 /* InterruptStatusBits */ 228 /* InterruptStatusBits */
278 SYSErr = 0x8000, 229 SYSErr = 0x8000,
279 PCSTimeout = 0x4000, 230 PCSTimeout = 0x4000,
280 SWInt = 0x0100, 231 SWInt = 0x0100,
281 TxDescUnavail = 0x80, 232 TxDescUnavail = 0x0080,
282 RxFIFOOver = 0x40, 233 RxFIFOOver = 0x0040,
283 LinkChg = 0x20, 234 LinkChg = 0x0020,
284 RxOverflow = 0x10, 235 RxOverflow = 0x0010,
285 TxErr = 0x08, 236 TxErr = 0x0008,
286 TxOK = 0x04, 237 TxOK = 0x0004,
287 RxErr = 0x02, 238 RxErr = 0x0002,
288 RxOK = 0x01, 239 RxOK = 0x0001,
289 240
290 /* RxStatusDesc */ 241 /* RxStatusDesc */
291 RxFOVF = (1 << 23), 242 RxFOVF = (1 << 23),
@@ -295,26 +246,31 @@ enum RTL8169_register_content {
295 RxCRC = (1 << 19), 246 RxCRC = (1 << 19),
296 247
297 /* ChipCmdBits */ 248 /* ChipCmdBits */
298 CmdReset = 0x10, 249 CmdReset = 0x10,
299 CmdRxEnb = 0x08, 250 CmdRxEnb = 0x08,
300 CmdTxEnb = 0x04, 251 CmdTxEnb = 0x04,
301 RxBufEmpty = 0x01, 252 RxBufEmpty = 0x01,
253
254 /* TXPoll register p.5 */
255 HPQ = 0x80, /* Poll cmd on the high prio queue */
256 NPQ = 0x40, /* Poll cmd on the low prio queue */
257 FSWInt = 0x01, /* Forced software interrupt */
302 258
303 /* Cfg9346Bits */ 259 /* Cfg9346Bits */
304 Cfg9346_Lock = 0x00, 260 Cfg9346_Lock = 0x00,
305 Cfg9346_Unlock = 0xC0, 261 Cfg9346_Unlock = 0xc0,
306 262
307 /* rx_mode_bits */ 263 /* rx_mode_bits */
308 AcceptErr = 0x20, 264 AcceptErr = 0x20,
309 AcceptRunt = 0x10, 265 AcceptRunt = 0x10,
310 AcceptBroadcast = 0x08, 266 AcceptBroadcast = 0x08,
311 AcceptMulticast = 0x04, 267 AcceptMulticast = 0x04,
312 AcceptMyPhys = 0x02, 268 AcceptMyPhys = 0x02,
313 AcceptAllPhys = 0x01, 269 AcceptAllPhys = 0x01,
314 270
315 /* RxConfigBits */ 271 /* RxConfigBits */
316 RxCfgFIFOShift = 13, 272 RxCfgFIFOShift = 13,
317 RxCfgDMAShift = 8, 273 RxCfgDMAShift = 8,
318 274
319 /* TxConfigBits */ 275 /* TxConfigBits */
320 TxInterFrameGapShift = 24, 276 TxInterFrameGapShift = 24,
@@ -323,6 +279,10 @@ enum RTL8169_register_content {
323 /* Config1 register p.24 */ 279 /* Config1 register p.24 */
324 PMEnable = (1 << 0), /* Power Management Enable */ 280 PMEnable = (1 << 0), /* Power Management Enable */
325 281
282 /* Config2 register p. 25 */
283 PCI_Clock_66MHz = 0x01,
284 PCI_Clock_33MHz = 0x00,
285
326 /* Config3 register p.25 */ 286 /* Config3 register p.25 */
327 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 287 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
328 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 288 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
@@ -343,36 +303,34 @@ enum RTL8169_register_content {
343 TBINwComplete = 0x01000000, 303 TBINwComplete = 0x01000000,
344 304
345 /* CPlusCmd p.31 */ 305 /* CPlusCmd p.31 */
306 PktCntrDisable = (1 << 7), // 8168
346 RxVlan = (1 << 6), 307 RxVlan = (1 << 6),
347 RxChkSum = (1 << 5), 308 RxChkSum = (1 << 5),
348 PCIDAC = (1 << 4), 309 PCIDAC = (1 << 4),
349 PCIMulRW = (1 << 3), 310 PCIMulRW = (1 << 3),
311 INTT_0 = 0x0000, // 8168
312 INTT_1 = 0x0001, // 8168
313 INTT_2 = 0x0002, // 8168
314 INTT_3 = 0x0003, // 8168
350 315
351 /* rtl8169_PHYstatus */ 316 /* rtl8169_PHYstatus */
352 TBI_Enable = 0x80, 317 TBI_Enable = 0x80,
353 TxFlowCtrl = 0x40, 318 TxFlowCtrl = 0x40,
354 RxFlowCtrl = 0x20, 319 RxFlowCtrl = 0x20,
355 _1000bpsF = 0x10, 320 _1000bpsF = 0x10,
356 _100bps = 0x08, 321 _100bps = 0x08,
357 _10bps = 0x04, 322 _10bps = 0x04,
358 LinkStatus = 0x02, 323 LinkStatus = 0x02,
359 FullDup = 0x01, 324 FullDup = 0x01,
360
361 /* _MediaType */
362 _10_Half = 0x01,
363 _10_Full = 0x02,
364 _100_Half = 0x04,
365 _100_Full = 0x08,
366 _1000_Full = 0x10,
367 325
368 /* _TBICSRBit */ 326 /* _TBICSRBit */
369 TBILinkOK = 0x02000000, 327 TBILinkOK = 0x02000000,
370 328
371 /* DumpCounterCommand */ 329 /* DumpCounterCommand */
372 CounterDump = 0x8, 330 CounterDump = 0x8,
373}; 331};
374 332
375enum _DescStatusBit { 333enum desc_status_bit {
376 DescOwn = (1 << 31), /* Descriptor is owned by NIC */ 334 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
377 RingEnd = (1 << 30), /* End of descriptor ring */ 335 RingEnd = (1 << 30), /* End of descriptor ring */
378 FirstFrag = (1 << 29), /* First segment of a packet */ 336 FirstFrag = (1 << 29), /* First segment of a packet */
@@ -405,15 +363,15 @@ enum _DescStatusBit {
405#define RsvdMask 0x3fffc000 363#define RsvdMask 0x3fffc000
406 364
407struct TxDesc { 365struct TxDesc {
408 u32 opts1; 366 __le32 opts1;
409 u32 opts2; 367 __le32 opts2;
410 u64 addr; 368 __le64 addr;
411}; 369};
412 370
413struct RxDesc { 371struct RxDesc {
414 u32 opts1; 372 __le32 opts1;
415 u32 opts2; 373 __le32 opts2;
416 u64 addr; 374 __le64 addr;
417}; 375};
418 376
419struct ring_info { 377struct ring_info {
@@ -446,6 +404,8 @@ struct rtl8169_private {
446 unsigned rx_buf_sz; 404 unsigned rx_buf_sz;
447 struct timer_list timer; 405 struct timer_list timer;
448 u16 cp_cmd; 406 u16 cp_cmd;
407 u16 intr_event;
408 u16 napi_event;
449 u16 intr_mask; 409 u16 intr_mask;
450 int phy_auto_nego_reg; 410 int phy_auto_nego_reg;
451 int phy_1000_ctrl_reg; 411 int phy_1000_ctrl_reg;
@@ -455,6 +415,7 @@ struct rtl8169_private {
455 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 415 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
456 void (*get_settings)(struct net_device *, struct ethtool_cmd *); 416 void (*get_settings)(struct net_device *, struct ethtool_cmd *);
457 void (*phy_reset_enable)(void __iomem *); 417 void (*phy_reset_enable)(void __iomem *);
418 void (*hw_start)(struct net_device *);
458 unsigned int (*phy_reset_pending)(void __iomem *); 419 unsigned int (*phy_reset_pending)(void __iomem *);
459 unsigned int (*link_ok)(void __iomem *); 420 unsigned int (*link_ok)(void __iomem *);
460 struct delayed_work task; 421 struct delayed_work task;
@@ -463,8 +424,6 @@ struct rtl8169_private {
463 424
464MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>"); 425MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
465MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver"); 426MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
466module_param_array(media, int, &num_media, 0);
467MODULE_PARM_DESC(media, "force phy operation. Deprecated by ethtool (8).");
468module_param(rx_copybreak, int, 0); 427module_param(rx_copybreak, int, 0);
469MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 428MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
470module_param(use_dac, int, 0); 429module_param(use_dac, int, 0);
@@ -478,9 +437,9 @@ static int rtl8169_open(struct net_device *dev);
478static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev); 437static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev);
479static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance); 438static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance);
480static int rtl8169_init_ring(struct net_device *dev); 439static int rtl8169_init_ring(struct net_device *dev);
481static void rtl8169_hw_start(struct net_device *dev); 440static void rtl_hw_start(struct net_device *dev);
482static int rtl8169_close(struct net_device *dev); 441static int rtl8169_close(struct net_device *dev);
483static void rtl8169_set_rx_mode(struct net_device *dev); 442static void rtl_set_rx_mode(struct net_device *dev);
484static void rtl8169_tx_timeout(struct net_device *dev); 443static void rtl8169_tx_timeout(struct net_device *dev);
485static struct net_device_stats *rtl8169_get_stats(struct net_device *dev); 444static struct net_device_stats *rtl8169_get_stats(struct net_device *dev);
486static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *, 445static int rtl8169_rx_interrupt(struct net_device *, struct rtl8169_private *,
@@ -493,35 +452,37 @@ static void rtl8169_rx_clear(struct rtl8169_private *tp);
493static int rtl8169_poll(struct net_device *dev, int *budget); 452static int rtl8169_poll(struct net_device *dev, int *budget);
494#endif 453#endif
495 454
496static const u16 rtl8169_intr_mask =
497 SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
498static const u16 rtl8169_napi_event =
499 RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
500static const unsigned int rtl8169_rx_config = 455static const unsigned int rtl8169_rx_config =
501 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); 456 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
502 457
503static void mdio_write(void __iomem *ioaddr, int RegAddr, int value) 458static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
504{ 459{
505 int i; 460 int i;
506 461
507 RTL_W32(PHYAR, 0x80000000 | (RegAddr & 0xFF) << 16 | value); 462 RTL_W32(PHYAR, 0x80000000 | (reg_addr & 0xFF) << 16 | value);
508 463
509 for (i = 20; i > 0; i--) { 464 for (i = 20; i > 0; i--) {
510 /* Check if the RTL8169 has completed writing to the specified MII register */ 465 /*
466 * Check if the RTL8169 has completed writing to the specified
467 * MII register.
468 */
511 if (!(RTL_R32(PHYAR) & 0x80000000)) 469 if (!(RTL_R32(PHYAR) & 0x80000000))
512 break; 470 break;
513 udelay(25); 471 udelay(25);
514 } 472 }
515} 473}
516 474
517static int mdio_read(void __iomem *ioaddr, int RegAddr) 475static int mdio_read(void __iomem *ioaddr, int reg_addr)
518{ 476{
519 int i, value = -1; 477 int i, value = -1;
520 478
521 RTL_W32(PHYAR, 0x0 | (RegAddr & 0xFF) << 16); 479 RTL_W32(PHYAR, 0x0 | (reg_addr & 0xFF) << 16);
522 480
523 for (i = 20; i > 0; i--) { 481 for (i = 20; i > 0; i--) {
524 /* Check if the RTL8169 has completed retrieving data from the specified MII register */ 482 /*
483 * Check if the RTL8169 has completed retrieving data from
484 * the specified MII register.
485 */
525 if (RTL_R32(PHYAR) & 0x80000000) { 486 if (RTL_R32(PHYAR) & 0x80000000) {
526 value = (int) (RTL_R32(PHYAR) & 0xFFFF); 487 value = (int) (RTL_R32(PHYAR) & 0xFFFF);
527 break; 488 break;
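
A note on the MII access path in this hunk: PHYAR is a single 32-bit mailbox register. For a write, the driver sets bit 31, puts the register number in bits 23:16 and the data in bits 15:0, then polls until bit 31 clears; for a read it leaves bit 31 clear and polls until the chip sets it, at which point bits 15:0 hold the data. The standalone sketch below only reproduces that packing; the helper names and the main() harness are illustrative, not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	#define PHYAR_FLAG	0x80000000u	/* write request / busy-done flag */

	/* Compose the word written to PHYAR for an MII register write. */
	static uint32_t phyar_write_cmd(unsigned int reg_addr, uint16_t data)
	{
		return PHYAR_FLAG | ((reg_addr & 0xffu) << 16) | data;
	}

	/* Compose the word written to PHYAR to start an MII register read. */
	static uint32_t phyar_read_cmd(unsigned int reg_addr)
	{
		return (reg_addr & 0xffu) << 16;
	}

	int main(void)
	{
		printf("write MII reg 0x0b <- 0x0000: PHYAR = 0x%08x\n",
		       (unsigned int)phyar_write_cmd(0x0b, 0x0000));
		printf("start read of MII reg 0x01:  PHYAR = 0x%08x\n",
		       (unsigned int)phyar_read_cmd(0x01));
		return 0;
	}
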
@@ -579,7 +540,8 @@ static void rtl8169_xmii_reset_enable(void __iomem *ioaddr)
579} 540}
580 541
581static void rtl8169_check_link_status(struct net_device *dev, 542static void rtl8169_check_link_status(struct net_device *dev,
582 struct rtl8169_private *tp, void __iomem *ioaddr) 543 struct rtl8169_private *tp,
544 void __iomem *ioaddr)
583{ 545{
584 unsigned long flags; 546 unsigned long flags;
585 547
@@ -596,38 +558,6 @@ static void rtl8169_check_link_status(struct net_device *dev,
596 spin_unlock_irqrestore(&tp->lock, flags); 558 spin_unlock_irqrestore(&tp->lock, flags);
597} 559}
598 560
599static void rtl8169_link_option(int idx, u8 *autoneg, u16 *speed, u8 *duplex)
600{
601 struct {
602 u16 speed;
603 u8 duplex;
604 u8 autoneg;
605 u8 media;
606 } link_settings[] = {
607 { SPEED_10, DUPLEX_HALF, AUTONEG_DISABLE, _10_Half },
608 { SPEED_10, DUPLEX_FULL, AUTONEG_DISABLE, _10_Full },
609 { SPEED_100, DUPLEX_HALF, AUTONEG_DISABLE, _100_Half },
610 { SPEED_100, DUPLEX_FULL, AUTONEG_DISABLE, _100_Full },
611 { SPEED_1000, DUPLEX_FULL, AUTONEG_DISABLE, _1000_Full },
612 /* Make TBI happy */
613 { SPEED_1000, DUPLEX_FULL, AUTONEG_ENABLE, 0xff }
614 }, *p;
615 unsigned char option;
616
617 option = ((idx < MAX_UNITS) && (idx >= 0)) ? media[idx] : 0xff;
618
619 if ((option != 0xff) && !idx && netif_msg_drv(&debug))
620 printk(KERN_WARNING PFX "media option is deprecated.\n");
621
622 for (p = link_settings; p->media != 0xff; p++) {
623 if (p->media == option)
624 break;
625 }
626 *autoneg = p->autoneg;
627 *speed = p->speed;
628 *duplex = p->duplex;
629}
630
631static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 561static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
632{ 562{
633 struct rtl8169_private *tp = netdev_priv(dev); 563 struct rtl8169_private *tp = netdev_priv(dev);
@@ -667,7 +597,7 @@ static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
667{ 597{
668 struct rtl8169_private *tp = netdev_priv(dev); 598 struct rtl8169_private *tp = netdev_priv(dev);
669 void __iomem *ioaddr = tp->mmio_addr; 599 void __iomem *ioaddr = tp->mmio_addr;
670 int i; 600 unsigned int i;
671 static struct { 601 static struct {
672 u32 opt; 602 u32 opt;
673 u16 reg; 603 u16 reg;
@@ -893,8 +823,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
893 int ret; 823 int ret;
894 824
895 if (tp->vlgrp && (opts2 & RxVlanTag)) { 825 if (tp->vlgrp && (opts2 & RxVlanTag)) {
896 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp, 826 rtl8169_rx_hwaccel_skb(skb, tp->vlgrp, swab16(opts2 & 0xffff));
897 swab16(opts2 & 0xffff));
898 ret = 0; 827 ret = 0;
899 } else 828 } else
900 ret = -1; 829 ret = -1;
@@ -1115,7 +1044,6 @@ static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1115 } 1044 }
1116} 1045}
1117 1046
1118
1119static const struct ethtool_ops rtl8169_ethtool_ops = { 1047static const struct ethtool_ops rtl8169_ethtool_ops = {
1120 .get_drvinfo = rtl8169_get_drvinfo, 1048 .get_drvinfo = rtl8169_get_drvinfo,
1121 .get_regs_len = rtl8169_get_regs_len, 1049 .get_regs_len = rtl8169_get_regs_len,
@@ -1141,8 +1069,8 @@ static const struct ethtool_ops rtl8169_ethtool_ops = {
1141 .get_perm_addr = ethtool_op_get_perm_addr, 1069 .get_perm_addr = ethtool_op_get_perm_addr,
1142}; 1070};
1143 1071
1144static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum, 1072static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg,
1145 int bitval) 1073 int bitnum, int bitval)
1146{ 1074{
1147 int val; 1075 int val;
1148 1076
@@ -1152,8 +1080,20 @@ static void rtl8169_write_gmii_reg_bit(void __iomem *ioaddr, int reg, int bitnum
1152 mdio_write(ioaddr, reg, val & 0xffff); 1080 mdio_write(ioaddr, reg, val & 0xffff);
1153} 1081}
1154 1082
1155static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *ioaddr) 1083static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1084 void __iomem *ioaddr)
1156{ 1085{
1086 /*
1087 * The driver currently handles the 8168Bf and the 8168Be identically
1088 * but they can be identified more specifically through the test below
1089 * if needed:
1090 *
1091 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
1092 *
1093 * Same thing for the 8101Eb and the 8101Ec:
1094 *
1095 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
1096 */
1157 const struct { 1097 const struct {
1158 u32 mask; 1098 u32 mask;
1159 int mac_version; 1099 int mac_version;
@@ -1163,6 +1103,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *io
1163 { 0x34000000, RTL_GIGA_MAC_VER_13 }, 1103 { 0x34000000, RTL_GIGA_MAC_VER_13 },
1164 { 0x30800000, RTL_GIGA_MAC_VER_14 }, 1104 { 0x30800000, RTL_GIGA_MAC_VER_14 },
1165 { 0x30000000, RTL_GIGA_MAC_VER_11 }, 1105 { 0x30000000, RTL_GIGA_MAC_VER_11 },
1106 { 0x98000000, RTL_GIGA_MAC_VER_06 },
1166 { 0x18000000, RTL_GIGA_MAC_VER_05 }, 1107 { 0x18000000, RTL_GIGA_MAC_VER_05 },
1167 { 0x10000000, RTL_GIGA_MAC_VER_04 }, 1108 { 0x10000000, RTL_GIGA_MAC_VER_04 },
1168 { 0x04000000, RTL_GIGA_MAC_VER_03 }, 1109 { 0x04000000, RTL_GIGA_MAC_VER_03 },
@@ -1171,7 +1112,7 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, void __iomem *io
1171 }, *p = mac_info; 1112 }, *p = mac_info;
1172 u32 reg; 1113 u32 reg;
1173 1114
1174 reg = RTL_R32(TxConfig) & 0x7c800000; 1115 reg = RTL_R32(TxConfig) & 0xfc800000;
1175 while ((reg & p->mask) != p->mask) 1116 while ((reg & p->mask) != p->mask)
1176 p++; 1117 p++;
1177 tp->mac_version = p->mac_version; 1118 tp->mac_version = p->mac_version;
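
The comment added above documents how the 8168Bf/Be and 8101Eb/Ec variants could be told apart; the table itself is consumed by a simple first-match walk: TxConfig is masked with 0xfc800000 and the first entry whose mask bits are all present wins. A standalone sketch of that walk follows; the table is truncated and the catch-all entry is an assumption, since the tail of the array lies outside this hunk.

	#include <stdint.h>
	#include <stdio.h>

	struct mac_info {
		uint32_t mask;
		int	 mac_version;
	};

	static int get_mac_version(uint32_t txconfig)
	{
		static const struct mac_info tbl[] = {
			{ 0x98000000, 6 },	/* RTL_GIGA_MAC_VER_06 */
			{ 0x18000000, 5 },	/* RTL_GIGA_MAC_VER_05 */
			{ 0x10000000, 4 },	/* RTL_GIGA_MAC_VER_04 */
			{ 0x04000000, 3 },	/* RTL_GIGA_MAC_VER_03 */
			{ 0x00000000, 1 },	/* assumed catch-all entry */
		};
		const struct mac_info *p = tbl;
		uint32_t reg = txconfig & 0xfc800000;

		/* first entry whose mask bits are all set in TxConfig wins */
		while ((reg & p->mask) != p->mask)
			p++;
		return p->mac_version;
	}

	int main(void)
	{
		/* e.g. a chip reporting 0x98xxxxxx in TxConfig maps to VER_06 */
		printf("mac_version = %d\n", get_mac_version(0x98000000));
		return 0;
	}
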
@@ -1182,7 +1123,8 @@ static void rtl8169_print_mac_version(struct rtl8169_private *tp)
1182 dprintk("mac_version = 0x%02x\n", tp->mac_version); 1123 dprintk("mac_version = 0x%02x\n", tp->mac_version);
1183} 1124}
1184 1125
1185static void rtl8169_get_phy_version(struct rtl8169_private *tp, void __iomem *ioaddr) 1126static void rtl8169_get_phy_version(struct rtl8169_private *tp,
1127 void __iomem *ioaddr)
1186{ 1128{
1187 const struct { 1129 const struct {
1188 u16 mask; 1130 u16 mask;
@@ -1259,7 +1201,7 @@ static void rtl8169_hw_phy_config(struct net_device *dev)
1259 0xbf00 } //w 0 15 0 bf00 1201 0xbf00 } //w 0 15 0 bf00
1260 } 1202 }
1261 }, *p = phy_magic; 1203 }, *p = phy_magic;
1262 int i; 1204 unsigned int i;
1263 1205
1264 rtl8169_print_mac_version(tp); 1206 rtl8169_print_mac_version(tp);
1265 rtl8169_print_phy_version(tp); 1207 rtl8169_print_phy_version(tp);
@@ -1393,7 +1335,7 @@ static void rtl8169_phy_reset(struct net_device *dev,
1393 struct rtl8169_private *tp) 1335 struct rtl8169_private *tp)
1394{ 1336{
1395 void __iomem *ioaddr = tp->mmio_addr; 1337 void __iomem *ioaddr = tp->mmio_addr;
1396 int i; 1338 unsigned int i;
1397 1339
1398 tp->phy_reset_enable(ioaddr); 1340 tp->phy_reset_enable(ioaddr);
1399 for (i = 0; i < 100; i++) { 1341 for (i = 0; i < 100; i++) {
@@ -1408,21 +1350,16 @@ static void rtl8169_phy_reset(struct net_device *dev,
1408static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp) 1350static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1409{ 1351{
1410 void __iomem *ioaddr = tp->mmio_addr; 1352 void __iomem *ioaddr = tp->mmio_addr;
1411 static int board_idx = -1;
1412 u8 autoneg, duplex;
1413 u16 speed;
1414
1415 board_idx++;
1416 1353
1417 rtl8169_hw_phy_config(dev); 1354 rtl8169_hw_phy_config(dev);
1418 1355
1419 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 1356 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
1420 RTL_W8(0x82, 0x01); 1357 RTL_W8(0x82, 0x01);
1421 1358
1422 if (tp->mac_version < RTL_GIGA_MAC_VER_03) { 1359 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
1423 dprintk("Set PCI Latency=0x40\n"); 1360
1424 pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40); 1361 if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
1425 } 1362 pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
1426 1363
1427 if (tp->mac_version == RTL_GIGA_MAC_VER_02) { 1364 if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
1428 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 1365 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
@@ -1431,16 +1368,52 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
1431 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0 1368 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0
1432 } 1369 }
1433 1370
1434 rtl8169_link_option(board_idx, &autoneg, &speed, &duplex);
1435
1436 rtl8169_phy_reset(dev, tp); 1371 rtl8169_phy_reset(dev, tp);
1437 1372
1438 rtl8169_set_speed(dev, autoneg, speed, duplex); 1373 /*
1374 * rtl8169_set_speed_xmii takes good care of the Fast Ethernet
1375 * only 8101. Don't panic.
1376 */
1377 rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL);
1439 1378
1440 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp)) 1379 if ((RTL_R8(PHYstatus) & TBI_Enable) && netif_msg_link(tp))
1441 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name); 1380 printk(KERN_INFO PFX "%s: TBI auto-negotiating\n", dev->name);
1442} 1381}
1443 1382
1383static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
1384{
1385 void __iomem *ioaddr = tp->mmio_addr;
1386 u32 high;
1387 u32 low;
1388
1389 low = addr[0] | (addr[1] << 8) | (addr[2] << 16) | (addr[3] << 24);
1390 high = addr[4] | (addr[5] << 8);
1391
1392 spin_lock_irq(&tp->lock);
1393
1394 RTL_W8(Cfg9346, Cfg9346_Unlock);
1395 RTL_W32(MAC0, low);
1396 RTL_W32(MAC4, high);
1397 RTL_W8(Cfg9346, Cfg9346_Lock);
1398
1399 spin_unlock_irq(&tp->lock);
1400}
1401
1402static int rtl_set_mac_address(struct net_device *dev, void *p)
1403{
1404 struct rtl8169_private *tp = netdev_priv(dev);
1405 struct sockaddr *addr = p;
1406
1407 if (!is_valid_ether_addr(addr->sa_data))
1408 return -EADDRNOTAVAIL;
1409
1410 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1411
1412 rtl_rar_set(tp, dev->dev_addr);
1413
1414 return 0;
1415}
1416
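
rtl_rar_set() programs the station address by packing the six address bytes into two little-endian words, MAC0 (bytes 0-3) and MAC4 (bytes 4-5), under the Cfg9346 unlock. A minimal standalone sketch of that packing (helper name and harness illustrative only):

	#include <stdint.h>
	#include <stdio.h>

	static void rar_pack(const uint8_t addr[6], uint32_t *low, uint32_t *high)
	{
		/* bytes 0..3 form MAC0, bytes 4..5 form MAC4, LSB first */
		*low  = addr[0] | ((uint32_t)addr[1] << 8) |
			((uint32_t)addr[2] << 16) | ((uint32_t)addr[3] << 24);
		*high = addr[4] | ((uint32_t)addr[5] << 8);
	}

	int main(void)
	{
		const uint8_t mac[6] = { 0x00, 0xe0, 0x4c, 0x12, 0x34, 0x56 };
		uint32_t low, high;

		rar_pack(mac, &low, &high);
		/* prints MAC0 = 0x124ce000, MAC4 = 0x00005634 */
		printf("MAC0 = 0x%08x, MAC4 = 0x%08x\n", low, high);
		return 0;
	}
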
1444static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) 1417static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1445{ 1418{
1446 struct rtl8169_private *tp = netdev_priv(dev); 1419 struct rtl8169_private *tp = netdev_priv(dev);
@@ -1467,15 +1440,49 @@ static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1467 return -EOPNOTSUPP; 1440 return -EOPNOTSUPP;
1468} 1441}
1469 1442
1443static const struct rtl_cfg_info {
1444 void (*hw_start)(struct net_device *);
1445 unsigned int region;
1446 unsigned int align;
1447 u16 intr_event;
1448 u16 napi_event;
1449} rtl_cfg_infos [] = {
1450 [RTL_CFG_0] = {
1451 .hw_start = rtl_hw_start_8169,
1452 .region = 1,
1453 .align = 0,
1454 .intr_event = SYSErr | LinkChg | RxOverflow |
1455 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1456 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
1457 },
1458 [RTL_CFG_1] = {
1459 .hw_start = rtl_hw_start_8168,
1460 .region = 2,
1461 .align = 8,
1462 .intr_event = SYSErr | LinkChg | RxOverflow |
1463 TxErr | TxOK | RxOK | RxErr,
1464 .napi_event = TxErr | TxOK | RxOK | RxOverflow
1465 },
1466 [RTL_CFG_2] = {
1467 .hw_start = rtl_hw_start_8101,
1468 .region = 2,
1469 .align = 8,
1470 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
1471 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
1472 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow
1473 }
1474};
1475
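
rtl8169_init_one() now indexes rtl_cfg_infos[] with the driver_data value from the PCI id table and copies the selected entry's hw_start hook, BAR region, receive alignment and interrupt masks into the private structure. The reduced sketch below shows the shape of that dispatch; names and values are illustrative only.

	#include <stdio.h>

	enum rtl_cfg { RTL_CFG_0, RTL_CFG_1, RTL_CFG_2 };

	struct rtl_cfg_info {
		void (*hw_start)(const char *name);
		unsigned int region;
		unsigned int align;
	};

	static void hw_start_8169(const char *n) { printf("%s: 8169 init path\n", n); }
	static void hw_start_8168(const char *n) { printf("%s: 8168 init path\n", n); }
	static void hw_start_8101(const char *n) { printf("%s: 8101 init path\n", n); }

	static const struct rtl_cfg_info cfgs[] = {
		[RTL_CFG_0] = { hw_start_8169, 1, 0 },
		[RTL_CFG_1] = { hw_start_8168, 2, 8 },
		[RTL_CFG_2] = { hw_start_8101, 2, 8 },
	};

	int main(void)
	{
		/* driver_data from the PCI id table plays the role of this index */
		enum rtl_cfg id = RTL_CFG_1;

		cfgs[id].hw_start("eth0");
		printf("BAR region %u, rx align %u\n", cfgs[id].region, cfgs[id].align);
		return 0;
	}
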
1470static int __devinit 1476static int __devinit
1471rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1477rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1472{ 1478{
1473 const unsigned int region = rtl_cfg_info[ent->driver_data].region; 1479 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
1480 const unsigned int region = cfg->region;
1474 struct rtl8169_private *tp; 1481 struct rtl8169_private *tp;
1475 struct net_device *dev; 1482 struct net_device *dev;
1476 void __iomem *ioaddr; 1483 void __iomem *ioaddr;
1477 unsigned int pm_cap; 1484 unsigned int i;
1478 int i, rc; 1485 int rc;
1479 1486
1480 if (netif_msg_drv(&debug)) { 1487 if (netif_msg_drv(&debug)) {
1481 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n", 1488 printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -1508,20 +1515,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1508 if (rc < 0) 1515 if (rc < 0)
1509 goto err_out_disable_2; 1516 goto err_out_disable_2;
1510 1517
1511 /* save power state before pci_enable_device overwrites it */
1512 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
1513 if (pm_cap) {
1514 u16 pwr_command, acpi_idle_state;
1515
1516 pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pwr_command);
1517 acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
1518 } else {
1519 if (netif_msg_probe(tp)) {
1520 dev_err(&pdev->dev,
1521 "PowerManagement capability not found.\n");
1522 }
1523 }
1524
1525 /* make sure PCI base addr 1 is MMIO */ 1518 /* make sure PCI base addr 1 is MMIO */
1526 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) { 1519 if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
1527 if (netif_msg_probe(tp)) { 1520 if (netif_msg_probe(tp)) {
@@ -1585,7 +1578,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1585 RTL_W8(ChipCmd, CmdReset); 1578 RTL_W8(ChipCmd, CmdReset);
1586 1579
1587 /* Check that the chip has finished the reset. */ 1580 /* Check that the chip has finished the reset. */
1588 for (i = 100; i > 0; i--) { 1581 for (i = 0; i < 100; i++) {
1589 if ((RTL_R8(ChipCmd) & CmdReset) == 0) 1582 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1590 break; 1583 break;
1591 msleep_interruptible(1); 1584 msleep_interruptible(1);
@@ -1647,11 +1640,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1647 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops); 1640 SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
1648 dev->stop = rtl8169_close; 1641 dev->stop = rtl8169_close;
1649 dev->tx_timeout = rtl8169_tx_timeout; 1642 dev->tx_timeout = rtl8169_tx_timeout;
1650 dev->set_multicast_list = rtl8169_set_rx_mode; 1643 dev->set_multicast_list = rtl_set_rx_mode;
1651 dev->watchdog_timeo = RTL8169_TX_TIMEOUT; 1644 dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
1652 dev->irq = pdev->irq; 1645 dev->irq = pdev->irq;
1653 dev->base_addr = (unsigned long) ioaddr; 1646 dev->base_addr = (unsigned long) ioaddr;
1654 dev->change_mtu = rtl8169_change_mtu; 1647 dev->change_mtu = rtl8169_change_mtu;
1648 dev->set_mac_address = rtl_set_mac_address;
1655 1649
1656#ifdef CONFIG_R8169_NAPI 1650#ifdef CONFIG_R8169_NAPI
1657 dev->poll = rtl8169_poll; 1651 dev->poll = rtl8169_poll;
@@ -1670,7 +1664,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1670 tp->intr_mask = 0xffff; 1664 tp->intr_mask = 0xffff;
1671 tp->pci_dev = pdev; 1665 tp->pci_dev = pdev;
1672 tp->mmio_addr = ioaddr; 1666 tp->mmio_addr = ioaddr;
1673 tp->align = rtl_cfg_info[ent->driver_data].align; 1667 tp->align = cfg->align;
1668 tp->hw_start = cfg->hw_start;
1669 tp->intr_event = cfg->intr_event;
1670 tp->napi_event = cfg->napi_event;
1674 1671
1675 init_timer(&tp->timer); 1672 init_timer(&tp->timer);
1676 tp->timer.data = (unsigned long) dev; 1673 tp->timer.data = (unsigned long) dev;
@@ -1685,15 +1682,17 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
1685 pci_set_drvdata(pdev, dev); 1682 pci_set_drvdata(pdev, dev);
1686 1683
1687 if (netif_msg_probe(tp)) { 1684 if (netif_msg_probe(tp)) {
1685 u32 xid = RTL_R32(TxConfig) & 0x7cf0f8ff;
1686
1688 printk(KERN_INFO "%s: %s at 0x%lx, " 1687 printk(KERN_INFO "%s: %s at 0x%lx, "
1689 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, " 1688 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
1690 "IRQ %d\n", 1689 "XID %08x IRQ %d\n",
1691 dev->name, 1690 dev->name,
1692 rtl_chip_info[tp->chipset].name, 1691 rtl_chip_info[tp->chipset].name,
1693 dev->base_addr, 1692 dev->base_addr,
1694 dev->dev_addr[0], dev->dev_addr[1], 1693 dev->dev_addr[0], dev->dev_addr[1],
1695 dev->dev_addr[2], dev->dev_addr[3], 1694 dev->dev_addr[2], dev->dev_addr[3],
1696 dev->dev_addr[4], dev->dev_addr[5], dev->irq); 1695 dev->dev_addr[4], dev->dev_addr[5], xid, dev->irq);
1697 } 1696 }
1698 1697
1699 rtl8169_init_phy(dev, tp); 1698 rtl8169_init_phy(dev, tp);
@@ -1714,15 +1713,11 @@ err_out_free_dev_1:
1714 goto out; 1713 goto out;
1715} 1714}
1716 1715
1717static void __devexit 1716static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
1718rtl8169_remove_one(struct pci_dev *pdev)
1719{ 1717{
1720 struct net_device *dev = pci_get_drvdata(pdev); 1718 struct net_device *dev = pci_get_drvdata(pdev);
1721 struct rtl8169_private *tp = netdev_priv(dev); 1719 struct rtl8169_private *tp = netdev_priv(dev);
1722 1720
1723 assert(dev != NULL);
1724 assert(tp != NULL);
1725
1726 flush_scheduled_work(); 1721 flush_scheduled_work();
1727 1722
1728 unregister_netdev(dev); 1723 unregister_netdev(dev);
@@ -1774,7 +1769,7 @@ static int rtl8169_open(struct net_device *dev)
1774 if (retval < 0) 1769 if (retval < 0)
1775 goto err_release_ring_2; 1770 goto err_release_ring_2;
1776 1771
1777 rtl8169_hw_start(dev); 1772 rtl_hw_start(dev);
1778 1773
1779 rtl8169_request_timer(dev); 1774 rtl8169_request_timer(dev);
1780 1775
@@ -1805,7 +1800,7 @@ static void rtl8169_hw_reset(void __iomem *ioaddr)
1805 RTL_R8(ChipCmd); 1800 RTL_R8(ChipCmd);
1806} 1801}
1807 1802
1808static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp) 1803static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
1809{ 1804{
1810 void __iomem *ioaddr = tp->mmio_addr; 1805 void __iomem *ioaddr = tp->mmio_addr;
1811 u32 cfg = rtl8169_rx_config; 1806 u32 cfg = rtl8169_rx_config;
@@ -1818,45 +1813,90 @@ static void rtl8169_set_rx_tx_config_registers(struct rtl8169_private *tp)
1818 (InterFrameGap << TxInterFrameGapShift)); 1813 (InterFrameGap << TxInterFrameGapShift));
1819} 1814}
1820 1815
1821static void rtl8169_hw_start(struct net_device *dev) 1816static void rtl_hw_start(struct net_device *dev)
1822{ 1817{
1823 struct rtl8169_private *tp = netdev_priv(dev); 1818 struct rtl8169_private *tp = netdev_priv(dev);
1824 void __iomem *ioaddr = tp->mmio_addr; 1819 void __iomem *ioaddr = tp->mmio_addr;
1825 struct pci_dev *pdev = tp->pci_dev; 1820 unsigned int i;
1826 u16 cmd;
1827 u32 i;
1828 1821
1829 /* Soft reset the chip. */ 1822 /* Soft reset the chip. */
1830 RTL_W8(ChipCmd, CmdReset); 1823 RTL_W8(ChipCmd, CmdReset);
1831 1824
1832 /* Check that the chip has finished the reset. */ 1825 /* Check that the chip has finished the reset. */
1833 for (i = 100; i > 0; i--) { 1826 for (i = 0; i < 100; i++) {
1834 if ((RTL_R8(ChipCmd) & CmdReset) == 0) 1827 if ((RTL_R8(ChipCmd) & CmdReset) == 0)
1835 break; 1828 break;
1836 msleep_interruptible(1); 1829 msleep_interruptible(1);
1837 } 1830 }
1838 1831
1839 if (tp->mac_version == RTL_GIGA_MAC_VER_05) { 1832 tp->hw_start(dev);
1840 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
1841 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
1842 }
1843 1833
1844 if (tp->mac_version == RTL_GIGA_MAC_VER_13) { 1834 netif_start_queue(dev);
1845 pci_write_config_word(pdev, 0x68, 0x00); 1835}
1846 pci_write_config_word(pdev, 0x69, 0x08);
1847 }
1848 1836
1849 /* Undocumented stuff. */
1850 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
1851 /* Realtek's r1000_n.c driver uses '&& 0x01' here. Well... */
1852 if ((RTL_R8(Config2) & 0x07) & 0x01)
1853 RTL_W32(0x7c, 0x0007ffff);
1854 1837
1855 RTL_W32(0x7c, 0x0007ff00); 1838static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
1839 void __iomem *ioaddr)
1840{
1841 /*
1842 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
1843 * register to be written before TxDescAddrLow to work.
1844 * Switching from MMIO to I/O access fixes the issue as well.
1845 */
1846 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
1847 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_32BIT_MASK);
1848 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
1849 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_32BIT_MASK);
1850}
1851
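
rtl_set_rx_tx_desc_registers() splits each 64-bit descriptor ring address into two 32-bit writes, and as the comment notes the high half must land before the low half on some iop3xx boards. A standalone sketch of the split, with a dummy write callback standing in for RTL_W32():

	#include <stdint.h>
	#include <stdio.h>

	static void write_desc_addr(uint64_t bus_addr,
				    void (*wr32)(const char *reg, uint32_t val))
	{
		/* high word first, then the low word */
		wr32("TxDescStartAddrHigh", (uint32_t)(bus_addr >> 32));
		wr32("TxDescStartAddrLow",  (uint32_t)(bus_addr & 0xffffffffu));
	}

	static void dump_w32(const char *reg, uint32_t val)
	{
		printf("%-20s <- 0x%08x\n", reg, val);
	}

	int main(void)
	{
		write_desc_addr(0x0000000123456000ull, dump_w32);
		return 0;
	}
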
1852static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
1853{
1854 u16 cmd;
1855
1856 cmd = RTL_R16(CPlusCmd);
1857 RTL_W16(CPlusCmd, cmd);
1858 return cmd;
1859}
1860
1861static void rtl_set_rx_max_size(void __iomem *ioaddr)
1862{
1863 /* Low hurts. Let's disable the filtering. */
1864 RTL_W16(RxMaxSize, 16383);
1865}
1866
1867static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
1868{
1869 struct {
1870 u32 mac_version;
1871 u32 clk;
1872 u32 val;
1873 } cfg2_info [] = {
1874 { RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
1875 { RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
1876 { RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
1877 { RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
1878 }, *p = cfg2_info;
1879 unsigned int i;
1880 u32 clk;
1856 1881
1857 pci_read_config_word(pdev, PCI_COMMAND, &cmd); 1882 clk = RTL_R8(Config2) & PCI_Clock_66MHz;
1858 cmd = cmd & 0xef; 1883 for (i = 0; i < ARRAY_SIZE(cfg2_info); i++) {
1859 pci_write_config_word(pdev, PCI_COMMAND, cmd); 1884 if ((p->mac_version == mac_version) && (p->clk == clk)) {
1885 RTL_W32(0x7c, p->val);
1886 break;
1887 }
1888 }
1889}
1890
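
rtl8169_set_magic_reg() picks the value written to the undocumented register at offset 0x7c from a small table keyed on the MAC version (8110SCd vs 8110SCe) and on whether Config2 reports a 66 MHz PCI clock. A standalone sketch of that lookup, reusing the table values shown above:

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_CLOCK_33MHZ 0x00
	#define PCI_CLOCK_66MHZ 0x01	/* Config2 bit 0 */

	struct cfg2_info {
		unsigned int mac_version;
		unsigned int clk;
		uint32_t val;
	};

	static const struct cfg2_info cfg2_info[] = {
		{ 5, PCI_CLOCK_33MHZ, 0x000fff00 },	/* 8110SCd */
		{ 5, PCI_CLOCK_66MHZ, 0x000fffff },
		{ 6, PCI_CLOCK_33MHZ, 0x00ffff00 },	/* 8110SCe */
		{ 6, PCI_CLOCK_66MHZ, 0x00ffffff },
	};

	int main(void)
	{
		unsigned int mac_version = 6;
		unsigned int clk = PCI_CLOCK_66MHZ;
		unsigned int i;

		for (i = 0; i < sizeof(cfg2_info) / sizeof(cfg2_info[0]); i++) {
			if (cfg2_info[i].mac_version == mac_version &&
			    cfg2_info[i].clk == clk) {
				printf("write 0x%08x to register 0x7c\n",
				       cfg2_info[i].val);
				break;
			}
		}
		return 0;
	}
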
1891static void rtl_hw_start_8169(struct net_device *dev)
1892{
1893 struct rtl8169_private *tp = netdev_priv(dev);
1894 void __iomem *ioaddr = tp->mmio_addr;
1895 struct pci_dev *pdev = tp->pci_dev;
1896
1897 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
1898 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
1899 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
1860 } 1900 }
1861 1901
1862 RTL_W8(Cfg9346, Cfg9346_Unlock); 1902 RTL_W8(Cfg9346, Cfg9346_Unlock);
@@ -1868,19 +1908,11 @@ static void rtl8169_hw_start(struct net_device *dev)
1868 1908
1869 RTL_W8(EarlyTxThres, EarlyTxThld); 1909 RTL_W8(EarlyTxThres, EarlyTxThld);
1870 1910
1871 /* Low hurts. Let's disable the filtering. */ 1911 rtl_set_rx_max_size(ioaddr);
1872 RTL_W16(RxMaxSize, 16383);
1873
1874 if ((tp->mac_version == RTL_GIGA_MAC_VER_01) ||
1875 (tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1876 (tp->mac_version == RTL_GIGA_MAC_VER_03) ||
1877 (tp->mac_version == RTL_GIGA_MAC_VER_04))
1878 rtl8169_set_rx_tx_config_registers(tp);
1879 1912
1880 cmd = RTL_R16(CPlusCmd); 1913 rtl_set_rx_tx_config_registers(tp);
1881 RTL_W16(CPlusCmd, cmd);
1882 1914
1883 tp->cp_cmd |= cmd | PCIMulRW; 1915 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
1884 1916
1885 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 1917 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
1886 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 1918 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
@@ -1891,29 +1923,15 @@ static void rtl8169_hw_start(struct net_device *dev)
1891 1923
1892 RTL_W16(CPlusCmd, tp->cp_cmd); 1924 RTL_W16(CPlusCmd, tp->cp_cmd);
1893 1925
1926 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
1927
1894 /* 1928 /*
1895 * Undocumented corner. Supposedly: 1929 * Undocumented corner. Supposedly:
1896 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets 1930 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
1897 */ 1931 */
1898 RTL_W16(IntrMitigate, 0x0000); 1932 RTL_W16(IntrMitigate, 0x0000);
1899 1933
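
Per the comment above, IntrMitigate is assumed to pack four 4-bit fields as (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets; the 8169 path clears it while the 8168 path below programs 0x5151. A worked standalone example of that packing (the layout is the driver's assumption, not a documented fact):

	#include <stdint.h>
	#include <stdio.h>

	static uint16_t intr_mitigate(unsigned tx_timer, unsigned tx_packets,
				      unsigned rx_timer, unsigned rx_packets)
	{
		return (uint16_t)(((tx_timer & 0xf) << 12) | ((tx_packets & 0xf) << 8) |
				  ((rx_timer & 0xf) << 4)  |  (rx_packets & 0xf));
	}

	int main(void)
	{
		printf("0x%04x\n", intr_mitigate(0, 0, 0, 0));	/* 0x0000: 8169/8101 path */
		printf("0x%04x\n", intr_mitigate(5, 1, 5, 1));	/* 0x5151: 8168 path */
		return 0;
	}
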
1900 /* 1934 rtl_set_rx_tx_desc_registers(tp, ioaddr);
1901 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
1902 * register to be written before TxDescAddrLow to work.
1903 * Switching from MMIO to I/O access fixes the issue as well.
1904 */
1905 RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr >> 32));
1906 RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr & DMA_32BIT_MASK));
1907 RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr >> 32));
1908 RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr & DMA_32BIT_MASK));
1909
1910 if ((tp->mac_version != RTL_GIGA_MAC_VER_01) &&
1911 (tp->mac_version != RTL_GIGA_MAC_VER_02) &&
1912 (tp->mac_version != RTL_GIGA_MAC_VER_03) &&
1913 (tp->mac_version != RTL_GIGA_MAC_VER_04)) {
1914 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1915 rtl8169_set_rx_tx_config_registers(tp);
1916 }
1917 1935
1918 RTL_W8(Cfg9346, Cfg9346_Lock); 1936 RTL_W8(Cfg9346, Cfg9346_Lock);
1919 1937
@@ -1922,15 +1940,107 @@ static void rtl8169_hw_start(struct net_device *dev)
1922 1940
1923 RTL_W32(RxMissed, 0); 1941 RTL_W32(RxMissed, 0);
1924 1942
1925 rtl8169_set_rx_mode(dev); 1943 rtl_set_rx_mode(dev);
1926 1944
1927 /* no early-rx interrupts */ 1945 /* no early-rx interrupts */
1928 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000); 1946 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
1929 1947
1930 /* Enable all known interrupts by setting the interrupt mask. */ 1948 /* Enable all known interrupts by setting the interrupt mask. */
1931 RTL_W16(IntrMask, rtl8169_intr_mask); 1949 RTL_W16(IntrMask, tp->intr_event);
1932 1950
1933 netif_start_queue(dev); 1951 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1952}
1953
1954static void rtl_hw_start_8168(struct net_device *dev)
1955{
1956 struct rtl8169_private *tp = netdev_priv(dev);
1957 void __iomem *ioaddr = tp->mmio_addr;
1958 struct pci_dev *pdev = tp->pci_dev;
1959 u8 ctl;
1960
1961 RTL_W8(Cfg9346, Cfg9346_Unlock);
1962
1963 RTL_W8(EarlyTxThres, EarlyTxThld);
1964
1965 rtl_set_rx_max_size(ioaddr);
1966
1967 rtl_set_rx_tx_config_registers(tp);
1968
1969 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
1970
1971 RTL_W16(CPlusCmd, tp->cp_cmd);
1972
1973 /* Tx performance tweak. */
1974 pci_read_config_byte(pdev, 0x69, &ctl);
1975 ctl = (ctl & ~0x70) | 0x50;
1976 pci_write_config_byte(pdev, 0x69, ctl);
1977
1978 RTL_W16(IntrMitigate, 0x5151);
1979
1980 /* Work around for RxFIFO overflow. */
1981 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
1982 tp->intr_event |= RxFIFOOver | PCSTimeout;
1983 tp->intr_event &= ~RxOverflow;
1984 }
1985
1986 rtl_set_rx_tx_desc_registers(tp, ioaddr);
1987
1988 RTL_W8(Cfg9346, Cfg9346_Lock);
1989
1990 RTL_R8(IntrMask);
1991
1992 RTL_W32(RxMissed, 0);
1993
1994 rtl_set_rx_mode(dev);
1995
1996 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
1997
1998 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
1999
2000 RTL_W16(IntrMask, tp->intr_event);
2001}
2002
2003static void rtl_hw_start_8101(struct net_device *dev)
2004{
2005 struct rtl8169_private *tp = netdev_priv(dev);
2006 void __iomem *ioaddr = tp->mmio_addr;
2007 struct pci_dev *pdev = tp->pci_dev;
2008
2009 if (tp->mac_version == RTL_GIGA_MAC_VER_13) {
2010 pci_write_config_word(pdev, 0x68, 0x00);
2011 pci_write_config_word(pdev, 0x69, 0x08);
2012 }
2013
2014 RTL_W8(Cfg9346, Cfg9346_Unlock);
2015
2016 RTL_W8(EarlyTxThres, EarlyTxThld);
2017
2018 rtl_set_rx_max_size(ioaddr);
2019
2020 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
2021
2022 RTL_W16(CPlusCmd, tp->cp_cmd);
2023
2024 RTL_W16(IntrMitigate, 0x0000);
2025
2026 rtl_set_rx_tx_desc_registers(tp, ioaddr);
2027
2028 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
2029 rtl_set_rx_tx_config_registers(tp);
2030
2031 RTL_W8(Cfg9346, Cfg9346_Lock);
2032
2033 RTL_R8(IntrMask);
2034
2035 RTL_W32(RxMissed, 0);
2036
2037 rtl_set_rx_mode(dev);
2038
2039 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
2040
2041 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
2042
2043 RTL_W16(IntrMask, tp->intr_event);
1934} 2044}
1935 2045
1936static int rtl8169_change_mtu(struct net_device *dev, int new_mtu) 2046static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
@@ -1956,7 +2066,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
1956 2066
1957 netif_poll_enable(dev); 2067 netif_poll_enable(dev);
1958 2068
1959 rtl8169_hw_start(dev); 2069 rtl_hw_start(dev);
1960 2070
1961 rtl8169_request_timer(dev); 2071 rtl8169_request_timer(dev);
1962 2072
@@ -1997,38 +2107,38 @@ static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
1997 rtl8169_mark_to_asic(desc, rx_buf_sz); 2107 rtl8169_mark_to_asic(desc, rx_buf_sz);
1998} 2108}
1999 2109
2000static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff, 2110static struct sk_buff *rtl8169_alloc_rx_skb(struct pci_dev *pdev,
2001 struct RxDesc *desc, int rx_buf_sz, 2111 struct net_device *dev,
2002 unsigned int align) 2112 struct RxDesc *desc, int rx_buf_sz,
2113 unsigned int align)
2003{ 2114{
2004 struct sk_buff *skb; 2115 struct sk_buff *skb;
2005 dma_addr_t mapping; 2116 dma_addr_t mapping;
2006 int ret = 0; 2117 unsigned int pad;
2007 2118
2008 skb = dev_alloc_skb(rx_buf_sz + align); 2119 pad = align ? align : NET_IP_ALIGN;
2120
2121 skb = netdev_alloc_skb(dev, rx_buf_sz + pad);
2009 if (!skb) 2122 if (!skb)
2010 goto err_out; 2123 goto err_out;
2011 2124
2012 skb_reserve(skb, (align - 1) & (unsigned long)skb->data); 2125 skb_reserve(skb, align ? ((pad - 1) & (unsigned long)skb->data) : pad);
2013 *sk_buff = skb;
2014 2126
2015 mapping = pci_map_single(pdev, skb->data, rx_buf_sz, 2127 mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
2016 PCI_DMA_FROMDEVICE); 2128 PCI_DMA_FROMDEVICE);
2017 2129
2018 rtl8169_map_to_asic(desc, mapping, rx_buf_sz); 2130 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
2019
2020out: 2131out:
2021 return ret; 2132 return skb;
2022 2133
2023err_out: 2134err_out:
2024 ret = -ENOMEM;
2025 rtl8169_make_unusable_by_asic(desc); 2135 rtl8169_make_unusable_by_asic(desc);
2026 goto out; 2136 goto out;
2027} 2137}
2028 2138
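
The reworked allocator reserves either the chip-specific alignment offset or NET_IP_ALIGN before handing the buffer to the NIC; with the usual NET_IP_ALIGN of 2 this compensates for the 14-byte Ethernet header so the IP header starts on a 4-byte boundary. A standalone arithmetic sketch (the addresses are made up for illustration):

	#include <stdio.h>

	#define ETH_HLEN     14
	#define NET_IP_ALIGN 2	/* default value; arch-overridable in the kernel */

	int main(void)
	{
		unsigned long data = 0x1000;	/* assume the skb data area starts 4-byte aligned */

		printf("no pad: IP header at 0x%lx (mod 4 = %lu)\n",
		       data + ETH_HLEN, (data + ETH_HLEN) % 4);
		printf("pad %d: IP header at 0x%lx (mod 4 = %lu)\n",
		       NET_IP_ALIGN, data + NET_IP_ALIGN + ETH_HLEN,
		       (data + NET_IP_ALIGN + ETH_HLEN) % 4);
		return 0;
	}
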
2029static void rtl8169_rx_clear(struct rtl8169_private *tp) 2139static void rtl8169_rx_clear(struct rtl8169_private *tp)
2030{ 2140{
2031 int i; 2141 unsigned int i;
2032 2142
2033 for (i = 0; i < NUM_RX_DESC; i++) { 2143 for (i = 0; i < NUM_RX_DESC; i++) {
2034 if (tp->Rx_skbuff[i]) { 2144 if (tp->Rx_skbuff[i]) {
@@ -2043,16 +2153,22 @@ static u32 rtl8169_rx_fill(struct rtl8169_private *tp, struct net_device *dev,
2043{ 2153{
2044 u32 cur; 2154 u32 cur;
2045 2155
2046 for (cur = start; end - cur > 0; cur++) { 2156 for (cur = start; end - cur != 0; cur++) {
2047 int ret, i = cur % NUM_RX_DESC; 2157 struct sk_buff *skb;
2158 unsigned int i = cur % NUM_RX_DESC;
2159
2160 WARN_ON((s32)(end - cur) < 0);
2048 2161
2049 if (tp->Rx_skbuff[i]) 2162 if (tp->Rx_skbuff[i])
2050 continue; 2163 continue;
2051 2164
2052 ret = rtl8169_alloc_rx_skb(tp->pci_dev, tp->Rx_skbuff + i, 2165 skb = rtl8169_alloc_rx_skb(tp->pci_dev, dev,
2053 tp->RxDescArray + i, tp->rx_buf_sz, tp->align); 2166 tp->RxDescArray + i,
2054 if (ret < 0) 2167 tp->rx_buf_sz, tp->align);
2168 if (!skb)
2055 break; 2169 break;
2170
2171 tp->Rx_skbuff[i] = skb;
2056 } 2172 }
2057 return cur - start; 2173 return cur - start;
2058} 2174}
@@ -2164,14 +2280,9 @@ static void rtl8169_reinit_task(struct work_struct *work)
2164 2280
2165 ret = rtl8169_open(dev); 2281 ret = rtl8169_open(dev);
2166 if (unlikely(ret < 0)) { 2282 if (unlikely(ret < 0)) {
2167 if (net_ratelimit()) { 2283 if (net_ratelimit() && netif_msg_drv(tp)) {
2168 struct rtl8169_private *tp = netdev_priv(dev); 2284 printk(PFX KERN_ERR "%s: reinit failure (status = %d)."
2169 2285 " Rescheduling.\n", dev->name, ret);
2170 if (netif_msg_drv(tp)) {
2171 printk(PFX KERN_ERR
2172 "%s: reinit failure (status = %d)."
2173 " Rescheduling.\n", dev->name, ret);
2174 }
2175 } 2286 }
2176 rtl8169_schedule_work(dev, rtl8169_reinit_task); 2287 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2177 } 2288 }
@@ -2198,16 +2309,12 @@ static void rtl8169_reset_task(struct work_struct *work)
2198 2309
2199 if (tp->dirty_rx == tp->cur_rx) { 2310 if (tp->dirty_rx == tp->cur_rx) {
2200 rtl8169_init_ring_indexes(tp); 2311 rtl8169_init_ring_indexes(tp);
2201 rtl8169_hw_start(dev); 2312 rtl_hw_start(dev);
2202 netif_wake_queue(dev); 2313 netif_wake_queue(dev);
2203 } else { 2314 } else {
2204 if (net_ratelimit()) { 2315 if (net_ratelimit() && netif_msg_intr(tp)) {
2205 struct rtl8169_private *tp = netdev_priv(dev); 2316 printk(PFX KERN_EMERG "%s: Rx buffers shortage\n",
2206 2317 dev->name);
2207 if (netif_msg_intr(tp)) {
2208 printk(PFX KERN_EMERG
2209 "%s: Rx buffers shortage\n", dev->name);
2210 }
2211 } 2318 }
2212 rtl8169_schedule_work(dev, rtl8169_reset_task); 2319 rtl8169_schedule_work(dev, rtl8169_reset_task);
2213 } 2320 }
@@ -2344,7 +2451,7 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
2344 2451
2345 smp_wmb(); 2452 smp_wmb();
2346 2453
2347 RTL_W8(TxPoll, 0x40); /* set polling bit */ 2454 RTL_W8(TxPoll, NPQ); /* set polling bit */
2348 2455
2349 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) { 2456 if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
2350 netif_stop_queue(dev); 2457 netif_stop_queue(dev);
@@ -2414,16 +2521,12 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
2414 rtl8169_schedule_work(dev, rtl8169_reinit_task); 2521 rtl8169_schedule_work(dev, rtl8169_reinit_task);
2415} 2522}
2416 2523
2417static void 2524static void rtl8169_tx_interrupt(struct net_device *dev,
2418rtl8169_tx_interrupt(struct net_device *dev, struct rtl8169_private *tp, 2525 struct rtl8169_private *tp,
2419 void __iomem *ioaddr) 2526 void __iomem *ioaddr)
2420{ 2527{
2421 unsigned int dirty_tx, tx_left; 2528 unsigned int dirty_tx, tx_left;
2422 2529
2423 assert(dev != NULL);
2424 assert(tp != NULL);
2425 assert(ioaddr != NULL);
2426
2427 dirty_tx = tp->dirty_tx; 2530 dirty_tx = tp->dirty_tx;
2428 smp_rmb(); 2531 smp_rmb();
2429 tx_left = tp->cur_tx - dirty_tx; 2532 tx_left = tp->cur_tx - dirty_tx;
@@ -2480,38 +2583,37 @@ static inline void rtl8169_rx_csum(struct sk_buff *skb, struct RxDesc *desc)
2480 skb->ip_summed = CHECKSUM_NONE; 2583 skb->ip_summed = CHECKSUM_NONE;
2481} 2584}
2482 2585
2483static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size, 2586static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
2484 struct RxDesc *desc, int rx_buf_sz, 2587 struct rtl8169_private *tp, int pkt_size,
2485 unsigned int align) 2588 dma_addr_t addr)
2486{ 2589{
2487 int ret = -1; 2590 struct sk_buff *skb;
2591 bool done = false;
2488 2592
2489 if (pkt_size < rx_copybreak) { 2593 if (pkt_size >= rx_copybreak)
2490 struct sk_buff *skb; 2594 goto out;
2491 2595
2492 skb = dev_alloc_skb(pkt_size + align); 2596 skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN);
2493 if (skb) { 2597 if (!skb)
2494 skb_reserve(skb, (align - 1) & (unsigned long)skb->data); 2598 goto out;
2495 eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0); 2599
2496 *sk_buff = skb; 2600 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
2497 rtl8169_mark_to_asic(desc, rx_buf_sz); 2601 PCI_DMA_FROMDEVICE);
2498 ret = 0; 2602 skb_reserve(skb, NET_IP_ALIGN);
2499 } 2603 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
2500 } 2604 *sk_buff = skb;
2501 return ret; 2605 done = true;
2606out:
2607 return done;
2502} 2608}
2503 2609
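
rtl8169_try_rx_copy() now receives the DMA address directly: frames shorter than rx_copybreak are synced for CPU access and copied into a fresh skb so the original buffer can be re-armed for the NIC, while larger frames keep the original skb and the mapping is torn down instead. A reduced standalone sketch of the decision, with memcpy standing in for the DMA sync plus skb_copy_from_linear_data:

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	/* mirrors the rx_copybreak module parameter; default not shown in this hunk */
	static int rx_copybreak = 200;

	static bool try_rx_copy(char *copy_buf, const char *dma_buf, int pkt_size)
	{
		if (pkt_size >= rx_copybreak)
			return false;			/* caller unmaps and keeps dma_buf */
		memcpy(copy_buf, dma_buf, pkt_size);	/* caller re-arms dma_buf for the NIC */
		return true;
	}

	int main(void)
	{
		char dma_buf[1536] = "tiny frame";
		char copy_buf[1536];

		printf("60-byte frame copied: %d\n", try_rx_copy(copy_buf, dma_buf, 60));
		printf("1514-byte frame copied: %d\n", try_rx_copy(copy_buf, dma_buf, 1514));
		return 0;
	}
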
2504static int 2610static int rtl8169_rx_interrupt(struct net_device *dev,
2505rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp, 2611 struct rtl8169_private *tp,
2506 void __iomem *ioaddr) 2612 void __iomem *ioaddr)
2507{ 2613{
2508 unsigned int cur_rx, rx_left; 2614 unsigned int cur_rx, rx_left;
2509 unsigned int delta, count; 2615 unsigned int delta, count;
2510 2616
2511 assert(dev != NULL);
2512 assert(tp != NULL);
2513 assert(ioaddr != NULL);
2514
2515 cur_rx = tp->cur_rx; 2617 cur_rx = tp->cur_rx;
2516 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; 2618 rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
2517 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota); 2619 rx_left = rtl8169_rx_quota(rx_left, (u32) dev->quota);
@@ -2544,9 +2646,9 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2544 rtl8169_mark_to_asic(desc, tp->rx_buf_sz); 2646 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2545 } else { 2647 } else {
2546 struct sk_buff *skb = tp->Rx_skbuff[entry]; 2648 struct sk_buff *skb = tp->Rx_skbuff[entry];
2649 dma_addr_t addr = le64_to_cpu(desc->addr);
2547 int pkt_size = (status & 0x00001FFF) - 4; 2650 int pkt_size = (status & 0x00001FFF) - 4;
2548 void (*pci_action)(struct pci_dev *, dma_addr_t, 2651 struct pci_dev *pdev = tp->pci_dev;
2549 size_t, int) = pci_dma_sync_single_for_device;
2550 2652
2551 /* 2653 /*
2552 * The driver does not support incoming fragmented 2654 * The driver does not support incoming fragmented
@@ -2562,19 +2664,16 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2562 2664
2563 rtl8169_rx_csum(skb, desc); 2665 rtl8169_rx_csum(skb, desc);
2564 2666
2565 pci_dma_sync_single_for_cpu(tp->pci_dev, 2667 if (rtl8169_try_rx_copy(&skb, tp, pkt_size, addr)) {
2566 le64_to_cpu(desc->addr), tp->rx_buf_sz, 2668 pci_dma_sync_single_for_device(pdev, addr,
2567 PCI_DMA_FROMDEVICE); 2669 pkt_size, PCI_DMA_FROMDEVICE);
2568 2670 rtl8169_mark_to_asic(desc, tp->rx_buf_sz);
2569 if (rtl8169_try_rx_copy(&skb, pkt_size, desc, 2671 } else {
2570 tp->rx_buf_sz, tp->align)) { 2672 pci_unmap_single(pdev, addr, pkt_size,
2571 pci_action = pci_unmap_single; 2673 PCI_DMA_FROMDEVICE);
2572 tp->Rx_skbuff[entry] = NULL; 2674 tp->Rx_skbuff[entry] = NULL;
2573 } 2675 }
2574 2676
2575 pci_action(tp->pci_dev, le64_to_cpu(desc->addr),
2576 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);
2577
2578 skb_put(skb, pkt_size); 2677 skb_put(skb, pkt_size);
2579 skb->protocol = eth_type_trans(skb, dev); 2678 skb->protocol = eth_type_trans(skb, dev);
2580 2679
@@ -2585,6 +2684,13 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2585 tp->stats.rx_bytes += pkt_size; 2684 tp->stats.rx_bytes += pkt_size;
2586 tp->stats.rx_packets++; 2685 tp->stats.rx_packets++;
2587 } 2686 }
2687
2688			/* Work around for AMD platform. */
2689 if ((desc->opts2 & 0xfffe000) &&
2690 (tp->mac_version == RTL_GIGA_MAC_VER_05)) {
2691 desc->opts2 = 0;
2692 cur_rx++;
2693 }
2588 } 2694 }
2589 2695
2590 count = cur_rx - tp->cur_rx; 2696 count = cur_rx - tp->cur_rx;
@@ -2608,11 +2714,9 @@ rtl8169_rx_interrupt(struct net_device *dev, struct rtl8169_private *tp,
2608 return count; 2714 return count;
2609} 2715}
2610 2716
2611/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ 2717static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
2612static irqreturn_t
2613rtl8169_interrupt(int irq, void *dev_instance)
2614{ 2718{
2615 struct net_device *dev = (struct net_device *) dev_instance; 2719 struct net_device *dev = dev_instance;
2616 struct rtl8169_private *tp = netdev_priv(dev); 2720 struct rtl8169_private *tp = netdev_priv(dev);
2617 int boguscnt = max_interrupt_work; 2721 int boguscnt = max_interrupt_work;
2618 void __iomem *ioaddr = tp->mmio_addr; 2722 void __iomem *ioaddr = tp->mmio_addr;
@@ -2637,9 +2741,17 @@ rtl8169_interrupt(int irq, void *dev_instance)
2637 RTL_W16(IntrStatus, 2741 RTL_W16(IntrStatus,
2638 (status & RxFIFOOver) ? (status | RxOverflow) : status); 2742 (status & RxFIFOOver) ? (status | RxOverflow) : status);
2639 2743
2640 if (!(status & rtl8169_intr_mask)) 2744 if (!(status & tp->intr_event))
2641 break; 2745 break;
2642 2746
2747 /* Work around for rx fifo overflow */
2748 if (unlikely(status & RxFIFOOver) &&
2749 (tp->mac_version == RTL_GIGA_MAC_VER_11)) {
2750 netif_stop_queue(dev);
2751 rtl8169_tx_timeout(dev);
2752 break;
2753 }
2754
2643 if (unlikely(status & SYSErr)) { 2755 if (unlikely(status & SYSErr)) {
2644 rtl8169_pcierr_interrupt(dev); 2756 rtl8169_pcierr_interrupt(dev);
2645 break; 2757 break;
@@ -2649,8 +2761,8 @@ rtl8169_interrupt(int irq, void *dev_instance)
2649 rtl8169_check_link_status(dev, tp, ioaddr); 2761 rtl8169_check_link_status(dev, tp, ioaddr);
2650 2762
2651#ifdef CONFIG_R8169_NAPI 2763#ifdef CONFIG_R8169_NAPI
2652 RTL_W16(IntrMask, rtl8169_intr_mask & ~rtl8169_napi_event); 2764 RTL_W16(IntrMask, tp->intr_event & ~tp->napi_event);
2653 tp->intr_mask = ~rtl8169_napi_event; 2765 tp->intr_mask = ~tp->napi_event;
2654 2766
2655 if (likely(netif_rx_schedule_prep(dev))) 2767 if (likely(netif_rx_schedule_prep(dev)))
2656 __netif_rx_schedule(dev); 2768 __netif_rx_schedule(dev);
@@ -2661,9 +2773,9 @@ rtl8169_interrupt(int irq, void *dev_instance)
2661 break; 2773 break;
2662#else 2774#else
2663 /* Rx interrupt */ 2775 /* Rx interrupt */
2664 if (status & (RxOK | RxOverflow | RxFIFOOver)) { 2776 if (status & (RxOK | RxOverflow | RxFIFOOver))
2665 rtl8169_rx_interrupt(dev, tp, ioaddr); 2777 rtl8169_rx_interrupt(dev, tp, ioaddr);
2666 } 2778
2667 /* Tx interrupt */ 2779 /* Tx interrupt */
2668 if (status & (TxOK | TxErr)) 2780 if (status & (TxOK | TxErr))
2669 rtl8169_tx_interrupt(dev, tp, ioaddr); 2781 rtl8169_tx_interrupt(dev, tp, ioaddr);
@@ -2707,7 +2819,7 @@ static int rtl8169_poll(struct net_device *dev, int *budget)
2707 * write is safe - FR 2819 * write is safe - FR
2708 */ 2820 */
2709 smp_wmb(); 2821 smp_wmb();
2710 RTL_W16(IntrMask, rtl8169_intr_mask); 2822 RTL_W16(IntrMask, tp->intr_event);
2711 } 2823 }
2712 2824
2713 return (work_done >= work_to_do); 2825 return (work_done >= work_to_do);
@@ -2789,14 +2901,13 @@ static int rtl8169_close(struct net_device *dev)
2789 return 0; 2901 return 0;
2790} 2902}
2791 2903
2792static void 2904static void rtl_set_rx_mode(struct net_device *dev)
2793rtl8169_set_rx_mode(struct net_device *dev)
2794{ 2905{
2795 struct rtl8169_private *tp = netdev_priv(dev); 2906 struct rtl8169_private *tp = netdev_priv(dev);
2796 void __iomem *ioaddr = tp->mmio_addr; 2907 void __iomem *ioaddr = tp->mmio_addr;
2797 unsigned long flags; 2908 unsigned long flags;
2798 u32 mc_filter[2]; /* Multicast hash filter */ 2909 u32 mc_filter[2]; /* Multicast hash filter */
2799 int i, rx_mode; 2910 int rx_mode;
2800 u32 tmp = 0; 2911 u32 tmp = 0;
2801 2912
2802 if (dev->flags & IFF_PROMISC) { 2913 if (dev->flags & IFF_PROMISC) {
@@ -2816,6 +2927,8 @@ rtl8169_set_rx_mode(struct net_device *dev)
2816 mc_filter[1] = mc_filter[0] = 0xffffffff; 2927 mc_filter[1] = mc_filter[0] = 0xffffffff;
2817 } else { 2928 } else {
2818 struct dev_mc_list *mclist; 2929 struct dev_mc_list *mclist;
2930 unsigned int i;
2931
2819 rx_mode = AcceptBroadcast | AcceptMyPhys; 2932 rx_mode = AcceptBroadcast | AcceptMyPhys;
2820 mc_filter[1] = mc_filter[0] = 0; 2933 mc_filter[1] = mc_filter[0] = 0;
2821 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count; 2934 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
@@ -2840,10 +2953,11 @@ rtl8169_set_rx_mode(struct net_device *dev)
2840 mc_filter[1] = 0xffffffff; 2953 mc_filter[1] = 0xffffffff;
2841 } 2954 }
2842 2955
2843 RTL_W32(RxConfig, tmp);
2844 RTL_W32(MAR0 + 0, mc_filter[0]); 2956 RTL_W32(MAR0 + 0, mc_filter[0]);
2845 RTL_W32(MAR0 + 4, mc_filter[1]); 2957 RTL_W32(MAR0 + 4, mc_filter[1]);
2846 2958
2959 RTL_W32(RxConfig, tmp);
2960
2847 spin_unlock_irqrestore(&tp->lock, flags); 2961 spin_unlock_irqrestore(&tp->lock, flags);
2848} 2962}
2849 2963
@@ -2931,14 +3045,12 @@ static struct pci_driver rtl8169_pci_driver = {
2931#endif 3045#endif
2932}; 3046};
2933 3047
2934static int __init 3048static int __init rtl8169_init_module(void)
2935rtl8169_init_module(void)
2936{ 3049{
2937 return pci_register_driver(&rtl8169_pci_driver); 3050 return pci_register_driver(&rtl8169_pci_driver);
2938} 3051}
2939 3052
2940static void __exit 3053static void __exit rtl8169_cleanup_module(void)
2941rtl8169_cleanup_module(void)
2942{ 3054{
2943 pci_unregister_driver(&rtl8169_pci_driver); 3055 pci_unregister_driver(&rtl8169_pci_driver);
2944} 3056}
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 09078ff84cd2..2d826fff7e2e 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -469,11 +469,18 @@ static struct pci_device_id s2io_tbl[] __devinitdata = {
469 469
470MODULE_DEVICE_TABLE(pci, s2io_tbl); 470MODULE_DEVICE_TABLE(pci, s2io_tbl);
471 471
472static struct pci_error_handlers s2io_err_handler = {
473 .error_detected = s2io_io_error_detected,
474 .slot_reset = s2io_io_slot_reset,
475 .resume = s2io_io_resume,
476};
477
472static struct pci_driver s2io_driver = { 478static struct pci_driver s2io_driver = {
473 .name = "S2IO", 479 .name = "S2IO",
474 .id_table = s2io_tbl, 480 .id_table = s2io_tbl,
475 .probe = s2io_init_nic, 481 .probe = s2io_init_nic,
476 .remove = __devexit_p(s2io_rem_nic), 482 .remove = __devexit_p(s2io_rem_nic),
483 .err_handler = &s2io_err_handler,
477}; 484};
478 485
479/* A simplifier macro used both by init and free shared_mem Fns(). */ 486/* A simplifier macro used both by init and free shared_mem Fns(). */
@@ -2689,6 +2696,9 @@ static void s2io_netpoll(struct net_device *dev)
2689 u64 val64 = 0xFFFFFFFFFFFFFFFFULL; 2696 u64 val64 = 0xFFFFFFFFFFFFFFFFULL;
2690 int i; 2697 int i;
2691 2698
2699 if (pci_channel_offline(nic->pdev))
2700 return;
2701
2692 disable_irq(dev->irq); 2702 disable_irq(dev->irq);
2693 2703
2694 atomic_inc(&nic->isr_cnt); 2704 atomic_inc(&nic->isr_cnt);
@@ -3215,6 +3225,8 @@ static void alarm_intr_handler(struct s2io_nic *nic)
3215 int i; 3225 int i;
3216 if (atomic_read(&nic->card_state) == CARD_DOWN) 3226 if (atomic_read(&nic->card_state) == CARD_DOWN)
3217 return; 3227 return;
3228 if (pci_channel_offline(nic->pdev))
3229 return;
3218 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0; 3230 nic->mac_control.stats_info->sw_stat.ring_full_cnt = 0;
3219 /* Handling the XPAK counters update */ 3231 /* Handling the XPAK counters update */
3220 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) { 3232 if(nic->mac_control.stats_info->xpak_stat.xpak_timer_count < 72000) {
@@ -3958,7 +3970,6 @@ static int s2io_close(struct net_device *dev)
3958 /* Reset card, kill tasklet and free Tx and Rx buffers. */ 3970 /* Reset card, kill tasklet and free Tx and Rx buffers. */
3959 s2io_card_down(sp); 3971 s2io_card_down(sp);
3960 3972
3961 sp->device_close_flag = TRUE; /* Device is shut down. */
3962 return 0; 3973 return 0;
3963} 3974}
3964 3975
@@ -4314,6 +4325,10 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
4314 struct mac_info *mac_control; 4325 struct mac_info *mac_control;
4315 struct config_param *config; 4326 struct config_param *config;
4316 4327
4328	/* Pretend we handled any IRQs from a disconnected card */
4329 if (pci_channel_offline(sp->pdev))
4330 return IRQ_NONE;
4331
4317 atomic_inc(&sp->isr_cnt); 4332 atomic_inc(&sp->isr_cnt);
4318 mac_control = &sp->mac_control; 4333 mac_control = &sp->mac_control;
4319 config = &sp->config; 4334 config = &sp->config;
@@ -6569,7 +6584,7 @@ static void s2io_rem_isr(struct s2io_nic * sp)
6569 } while(cnt < 5); 6584 } while(cnt < 5);
6570} 6585}
6571 6586
6572static void s2io_card_down(struct s2io_nic * sp) 6587static void do_s2io_card_down(struct s2io_nic * sp, int do_io)
6573{ 6588{
6574 int cnt = 0; 6589 int cnt = 0;
6575 struct XENA_dev_config __iomem *bar0 = sp->bar0; 6590 struct XENA_dev_config __iomem *bar0 = sp->bar0;
@@ -6584,7 +6599,8 @@ static void s2io_card_down(struct s2io_nic * sp)
6584 atomic_set(&sp->card_state, CARD_DOWN); 6599 atomic_set(&sp->card_state, CARD_DOWN);
6585 6600
6586 /* disable Tx and Rx traffic on the NIC */ 6601 /* disable Tx and Rx traffic on the NIC */
6587 stop_nic(sp); 6602 if (do_io)
6603 stop_nic(sp);
6588 6604
6589 s2io_rem_isr(sp); 6605 s2io_rem_isr(sp);
6590 6606
@@ -6592,7 +6608,7 @@ static void s2io_card_down(struct s2io_nic * sp)
6592 tasklet_kill(&sp->task); 6608 tasklet_kill(&sp->task);
6593 6609
6594 /* Check if the device is Quiescent and then Reset the NIC */ 6610 /* Check if the device is Quiescent and then Reset the NIC */
6595 do { 6611 while(do_io) {
6596 /* As per the HW requirement we need to replenish the 6612 /* As per the HW requirement we need to replenish the
6597 * receive buffer to avoid the ring bump. Since there is 6613 * receive buffer to avoid the ring bump. Since there is
6598	 * no intention of processing the Rx frame at this point we are	6614	 * no intention of processing the Rx frame at this point we are
@@ -6617,8 +6633,9 @@ static void s2io_card_down(struct s2io_nic * sp)
6617 (unsigned long long) val64); 6633 (unsigned long long) val64);
6618 break; 6634 break;
6619 } 6635 }
6620 } while (1); 6636 }
6621 s2io_reset(sp); 6637 if (do_io)
6638 s2io_reset(sp);
6622 6639
6623 spin_lock_irqsave(&sp->tx_lock, flags); 6640 spin_lock_irqsave(&sp->tx_lock, flags);
6624 /* Free all Tx buffers */ 6641 /* Free all Tx buffers */
@@ -6633,6 +6650,11 @@ static void s2io_card_down(struct s2io_nic * sp)
6633 clear_bit(0, &(sp->link_state)); 6650 clear_bit(0, &(sp->link_state));
6634} 6651}
6635 6652
6653static void s2io_card_down(struct s2io_nic * sp)
6654{
6655 do_s2io_card_down(sp, 1);
6656}
6657
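
s2io_card_down() is now a thin wrapper around do_s2io_card_down(sp, 1); the PCI error path calls the worker with do_io = 0 so stop_nic(), the quiescence polling and s2io_reset() are all skipped while the channel is offline. A reduced standalone sketch of that split (function names here are stand-ins, not the driver's):

	#include <stdbool.h>
	#include <stdio.h>

	static void stop_nic(void)     { puts("  stop_nic: MMIO write"); }
	static void reset_nic(void)    { puts("  reset: MMIO write"); }
	static void free_buffers(void) { puts("  free tx/rx buffers: memory only"); }

	static void do_card_down(bool do_io)
	{
		if (do_io)
			stop_nic();
		free_buffers();
		if (do_io)
			reset_nic();
	}

	int main(void)
	{
		puts("normal close:");
		do_card_down(true);
		puts("after PCI error (channel offline):");
		do_card_down(false);
		return 0;
	}
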
6636static int s2io_card_up(struct s2io_nic * sp) 6658static int s2io_card_up(struct s2io_nic * sp)
6637{ 6659{
6638 int i, ret = 0; 6660 int i, ret = 0;
@@ -8010,3 +8032,85 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
8010 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++; 8032 sp->mac_control.stats_info->sw_stat.clubbed_frms_cnt++;
8011 return; 8033 return;
8012} 8034}
8035
8036/**
8037 * s2io_io_error_detected - called when PCI error is detected
8038 * @pdev: Pointer to PCI device
8039 * @state: The current PCI connection state
8040 *
8041 * This function is called after a PCI bus error affecting
8042 * this device has been detected.
8043 */
8044static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
8045 pci_channel_state_t state)
8046{
8047 struct net_device *netdev = pci_get_drvdata(pdev);
8048 struct s2io_nic *sp = netdev->priv;
8049
8050 netif_device_detach(netdev);
8051
8052 if (netif_running(netdev)) {
8053 /* Bring down the card, while avoiding PCI I/O */
8054 do_s2io_card_down(sp, 0);
8055 }
8056 pci_disable_device(pdev);
8057
8058 return PCI_ERS_RESULT_NEED_RESET;
8059}
8060
8061/**
8062 * s2io_io_slot_reset - called after the pci bus has been reset.
8063 * @pdev: Pointer to PCI device
8064 *
8065 * Restart the card from scratch, as if from a cold-boot.
8066 * At this point, the card has experienced a hard reset,
8067 * followed by fixups by BIOS, and has its config space
8068 * set up identically to what it was at cold boot.
8069 */
8070static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev)
8071{
8072 struct net_device *netdev = pci_get_drvdata(pdev);
8073 struct s2io_nic *sp = netdev->priv;
8074
8075 if (pci_enable_device(pdev)) {
8076 printk(KERN_ERR "s2io: "
8077 "Cannot re-enable PCI device after reset.\n");
8078 return PCI_ERS_RESULT_DISCONNECT;
8079 }
8080
8081 pci_set_master(pdev);
8082 s2io_reset(sp);
8083
8084 return PCI_ERS_RESULT_RECOVERED;
8085}
8086
8087/**
8088 * s2io_io_resume - called when traffic can start flowing again.
8089 * @pdev: Pointer to PCI device
8090 *
8091 * This callback is called when the error recovery driver tells
8092 * us that it's OK to resume normal operation.
8093 */
8094static void s2io_io_resume(struct pci_dev *pdev)
8095{
8096 struct net_device *netdev = pci_get_drvdata(pdev);
8097 struct s2io_nic *sp = netdev->priv;
8098
8099 if (netif_running(netdev)) {
8100 if (s2io_card_up(sp)) {
8101 printk(KERN_ERR "s2io: "
8102 "Can't bring device back up after reset.\n");
8103 return;
8104 }
8105
8106 if (s2io_set_mac_addr(netdev, netdev->dev_addr) == FAILURE) {
8107 s2io_card_down(sp);
8108 printk(KERN_ERR "s2io: "
8109			"Can't restore mac addr after reset.\n");
8110 return;
8111 }
8112 }
8113
8114 netif_device_attach(netdev);
8115 netif_wake_queue(netdev);
8116}
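
The three hooks above are driven in a fixed order by the PCI error recovery core: error_detected() first (quiesce without touching the card), slot_reset() after the slot or link has been reset, and resume() once recovery succeeds. The standalone model below mirrors that sequencing; the result codes are stand-ins for the real pci_ers_result_t values.

	#include <stdio.h>

	enum ers_result { ERS_NEED_RESET, ERS_RECOVERED, ERS_DISCONNECT };

	static enum ers_result error_detected(void)
	{
		puts("error_detected: detach netdev, tear down without touching the card");
		return ERS_NEED_RESET;
	}

	static enum ers_result slot_reset(void)
	{
		puts("slot_reset: re-enable device, restore bus mastering, reset the NIC");
		return ERS_RECOVERED;
	}

	static void resume(void)
	{
		puts("resume: bring the card back up, reattach and wake the queue");
	}

	int main(void)
	{
		if (error_detected() != ERS_NEED_RESET)
			return 1;
		if (slot_reset() != ERS_RECOVERED)
			return 1;
		resume();
		return 0;
	}
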
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 54baa0b8ec7c..58592780f519 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -794,7 +794,6 @@ struct s2io_nic {
794 794
795 struct net_device_stats stats; 795 struct net_device_stats stats;
796 int high_dma_flag; 796 int high_dma_flag;
797 int device_close_flag;
798 int device_enabled_once; 797 int device_enabled_once;
799 798
800 char name[60]; 799 char name[60];
@@ -1052,6 +1051,11 @@ static void lro_append_pkt(struct s2io_nic *sp, struct lro *lro,
1052 struct sk_buff *skb, u32 tcp_len); 1051 struct sk_buff *skb, u32 tcp_len);
1053static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring); 1052static int rts_ds_steer(struct s2io_nic *nic, u8 ds_codepoint, u8 ring);
1054 1053
1054static pci_ers_result_t s2io_io_error_detected(struct pci_dev *pdev,
1055 pci_channel_state_t state);
1056static pci_ers_result_t s2io_io_slot_reset(struct pci_dev *pdev);
1057static void s2io_io_resume(struct pci_dev *pdev);
1058
1055#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size 1059#define s2io_tcp_mss(skb) skb_shinfo(skb)->gso_size
1056#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size 1060#define s2io_udp_mss(skb) skb_shinfo(skb)->gso_size
1057#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type 1061#define s2io_offload_type(skb) skb_shinfo(skb)->gso_type
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index fe01b961b597..b51d73c8f817 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.14" 53#define DRV_VERSION "1.15"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -130,7 +130,7 @@ static const struct pci_device_id sky2_id_table[] = {
130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */ 130 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4368) }, /* 88EC034 */
131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */ 131 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4369) }, /* 88EC042 */
132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */ 132 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436A) }, /* 88E8058 */
133// { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */ 133 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436B) }, /* 88E8071 */
134 { 0 } 134 { 0 }
135}; 135};
136 136
@@ -217,13 +217,24 @@ static void sky2_power_on(struct sky2_hw *hw)
217 sky2_write8(hw, B2_Y2_CLK_GATE, 0); 217 sky2_write8(hw, B2_Y2_CLK_GATE, 0);
218 218
219 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { 219 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) {
220 u32 reg1; 220 u32 reg;
221 221
222 sky2_pci_write32(hw, PCI_DEV_REG3, 0); 222 reg = sky2_pci_read32(hw, PCI_DEV_REG4);
223 reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); 223 /* set all bits to 0 except bits 15..12 and 8 */
224 reg1 &= P_ASPM_CONTROL_MSK; 224 reg &= P_ASPM_CONTROL_MSK;
225 sky2_pci_write32(hw, PCI_DEV_REG4, reg1); 225 sky2_pci_write32(hw, PCI_DEV_REG4, reg);
226 sky2_pci_write32(hw, PCI_DEV_REG5, 0); 226
227 reg = sky2_pci_read32(hw, PCI_DEV_REG5);
228 /* set all bits to 0 except bits 28 & 27 */
229 reg &= P_CTL_TIM_VMAIN_AV_MSK;
230 sky2_pci_write32(hw, PCI_DEV_REG5, reg);
231
232 sky2_pci_write32(hw, PCI_CFG_REG_1, 0);
233
234 /* Enable workaround for dev 4.107 on Yukon-Ultra & Extreme */
235 reg = sky2_read32(hw, B2_GP_IO);
236 reg |= GLB_GPIO_STAT_RACE_DIS;
237 sky2_write32(hw, B2_GP_IO, reg);
227 } 238 }
228} 239}
229 240
@@ -650,6 +661,30 @@ static void sky2_wol_init(struct sky2_port *sky2)
650 661
651} 662}
652 663
664static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
665{
666 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev != CHIP_REV_YU_EX_A0) {
667 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
668 TX_STFW_ENA |
669 (hw->dev[port]->mtu > ETH_DATA_LEN) ? TX_JUMBO_ENA : TX_JUMBO_DIS);
670 } else {
671 if (hw->dev[port]->mtu > ETH_DATA_LEN) {
672 /* set Tx GMAC FIFO Almost Empty Threshold */
673 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
674 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
675
676 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
677 TX_JUMBO_ENA | TX_STFW_DIS);
678
679 /* Can't do offload because of lack of store/forward */
680 hw->dev[port]->features &= ~(NETIF_F_TSO | NETIF_F_SG
681 | NETIF_F_ALL_CSUM);
682 } else
683 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
684 TX_JUMBO_DIS | TX_STFW_ENA);
685 }
686}
687
653static void sky2_mac_init(struct sky2_hw *hw, unsigned port) 688static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
654{ 689{
655 struct sky2_port *sky2 = netdev_priv(hw->dev[port]); 690 struct sky2_port *sky2 = netdev_priv(hw->dev[port]);
@@ -730,8 +765,11 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
730 765
731 /* Configure Rx MAC FIFO */ 766 /* Configure Rx MAC FIFO */
732 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 767 sky2_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
733 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), 768 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
734 GMF_OPER_ON | GMF_RX_F_FL_ON); 769 if (hw->chip_id == CHIP_ID_YUKON_EX)
770 reg |= GMF_RX_OVER_ON;
771
772 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
735 773
736 /* Flush Rx MAC FIFO on any flow control or error */ 774 /* Flush Rx MAC FIFO on any flow control or error */
737 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR); 775 sky2_write16(hw, SK_REG(port, RX_GMF_FL_MSK), GMR_FS_ANY_ERR);
@@ -747,16 +785,7 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
747 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 785 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8);
748 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 786 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8);
749 787
750 /* set Tx GMAC FIFO Almost Empty Threshold */ 788 sky2_set_tx_stfwd(hw, port);
751 sky2_write32(hw, SK_REG(port, TX_GMF_AE_THR),
752 (ECU_JUMBO_WM << 16) | ECU_AE_THR);
753
754 if (hw->dev[port]->mtu > ETH_DATA_LEN)
755 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
756 TX_JUMBO_ENA | TX_STFW_DIS);
757 else
758 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
759 TX_JUMBO_DIS | TX_STFW_ENA);
760 } 789 }
761 790
762} 791}
@@ -939,14 +968,16 @@ static void rx_set_checksum(struct sky2_port *sky2)
939{ 968{
940 struct sky2_rx_le *le; 969 struct sky2_rx_le *le;
941 970
942 le = sky2_next_rx(sky2); 971 if (sky2->hw->chip_id != CHIP_ID_YUKON_EX) {
943 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN); 972 le = sky2_next_rx(sky2);
944 le->ctrl = 0; 973 le->addr = cpu_to_le32((ETH_HLEN << 16) | ETH_HLEN);
945 le->opcode = OP_TCPSTART | HW_OWNER; 974 le->ctrl = 0;
975 le->opcode = OP_TCPSTART | HW_OWNER;
946 976
947 sky2_write32(sky2->hw, 977 sky2_write32(sky2->hw,
948 Q_ADDR(rxqaddr[sky2->port], Q_CSR), 978 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
949 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 979 sky2->rx_csum ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
980 }
950 981
951} 982}
952 983
@@ -1134,7 +1165,7 @@ static int sky2_rx_start(struct sky2_port *sky2)
1134 if (hw->chip_id == CHIP_ID_YUKON_EC_U && 1165 if (hw->chip_id == CHIP_ID_YUKON_EC_U &&
1135 (hw->chip_rev == CHIP_REV_YU_EC_U_A1 1166 (hw->chip_rev == CHIP_REV_YU_EC_U_A1
1136 || hw->chip_rev == CHIP_REV_YU_EC_U_B0)) 1167 || hw->chip_rev == CHIP_REV_YU_EC_U_B0))
1137 sky2_write32(hw, Q_ADDR(rxq, Q_F), F_M_RX_RAM_DIS); 1168 sky2_write32(hw, Q_ADDR(rxq, Q_TEST), F_M_RX_RAM_DIS);
1138 1169
1139 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1); 1170 sky2_prefetch_init(hw, rxq, sky2->rx_le_map, RX_LE_SIZE - 1);
1140 1171
@@ -1285,6 +1316,10 @@ static int sky2_up(struct net_device *dev)
1285 1316
1286 sky2_qset(hw, txqaddr[port]); 1317 sky2_qset(hw, txqaddr[port]);
1287 1318
 1319 /* This is copied from sk98lin 10.0.5.3; no one tells me about errata */
1320 if (hw->chip_id == CHIP_ID_YUKON_EX && hw->chip_rev == CHIP_REV_YU_EX_B0)
1321 sky2_write32(hw, Q_ADDR(txqaddr[port], Q_TEST), F_TX_CHK_AUTO_OFF);
1322
1288 /* Set almost empty threshold */ 1323 /* Set almost empty threshold */
1289 if (hw->chip_id == CHIP_ID_YUKON_EC_U 1324 if (hw->chip_id == CHIP_ID_YUKON_EC_U
1290 && hw->chip_rev == CHIP_REV_YU_EC_U_A0) 1325 && hw->chip_rev == CHIP_REV_YU_EC_U_A0)
@@ -1393,14 +1428,16 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1393 /* Check for TCP Segmentation Offload */ 1428 /* Check for TCP Segmentation Offload */
1394 mss = skb_shinfo(skb)->gso_size; 1429 mss = skb_shinfo(skb)->gso_size;
1395 if (mss != 0) { 1430 if (mss != 0) {
1396 mss += tcp_optlen(skb); /* TCP options */ 1431 if (hw->chip_id != CHIP_ID_YUKON_EX)
1397 mss += ip_hdrlen(skb) + sizeof(struct tcphdr); 1432 mss += ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
1398 mss += ETH_HLEN; 1433
1399 1434 if (mss != sky2->tx_last_mss) {
1400 if (mss != sky2->tx_last_mss) { 1435 le = get_tx_le(sky2);
1401 le = get_tx_le(sky2); 1436 le->addr = cpu_to_le32(mss);
1402 le->addr = cpu_to_le32(mss); 1437 if (hw->chip_id == CHIP_ID_YUKON_EX)
1403 le->opcode = OP_LRGLEN | HW_OWNER; 1438 le->opcode = OP_MSS | HW_OWNER;
1439 else
1440 le->opcode = OP_LRGLEN | HW_OWNER;
1404 sky2->tx_last_mss = mss; 1441 sky2->tx_last_mss = mss;
1405 } 1442 }
1406 } 1443 }
@@ -1422,24 +1459,30 @@ static int sky2_xmit_frame(struct sk_buff *skb, struct net_device *dev)
1422 1459
1423 /* Handle TCP checksum offload */ 1460 /* Handle TCP checksum offload */
1424 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1461 if (skb->ip_summed == CHECKSUM_PARTIAL) {
 1425 const unsigned offset = skb_transport_offset(skb); 1462 /* On Yukon EX (some versions) the checksum encoding changed. */
1426 u32 tcpsum; 1463 if (hw->chip_id == CHIP_ID_YUKON_EX
1427 1464 && hw->chip_rev != CHIP_REV_YU_EX_B0)
1428 tcpsum = offset << 16; /* sum start */ 1465 ctrl |= CALSUM; /* auto checksum */
1429 tcpsum |= offset + skb->csum_offset; /* sum write */ 1466 else {
1430 1467 const unsigned offset = skb_transport_offset(skb);
1431 ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM; 1468 u32 tcpsum;
1432 if (ip_hdr(skb)->protocol == IPPROTO_UDP) 1469
1433 ctrl |= UDPTCP; 1470 tcpsum = offset << 16; /* sum start */
1434 1471 tcpsum |= offset + skb->csum_offset; /* sum write */
1435 if (tcpsum != sky2->tx_tcpsum) { 1472
1436 sky2->tx_tcpsum = tcpsum; 1473 ctrl |= CALSUM | WR_SUM | INIT_SUM | LOCK_SUM;
1437 1474 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1438 le = get_tx_le(sky2); 1475 ctrl |= UDPTCP;
1439 le->addr = cpu_to_le32(tcpsum); 1476
1440 le->length = 0; /* initial checksum value */ 1477 if (tcpsum != sky2->tx_tcpsum) {
1441 le->ctrl = 1; /* one packet */ 1478 sky2->tx_tcpsum = tcpsum;
1442 le->opcode = OP_TCPLISW | HW_OWNER; 1479
1480 le = get_tx_le(sky2);
1481 le->addr = cpu_to_le32(tcpsum);
1482 le->length = 0; /* initial checksum value */
1483 le->ctrl = 1; /* one packet */
1484 le->opcode = OP_TCPLISW | HW_OWNER;
1485 }
1443 } 1486 }
1444 } 1487 }
1445 1488
@@ -1913,15 +1956,8 @@ static int sky2_change_mtu(struct net_device *dev, int new_mtu)
1913 1956
1914 synchronize_irq(hw->pdev->irq); 1957 synchronize_irq(hw->pdev->irq);
1915 1958
1916 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX) { 1959 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
1917 if (new_mtu > ETH_DATA_LEN) { 1960 sky2_set_tx_stfwd(hw, port);
1918 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1919 TX_JUMBO_ENA | TX_STFW_DIS);
1920 dev->features &= NETIF_F_TSO | NETIF_F_SG | NETIF_F_IP_CSUM;
1921 } else
1922 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1923 TX_JUMBO_DIS | TX_STFW_ENA);
1924 }
1925 1961
1926 ctl = gma_read16(hw, port, GM_GP_CTRL); 1962 ctl = gma_read16(hw, port, GM_GP_CTRL);
1927 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA); 1963 gma_write16(hw, port, GM_GP_CTRL, ctl & ~GM_GPCR_RX_ENA);
@@ -2118,6 +2154,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2118 2154
2119 while (hw->st_idx != hwidx) { 2155 while (hw->st_idx != hwidx) {
2120 struct sky2_status_le *le = hw->st_le + hw->st_idx; 2156 struct sky2_status_le *le = hw->st_le + hw->st_idx;
2157 unsigned port = le->css & CSS_LINK_BIT;
2121 struct net_device *dev; 2158 struct net_device *dev;
2122 struct sk_buff *skb; 2159 struct sk_buff *skb;
2123 u32 status; 2160 u32 status;
@@ -2125,9 +2162,7 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2125 2162
2126 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE); 2163 hw->st_idx = RING_NEXT(hw->st_idx, STATUS_RING_SIZE);
2127 2164
2128 BUG_ON(le->link >= 2); 2165 dev = hw->dev[port];
2129 dev = hw->dev[le->link];
2130
2131 sky2 = netdev_priv(dev); 2166 sky2 = netdev_priv(dev);
2132 length = le16_to_cpu(le->length); 2167 length = le16_to_cpu(le->length);
2133 status = le32_to_cpu(le->status); 2168 status = le32_to_cpu(le->status);
@@ -2140,6 +2175,16 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2140 goto force_update; 2175 goto force_update;
2141 } 2176 }
2142 2177
2178 /* This chip reports checksum status differently */
2179 if (hw->chip_id == CHIP_ID_YUKON_EX) {
2180 if (sky2->rx_csum &&
2181 (le->css & (CSS_ISIPV4 | CSS_ISIPV6)) &&
2182 (le->css & CSS_TCPUDPCSOK))
2183 skb->ip_summed = CHECKSUM_UNNECESSARY;
2184 else
2185 skb->ip_summed = CHECKSUM_NONE;
2186 }
2187
2143 skb->protocol = eth_type_trans(skb, dev); 2188 skb->protocol = eth_type_trans(skb, dev);
2144 sky2->net_stats.rx_packets++; 2189 sky2->net_stats.rx_packets++;
2145 sky2->net_stats.rx_bytes += skb->len; 2190 sky2->net_stats.rx_bytes += skb->len;
@@ -2155,10 +2200,10 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do)
2155 netif_receive_skb(skb); 2200 netif_receive_skb(skb);
2156 2201
2157 /* Update receiver after 16 frames */ 2202 /* Update receiver after 16 frames */
2158 if (++buf_write[le->link] == RX_BUF_WRITE) { 2203 if (++buf_write[port] == RX_BUF_WRITE) {
2159force_update: 2204force_update:
2160 sky2_put_idx(hw, rxqaddr[le->link], sky2->rx_put); 2205 sky2_put_idx(hw, rxqaddr[port], sky2->rx_put);
2161 buf_write[le->link] = 0; 2206 buf_write[port] = 0;
2162 } 2207 }
2163 2208
2164 /* Stop after net poll weight */ 2209 /* Stop after net poll weight */
@@ -2179,6 +2224,9 @@ force_update:
2179 if (!sky2->rx_csum) 2224 if (!sky2->rx_csum)
2180 break; 2225 break;
2181 2226
2227 if (hw->chip_id == CHIP_ID_YUKON_EX)
2228 break;
2229
2182 /* Both checksum counters are programmed to start at 2230 /* Both checksum counters are programmed to start at
2183 * the same offset, so unless there is a problem they 2231 * the same offset, so unless there is a problem they
2184 * should match. This failure is an early indication that 2232 * should match. This failure is an early indication that
@@ -2194,7 +2242,7 @@ force_update:
2194 dev->name, status); 2242 dev->name, status);
2195 sky2->rx_csum = 0; 2243 sky2->rx_csum = 0;
2196 sky2_write32(sky2->hw, 2244 sky2_write32(sky2->hw,
2197 Q_ADDR(rxqaddr[le->link], Q_CSR), 2245 Q_ADDR(rxqaddr[port], Q_CSR),
2198 BMU_DIS_RX_CHKSUM); 2246 BMU_DIS_RX_CHKSUM);
2199 } 2247 }
2200 break; 2248 break;
@@ -2513,6 +2561,9 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2513{ 2561{
2514 u8 t8; 2562 u8 t8;
2515 2563
2564 /* Enable all clocks */
2565 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2566
2516 sky2_write8(hw, B0_CTST, CS_RST_CLR); 2567 sky2_write8(hw, B0_CTST, CS_RST_CLR);
2517 2568
2518 hw->chip_id = sky2_read8(hw, B2_CHIP_ID); 2569 hw->chip_id = sky2_read8(hw, B2_CHIP_ID);
@@ -2522,14 +2573,6 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2522 return -EOPNOTSUPP; 2573 return -EOPNOTSUPP;
2523 } 2574 }
2524 2575
2525 if (hw->chip_id == CHIP_ID_YUKON_EX)
2526 dev_warn(&hw->pdev->dev, "this driver not yet tested on this chip type\n"
2527 "Please report success or failure to <netdev@vger.kernel.org>\n");
2528
2529 /* Make sure and enable all clocks */
2530 if (hw->chip_id == CHIP_ID_YUKON_EX || hw->chip_id == CHIP_ID_YUKON_EC_U)
2531 sky2_pci_write32(hw, PCI_DEV_REG3, 0);
2532
2533 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4; 2576 hw->chip_rev = (sky2_read8(hw, B2_MAC_CFG) & CFG_CHIP_R_MSK) >> 4;
2534 2577
2535 /* This rev is really old, and requires untested workarounds */ 2578 /* This rev is really old, and requires untested workarounds */
@@ -2589,6 +2632,11 @@ static void sky2_reset(struct sky2_hw *hw)
2589 for (i = 0; i < hw->ports; i++) { 2632 for (i = 0; i < hw->ports; i++) {
2590 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2633 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
2591 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2634 sky2_write8(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
2635
2636 if (hw->chip_id == CHIP_ID_YUKON_EX)
2637 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2638 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2639 | GMC_BYP_RETR_ON);
2592 } 2640 }
2593 2641
2594 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2642 sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
@@ -2735,7 +2783,7 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2735 2783
2736 sky2->wol = wol->wolopts; 2784 sky2->wol = wol->wolopts;
2737 2785
2738 if (hw->chip_id == CHIP_ID_YUKON_EC_U) 2786 if (hw->chip_id == CHIP_ID_YUKON_EC_U || hw->chip_id == CHIP_ID_YUKON_EX)
2739 sky2_write32(hw, B0_CTST, sky2->wol 2787 sky2_write32(hw, B0_CTST, sky2->wol
2740 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF); 2788 ? Y2_HW_WOL_ON : Y2_HW_WOL_OFF);
2741 2789
@@ -3330,7 +3378,7 @@ static int sky2_get_regs_len(struct net_device *dev)
3330 3378
3331/* 3379/*
3332 * Returns copy of control register region 3380 * Returns copy of control register region
3333 * Note: access to the RAM address register set will cause timeouts. 3381 * Note: ethtool_get_regs always provides full size (16k) buffer
3334 */ 3382 */
3335static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs, 3383static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3336 void *p) 3384 void *p)
@@ -3338,15 +3386,19 @@ static void sky2_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3338 const struct sky2_port *sky2 = netdev_priv(dev); 3386 const struct sky2_port *sky2 = netdev_priv(dev);
3339 const void __iomem *io = sky2->hw->regs; 3387 const void __iomem *io = sky2->hw->regs;
3340 3388
3341 BUG_ON(regs->len < B3_RI_WTO_R1);
3342 regs->version = 1; 3389 regs->version = 1;
3343 memset(p, 0, regs->len); 3390 memset(p, 0, regs->len);
3344 3391
3345 memcpy_fromio(p, io, B3_RAM_ADDR); 3392 memcpy_fromio(p, io, B3_RAM_ADDR);
3346 3393
3347 memcpy_fromio(p + B3_RI_WTO_R1, 3394 /* skip diagnostic ram region */
3348 io + B3_RI_WTO_R1, 3395 memcpy_fromio(p + B3_RI_WTO_R1, io + B3_RI_WTO_R1, 0x2000 - B3_RI_WTO_R1);
3349 regs->len - B3_RI_WTO_R1); 3396
3397 /* copy GMAC registers */
3398 memcpy_fromio(p + BASE_GMAC_1, io + BASE_GMAC_1, 0x1000);
3399 if (sky2->hw->ports > 1)
3400 memcpy_fromio(p + BASE_GMAC_2, io + BASE_GMAC_2, 0x1000);
3401
3350} 3402}
3351 3403
3352/* In order to do Jumbo packets on these chips, need to turn off the 3404/* In order to do Jumbo packets on these chips, need to turn off the
@@ -3357,9 +3409,7 @@ static int no_tx_offload(struct net_device *dev)
3357 const struct sky2_port *sky2 = netdev_priv(dev); 3409 const struct sky2_port *sky2 = netdev_priv(dev);
3358 const struct sky2_hw *hw = sky2->hw; 3410 const struct sky2_hw *hw = sky2->hw;
3359 3411
3360 return dev->mtu > ETH_DATA_LEN && 3412 return dev->mtu > ETH_DATA_LEN && hw->chip_id == CHIP_ID_YUKON_EC_U;
3361 (hw->chip_id == CHIP_ID_YUKON_EX
3362 || hw->chip_id == CHIP_ID_YUKON_EC_U);
3363} 3413}
3364 3414
3365static int sky2_set_tx_csum(struct net_device *dev, u32 data) 3415static int sky2_set_tx_csum(struct net_device *dev, u32 data)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index b8c4a3b5eadf..8df4643493d1 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -14,6 +14,8 @@ enum {
14 PCI_DEV_REG3 = 0x80, 14 PCI_DEV_REG3 = 0x80,
15 PCI_DEV_REG4 = 0x84, 15 PCI_DEV_REG4 = 0x84,
16 PCI_DEV_REG5 = 0x88, 16 PCI_DEV_REG5 = 0x88,
17 PCI_CFG_REG_0 = 0x90,
18 PCI_CFG_REG_1 = 0x94,
17}; 19};
18 20
19enum { 21enum {
@@ -28,6 +30,7 @@ enum {
28enum pci_dev_reg_1 { 30enum pci_dev_reg_1 {
29 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */ 31 PCI_Y2_PIG_ENA = 1<<31, /* Enable Plug-in-Go (YUKON-2) */
30 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */ 32 PCI_Y2_DLL_DIS = 1<<30, /* Disable PCI DLL (YUKON-2) */
33 PCI_SW_PWR_ON_RST= 1<<30, /* SW Power on Reset (Yukon-EX) */
31 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */ 34 PCI_Y2_PHY2_COMA = 1<<29, /* Set PHY 2 to Coma Mode (YUKON-2) */
32 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */ 35 PCI_Y2_PHY1_COMA = 1<<28, /* Set PHY 1 to Coma Mode (YUKON-2) */
33 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */ 36 PCI_Y2_PHY2_POWD = 1<<27, /* Set PHY 2 to Power Down (YUKON-2) */
@@ -67,6 +70,80 @@ enum pci_dev_reg_4 {
67 | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY, 70 | P_ASPM_CLKRUN_REQUEST | P_ASPM_INT_FIFO_EMPTY,
68}; 71};
69 72
73/* PCI_OUR_REG_5 32 bit Our Register 5 (Yukon-ECU only) */
74enum pci_dev_reg_5 {
75 /* Bit 31..27: for A3 & later */
76 P_CTL_DIV_CORE_CLK_ENA = 1<<31, /* Divide Core Clock Enable */
77 P_CTL_SRESET_VMAIN_AV = 1<<30, /* Soft Reset for Vmain_av De-Glitch */
78 P_CTL_BYPASS_VMAIN_AV = 1<<29, /* Bypass En. for Vmain_av De-Glitch */
79 P_CTL_TIM_VMAIN_AV_MSK = 3<<27, /* Bit 28..27: Timer Vmain_av Mask */
80 /* Bit 26..16: Release Clock on Event */
81 P_REL_PCIE_RST_DE_ASS = 1<<26, /* PCIe Reset De-Asserted */
82 P_REL_GPHY_REC_PACKET = 1<<25, /* GPHY Received Packet */
83 P_REL_INT_FIFO_N_EMPTY = 1<<24, /* Internal FIFO Not Empty */
84 P_REL_MAIN_PWR_AVAIL = 1<<23, /* Main Power Available */
85 P_REL_CLKRUN_REQ_REL = 1<<22, /* CLKRUN Request Release */
86 P_REL_PCIE_RESET_ASS = 1<<21, /* PCIe Reset Asserted */
87 P_REL_PME_ASSERTED = 1<<20, /* PME Asserted */
88 P_REL_PCIE_EXIT_L1_ST = 1<<19, /* PCIe Exit L1 State */
89 P_REL_LOADER_NOT_FIN = 1<<18, /* EPROM Loader Not Finished */
90 P_REL_PCIE_RX_EX_IDLE = 1<<17, /* PCIe Rx Exit Electrical Idle State */
91 P_REL_GPHY_LINK_UP = 1<<16, /* GPHY Link Up */
92
93 /* Bit 10.. 0: Mask for Gate Clock */
94 P_GAT_PCIE_RST_ASSERTED = 1<<10,/* PCIe Reset Asserted */
95 P_GAT_GPHY_N_REC_PACKET = 1<<9, /* GPHY Not Received Packet */
96 P_GAT_INT_FIFO_EMPTY = 1<<8, /* Internal FIFO Empty */
97 P_GAT_MAIN_PWR_N_AVAIL = 1<<7, /* Main Power Not Available */
98 P_GAT_CLKRUN_REQ_REL = 1<<6, /* CLKRUN Not Requested */
99 P_GAT_PCIE_RESET_ASS = 1<<5, /* PCIe Reset Asserted */
100 P_GAT_PME_DE_ASSERTED = 1<<4, /* PME De-Asserted */
101 P_GAT_PCIE_ENTER_L1_ST = 1<<3, /* PCIe Enter L1 State */
102 P_GAT_LOADER_FINISHED = 1<<2, /* EPROM Loader Finished */
103 P_GAT_PCIE_RX_EL_IDLE = 1<<1, /* PCIe Rx Electrical Idle State */
104 P_GAT_GPHY_LINK_DOWN = 1<<0, /* GPHY Link Down */
105
106 PCIE_OUR5_EVENT_CLK_D3_SET = P_REL_GPHY_REC_PACKET |
107 P_REL_INT_FIFO_N_EMPTY |
108 P_REL_PCIE_EXIT_L1_ST |
109 P_REL_PCIE_RX_EX_IDLE |
110 P_GAT_GPHY_N_REC_PACKET |
111 P_GAT_INT_FIFO_EMPTY |
112 P_GAT_PCIE_ENTER_L1_ST |
113 P_GAT_PCIE_RX_EL_IDLE,
114};
115
 116/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
117enum pci_cfg_reg1 {
118 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
119 /* Bit 23..21: Release Clock on Event */
120 P_CF1_REL_LDR_NOT_FIN = 1<<23, /* EEPROM Loader Not Finished */
121 P_CF1_REL_VMAIN_AVLBL = 1<<22, /* Vmain available */
122 P_CF1_REL_PCIE_RESET = 1<<21, /* PCI-E reset */
123 /* Bit 20..18: Gate Clock on Event */
124 P_CF1_GAT_LDR_NOT_FIN = 1<<20, /* EEPROM Loader Finished */
125 P_CF1_GAT_PCIE_RX_IDLE = 1<<19, /* PCI-E Rx Electrical idle */
126 P_CF1_GAT_PCIE_RESET = 1<<18, /* PCI-E Reset */
127 P_CF1_PRST_PHY_CLKREQ = 1<<17, /* Enable PCI-E rst & PM2PHY gen. CLKREQ */
128 P_CF1_PCIE_RST_CLKREQ = 1<<16, /* Enable PCI-E rst generate CLKREQ */
129
130 P_CF1_ENA_CFG_LDR_DONE = 1<<8, /* Enable core level Config loader done */
131
132 P_CF1_ENA_TXBMU_RD_IDLE = 1<<1, /* Enable TX BMU Read IDLE for ASPM */
133 P_CF1_ENA_TXBMU_WR_IDLE = 1<<0, /* Enable TX BMU Write IDLE for ASPM */
134
135 PCIE_CFG1_EVENT_CLK_D3_SET = P_CF1_DIS_REL_EVT_RST |
136 P_CF1_REL_LDR_NOT_FIN |
137 P_CF1_REL_VMAIN_AVLBL |
138 P_CF1_REL_PCIE_RESET |
139 P_CF1_GAT_LDR_NOT_FIN |
140 P_CF1_GAT_PCIE_RESET |
141 P_CF1_PRST_PHY_CLKREQ |
142 P_CF1_ENA_CFG_LDR_DONE |
143 P_CF1_ENA_TXBMU_RD_IDLE |
144 P_CF1_ENA_TXBMU_WR_IDLE,
145};
146
70 147
71#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 148#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
72 PCI_STATUS_SIG_SYSTEM_ERROR | \ 149 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -364,6 +441,20 @@ enum {
364 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */ 441 TST_CFG_WRITE_OFF= 1<<0, /* Disable Config Reg WR */
365}; 442};
366 443
444/* B2_GPIO */
445enum {
446 GLB_GPIO_CLK_DEB_ENA = 1<<31, /* Clock Debug Enable */
447 GLB_GPIO_CLK_DBG_MSK = 0xf<<26, /* Clock Debug */
448
449 GLB_GPIO_INT_RST_D3_DIS = 1<<15, /* Disable Internal Reset After D3 to D0 */
450 GLB_GPIO_LED_PAD_SPEED_UP = 1<<14, /* LED PAD Speed Up */
451 GLB_GPIO_STAT_RACE_DIS = 1<<13, /* Status Race Disable */
452 GLB_GPIO_TEST_SEL_MSK = 3<<11, /* Testmode Select */
453 GLB_GPIO_TEST_SEL_BASE = 1<<11,
454 GLB_GPIO_RAND_ENA = 1<<10, /* Random Enable */
455 GLB_GPIO_RAND_BIT_1 = 1<<9, /* Random Bit 1 */
456};
457
367/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */ 458/* B2_MAC_CFG 8 bit MAC Configuration / Chip Revision */
368enum { 459enum {
369 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */ 460 CFG_CHIP_R_MSK = 0xf<<4, /* Bit 7.. 4: Chip Revision */
@@ -392,6 +483,11 @@ enum {
392 CHIP_REV_YU_FE_A2 = 2, 483 CHIP_REV_YU_FE_A2 = 2,
393 484
394}; 485};
486enum yukon_ex_rev {
487 CHIP_REV_YU_EX_A0 = 1,
488 CHIP_REV_YU_EX_B0 = 2,
489};
490
395 491
396/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */ 492/* B2_Y2_CLK_GATE 8 bit Clock Gating (Yukon-2 only) */
397enum { 493enum {
@@ -515,23 +611,15 @@ enum {
515enum { 611enum {
516 B8_Q_REGS = 0x0400, /* base of Queue registers */ 612 B8_Q_REGS = 0x0400, /* base of Queue registers */
517 Q_D = 0x00, /* 8*32 bit Current Descriptor */ 613 Q_D = 0x00, /* 8*32 bit Current Descriptor */
518 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ 614 Q_VLAN = 0x20, /* 16 bit Current VLAN Tag */
519 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ 615 Q_DONE = 0x24, /* 16 bit Done Index */
520 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */ 616 Q_AC_L = 0x28, /* 32 bit Current Address Counter Low dWord */
521 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */ 617 Q_AC_H = 0x2c, /* 32 bit Current Address Counter High dWord */
522 Q_BC = 0x30, /* 32 bit Current Byte Counter */ 618 Q_BC = 0x30, /* 32 bit Current Byte Counter */
523 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */ 619 Q_CSR = 0x34, /* 32 bit BMU Control/Status Register */
524 Q_F = 0x38, /* 32 bit Flag Register */ 620 Q_TEST = 0x38, /* 32 bit Test/Control Register */
525 Q_T1 = 0x3c, /* 32 bit Test Register 1 */
526 Q_T1_TR = 0x3c, /* 8 bit Test Register 1 Transfer SM */
527 Q_T1_WR = 0x3d, /* 8 bit Test Register 1 Write Descriptor SM */
528 Q_T1_RD = 0x3e, /* 8 bit Test Register 1 Read Descriptor SM */
529 Q_T1_SV = 0x3f, /* 8 bit Test Register 1 Supervisor SM */
530 Q_T2 = 0x40, /* 32 bit Test Register 2 */
531 Q_T3 = 0x44, /* 32 bit Test Register 3 */
532 621
533/* Yukon-2 */ 622/* Yukon-2 */
534 Q_DONE = 0x24, /* 16 bit Done Index (Yukon-2 only) */
535 Q_WM = 0x40, /* 16 bit FIFO Watermark */ 623 Q_WM = 0x40, /* 16 bit FIFO Watermark */
536 Q_AL = 0x42, /* 8 bit FIFO Alignment */ 624 Q_AL = 0x42, /* 8 bit FIFO Alignment */
537 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */ 625 Q_RSP = 0x44, /* 16 bit FIFO Read Shadow Pointer */
@@ -545,15 +633,16 @@ enum {
545}; 633};
546#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs)) 634#define Q_ADDR(reg, offs) (B8_Q_REGS + (reg) + (offs))
547 635
548/* Q_F 32 bit Flag Register */ 636/* Q_TEST 32 bit Test Register */
549enum { 637enum {
550 F_ALM_FULL = 1<<27, /* Rx FIFO: almost full */ 638 /* Transmit */
551 F_EMPTY = 1<<27, /* Tx FIFO: empty flag */ 639 F_TX_CHK_AUTO_OFF = 1<<31, /* Tx checksum auto calc off (Yukon EX) */
 552 F_FIFO_EOF = 1<<26, /* Tag (EOF Flag) bit in FIFO */ 640 F_TX_CHK_AUTO_ON = 1<<30, /* Tx checksum auto calc on (Yukon EX) */
553 F_WM_REACHED = 1<<25, /* Watermark reached */ 641
642 /* Receive */
554 F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */ 643 F_M_RX_RAM_DIS = 1<<24, /* MAC Rx RAM Read Port disable */
555 F_FIFO_LEVEL = 0x1fL<<16, /* Bit 23..16: # of Qwords in FIFO */ 644
556 F_WATER_MARK = 0x0007ffL, /* Bit 10.. 0: Watermark */ 645 /* Hardware testbits not used */
557}; 646};
558 647
559/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 648/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
@@ -1608,6 +1697,16 @@ enum {
1608 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ 1697 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
1609 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */ 1698 RX_VLAN_STRIP_OFF = 1<<24, /* disable VLAN stripping */
1610 1699
1700 RX_MACSEC_FLUSH_ON = 1<<23,
1701 RX_MACSEC_FLUSH_OFF = 1<<22,
1702 RX_MACSEC_ASF_FLUSH_ON = 1<<21,
1703 RX_MACSEC_ASF_FLUSH_OFF = 1<<20,
1704
1705 GMF_RX_OVER_ON = 1<<19, /* enable flushing on receive overrun */
1706 GMF_RX_OVER_OFF = 1<<18, /* disable flushing on receive overrun */
1707 GMF_ASF_RX_OVER_ON = 1<<17, /* enable flushing of ASF when overrun */
1708 GMF_ASF_RX_OVER_OFF = 1<<16, /* disable flushing of ASF when overrun */
1709
1611 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */ 1710 GMF_WP_TST_ON = 1<<14, /* Write Pointer Test On */
1612 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */ 1711 GMF_WP_TST_OFF = 1<<13, /* Write Pointer Test Off */
1613 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */ 1712 GMF_WP_STEP = 1<<12, /* Write Pointer Step/Increment */
@@ -1720,6 +1819,15 @@ enum {
1720 1819
1721/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */ 1820/* GMAC_CTRL 32 bit GMAC Control Reg (YUKON only) */
1722enum { 1821enum {
1822 GMC_SET_RST = 1<<15,/* MAC SEC RST */
 1823 GMC_SEC_RST_OFF = 1<<14,/* MAC SEC RST OFF */
1824 GMC_BYP_MACSECRX_ON = 1<<13,/* Bypass macsec RX */
1825 GMC_BYP_MACSECRX_OFF= 1<<12,/* Bypass macsec RX off */
1826 GMC_BYP_MACSECTX_ON = 1<<11,/* Bypass macsec TX */
1827 GMC_BYP_MACSECTX_OFF= 1<<10,/* Bypass macsec TX off*/
1828 GMC_BYP_RETR_ON = 1<<9, /* Bypass retransmit FIFO On */
1829 GMC_BYP_RETR_OFF= 1<<8, /* Bypass retransmit FIFO Off */
1830
1723 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */ 1831 GMC_H_BURST_ON = 1<<7, /* Half Duplex Burst Mode On */
1724 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */ 1832 GMC_H_BURST_OFF = 1<<6, /* Half Duplex Burst Mode Off */
1725 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */ 1833 GMC_F_LOOPB_ON = 1<<5, /* FIFO Loopback On */
@@ -1805,9 +1913,13 @@ enum {
1805 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN, 1913 OP_ADDR64VLAN = OP_ADDR64 | OP_VLAN,
1806 OP_LRGLEN = 0x24, 1914 OP_LRGLEN = 0x24,
1807 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN, 1915 OP_LRGLENVLAN = OP_LRGLEN | OP_VLAN,
1916 OP_MSS = 0x28,
1917 OP_MSSVLAN = OP_MSS | OP_VLAN,
1918
1808 OP_BUFFER = 0x40, 1919 OP_BUFFER = 0x40,
1809 OP_PACKET = 0x41, 1920 OP_PACKET = 0x41,
1810 OP_LARGESEND = 0x43, 1921 OP_LARGESEND = 0x43,
1922 OP_LSOV2 = 0x45,
1811 1923
1812/* YUKON-2 STATUS opcodes defines */ 1924/* YUKON-2 STATUS opcodes defines */
1813 OP_RXSTAT = 0x60, 1925 OP_RXSTAT = 0x60,
@@ -1818,6 +1930,19 @@ enum {
1818 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN, 1930 OP_RXTIMEVLAN = OP_RXTIMESTAMP | OP_RXVLAN,
1819 OP_RSS_HASH = 0x65, 1931 OP_RSS_HASH = 0x65,
1820 OP_TXINDEXLE = 0x68, 1932 OP_TXINDEXLE = 0x68,
1933 OP_MACSEC = 0x6c,
1934 OP_PUTIDX = 0x70,
1935};
1936
1937enum status_css {
1938 CSS_TCPUDPCSOK = 1<<7, /* TCP / UDP checksum is ok */
1939 CSS_ISUDP = 1<<6, /* packet is a UDP packet */
1940 CSS_ISTCP = 1<<5, /* packet is a TCP packet */
1941 CSS_ISIPFRAG = 1<<4, /* packet is a TCP/UDP frag, CS calc not done */
 1942 CSS_ISIPV6 = 1<<3, /* packet is an IPv6 packet */
 1943 CSS_IPV4CSUMOK = 1<<2, /* IPv4: IP header checksum is ok */
 1944 CSS_ISIPV4 = 1<<1, /* packet is an IPv4 packet */
1945 CSS_LINK_BIT = 1<<0, /* port number (legacy) */
1821}; 1946};
1822 1947
1823/* Yukon 2 hardware interface */ 1948/* Yukon 2 hardware interface */
@@ -1838,7 +1963,7 @@ struct sky2_rx_le {
1838struct sky2_status_le { 1963struct sky2_status_le {
1839 __le32 status; /* also checksum */ 1964 __le32 status; /* also checksum */
1840 __le16 length; /* also vlan tag */ 1965 __le16 length; /* also vlan tag */
1841 u8 link; 1966 u8 css;
1842 u8 opcode; 1967 u8 opcode;
1843} __attribute((packed)); 1968} __attribute((packed));
1844 1969
diff --git a/drivers/net/sni_82596.c b/drivers/net/sni_82596.c
new file mode 100644
index 000000000000..2cf6794acb4f
--- /dev/null
+++ b/drivers/net/sni_82596.c
@@ -0,0 +1,185 @@
1/*
2 * sni_82596.c -- driver for intel 82596 ethernet controller, as
3 * used in older SNI RM machines
4 */
5
6#include <linux/module.h>
7#include <linux/kernel.h>
8#include <linux/string.h>
9#include <linux/errno.h>
10#include <linux/ioport.h>
11#include <linux/slab.h>
12#include <linux/interrupt.h>
13#include <linux/delay.h>
14#include <linux/netdevice.h>
15#include <linux/etherdevice.h>
16#include <linux/skbuff.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/bitops.h>
20#include <linux/platform_device.h>
21#include <linux/io.h>
22#include <linux/irq.h>
23
24#define SNI_82596_DRIVER_VERSION "SNI RM 82596 driver - Revision: 0.01"
25
26static const char sni_82596_string[] = "snirm_82596";
27
28#define DMA_ALLOC dma_alloc_coherent
29#define DMA_FREE dma_free_coherent
30#define DMA_WBACK(priv, addr, len) do { } while (0)
31#define DMA_INV(priv, addr, len) do { } while (0)
32#define DMA_WBACK_INV(priv, addr, len) do { } while (0)
33
34#define SYSBUS 0x00004400
35
36/* big endian CPU, 82596 little endian */
37#define SWAP32(x) cpu_to_le32((u32)(x))
38#define SWAP16(x) cpu_to_le16((u16)(x))
39
40#define OPT_MPU_16BIT 0x01
41
42#include "lib82596.c"
43
44MODULE_AUTHOR("Thomas Bogendoerfer");
45MODULE_DESCRIPTION("i82596 driver");
46MODULE_LICENSE("GPL");
47module_param(i596_debug, int, 0);
48MODULE_PARM_DESC(i596_debug, "82596 debug mask");
49
50static inline void ca(struct net_device *dev)
51{
52 struct i596_private *lp = netdev_priv(dev);
53
54 writel(0, lp->ca);
55}
56
57
58static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
59{
60 struct i596_private *lp = netdev_priv(dev);
61
62 u32 v = (u32) (c) | (u32) (x);
63
64 if (lp->options & OPT_MPU_16BIT) {
65 writew(v & 0xffff, lp->mpu_port);
66 wmb(); /* order writes to MPU port */
67 udelay(1);
68 writew(v >> 16, lp->mpu_port);
69 } else {
70 writel(v, lp->mpu_port);
71 wmb(); /* order writes to MPU port */
72 udelay(1);
73 writel(v, lp->mpu_port);
74 }
75}
76
77
78static int __devinit sni_82596_probe(struct platform_device *dev)
79{
80 struct net_device *netdevice;
81 struct i596_private *lp;
82 struct resource *res, *ca, *idprom, *options;
83 int retval = -ENOMEM;
84 void __iomem *mpu_addr;
85 void __iomem *ca_addr;
86 u8 __iomem *eth_addr;
87
88 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
89 ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
90 options = platform_get_resource(dev, 0, 0);
91 idprom = platform_get_resource(dev, IORESOURCE_MEM, 2);
92 if (!res || !ca || !options || !idprom)
93 return -ENODEV;
94 mpu_addr = ioremap_nocache(res->start, 4);
95 if (!mpu_addr)
96 return -ENOMEM;
97 ca_addr = ioremap_nocache(ca->start, 4);
98 if (!ca_addr)
99 goto probe_failed_free_mpu;
100
101 printk(KERN_INFO "Found i82596 at 0x%x\n", res->start);
102
103 netdevice = alloc_etherdev(sizeof(struct i596_private));
104 if (!netdevice)
105 goto probe_failed_free_ca;
106
107 SET_NETDEV_DEV(netdevice, &dev->dev);
108 platform_set_drvdata (dev, netdevice);
109
110 netdevice->base_addr = res->start;
111 netdevice->irq = platform_get_irq(dev, 0);
112
113 eth_addr = ioremap_nocache(idprom->start, 0x10);
114 if (!eth_addr)
115 goto probe_failed;
116
117 /* someone seems to like messed up stuff */
118 netdevice->dev_addr[0] = readb(eth_addr + 0x0b);
119 netdevice->dev_addr[1] = readb(eth_addr + 0x0a);
120 netdevice->dev_addr[2] = readb(eth_addr + 0x09);
121 netdevice->dev_addr[3] = readb(eth_addr + 0x08);
122 netdevice->dev_addr[4] = readb(eth_addr + 0x07);
123 netdevice->dev_addr[5] = readb(eth_addr + 0x06);
124 iounmap(eth_addr);
125
126 if (!netdevice->irq) {
127 printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
128 __FILE__, netdevice->base_addr);
129 goto probe_failed;
130 }
131
132 lp = netdev_priv(netdevice);
133 lp->options = options->flags & IORESOURCE_BITS;
134 lp->ca = ca_addr;
135 lp->mpu_port = mpu_addr;
136
137 retval = i82596_probe(netdevice);
138 if (retval == 0)
139 return 0;
140
141probe_failed:
142 free_netdev(netdevice);
143probe_failed_free_ca:
144 iounmap(ca_addr);
145probe_failed_free_mpu:
146 iounmap(mpu_addr);
147 return retval;
148}
149
150static int __devexit sni_82596_driver_remove(struct platform_device *pdev)
151{
152 struct net_device *dev = platform_get_drvdata(pdev);
153 struct i596_private *lp = netdev_priv(dev);
154
155 unregister_netdev(dev);
156 DMA_FREE(dev->dev.parent, sizeof(struct i596_private),
157 lp->dma, lp->dma_addr);
158 iounmap(lp->ca);
159 iounmap(lp->mpu_port);
160 free_netdev (dev);
161 return 0;
162}
163
164static struct platform_driver sni_82596_driver = {
165 .probe = sni_82596_probe,
166 .remove = __devexit_p(sni_82596_driver_remove),
167 .driver = {
168 .name = sni_82596_string,
169 },
170};
171
172static int __devinit sni_82596_init(void)
173{
174 printk(KERN_INFO SNI_82596_DRIVER_VERSION "\n");
175 return platform_driver_register(&sni_82596_driver);
176}
177
178
179static void __exit sni_82596_exit(void)
180{
181 platform_driver_unregister(&sni_82596_driver);
182}
183
184module_init(sni_82596_init);
185module_exit(sni_82596_exit);
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 7a4aa6a9f949..f5abb5279d4d 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -434,7 +434,8 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
434 bufsize + SPIDER_NET_RXBUF_ALIGN - 1); 434 bufsize + SPIDER_NET_RXBUF_ALIGN - 1);
435 if (!descr->skb) { 435 if (!descr->skb) {
436 if (netif_msg_rx_err(card) && net_ratelimit()) 436 if (netif_msg_rx_err(card) && net_ratelimit())
437 pr_err("Not enough memory to allocate rx buffer\n"); 437 dev_err(&card->netdev->dev,
438 "Not enough memory to allocate rx buffer\n");
438 card->spider_stats.alloc_rx_skb_error++; 439 card->spider_stats.alloc_rx_skb_error++;
439 return -ENOMEM; 440 return -ENOMEM;
440 } 441 }
@@ -455,7 +456,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
455 dev_kfree_skb_any(descr->skb); 456 dev_kfree_skb_any(descr->skb);
456 descr->skb = NULL; 457 descr->skb = NULL;
457 if (netif_msg_rx_err(card) && net_ratelimit()) 458 if (netif_msg_rx_err(card) && net_ratelimit())
458 pr_err("Could not iommu-map rx buffer\n"); 459 dev_err(&card->netdev->dev, "Could not iommu-map rx buffer\n");
459 card->spider_stats.rx_iommu_map_error++; 460 card->spider_stats.rx_iommu_map_error++;
460 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 461 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
461 } else { 462 } else {
@@ -500,6 +501,20 @@ spider_net_enable_rxdmac(struct spider_net_card *card)
500} 501}
501 502
502/** 503/**
504 * spider_net_disable_rxdmac - disables the receive DMA controller
505 * @card: card structure
506 *
507 * spider_net_disable_rxdmac terminates processing on the DMA controller
 508 * by turning off the DMA controller, with the force-end flag set.
509 */
510static inline void
511spider_net_disable_rxdmac(struct spider_net_card *card)
512{
513 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
514 SPIDER_NET_DMA_RX_FEND_VALUE);
515}
516
517/**
503 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains 518 * spider_net_refill_rx_chain - refills descriptors/skbs in the rx chains
504 * @card: card structure 519 * @card: card structure
505 * 520 *
@@ -655,20 +670,6 @@ write_hash:
655} 670}
656 671
657/** 672/**
658 * spider_net_disable_rxdmac - disables the receive DMA controller
659 * @card: card structure
660 *
661 * spider_net_disable_rxdmac terminates processing on the DMA controller by
662 * turing off DMA and issueing a force end
663 */
664static void
665spider_net_disable_rxdmac(struct spider_net_card *card)
666{
667 spider_net_write_reg(card, SPIDER_NET_GDADMACCNTR,
668 SPIDER_NET_DMA_RX_FEND_VALUE);
669}
670
671/**
672 * spider_net_prepare_tx_descr - fill tx descriptor with skb data 673 * spider_net_prepare_tx_descr - fill tx descriptor with skb data
673 * @card: card structure 674 * @card: card structure
674 * @descr: descriptor structure to fill out 675 * @descr: descriptor structure to fill out
@@ -692,7 +693,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
692 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); 693 buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
693 if (pci_dma_mapping_error(buf)) { 694 if (pci_dma_mapping_error(buf)) {
694 if (netif_msg_tx_err(card) && net_ratelimit()) 695 if (netif_msg_tx_err(card) && net_ratelimit())
695 pr_err("could not iommu-map packet (%p, %i). " 696 dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
696 "Dropping packet\n", skb->data, skb->len); 697 "Dropping packet\n", skb->data, skb->len);
697 card->spider_stats.tx_iommu_map_error++; 698 card->spider_stats.tx_iommu_map_error++;
698 return -ENOMEM; 699 return -ENOMEM;
@@ -715,7 +716,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
715 hwdescr->data_status = 0; 716 hwdescr->data_status = 0;
716 717
717 hwdescr->dmac_cmd_status = 718 hwdescr->dmac_cmd_status =
718 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_NOCS; 719 SPIDER_NET_DESCR_CARDOWNED | SPIDER_NET_DMAC_TXFRMTL;
719 spin_unlock_irqrestore(&chain->lock, flags); 720 spin_unlock_irqrestore(&chain->lock, flags);
720 721
721 if (skb->ip_summed == CHECKSUM_PARTIAL) 722 if (skb->ip_summed == CHECKSUM_PARTIAL)
@@ -832,9 +833,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal)
832 case SPIDER_NET_DESCR_PROTECTION_ERROR: 833 case SPIDER_NET_DESCR_PROTECTION_ERROR:
833 case SPIDER_NET_DESCR_FORCE_END: 834 case SPIDER_NET_DESCR_FORCE_END:
834 if (netif_msg_tx_err(card)) 835 if (netif_msg_tx_err(card))
835 pr_err("%s: forcing end of tx descriptor " 836 dev_err(&card->netdev->dev, "forcing end of tx descriptor "
836 "with status x%02x\n", 837 "with status x%02x\n", status);
837 card->netdev->name, status);
838 card->netdev_stats.tx_errors++; 838 card->netdev_stats.tx_errors++;
839 break; 839 break;
840 840
@@ -1022,34 +1022,94 @@ spider_net_pass_skb_up(struct spider_net_descr *descr,
1022 netif_receive_skb(skb); 1022 netif_receive_skb(skb);
1023} 1023}
1024 1024
1025#ifdef DEBUG
1026static void show_rx_chain(struct spider_net_card *card) 1025static void show_rx_chain(struct spider_net_card *card)
1027{ 1026{
1028 struct spider_net_descr_chain *chain = &card->rx_chain; 1027 struct spider_net_descr_chain *chain = &card->rx_chain;
1029 struct spider_net_descr *start= chain->tail; 1028 struct spider_net_descr *start= chain->tail;
1030 struct spider_net_descr *descr= start; 1029 struct spider_net_descr *descr= start;
1030 struct spider_net_hw_descr *hwd = start->hwdescr;
1031 struct device *dev = &card->netdev->dev;
1032 u32 curr_desc, next_desc;
1031 int status; 1033 int status;
1032 1034
1035 int tot = 0;
1033 int cnt = 0; 1036 int cnt = 0;
1034 int cstat = spider_net_get_descr_status(descr); 1037 int off = start - chain->ring;
1035 printk(KERN_INFO "RX chain tail at descr=%ld\n", 1038 int cstat = hwd->dmac_cmd_status;
1036 (start - card->descr) - card->tx_chain.num_desc); 1039
1040 dev_info(dev, "Total number of descrs=%d\n",
1041 chain->num_desc);
1042 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n",
1043 off, cstat);
1044
1045 curr_desc = spider_net_read_reg(card, SPIDER_NET_GDACTDPA);
1046 next_desc = spider_net_read_reg(card, SPIDER_NET_GDACNEXTDA);
1047
1037 status = cstat; 1048 status = cstat;
1038 do 1049 do
1039 { 1050 {
1040 status = spider_net_get_descr_status(descr); 1051 hwd = descr->hwdescr;
1052 off = descr - chain->ring;
1053 status = hwd->dmac_cmd_status;
1054
1055 if (descr == chain->head)
1056 dev_info(dev, "Chain head is at %d, head status=0x%x\n",
1057 off, status);
1058
1059 if (curr_desc == descr->bus_addr)
1060 dev_info(dev, "HW curr desc (GDACTDPA) is at %d, status=0x%x\n",
1061 off, status);
1062
1063 if (next_desc == descr->bus_addr)
1064 dev_info(dev, "HW next desc (GDACNEXTDA) is at %d, status=0x%x\n",
1065 off, status);
1066
1067 if (hwd->next_descr_addr == 0)
1068 dev_info(dev, "chain is cut at %d\n", off);
1069
1041 if (cstat != status) { 1070 if (cstat != status) {
1042 printk(KERN_INFO "Have %d descrs with stat=x%08x\n", cnt, cstat); 1071 int from = (chain->num_desc + off - cnt) % chain->num_desc;
1072 int to = (chain->num_desc + off - 1) % chain->num_desc;
1073 dev_info(dev, "Have %d (from %d to %d) descrs "
1074 "with stat=0x%08x\n", cnt, from, to, cstat);
1043 cstat = status; 1075 cstat = status;
1044 cnt = 0; 1076 cnt = 0;
1045 } 1077 }
1078
1046 cnt ++; 1079 cnt ++;
1080 tot ++;
1081 descr = descr->next;
1082 } while (descr != start);
1083
1084 dev_info(dev, "Last %d descrs with stat=0x%08x "
1085 "for a total of %d descrs\n", cnt, cstat, tot);
1086
1087#ifdef DEBUG
1088 /* Now dump the whole ring */
1089 descr = start;
1090 do
1091 {
1092 struct spider_net_hw_descr *hwd = descr->hwdescr;
1093 status = spider_net_get_descr_status(hwd);
1094 cnt = descr - chain->ring;
1095 dev_info(dev, "Descr %d stat=0x%08x skb=%p\n",
1096 cnt, status, descr->skb);
1097 dev_info(dev, "bus addr=%08x buf addr=%08x sz=%d\n",
1098 descr->bus_addr, hwd->buf_addr, hwd->buf_size);
1099 dev_info(dev, "next=%08x result sz=%d valid sz=%d\n",
1100 hwd->next_descr_addr, hwd->result_size,
1101 hwd->valid_size);
1102 dev_info(dev, "dmac=%08x data stat=%08x data err=%08x\n",
1103 hwd->dmac_cmd_status, hwd->data_status,
1104 hwd->data_error);
1105 dev_info(dev, "\n");
1106
1047 descr = descr->next; 1107 descr = descr->next;
1048 } while (descr != start); 1108 } while (descr != start);
1049 printk(KERN_INFO "Last %d descrs with stat=x%08x\n", cnt, cstat);
1050}
1051#endif 1109#endif
1052 1110
1111}
1112
1053/** 1113/**
1054 * spider_net_resync_head_ptr - Advance head ptr past empty descrs 1114 * spider_net_resync_head_ptr - Advance head ptr past empty descrs
1055 * 1115 *
@@ -1127,6 +1187,7 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1127 struct spider_net_descr_chain *chain = &card->rx_chain; 1187 struct spider_net_descr_chain *chain = &card->rx_chain;
1128 struct spider_net_descr *descr = chain->tail; 1188 struct spider_net_descr *descr = chain->tail;
1129 struct spider_net_hw_descr *hwdescr = descr->hwdescr; 1189 struct spider_net_hw_descr *hwdescr = descr->hwdescr;
1190 u32 hw_buf_addr;
1130 int status; 1191 int status;
1131 1192
1132 status = spider_net_get_descr_status(hwdescr); 1193 status = spider_net_get_descr_status(hwdescr);
@@ -1140,15 +1201,17 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1140 chain->tail = descr->next; 1201 chain->tail = descr->next;
1141 1202
1142 /* unmap descriptor */ 1203 /* unmap descriptor */
1143 pci_unmap_single(card->pdev, hwdescr->buf_addr, 1204 hw_buf_addr = hwdescr->buf_addr;
1205 hwdescr->buf_addr = 0xffffffff;
1206 pci_unmap_single(card->pdev, hw_buf_addr,
1144 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); 1207 SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
1145 1208
1146 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || 1209 if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) ||
1147 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || 1210 (status == SPIDER_NET_DESCR_PROTECTION_ERROR) ||
1148 (status == SPIDER_NET_DESCR_FORCE_END) ) { 1211 (status == SPIDER_NET_DESCR_FORCE_END) ) {
1149 if (netif_msg_rx_err(card)) 1212 if (netif_msg_rx_err(card))
1150 pr_err("%s: dropping RX descriptor with state %d\n", 1213 dev_err(&card->netdev->dev,
1151 card->netdev->name, status); 1214 "dropping RX descriptor with state %d\n", status);
1152 card->netdev_stats.rx_dropped++; 1215 card->netdev_stats.rx_dropped++;
1153 goto bad_desc; 1216 goto bad_desc;
1154 } 1217 }
@@ -1156,8 +1219,8 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1156 if ( (status != SPIDER_NET_DESCR_COMPLETE) && 1219 if ( (status != SPIDER_NET_DESCR_COMPLETE) &&
1157 (status != SPIDER_NET_DESCR_FRAME_END) ) { 1220 (status != SPIDER_NET_DESCR_FRAME_END) ) {
1158 if (netif_msg_rx_err(card)) 1221 if (netif_msg_rx_err(card))
1159 pr_err("%s: RX descriptor with unknown state %d\n", 1222 dev_err(&card->netdev->dev,
1160 card->netdev->name, status); 1223 "RX descriptor with unknown state %d\n", status);
1161 card->spider_stats.rx_desc_unk_state++; 1224 card->spider_stats.rx_desc_unk_state++;
1162 goto bad_desc; 1225 goto bad_desc;
1163 } 1226 }
@@ -1165,18 +1228,17 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1165 /* The cases we'll throw away the packet immediately */ 1228 /* The cases we'll throw away the packet immediately */
1166 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) { 1229 if (hwdescr->data_error & SPIDER_NET_DESTROY_RX_FLAGS) {
1167 if (netif_msg_rx_err(card)) 1230 if (netif_msg_rx_err(card))
1168 pr_err("%s: error in received descriptor found, " 1231 dev_err(&card->netdev->dev,
1232 "error in received descriptor found, "
1169 "data_status=x%08x, data_error=x%08x\n", 1233 "data_status=x%08x, data_error=x%08x\n",
1170 card->netdev->name,
1171 hwdescr->data_status, hwdescr->data_error); 1234 hwdescr->data_status, hwdescr->data_error);
1172 goto bad_desc; 1235 goto bad_desc;
1173 } 1236 }
1174 1237
1175 if (hwdescr->dmac_cmd_status & 0xfcf4) { 1238 if (hwdescr->dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS) {
1176 pr_err("%s: bad status, cmd_status=x%08x\n", 1239 dev_err(&card->netdev->dev, "bad status, cmd_status=x%08x\n",
1177 card->netdev->name,
1178 hwdescr->dmac_cmd_status); 1240 hwdescr->dmac_cmd_status);
1179 pr_err("buf_addr=x%08x\n", hwdescr->buf_addr); 1241 pr_err("buf_addr=x%08x\n", hw_buf_addr);
1180 pr_err("buf_size=x%08x\n", hwdescr->buf_size); 1242 pr_err("buf_size=x%08x\n", hwdescr->buf_size);
1181 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr); 1243 pr_err("next_descr_addr=x%08x\n", hwdescr->next_descr_addr);
1182 pr_err("result_size=x%08x\n", hwdescr->result_size); 1244 pr_err("result_size=x%08x\n", hwdescr->result_size);
@@ -1196,6 +1258,8 @@ spider_net_decode_one_descr(struct spider_net_card *card)
1196 return 1; 1258 return 1;
1197 1259
1198bad_desc: 1260bad_desc:
1261 if (netif_msg_rx_err(card))
1262 show_rx_chain(card);
1199 dev_kfree_skb_irq(descr->skb); 1263 dev_kfree_skb_irq(descr->skb);
1200 descr->skb = NULL; 1264 descr->skb = NULL;
1201 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE; 1265 hwdescr->dmac_cmd_status = SPIDER_NET_DESCR_NOT_IN_USE;
@@ -1221,7 +1285,6 @@ spider_net_poll(struct net_device *netdev, int *budget)
1221 int packets_to_do, packets_done = 0; 1285 int packets_to_do, packets_done = 0;
1222 int no_more_packets = 0; 1286 int no_more_packets = 0;
1223 1287
1224 spider_net_cleanup_tx_ring(card);
1225 packets_to_do = min(*budget, netdev->quota); 1288 packets_to_do = min(*budget, netdev->quota);
1226 1289
1227 while (packets_to_do) { 1290 while (packets_to_do) {
@@ -1246,6 +1309,8 @@ spider_net_poll(struct net_device *netdev, int *budget)
1246 spider_net_refill_rx_chain(card); 1309 spider_net_refill_rx_chain(card);
1247 spider_net_enable_rxdmac(card); 1310 spider_net_enable_rxdmac(card);
1248 1311
1312 spider_net_cleanup_tx_ring(card);
1313
1249 /* if all packets are in the stack, enable interrupts and return 0 */ 1314 /* if all packets are in the stack, enable interrupts and return 0 */
1250 /* if not, return 1 */ 1315 /* if not, return 1 */
1251 if (no_more_packets) { 1316 if (no_more_packets) {
@@ -1415,7 +1480,7 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1415 case SPIDER_NET_GPWFFINT: 1480 case SPIDER_NET_GPWFFINT:
1416 /* PHY command queue full */ 1481 /* PHY command queue full */
1417 if (netif_msg_intr(card)) 1482 if (netif_msg_intr(card))
1418 pr_err("PHY write queue full\n"); 1483 dev_err(&card->netdev->dev, "PHY write queue full\n");
1419 show_error = 0; 1484 show_error = 0;
1420 break; 1485 break;
1421 1486
@@ -1582,9 +1647,8 @@ spider_net_handle_error_irq(struct spider_net_card *card, u32 status_reg)
1582 } 1647 }
1583 1648
1584 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit()) 1649 if ((show_error) && (netif_msg_intr(card)) && net_ratelimit())
1585 pr_err("Got error interrupt on %s, GHIINT0STS = 0x%08x, " 1650 dev_err(&card->netdev->dev, "Error interrupt, GHIINT0STS = 0x%08x, "
1586 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n", 1651 "GHIINT1STS = 0x%08x, GHIINT2STS = 0x%08x\n",
1587 card->netdev->name,
1588 status_reg, error_reg1, error_reg2); 1652 status_reg, error_reg1, error_reg2);
1589 1653
1590 /* clear interrupt sources */ 1654 /* clear interrupt sources */
@@ -1849,7 +1913,8 @@ spider_net_init_firmware(struct spider_net_card *card)
1849 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) { 1913 SPIDER_NET_FIRMWARE_NAME, &card->pdev->dev) == 0) {
1850 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) && 1914 if ( (firmware->size != SPIDER_NET_FIRMWARE_LEN) &&
1851 netif_msg_probe(card) ) { 1915 netif_msg_probe(card) ) {
1852 pr_err("Incorrect size of spidernet firmware in " \ 1916 dev_err(&card->netdev->dev,
1917 "Incorrect size of spidernet firmware in " \
1853 "filesystem. Looking in host firmware...\n"); 1918 "filesystem. Looking in host firmware...\n");
1854 goto try_host_fw; 1919 goto try_host_fw;
1855 } 1920 }
@@ -1873,8 +1938,8 @@ try_host_fw:
1873 1938
1874 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) && 1939 if ( (fw_size != SPIDER_NET_FIRMWARE_LEN) &&
1875 netif_msg_probe(card) ) { 1940 netif_msg_probe(card) ) {
1876 pr_err("Incorrect size of spidernet firmware in " \ 1941 dev_err(&card->netdev->dev,
1877 "host firmware\n"); 1942 "Incorrect size of spidernet firmware in host firmware\n");
1878 goto done; 1943 goto done;
1879 } 1944 }
1880 1945
@@ -1884,7 +1949,8 @@ done:
1884 return err; 1949 return err;
1885out_err: 1950out_err:
1886 if (netif_msg_probe(card)) 1951 if (netif_msg_probe(card))
1887 pr_err("Couldn't find spidernet firmware in filesystem " \ 1952 dev_err(&card->netdev->dev,
1953 "Couldn't find spidernet firmware in filesystem " \
1888 "or host firmware\n"); 1954 "or host firmware\n");
1889 return err; 1955 return err;
1890} 1956}
@@ -2279,13 +2345,14 @@ spider_net_setup_netdev(struct spider_net_card *card)
2279 2345
2280 result = spider_net_set_mac(netdev, &addr); 2346 result = spider_net_set_mac(netdev, &addr);
2281 if ((result) && (netif_msg_probe(card))) 2347 if ((result) && (netif_msg_probe(card)))
2282 pr_err("Failed to set MAC address: %i\n", result); 2348 dev_err(&card->netdev->dev,
2349 "Failed to set MAC address: %i\n", result);
2283 2350
2284 result = register_netdev(netdev); 2351 result = register_netdev(netdev);
2285 if (result) { 2352 if (result) {
2286 if (netif_msg_probe(card)) 2353 if (netif_msg_probe(card))
2287 pr_err("Couldn't register net_device: %i\n", 2354 dev_err(&card->netdev->dev,
2288 result); 2355 "Couldn't register net_device: %i\n", result);
2289 return result; 2356 return result;
2290 } 2357 }
2291 2358
@@ -2363,17 +2430,19 @@ spider_net_setup_pci_dev(struct pci_dev *pdev)
2363 unsigned long mmio_start, mmio_len; 2430 unsigned long mmio_start, mmio_len;
2364 2431
2365 if (pci_enable_device(pdev)) { 2432 if (pci_enable_device(pdev)) {
2366 pr_err("Couldn't enable PCI device\n"); 2433 dev_err(&pdev->dev, "Couldn't enable PCI device\n");
2367 return NULL; 2434 return NULL;
2368 } 2435 }
2369 2436
2370 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { 2437 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
2371 pr_err("Couldn't find proper PCI device base address.\n"); 2438 dev_err(&pdev->dev,
2439 "Couldn't find proper PCI device base address.\n");
2372 goto out_disable_dev; 2440 goto out_disable_dev;
2373 } 2441 }
2374 2442
2375 if (pci_request_regions(pdev, spider_net_driver_name)) { 2443 if (pci_request_regions(pdev, spider_net_driver_name)) {
2376 pr_err("Couldn't obtain PCI resources, aborting.\n"); 2444 dev_err(&pdev->dev,
2445 "Couldn't obtain PCI resources, aborting.\n");
2377 goto out_disable_dev; 2446 goto out_disable_dev;
2378 } 2447 }
2379 2448
@@ -2381,8 +2450,8 @@ spider_net_setup_pci_dev(struct pci_dev *pdev)
2381 2450
2382 card = spider_net_alloc_card(); 2451 card = spider_net_alloc_card();
2383 if (!card) { 2452 if (!card) {
2384 pr_err("Couldn't allocate net_device structure, " 2453 dev_err(&pdev->dev,
2385 "aborting.\n"); 2454 "Couldn't allocate net_device structure, aborting.\n");
2386 goto out_release_regions; 2455 goto out_release_regions;
2387 } 2456 }
2388 card->pdev = pdev; 2457 card->pdev = pdev;
@@ -2396,7 +2465,8 @@ spider_net_setup_pci_dev(struct pci_dev *pdev)
2396 card->regs = ioremap(mmio_start, mmio_len); 2465 card->regs = ioremap(mmio_start, mmio_len);
2397 2466
2398 if (!card->regs) { 2467 if (!card->regs) {
2399 pr_err("Couldn't obtain PCI resources, aborting.\n"); 2468 dev_err(&pdev->dev,
2469 "Couldn't obtain PCI resources, aborting.\n");
2400 goto out_release_regions; 2470 goto out_release_regions;
2401 } 2471 }
2402 2472
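
Note on the spider_net hunks above: they convert bare pr_err() calls to dev_err(), so each message is prefixed with the bus/device name instead of being anonymous. A minimal sketch of the pattern in a PCI probe path (the function below is illustrative, not part of the driver):

	#include <linux/pci.h>

	static int example_probe(struct pci_dev *pdev)
	{
		if (pci_enable_device(pdev)) {
			/* dev_err() tags the message with the PCI device name */
			dev_err(&pdev->dev, "Couldn't enable PCI device\n");
			return -EIO;
		}
		return 0;
	}
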
diff --git a/drivers/net/spider_net.h b/drivers/net/spider_net.h
index 1d054aa71504..dbbdb8cee3c6 100644
--- a/drivers/net/spider_net.h
+++ b/drivers/net/spider_net.h
@@ -349,11 +349,23 @@ enum spider_net_int2_status {
349#define SPIDER_NET_GPRDAT_MASK 0x0000ffff 349#define SPIDER_NET_GPRDAT_MASK 0x0000ffff
350 350
351#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000 351#define SPIDER_NET_DMAC_NOINTR_COMPLETE 0x00800000
352#define SPIDER_NET_DMAC_NOCS 0x00040000 352#define SPIDER_NET_DMAC_TXFRMTL 0x00040000
353#define SPIDER_NET_DMAC_TCP 0x00020000 353#define SPIDER_NET_DMAC_TCP 0x00020000
354#define SPIDER_NET_DMAC_UDP 0x00030000 354#define SPIDER_NET_DMAC_UDP 0x00030000
355#define SPIDER_NET_TXDCEST 0x08000000 355#define SPIDER_NET_TXDCEST 0x08000000
356 356
357#define SPIDER_NET_DESCR_RXFDIS 0x00000001
358#define SPIDER_NET_DESCR_RXDCEIS 0x00000002
359#define SPIDER_NET_DESCR_RXDEN0IS 0x00000004
360#define SPIDER_NET_DESCR_RXINVDIS 0x00000008
361#define SPIDER_NET_DESCR_RXRERRIS 0x00000010
362#define SPIDER_NET_DESCR_RXFDCIMS 0x00000100
363#define SPIDER_NET_DESCR_RXDCEIMS 0x00000200
364#define SPIDER_NET_DESCR_RXDEN0IMS 0x00000400
365#define SPIDER_NET_DESCR_RXINVDIMS 0x00000800
366#define SPIDER_NET_DESCR_RXRERRMIS 0x00001000
367#define SPIDER_NET_DESCR_UNUSED 0x077fe0e0
368
357#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000 369#define SPIDER_NET_DESCR_IND_PROC_MASK 0xF0000000
358#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ 370#define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */
359#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ 371#define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */
@@ -364,6 +376,13 @@ enum spider_net_int2_status {
364#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000 376#define SPIDER_NET_DESCR_NOT_IN_USE 0xF0000000
365#define SPIDER_NET_DESCR_TXDESFLG 0x00800000 377#define SPIDER_NET_DESCR_TXDESFLG 0x00800000
366 378
379#define SPIDER_NET_DESCR_BAD_STATUS (SPIDER_NET_DESCR_RXDEN0IS | \
380 SPIDER_NET_DESCR_RXRERRIS | \
381 SPIDER_NET_DESCR_RXDEN0IMS | \
382 SPIDER_NET_DESCR_RXINVDIMS | \
383 SPIDER_NET_DESCR_RXRERRMIS | \
384 SPIDER_NET_DESCR_UNUSED)
385
367/* Descriptor, as defined by the hardware */ 386/* Descriptor, as defined by the hardware */
368struct spider_net_hw_descr { 387struct spider_net_hw_descr {
369 u32 buf_addr; 388 u32 buf_addr;
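
The descriptor bits added above fold the RX error and interrupt-status flags into a single SPIDER_NET_DESCR_BAD_STATUS mask, so the receive path can reject a bad descriptor with one test instead of checking each flag. Sketch of that check (the helper name and the choice of status word are assumptions for illustration only):

	/* Illustrative helper, not from the driver: a descriptor whose
	 * status word carries any BAD_STATUS bit should be dropped. */
	static inline int example_rx_status_ok(u32 dmac_cmd_status)
	{
		return !(dmac_cmd_status & SPIDER_NET_DESCR_BAD_STATUS);
	}
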
diff --git a/drivers/net/tulip/Kconfig b/drivers/net/tulip/Kconfig
index 8c9634a98c11..1c537d5a3062 100644
--- a/drivers/net/tulip/Kconfig
+++ b/drivers/net/tulip/Kconfig
@@ -2,17 +2,17 @@
2# Tulip family network device configuration 2# Tulip family network device configuration
3# 3#
4 4
5menu "Tulip family network device support" 5menuconfig NET_TULIP
6 depends on NET_ETHERNET && (PCI || EISA || CARDBUS)
7
8config NET_TULIP
9 bool "\"Tulip\" family network device support" 6 bool "\"Tulip\" family network device support"
7 depends on PCI || EISA || CARDBUS
10 help 8 help
11 This selects the "Tulip" family of EISA/PCI network cards. 9 This selects the "Tulip" family of EISA/PCI network cards.
12 10
11if NET_TULIP
12
13config DE2104X 13config DE2104X
14 tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)" 14 tristate "Early DECchip Tulip (dc2104x) PCI support (EXPERIMENTAL)"
15 depends on NET_TULIP && PCI && EXPERIMENTAL 15 depends on PCI && EXPERIMENTAL
16 select CRC32 16 select CRC32
17 ---help--- 17 ---help---
18 This driver is developed for the SMC EtherPower series Ethernet 18 This driver is developed for the SMC EtherPower series Ethernet
@@ -30,7 +30,7 @@ config DE2104X
30 30
31config TULIP 31config TULIP
32 tristate "DECchip Tulip (dc2114x) PCI support" 32 tristate "DECchip Tulip (dc2114x) PCI support"
33 depends on NET_TULIP && PCI 33 depends on PCI
34 select CRC32 34 select CRC32
35 ---help--- 35 ---help---
36 This driver is developed for the SMC EtherPower series Ethernet 36 This driver is developed for the SMC EtherPower series Ethernet
@@ -95,7 +95,7 @@ config TULIP_NAPI_HW_MITIGATION
95 95
96config DE4X5 96config DE4X5
97 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA" 97 tristate "Generic DECchip & DIGITAL EtherWORKS PCI/EISA"
98 depends on NET_TULIP && (PCI || EISA) 98 depends on PCI || EISA
99 select CRC32 99 select CRC32
100 ---help--- 100 ---help---
101 This is support for the DIGITAL series of PCI/EISA Ethernet cards. 101 This is support for the DIGITAL series of PCI/EISA Ethernet cards.
@@ -112,7 +112,7 @@ config DE4X5
112 112
113config WINBOND_840 113config WINBOND_840
114 tristate "Winbond W89c840 Ethernet support" 114 tristate "Winbond W89c840 Ethernet support"
115 depends on NET_TULIP && PCI 115 depends on PCI
116 select CRC32 116 select CRC32
117 select MII 117 select MII
118 help 118 help
@@ -123,7 +123,7 @@ config WINBOND_840
123 123
124config DM9102 124config DM9102
125 tristate "Davicom DM910x/DM980x support" 125 tristate "Davicom DM910x/DM980x support"
126 depends on NET_TULIP && PCI 126 depends on PCI
127 select CRC32 127 select CRC32
128 ---help--- 128 ---help---
129 This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from 129 This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from
@@ -137,7 +137,7 @@ config DM9102
137 137
138config ULI526X 138config ULI526X
139 tristate "ULi M526x controller support" 139 tristate "ULi M526x controller support"
140 depends on NET_TULIP && PCI 140 depends on PCI
141 select CRC32 141 select CRC32
142 ---help--- 142 ---help---
143 This driver is for ULi M5261/M5263 10/100M Ethernet Controller 143 This driver is for ULi M5261/M5263 10/100M Ethernet Controller
@@ -149,7 +149,7 @@ config ULI526X
149 149
150config PCMCIA_XIRCOM 150config PCMCIA_XIRCOM
151 tristate "Xircom CardBus support (new driver)" 151 tristate "Xircom CardBus support (new driver)"
152 depends on NET_TULIP && CARDBUS 152 depends on CARDBUS
153 ---help--- 153 ---help---
154 This driver is for the Digital "Tulip" Ethernet CardBus adapters. 154 This driver is for the Digital "Tulip" Ethernet CardBus adapters.
155 It should work with most DEC 21*4*-based chips/ethercards, as well 155 It should work with most DEC 21*4*-based chips/ethercards, as well
@@ -162,7 +162,7 @@ config PCMCIA_XIRCOM
162 162
163config PCMCIA_XIRTULIP 163config PCMCIA_XIRTULIP
164 tristate "Xircom Tulip-like CardBus support (old driver)" 164 tristate "Xircom Tulip-like CardBus support (old driver)"
165 depends on NET_TULIP && CARDBUS && BROKEN_ON_SMP 165 depends on CARDBUS && BROKEN_ON_SMP
166 select CRC32 166 select CRC32
167 ---help--- 167 ---help---
168 This driver is for the Digital "Tulip" Ethernet CardBus adapters. 168 This driver is for the Digital "Tulip" Ethernet CardBus adapters.
@@ -174,5 +174,4 @@ config PCMCIA_XIRTULIP
174 <file:Documentation/networking/net-modules.txt>. The module will 174 <file:Documentation/networking/net-modules.txt>. The module will
175 be called xircom_tulip_cb. If unsure, say N. 175 be called xircom_tulip_cb. If unsure, say N.
176 176
177endmenu 177endif # NET_TULIP
178
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 861729806dc1..d380e0b3f05a 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -785,7 +785,6 @@ static void __de_set_rx_mode (struct net_device *dev)
785 785
786 de->tx_head = NEXT_TX(entry); 786 de->tx_head = NEXT_TX(entry);
787 787
788 BUG_ON(TX_BUFFS_AVAIL(de) < 0);
789 if (TX_BUFFS_AVAIL(de) == 0) 788 if (TX_BUFFS_AVAIL(de) == 0)
790 netif_stop_queue(dev); 789 netif_stop_queue(dev);
791 790
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
index 62143f92c231..42fca26afc50 100644
--- a/drivers/net/tulip/de4x5.c
+++ b/drivers/net/tulip/de4x5.c
@@ -597,7 +597,7 @@ static char *args;
597#endif 597#endif
598 598
599struct parameters { 599struct parameters {
600 int fdx; 600 bool fdx;
601 int autosense; 601 int autosense;
602}; 602};
603 603
@@ -809,10 +809,10 @@ struct de4x5_private {
809 s32 irq_en; /* Summary interrupt bits */ 809 s32 irq_en; /* Summary interrupt bits */
810 int media; /* Media (eg TP), mode (eg 100B)*/ 810 int media; /* Media (eg TP), mode (eg 100B)*/
811 int c_media; /* Remember the last media conn */ 811 int c_media; /* Remember the last media conn */
812 int fdx; /* media full duplex flag */ 812 bool fdx; /* media full duplex flag */
813 int linkOK; /* Link is OK */ 813 int linkOK; /* Link is OK */
814 int autosense; /* Allow/disallow autosensing */ 814 int autosense; /* Allow/disallow autosensing */
815 int tx_enable; /* Enable descriptor polling */ 815 bool tx_enable; /* Enable descriptor polling */
816 int setup_f; /* Setup frame filtering type */ 816 int setup_f; /* Setup frame filtering type */
817 int local_state; /* State within a 'media' state */ 817 int local_state; /* State within a 'media' state */
818 struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */ 818 struct mii_phy phy[DE4X5_MAX_PHY]; /* List of attached PHY devices */
@@ -838,8 +838,8 @@ struct de4x5_private {
838 struct de4x5_srom srom; /* A copy of the SROM */ 838 struct de4x5_srom srom; /* A copy of the SROM */
839 int cfrv; /* Card CFRV copy */ 839 int cfrv; /* Card CFRV copy */
840 int rx_ovf; /* Check for 'RX overflow' tag */ 840 int rx_ovf; /* Check for 'RX overflow' tag */
841 int useSROM; /* For non-DEC card use SROM */ 841 bool useSROM; /* For non-DEC card use SROM */
842 int useMII; /* Infoblock using the MII */ 842 bool useMII; /* Infoblock using the MII */
843 int asBitValid; /* Autosense bits in GEP? */ 843 int asBitValid; /* Autosense bits in GEP? */
844 int asPolarity; /* 0 => asserted high */ 844 int asPolarity; /* 0 => asserted high */
845 int asBit; /* Autosense bit number in GEP */ 845 int asBit; /* Autosense bit number in GEP */
@@ -928,7 +928,7 @@ static int dc21040_state(struct net_device *dev, int csr13, int csr14, int c
928static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec); 928static int test_media(struct net_device *dev, s32 irqs, s32 irq_mask, s32 csr13, s32 csr14, s32 csr15, s32 msec);
929static int test_for_100Mb(struct net_device *dev, int msec); 929static int test_for_100Mb(struct net_device *dev, int msec);
930static int wait_for_link(struct net_device *dev); 930static int wait_for_link(struct net_device *dev);
931static int test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec); 931static int test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec);
932static int is_spd_100(struct net_device *dev); 932static int is_spd_100(struct net_device *dev);
933static int is_100_up(struct net_device *dev); 933static int is_100_up(struct net_device *dev);
934static int is_10_up(struct net_device *dev); 934static int is_10_up(struct net_device *dev);
@@ -1109,7 +1109,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1109 /* 1109 /*
1110 ** Now find out what kind of DC21040/DC21041/DC21140 board we have. 1110 ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
1111 */ 1111 */
1112 lp->useSROM = FALSE; 1112 lp->useSROM = false;
1113 if (lp->bus == PCI) { 1113 if (lp->bus == PCI) {
1114 PCI_signature(name, lp); 1114 PCI_signature(name, lp);
1115 } else { 1115 } else {
@@ -1137,7 +1137,7 @@ de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
1137 lp->cache.gepc = GEP_INIT; 1137 lp->cache.gepc = GEP_INIT;
1138 lp->asBit = GEP_SLNK; 1138 lp->asBit = GEP_SLNK;
1139 lp->asPolarity = GEP_SLNK; 1139 lp->asPolarity = GEP_SLNK;
1140 lp->asBitValid = TRUE; 1140 lp->asBitValid = ~0;
1141 lp->timeout = -1; 1141 lp->timeout = -1;
1142 lp->gendev = gendev; 1142 lp->gendev = gendev;
1143 spin_lock_init(&lp->lock); 1143 spin_lock_init(&lp->lock);
@@ -1463,7 +1463,7 @@ de4x5_queue_pkt(struct sk_buff *skb, struct net_device *dev)
1463 u_long flags = 0; 1463 u_long flags = 0;
1464 1464
1465 netif_stop_queue(dev); 1465 netif_stop_queue(dev);
1466 if (lp->tx_enable == NO) { /* Cannot send for now */ 1466 if (!lp->tx_enable) { /* Cannot send for now */
1467 return -1; 1467 return -1;
1468 } 1468 }
1469 1469
@@ -2424,7 +2424,7 @@ dc21040_autoconf(struct net_device *dev)
2424 switch (lp->media) { 2424 switch (lp->media) {
2425 case INIT: 2425 case INIT:
2426 DISABLE_IRQs; 2426 DISABLE_IRQs;
2427 lp->tx_enable = NO; 2427 lp->tx_enable = false;
2428 lp->timeout = -1; 2428 lp->timeout = -1;
2429 de4x5_save_skbs(dev); 2429 de4x5_save_skbs(dev);
2430 if ((lp->autosense == AUTO) || (lp->autosense == TP)) { 2430 if ((lp->autosense == AUTO) || (lp->autosense == TP)) {
@@ -2477,7 +2477,7 @@ dc21040_autoconf(struct net_device *dev)
2477 lp->c_media = lp->media; 2477 lp->c_media = lp->media;
2478 } 2478 }
2479 lp->media = INIT; 2479 lp->media = INIT;
2480 lp->tx_enable = NO; 2480 lp->tx_enable = false;
2481 break; 2481 break;
2482 } 2482 }
2483 2483
@@ -2578,7 +2578,7 @@ dc21041_autoconf(struct net_device *dev)
2578 switch (lp->media) { 2578 switch (lp->media) {
2579 case INIT: 2579 case INIT:
2580 DISABLE_IRQs; 2580 DISABLE_IRQs;
2581 lp->tx_enable = NO; 2581 lp->tx_enable = false;
2582 lp->timeout = -1; 2582 lp->timeout = -1;
2583 de4x5_save_skbs(dev); /* Save non transmitted skb's */ 2583 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2584 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) { 2584 if ((lp->autosense == AUTO) || (lp->autosense == TP_NW)) {
@@ -2757,7 +2757,7 @@ dc21041_autoconf(struct net_device *dev)
2757 lp->c_media = lp->media; 2757 lp->c_media = lp->media;
2758 } 2758 }
2759 lp->media = INIT; 2759 lp->media = INIT;
2760 lp->tx_enable = NO; 2760 lp->tx_enable = false;
2761 break; 2761 break;
2762 } 2762 }
2763 2763
@@ -2781,7 +2781,7 @@ dc21140m_autoconf(struct net_device *dev)
2781 case INIT: 2781 case INIT:
2782 if (lp->timeout < 0) { 2782 if (lp->timeout < 0) {
2783 DISABLE_IRQs; 2783 DISABLE_IRQs;
2784 lp->tx_enable = FALSE; 2784 lp->tx_enable = false;
2785 lp->linkOK = 0; 2785 lp->linkOK = 0;
2786 de4x5_save_skbs(dev); /* Save non transmitted skb's */ 2786 de4x5_save_skbs(dev); /* Save non transmitted skb's */
2787 } 2787 }
@@ -2830,7 +2830,7 @@ dc21140m_autoconf(struct net_device *dev)
2830 if (lp->timeout < 0) { 2830 if (lp->timeout < 0) {
2831 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); 2831 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
2832 } 2832 }
2833 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500); 2833 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
2834 if (cr < 0) { 2834 if (cr < 0) {
2835 next_tick = cr & ~TIMER_CB; 2835 next_tick = cr & ~TIMER_CB;
2836 } else { 2836 } else {
@@ -2845,7 +2845,7 @@ dc21140m_autoconf(struct net_device *dev)
2845 break; 2845 break;
2846 2846
2847 case 1: 2847 case 1:
2848 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { 2848 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000)) < 0) {
2849 next_tick = sr & ~TIMER_CB; 2849 next_tick = sr & ~TIMER_CB;
2850 } else { 2850 } else {
2851 lp->media = SPD_DET; 2851 lp->media = SPD_DET;
@@ -2857,10 +2857,10 @@ dc21140m_autoconf(struct net_device *dev)
2857 if (!(anlpa & MII_ANLPA_RF) && 2857 if (!(anlpa & MII_ANLPA_RF) &&
2858 (cap = anlpa & MII_ANLPA_TAF & ana)) { 2858 (cap = anlpa & MII_ANLPA_TAF & ana)) {
2859 if (cap & MII_ANA_100M) { 2859 if (cap & MII_ANA_100M) {
2860 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); 2860 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
2861 lp->media = _100Mb; 2861 lp->media = _100Mb;
2862 } else if (cap & MII_ANA_10M) { 2862 } else if (cap & MII_ANA_10M) {
2863 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE); 2863 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
2864 2864
2865 lp->media = _10Mb; 2865 lp->media = _10Mb;
2866 } 2866 }
@@ -2932,7 +2932,7 @@ dc21140m_autoconf(struct net_device *dev)
2932 lp->c_media = lp->media; 2932 lp->c_media = lp->media;
2933 } 2933 }
2934 lp->media = INIT; 2934 lp->media = INIT;
2935 lp->tx_enable = FALSE; 2935 lp->tx_enable = false;
2936 break; 2936 break;
2937 } 2937 }
2938 2938
@@ -2965,7 +2965,7 @@ dc2114x_autoconf(struct net_device *dev)
2965 case INIT: 2965 case INIT:
2966 if (lp->timeout < 0) { 2966 if (lp->timeout < 0) {
2967 DISABLE_IRQs; 2967 DISABLE_IRQs;
2968 lp->tx_enable = FALSE; 2968 lp->tx_enable = false;
2969 lp->linkOK = 0; 2969 lp->linkOK = 0;
2970 lp->timeout = -1; 2970 lp->timeout = -1;
2971 de4x5_save_skbs(dev); /* Save non transmitted skb's */ 2971 de4x5_save_skbs(dev); /* Save non transmitted skb's */
@@ -3013,7 +3013,7 @@ dc2114x_autoconf(struct net_device *dev)
3013 if (lp->timeout < 0) { 3013 if (lp->timeout < 0) {
3014 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII); 3014 mii_wr(MII_CR_ASSE | MII_CR_RAN, MII_CR, lp->phy[lp->active].addr, DE4X5_MII);
3015 } 3015 }
3016 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, FALSE, 500); 3016 cr = test_mii_reg(dev, MII_CR, MII_CR_RAN, false, 500);
3017 if (cr < 0) { 3017 if (cr < 0) {
3018 next_tick = cr & ~TIMER_CB; 3018 next_tick = cr & ~TIMER_CB;
3019 } else { 3019 } else {
@@ -3028,7 +3028,8 @@ dc2114x_autoconf(struct net_device *dev)
3028 break; 3028 break;
3029 3029
3030 case 1: 3030 case 1:
3031 if ((sr=test_mii_reg(dev, MII_SR, MII_SR_ASSC, TRUE, 2000)) < 0) { 3031 sr = test_mii_reg(dev, MII_SR, MII_SR_ASSC, true, 2000);
3032 if (sr < 0) {
3032 next_tick = sr & ~TIMER_CB; 3033 next_tick = sr & ~TIMER_CB;
3033 } else { 3034 } else {
3034 lp->media = SPD_DET; 3035 lp->media = SPD_DET;
@@ -3040,10 +3041,10 @@ dc2114x_autoconf(struct net_device *dev)
3040 if (!(anlpa & MII_ANLPA_RF) && 3041 if (!(anlpa & MII_ANLPA_RF) &&
3041 (cap = anlpa & MII_ANLPA_TAF & ana)) { 3042 (cap = anlpa & MII_ANLPA_TAF & ana)) {
3042 if (cap & MII_ANA_100M) { 3043 if (cap & MII_ANA_100M) {
3043 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) ? TRUE : FALSE); 3044 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_100M) != 0;
3044 lp->media = _100Mb; 3045 lp->media = _100Mb;
3045 } else if (cap & MII_ANA_10M) { 3046 } else if (cap & MII_ANA_10M) {
3046 lp->fdx = ((ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) ? TRUE : FALSE); 3047 lp->fdx = (ana & anlpa & MII_ANA_FDAM & MII_ANA_10M) != 0;
3047 lp->media = _10Mb; 3048 lp->media = _10Mb;
3048 } 3049 }
3049 } 3050 }
@@ -3222,14 +3223,14 @@ srom_map_media(struct net_device *dev)
3222{ 3223{
3223 struct de4x5_private *lp = netdev_priv(dev); 3224 struct de4x5_private *lp = netdev_priv(dev);
3224 3225
3225 lp->fdx = 0; 3226 lp->fdx = false;
3226 if (lp->infoblock_media == lp->media) 3227 if (lp->infoblock_media == lp->media)
3227 return 0; 3228 return 0;
3228 3229
3229 switch(lp->infoblock_media) { 3230 switch(lp->infoblock_media) {
3230 case SROM_10BASETF: 3231 case SROM_10BASETF:
3231 if (!lp->params.fdx) return -1; 3232 if (!lp->params.fdx) return -1;
3232 lp->fdx = TRUE; 3233 lp->fdx = true;
3233 case SROM_10BASET: 3234 case SROM_10BASET:
3234 if (lp->params.fdx && !lp->fdx) return -1; 3235 if (lp->params.fdx && !lp->fdx) return -1;
3235 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) { 3236 if ((lp->chipset == DC21140) || ((lp->chipset & ~0x00ff) == DC2114x)) {
@@ -3249,7 +3250,7 @@ srom_map_media(struct net_device *dev)
3249 3250
3250 case SROM_100BASETF: 3251 case SROM_100BASETF:
3251 if (!lp->params.fdx) return -1; 3252 if (!lp->params.fdx) return -1;
3252 lp->fdx = TRUE; 3253 lp->fdx = true;
3253 case SROM_100BASET: 3254 case SROM_100BASET:
3254 if (lp->params.fdx && !lp->fdx) return -1; 3255 if (lp->params.fdx && !lp->fdx) return -1;
3255 lp->media = _100Mb; 3256 lp->media = _100Mb;
@@ -3261,7 +3262,7 @@ srom_map_media(struct net_device *dev)
3261 3262
3262 case SROM_100BASEFF: 3263 case SROM_100BASEFF:
3263 if (!lp->params.fdx) return -1; 3264 if (!lp->params.fdx) return -1;
3264 lp->fdx = TRUE; 3265 lp->fdx = true;
3265 case SROM_100BASEF: 3266 case SROM_100BASEF:
3266 if (lp->params.fdx && !lp->fdx) return -1; 3267 if (lp->params.fdx && !lp->fdx) return -1;
3267 lp->media = _100Mb; 3268 lp->media = _100Mb;
@@ -3297,7 +3298,7 @@ de4x5_init_connection(struct net_device *dev)
3297 spin_lock_irqsave(&lp->lock, flags); 3298 spin_lock_irqsave(&lp->lock, flags);
3298 de4x5_rst_desc_ring(dev); 3299 de4x5_rst_desc_ring(dev);
3299 de4x5_setup_intr(dev); 3300 de4x5_setup_intr(dev);
3300 lp->tx_enable = YES; 3301 lp->tx_enable = true;
3301 spin_unlock_irqrestore(&lp->lock, flags); 3302 spin_unlock_irqrestore(&lp->lock, flags);
3302 outl(POLL_DEMAND, DE4X5_TPD); 3303 outl(POLL_DEMAND, DE4X5_TPD);
3303 3304
@@ -3336,7 +3337,7 @@ de4x5_reset_phy(struct net_device *dev)
3336 } 3337 }
3337 } 3338 }
3338 if (lp->useMII) { 3339 if (lp->useMII) {
3339 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, FALSE, 500); 3340 next_tick = test_mii_reg(dev, MII_CR, MII_CR_RST, false, 500);
3340 } 3341 }
3341 } else if (lp->chipset == DC21140) { 3342 } else if (lp->chipset == DC21140) {
3342 PHY_HARD_RESET; 3343 PHY_HARD_RESET;
@@ -3466,7 +3467,7 @@ wait_for_link(struct net_device *dev)
3466** 3467**
3467*/ 3468*/
3468static int 3469static int
3469test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec) 3470test_mii_reg(struct net_device *dev, int reg, int mask, bool pol, long msec)
3470{ 3471{
3471 struct de4x5_private *lp = netdev_priv(dev); 3472 struct de4x5_private *lp = netdev_priv(dev);
3472 int test; 3473 int test;
@@ -3476,9 +3477,8 @@ test_mii_reg(struct net_device *dev, int reg, int mask, int pol, long msec)
3476 lp->timeout = msec/100; 3477 lp->timeout = msec/100;
3477 } 3478 }
3478 3479
3479 if (pol) pol = ~0;
3480 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask; 3480 reg = mii_rd((u_char)reg, lp->phy[lp->active].addr, DE4X5_MII) & mask;
3481 test = (reg ^ pol) & mask; 3481 test = (reg ^ (pol ? ~0 : 0)) & mask;
3482 3482
3483 if (test && --lp->timeout) { 3483 if (test && --lp->timeout) {
3484 reg = 100 | TIMER_CB; 3484 reg = 100 | TIMER_CB;
@@ -3992,10 +3992,10 @@ PCI_signature(char *name, struct de4x5_private *lp)
3992 ))))))); 3992 )))))));
3993 } 3993 }
3994 if (lp->chipset != DC21041) { 3994 if (lp->chipset != DC21041) {
3995 lp->useSROM = TRUE; /* card is not recognisably DEC */ 3995 lp->useSROM = true; /* card is not recognisably DEC */
3996 } 3996 }
3997 } else if ((lp->chipset & ~0x00ff) == DC2114x) { 3997 } else if ((lp->chipset & ~0x00ff) == DC2114x) {
3998 lp->useSROM = TRUE; 3998 lp->useSROM = true;
3999 } 3999 }
4000 4000
4001 return status; 4001 return status;
@@ -4216,7 +4216,7 @@ srom_repair(struct net_device *dev, int card)
4216 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom)); 4216 memset((char *)&lp->srom, 0, sizeof(struct de4x5_srom));
4217 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN); 4217 memcpy(lp->srom.ieee_addr, (char *)dev->dev_addr, ETH_ALEN);
4218 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100); 4218 memcpy(lp->srom.info, (char *)&srom_repair_info[SMC-1], 100);
4219 lp->useSROM = TRUE; 4219 lp->useSROM = true;
4220 break; 4220 break;
4221 } 4221 }
4222 4222
@@ -4392,7 +4392,7 @@ srom_infoleaf_info(struct net_device *dev)
4392 if (lp->chipset == infoleaf_array[i].chipset) break; 4392 if (lp->chipset == infoleaf_array[i].chipset) break;
4393 } 4393 }
4394 if (i == INFOLEAF_SIZE) { 4394 if (i == INFOLEAF_SIZE) {
4395 lp->useSROM = FALSE; 4395 lp->useSROM = false;
4396 printk("%s: Cannot find correct chipset for SROM decoding!\n", 4396 printk("%s: Cannot find correct chipset for SROM decoding!\n",
4397 dev->name); 4397 dev->name);
4398 return -ENXIO; 4398 return -ENXIO;
@@ -4409,7 +4409,7 @@ srom_infoleaf_info(struct net_device *dev)
4409 if (lp->device == *p) break; 4409 if (lp->device == *p) break;
4410 } 4410 }
4411 if (i == 0) { 4411 if (i == 0) {
4412 lp->useSROM = FALSE; 4412 lp->useSROM = false;
4413 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n", 4413 printk("%s: Cannot find correct PCI device [%d] for SROM decoding!\n",
4414 dev->name, lp->device); 4414 dev->name, lp->device);
4415 return -ENXIO; 4415 return -ENXIO;
@@ -4542,7 +4542,7 @@ dc21140_infoleaf(struct net_device *dev)
4542 } 4542 }
4543 lp->media = INIT; 4543 lp->media = INIT;
4544 lp->tcount = 0; 4544 lp->tcount = 0;
4545 lp->tx_enable = FALSE; 4545 lp->tx_enable = false;
4546 } 4546 }
4547 4547
4548 return next_tick & ~TIMER_CB; 4548 return next_tick & ~TIMER_CB;
@@ -4577,7 +4577,7 @@ dc21142_infoleaf(struct net_device *dev)
4577 } 4577 }
4578 lp->media = INIT; 4578 lp->media = INIT;
4579 lp->tcount = 0; 4579 lp->tcount = 0;
4580 lp->tx_enable = FALSE; 4580 lp->tx_enable = false;
4581 } 4581 }
4582 4582
4583 return next_tick & ~TIMER_CB; 4583 return next_tick & ~TIMER_CB;
@@ -4611,7 +4611,7 @@ dc21143_infoleaf(struct net_device *dev)
4611 } 4611 }
4612 lp->media = INIT; 4612 lp->media = INIT;
4613 lp->tcount = 0; 4613 lp->tcount = 0;
4614 lp->tx_enable = FALSE; 4614 lp->tx_enable = false;
4615 } 4615 }
4616 4616
4617 return next_tick & ~TIMER_CB; 4617 return next_tick & ~TIMER_CB;
@@ -4650,7 +4650,7 @@ compact_infoblock(struct net_device *dev, u_char count, u_char *p)
4650 lp->asBit = 1 << ((csr6 >> 1) & 0x07); 4650 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4651 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; 4651 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4652 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); 4652 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4653 lp->useMII = FALSE; 4653 lp->useMII = false;
4654 4654
4655 de4x5_switch_mac_port(dev); 4655 de4x5_switch_mac_port(dev);
4656 } 4656 }
@@ -4691,7 +4691,7 @@ type0_infoblock(struct net_device *dev, u_char count, u_char *p)
4691 lp->asBit = 1 << ((csr6 >> 1) & 0x07); 4691 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4692 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; 4692 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4693 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); 4693 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4694 lp->useMII = FALSE; 4694 lp->useMII = false;
4695 4695
4696 de4x5_switch_mac_port(dev); 4696 de4x5_switch_mac_port(dev);
4697 } 4697 }
@@ -4731,7 +4731,7 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p)
4731 lp->ibn = 1; 4731 lp->ibn = 1;
4732 lp->active = *p; 4732 lp->active = *p;
4733 lp->infoblock_csr6 = OMR_MII_100; 4733 lp->infoblock_csr6 = OMR_MII_100;
4734 lp->useMII = TRUE; 4734 lp->useMII = true;
4735 lp->infoblock_media = ANS; 4735 lp->infoblock_media = ANS;
4736 4736
4737 de4x5_switch_mac_port(dev); 4737 de4x5_switch_mac_port(dev);
@@ -4773,7 +4773,7 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p)
4773 lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; 4773 lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2;
4774 lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); 4774 lp->cache.gep = ((s32)(TWIDDLE(p)) << 16);
4775 lp->infoblock_csr6 = OMR_SIA; 4775 lp->infoblock_csr6 = OMR_SIA;
4776 lp->useMII = FALSE; 4776 lp->useMII = false;
4777 4777
4778 de4x5_switch_mac_port(dev); 4778 de4x5_switch_mac_port(dev);
4779 } 4779 }
@@ -4814,7 +4814,7 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p)
4814 lp->active = *p; 4814 lp->active = *p;
4815 if (MOTO_SROM_BUG) lp->active = 0; 4815 if (MOTO_SROM_BUG) lp->active = 0;
4816 lp->infoblock_csr6 = OMR_MII_100; 4816 lp->infoblock_csr6 = OMR_MII_100;
4817 lp->useMII = TRUE; 4817 lp->useMII = true;
4818 lp->infoblock_media = ANS; 4818 lp->infoblock_media = ANS;
4819 4819
4820 de4x5_switch_mac_port(dev); 4820 de4x5_switch_mac_port(dev);
@@ -4856,7 +4856,7 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p)
4856 lp->asBit = 1 << ((csr6 >> 1) & 0x07); 4856 lp->asBit = 1 << ((csr6 >> 1) & 0x07);
4857 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit; 4857 lp->asPolarity = ((csr6 & 0x80) ? -1 : 0) & lp->asBit;
4858 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18); 4858 lp->infoblock_csr6 = OMR_DEF | ((csr6 & 0x71) << 18);
4859 lp->useMII = FALSE; 4859 lp->useMII = false;
4860 4860
4861 de4x5_switch_mac_port(dev); 4861 de4x5_switch_mac_port(dev);
4862 } 4862 }
@@ -5077,7 +5077,7 @@ mii_get_phy(struct net_device *dev)
5077 int id; 5077 int id;
5078 5078
5079 lp->active = 0; 5079 lp->active = 0;
5080 lp->useMII = TRUE; 5080 lp->useMII = true;
5081 5081
5082 /* Search the MII address space for possible PHY devices */ 5082 /* Search the MII address space for possible PHY devices */
5083 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) { 5083 for (n=0, lp->mii_cnt=0, i=1; !((i==1) && (n==1)); i=(i+1)%DE4X5_MAX_MII) {
@@ -5127,7 +5127,7 @@ mii_get_phy(struct net_device *dev)
5127 de4x5_dbg_mii(dev, k); 5127 de4x5_dbg_mii(dev, k);
5128 } 5128 }
5129 } 5129 }
5130 if (!lp->mii_cnt) lp->useMII = FALSE; 5130 if (!lp->mii_cnt) lp->useMII = false;
5131 5131
5132 return lp->mii_cnt; 5132 return lp->mii_cnt;
5133} 5133}
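
The de4x5 rework above replaces the driver-private TRUE/FALSE/YES/NO macros (removed from de4x5.h below) with the kernel's bool type, and rewrites expressions such as (x ? TRUE : FALSE) as direct comparisons. A small sketch of the pattern, using placeholder names rather than the driver's fields:

	#include <linux/types.h>	/* bool, true, false */

	struct example_priv {
		bool fdx;		/* was: int fdx with TRUE/FALSE (~0/0) */
	};

	static void example_set_duplex(struct example_priv *p,
				       u16 ana, u16 anlpa, u16 fdam)
	{
		/* a nonzero bit test converts cleanly to bool */
		p->fdx = (ana & anlpa & fdam) != 0;
	}
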
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h
index 57226e5eb8a6..12af0cc037fb 100644
--- a/drivers/net/tulip/de4x5.h
+++ b/drivers/net/tulip/de4x5.h
@@ -893,15 +893,6 @@
893#define PHYS_ADDR_ONLY 1 /* Update the physical address only */ 893#define PHYS_ADDR_ONLY 1 /* Update the physical address only */
894 894
895/* 895/*
896** Booleans
897*/
898#define NO 0
899#define FALSE 0
900
901#define YES ~0
902#define TRUE ~0
903
904/*
905** Adapter state 896** Adapter state
906*/ 897*/
907#define INITIALISED 0 /* After h/w initialised and mem alloc'd */ 898#define INITIALISED 0 /* After h/w initialised and mem alloc'd */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a12f576391cf..86b690843362 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -192,7 +192,7 @@ static int init_status (struct usbnet *dev, struct usb_interface *intf)
192 usb_pipeendpoint(pipe), maxp, period); 192 usb_pipeendpoint(pipe), maxp, period);
193 } 193 }
194 } 194 }
195 return 0; 195 return 0;
196} 196}
197 197
198/* Passes this packet up the stack, updating its accounting. 198/* Passes this packet up the stack, updating its accounting.
@@ -326,7 +326,7 @@ static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
326 if (netif_running (dev->net) 326 if (netif_running (dev->net)
327 && netif_device_present (dev->net) 327 && netif_device_present (dev->net)
328 && !test_bit (EVENT_RX_HALT, &dev->flags)) { 328 && !test_bit (EVENT_RX_HALT, &dev->flags)) {
329 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)){ 329 switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
330 case -EPIPE: 330 case -EPIPE:
331 usbnet_defer_kevent (dev, EVENT_RX_HALT); 331 usbnet_defer_kevent (dev, EVENT_RX_HALT);
332 break; 332 break;
@@ -393,8 +393,8 @@ static void rx_complete (struct urb *urb)
393 entry->urb = NULL; 393 entry->urb = NULL;
394 394
395 switch (urb_status) { 395 switch (urb_status) {
396 // success 396 /* success */
397 case 0: 397 case 0:
398 if (skb->len < dev->net->hard_header_len) { 398 if (skb->len < dev->net->hard_header_len) {
399 entry->state = rx_cleanup; 399 entry->state = rx_cleanup;
400 dev->stats.rx_errors++; 400 dev->stats.rx_errors++;
@@ -404,28 +404,30 @@ static void rx_complete (struct urb *urb)
404 } 404 }
405 break; 405 break;
406 406
407 // stalls need manual reset. this is rare ... except that 407 /* stalls need manual reset. this is rare ... except that
408 // when going through USB 2.0 TTs, unplug appears this way. 408 * when going through USB 2.0 TTs, unplug appears this way.
409 // we avoid the highspeed version of the ETIMEOUT/EILSEQ 409 * we avoid the highspeed version of the ETIMEOUT/EILSEQ
410 // storm, recovering as needed. 410 * storm, recovering as needed.
411 case -EPIPE: 411 */
412 case -EPIPE:
412 dev->stats.rx_errors++; 413 dev->stats.rx_errors++;
413 usbnet_defer_kevent (dev, EVENT_RX_HALT); 414 usbnet_defer_kevent (dev, EVENT_RX_HALT);
414 // FALLTHROUGH 415 // FALLTHROUGH
415 416
416 // software-driven interface shutdown 417 /* software-driven interface shutdown */
417 case -ECONNRESET: // async unlink 418 case -ECONNRESET: /* async unlink */
418 case -ESHUTDOWN: // hardware gone 419 case -ESHUTDOWN: /* hardware gone */
419 if (netif_msg_ifdown (dev)) 420 if (netif_msg_ifdown (dev))
420 devdbg (dev, "rx shutdown, code %d", urb_status); 421 devdbg (dev, "rx shutdown, code %d", urb_status);
421 goto block; 422 goto block;
422 423
423 // we get controller i/o faults during khubd disconnect() delays. 424 /* we get controller i/o faults during khubd disconnect() delays.
424 // throttle down resubmits, to avoid log floods; just temporarily, 425 * throttle down resubmits, to avoid log floods; just temporarily,
425 // so we still recover when the fault isn't a khubd delay. 426 * so we still recover when the fault isn't a khubd delay.
426 case -EPROTO: 427 */
427 case -ETIME: 428 case -EPROTO:
428 case -EILSEQ: 429 case -ETIME:
430 case -EILSEQ:
429 dev->stats.rx_errors++; 431 dev->stats.rx_errors++;
430 if (!timer_pending (&dev->delay)) { 432 if (!timer_pending (&dev->delay)) {
431 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES); 433 mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
@@ -438,12 +440,12 @@ block:
438 urb = NULL; 440 urb = NULL;
439 break; 441 break;
440 442
441 // data overrun ... flush fifo? 443 /* data overrun ... flush fifo? */
442 case -EOVERFLOW: 444 case -EOVERFLOW:
443 dev->stats.rx_over_errors++; 445 dev->stats.rx_over_errors++;
444 // FALLTHROUGH 446 // FALLTHROUGH
445 447
446 default: 448 default:
447 entry->state = rx_cleanup; 449 entry->state = rx_cleanup;
448 dev->stats.rx_errors++; 450 dev->stats.rx_errors++;
449 if (netif_msg_rx_err (dev)) 451 if (netif_msg_rx_err (dev))
@@ -471,22 +473,22 @@ static void intr_complete (struct urb *urb)
471 int status = urb->status; 473 int status = urb->status;
472 474
473 switch (status) { 475 switch (status) {
474 /* success */ 476 /* success */
475 case 0: 477 case 0:
476 dev->driver_info->status(dev, urb); 478 dev->driver_info->status(dev, urb);
477 break; 479 break;
478 480
479 /* software-driven interface shutdown */ 481 /* software-driven interface shutdown */
480 case -ENOENT: // urb killed 482 case -ENOENT: /* urb killed */
481 case -ESHUTDOWN: // hardware gone 483 case -ESHUTDOWN: /* hardware gone */
482 if (netif_msg_ifdown (dev)) 484 if (netif_msg_ifdown (dev))
483 devdbg (dev, "intr shutdown, code %d", status); 485 devdbg (dev, "intr shutdown, code %d", status);
484 return; 486 return;
485 487
486 /* NOTE: not throttling like RX/TX, since this endpoint 488 /* NOTE: not throttling like RX/TX, since this endpoint
487 * already polls infrequently 489 * already polls infrequently
488 */ 490 */
489 default: 491 default:
490 devdbg (dev, "intr status %d", status); 492 devdbg (dev, "intr status %d", status);
491 break; 493 break;
492 } 494 }
@@ -569,9 +571,9 @@ static int usbnet_stop (struct net_device *net)
569 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq); 571 temp = unlink_urbs (dev, &dev->txq) + unlink_urbs (dev, &dev->rxq);
570 572
571 // maybe wait for deletions to finish. 573 // maybe wait for deletions to finish.
572 while (!skb_queue_empty(&dev->rxq) && 574 while (!skb_queue_empty(&dev->rxq)
573 !skb_queue_empty(&dev->txq) && 575 && !skb_queue_empty(&dev->txq)
574 !skb_queue_empty(&dev->done)) { 576 && !skb_queue_empty(&dev->done)) {
575 msleep(UNLINK_TIMEOUT_MS); 577 msleep(UNLINK_TIMEOUT_MS);
576 if (netif_msg_ifdown (dev)) 578 if (netif_msg_ifdown (dev))
577 devdbg (dev, "waited for %d urb completions", temp); 579 devdbg (dev, "waited for %d urb completions", temp);
@@ -1011,16 +1013,16 @@ static void usbnet_bh (unsigned long param)
1011 while ((skb = skb_dequeue (&dev->done))) { 1013 while ((skb = skb_dequeue (&dev->done))) {
1012 entry = (struct skb_data *) skb->cb; 1014 entry = (struct skb_data *) skb->cb;
1013 switch (entry->state) { 1015 switch (entry->state) {
1014 case rx_done: 1016 case rx_done:
1015 entry->state = rx_cleanup; 1017 entry->state = rx_cleanup;
1016 rx_process (dev, skb); 1018 rx_process (dev, skb);
1017 continue; 1019 continue;
1018 case tx_done: 1020 case tx_done:
1019 case rx_cleanup: 1021 case rx_cleanup:
1020 usb_free_urb (entry->urb); 1022 usb_free_urb (entry->urb);
1021 dev_kfree_skb (skb); 1023 dev_kfree_skb (skb);
1022 continue; 1024 continue;
1023 default: 1025 default:
1024 devdbg (dev, "bogus skb state %d", entry->state); 1026 devdbg (dev, "bogus skb state %d", entry->state);
1025 } 1027 }
1026 } 1028 }
diff --git a/drivers/net/usb/usbnet.h b/drivers/net/usb/usbnet.h
index a3f8b9e7bc00..a6c5820767de 100644
--- a/drivers/net/usb/usbnet.h
+++ b/drivers/net/usb/usbnet.h
@@ -47,7 +47,7 @@ struct usbnet {
47 unsigned long data [5]; 47 unsigned long data [5];
48 u32 xid; 48 u32 xid;
49 u32 hard_mtu; /* count any extra framing */ 49 u32 hard_mtu; /* count any extra framing */
50 size_t rx_urb_size; /* size for rx urbs */ 50 size_t rx_urb_size; /* size for rx urbs */
51 struct mii_if_info mii; 51 struct mii_if_info mii;
52 52
53 /* various kinds of pending driver work */ 53 /* various kinds of pending driver work */
@@ -85,7 +85,7 @@ struct driver_info {
85#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */ 85#define FLAG_NO_SETINT 0x0010 /* device can't set_interface() */
86#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */ 86#define FLAG_ETHER 0x0020 /* maybe use "eth%d" names */
87 87
88#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */ 88#define FLAG_FRAMING_AX 0x0040 /* AX88772/178 packets */
89 89
90 /* init device ... can sleep, or cause probe() failure */ 90 /* init device ... can sleep, or cause probe() failure */
91 int (*bind)(struct usbnet *, struct usb_interface *); 91 int (*bind)(struct usbnet *, struct usb_interface *);
@@ -146,9 +146,9 @@ extern void usbnet_cdc_unbind (struct usbnet *, struct usb_interface *);
146 146
147/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */ 147/* CDC and RNDIS support the same host-chosen packet filters for IN transfers */
148#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \ 148#define DEFAULT_FILTER (USB_CDC_PACKET_TYPE_BROADCAST \
149 |USB_CDC_PACKET_TYPE_ALL_MULTICAST \ 149 |USB_CDC_PACKET_TYPE_ALL_MULTICAST \
150 |USB_CDC_PACKET_TYPE_PROMISCUOUS \ 150 |USB_CDC_PACKET_TYPE_PROMISCUOUS \
151 |USB_CDC_PACKET_TYPE_DIRECTED) 151 |USB_CDC_PACKET_TYPE_DIRECTED)
152 152
153 153
154/* we record the state for each of our queued skbs */ 154/* we record the state for each of our queued skbs */
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index fa2399cbd5ca..ae27af0141c0 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -546,6 +546,18 @@ config USB_ZD1201
546 To compile this driver as a module, choose M here: the 546 To compile this driver as a module, choose M here: the
547 module will be called zd1201. 547 module will be called zd1201.
548 548
549config RTL8187
550 tristate "Realtek 8187 USB support"
551 depends on MAC80211 && USB && WLAN_80211 && EXPERIMENTAL
552 select EEPROM_93CX6
553 ---help---
554 This is a driver for RTL8187 based cards.
555 These are USB based chips found in cards such as:
556
557 Netgear WG111v2
558
559 Thanks to Realtek for their support!
560
549source "drivers/net/wireless/hostap/Kconfig" 561source "drivers/net/wireless/hostap/Kconfig"
550source "drivers/net/wireless/bcm43xx/Kconfig" 562source "drivers/net/wireless/bcm43xx/Kconfig"
551source "drivers/net/wireless/zd1211rw/Kconfig" 563source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index d2124602263b..ef35bc6c4a22 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -44,3 +44,6 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o
44 44
45obj-$(CONFIG_USB_ZD1201) += zd1201.o 45obj-$(CONFIG_USB_ZD1201) += zd1201.o
46obj-$(CONFIG_LIBERTAS_USB) += libertas/ 46obj-$(CONFIG_LIBERTAS_USB) += libertas/
47
48rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o
49obj-$(CONFIG_RTL8187) += rtl8187.o
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
index b37f1e348700..d779199c30d0 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_phy.c
@@ -1638,7 +1638,7 @@ void bcm43xx_phy_set_baseband_attenuation(struct bcm43xx_private *bcm,
1638 return; 1638 return;
1639 } 1639 }
1640 1640
1641 if (phy->analog > 1) { 1641 if (phy->analog == 1) {
1642 value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C; 1642 value = bcm43xx_phy_read(bcm, 0x0060) & ~0x003C;
1643 value |= (baseband_attenuation << 2) & 0x003C; 1643 value |= (baseband_attenuation << 2) & 0x003C;
1644 } else { 1644 } else {
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index 5b3abd54d0e5..90900525379c 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -326,7 +326,6 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
326 char *p = page; 326 char *p = page;
327 struct ap_data *ap = (struct ap_data *) data; 327 struct ap_data *ap = (struct ap_data *) data;
328 char *policy_txt; 328 char *policy_txt;
329 struct list_head *ptr;
330 struct mac_entry *entry; 329 struct mac_entry *entry;
331 330
332 if (off != 0) { 331 if (off != 0) {
@@ -352,14 +351,12 @@ static int ap_control_proc_read(char *page, char **start, off_t off,
352 p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries); 351 p += sprintf(p, "MAC entries: %u\n", ap->mac_restrictions.entries);
353 p += sprintf(p, "MAC list:\n"); 352 p += sprintf(p, "MAC list:\n");
354 spin_lock_bh(&ap->mac_restrictions.lock); 353 spin_lock_bh(&ap->mac_restrictions.lock);
355 for (ptr = ap->mac_restrictions.mac_list.next; 354 list_for_each_entry(entry, &ap->mac_restrictions.mac_list, list) {
356 ptr != &ap->mac_restrictions.mac_list; ptr = ptr->next) {
357 if (p - page > PAGE_SIZE - 80) { 355 if (p - page > PAGE_SIZE - 80) {
358 p += sprintf(p, "All entries did not fit one page.\n"); 356 p += sprintf(p, "All entries did not fit one page.\n");
359 break; 357 break;
360 } 358 }
361 359
362 entry = list_entry(ptr, struct mac_entry, list);
363 p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr)); 360 p += sprintf(p, MACSTR "\n", MAC2STR(entry->addr));
364 } 361 }
365 spin_unlock_bh(&ap->mac_restrictions.lock); 362 spin_unlock_bh(&ap->mac_restrictions.lock);
@@ -413,7 +410,6 @@ int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac)
413static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions, 410static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
414 u8 *mac) 411 u8 *mac)
415{ 412{
416 struct list_head *ptr;
417 struct mac_entry *entry; 413 struct mac_entry *entry;
418 int found = 0; 414 int found = 0;
419 415
@@ -421,10 +417,7 @@ static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions,
421 return 0; 417 return 0;
422 418
423 spin_lock_bh(&mac_restrictions->lock); 419 spin_lock_bh(&mac_restrictions->lock);
424 for (ptr = mac_restrictions->mac_list.next; 420 list_for_each_entry(entry, &mac_restrictions->mac_list, list) {
425 ptr != &mac_restrictions->mac_list; ptr = ptr->next) {
426 entry = list_entry(ptr, struct mac_entry, list);
427
428 if (memcmp(entry->addr, mac, ETH_ALEN) == 0) { 421 if (memcmp(entry->addr, mac, ETH_ALEN) == 0) {
429 found = 1; 422 found = 1;
430 break; 423 break;
@@ -519,7 +512,7 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off,
519{ 512{
520 char *p = page; 513 char *p = page;
521 struct ap_data *ap = (struct ap_data *) data; 514 struct ap_data *ap = (struct ap_data *) data;
522 struct list_head *ptr; 515 struct sta_info *sta;
523 int i; 516 int i;
524 517
525 if (off > PROC_LIMIT) { 518 if (off > PROC_LIMIT) {
@@ -529,9 +522,7 @@ static int prism2_ap_proc_read(char *page, char **start, off_t off,
529 522
530 p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n"); 523 p += sprintf(p, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n");
531 spin_lock_bh(&ap->sta_table_lock); 524 spin_lock_bh(&ap->sta_table_lock);
532 for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) { 525 list_for_each_entry(sta, &ap->sta_list, list) {
533 struct sta_info *sta = (struct sta_info *) ptr;
534
535 if (!sta->ap) 526 if (!sta->ap)
536 continue; 527 continue;
537 528
@@ -861,7 +852,7 @@ void hostap_init_ap_proc(local_info_t *local)
861 852
862void hostap_free_data(struct ap_data *ap) 853void hostap_free_data(struct ap_data *ap)
863{ 854{
864 struct list_head *n, *ptr; 855 struct sta_info *n, *sta;
865 856
866 if (ap == NULL || !ap->initialized) { 857 if (ap == NULL || !ap->initialized) {
867 printk(KERN_DEBUG "hostap_free_data: ap has not yet been " 858 printk(KERN_DEBUG "hostap_free_data: ap has not yet been "
@@ -875,8 +866,7 @@ void hostap_free_data(struct ap_data *ap)
875 ap->crypt = ap->crypt_priv = NULL; 866 ap->crypt = ap->crypt_priv = NULL;
876#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ 867#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
877 868
878 list_for_each_safe(ptr, n, &ap->sta_list) { 869 list_for_each_entry_safe(sta, n, &ap->sta_list, list) {
879 struct sta_info *sta = list_entry(ptr, struct sta_info, list);
880 ap_sta_hash_del(ap, sta); 870 ap_sta_hash_del(ap, sta);
881 list_del(&sta->list); 871 list_del(&sta->list);
882 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) 872 if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local)
@@ -2704,6 +2694,8 @@ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx)
2704 2694
2705 if (hdr->addr1[0] & 0x01) { 2695 if (hdr->addr1[0] & 0x01) {
2706 /* broadcast/multicast frame - no AP related processing */ 2696 /* broadcast/multicast frame - no AP related processing */
2697 if (local->ap->num_sta <= 0)
2698 ret = AP_TX_DROP;
2707 goto out; 2699 goto out;
2708 } 2700 }
2709 2701
@@ -3198,15 +3190,14 @@ int hostap_update_rx_stats(struct ap_data *ap,
3198 3190
3199void hostap_update_rates(local_info_t *local) 3191void hostap_update_rates(local_info_t *local)
3200{ 3192{
3201 struct list_head *ptr; 3193 struct sta_info *sta;
3202 struct ap_data *ap = local->ap; 3194 struct ap_data *ap = local->ap;
3203 3195
3204 if (!ap) 3196 if (!ap)
3205 return; 3197 return;
3206 3198
3207 spin_lock_bh(&ap->sta_table_lock); 3199 spin_lock_bh(&ap->sta_table_lock);
3208 for (ptr = ap->sta_list.next; ptr != &ap->sta_list; ptr = ptr->next) { 3200 list_for_each_entry(sta, &ap->sta_list, list) {
3209 struct sta_info *sta = (struct sta_info *) ptr;
3210 prism2_check_tx_rates(sta); 3201 prism2_check_tx_rates(sta);
3211 } 3202 }
3212 spin_unlock_bh(&ap->sta_table_lock); 3203 spin_unlock_bh(&ap->sta_table_lock);
@@ -3242,11 +3233,10 @@ void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent,
3242void hostap_add_wds_links(local_info_t *local) 3233void hostap_add_wds_links(local_info_t *local)
3243{ 3234{
3244 struct ap_data *ap = local->ap; 3235 struct ap_data *ap = local->ap;
3245 struct list_head *ptr; 3236 struct sta_info *sta;
3246 3237
3247 spin_lock_bh(&ap->sta_table_lock); 3238 spin_lock_bh(&ap->sta_table_lock);
3248 list_for_each(ptr, &ap->sta_list) { 3239 list_for_each_entry(sta, &ap->sta_list, list) {
3249 struct sta_info *sta = list_entry(ptr, struct sta_info, list);
3250 if (sta->ap) 3240 if (sta->ap)
3251 hostap_wds_link_oper(local, sta->addr, WDS_ADD); 3241 hostap_wds_link_oper(local, sta->addr, WDS_ADD);
3252 } 3242 }
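
The hostap_ap conversions above drop the open-coded "ptr = head->next; ...; ptr = ptr->next" walks and the list_for_each()/list_entry() pairs in favour of list_for_each_entry(), which hands back the containing structure directly; hostap_free_data() uses the _safe variant so entries can be unlinked while iterating. A generic sketch of the idiom (the struct and list head here are illustrative):

	#include <linux/list.h>

	struct example_entry {
		struct list_head list;
		int value;
	};

	static int example_sum(struct list_head *head)
	{
		struct example_entry *e;
		int sum = 0;

		/* no explicit list_entry(): the macro does the container math */
		list_for_each_entry(e, head, list)
			sum += e->value;
		return sum;
	}
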
diff --git a/drivers/net/wireless/hostap/hostap_config.h b/drivers/net/wireless/hostap/hostap_config.h
index c090a5aebb58..30acd39d76a2 100644
--- a/drivers/net/wireless/hostap/hostap_config.h
+++ b/drivers/net/wireless/hostap/hostap_config.h
@@ -1,8 +1,6 @@
1#ifndef HOSTAP_CONFIG_H 1#ifndef HOSTAP_CONFIG_H
2#define HOSTAP_CONFIG_H 2#define HOSTAP_CONFIG_H
3 3
4#define PRISM2_VERSION "0.4.4-kernel"
5
6/* In the previous versions of Host AP driver, support for user space version 4/* In the previous versions of Host AP driver, support for user space version
7 * of IEEE 802.11 management (hostapd) used to be disabled in the default 5 * of IEEE 802.11 management (hostapd) used to be disabled in the default
8 * configuration. From now on, support for hostapd is always included and it is 6 * configuration. From now on, support for hostapd is always included and it is
diff --git a/drivers/net/wireless/hostap/hostap_cs.c b/drivers/net/wireless/hostap/hostap_cs.c
index ee1532b62e42..30e723f65979 100644
--- a/drivers/net/wireless/hostap/hostap_cs.c
+++ b/drivers/net/wireless/hostap/hostap_cs.c
@@ -22,7 +22,6 @@
22#include "hostap_wlan.h" 22#include "hostap_wlan.h"
23 23
24 24
25static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
26static dev_info_t dev_info = "hostap_cs"; 25static dev_info_t dev_info = "hostap_cs";
27 26
28MODULE_AUTHOR("Jouni Malinen"); 27MODULE_AUTHOR("Jouni Malinen");
@@ -30,7 +29,6 @@ MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
30 "cards (PC Card)."); 29 "cards (PC Card).");
31MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)"); 30MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PC Card)");
32MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
33MODULE_VERSION(PRISM2_VERSION);
34 32
35 33
36static int ignore_cis_vcc; 34static int ignore_cis_vcc;
@@ -910,14 +908,12 @@ static struct pcmcia_driver hostap_driver = {
910 908
911static int __init init_prism2_pccard(void) 909static int __init init_prism2_pccard(void)
912{ 910{
913 printk(KERN_INFO "%s: %s\n", dev_info, version);
914 return pcmcia_register_driver(&hostap_driver); 911 return pcmcia_register_driver(&hostap_driver);
915} 912}
916 913
917static void __exit exit_prism2_pccard(void) 914static void __exit exit_prism2_pccard(void)
918{ 915{
919 pcmcia_unregister_driver(&hostap_driver); 916 pcmcia_unregister_driver(&hostap_driver);
920 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
921} 917}
922 918
923 919
diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c
index cdea7f71b9eb..8c71077d653c 100644
--- a/drivers/net/wireless/hostap/hostap_ioctl.c
+++ b/drivers/net/wireless/hostap/hostap_ioctl.c
@@ -3893,8 +3893,6 @@ static void prism2_get_drvinfo(struct net_device *dev,
3893 local = iface->local; 3893 local = iface->local;
3894 3894
3895 strncpy(info->driver, "hostap", sizeof(info->driver) - 1); 3895 strncpy(info->driver, "hostap", sizeof(info->driver) - 1);
3896 strncpy(info->version, PRISM2_VERSION,
3897 sizeof(info->version) - 1);
3898 snprintf(info->fw_version, sizeof(info->fw_version) - 1, 3896 snprintf(info->fw_version, sizeof(info->fw_version) - 1,
3899 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, 3897 "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff,
3900 (local->sta_fw_ver >> 8) & 0xff, 3898 (local->sta_fw_ver >> 8) & 0xff,
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 4743426cf6ad..446de51bab74 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -37,7 +37,6 @@
37MODULE_AUTHOR("Jouni Malinen"); 37MODULE_AUTHOR("Jouni Malinen");
38MODULE_DESCRIPTION("Host AP common routines"); 38MODULE_DESCRIPTION("Host AP common routines");
39MODULE_LICENSE("GPL"); 39MODULE_LICENSE("GPL");
40MODULE_VERSION(PRISM2_VERSION);
41 40
42#define TX_TIMEOUT (2 * HZ) 41#define TX_TIMEOUT (2 * HZ)
43 42
diff --git a/drivers/net/wireless/hostap/hostap_pci.c b/drivers/net/wireless/hostap/hostap_pci.c
index db4899ed4bb1..0cd48d151f5e 100644
--- a/drivers/net/wireless/hostap/hostap_pci.c
+++ b/drivers/net/wireless/hostap/hostap_pci.c
@@ -20,7 +20,6 @@
20#include "hostap_wlan.h" 20#include "hostap_wlan.h"
21 21
22 22
23static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
24static char *dev_info = "hostap_pci"; 23static char *dev_info = "hostap_pci";
25 24
26 25
@@ -29,7 +28,6 @@ MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN "
29 "PCI cards."); 28 "PCI cards.");
30MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards"); 29MODULE_SUPPORTED_DEVICE("Intersil Prism2.5-based WLAN PCI cards");
31MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
32MODULE_VERSION(PRISM2_VERSION);
33 31
34 32
35/* struct local_info::hw_priv */ 33/* struct local_info::hw_priv */
@@ -462,8 +460,6 @@ static struct pci_driver prism2_pci_drv_id = {
462 460
463static int __init init_prism2_pci(void) 461static int __init init_prism2_pci(void)
464{ 462{
465 printk(KERN_INFO "%s: %s\n", dev_info, version);
466
467 return pci_register_driver(&prism2_pci_drv_id); 463 return pci_register_driver(&prism2_pci_drv_id);
468} 464}
469 465
@@ -471,7 +467,6 @@ static int __init init_prism2_pci(void)
471static void __exit exit_prism2_pci(void) 467static void __exit exit_prism2_pci(void)
472{ 468{
473 pci_unregister_driver(&prism2_pci_drv_id); 469 pci_unregister_driver(&prism2_pci_drv_id);
474 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
475} 470}
476 471
477 472
diff --git a/drivers/net/wireless/hostap/hostap_plx.c b/drivers/net/wireless/hostap/hostap_plx.c
index f0fd5ecdb24d..0183df757b3e 100644
--- a/drivers/net/wireless/hostap/hostap_plx.c
+++ b/drivers/net/wireless/hostap/hostap_plx.c
@@ -23,7 +23,6 @@
23#include "hostap_wlan.h" 23#include "hostap_wlan.h"
24 24
25 25
26static char *version = PRISM2_VERSION " (Jouni Malinen <j@w1.fi>)";
27static char *dev_info = "hostap_plx"; 26static char *dev_info = "hostap_plx";
28 27
29 28
@@ -32,7 +31,6 @@ MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN "
32 "cards (PLX)."); 31 "cards (PLX).");
33MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)"); 32MODULE_SUPPORTED_DEVICE("Intersil Prism2-based WLAN cards (PLX)");
34MODULE_LICENSE("GPL"); 33MODULE_LICENSE("GPL");
35MODULE_VERSION(PRISM2_VERSION);
36 34
37 35
38static int ignore_cis; 36static int ignore_cis;
@@ -623,8 +621,6 @@ static struct pci_driver prism2_plx_drv_id = {
623 621
624static int __init init_prism2_plx(void) 622static int __init init_prism2_plx(void)
625{ 623{
626 printk(KERN_INFO "%s: %s\n", dev_info, version);
627
628 return pci_register_driver(&prism2_plx_drv_id); 624 return pci_register_driver(&prism2_plx_drv_id);
629} 625}
630 626
@@ -632,7 +628,6 @@ static int __init init_prism2_plx(void)
632static void __exit exit_prism2_plx(void) 628static void __exit exit_prism2_plx(void)
633{ 629{
634 pci_unregister_driver(&prism2_plx_drv_id); 630 pci_unregister_driver(&prism2_plx_drv_id);
635 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
636} 631}
637 632
638 633
diff --git a/drivers/net/wireless/rtl8187.h b/drivers/net/wireless/rtl8187.h
new file mode 100644
index 000000000000..6124e467b156
--- /dev/null
+++ b/drivers/net/wireless/rtl8187.h
@@ -0,0 +1,145 @@
1/*
2 * Definitions for RTL8187 hardware
3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
6 *
7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef RTL8187_H
16#define RTL8187_H
17
18#include "rtl818x.h"
19
20#define RTL8187_EEPROM_TXPWR_BASE 0x05
21#define RTL8187_EEPROM_MAC_ADDR 0x07
22#define RTL8187_EEPROM_TXPWR_CHAN_1 0x16 /* 3 channels */
23#define RTL8187_EEPROM_TXPWR_CHAN_6 0x1B /* 2 channels */
24#define RTL8187_EEPROM_TXPWR_CHAN_4 0x3D /* 2 channels */
25
26#define RTL8187_REQT_READ 0xC0
27#define RTL8187_REQT_WRITE 0x40
28#define RTL8187_REQ_GET_REG 0x05
29#define RTL8187_REQ_SET_REG 0x05
30
31#define RTL8187_MAX_RX 0x9C4
32
33struct rtl8187_rx_info {
34 struct urb *urb;
35 struct ieee80211_hw *dev;
36};
37
38struct rtl8187_rx_hdr {
39 __le16 len;
40 __le16 rate;
41 u8 noise;
42 u8 signal;
43 u8 agc;
44 u8 reserved;
45 __le64 mac_time;
46} __attribute__((packed));
47
48struct rtl8187_tx_info {
49 struct ieee80211_tx_control *control;
50 struct urb *urb;
51 struct ieee80211_hw *dev;
52};
53
54struct rtl8187_tx_hdr {
55 __le32 flags;
56#define RTL8187_TX_FLAG_NO_ENCRYPT (1 << 15)
57#define RTL8187_TX_FLAG_MORE_FRAG (1 << 17)
58#define RTL8187_TX_FLAG_CTS (1 << 18)
59#define RTL8187_TX_FLAG_RTS (1 << 23)
60 __le16 rts_duration;
61 __le16 len;
62 __le32 retry;
63} __attribute__((packed));
64
65struct rtl8187_priv {
66 /* common between rtl818x drivers */
67 struct rtl818x_csr *map;
68 void (*rf_init)(struct ieee80211_hw *);
69 int mode;
70
71 /* rtl8187 specific */
72 struct ieee80211_channel channels[14];
73 struct ieee80211_rate rates[12];
74 struct ieee80211_hw_mode modes[2];
75 struct usb_device *udev;
76 u8 *hwaddr;
77 u16 txpwr_base;
78 u8 asic_rev;
79 struct sk_buff_head rx_queue;
80};
81
82void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data);
83
84static inline u8 rtl818x_ioread8(struct rtl8187_priv *priv, u8 *addr)
85{
86 u8 val;
87
88 usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
89 RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
90 (unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
91
92 return val;
93}
94
95static inline u16 rtl818x_ioread16(struct rtl8187_priv *priv, __le16 *addr)
96{
97 __le16 val;
98
99 usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
100 RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
101 (unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
102
103 return le16_to_cpu(val);
104}
105
106static inline u32 rtl818x_ioread32(struct rtl8187_priv *priv, __le32 *addr)
107{
108 __le32 val;
109
110 usb_control_msg(priv->udev, usb_rcvctrlpipe(priv->udev, 0),
111 RTL8187_REQ_GET_REG, RTL8187_REQT_READ,
112 (unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
113
114 return le32_to_cpu(val);
115}
116
117static inline void rtl818x_iowrite8(struct rtl8187_priv *priv,
118 u8 *addr, u8 val)
119{
120 usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
121 RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
122 (unsigned long)addr, 0, &val, sizeof(val), HZ / 2);
123}
124
125static inline void rtl818x_iowrite16(struct rtl8187_priv *priv,
126 __le16 *addr, u16 val)
127{
128 __le16 buf = cpu_to_le16(val);
129
130 usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
131 RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
132 (unsigned long)addr, 0, &buf, sizeof(buf), HZ / 2);
133}
134
135static inline void rtl818x_iowrite32(struct rtl8187_priv *priv,
136 __le32 *addr, u32 val)
137{
138 __le32 buf = cpu_to_le32(val);
139
140 usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
141 RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
142 (unsigned long)addr, 0, &buf, sizeof(buf), HZ / 2);
143}
144
145#endif /* RTL8187_H */
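
The register accessors above never dereference the pointer they are given; the pointer only encodes the register's location in the chip's I/O space, which is carried in the wValue field of the USB control request (the probe code later points priv->map at the fixed base 0xFF00). A minimal sketch of the idea, not part of the patch:

static u8 sketch_read_cmd(struct rtl8187_priv *priv)
{
	/*
	 * priv->map is (struct rtl818x_csr *)0xFF00, so the pointer
	 * arithmetic yields 0xFF00 + offsetof(struct rtl818x_csr, CMD);
	 * no memory behind the pointer is ever touched -- the value is
	 * simply placed in wValue of the GET_REG control request.
	 */
	return rtl818x_ioread8(priv, &priv->map->CMD);
}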
diff --git a/drivers/net/wireless/rtl8187_dev.c b/drivers/net/wireless/rtl8187_dev.c
new file mode 100644
index 000000000000..cea85894b7f2
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_dev.c
@@ -0,0 +1,731 @@
1/*
2 * Linux device driver for RTL8187
3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
6 *
7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 *
10 * Magic delays and register offsets below are taken from the original
11 * r8187 driver sources. Thanks to Realtek for their support!
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License version 2 as
15 * published by the Free Software Foundation.
16 */
17
18#include <linux/init.h>
19#include <linux/usb.h>
20#include <linux/delay.h>
21#include <linux/etherdevice.h>
22#include <linux/eeprom_93cx6.h>
23#include <net/mac80211.h>
24
25#include "rtl8187.h"
26#include "rtl8187_rtl8225.h"
27
28MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
29MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
30MODULE_DESCRIPTION("RTL8187 USB wireless driver");
31MODULE_LICENSE("GPL");
32
33static struct usb_device_id rtl8187_table[] __devinitdata = {
34 /* Realtek */
35 {USB_DEVICE(0x0bda, 0x8187)},
36 /* Netgear */
37 {USB_DEVICE(0x0846, 0x6100)},
38 {USB_DEVICE(0x0846, 0x6a00)},
39 {}
40};
41
42MODULE_DEVICE_TABLE(usb, rtl8187_table);
43
44void rtl8187_write_phy(struct ieee80211_hw *dev, u8 addr, u32 data)
45{
46 struct rtl8187_priv *priv = dev->priv;
47
48 data <<= 8;
49 data |= addr | 0x80;
50
51 rtl818x_iowrite8(priv, &priv->map->PHY[3], (data >> 24) & 0xFF);
52 rtl818x_iowrite8(priv, &priv->map->PHY[2], (data >> 16) & 0xFF);
53 rtl818x_iowrite8(priv, &priv->map->PHY[1], (data >> 8) & 0xFF);
54 rtl818x_iowrite8(priv, &priv->map->PHY[0], data & 0xFF);
55
56 msleep(1);
57}
58
59static void rtl8187_tx_cb(struct urb *urb)
60{
61 struct ieee80211_tx_status status = { {0} };
62 struct sk_buff *skb = (struct sk_buff *)urb->context;
63 struct rtl8187_tx_info *info = (struct rtl8187_tx_info *)skb->cb;
64
65 usb_free_urb(info->urb);
66 if (info->control)
67 memcpy(&status.control, info->control, sizeof(status.control));
68 kfree(info->control);
69 skb_pull(skb, sizeof(struct rtl8187_tx_hdr));
70 status.flags |= IEEE80211_TX_STATUS_ACK;
71 ieee80211_tx_status_irqsafe(info->dev, skb, &status);
72}
73
74static int rtl8187_tx(struct ieee80211_hw *dev, struct sk_buff *skb,
75 struct ieee80211_tx_control *control)
76{
77 struct rtl8187_priv *priv = dev->priv;
78 struct rtl8187_tx_hdr *hdr;
79 struct rtl8187_tx_info *info;
80 struct urb *urb;
81 u32 tmp;
82
83 urb = usb_alloc_urb(0, GFP_ATOMIC);
84 if (!urb) {
85 kfree_skb(skb);
86 return 0;
87 }
88
89 hdr = (struct rtl8187_tx_hdr *)skb_push(skb, sizeof(*hdr));
90 tmp = skb->len - sizeof(*hdr);
91 tmp |= RTL8187_TX_FLAG_NO_ENCRYPT;
92 tmp |= control->rts_cts_rate << 19;
93 tmp |= control->tx_rate << 24;
94 if (ieee80211_get_morefrag((struct ieee80211_hdr *)skb))
95 tmp |= RTL8187_TX_FLAG_MORE_FRAG;
96 if (control->flags & IEEE80211_TXCTL_USE_RTS_CTS) {
97 tmp |= RTL8187_TX_FLAG_RTS;
98 hdr->rts_duration =
99 ieee80211_rts_duration(dev, skb->len, control);
100 }
101 if (control->flags & IEEE80211_TXCTL_USE_CTS_PROTECT)
102 tmp |= RTL8187_TX_FLAG_CTS;
103 hdr->flags = cpu_to_le32(tmp);
104 hdr->len = 0;
105 tmp = control->retry_limit << 8;
106 hdr->retry = cpu_to_le32(tmp);
107
108 info = (struct rtl8187_tx_info *)skb->cb;
109 info->control = kmemdup(control, sizeof(*control), GFP_ATOMIC);
110 info->urb = urb;
111 info->dev = dev;
112 usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, 2),
113 hdr, skb->len, rtl8187_tx_cb, skb);
114 usb_submit_urb(urb, GFP_ATOMIC);
115
116 return 0;
117}
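
The first descriptor word packs the frame length in its low bits and the control flags above them; a worked example of the packing done in rtl8187_tx() above, using hypothetical length and rate values:

/* Sketch only -- same packing as above, for concrete (made-up) numbers. */
u32 flags = 64;                        /* frame length after the header */
flags |= RTL8187_TX_FLAG_NO_ENCRYPT;   /* bit 15 */
flags |= RTL8187_TX_FLAG_RTS;          /* bit 23: RTS protection */
flags |= 1 << 19;                      /* rts_cts_rate = 1 */
flags |= 4 << 24;                      /* tx_rate = 4 */
/* flags == 0x04888040; stored little-endian via cpu_to_le32() */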
118
119static void rtl8187_rx_cb(struct urb *urb)
120{
121 struct sk_buff *skb = (struct sk_buff *)urb->context;
122 struct rtl8187_rx_info *info = (struct rtl8187_rx_info *)skb->cb;
123 struct ieee80211_hw *dev = info->dev;
124 struct rtl8187_priv *priv = dev->priv;
125 struct rtl8187_rx_hdr *hdr;
126 struct ieee80211_rx_status rx_status = { 0 };
127 int rate, signal;
128
129 spin_lock(&priv->rx_queue.lock);
130 if (skb->next)
131 __skb_unlink(skb, &priv->rx_queue);
132 else {
133 spin_unlock(&priv->rx_queue.lock);
134 return;
135 }
136 spin_unlock(&priv->rx_queue.lock);
137
138 if (unlikely(urb->status)) {
139 usb_free_urb(urb);
140 dev_kfree_skb_irq(skb);
141 return;
142 }
143
144 skb_put(skb, urb->actual_length);
145 hdr = (struct rtl8187_rx_hdr *)(skb_tail_pointer(skb) - sizeof(*hdr));
146 skb_trim(skb, le16_to_cpu(hdr->len) & 0x0FFF);
147
148 signal = hdr->agc >> 1;
149 rate = (le16_to_cpu(hdr->rate) >> 4) & 0xF;
150 if (rate > 3) { /* OFDM rate */
151 if (signal > 90)
152 signal = 90;
153 else if (signal < 25)
154 signal = 25;
155 signal = 90 - signal;
156 } else { /* CCK rate */
157 if (signal > 95)
158 signal = 95;
159 else if (signal < 30)
160 signal = 30;
161 signal = 95 - signal;
162 }
163
164 rx_status.antenna = (hdr->signal >> 7) & 1;
165 rx_status.signal = 64 - min(hdr->noise, (u8)64);
166 rx_status.ssi = signal;
167 rx_status.rate = rate;
168 rx_status.freq = dev->conf.freq;
169 rx_status.channel = dev->conf.channel;
170 rx_status.phymode = dev->conf.phymode;
171 rx_status.mactime = le64_to_cpu(hdr->mac_time);
172 ieee80211_rx_irqsafe(dev, skb, &rx_status);
173
174 skb = dev_alloc_skb(RTL8187_MAX_RX);
175 if (unlikely(!skb)) {
176 usb_free_urb(urb);
177 /* TODO check rx queue length and refill *somewhere* */
178 return;
179 }
180
181 info = (struct rtl8187_rx_info *)skb->cb;
182 info->urb = urb;
183 info->dev = dev;
184 urb->transfer_buffer = skb_tail_pointer(skb);
185 urb->context = skb;
186 skb_queue_tail(&priv->rx_queue, skb);
187
188 usb_submit_urb(urb, GFP_ATOMIC);
189}
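
The AGC byte reported by the hardware is halved and then folded onto different scales for OFDM and CCK frames; a worked example with a hypothetical AGC value:

/* Sketch: hdr->agc == 0x50 on an OFDM frame (rate index > 3)
 *   signal = 0x50 >> 1 = 40   (already inside the [25, 90] clamp)
 *   rx_status.ssi = 90 - 40 = 50
 * The same AGC byte on a CCK frame would give ssi = 95 - 40 = 55. */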
190
191static int rtl8187_init_urbs(struct ieee80211_hw *dev)
192{
193 struct rtl8187_priv *priv = dev->priv;
194 struct urb *entry;
195 struct sk_buff *skb;
196 struct rtl8187_rx_info *info;
197
198 while (skb_queue_len(&priv->rx_queue) < 8) {
199 skb = __dev_alloc_skb(RTL8187_MAX_RX, GFP_KERNEL);
200 if (!skb)
201 break;
202 entry = usb_alloc_urb(0, GFP_KERNEL);
203 if (!entry) {
204 kfree_skb(skb);
205 break;
206 }
207 usb_fill_bulk_urb(entry, priv->udev,
208 usb_rcvbulkpipe(priv->udev, 1),
209 skb_tail_pointer(skb),
210 RTL8187_MAX_RX, rtl8187_rx_cb, skb);
211 info = (struct rtl8187_rx_info *)skb->cb;
212 info->urb = entry;
213 info->dev = dev;
214 skb_queue_tail(&priv->rx_queue, skb);
215 usb_submit_urb(entry, GFP_KERNEL);
216 }
217
218 return 0;
219}
220
221static int rtl8187_init_hw(struct ieee80211_hw *dev)
222{
223 struct rtl8187_priv *priv = dev->priv;
224 u8 reg;
225 int i;
226
227 /* reset */
228 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
229 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
230 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
231 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_ON);
232 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
233 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
234 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
235
236 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
237
238 msleep(200);
239 rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x10);
240 rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x11);
241 rtl818x_iowrite8(priv, (u8 *)0xFE18, 0x00);
242 msleep(200);
243
244 reg = rtl818x_ioread8(priv, &priv->map->CMD);
245 reg &= (1 << 1);
246 reg |= RTL818X_CMD_RESET;
247 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
248
249 i = 10;
250 do {
251 msleep(2);
252 if (!(rtl818x_ioread8(priv, &priv->map->CMD) &
253 RTL818X_CMD_RESET))
254 break;
255 } while (--i);
256
257 if (!i) {
258 printk(KERN_ERR "%s: Reset timeout!\n", wiphy_name(dev->wiphy));
259 return -ETIMEDOUT;
260 }
261
262 /* reload registers from eeprom */
263 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_LOAD);
264
265 i = 10;
266 do {
267 msleep(4);
268 if (!(rtl818x_ioread8(priv, &priv->map->EEPROM_CMD) &
269 RTL818X_EEPROM_CMD_CONFIG))
270 break;
271 } while (--i);
272
273 if (!i) {
274 printk(KERN_ERR "%s: eeprom reset timeout!\n",
275 wiphy_name(dev->wiphy));
276 return -ETIMEDOUT;
277 }
278
279 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
280 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
281 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
282 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_ON);
283 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
284 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
285 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
286
287 /* setup card */
288 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
289 rtl818x_iowrite8(priv, &priv->map->GPIO, 0);
290
291 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
292 rtl818x_iowrite8(priv, &priv->map->GPIO, 1);
293 rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
294
295 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
296 for (i = 0; i < ETH_ALEN; i++)
297 rtl818x_iowrite8(priv, &priv->map->MAC[i], priv->hwaddr[i]);
298
299 rtl818x_iowrite16(priv, (__le16 *)0xFFF4, 0xFFFF);
300 reg = rtl818x_ioread8(priv, &priv->map->CONFIG1);
301 reg &= 0x3F;
302 reg |= 0x80;
303 rtl818x_iowrite8(priv, &priv->map->CONFIG1, reg);
304
305 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
306
307 rtl818x_iowrite32(priv, &priv->map->INT_TIMEOUT, 0);
308 rtl818x_iowrite8(priv, &priv->map->WPA_CONF, 0);
309 rtl818x_iowrite8(priv, &priv->map->RATE_FALLBACK, 0x81);
310
311 // TODO: set RESP_RATE and BRSR properly
312 rtl818x_iowrite8(priv, &priv->map->RESP_RATE, (8 << 4) | 0);
313 rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
314
315 /* host_usb_init */
316 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
317 rtl818x_iowrite8(priv, &priv->map->GPIO, 0);
318 reg = rtl818x_ioread8(priv, (u8 *)0xFE53);
319 rtl818x_iowrite8(priv, (u8 *)0xFE53, reg | (1 << 7));
320 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, (4 << 8));
321 rtl818x_iowrite8(priv, &priv->map->GPIO, 0x20);
322 rtl818x_iowrite8(priv, &priv->map->GP_ENABLE, 0);
323 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x80);
324 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x80);
325 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x80);
326 msleep(100);
327
328 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x000a8008);
329 rtl818x_iowrite16(priv, &priv->map->BRSR, 0xFFFF);
330 rtl818x_iowrite32(priv, &priv->map->RF_PARA, 0x00100044);
331 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
332 rtl818x_iowrite8(priv, &priv->map->CONFIG3, 0x44);
333 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
334 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FF7);
335 msleep(100);
336
337 priv->rf_init(dev);
338
339 rtl818x_iowrite16(priv, &priv->map->BRSR, 0x01F3);
340 reg = rtl818x_ioread16(priv, &priv->map->PGSELECT) & 0xfffe;
341 rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg | 0x1);
342 rtl818x_iowrite16(priv, (__le16 *)0xFFFE, 0x10);
343 rtl818x_iowrite8(priv, &priv->map->TALLY_SEL, 0x80);
344 rtl818x_iowrite8(priv, (u8 *)0xFFFF, 0x60);
345 rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg);
346
347 return 0;
348}
349
350static void rtl8187_set_channel(struct ieee80211_hw *dev, int channel)
351{
352 u32 reg;
353 struct rtl8187_priv *priv = dev->priv;
354
355 reg = rtl818x_ioread32(priv, &priv->map->TX_CONF);
356 /* Enable TX loopback on MAC level to avoid TX during channel
357 * changes, as this has been seen to cause problems and the
358 * card will stop working until the next reset
359 */
360 rtl818x_iowrite32(priv, &priv->map->TX_CONF,
361 reg | RTL818X_TX_CONF_LOOPBACK_MAC);
362 msleep(10);
363 rtl8225_rf_set_channel(dev, channel);
364 msleep(10);
365 rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
366}
367
368static int rtl8187_open(struct ieee80211_hw *dev)
369{
370 struct rtl8187_priv *priv = dev->priv;
371 u32 reg;
372 int ret;
373
374 ret = rtl8187_init_hw(dev);
375 if (ret)
376 return ret;
377
378 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0xFFFF);
379
380 rtl8187_init_urbs(dev);
381
382 reg = RTL818X_RX_CONF_ONLYERLPKT |
383 RTL818X_RX_CONF_RX_AUTORESETPHY |
384 RTL818X_RX_CONF_BSSID |
385 RTL818X_RX_CONF_MGMT |
386 RTL818X_RX_CONF_CTRL |
387 RTL818X_RX_CONF_DATA |
388 (7 << 13 /* RX FIFO threshold NONE */) |
389 (7 << 10 /* MAX RX DMA */) |
390 RTL818X_RX_CONF_BROADCAST |
391 RTL818X_RX_CONF_MULTICAST |
392 RTL818X_RX_CONF_NICMAC;
393 if (priv->mode == IEEE80211_IF_TYPE_MNTR)
394 reg |= RTL818X_RX_CONF_MONITOR;
395
396 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
397
398 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
399 reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
400 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
401 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
402
403 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
404 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
405 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
406 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
407 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
408
409 reg = RTL818X_TX_CONF_CW_MIN |
410 (7 << 21 /* MAX TX DMA */) |
411 RTL818X_TX_CONF_NO_ICV;
412 rtl818x_iowrite32(priv, &priv->map->TX_CONF, reg);
413
414 reg = rtl818x_ioread8(priv, &priv->map->CMD);
415 reg |= RTL818X_CMD_TX_ENABLE;
416 reg |= RTL818X_CMD_RX_ENABLE;
417 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
418
419 return 0;
420}
421
422static int rtl8187_stop(struct ieee80211_hw *dev)
423{
424 struct rtl8187_priv *priv = dev->priv;
425 struct rtl8187_rx_info *info;
426 struct sk_buff *skb;
427 u32 reg;
428
429 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
430
431 reg = rtl818x_ioread8(priv, &priv->map->CMD);
432 reg &= ~RTL818X_CMD_TX_ENABLE;
433 reg &= ~RTL818X_CMD_RX_ENABLE;
434 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
435
436 rtl8225_rf_stop(dev);
437
438 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
439 reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
440 rtl818x_iowrite8(priv, &priv->map->CONFIG4, reg | RTL818X_CONFIG4_VCOOFF);
441 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
442
443 while ((skb = skb_dequeue(&priv->rx_queue))) {
444 info = (struct rtl8187_rx_info *)skb->cb;
445 usb_kill_urb(info->urb);
446 kfree_skb(skb);
447 }
448 return 0;
449}
450
451static int rtl8187_add_interface(struct ieee80211_hw *dev,
452 struct ieee80211_if_init_conf *conf)
453{
454 struct rtl8187_priv *priv = dev->priv;
455
456 /* NOTE: using IEEE80211_IF_TYPE_MGMT to indicate no mode selected */
457 if (priv->mode != IEEE80211_IF_TYPE_MGMT)
458 return -1;
459
460 switch (conf->type) {
461 case IEEE80211_IF_TYPE_STA:
462 case IEEE80211_IF_TYPE_MNTR:
463 priv->mode = conf->type;
464 break;
465 default:
466 return -EOPNOTSUPP;
467 }
468
469 priv->hwaddr = conf->mac_addr;
470
471 return 0;
472}
473
474static void rtl8187_remove_interface(struct ieee80211_hw *dev,
475 struct ieee80211_if_init_conf *conf)
476{
477 struct rtl8187_priv *priv = dev->priv;
478 priv->mode = IEEE80211_IF_TYPE_MGMT;
479}
480
481static int rtl8187_config(struct ieee80211_hw *dev, struct ieee80211_conf *conf)
482{
483 struct rtl8187_priv *priv = dev->priv;
484 rtl8187_set_channel(dev, conf->channel);
485
486 rtl818x_iowrite8(priv, &priv->map->SIFS, 0x22);
487
488 if (conf->flags & IEEE80211_CONF_SHORT_SLOT_TIME) {
489 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x9);
490 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x14);
491 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x14);
492 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0x73);
493 } else {
494 rtl818x_iowrite8(priv, &priv->map->SLOT, 0x14);
495 rtl818x_iowrite8(priv, &priv->map->DIFS, 0x24);
496 rtl818x_iowrite8(priv, &priv->map->EIFS, 91 - 0x24);
497 rtl818x_iowrite8(priv, &priv->map->CW_VAL, 0xa5);
498 }
499
500 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
501 rtl818x_iowrite16(priv, &priv->map->ATIMTR_INTERVAL, 100);
502 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
503 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL_TIME, 100);
504 return 0;
505}
506
507static int rtl8187_config_interface(struct ieee80211_hw *dev, int if_id,
508 struct ieee80211_if_conf *conf)
509{
510 struct rtl8187_priv *priv = dev->priv;
511 int i;
512
513 for (i = 0; i < ETH_ALEN; i++)
514 rtl818x_iowrite8(priv, &priv->map->BSSID[i], conf->bssid[i]);
515
516 if (is_valid_ether_addr(conf->bssid))
517 rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_INFRA);
518 else
519 rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_NO_LINK);
520
521 return 0;
522}
523
524static const struct ieee80211_ops rtl8187_ops = {
525 .tx = rtl8187_tx,
526 .open = rtl8187_open,
527 .stop = rtl8187_stop,
528 .add_interface = rtl8187_add_interface,
529 .remove_interface = rtl8187_remove_interface,
530 .config = rtl8187_config,
531 .config_interface = rtl8187_config_interface,
532};
533
534static void rtl8187_eeprom_register_read(struct eeprom_93cx6 *eeprom)
535{
536 struct ieee80211_hw *dev = eeprom->data;
537 struct rtl8187_priv *priv = dev->priv;
538 u8 reg = rtl818x_ioread8(priv, &priv->map->EEPROM_CMD);
539
540 eeprom->reg_data_in = reg & RTL818X_EEPROM_CMD_WRITE;
541 eeprom->reg_data_out = reg & RTL818X_EEPROM_CMD_READ;
542 eeprom->reg_data_clock = reg & RTL818X_EEPROM_CMD_CK;
543 eeprom->reg_chip_select = reg & RTL818X_EEPROM_CMD_CS;
544}
545
546static void rtl8187_eeprom_register_write(struct eeprom_93cx6 *eeprom)
547{
548 struct ieee80211_hw *dev = eeprom->data;
549 struct rtl8187_priv *priv = dev->priv;
550 u8 reg = RTL818X_EEPROM_CMD_PROGRAM;
551
552 if (eeprom->reg_data_in)
553 reg |= RTL818X_EEPROM_CMD_WRITE;
554 if (eeprom->reg_data_out)
555 reg |= RTL818X_EEPROM_CMD_READ;
556 if (eeprom->reg_data_clock)
557 reg |= RTL818X_EEPROM_CMD_CK;
558 if (eeprom->reg_chip_select)
559 reg |= RTL818X_EEPROM_CMD_CS;
560
561 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, reg);
562 udelay(10);
563}
564
565static int __devinit rtl8187_probe(struct usb_interface *intf,
566 const struct usb_device_id *id)
567{
568 struct usb_device *udev = interface_to_usbdev(intf);
569 struct ieee80211_hw *dev;
570 struct rtl8187_priv *priv;
571 struct eeprom_93cx6 eeprom;
572 struct ieee80211_channel *channel;
573 u16 txpwr, reg;
574 int err, i;
575
576 dev = ieee80211_alloc_hw(sizeof(*priv), &rtl8187_ops);
577 if (!dev) {
578 printk(KERN_ERR "rtl8187: ieee80211 alloc failed\n");
579 return -ENOMEM;
580 }
581
582 priv = dev->priv;
583
584 SET_IEEE80211_DEV(dev, &intf->dev);
585 usb_set_intfdata(intf, dev);
586 priv->udev = udev;
587
588 usb_get_dev(udev);
589
590 skb_queue_head_init(&priv->rx_queue);
591 memcpy(priv->channels, rtl818x_channels, sizeof(rtl818x_channels));
592 memcpy(priv->rates, rtl818x_rates, sizeof(rtl818x_rates));
593 priv->map = (struct rtl818x_csr *)0xFF00;
594 priv->modes[0].mode = MODE_IEEE80211G;
595 priv->modes[0].num_rates = ARRAY_SIZE(rtl818x_rates);
596 priv->modes[0].rates = priv->rates;
597 priv->modes[0].num_channels = ARRAY_SIZE(rtl818x_channels);
598 priv->modes[0].channels = priv->channels;
599 priv->modes[1].mode = MODE_IEEE80211B;
600 priv->modes[1].num_rates = 4;
601 priv->modes[1].rates = priv->rates;
602 priv->modes[1].num_channels = ARRAY_SIZE(rtl818x_channels);
603 priv->modes[1].channels = priv->channels;
604 priv->mode = IEEE80211_IF_TYPE_MGMT;
605 dev->flags = IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
606 IEEE80211_HW_RX_INCLUDES_FCS |
607 IEEE80211_HW_WEP_INCLUDE_IV |
608 IEEE80211_HW_DATA_NULLFUNC_ACK;
609 dev->extra_tx_headroom = sizeof(struct rtl8187_tx_hdr);
610 dev->queues = 1;
611 dev->max_rssi = 65;
612 dev->max_signal = 64;
613
614 for (i = 0; i < 2; i++)
615 if ((err = ieee80211_register_hwmode(dev, &priv->modes[i])))
616 goto err_free_dev;
617
618 eeprom.data = dev;
619 eeprom.register_read = rtl8187_eeprom_register_read;
620 eeprom.register_write = rtl8187_eeprom_register_write;
621 if (rtl818x_ioread32(priv, &priv->map->RX_CONF) & (1 << 6))
622 eeprom.width = PCI_EEPROM_WIDTH_93C66;
623 else
624 eeprom.width = PCI_EEPROM_WIDTH_93C46;
625
626 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
627 udelay(10);
628
629 eeprom_93cx6_multiread(&eeprom, RTL8187_EEPROM_MAC_ADDR,
630 (__le16 __force *)dev->wiphy->perm_addr, 3);
631 if (!is_valid_ether_addr(dev->wiphy->perm_addr)) {
632 printk(KERN_WARNING "rtl8187: Invalid hwaddr! Using randomly "
633 "generated MAC address\n");
634 random_ether_addr(dev->wiphy->perm_addr);
635 }
636
637 channel = priv->channels;
638 for (i = 0; i < 3; i++) {
639 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_1 + i,
640 &txpwr);
641 (*channel++).val = txpwr & 0xFF;
642 (*channel++).val = txpwr >> 8;
643 }
644 for (i = 0; i < 2; i++) {
645 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_4 + i,
646 &txpwr);
647 (*channel++).val = txpwr & 0xFF;
648 (*channel++).val = txpwr >> 8;
649 }
650 for (i = 0; i < 2; i++) {
651 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_CHAN_6 + i,
652 &txpwr);
653 (*channel++).val = txpwr & 0xFF;
654 (*channel++).val = txpwr >> 8;
655 }
656
657 eeprom_93cx6_read(&eeprom, RTL8187_EEPROM_TXPWR_BASE,
658 &priv->txpwr_base);
659
660 reg = rtl818x_ioread16(priv, &priv->map->PGSELECT) & ~1;
661 rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg | 1);
662 /* 0 means ASIC B-cut; we must use SW 3-wire
663 * bit-by-bit banging for the radio. 1 means we can use
664 * a USB-specific request to write radio registers */
665 priv->asic_rev = rtl818x_ioread8(priv, (u8 *)0xFFFE) & 0x3;
666 rtl818x_iowrite16(priv, &priv->map->PGSELECT, reg);
667 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
668
669 rtl8225_write(dev, 0, 0x1B7);
670
671 if (rtl8225_read(dev, 8) != 0x588 || rtl8225_read(dev, 9) != 0x700)
672 priv->rf_init = rtl8225_rf_init;
673 else
674 priv->rf_init = rtl8225z2_rf_init;
675
676 rtl8225_write(dev, 0, 0x0B7);
677
678 err = ieee80211_register_hw(dev);
679 if (err) {
680 printk(KERN_ERR "rtl8187: Cannot register device\n");
681 goto err_free_dev;
682 }
683
684 printk(KERN_INFO "%s: hwaddr " MAC_FMT ", rtl8187 V%d + %s\n",
685 wiphy_name(dev->wiphy), MAC_ARG(dev->wiphy->perm_addr),
686 priv->asic_rev, priv->rf_init == rtl8225_rf_init ?
687 "rtl8225" : "rtl8225z2");
688
689 return 0;
690
691 err_free_dev:
692 ieee80211_free_hw(dev);
693 usb_set_intfdata(intf, NULL);
694 usb_put_dev(udev);
695 return err;
696}
697
698static void __devexit rtl8187_disconnect(struct usb_interface *intf)
699{
700 struct ieee80211_hw *dev = usb_get_intfdata(intf);
701 struct rtl8187_priv *priv;
702
703 if (!dev)
704 return;
705
706 ieee80211_unregister_hw(dev);
707
708 priv = dev->priv;
709 usb_put_dev(interface_to_usbdev(intf));
710 ieee80211_free_hw(dev);
711}
712
713static struct usb_driver rtl8187_driver = {
714 .name = KBUILD_MODNAME,
715 .id_table = rtl8187_table,
716 .probe = rtl8187_probe,
717 .disconnect = rtl8187_disconnect,
718};
719
720static int __init rtl8187_init(void)
721{
722 return usb_register(&rtl8187_driver);
723}
724
725static void __exit rtl8187_exit(void)
726{
727 usb_deregister(&rtl8187_driver);
728}
729
730module_init(rtl8187_init);
731module_exit(rtl8187_exit);
diff --git a/drivers/net/wireless/rtl8187_rtl8225.c b/drivers/net/wireless/rtl8187_rtl8225.c
new file mode 100644
index 000000000000..e25a09f1b068
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_rtl8225.c
@@ -0,0 +1,745 @@
1/*
2 * Radio tuning for RTL8225 on RTL8187
3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
6 *
7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 *
10 * Magic delays, register offsets, and phy value tables below are
11 * taken from the original r8187 driver sources. Thanks to Realtek
12 * for their support!
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License version 2 as
16 * published by the Free Software Foundation.
17 */
18
19#include <linux/init.h>
20#include <linux/usb.h>
21#include <net/mac80211.h>
22
23#include "rtl8187.h"
24#include "rtl8187_rtl8225.h"
25
26static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
27{
28 struct rtl8187_priv *priv = dev->priv;
29 u16 reg80, reg84, reg82;
30 u32 bangdata;
31 int i;
32
33 bangdata = (data << 4) | (addr & 0xf);
34
35 reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput) & 0xfff3;
36 reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
37
38 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x7);
39
40 reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
41 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x7);
42 udelay(10);
43
44 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
45 udelay(2);
46 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
47 udelay(10);
48
49 for (i = 15; i >= 0; i--) {
50 u16 reg = reg80 | (bangdata & (1 << i)) >> i;
51
52 if (i & 1)
53 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
54
55 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
56 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg | (1 << 1));
57
58 if (!(i & 1))
59 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
60 }
61
62 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
63 udelay(10);
64
65 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
66 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
67 msleep(2);
68}
69
70static void rtl8225_write_8051(struct ieee80211_hw *dev, u8 addr, u16 data)
71{
72 struct rtl8187_priv *priv = dev->priv;
73 u16 reg80, reg82, reg84;
74
75 reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
76 reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
77 reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
78
79 reg80 &= ~(0x3 << 2);
80 reg84 &= ~0xF;
81
82 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x0007);
83 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x0007);
84 udelay(10);
85
86 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
87 udelay(2);
88
89 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
90 udelay(10);
91
92 usb_control_msg(priv->udev, usb_sndctrlpipe(priv->udev, 0),
93 RTL8187_REQ_SET_REG, RTL8187_REQT_WRITE,
94 addr, 0x8225, &data, sizeof(data), HZ / 2);
95
96 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
97 udelay(10);
98
99 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
100 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
101 msleep(2);
102}
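
On ASIC revisions that support it, the RF write above is a single vendor control request instead of a bit-banged sequence; a sketch of the request that goes out when, for example, rtl8225_rf_set_channel() tunes to channel 8:

/* Sketch: rtl8225_write_8051(dev, 0x7, 0x0bdc)
 *   bRequest     = RTL8187_REQ_SET_REG  (0x05)
 *   bRequestType = RTL8187_REQT_WRITE   (0x40)
 *   wValue       = 0x0007               (RF register address)
 *   wIndex       = 0x8225               (index the driver uses for RF writes)
 *   payload      = the 16-bit data word 0x0bdc
 * wrapped in the same RFPinsOutput toggling as the bit-bang path. */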
103
104void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
105{
106 struct rtl8187_priv *priv = dev->priv;
107
108 if (priv->asic_rev)
109 rtl8225_write_8051(dev, addr, data);
110 else
111 rtl8225_write_bitbang(dev, addr, data);
112}
113
114u16 rtl8225_read(struct ieee80211_hw *dev, u8 addr)
115{
116 struct rtl8187_priv *priv = dev->priv;
117 u16 reg80, reg82, reg84, out;
118 int i;
119
120 reg80 = rtl818x_ioread16(priv, &priv->map->RFPinsOutput);
121 reg82 = rtl818x_ioread16(priv, &priv->map->RFPinsEnable);
122 reg84 = rtl818x_ioread16(priv, &priv->map->RFPinsSelect);
123
124 reg80 &= ~0xF;
125
126 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82 | 0x000F);
127 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84 | 0x000F);
128
129 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80 | (1 << 2));
130 udelay(4);
131 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg80);
132 udelay(5);
133
134 for (i = 4; i >= 0; i--) {
135 u16 reg = reg80 | ((addr >> i) & 1);
136
137 if (!(i & 1)) {
138 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
139 udelay(1);
140 }
141
142 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
143 reg | (1 << 1));
144 udelay(2);
145 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
146 reg | (1 << 1));
147 udelay(2);
148
149 if (i & 1) {
150 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, reg);
151 udelay(1);
152 }
153 }
154
155 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
156 reg80 | (1 << 3) | (1 << 1));
157 udelay(2);
158 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
159 reg80 | (1 << 3));
160 udelay(2);
161 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
162 reg80 | (1 << 3));
163 udelay(2);
164
165 out = 0;
166 for (i = 11; i >= 0; i--) {
167 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
168 reg80 | (1 << 3));
169 udelay(1);
170 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
171 reg80 | (1 << 3) | (1 << 1));
172 udelay(2);
173 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
174 reg80 | (1 << 3) | (1 << 1));
175 udelay(2);
176 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
177 reg80 | (1 << 3) | (1 << 1));
178 udelay(2);
179
180 if (rtl818x_ioread16(priv, &priv->map->RFPinsInput) & (1 << 1))
181 out |= 1 << i;
182
183 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
184 reg80 | (1 << 3));
185 udelay(2);
186 }
187
188 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput,
189 reg80 | (1 << 3) | (1 << 2));
190 udelay(2);
191
192 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, reg82);
193 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, reg84);
194 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x03A0);
195
196 return out;
197}
198
199static const u16 rtl8225bcd_rxgain[] = {
200 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
201 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
202 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
203 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
204 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
205 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
206 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
207 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
208 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
209 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
210 0x07aa, 0x07ab, 0x07ac, 0x07ad, 0x07b0, 0x07b1, 0x07b2, 0x07b3,
211 0x07b4, 0x07b5, 0x07b8, 0x07b9, 0x07ba, 0x07bb, 0x07bb
212};
213
214static const u8 rtl8225_agc[] = {
215 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e, 0x9e,
216 0x9d, 0x9c, 0x9b, 0x9a, 0x99, 0x98, 0x97, 0x96,
217 0x95, 0x94, 0x93, 0x92, 0x91, 0x90, 0x8f, 0x8e,
218 0x8d, 0x8c, 0x8b, 0x8a, 0x89, 0x88, 0x87, 0x86,
219 0x85, 0x84, 0x83, 0x82, 0x81, 0x80, 0x3f, 0x3e,
220 0x3d, 0x3c, 0x3b, 0x3a, 0x39, 0x38, 0x37, 0x36,
221 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2f, 0x2e,
222 0x2d, 0x2c, 0x2b, 0x2a, 0x29, 0x28, 0x27, 0x26,
223 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1f, 0x1e,
224 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18, 0x17, 0x16,
225 0x15, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x0e,
226 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08, 0x07, 0x06,
227 0x05, 0x04, 0x03, 0x02, 0x01, 0x01, 0x01, 0x01,
228 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
229 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
230 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01
231};
232
233static const u8 rtl8225_gain[] = {
234 0x23, 0x88, 0x7c, 0xa5, /* -82dBm */
235 0x23, 0x88, 0x7c, 0xb5, /* -82dBm */
236 0x23, 0x88, 0x7c, 0xc5, /* -82dBm */
237 0x33, 0x80, 0x79, 0xc5, /* -78dBm */
238 0x43, 0x78, 0x76, 0xc5, /* -74dBm */
239 0x53, 0x60, 0x73, 0xc5, /* -70dBm */
240 0x63, 0x58, 0x70, 0xc5, /* -66dBm */
241};
242
243static const u8 rtl8225_threshold[] = {
244 0x8d, 0x8d, 0x8d, 0x8d, 0x9d, 0xad, 0xbd
245};
246
247static const u8 rtl8225_tx_gain_cck_ofdm[] = {
248 0x02, 0x06, 0x0e, 0x1e, 0x3e, 0x7e
249};
250
251static const u8 rtl8225_tx_power_cck[] = {
252 0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02,
253 0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02,
254 0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02,
255 0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02,
256 0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03,
257 0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03
258};
259
260static const u8 rtl8225_tx_power_cck_ch14[] = {
261 0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00,
262 0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00,
263 0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00,
264 0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00,
265 0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00,
266 0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00
267};
268
269static const u8 rtl8225_tx_power_ofdm[] = {
270 0x80, 0x90, 0xa2, 0xb5, 0xcb, 0xe4
271};
272
273static const u32 rtl8225_chan[] = {
274 0x085c, 0x08dc, 0x095c, 0x09dc, 0x0a5c, 0x0adc, 0x0b5c,
275 0x0bdc, 0x0c5c, 0x0cdc, 0x0d5c, 0x0ddc, 0x0e5c, 0x0f72
276};
277
278static void rtl8225_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
279{
280 struct rtl8187_priv *priv = dev->priv;
281 u8 cck_power, ofdm_power;
282 const u8 *tmp;
283 u32 reg;
284 int i;
285
286 cck_power = priv->channels[channel - 1].val & 0xF;
287 ofdm_power = priv->channels[channel - 1].val >> 4;
288
289 cck_power = min(cck_power, (u8)11);
290 ofdm_power = min(ofdm_power, (u8)35);
291
292 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
293 rtl8225_tx_gain_cck_ofdm[cck_power / 6] >> 1);
294
295 if (channel == 14)
296 tmp = &rtl8225_tx_power_cck_ch14[(cck_power % 6) * 8];
297 else
298 tmp = &rtl8225_tx_power_cck[(cck_power % 6) * 8];
299
300 for (i = 0; i < 8; i++)
301 rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
302
303 msleep(1); // FIXME: optional?
304
305 /* anaparam2 on */
306 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
307 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
308 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
309 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
310 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
311 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
312
313 rtl8225_write_phy_ofdm(dev, 2, 0x42);
314 rtl8225_write_phy_ofdm(dev, 6, 0x00);
315 rtl8225_write_phy_ofdm(dev, 8, 0x00);
316
317 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
318 rtl8225_tx_gain_cck_ofdm[ofdm_power / 6] >> 1);
319
320 tmp = &rtl8225_tx_power_ofdm[ofdm_power % 6];
321
322 rtl8225_write_phy_ofdm(dev, 5, *tmp);
323 rtl8225_write_phy_ofdm(dev, 7, *tmp);
324
325 msleep(1);
326}
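
Each channel's power byte (filled from the EEPROM in the probe routine) packs the CCK index in the low nibble and the OFDM index in the high nibble; the index is then split into a gain step (/ 6) and a power-table row (% 6). A worked example with a hypothetical EEPROM byte:

/* Sketch: priv->channels[channel - 1].val == 0x4a
 *   cck_power  = 0x4a & 0xF = 10  (clamped to at most 11 above)
 *   ofdm_power = 0x4a >> 4  =  4  (clamped to at most 35 above)
 *   CCK gain = rtl8225_tx_gain_cck_ofdm[10 / 6] >> 1
 *   CCK row  = rtl8225_tx_power_cck + (10 % 6) * 8   (8 bytes -> regs 0x44..0x4b)
 */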
327
328void rtl8225_rf_init(struct ieee80211_hw *dev)
329{
330 struct rtl8187_priv *priv = dev->priv;
331 int i;
332
333 rtl8225_write(dev, 0x0, 0x067); msleep(1);
334 rtl8225_write(dev, 0x1, 0xFE0); msleep(1);
335 rtl8225_write(dev, 0x2, 0x44D); msleep(1);
336 rtl8225_write(dev, 0x3, 0x441); msleep(1);
337 rtl8225_write(dev, 0x4, 0x486); msleep(1);
338 rtl8225_write(dev, 0x5, 0xBC0); msleep(1);
339 rtl8225_write(dev, 0x6, 0xAE6); msleep(1);
340 rtl8225_write(dev, 0x7, 0x82A); msleep(1);
341 rtl8225_write(dev, 0x8, 0x01F); msleep(1);
342 rtl8225_write(dev, 0x9, 0x334); msleep(1);
343 rtl8225_write(dev, 0xA, 0xFD4); msleep(1);
344 rtl8225_write(dev, 0xB, 0x391); msleep(1);
345 rtl8225_write(dev, 0xC, 0x050); msleep(1);
346 rtl8225_write(dev, 0xD, 0x6DB); msleep(1);
347 rtl8225_write(dev, 0xE, 0x029); msleep(1);
348 rtl8225_write(dev, 0xF, 0x914); msleep(100);
349
350 rtl8225_write(dev, 0x2, 0xC4D); msleep(200);
351 rtl8225_write(dev, 0x2, 0x44D); msleep(200);
352
353 if (!(rtl8225_read(dev, 6) & (1 << 7))) {
354 rtl8225_write(dev, 0x02, 0x0c4d);
355 msleep(200);
356 rtl8225_write(dev, 0x02, 0x044d);
357 msleep(100);
358 if (!(rtl8225_read(dev, 6) & (1 << 7)))
359 printk(KERN_WARNING "%s: RF Calibration Failed! %x\n",
360 wiphy_name(dev->wiphy), rtl8225_read(dev, 6));
361 }
362
363 rtl8225_write(dev, 0x0, 0x127);
364
365 for (i = 0; i < ARRAY_SIZE(rtl8225bcd_rxgain); i++) {
366 rtl8225_write(dev, 0x1, i + 1);
367 rtl8225_write(dev, 0x2, rtl8225bcd_rxgain[i]);
368 }
369
370 rtl8225_write(dev, 0x0, 0x027);
371 rtl8225_write(dev, 0x0, 0x22F);
372
373 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
374 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
375 msleep(1);
376 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
377 msleep(1);
378 }
379
380 msleep(1);
381
382 rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1);
383 rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1);
384 rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1);
385 rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1);
386 rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1);
387 rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1);
388 rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1);
389 rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1);
390 rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1);
391 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1);
392 rtl8225_write_phy_ofdm(dev, 0x0a, 0x09); msleep(1);
393 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1);
394 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1);
395 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1);
396 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1);
397 rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1);
398 rtl8225_write_phy_ofdm(dev, 0x11, 0x06); msleep(1);
399 rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1);
400 rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1);
401 rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1);
402 rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1);
403 rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1);
404 rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1);
405 rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1);
406 rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1);
407 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1);
408 rtl8225_write_phy_ofdm(dev, 0x1b, 0x76); msleep(1);
409 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1);
410 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1);
411 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1);
412 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1);
413 rtl8225_write_phy_ofdm(dev, 0x21, 0x27); msleep(1);
414 rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1);
415 rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1);
416 rtl8225_write_phy_ofdm(dev, 0x25, 0x20); msleep(1);
417 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);
418 rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1);
419
420 rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
421 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
422 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
423 rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
424
425 rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1);
426 rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1);
427 rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1);
428 rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1);
429 rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1);
430 rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1);
431 rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1);
432 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);
433 rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1);
434 rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1);
435 rtl8225_write_phy_cck(dev, 0x13, 0xd0);
436 rtl8225_write_phy_cck(dev, 0x19, 0x00);
437 rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
438 rtl8225_write_phy_cck(dev, 0x1b, 0x08);
439 rtl8225_write_phy_cck(dev, 0x40, 0x86);
440 rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1);
441 rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1);
442 rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1);
443 rtl8225_write_phy_cck(dev, 0x44, 0x1f); msleep(1);
444 rtl8225_write_phy_cck(dev, 0x45, 0x1e); msleep(1);
445 rtl8225_write_phy_cck(dev, 0x46, 0x1a); msleep(1);
446 rtl8225_write_phy_cck(dev, 0x47, 0x15); msleep(1);
447 rtl8225_write_phy_cck(dev, 0x48, 0x10); msleep(1);
448 rtl8225_write_phy_cck(dev, 0x49, 0x0a); msleep(1);
449 rtl8225_write_phy_cck(dev, 0x4a, 0x05); msleep(1);
450 rtl8225_write_phy_cck(dev, 0x4b, 0x02); msleep(1);
451 rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1);
452
453 rtl818x_iowrite8(priv, &priv->map->TESTR, 0x0D);
454
455 rtl8225_rf_set_tx_power(dev, 1);
456
457 /* RX antenna default to A */
458 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */
459 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */
460
461 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
462 msleep(1);
463 rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
464
465 /* set sensitivity */
466 rtl8225_write(dev, 0x0c, 0x50);
467 rtl8225_write_phy_ofdm(dev, 0x0d, rtl8225_gain[2 * 4]);
468 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225_gain[2 * 4 + 2]);
469 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225_gain[2 * 4 + 3]);
470 rtl8225_write_phy_ofdm(dev, 0x23, rtl8225_gain[2 * 4 + 1]);
471 rtl8225_write_phy_cck(dev, 0x41, rtl8225_threshold[2]);
472}
473
474static const u8 rtl8225z2_tx_power_cck_ch14[] = {
475 0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00
476};
477
478static const u8 rtl8225z2_tx_power_cck[] = {
479 0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04
480};
481
482static const u8 rtl8225z2_tx_power_ofdm[] = {
483 0x42, 0x00, 0x40, 0x00, 0x40
484};
485
486static const u8 rtl8225z2_tx_gain_cck_ofdm[] = {
487 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
488 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b,
489 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11,
490 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
491 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d,
492 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23
493};
494
495static void rtl8225z2_rf_set_tx_power(struct ieee80211_hw *dev, int channel)
496{
497 struct rtl8187_priv *priv = dev->priv;
498 u8 cck_power, ofdm_power;
499 const u8 *tmp;
500 u32 reg;
501 int i;
502
503 cck_power = priv->channels[channel - 1].val & 0xF;
504 ofdm_power = priv->channels[channel - 1].val >> 4;
505
506 cck_power = min(cck_power, (u8)15);
507 cck_power += priv->txpwr_base & 0xF;
508 cck_power = min(cck_power, (u8)35);
509
510 ofdm_power = min(ofdm_power, (u8)15);
511 ofdm_power += priv->txpwr_base >> 4;
512 ofdm_power = min(ofdm_power, (u8)35);
513
514 if (channel == 14)
515 tmp = rtl8225z2_tx_power_cck_ch14;
516 else
517 tmp = rtl8225z2_tx_power_cck;
518
519 for (i = 0; i < 8; i++)
520 rtl8225_write_phy_cck(dev, 0x44 + i, *tmp++);
521
522 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_CCK,
523 rtl8225z2_tx_gain_cck_ofdm[cck_power]);
524 msleep(1);
525
526 /* anaparam2 on */
527 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
528 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
529 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
530 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_ON);
531 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
532 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
533
534 rtl8225_write_phy_ofdm(dev, 2, 0x42);
535 rtl8225_write_phy_ofdm(dev, 5, 0x00);
536 rtl8225_write_phy_ofdm(dev, 6, 0x40);
537 rtl8225_write_phy_ofdm(dev, 7, 0x00);
538 rtl8225_write_phy_ofdm(dev, 8, 0x40);
539
540 rtl818x_iowrite8(priv, &priv->map->TX_GAIN_OFDM,
541 rtl8225z2_tx_gain_cck_ofdm[ofdm_power]);
542 msleep(1);
543}
544
545static const u16 rtl8225z2_rxgain[] = {
546 0x0400, 0x0401, 0x0402, 0x0403, 0x0404, 0x0405, 0x0408, 0x0409,
547 0x040a, 0x040b, 0x0502, 0x0503, 0x0504, 0x0505, 0x0540, 0x0541,
548 0x0542, 0x0543, 0x0544, 0x0545, 0x0580, 0x0581, 0x0582, 0x0583,
549 0x0584, 0x0585, 0x0588, 0x0589, 0x058a, 0x058b, 0x0643, 0x0644,
550 0x0645, 0x0680, 0x0681, 0x0682, 0x0683, 0x0684, 0x0685, 0x0688,
551 0x0689, 0x068a, 0x068b, 0x068c, 0x0742, 0x0743, 0x0744, 0x0745,
552 0x0780, 0x0781, 0x0782, 0x0783, 0x0784, 0x0785, 0x0788, 0x0789,
553 0x078a, 0x078b, 0x078c, 0x078d, 0x0790, 0x0791, 0x0792, 0x0793,
554 0x0794, 0x0795, 0x0798, 0x0799, 0x079a, 0x079b, 0x079c, 0x079d,
555 0x07a0, 0x07a1, 0x07a2, 0x07a3, 0x07a4, 0x07a5, 0x07a8, 0x07a9,
556 0x03aa, 0x03ab, 0x03ac, 0x03ad, 0x03b0, 0x03b1, 0x03b2, 0x03b3,
557 0x03b4, 0x03b5, 0x03b8, 0x03b9, 0x03ba, 0x03bb, 0x03bb
558};
559
560static const u8 rtl8225z2_gain_bg[] = {
561 0x23, 0x15, 0xa5, /* -82-1dBm */
562 0x23, 0x15, 0xb5, /* -82-2dBm */
563 0x23, 0x15, 0xc5, /* -82-3dBm */
564 0x33, 0x15, 0xc5, /* -78dBm */
565 0x43, 0x15, 0xc5, /* -74dBm */
566 0x53, 0x15, 0xc5, /* -70dBm */
567 0x63, 0x15, 0xc5 /* -66dBm */
568};
569
570void rtl8225z2_rf_init(struct ieee80211_hw *dev)
571{
572 struct rtl8187_priv *priv = dev->priv;
573 int i;
574
575 rtl8225_write(dev, 0x0, 0x2BF); msleep(1);
576 rtl8225_write(dev, 0x1, 0xEE0); msleep(1);
577 rtl8225_write(dev, 0x2, 0x44D); msleep(1);
578 rtl8225_write(dev, 0x3, 0x441); msleep(1);
579 rtl8225_write(dev, 0x4, 0x8C3); msleep(1);
580 rtl8225_write(dev, 0x5, 0xC72); msleep(1);
581 rtl8225_write(dev, 0x6, 0x0E6); msleep(1);
582 rtl8225_write(dev, 0x7, 0x82A); msleep(1);
583 rtl8225_write(dev, 0x8, 0x03F); msleep(1);
584 rtl8225_write(dev, 0x9, 0x335); msleep(1);
585 rtl8225_write(dev, 0xa, 0x9D4); msleep(1);
586 rtl8225_write(dev, 0xb, 0x7BB); msleep(1);
587 rtl8225_write(dev, 0xc, 0x850); msleep(1);
588 rtl8225_write(dev, 0xd, 0xCDF); msleep(1);
589 rtl8225_write(dev, 0xe, 0x02B); msleep(1);
590 rtl8225_write(dev, 0xf, 0x114); msleep(100);
591
592 rtl8225_write(dev, 0x0, 0x1B7);
593
594 for (i = 0; i < ARRAY_SIZE(rtl8225z2_rxgain); i++) {
595 rtl8225_write(dev, 0x1, i + 1);
596 rtl8225_write(dev, 0x2, rtl8225z2_rxgain[i]);
597 }
598
599 rtl8225_write(dev, 0x3, 0x080);
600 rtl8225_write(dev, 0x5, 0x004);
601 rtl8225_write(dev, 0x0, 0x0B7);
602 rtl8225_write(dev, 0x2, 0xc4D);
603
604 msleep(200);
605 rtl8225_write(dev, 0x2, 0x44D);
606 msleep(100);
607
608 if (!(rtl8225_read(dev, 6) & (1 << 7))) {
609 rtl8225_write(dev, 0x02, 0x0C4D);
610 msleep(200);
611 rtl8225_write(dev, 0x02, 0x044D);
612 msleep(100);
613 if (!(rtl8225_read(dev, 6) & (1 << 7)))
614 printk(KERN_WARNING "%s: RF Calibration Failed! %x\n",
615 wiphy_name(dev->wiphy), rtl8225_read(dev, 6));
616 }
617
618 msleep(200);
619
620 rtl8225_write(dev, 0x0, 0x2BF);
621
622 for (i = 0; i < ARRAY_SIZE(rtl8225_agc); i++) {
623 rtl8225_write_phy_ofdm(dev, 0xB, rtl8225_agc[i]);
624 msleep(1);
625 rtl8225_write_phy_ofdm(dev, 0xA, 0x80 + i);
626 msleep(1);
627 }
628
629 msleep(1);
630
631 rtl8225_write_phy_ofdm(dev, 0x00, 0x01); msleep(1);
632 rtl8225_write_phy_ofdm(dev, 0x01, 0x02); msleep(1);
633 rtl8225_write_phy_ofdm(dev, 0x02, 0x42); msleep(1);
634 rtl8225_write_phy_ofdm(dev, 0x03, 0x00); msleep(1);
635 rtl8225_write_phy_ofdm(dev, 0x04, 0x00); msleep(1);
636 rtl8225_write_phy_ofdm(dev, 0x05, 0x00); msleep(1);
637 rtl8225_write_phy_ofdm(dev, 0x06, 0x40); msleep(1);
638 rtl8225_write_phy_ofdm(dev, 0x07, 0x00); msleep(1);
639 rtl8225_write_phy_ofdm(dev, 0x08, 0x40); msleep(1);
640 rtl8225_write_phy_ofdm(dev, 0x09, 0xfe); msleep(1);
641 rtl8225_write_phy_ofdm(dev, 0x0a, 0x08); msleep(1);
642 rtl8225_write_phy_ofdm(dev, 0x0b, 0x80); msleep(1);
643 rtl8225_write_phy_ofdm(dev, 0x0c, 0x01); msleep(1);
644 rtl8225_write_phy_ofdm(dev, 0x0d, 0x43);
645 rtl8225_write_phy_ofdm(dev, 0x0e, 0xd3); msleep(1);
646 rtl8225_write_phy_ofdm(dev, 0x0f, 0x38); msleep(1);
647 rtl8225_write_phy_ofdm(dev, 0x10, 0x84); msleep(1);
648 rtl8225_write_phy_ofdm(dev, 0x11, 0x07); msleep(1);
649 rtl8225_write_phy_ofdm(dev, 0x12, 0x20); msleep(1);
650 rtl8225_write_phy_ofdm(dev, 0x13, 0x20); msleep(1);
651 rtl8225_write_phy_ofdm(dev, 0x14, 0x00); msleep(1);
652 rtl8225_write_phy_ofdm(dev, 0x15, 0x40); msleep(1);
653 rtl8225_write_phy_ofdm(dev, 0x16, 0x00); msleep(1);
654 rtl8225_write_phy_ofdm(dev, 0x17, 0x40); msleep(1);
655 rtl8225_write_phy_ofdm(dev, 0x18, 0xef); msleep(1);
656 rtl8225_write_phy_ofdm(dev, 0x19, 0x19); msleep(1);
657 rtl8225_write_phy_ofdm(dev, 0x1a, 0x20); msleep(1);
658 rtl8225_write_phy_ofdm(dev, 0x1b, 0x15); msleep(1);
659 rtl8225_write_phy_ofdm(dev, 0x1c, 0x04); msleep(1);
660 rtl8225_write_phy_ofdm(dev, 0x1d, 0xc5); msleep(1);
661 rtl8225_write_phy_ofdm(dev, 0x1e, 0x95); msleep(1);
662 rtl8225_write_phy_ofdm(dev, 0x1f, 0x75); msleep(1);
663 rtl8225_write_phy_ofdm(dev, 0x20, 0x1f); msleep(1);
664 rtl8225_write_phy_ofdm(dev, 0x21, 0x17); msleep(1);
665 rtl8225_write_phy_ofdm(dev, 0x22, 0x16); msleep(1);
666 rtl8225_write_phy_ofdm(dev, 0x23, 0x80); msleep(1); //FIXME: not needed?
667 rtl8225_write_phy_ofdm(dev, 0x24, 0x46); msleep(1);
668 rtl8225_write_phy_ofdm(dev, 0x25, 0x00); msleep(1);
669 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1);
670 rtl8225_write_phy_ofdm(dev, 0x27, 0x88); msleep(1);
671
672 rtl8225_write_phy_ofdm(dev, 0x0b, rtl8225z2_gain_bg[4 * 3]);
673 rtl8225_write_phy_ofdm(dev, 0x1b, rtl8225z2_gain_bg[4 * 3 + 1]);
674 rtl8225_write_phy_ofdm(dev, 0x1d, rtl8225z2_gain_bg[4 * 3 + 2]);
675 rtl8225_write_phy_ofdm(dev, 0x21, 0x37);
676
677 rtl8225_write_phy_cck(dev, 0x00, 0x98); msleep(1);
678 rtl8225_write_phy_cck(dev, 0x03, 0x20); msleep(1);
679 rtl8225_write_phy_cck(dev, 0x04, 0x7e); msleep(1);
680 rtl8225_write_phy_cck(dev, 0x05, 0x12); msleep(1);
681 rtl8225_write_phy_cck(dev, 0x06, 0xfc); msleep(1);
682 rtl8225_write_phy_cck(dev, 0x07, 0x78); msleep(1);
683 rtl8225_write_phy_cck(dev, 0x08, 0x2e); msleep(1);
684 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1);
685 rtl8225_write_phy_cck(dev, 0x11, 0x88); msleep(1);
686 rtl8225_write_phy_cck(dev, 0x12, 0x47); msleep(1);
687 rtl8225_write_phy_cck(dev, 0x13, 0xd0);
688 rtl8225_write_phy_cck(dev, 0x19, 0x00);
689 rtl8225_write_phy_cck(dev, 0x1a, 0xa0);
690 rtl8225_write_phy_cck(dev, 0x1b, 0x08);
691 rtl8225_write_phy_cck(dev, 0x40, 0x86);
692 rtl8225_write_phy_cck(dev, 0x41, 0x8d); msleep(1);
693 rtl8225_write_phy_cck(dev, 0x42, 0x15); msleep(1);
694 rtl8225_write_phy_cck(dev, 0x43, 0x18); msleep(1);
695 rtl8225_write_phy_cck(dev, 0x44, 0x36); msleep(1);
696 rtl8225_write_phy_cck(dev, 0x45, 0x35); msleep(1);
697 rtl8225_write_phy_cck(dev, 0x46, 0x2e); msleep(1);
698 rtl8225_write_phy_cck(dev, 0x47, 0x25); msleep(1);
699 rtl8225_write_phy_cck(dev, 0x48, 0x1c); msleep(1);
700 rtl8225_write_phy_cck(dev, 0x49, 0x12); msleep(1);
701 rtl8225_write_phy_cck(dev, 0x4a, 0x09); msleep(1);
702 rtl8225_write_phy_cck(dev, 0x4b, 0x04); msleep(1);
703 rtl8225_write_phy_cck(dev, 0x4c, 0x05); msleep(1);
704
705 rtl818x_iowrite8(priv, (u8 *)0xFF5B, 0x0D); msleep(1);
706
707 rtl8225z2_rf_set_tx_power(dev, 1);
708
709 /* RX antenna default to A */
710 rtl8225_write_phy_cck(dev, 0x10, 0x9b); msleep(1); /* B: 0xDB */
711 rtl8225_write_phy_ofdm(dev, 0x26, 0x90); msleep(1); /* B: 0x10 */
712
713 rtl818x_iowrite8(priv, &priv->map->TX_ANTENNA, 0x03); /* B: 0x00 */
714 msleep(1);
715 rtl818x_iowrite32(priv, (__le32 *)0xFF94, 0x3dc00002);
716}
717
718void rtl8225_rf_stop(struct ieee80211_hw *dev)
719{
720 u8 reg;
721 struct rtl8187_priv *priv = dev->priv;
722
723 rtl8225_write(dev, 0x4, 0x1f); msleep(1);
724
725 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
726 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
727 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
728 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, RTL8225_ANAPARAM2_OFF);
729 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, RTL8225_ANAPARAM_OFF);
730 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
731 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
732}
733
734void rtl8225_rf_set_channel(struct ieee80211_hw *dev, int channel)
735{
736 struct rtl8187_priv *priv = dev->priv;
737
738 if (priv->rf_init == rtl8225_rf_init)
739 rtl8225_rf_set_tx_power(dev, channel);
740 else
741 rtl8225z2_rf_set_tx_power(dev, channel);
742
743 rtl8225_write(dev, 0x7, rtl8225_chan[channel - 1]);
744 msleep(10);
745}
diff --git a/drivers/net/wireless/rtl8187_rtl8225.h b/drivers/net/wireless/rtl8187_rtl8225.h
new file mode 100644
index 000000000000..798ba4a97376
--- /dev/null
+++ b/drivers/net/wireless/rtl8187_rtl8225.h
@@ -0,0 +1,44 @@
1/*
2 * Radio tuning definitions for RTL8225 on RTL8187
3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
6 *
7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef RTL8187_RTL8225_H
16#define RTL8187_RTL8225_H
17
18#define RTL8225_ANAPARAM_ON 0xa0000a59
19#define RTL8225_ANAPARAM2_ON 0x860c7312
20#define RTL8225_ANAPARAM_OFF 0xa00beb59
21#define RTL8225_ANAPARAM2_OFF 0x840dec11
22
23void rtl8225_write(struct ieee80211_hw *, u8 addr, u16 data);
24u16 rtl8225_read(struct ieee80211_hw *, u8 addr);
25
26void rtl8225_rf_init(struct ieee80211_hw *);
27void rtl8225z2_rf_init(struct ieee80211_hw *);
28void rtl8225_rf_stop(struct ieee80211_hw *);
29void rtl8225_rf_set_channel(struct ieee80211_hw *, int);
30
31
32static inline void rtl8225_write_phy_ofdm(struct ieee80211_hw *dev,
33 u8 addr, u32 data)
34{
35 rtl8187_write_phy(dev, addr, data);
36}
37
38static inline void rtl8225_write_phy_cck(struct ieee80211_hw *dev,
39 u8 addr, u32 data)
40{
41 rtl8187_write_phy(dev, addr, data | 0x10000);
42}
43
44#endif /* RTL8187_RTL8225_H */
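
The only difference between the two helpers is bit 16 of the data word; after the data <<= 8 in rtl8187_write_phy() that bit ends up in the byte written to PHY[3], distinguishing CCK writes from OFDM writes. A worked example for the "RX antenna default to A" write used in the RF init code:

/* Sketch: rtl8225_write_phy_cck(dev, 0x10, 0x9b)
 *   data = 0x9b | 0x10000             = 0x0001009b
 *   data = (data << 8) | 0x10 | 0x80  = 0x01009b90
 *   PHY[3] = 0x01   (CCK page bit)
 *   PHY[2] = 0x00
 *   PHY[1] = 0x9b   (value)
 *   PHY[0] = 0x90   (register 0x10 with the 0x80 flag)
 */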
diff --git a/drivers/net/wireless/rtl818x.h b/drivers/net/wireless/rtl818x.h
new file mode 100644
index 000000000000..283de30628e1
--- /dev/null
+++ b/drivers/net/wireless/rtl818x.h
@@ -0,0 +1,226 @@
1/*
2 * Definitions for RTL818x hardware
3 *
4 * Copyright 2007 Michael Wu <flamingice@sourmilk.net>
5 * Copyright 2007 Andrea Merello <andreamrl@tiscali.it>
6 *
7 * Based on the r8187 driver, which is:
8 * Copyright 2005 Andrea Merello <andreamrl@tiscali.it>, et al.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef RTL818X_H
16#define RTL818X_H
17
18struct rtl818x_csr {
19 u8 MAC[6];
20 u8 reserved_0[2];
21 __le32 MAR[2];
22 u8 RX_FIFO_COUNT;
23 u8 reserved_1;
24 u8 TX_FIFO_COUNT;
25 u8 BQREQ;
26 u8 reserved_2[4];
27 __le32 TSFT[2];
28 __le32 TLPDA;
29 __le32 TNPDA;
30 __le32 THPDA;
31 __le16 BRSR;
32 u8 BSSID[6];
33 u8 RESP_RATE;
34 u8 EIFS;
35 u8 reserved_3[1];
36 u8 CMD;
37#define RTL818X_CMD_TX_ENABLE (1 << 2)
38#define RTL818X_CMD_RX_ENABLE (1 << 3)
39#define RTL818X_CMD_RESET (1 << 4)
40 u8 reserved_4[4];
41 __le16 INT_MASK;
42 __le16 INT_STATUS;
43#define RTL818X_INT_RX_OK (1 << 0)
44#define RTL818X_INT_RX_ERR (1 << 1)
45#define RTL818X_INT_TXL_OK (1 << 2)
46#define RTL818X_INT_TXL_ERR (1 << 3)
47#define RTL818X_INT_RX_DU (1 << 4)
48#define RTL818X_INT_RX_FO (1 << 5)
49#define RTL818X_INT_TXN_OK (1 << 6)
50#define RTL818X_INT_TXN_ERR (1 << 7)
51#define RTL818X_INT_TXH_OK (1 << 8)
52#define RTL818X_INT_TXH_ERR (1 << 9)
53#define RTL818X_INT_TXB_OK (1 << 10)
54#define RTL818X_INT_TXB_ERR (1 << 11)
55#define RTL818X_INT_ATIM (1 << 12)
56#define RTL818X_INT_BEACON (1 << 13)
57#define RTL818X_INT_TIME_OUT (1 << 14)
58#define RTL818X_INT_TX_FO (1 << 15)
59 __le32 TX_CONF;
60#define RTL818X_TX_CONF_LOOPBACK_MAC (1 << 17)
61#define RTL818X_TX_CONF_NO_ICV (1 << 19)
62#define RTL818X_TX_CONF_DISCW (1 << 20)
63#define RTL818X_TX_CONF_R8180_ABCD (2 << 25)
64#define RTL818X_TX_CONF_R8180_F (3 << 25)
65#define RTL818X_TX_CONF_R8185_ABC (4 << 25)
66#define RTL818X_TX_CONF_R8185_D (5 << 25)
67#define RTL818X_TX_CONF_HWVER_MASK (7 << 25)
68#define RTL818X_TX_CONF_CW_MIN (1 << 31)
69 __le32 RX_CONF;
70#define RTL818X_RX_CONF_MONITOR (1 << 0)
71#define RTL818X_RX_CONF_NICMAC (1 << 1)
72#define RTL818X_RX_CONF_MULTICAST (1 << 2)
73#define RTL818X_RX_CONF_BROADCAST (1 << 3)
74#define RTL818X_RX_CONF_DATA (1 << 18)
75#define RTL818X_RX_CONF_CTRL (1 << 19)
76#define RTL818X_RX_CONF_MGMT (1 << 20)
77#define RTL818X_RX_CONF_BSSID (1 << 23)
78#define RTL818X_RX_CONF_RX_AUTORESETPHY (1 << 28)
79#define RTL818X_RX_CONF_ONLYERLPKT (1 << 31)
80 __le32 INT_TIMEOUT;
81 __le32 TBDA;
82 u8 EEPROM_CMD;
83#define RTL818X_EEPROM_CMD_READ (1 << 0)
84#define RTL818X_EEPROM_CMD_WRITE (1 << 1)
85#define RTL818X_EEPROM_CMD_CK (1 << 2)
86#define RTL818X_EEPROM_CMD_CS (1 << 3)
87#define RTL818X_EEPROM_CMD_NORMAL (0 << 6)
88#define RTL818X_EEPROM_CMD_LOAD (1 << 6)
89#define RTL818X_EEPROM_CMD_PROGRAM (2 << 6)
90#define RTL818X_EEPROM_CMD_CONFIG (3 << 6)
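/* Editorial note (not part of the original patch): bits 7:6 of EEPROM_CMD
 * select the chip's operating mode (normal, autoload, EEPROM programming,
 * config-register write), which is why the rtl8225 code above switches to
 * RTL818X_EEPROM_CMD_CONFIG before touching CONFIG3/ANAPARAM and back to
 * RTL818X_EEPROM_CMD_NORMAL afterwards. */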
91 u8 CONFIG0;
92 u8 CONFIG1;
93 u8 CONFIG2;
94 __le32 ANAPARAM;
95 u8 MSR;
96#define RTL818X_MSR_NO_LINK (0 << 2)
97#define RTL818X_MSR_ADHOC (1 << 2)
98#define RTL818X_MSR_INFRA (2 << 2)
99 u8 CONFIG3;
100#define RTL818X_CONFIG3_ANAPARAM_WRITE (1 << 6)
101 u8 CONFIG4;
102#define RTL818X_CONFIG4_POWEROFF (1 << 6)
103#define RTL818X_CONFIG4_VCOOFF (1 << 7)
104 u8 TESTR;
105 u8 reserved_9[2];
106 __le16 PGSELECT;
107 __le32 ANAPARAM2;
108 u8 reserved_10[12];
109 __le16 BEACON_INTERVAL;
110 __le16 ATIM_WND;
111 __le16 BEACON_INTERVAL_TIME;
112 __le16 ATIMTR_INTERVAL;
113 u8 reserved_11[4];
114 u8 PHY[4];
115 __le16 RFPinsOutput;
116 __le16 RFPinsEnable;
117 __le16 RFPinsSelect;
118 __le16 RFPinsInput;
119 __le32 RF_PARA;
120 __le32 RF_TIMING;
121 u8 GP_ENABLE;
122 u8 GPIO;
123 u8 reserved_12[10];
124 u8 TX_AGC_CTL;
125#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0)
126#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1)
127#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
128 u8 TX_GAIN_CCK;
129 u8 TX_GAIN_OFDM;
130 u8 TX_ANTENNA;
131 u8 reserved_13[16];
132 u8 WPA_CONF;
133 u8 reserved_14[3];
134 u8 SIFS;
135 u8 DIFS;
136 u8 SLOT;
137 u8 reserved_15[5];
138 u8 CW_CONF;
139#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0)
140#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1)
141 u8 CW_VAL;
142 u8 RATE_FALLBACK;
143 u8 reserved_16[25];
144 u8 CONFIG5;
145 u8 TX_DMA_POLLING;
146 u8 reserved_17[2];
147 __le16 CWR;
148 u8 RETRY_CTR;
149 u8 reserved_18[5];
150 __le32 RDSAR;
151 u8 reserved_19[18];
152 u16 TALLY_CNT;
153 u8 TALLY_SEL;
154} __attribute__((packed));
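/* Editorial sketch (not part of the original patch): because the structure is
 * packed, field offsets coincide with the device register map, e.g.
 *
 *	offsetof(struct rtl818x_csr, CMD)        == 0x37
 *	offsetof(struct rtl818x_csr, EEPROM_CMD) == 0x50
 *
 * so an access such as &priv->map->EEPROM_CMD resolves to register 0x50. */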
155
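/* Editorial note (not part of the original patch): .rate is in units of
 * 100 kb/s, so the CCK entries below are 1, 2, 5.5 and 11 Mb/s and the OFDM
 * entries 6-54 Mb/s; .val appears to be the hardware rate code the driver
 * programs into its TX descriptors. */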
156static const struct ieee80211_rate rtl818x_rates[] = {
157 { .rate = 10,
158 .val = 0,
159 .flags = IEEE80211_RATE_CCK },
160 { .rate = 20,
161 .val = 1,
162 .flags = IEEE80211_RATE_CCK },
163 { .rate = 55,
164 .val = 2,
165 .flags = IEEE80211_RATE_CCK },
166 { .rate = 110,
167 .val = 3,
168 .flags = IEEE80211_RATE_CCK },
169 { .rate = 60,
170 .val = 4,
171 .flags = IEEE80211_RATE_OFDM },
172 { .rate = 90,
173 .val = 5,
174 .flags = IEEE80211_RATE_OFDM },
175 { .rate = 120,
176 .val = 6,
177 .flags = IEEE80211_RATE_OFDM },
178 { .rate = 180,
179 .val = 7,
180 .flags = IEEE80211_RATE_OFDM },
181 { .rate = 240,
182 .val = 8,
183 .flags = IEEE80211_RATE_OFDM },
184 { .rate = 360,
185 .val = 9,
186 .flags = IEEE80211_RATE_OFDM },
187 { .rate = 480,
188 .val = 10,
189 .flags = IEEE80211_RATE_OFDM },
190 { .rate = 540,
191 .val = 11,
192 .flags = IEEE80211_RATE_OFDM },
193};
194
195static const struct ieee80211_channel rtl818x_channels[] = {
196 { .chan = 1,
197 .freq = 2412},
198 { .chan = 2,
199 .freq = 2417},
200 { .chan = 3,
201 .freq = 2422},
202 { .chan = 4,
203 .freq = 2427},
204 { .chan = 5,
205 .freq = 2432},
206 { .chan = 6,
207 .freq = 2437},
208 { .chan = 7,
209 .freq = 2442},
210 { .chan = 8,
211 .freq = 2447},
212 { .chan = 9,
213 .freq = 2452},
214 { .chan = 10,
215 .freq = 2457},
216 { .chan = 11,
217 .freq = 2462},
218 { .chan = 12,
219 .freq = 2467},
220 { .chan = 13,
221 .freq = 2472},
222 { .chan = 14,
223 .freq = 2484}
224};
225
226#endif /* RTL818X_H */
diff --git a/drivers/net/wireless/zd1211rw/Makefile b/drivers/net/wireless/zd1211rw/Makefile
index 6603ad5be63d..4d505903352c 100644
--- a/drivers/net/wireless/zd1211rw/Makefile
+++ b/drivers/net/wireless/zd1211rw/Makefile
@@ -3,7 +3,7 @@ obj-$(CONFIG_ZD1211RW) += zd1211rw.o
 zd1211rw-objs := zd_chip.o zd_ieee80211.o \
 		zd_mac.o zd_netdev.o \
 		zd_rf_al2230.o zd_rf_rf2959.o \
-		zd_rf_al7230b.o \
+		zd_rf_al7230b.o zd_rf_uw2453.o \
 		zd_rf.o zd_usb.o zd_util.o
 
 ifeq ($(CONFIG_ZD1211RW_DEBUG),y)
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 95b4a2a26707..5b624bfc01a6 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -1253,6 +1253,9 @@ static int update_channel_integration_and_calibration(struct zd_chip *chip,
 {
 	int r;
 
+	if (!zd_rf_should_update_pwr_int(&chip->rf))
+		return 0;
+
 	r = update_pwr_int(chip, channel);
 	if (r)
 		return r;
@@ -1283,7 +1286,7 @@ static int patch_cck_gain(struct zd_chip *chip)
 	int r;
 	u32 value;
 
-	if (!chip->patch_cck_gain)
+	if (!chip->patch_cck_gain || !zd_rf_should_patch_cck_gain(&chip->rf))
 		return 0;
 
 	ZD_ASSERT(mutex_is_locked(&chip->mutex));
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index ce0a5f6da0d2..79d0288c193a 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -608,6 +608,9 @@ enum {
 #define CR_ZD1211B_TXOP		CTL_REG(0x0b20)
 #define CR_ZD1211B_RETRY_MAX	CTL_REG(0x0b28)
 
+/* Used to detect PLL lock */
+#define UW2453_INTR_REG		((zd_addr_t)0x85c1)
+
 #define CWIN_SIZE		0x007f043f
 
 
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.c b/drivers/net/wireless/zd1211rw/zd_rf.c
index 549c23bcd6cc..7407409b60b1 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf.c
@@ -52,34 +52,38 @@ const char *zd_rf_name(u8 type)
 void zd_rf_init(struct zd_rf *rf)
 {
 	memset(rf, 0, sizeof(*rf));
+
+	/* default to update channel integration, as almost all RF's do want
+	 * this */
+	rf->update_channel_int = 1;
 }
 
 void zd_rf_clear(struct zd_rf *rf)
 {
+	if (rf->clear)
+		rf->clear(rf);
 	ZD_MEMCLEAR(rf, sizeof(*rf));
 }
 
 int zd_rf_init_hw(struct zd_rf *rf, u8 type)
 {
-	int r, t;
+	int r = 0;
+	int t;
 	struct zd_chip *chip = zd_rf_to_chip(rf);
 
 	ZD_ASSERT(mutex_is_locked(&chip->mutex));
 	switch (type) {
 	case RF2959_RF:
 		r = zd_rf_init_rf2959(rf);
-		if (r)
-			return r;
 		break;
 	case AL2230_RF:
 		r = zd_rf_init_al2230(rf);
-		if (r)
-			return r;
 		break;
 	case AL7230B_RF:
 		r = zd_rf_init_al7230b(rf);
-		if (r)
-			return r;
+		break;
+	case UW2453_RF:
+		r = zd_rf_init_uw2453(rf);
 		break;
 	default:
 		dev_err(zd_chip_dev(chip),
@@ -88,6 +92,9 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
 		return -ENODEV;
 	}
 
+	if (r)
+		return r;
+
 	rf->type = type;
 
 	r = zd_chip_lock_phy_regs(chip);
diff --git a/drivers/net/wireless/zd1211rw/zd_rf.h b/drivers/net/wireless/zd1211rw/zd_rf.h
index aa9cc105ce60..c6dfd8227f6e 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf.h
+++ b/drivers/net/wireless/zd1211rw/zd_rf.h
@@ -48,12 +48,26 @@ struct zd_rf {
 
 	u8 channel;
 
+	/* whether channel integration and calibration should be updated
+	 * defaults to 1 (yes) */
+	u8 update_channel_int:1;
+
+	/* whether CR47 should be patched from the EEPROM, if the appropriate
+	 * flag is set in the POD. The vendor driver suggests that this should
+	 * be done for all RF's, but a bug in their code prevents their
+	 * HW_OverWritePhyRegFromE2P() routine from ever taking effect. */
+	u8 patch_cck_gain:1;
+
+	/* private RF driver data */
+	void *priv;
+
 	/* RF-specific functions */
 	int (*init_hw)(struct zd_rf *rf);
 	int (*set_channel)(struct zd_rf *rf, u8 channel);
 	int (*switch_radio_on)(struct zd_rf *rf);
 	int (*switch_radio_off)(struct zd_rf *rf);
 	int (*patch_6m_band_edge)(struct zd_rf *rf, u8 channel);
+	void (*clear)(struct zd_rf *rf);
 };
 
 const char *zd_rf_name(u8 type);
@@ -71,10 +85,24 @@ int zd_switch_radio_off(struct zd_rf *rf);
 int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel);
 int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel);
 
+static inline int zd_rf_should_update_pwr_int(struct zd_rf *rf)
+{
+	return rf->update_channel_int;
+}
+
+static inline int zd_rf_should_patch_cck_gain(struct zd_rf *rf)
+{
+	return rf->patch_cck_gain;
+}
+
+int zd_rf_patch_6m_band_edge(struct zd_rf *rf, u8 channel);
+int zd_rf_generic_patch_6m(struct zd_rf *rf, u8 channel);
+
 /* Functions for individual RF chips */
 
 int zd_rf_init_rf2959(struct zd_rf *rf);
 int zd_rf_init_al2230(struct zd_rf *rf);
 int zd_rf_init_al7230b(struct zd_rf *rf);
+int zd_rf_init_uw2453(struct zd_rf *rf);
 
 #endif /* _ZD_RF_H */
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
index 511392acfedf..e7a4ecf7b6e2 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al2230.c
@@ -432,5 +432,6 @@ int zd_rf_init_al2230(struct zd_rf *rf)
 		rf->switch_radio_on = zd1211_al2230_switch_radio_on;
 	}
 	rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
+	rf->patch_cck_gain = 1;
 	return 0;
 }
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
index 5e5e9ddc6a74..f4e8b6ada854 100644
--- a/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
+++ b/drivers/net/wireless/zd1211rw/zd_rf_al7230b.c
@@ -483,6 +483,7 @@ int zd_rf_init_al7230b(struct zd_rf *rf)
 		rf->switch_radio_on = zd1211_al7230b_switch_radio_on;
 		rf->set_channel = zd1211_al7230b_set_channel;
 		rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
+		rf->patch_cck_gain = 1;
 	}
 
 	rf->switch_radio_off = al7230b_switch_radio_off;
diff --git a/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
new file mode 100644
index 000000000000..414e40d571ab
--- /dev/null
+++ b/drivers/net/wireless/zd1211rw/zd_rf_uw2453.c
@@ -0,0 +1,534 @@
1/* zd_rf_uw2453.c: Functions for the UW2453 RF controller
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License as published by
5 * the Free Software Foundation; either version 2 of the License, or
6 * (at your option) any later version.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 */
17
18#include <linux/kernel.h>
19
20#include "zd_rf.h"
21#include "zd_usb.h"
22#include "zd_chip.h"
23
24/* This RF programming code is based upon the code found in v2.16.0.0 of the
25 * ZyDAS vendor driver. Unlike other RF's, Ubec publish full technical specs
26 * for this RF on their website, so we're able to understand more than
27 * usual as to what is going on. Thumbs up for Ubec for doing that. */
28
29/* The 3-wire serial interface provides access to 8 write-only registers.
30 * The data format is a 4 bit register address followed by a 20 bit value. */
31#define UW2453_REGWRITE(reg, val) ((((reg) & 0xf) << 20) | ((val) & 0xfffff))
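/* Editorial example (not part of the original patch): the macro places the
 * register address in bits 23:20 and the value in bits 19:0, so
 * UW2453_REGWRITE(1, 0x47) == 0x100047 and
 * UW2453_REGWRITE(7, 0x3fffe) == 0x73fffe. */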
32
33/* For channel tuning, we have to configure registers 1 (synthesizer), 2 (synth
34 * fractional divide ratio) and 3 (VCO config).
35 *
36 * We configure the RF to produce an interrupt when the PLL is locked onto
37 * the configured frequency. During initialization, we run through a variety
38 * of different VCO configurations on channel 1 until we detect a PLL lock.
39 * When this happens, we remember which VCO configuration produced the lock
40 * and use it later. Actually, we use the configuration *after* the one that
41 * produced the lock, which seems odd, but it works.
42 *
43 * If we do not see a PLL lock on any standard VCO config, we fall back on an
44 * autocal configuration, which has a fixed (as opposed to per-channel) VCO
45 * config and different synth values from the standard set (divide ratio
46 * is still shared with the standard set). */
47
48/* The per-channel synth values for all standard VCO configurations. These get
49 * written to register 1. */
50static const u8 uw2453_std_synth[] = {
51 RF_CHANNEL( 1) = 0x47,
52 RF_CHANNEL( 2) = 0x47,
53 RF_CHANNEL( 3) = 0x67,
54 RF_CHANNEL( 4) = 0x67,
55 RF_CHANNEL( 5) = 0x67,
56 RF_CHANNEL( 6) = 0x67,
57 RF_CHANNEL( 7) = 0x57,
58 RF_CHANNEL( 8) = 0x57,
59 RF_CHANNEL( 9) = 0x57,
60 RF_CHANNEL(10) = 0x57,
61 RF_CHANNEL(11) = 0x77,
62 RF_CHANNEL(12) = 0x77,
63 RF_CHANNEL(13) = 0x77,
64 RF_CHANNEL(14) = 0x4f,
65};
66
67/* This table stores the synthesizer fractional divide ratio for *all* VCO
68 * configurations (both standard and autocal). These get written to register 2.
69 */
70static const u16 uw2453_synth_divide[] = {
71 RF_CHANNEL( 1) = 0x999,
72 RF_CHANNEL( 2) = 0x99b,
73 RF_CHANNEL( 3) = 0x998,
74 RF_CHANNEL( 4) = 0x99a,
75 RF_CHANNEL( 5) = 0x999,
76 RF_CHANNEL( 6) = 0x99b,
77 RF_CHANNEL( 7) = 0x998,
78 RF_CHANNEL( 8) = 0x99a,
79 RF_CHANNEL( 9) = 0x999,
80 RF_CHANNEL(10) = 0x99b,
81 RF_CHANNEL(11) = 0x998,
82 RF_CHANNEL(12) = 0x99a,
83 RF_CHANNEL(13) = 0x999,
84 RF_CHANNEL(14) = 0xccc,
85};
86
87/* Here is the data for all the standard VCO configurations. We shrink our
88 * table a little by observing that both channels in a consecutive pair share
89 * the same value. We also observe that the high 4 bits ([0:3] in the specs)
90 * are all 'Reserved' and are always set to 0x4 - we chop them off in the data
91 * below. */
92#define CHAN_TO_PAIRIDX(a) ((a - 1) / 2)
93#define RF_CHANPAIR(a,b) [CHAN_TO_PAIRIDX(a)]
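/* Editorial example (not part of the original patch): CHAN_TO_PAIRIDX maps
 * both channels of a pair to a single slot (channels 1 and 2 -> index 0,
 * channels 13 and 14 -> index 6), which is why each VCO table below needs
 * only 7 entries to cover all 14 channels. */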
94static const u16 uw2453_std_vco_cfg[][7] = {
95 { /* table 1 */
96 RF_CHANPAIR( 1, 2) = 0x664d,
97 RF_CHANPAIR( 3, 4) = 0x604d,
98 RF_CHANPAIR( 5, 6) = 0x6675,
99 RF_CHANPAIR( 7, 8) = 0x6475,
100 RF_CHANPAIR( 9, 10) = 0x6655,
101 RF_CHANPAIR(11, 12) = 0x6455,
102 RF_CHANPAIR(13, 14) = 0x6665,
103 },
104 { /* table 2 */
105 RF_CHANPAIR( 1, 2) = 0x666d,
106 RF_CHANPAIR( 3, 4) = 0x606d,
107 RF_CHANPAIR( 5, 6) = 0x664d,
108 RF_CHANPAIR( 7, 8) = 0x644d,
109 RF_CHANPAIR( 9, 10) = 0x6675,
110 RF_CHANPAIR(11, 12) = 0x6475,
111 RF_CHANPAIR(13, 14) = 0x6655,
112 },
113 { /* table 3 */
114 RF_CHANPAIR( 1, 2) = 0x665d,
115 RF_CHANPAIR( 3, 4) = 0x605d,
116 RF_CHANPAIR( 5, 6) = 0x666d,
117 RF_CHANPAIR( 7, 8) = 0x646d,
118 RF_CHANPAIR( 9, 10) = 0x664d,
119 RF_CHANPAIR(11, 12) = 0x644d,
120 RF_CHANPAIR(13, 14) = 0x6675,
121 },
122 { /* table 4 */
123 RF_CHANPAIR( 1, 2) = 0x667d,
124 RF_CHANPAIR( 3, 4) = 0x607d,
125 RF_CHANPAIR( 5, 6) = 0x665d,
126 RF_CHANPAIR( 7, 8) = 0x645d,
127 RF_CHANPAIR( 9, 10) = 0x666d,
128 RF_CHANPAIR(11, 12) = 0x646d,
129 RF_CHANPAIR(13, 14) = 0x664d,
130 },
131 { /* table 5 */
132 RF_CHANPAIR( 1, 2) = 0x6643,
133 RF_CHANPAIR( 3, 4) = 0x6043,
134 RF_CHANPAIR( 5, 6) = 0x667d,
135 RF_CHANPAIR( 7, 8) = 0x647d,
136 RF_CHANPAIR( 9, 10) = 0x665d,
137 RF_CHANPAIR(11, 12) = 0x645d,
138 RF_CHANPAIR(13, 14) = 0x666d,
139 },
140 { /* table 6 */
141 RF_CHANPAIR( 1, 2) = 0x6663,
142 RF_CHANPAIR( 3, 4) = 0x6063,
143 RF_CHANPAIR( 5, 6) = 0x6643,
144 RF_CHANPAIR( 7, 8) = 0x6443,
145 RF_CHANPAIR( 9, 10) = 0x667d,
146 RF_CHANPAIR(11, 12) = 0x647d,
147 RF_CHANPAIR(13, 14) = 0x665d,
148 },
149 { /* table 7 */
150 RF_CHANPAIR( 1, 2) = 0x6653,
151 RF_CHANPAIR( 3, 4) = 0x6053,
152 RF_CHANPAIR( 5, 6) = 0x6663,
153 RF_CHANPAIR( 7, 8) = 0x6463,
154 RF_CHANPAIR( 9, 10) = 0x6643,
155 RF_CHANPAIR(11, 12) = 0x6443,
156 RF_CHANPAIR(13, 14) = 0x667d,
157 },
158 { /* table 8 */
159 RF_CHANPAIR( 1, 2) = 0x6673,
160 RF_CHANPAIR( 3, 4) = 0x6073,
161 RF_CHANPAIR( 5, 6) = 0x6653,
162 RF_CHANPAIR( 7, 8) = 0x6453,
163 RF_CHANPAIR( 9, 10) = 0x6663,
164 RF_CHANPAIR(11, 12) = 0x6463,
165 RF_CHANPAIR(13, 14) = 0x6643,
166 },
167 { /* table 9 */
168 RF_CHANPAIR( 1, 2) = 0x664b,
169 RF_CHANPAIR( 3, 4) = 0x604b,
170 RF_CHANPAIR( 5, 6) = 0x6673,
171 RF_CHANPAIR( 7, 8) = 0x6473,
172 RF_CHANPAIR( 9, 10) = 0x6653,
173 RF_CHANPAIR(11, 12) = 0x6453,
174 RF_CHANPAIR(13, 14) = 0x6663,
175 },
176 { /* table 10 */
177 RF_CHANPAIR( 1, 2) = 0x666b,
178 RF_CHANPAIR( 3, 4) = 0x606b,
179 RF_CHANPAIR( 5, 6) = 0x664b,
180 RF_CHANPAIR( 7, 8) = 0x644b,
181 RF_CHANPAIR( 9, 10) = 0x6673,
182 RF_CHANPAIR(11, 12) = 0x6473,
183 RF_CHANPAIR(13, 14) = 0x6653,
184 },
185 { /* table 11 */
186 RF_CHANPAIR( 1, 2) = 0x665b,
187 RF_CHANPAIR( 3, 4) = 0x605b,
188 RF_CHANPAIR( 5, 6) = 0x666b,
189 RF_CHANPAIR( 7, 8) = 0x646b,
190 RF_CHANPAIR( 9, 10) = 0x664b,
191 RF_CHANPAIR(11, 12) = 0x644b,
192 RF_CHANPAIR(13, 14) = 0x6673,
193 },
194
195};
196
197/* The per-channel synth values for autocal. These get written to register 1. */
198static const u16 uw2453_autocal_synth[] = {
199 RF_CHANNEL( 1) = 0x6847,
200 RF_CHANNEL( 2) = 0x6847,
201 RF_CHANNEL( 3) = 0x6867,
202 RF_CHANNEL( 4) = 0x6867,
203 RF_CHANNEL( 5) = 0x6867,
204 RF_CHANNEL( 6) = 0x6867,
205 RF_CHANNEL( 7) = 0x6857,
206 RF_CHANNEL( 8) = 0x6857,
207 RF_CHANNEL( 9) = 0x6857,
208 RF_CHANNEL(10) = 0x6857,
209 RF_CHANNEL(11) = 0x6877,
210 RF_CHANNEL(12) = 0x6877,
211 RF_CHANNEL(13) = 0x6877,
212 RF_CHANNEL(14) = 0x684f,
213};
214
215/* The VCO configuration for autocal (all channels) */
216static const u16 UW2453_AUTOCAL_VCO_CFG = 0x6662;
217
218/* TX gain settings. The array index corresponds to the TX power integration
219 * values found in the EEPROM. The values get written to register 7. */
220static u32 uw2453_txgain[] = {
221 [0x00] = 0x0e313,
222 [0x01] = 0x0fb13,
223 [0x02] = 0x0e093,
224 [0x03] = 0x0f893,
225 [0x04] = 0x0ea93,
226 [0x05] = 0x1f093,
227 [0x06] = 0x1f493,
228 [0x07] = 0x1f693,
229 [0x08] = 0x1f393,
230 [0x09] = 0x1f35b,
231 [0x0a] = 0x1e6db,
232 [0x0b] = 0x1ff3f,
233 [0x0c] = 0x1ffff,
234 [0x0d] = 0x361d7,
235 [0x0e] = 0x37fbf,
236 [0x0f] = 0x3ff8b,
237 [0x10] = 0x3ff33,
238 [0x11] = 0x3fb3f,
239 [0x12] = 0x3ffff,
240};
241
242/* RF-specific structure */
243struct uw2453_priv {
244 /* index into synth/VCO config tables where PLL lock was found
245 * -1 means autocal */
246 int config;
247};
248
249#define UW2453_PRIV(rf) ((struct uw2453_priv *) (rf)->priv)
250
251static int uw2453_synth_set_channel(struct zd_chip *chip, int channel,
252 bool autocal)
253{
254 int r;
255 int idx = channel - 1;
256 u32 val;
257
258 if (autocal)
259 val = UW2453_REGWRITE(1, uw2453_autocal_synth[idx]);
260 else
261 val = UW2453_REGWRITE(1, uw2453_std_synth[idx]);
262
263 r = zd_rfwrite_locked(chip, val, RF_RV_BITS);
264 if (r)
265 return r;
266
267 return zd_rfwrite_locked(chip,
268 UW2453_REGWRITE(2, uw2453_synth_divide[idx]), RF_RV_BITS);
269}
270
271static int uw2453_write_vco_cfg(struct zd_chip *chip, u16 value)
272{
273 /* vendor driver always sets these upper bits even though the specs say
274 * they are reserved */
275 u32 val = 0x40000 | value;
276 return zd_rfwrite_locked(chip, UW2453_REGWRITE(3, val), RF_RV_BITS);
277}
278
279static int uw2453_init_mode(struct zd_chip *chip)
280{
281 static const u32 rv[] = {
282 UW2453_REGWRITE(0, 0x25f98), /* enter IDLE mode */
283 UW2453_REGWRITE(0, 0x25f9a), /* enter CAL_VCO mode */
284 UW2453_REGWRITE(0, 0x25f94), /* enter RX/TX mode */
285 UW2453_REGWRITE(0, 0x27fd4), /* power down RSSI circuit */
286 };
287
288 return zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
289}
290
291static int uw2453_set_tx_gain_level(struct zd_chip *chip, int channel)
292{
293 u8 int_value = chip->pwr_int_values[channel - 1];
294
295 if (int_value >= ARRAY_SIZE(uw2453_txgain)) {
296 dev_dbg_f(zd_chip_dev(chip), "can't configure TX gain for "
297 "int value %x on channel %d\n", int_value, channel);
298 return 0;
299 }
300
301 return zd_rfwrite_locked(chip,
302 UW2453_REGWRITE(7, uw2453_txgain[int_value]), RF_RV_BITS);
303}
304
305static int uw2453_init_hw(struct zd_rf *rf)
306{
307 int i, r;
308 int found_config = -1;
309 u16 intr_status;
310 struct zd_chip *chip = zd_rf_to_chip(rf);
311
312 static const struct zd_ioreq16 ioreqs[] = {
313 { CR10, 0x89 }, { CR15, 0x20 },
314 { CR17, 0x28 }, /* 6112 no change */
315 { CR23, 0x38 }, { CR24, 0x20 }, { CR26, 0x93 },
316 { CR27, 0x15 }, { CR28, 0x3e }, { CR29, 0x00 },
317 { CR33, 0x28 }, { CR34, 0x30 },
318 { CR35, 0x43 }, /* 6112 3e->43 */
319 { CR41, 0x24 }, { CR44, 0x32 },
320 { CR46, 0x92 }, /* 6112 96->92 */
321 { CR47, 0x1e },
322 { CR48, 0x04 }, /* 5602 Roger */
323 { CR49, 0xfa }, { CR79, 0x58 }, { CR80, 0x30 },
324 { CR81, 0x30 }, { CR87, 0x0a }, { CR89, 0x04 },
325 { CR91, 0x00 }, { CR92, 0x0a }, { CR98, 0x8d },
326 { CR99, 0x28 }, { CR100, 0x02 },
327 { CR101, 0x09 }, /* 6112 13->1f 6220 1f->13 6407 13->9 */
328 { CR102, 0x27 },
329 { CR106, 0x1c }, /* 5d07 5112 1f->1c 6220 1c->1f 6221 1f->1c */
330 { CR107, 0x1c }, /* 6220 1c->1a 5221 1a->1c */
331 { CR109, 0x13 },
332 { CR110, 0x1f }, /* 6112 13->1f 6221 1f->13 6407 13->0x09 */
333 { CR111, 0x13 }, { CR112, 0x1f }, { CR113, 0x27 },
334 { CR114, 0x23 }, /* 6221 27->23 */
335 { CR115, 0x24 }, /* 6112 24->1c 6220 1c->24 */
336 { CR116, 0x24 }, /* 6220 1c->24 */
337 { CR117, 0xfa }, /* 6112 fa->f8 6220 f8->f4 6220 f4->fa */
338 { CR118, 0xf0 }, /* 5d07 6112 f0->f2 6220 f2->f0 */
339 { CR119, 0x1a }, /* 6112 1a->10 6220 10->14 6220 14->1a */
340 { CR120, 0x4f },
341 { CR121, 0x1f }, /* 6220 4f->1f */
342 { CR122, 0xf0 }, { CR123, 0x57 }, { CR125, 0xad },
343 { CR126, 0x6c }, { CR127, 0x03 },
344 { CR128, 0x14 }, /* 6302 12->11 */
345 { CR129, 0x12 }, /* 6301 10->0f */
346 { CR130, 0x10 }, { CR137, 0x50 }, { CR138, 0xa8 },
347 { CR144, 0xac }, { CR146, 0x20 }, { CR252, 0xff },
348 { CR253, 0xff },
349 };
350
351 static const u32 rv[] = {
352 UW2453_REGWRITE(4, 0x2b), /* configure receiver gain */
353 UW2453_REGWRITE(5, 0x19e4f), /* configure transmitter gain */
354 UW2453_REGWRITE(6, 0xf81ad), /* enable RX/TX filter tuning */
355 UW2453_REGWRITE(7, 0x3fffe), /* disable TX gain in test mode */
356
357 /* enter CAL_FIL mode, TX gain set by registers, RX gain set by pins,
358 * RSSI circuit powered down, reduced RSSI range */
359 UW2453_REGWRITE(0, 0x25f9c), /* 5d01 cal_fil */
360
361 /* synthesizer configuration for channel 1 */
362 UW2453_REGWRITE(1, 0x47),
363 UW2453_REGWRITE(2, 0x999),
364
365 /* disable manual VCO band selection */
366 UW2453_REGWRITE(3, 0x7602),
367
368 /* enable manual VCO band selection, configure current level */
369 UW2453_REGWRITE(3, 0x46063),
370 };
371
372 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
373 if (r)
374 return r;
375
376 r = zd_rfwritev_locked(chip, rv, ARRAY_SIZE(rv), RF_RV_BITS);
377 if (r)
378 return r;
379
380 r = uw2453_init_mode(chip);
381 if (r)
382 return r;
383
384 /* Try all standard VCO configuration settings on channel 1 */
385 for (i = 0; i < ARRAY_SIZE(uw2453_std_vco_cfg) - 1; i++) {
386 /* Configure synthesizer for channel 1 */
387 r = uw2453_synth_set_channel(chip, 1, false);
388 if (r)
389 return r;
390
391 /* Write VCO config */
392 r = uw2453_write_vco_cfg(chip, uw2453_std_vco_cfg[i][0]);
393 if (r)
394 return r;
395
396 /* ack interrupt event */
397 r = zd_iowrite16_locked(chip, 0x0f, UW2453_INTR_REG);
398 if (r)
399 return r;
400
401 /* check interrupt status */
402 r = zd_ioread16_locked(chip, &intr_status, UW2453_INTR_REG);
403 if (r)
404 return r;
405
406 if (!(intr_status & 0xf)) {
407 dev_dbg_f(zd_chip_dev(chip),
408 "PLL locked on configuration %d\n", i);
409 found_config = i;
410 break;
411 }
412 }
413
414 if (found_config == -1) {
415 /* autocal */
416 dev_dbg_f(zd_chip_dev(chip),
417 "PLL did not lock, using autocal\n");
418
419 r = uw2453_synth_set_channel(chip, 1, true);
420 if (r)
421 return r;
422
423 r = uw2453_write_vco_cfg(chip, UW2453_AUTOCAL_VCO_CFG);
424 if (r)
425 return r;
426 }
427
428 /* To match the vendor driver behaviour, we use the configuration after
429 * the one that produced a lock. */
430 UW2453_PRIV(rf)->config = found_config + 1;
431
432 return zd_iowrite16_locked(chip, 0x06, CR203);
433}
434
435static int uw2453_set_channel(struct zd_rf *rf, u8 channel)
436{
437 int r;
438 u16 vco_cfg;
439 int config = UW2453_PRIV(rf)->config;
440 bool autocal = (config == -1);
441 struct zd_chip *chip = zd_rf_to_chip(rf);
442
443 static const struct zd_ioreq16 ioreqs[] = {
444 { CR80, 0x30 }, { CR81, 0x30 }, { CR79, 0x58 },
445 { CR12, 0xf0 }, { CR77, 0x1b }, { CR78, 0x58 },
446 };
447
448 r = uw2453_synth_set_channel(chip, channel, autocal);
449 if (r)
450 return r;
451
452 if (autocal)
453 vco_cfg = UW2453_AUTOCAL_VCO_CFG;
454 else
455 vco_cfg = uw2453_std_vco_cfg[config][CHAN_TO_PAIRIDX(channel)];
456
457 r = uw2453_write_vco_cfg(chip, vco_cfg);
458 if (r)
459 return r;
460
461 r = uw2453_init_mode(chip);
462 if (r)
463 return r;
464
465 r = zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
466 if (r)
467 return r;
468
469 r = uw2453_set_tx_gain_level(chip, channel);
470 if (r)
471 return r;
472
473 return zd_iowrite16_locked(chip, 0x06, CR203);
474}
475
476static int uw2453_switch_radio_on(struct zd_rf *rf)
477{
478 int r;
479 struct zd_chip *chip = zd_rf_to_chip(rf);
480 struct zd_ioreq16 ioreqs[] = {
481 { CR11, 0x00 }, { CR251, 0x3f },
482 };
483
484 /* enter RXTX mode */
485 r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f94), RF_RV_BITS);
486 if (r)
487 return r;
488
489 if (chip->is_zd1211b)
490 ioreqs[1].value = 0x7f;
491
492 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
493}
494
495static int uw2453_switch_radio_off(struct zd_rf *rf)
496{
497 int r;
498 struct zd_chip *chip = zd_rf_to_chip(rf);
499 static const struct zd_ioreq16 ioreqs[] = {
500 { CR11, 0x04 }, { CR251, 0x2f },
501 };
502
503 /* enter IDLE mode */
504 /* FIXME: shouldn't we go to SLEEP? sent email to zydas */
505 r = zd_rfwrite_locked(chip, UW2453_REGWRITE(0, 0x25f90), RF_RV_BITS);
506 if (r)
507 return r;
508
509 return zd_iowrite16a_locked(chip, ioreqs, ARRAY_SIZE(ioreqs));
510}
511
512static void uw2453_clear(struct zd_rf *rf)
513{
514 kfree(rf->priv);
515}
516
517int zd_rf_init_uw2453(struct zd_rf *rf)
518{
519 rf->init_hw = uw2453_init_hw;
520 rf->set_channel = uw2453_set_channel;
521 rf->switch_radio_on = uw2453_switch_radio_on;
522 rf->switch_radio_off = uw2453_switch_radio_off;
523 rf->patch_6m_band_edge = zd_rf_generic_patch_6m;
524 rf->clear = uw2453_clear;
525 /* we have our own TX integration code */
526 rf->update_channel_int = 0;
527
528 rf->priv = kmalloc(sizeof(struct uw2453_priv), GFP_KERNEL);
529 if (rf->priv == NULL)
530 return -ENOMEM;
531
532 return 0;
533}
534
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 8459549d0cee..740a2194fdde 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -54,6 +54,7 @@ static struct usb_device_id usb_ids[] = {
 	{ USB_DEVICE(0x0586, 0x3401), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
 	{ USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
+	{ USB_DEVICE(0x0586, 0x3407), .driver_info = DEVICE_ZD1211 },
 	/* ZD1211B */
 	{ USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
 	{ USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index 6b76babc7fbf..a0ea43598515 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -842,12 +842,16 @@ static struct pcmcia_device_id serial_ids[] = {
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "LINKSYS", "PCMLM336", 0xf7cb0b07, 0x7a821b58),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "MEGAHERTZ", "XJEM1144/CCEM1144", 0xf510db04, 0x52d21e1e),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "MICRO RESEARCH", "COMBO-L/M-336", 0xb2ced065, 0x3ced0555),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "NEC", "PK-UG-J001" ,0x18df0ba0 ,0x831b1064),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Diamonds Modem+Ethernet", 0xc2f80cd, 0x656947b9),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Ositech", "Trumpcard:Jack of Hearts Modem+Ethernet", 0xc2f80cd, 0xdc9ba5ed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "ComboCard", 0xdcfe12d3, 0xcd8906cc),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "PCMCIAs", "LanModem", 0xdcfe12d3, 0xc67c648f),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "TDK", "GlobalNetworker 3410/3412", 0x1eae9475, 0xd9a93bed),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05),
+	PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101),
 	PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070),
 	PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562),
 	PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0104, 0x0070),