author    Tony Luck <tony.luck@intel.com>    2005-06-29 18:21:41 -0400
committer Tony Luck <tony.luck@intel.com>    2005-06-29 18:21:41 -0400
commit    d18bfacff20f08aecf01bb971b110ca108eef3c7 (patch)
tree      255f862839c593c796e609328575b611e3f56cf3 /drivers
parent    a68db763af9b676590c3fe9ec3f17bf18015eb2f (diff)
parent    fd782a4a99d2d3e818b9465c427b10f7f027d7da (diff)
Auto merge with /home/aegl/GIT/linus
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/container.c | 2
-rw-r--r--  drivers/acpi/pci_bind.c | 27
-rw-r--r--  drivers/acpi/pci_root.c | 24
-rw-r--r--  drivers/acpi/processor_core.c | 2
-rw-r--r--  drivers/acpi/scan.c | 126
-rw-r--r--  drivers/base/firmware_class.c | 13
-rw-r--r--  drivers/block/cciss.c | 1
-rw-r--r--  drivers/block/ll_rw_blk.c | 151
-rw-r--r--  drivers/char/agp/amd64-agp.c | 9
-rw-r--r--  drivers/char/ipmi/ipmi_msghandler.c | 4
-rw-r--r--  drivers/char/moxa.c | 2
-rw-r--r--  drivers/char/rio/rio_linux.c | 4
-rw-r--r--  drivers/char/rtc.c | 16
-rw-r--r--  drivers/char/tipar.c | 2
-rw-r--r--  drivers/char/tty_ioctl.c | 4
-rw-r--r--  drivers/char/vt_ioctl.c | 5
-rw-r--r--  drivers/char/watchdog/ixp2000_wdt.c | 2
-rw-r--r--  drivers/char/watchdog/ixp4xx_wdt.c | 2
-rw-r--r--  drivers/isdn/hardware/eicon/dadapter.c | 2
-rw-r--r--  drivers/isdn/hisax/hfc4s8s_l1.c | 8
-rw-r--r--  drivers/isdn/hysdn/hycapi.c | 20
-rw-r--r--  drivers/isdn/hysdn/hysdn_boot.c | 4
-rw-r--r--  drivers/isdn/hysdn/hysdn_defs.h | 12
-rw-r--r--  drivers/isdn/hysdn/hysdn_init.c | 2
-rw-r--r--  drivers/isdn/hysdn/hysdn_proclog.c | 4
-rw-r--r--  drivers/md/md.c | 1
-rw-r--r--  drivers/media/video/Makefile | 3
-rw-r--r--  drivers/media/video/bttv-driver.c | 73
-rw-r--r--  drivers/media/video/bttvp.h | 5
-rw-r--r--  drivers/media/video/mt20xx.c | 6
-rw-r--r--  drivers/media/video/tda8290.c | 20
-rw-r--r--  drivers/media/video/tda9887.c | 7
-rw-r--r--  drivers/media/video/tea5767.c | 334
-rw-r--r--  drivers/media/video/tuner-core.c | 163
-rw-r--r--  drivers/media/video/tuner-simple.c | 34
-rw-r--r--  drivers/message/fusion/mptfc.c | 4
-rw-r--r--  drivers/message/fusion/mptscsih.c | 10
-rw-r--r--  drivers/message/fusion/mptscsih.h | 2
-rw-r--r--  drivers/message/fusion/mptspi.c | 4
-rw-r--r--  drivers/net/3c515.c | 4
-rw-r--r--  drivers/net/3c59x.c | 6
-rw-r--r--  drivers/net/8139cp.c | 4
-rw-r--r--  drivers/net/82596.c | 14
-rwxr-xr-x  drivers/net/amd8111e.c | 8
-rw-r--r--  drivers/net/at1700.c | 4
-rw-r--r--  drivers/net/dl2k.c | 8
-rw-r--r--  drivers/net/e100.c | 9
-rw-r--r--  drivers/net/eepro100.c | 8
-rw-r--r--  drivers/net/epic100.c | 6
-rw-r--r--  drivers/net/fealnx.c | 8
-rw-r--r--  drivers/net/hamachi.c | 12
-rw-r--r--  drivers/net/lance.c | 2
-rw-r--r--  drivers/net/lasi_82596.c | 8
-rw-r--r--  drivers/net/natsemi.c | 4
-rw-r--r--  drivers/net/ns83820.c | 4
-rw-r--r--  drivers/net/pcnet32.c | 6
-rw-r--r--  drivers/net/r8169.c | 4
-rw-r--r--  drivers/net/s2io.c | 8
-rw-r--r--  drivers/net/sb1250-mac.c | 4
-rw-r--r--  drivers/net/sis900.c | 6
-rw-r--r--  drivers/net/skge.c | 1710
-rw-r--r--  drivers/net/skge.h | 586
-rw-r--r--  drivers/net/slip.c | 1
-rw-r--r--  drivers/net/smc91x.c | 2
-rw-r--r--  drivers/net/smc91x.h | 13
-rw-r--r--  drivers/net/starfire.c | 6
-rw-r--r--  drivers/net/sundance.c | 6
-rw-r--r--  drivers/net/tulip/de2104x.c | 6
-rw-r--r--  drivers/net/tulip/dmfe.c | 10
-rw-r--r--  drivers/net/tulip/interrupt.c | 10
-rw-r--r--  drivers/net/tulip/tulip_core.c | 2
-rw-r--r--  drivers/net/tulip/winbond-840.c | 6
-rw-r--r--  drivers/net/tulip/xircom_tulip_cb.c | 4
-rw-r--r--  drivers/net/typhoon.c | 4
-rw-r--r--  drivers/net/via-rhine.c | 17
-rw-r--r--  drivers/net/via-velocity.c | 6
-rw-r--r--  drivers/net/wan/hdlc_cisco.c | 2
-rw-r--r--  drivers/net/yellowfin.c | 8
-rw-r--r--  drivers/parisc/dino.c | 1
-rw-r--r--  drivers/parisc/lba_pci.c | 2
-rw-r--r--  drivers/pci/bus.c | 11
-rw-r--r--  drivers/pci/hotplug/Makefile | 4
-rw-r--r--  drivers/pci/hotplug/acpiphp.h | 47
-rw-r--r--  drivers/pci/hotplug/acpiphp_core.c | 9
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c | 882
-rw-r--r--  drivers/pci/hotplug/acpiphp_pci.c | 449
-rw-r--r--  drivers/pci/hotplug/acpiphp_res.c | 700
-rw-r--r--  drivers/pci/hotplug/cpqphp_core.c | 5
-rw-r--r--  drivers/pci/msi.c | 88
-rw-r--r--  drivers/pci/msi.h | 9
-rw-r--r--  drivers/pci/pci-sysfs.c | 26
-rw-r--r--  drivers/pci/probe.c | 29
-rw-r--r--  drivers/pci/proc.c | 14
-rw-r--r--  drivers/pci/remove.c | 14
-rw-r--r--  drivers/pci/setup-bus.c | 5
-rw-r--r--  drivers/scsi/3w-9xxx.c | 8
-rw-r--r--  drivers/scsi/3w-xxxx.c | 8
-rw-r--r--  drivers/scsi/ahci.c | 22
-rw-r--r--  drivers/scsi/ipr.c | 10
-rw-r--r--  drivers/scsi/libata-core.c | 6
-rw-r--r--  drivers/scsi/megaraid.c | 8
-rw-r--r--  drivers/scsi/scsi_lib.c | 2
102 files changed, 2520 insertions, 3489 deletions
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 5a0adbf8bc04..97013ddfa202 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -153,7 +153,7 @@ container_device_add(struct acpi_device **device, acpi_handle handle)
 		return_VALUE(-ENODEV);
 	}
 
-	result = acpi_bus_scan(*device);
+	result = acpi_bus_start(*device);
 
 	return_VALUE(result);
 }
diff --git a/drivers/acpi/pci_bind.c b/drivers/acpi/pci_bind.c
index 5d19b39e9e2b..5148f3c10b5c 100644
--- a/drivers/acpi/pci_bind.c
+++ b/drivers/acpi/pci_bind.c
@@ -61,15 +61,14 @@ acpi_pci_data_handler (
 
 
 /**
- * acpi_os_get_pci_id
+ * acpi_get_pci_id
  * ------------------
  * This function is used by the ACPI Interpreter (a.k.a. Core Subsystem)
  * to resolve PCI information for ACPI-PCI devices defined in the namespace.
  * This typically occurs when resolving PCI operation region information.
  */
-#ifdef ACPI_FUTURE_USAGE
 acpi_status
-acpi_os_get_pci_id (
+acpi_get_pci_id (
 	acpi_handle		handle,
 	struct acpi_pci_id	*id)
 {
@@ -78,7 +77,7 @@ acpi_os_get_pci_id (
 	struct acpi_device	*device = NULL;
 	struct acpi_pci_data	*data = NULL;
 
-	ACPI_FUNCTION_TRACE("acpi_os_get_pci_id");
+	ACPI_FUNCTION_TRACE("acpi_get_pci_id");
 
 	if (!id)
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
@@ -92,7 +91,7 @@ acpi_os_get_pci_id (
 	}
 
 	status = acpi_get_data(handle, acpi_pci_data_handler, (void**) &data);
-	if (ACPI_FAILURE(status) || !data || !data->dev) {
+	if (ACPI_FAILURE(status) || !data) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 			"Invalid ACPI-PCI context for device %s\n",
 			acpi_device_bid(device)));
@@ -115,7 +114,7 @@ acpi_os_get_pci_id (
 
 	return_ACPI_STATUS(AE_OK);
 }
-#endif  /*  ACPI_FUTURE_USAGE  */
+EXPORT_SYMBOL(acpi_get_pci_id);
 
 
 int
@@ -129,6 +128,8 @@ acpi_pci_bind (
 	char			*pathname = NULL;
 	struct acpi_buffer	buffer = {0, NULL};
 	acpi_handle		handle = NULL;
+	struct pci_dev		*dev;
+	struct pci_bus		*bus;
 
 	ACPI_FUNCTION_TRACE("acpi_pci_bind");
 
@@ -193,8 +194,20 @@ acpi_pci_bind (
 	 * Locate matching device in PCI namespace.  If it doesn't exist
 	 * this typically means that the device isn't currently inserted
 	 * (e.g. docking station, port replicator, etc.).
+	 * We cannot simply search the global pci device list, since
+	 * PCI devices are added to the global pci list when the root
+	 * bridge start ops are run, which may not have happened yet.
 	 */
-	data->dev = pci_find_slot(data->id.bus, PCI_DEVFN(data->id.device, data->id.function));
+	bus = pci_find_bus(data->id.segment, data->id.bus);
+	if (bus) {
+		list_for_each_entry(dev, &bus->devices, bus_list) {
+			if (dev->devfn == PCI_DEVFN(data->id.device,
+						    data->id.function)) {
+				data->dev = dev;
+				break;
+			}
+		}
+	}
 	if (!data->dev) {
 		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
 			"Device %02x:%02x:%02x.%02x not present in PCI namespace\n",
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 7e6b8e3b2ed4..5d2f77fcd50c 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -46,6 +46,7 @@ ACPI_MODULE_NAME ("pci_root")
 
 static int acpi_pci_root_add (struct acpi_device *device);
 static int acpi_pci_root_remove (struct acpi_device *device, int type);
+static int acpi_pci_root_start (struct acpi_device *device);
 
 static struct acpi_driver acpi_pci_root_driver = {
 	.name =		ACPI_PCI_ROOT_DRIVER_NAME,
@@ -54,6 +55,7 @@ static struct acpi_driver acpi_pci_root_driver = {
 	.ops =		{
 			.add =		acpi_pci_root_add,
 			.remove =	acpi_pci_root_remove,
+			.start =	acpi_pci_root_start,
 			},
 };
 
@@ -169,6 +171,7 @@ acpi_pci_root_add (
 	if (!root)
 		return_VALUE(-ENOMEM);
 	memset(root, 0, sizeof(struct acpi_pci_root));
+	INIT_LIST_HEAD(&root->node);
 
 	root->handle = device->handle;
 	strcpy(acpi_device_name(device), ACPI_PCI_ROOT_DEVICE_NAME);
@@ -298,12 +301,31 @@ acpi_pci_root_add (
 			root->id.bus);
 
 end:
-	if (result)
+	if (result) {
+		if (!list_empty(&root->node))
+			list_del(&root->node);
 		kfree(root);
+	}
 
 	return_VALUE(result);
 }
 
+static int
+acpi_pci_root_start (
+	struct acpi_device	*device)
+{
+	struct acpi_pci_root	*root;
+
+	ACPI_FUNCTION_TRACE("acpi_pci_root_start");
+
+	list_for_each_entry(root, &acpi_pci_roots, node) {
+		if (root->handle == device->handle) {
+			pci_bus_add_devices(root->bus);
+			return_VALUE(0);
+		}
+	}
+	return_VALUE(-ENODEV);
+}
 
 static int
 acpi_pci_root_remove (
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index f4778747e889..76156ac91bd3 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -723,7 +723,7 @@ int acpi_processor_device_add(
 		return_VALUE(-ENODEV);
 	}
 
-	acpi_bus_scan(*device);
+	acpi_bus_start(*device);
 
 	pr = acpi_driver_data(*device);
 	if (!pr)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index e85885593280..337d49b5564b 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -553,20 +553,29 @@ acpi_bus_driver_init (
 	 * upon possible configuration and currently allocated resources.
 	 */
 
+	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Driver successfully bound to device\n"));
+	return_VALUE(0);
+}
+
+int
+acpi_start_single_object (
+		struct acpi_device *device)
+{
+	int result = 0;
+	struct acpi_driver *driver;
+
+	ACPI_FUNCTION_TRACE("acpi_start_single_object");
+
+	if (!(driver = device->driver))
+		return_VALUE(0);
+
 	if (driver->ops.start) {
 		result = driver->ops.start(device);
 		if (result && driver->ops.remove)
 			driver->ops.remove(device, ACPI_BUS_REMOVAL_NORMAL);
-		return_VALUE(result);
 	}
 
-	ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Driver successfully bound to device\n"));
-
-	if (driver->ops.scan) {
-		driver->ops.scan(device);
-	}
-
-	return_VALUE(0);
+	return_VALUE(result);
 }
 
 static int acpi_driver_attach(struct acpi_driver * drv)
@@ -586,6 +595,7 @@ static int acpi_driver_attach(struct acpi_driver * drv)
 
 		if (!acpi_bus_match(dev, drv)) {
 			if (!acpi_bus_driver_init(dev, drv)) {
+				acpi_start_single_object(dev);
 				atomic_inc(&drv->references);
 				count++;
 				ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found driver [%s] for device [%s]\n",
@@ -1009,8 +1019,8 @@ acpi_bus_remove (
 }
 
 
-int
-acpi_bus_add (
+static int
+acpi_add_single_object (
 	struct acpi_device	**child,
 	struct acpi_device	*parent,
 	acpi_handle		handle,
@@ -1019,7 +1029,7 @@ acpi_bus_add (
 	int			result = 0;
 	struct acpi_device	*device = NULL;
 
-	ACPI_FUNCTION_TRACE("acpi_bus_add");
+	ACPI_FUNCTION_TRACE("acpi_add_single_object");
 
 	if (!child)
 		return_VALUE(-EINVAL);
@@ -1140,7 +1150,7 @@ acpi_bus_add (
 	 *
 	 * TBD: Assumes LDM provides driver hot-plug capability.
 	 */
-	acpi_bus_find_driver(device);
+	result = acpi_bus_find_driver(device);
 
 end:
 	if (!result)
@@ -1153,10 +1163,10 @@ end:
 
 	return_VALUE(result);
 }
-EXPORT_SYMBOL(acpi_bus_add);
 
 
-int acpi_bus_scan (struct acpi_device *start)
+static int acpi_bus_scan (struct acpi_device *start,
+			  struct acpi_bus_ops *ops)
 {
 	acpi_status status = AE_OK;
 	struct acpi_device *parent = NULL;
@@ -1229,9 +1239,20 @@ int acpi_bus_scan (struct acpi_device *start)
 			continue;
 		}
 
-		status = acpi_bus_add(&child, parent, chandle, type);
-		if (ACPI_FAILURE(status))
-			continue;
+		if (ops->acpi_op_add)
+			status = acpi_add_single_object(&child, parent,
+							chandle, type);
+		else
+			status = acpi_bus_get_device(chandle, &child);
+
+		if (ACPI_FAILURE(status))
+			continue;
+
+		if (ops->acpi_op_start) {
+			status = acpi_start_single_object(child);
+			if (ACPI_FAILURE(status))
+				continue;
+		}
 
 		/*
 		 * If the device is present, enabled, and functioning then
@@ -1257,8 +1278,50 @@ int acpi_bus_scan (struct acpi_device *start)
 
 	return_VALUE(0);
 }
-EXPORT_SYMBOL(acpi_bus_scan);
 
+int
+acpi_bus_add (
+	struct acpi_device	**child,
+	struct acpi_device	*parent,
+	acpi_handle		handle,
+	int			type)
+{
+	int result;
+	struct acpi_bus_ops ops;
+
+	ACPI_FUNCTION_TRACE("acpi_bus_add");
+
+	result = acpi_add_single_object(child, parent, handle, type);
+	if (!result) {
+		memset(&ops, 0, sizeof(ops));
+		ops.acpi_op_add = 1;
+		result = acpi_bus_scan(*child, &ops);
+	}
+	return_VALUE(result);
+}
+EXPORT_SYMBOL(acpi_bus_add);
+
+int
+acpi_bus_start (
+	struct acpi_device *device)
+{
+	int result;
+	struct acpi_bus_ops ops;
+
+	ACPI_FUNCTION_TRACE("acpi_bus_start");
+
+	if (!device)
+		return_VALUE(-EINVAL);
+
+	result = acpi_start_single_object(device);
+	if (!result) {
+		memset(&ops, 0, sizeof(ops));
+		ops.acpi_op_start = 1;
+		result = acpi_bus_scan(device, &ops);
+	}
+	return_VALUE(result);
+}
+EXPORT_SYMBOL(acpi_bus_start);
 
 static int
 acpi_bus_trim(struct acpi_device *start,
@@ -1331,13 +1394,19 @@ acpi_bus_scan_fixed (
 	/*
 	 * Enumerate all fixed-feature devices.
 	 */
-	if (acpi_fadt.pwr_button == 0)
-		result = acpi_bus_add(&device, acpi_root,
+	if (acpi_fadt.pwr_button == 0) {
+		result = acpi_add_single_object(&device, acpi_root,
 			NULL, ACPI_BUS_TYPE_POWER_BUTTON);
+		if (!result)
+			result = acpi_start_single_object(device);
+	}
 
-	if (acpi_fadt.sleep_button == 0)
-		result = acpi_bus_add(&device, acpi_root,
+	if (acpi_fadt.sleep_button == 0) {
+		result = acpi_add_single_object(&device, acpi_root,
 			NULL, ACPI_BUS_TYPE_SLEEP_BUTTON);
+		if (!result)
+			result = acpi_start_single_object(device);
+	}
 
 	return_VALUE(result);
 }
@@ -1346,6 +1415,7 @@ acpi_bus_scan_fixed (
 static int __init acpi_scan_init(void)
 {
 	int result;
+	struct acpi_bus_ops ops;
 
 	ACPI_FUNCTION_TRACE("acpi_scan_init");
 
@@ -1357,17 +1427,23 @@ static int __init acpi_scan_init(void)
 	/*
 	 * Create the root device in the bus's device tree
 	 */
-	result = acpi_bus_add(&acpi_root, NULL, ACPI_ROOT_OBJECT,
+	result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
 		ACPI_BUS_TYPE_SYSTEM);
 	if (result)
 		goto Done;
 
+	result = acpi_start_single_object(acpi_root);
+
 	/*
 	 * Enumerate devices in the ACPI namespace.
 	 */
 	result = acpi_bus_scan_fixed(acpi_root);
-	if (!result)
-		result = acpi_bus_scan(acpi_root);
+	if (!result) {
+		memset(&ops, 0, sizeof(ops));
+		ops.acpi_op_add = 1;
+		ops.acpi_op_start = 1;
+		result = acpi_bus_scan(acpi_root, &ops);
+	}
 
 	if (result)
 		acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 97fe13f7f07c..652281402c92 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -74,6 +74,8 @@ static ssize_t
 firmware_timeout_store(struct class *class, const char *buf, size_t count)
 {
 	loading_timeout = simple_strtol(buf, NULL, 10);
+	if (loading_timeout < 0)
+		loading_timeout = 0;
 	return count;
 }
 
@@ -138,6 +140,10 @@ firmware_loading_store(struct class_device *class_dev,
 	switch (loading) {
 	case 1:
 		down(&fw_lock);
+		if (!fw_priv->fw) {
+			up(&fw_lock);
+			break;
+		}
 		vfree(fw_priv->fw->data);
 		fw_priv->fw->data = NULL;
 		fw_priv->fw->size = 0;
@@ -178,7 +184,7 @@ firmware_data_read(struct kobject *kobj,
 
 	down(&fw_lock);
 	fw = fw_priv->fw;
-	if (test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+	if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
 		ret_count = -ENODEV;
 		goto out;
 	}
@@ -238,9 +244,10 @@ firmware_data_write(struct kobject *kobj,
 
 	if (!capable(CAP_SYS_RAWIO))
 		return -EPERM;
+
 	down(&fw_lock);
 	fw = fw_priv->fw;
-	if (test_bit(FW_STATUS_DONE, &fw_priv->status)) {
+	if (!fw || test_bit(FW_STATUS_DONE, &fw_priv->status)) {
 		retval = -ENODEV;
 		goto out;
 	}
@@ -418,7 +425,7 @@ request_firmware(const struct firmware **firmware_p, const char *name,
 
 	fw_priv = class_get_devdata(class_dev);
 
-	if (loading_timeout) {
+	if (loading_timeout > 0) {
 		fw_priv->timeout.expires = jiffies + loading_timeout * HZ;
 		add_timer(&fw_priv->timeout);
 	}
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 653512b77570..3e9fb6e4a52a 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -786,7 +786,6 @@ static int cciss_ioctl(struct inode *inode, struct file *filep,
 
 	case CCISS_GETLUNINFO: {
 		LogvolInfo_struct luninfo;
-		int i;
 
 		luninfo.LunID = drv->LunID;
 		luninfo.num_opens = drv->usage_count;
diff --git a/drivers/block/ll_rw_blk.c b/drivers/block/ll_rw_blk.c
index 234fdcfbdf01..692a5fced76e 100644
--- a/drivers/block/ll_rw_blk.c
+++ b/drivers/block/ll_rw_blk.c
@@ -1867,19 +1867,20 @@ static void freed_request(request_queue_t *q, int rw)
 
 #define blkdev_free_rq(list) list_entry((list)->next, struct request, queuelist)
 /*
- * Get a free request, queue_lock must not be held
+ * Get a free request, queue_lock must be held.
+ * Returns NULL on failure, with queue_lock held.
+ * Returns !NULL on success, with queue_lock *not held*.
  */
 static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				   int gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
-	struct io_context *ioc = get_io_context(gfp_mask);
+	struct io_context *ioc = current_io_context(GFP_ATOMIC);
 
 	if (unlikely(test_bit(QUEUE_FLAG_DRAIN, &q->queue_flags)))
 		goto out;
 
-	spin_lock_irq(q->queue_lock);
 	if (rl->count[rw]+1 >= q->nr_requests) {
 		/*
 		 * The queue will fill after this allocation, so set it as
@@ -1907,11 +1908,18 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 		 * The queue is full and the allocating process is not a
 		 * "batcher", and not exempted by the IO scheduler
 		 */
-		spin_unlock_irq(q->queue_lock);
 		goto out;
 	}
 
 get_rq:
+	/*
+	 * Only allow batching queuers to allocate up to 50% over the defined
+	 * limit of requests, otherwise we could have thousands of requests
+	 * allocated with any setting of ->nr_requests
+	 */
+	if (rl->count[rw] >= (3 * q->nr_requests / 2))
+		goto out;
+
 	rl->count[rw]++;
 	rl->starved[rw] = 0;
 	if (rl->count[rw] >= queue_congestion_on_threshold(q))
@@ -1941,7 +1949,6 @@ rq_starved:
 		if (unlikely(rl->count[rw] == 0))
 			rl->starved[rw] = 1;
 
-		spin_unlock_irq(q->queue_lock);
 		goto out;
 	}
 
@@ -1951,21 +1958,23 @@ rq_starved:
 	rq_init(q, rq);
 	rq->rl = rl;
 out:
-	put_io_context(ioc);
 	return rq;
 }
 
 /*
  * No available requests for this queue, unplug the device and wait for some
  * requests to become available.
+ *
+ * Called with q->queue_lock held, and returns with it unlocked.
  */
 static struct request *get_request_wait(request_queue_t *q, int rw,
 					struct bio *bio)
 {
-	DEFINE_WAIT(wait);
 	struct request *rq;
 
-	do {
+	rq = get_request(q, rw, bio, GFP_NOIO);
+	while (!rq) {
+		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
 
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
@@ -1976,7 +1985,8 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		if (!rq) {
 			struct io_context *ioc;
 
-			generic_unplug_device(q);
+			__generic_unplug_device(q);
+			spin_unlock_irq(q->queue_lock);
 			io_schedule();
 
 			/*
@@ -1985,12 +1995,13 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 			 * up to a big batch of them for a small period time.
 			 * See ioc_batching, ioc_set_batching
 			 */
-			ioc = get_io_context(GFP_NOIO);
+			ioc = current_io_context(GFP_NOIO);
 			ioc_set_batching(q, ioc);
-			put_io_context(ioc);
+
+			spin_lock_irq(q->queue_lock);
 		}
 		finish_wait(&rl->wait[rw], &wait);
-	} while (!rq);
+	}
 
 	return rq;
 }
@@ -2001,14 +2012,18 @@ struct request *blk_get_request(request_queue_t *q, int rw, int gfp_mask)
 
 	BUG_ON(rw != READ && rw != WRITE);
 
-	if (gfp_mask & __GFP_WAIT)
+	spin_lock_irq(q->queue_lock);
+	if (gfp_mask & __GFP_WAIT) {
 		rq = get_request_wait(q, rw, NULL);
-	else
+	} else {
 		rq = get_request(q, rw, NULL, gfp_mask);
+		if (!rq)
+			spin_unlock_irq(q->queue_lock);
+	}
+	/* q->queue_lock is unlocked at this point */
 
 	return rq;
 }
-
 EXPORT_SYMBOL(blk_get_request);
 
 /**
@@ -2512,7 +2527,7 @@ EXPORT_SYMBOL(blk_attempt_remerge);
 
 static int __make_request(request_queue_t *q, struct bio *bio)
 {
-	struct request *req, *freereq = NULL;
+	struct request *req;
 	int el_ret, rw, nr_sectors, cur_nr_sectors, barrier, err, sync;
 	unsigned short prio;
 	sector_t sector;
@@ -2540,14 +2555,9 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 		goto end_io;
 	}
 
-again:
 	spin_lock_irq(q->queue_lock);
 
-	if (elv_queue_empty(q)) {
-		blk_plug_device(q);
-		goto get_rq;
-	}
-	if (barrier)
+	if (unlikely(barrier) || elv_queue_empty(q))
 		goto get_rq;
 
 	el_ret = elv_merge(q, &req, bio);
@@ -2592,40 +2602,24 @@ again:
 		elv_merged_request(q, req);
 		goto out;
 
-	/*
-	 * elevator says don't/can't merge. get new request
-	 */
-	case ELEVATOR_NO_MERGE:
-		break;
-
+	/* ELV_NO_MERGE: elevator says don't/can't merge. */
 	default:
-		printk("elevator returned crap (%d)\n", el_ret);
-		BUG();
+		;
 	}
 
+get_rq:
 	/*
-	 * Grab a free request from the freelist - if that is empty, check
-	 * if we are doing read ahead and abort instead of blocking for
-	 * a free slot.
+	 * Grab a free request. This is might sleep but can not fail.
+	 * Returns with the queue unlocked.
+	 */
+	req = get_request_wait(q, rw, bio);
+
+	/*
+	 * After dropping the lock and possibly sleeping here, our request
+	 * may now be mergeable after it had proven unmergeable (above).
+	 * We don't worry about that case for efficiency. It won't happen
+	 * often, and the elevators are able to handle it.
 	 */
-get_rq:
-	if (freereq) {
-		req = freereq;
-		freereq = NULL;
-	} else {
-		spin_unlock_irq(q->queue_lock);
-		if ((freereq = get_request(q, rw, bio, GFP_ATOMIC)) == NULL) {
-			/*
-			 * READA bit set
-			 */
-			err = -EWOULDBLOCK;
-			if (bio_rw_ahead(bio))
-				goto end_io;
-
-			freereq = get_request_wait(q, rw, bio);
-		}
-		goto again;
-	}
 
 	req->flags |= REQ_CMD;
 
@@ -2654,10 +2648,11 @@ get_rq:
 	req->rq_disk = bio->bi_bdev->bd_disk;
 	req->start_time = jiffies;
 
+	spin_lock_irq(q->queue_lock);
+	if (elv_queue_empty(q))
+		blk_plug_device(q);
 	add_request(q, req);
 out:
-	if (freereq)
-		__blk_put_request(q, freereq);
 	if (sync)
 		__generic_unplug_device(q);
 
@@ -3284,24 +3279,20 @@ void exit_io_context(void)
 
 /*
  * If the current task has no IO context then create one and initialise it.
- * If it does have a context, take a ref on it.
+ * Otherwise, return its existing IO context.
  *
- * This is always called in the context of the task which submitted the I/O.
- * But weird things happen, so we disable local interrupts to ensure exclusive
- * access to *current.
+ * This returned IO context doesn't have a specifically elevated refcount,
+ * but since the current task itself holds a reference, the context can be
+ * used in general code, so long as it stays within `current` context.
  */
-struct io_context *get_io_context(int gfp_flags)
+struct io_context *current_io_context(int gfp_flags)
 {
 	struct task_struct *tsk = current;
-	unsigned long flags;
 	struct io_context *ret;
 
-	local_irq_save(flags);
 	ret = tsk->io_context;
-	if (ret)
-		goto out;
-
-	local_irq_restore(flags);
+	if (likely(ret))
+		return ret;
 
 	ret = kmem_cache_alloc(iocontext_cachep, gfp_flags);
 	if (ret) {
@@ -3312,25 +3303,25 @@ struct io_context *get_io_context(int gfp_flags)
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		ret->aic = NULL;
 		ret->cic = NULL;
+		tsk->io_context = ret;
+	}
 
-	local_irq_save(flags);
-
-	/*
-	 * very unlikely, someone raced with us in setting up the task
-	 * io context. free new context and just grab a reference.
-	 */
-	if (!tsk->io_context)
-		tsk->io_context = ret;
-	else {
-		kmem_cache_free(iocontext_cachep, ret);
-		ret = tsk->io_context;
-	}
+	return ret;
+}
+EXPORT_SYMBOL(current_io_context);
 
-out:
+/*
+ * If the current task has no IO context then create one and initialise it.
+ * If it does have a context, take a ref on it.
+ *
+ * This is always called in the context of the task which submitted the I/O.
+ */
+struct io_context *get_io_context(int gfp_flags)
+{
+	struct io_context *ret;
+	ret = current_io_context(gfp_flags);
+	if (likely(ret))
 		atomic_inc(&ret->refcount);
-	local_irq_restore(flags);
-	}
-
 	return ret;
 }
 EXPORT_SYMBOL(get_io_context);
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 1407945a5892..59f589d733f9 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -686,6 +686,15 @@ static struct pci_device_id agp_amd64_pci_table[] = {
 	.subvendor	= PCI_ANY_ID,
 	.subdevice	= PCI_ANY_ID,
 	},
+	/* SIS 760 */
+	{
+	.class		= (PCI_CLASS_BRIDGE_HOST << 8),
+	.class_mask	= ~0,
+	.vendor		= PCI_VENDOR_ID_SI,
+	.device		= PCI_DEVICE_ID_SI_760,
+	.subvendor	= PCI_ANY_ID,
+	.subdevice	= PCI_ANY_ID,
+	},
 	{ }
 };
 
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 1813d0d198f1..e16c13fe698d 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -1088,8 +1088,8 @@ static inline int i_ipmi_request(ipmi_user_t user,
 	long          seqid;
 	int           broadcast = 0;
 
-	if (addr->channel > IPMI_NUM_CHANNELS) {
+	if (addr->channel >= IPMI_MAX_CHANNELS) {
 		spin_lock_irqsave(&intf->counter_lock, flags);
 		intf->sent_invalid_commands++;
 		spin_unlock_irqrestore(&intf->counter_lock, flags);
 		rv = -EINVAL;
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c
index 7c24fbe831f8..95f7046ff059 100644
--- a/drivers/char/moxa.c
+++ b/drivers/char/moxa.c
@@ -451,7 +451,7 @@ static int __init moxa_init(void)
 		int n = (sizeof(moxa_pcibrds) / sizeof(moxa_pcibrds[0])) - 1;
 		i = 0;
 		while (i < n) {
-			while ((p = pci_find_device(moxa_pcibrds[i].vendor, moxa_pcibrds[i].device, p))!=NULL)
+			while ((p = pci_get_device(moxa_pcibrds[i].vendor, moxa_pcibrds[i].device, p))!=NULL)
 			{
 				if (pci_enable_device(p))
 					continue;
diff --git a/drivers/char/rio/rio_linux.c b/drivers/char/rio/rio_linux.c
index 7db3370f4972..d7d484024e2b 100644
--- a/drivers/char/rio/rio_linux.c
+++ b/drivers/char/rio/rio_linux.c
@@ -1095,7 +1095,7 @@ static int __init rio_init(void)
 
 #ifdef CONFIG_PCI
   /* First look for the JET devices: */
-  while ((pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
+  while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
                                   PCI_DEVICE_ID_SPECIALIX_SX_XIO_IO8,
                                   pdev))) {
      if (pci_enable_device(pdev)) continue;
@@ -1169,7 +1169,7 @@ static int __init rio_init(void)
   */
 
   /* Then look for the older RIO/PCI devices: */
-  while ((pdev = pci_find_device (PCI_VENDOR_ID_SPECIALIX,
+  while ((pdev = pci_get_device (PCI_VENDOR_ID_SPECIALIX,
                                   PCI_DEVICE_ID_SPECIALIX_RIO,
                                   pdev))) {
      if (pci_enable_device(pdev)) continue;
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c
index ff4f09804865..d8f9e94ae475 100644
--- a/drivers/char/rtc.c
+++ b/drivers/char/rtc.c
@@ -78,6 +78,7 @@
 #include <linux/sysctl.h>
 #include <linux/wait.h>
 #include <linux/bcd.h>
+#include <linux/delay.h>
 
 #include <asm/current.h>
 #include <asm/uaccess.h>
@@ -894,7 +895,6 @@ static int __init rtc_init(void)
 	struct proc_dir_entry *ent;
 #if defined(__alpha__) || defined(__mips__)
 	unsigned int year, ctrl;
-	unsigned long uip_watchdog;
 	char *guess = NULL;
 #endif
 #ifdef __sparc__
@@ -1000,12 +1000,8 @@ no_irq:
 	/* Each operating system on an Alpha uses its own epoch.
 	   Let's try to guess which one we are using now. */
 
-	uip_watchdog = jiffies;
 	if (rtc_is_updating() != 0)
-		while (jiffies - uip_watchdog < 2*HZ/100) {
-			barrier();
-			cpu_relax();
-		}
+		msleep(20);
 
 	spin_lock_irq(&rtc_lock);
 	year = CMOS_READ(RTC_YEAR);
@@ -1213,7 +1209,6 @@ static int rtc_proc_open(struct inode *inode, struct file *file)
 
 void rtc_get_rtc_time(struct rtc_time *rtc_tm)
 {
-	unsigned long uip_watchdog = jiffies;
 	unsigned char ctrl;
 #ifdef CONFIG_MACH_DECSTATION
 	unsigned int real_year;
@@ -1221,7 +1216,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
 
 	/*
 	 * read RTC once any update in progress is done. The update
-	 * can take just over 2ms. We wait 10 to 20ms. There is no need to
+	 * can take just over 2ms. We wait 20ms. There is no need to
 	 * to poll-wait (up to 1s - eeccch) for the falling edge of RTC_UIP.
 	 * If you need to know *exactly* when a second has started, enable
 	 * periodic update complete interrupts, (via ioctl) and then
@@ -1230,10 +1225,7 @@ void rtc_get_rtc_time(struct rtc_time *rtc_tm)
 	 */
 
 	if (rtc_is_updating() != 0)
-		while (jiffies - uip_watchdog < 2*HZ/100) {
-			barrier();
-			cpu_relax();
-		}
+		msleep(20);
 
 	/*
 	 * Only the values that we read from the RTC are set. We leave
diff --git a/drivers/char/tipar.c b/drivers/char/tipar.c
index 659335d80ee7..ec78d2f161f7 100644
--- a/drivers/char/tipar.c
+++ b/drivers/char/tipar.c
@@ -396,7 +396,7 @@ static struct file_operations tipar_fops = {
 static int __init
 tipar_setup(char *str)
 {
-	int ints[2];
+	int ints[3];
 
 	str = get_options(str, ARRAY_SIZE(ints), ints);
 
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c
index 58597993954f..f19cf9d7792d 100644
--- a/drivers/char/tty_ioctl.c
+++ b/drivers/char/tty_ioctl.c
@@ -476,11 +476,11 @@ int n_tty_ioctl(struct tty_struct * tty, struct file * file,
 			ld = tty_ldisc_ref(tty);
 			switch (arg) {
 			case TCIFLUSH:
-				if (ld->flush_buffer)
+				if (ld && ld->flush_buffer)
 					ld->flush_buffer(tty);
 				break;
 			case TCIOFLUSH:
-				if (ld->flush_buffer)
+				if (ld && ld->flush_buffer)
 					ld->flush_buffer(tty);
 				/* fall through */
 			case TCOFLUSH:
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 8971484b956b..1d44f69e1fda 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -25,6 +25,7 @@
 #include <linux/fs.h>
 #include <linux/console.h>
 #include <linux/signal.h>
+#include <linux/timex.h>
 
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -386,7 +387,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 		if (!perm)
 			return -EPERM;
 		if (arg)
-			arg = 1193182 / arg;
+			arg = CLOCK_TICK_RATE / arg;
 		kd_mksound(arg, 0);
 		return 0;
 
@@ -403,7 +404,7 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
 		ticks = HZ * ((arg >> 16) & 0xffff) / 1000;
 		count = ticks ? (arg & 0xffff) : 0;
 		if (count)
-			count = 1193182 / count;
+			count = CLOCK_TICK_RATE / count;
 		kd_mksound(count, ticks);
 		return 0;
 	}
diff --git a/drivers/char/watchdog/ixp2000_wdt.c b/drivers/char/watchdog/ixp2000_wdt.c
index 4e98c215e5b1..4b039516cc86 100644
--- a/drivers/char/watchdog/ixp2000_wdt.c
+++ b/drivers/char/watchdog/ixp2000_wdt.c
@@ -162,7 +162,7 @@ ixp2000_wdt_release(struct inode *inode, struct file *file)
 	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
 		wdt_disable();
 	} else {
-		printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
+		printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
 					"timer will not stop\n");
 	}
 
diff --git a/drivers/char/watchdog/ixp4xx_wdt.c b/drivers/char/watchdog/ixp4xx_wdt.c
index 82396e06c8a8..83df369113a4 100644
--- a/drivers/char/watchdog/ixp4xx_wdt.c
+++ b/drivers/char/watchdog/ixp4xx_wdt.c
@@ -156,7 +156,7 @@ ixp4xx_wdt_release(struct inode *inode, struct file *file)
 	if (test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
 		wdt_disable();
 	} else {
-		printk(KERN_CRIT "WATCHDOG: Device closed unexpectdly - "
+		printk(KERN_CRIT "WATCHDOG: Device closed unexpectedly - "
 					"timer will not stop\n");
 	}
 
diff --git a/drivers/isdn/hardware/eicon/dadapter.c b/drivers/isdn/hardware/eicon/dadapter.c
index 6e548a222ef1..89497890158d 100644
--- a/drivers/isdn/hardware/eicon/dadapter.c
+++ b/drivers/isdn/hardware/eicon/dadapter.c
@@ -44,7 +44,7 @@ static didd_adapter_change_notification_t\
   Array to held adapter information
   -------------------------------------------------------------------------- */
 static DESCRIPTOR  HandleTable[NEW_MAX_DESCRIPTORS];
-dword Adapters = 0; /* Number of adapters */
+static dword Adapters = 0; /* Number of adapters */
 /* --------------------------------------------------------------------------
   Shadow IDI_DIMAINT
   and 'shadow' debug stuff
diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c
index 6e7e060716b7..7333377ab31d 100644
--- a/drivers/isdn/hisax/hfc4s8s_l1.c
+++ b/drivers/isdn/hisax/hfc4s8s_l1.c
@@ -312,7 +312,7 @@ wait_busy(hfc4s8s_hw * a)
 /* function to read critical counter registers that   */
 /* may be udpated by the chip during read             */
 /******************************************************/
-static volatile u_char
+static u_char
 Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
 {
 	u_char ref8;
@@ -324,7 +324,7 @@ Read_hfc8_stable(hfc4s8s_hw * hw, int reg)
 	return in8;
 }
 
-static volatile int
+static int
 Read_hfc16_stable(hfc4s8s_hw * hw, int reg)
 {
 	int ref16;
@@ -1465,7 +1465,7 @@ hfc_hardware_enable(hfc4s8s_hw * hw, int enable, int nt_mode)
 /******************************************/
 /* disable memory mapped ports / io ports */
 /******************************************/
-void
+static void
 release_pci_ports(hfc4s8s_hw * hw)
 {
 	pci_write_config_word(hw->pdev, PCI_COMMAND, 0);
@@ -1481,7 +1481,7 @@ release_pci_ports(hfc4s8s_hw * hw)
 /*****************************************/
 /* enable memory mapped ports / io ports */
 /*****************************************/
-void
+static void
 enable_pci_ports(hfc4s8s_hw * hw)
 {
 #ifdef CONFIG_HISAX_HFC4S8S_PCIMEM
diff --git a/drivers/isdn/hysdn/hycapi.c b/drivers/isdn/hysdn/hycapi.c
index 8ee25b2ccce1..1fd3d4e5f284 100644
--- a/drivers/isdn/hysdn/hycapi.c
+++ b/drivers/isdn/hysdn/hycapi.c
@@ -42,6 +42,8 @@ typedef struct _hycapi_appl {
 
 static hycapi_appl hycapi_applications[CAPI_MAXAPPL];
 
+static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb);
+
 static inline int _hycapi_appCheck(int app_id, int ctrl_no)
 {
 	if((ctrl_no <= 0) || (ctrl_no > CAPI_MAXCONTR) || (app_id <= 0) ||
@@ -57,7 +59,7 @@ static inline int _hycapi_appCheck(int app_id, int ctrl_no)
 Kernel-Capi callback reset_ctr
 ******************************/
 
-void
+static void
 hycapi_reset_ctr(struct capi_ctr *ctrl)
 {
 	hycapictrl_info *cinfo = ctrl->driverdata;
@@ -73,7 +75,7 @@ hycapi_reset_ctr(struct capi_ctr *ctrl)
 Kernel-Capi callback remove_ctr
 ******************************/
 
-void
+static void
 hycapi_remove_ctr(struct capi_ctr *ctrl)
 {
 	int i;
@@ -215,7 +217,7 @@ Error-checking is done for CAPI-compliance.
 The application is recorded in the internal list.
 *************************************************************/
 
-void
+static void
 hycapi_register_appl(struct capi_ctr *ctrl, __u16 appl,
 		     capi_register_params *rp)
 {
@@ -291,7 +293,7 @@ Release the application from the internal list an remove it's
 registration at controller-level
 ******************************************************************/
 
-void
+static void
 hycapi_release_appl(struct capi_ctr *ctrl, __u16 appl)
 {
 	int chk;
@@ -364,7 +366,7 @@ firmware-releases that do not check the MsgLen-Indication!
 
 ***************************************************************/
 
-u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
+static u16 hycapi_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
 {
 	__u16 appl_id;
 	int _len, _len2;
@@ -437,8 +439,8 @@ Informations provided in the /proc/capi-entries.
 
 *********************************************************************/
 
-int hycapi_read_proc(char *page, char **start, off_t off,
+static int hycapi_read_proc(char *page, char **start, off_t off,
 		     int count, int *eof, struct capi_ctr *ctrl)
 {
 	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
 	hysdn_card *card = cinfo->card;
@@ -485,7 +487,7 @@ on capi-interface registration.
 
 **************************************************************/
 
-int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
+static int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
 {
 #ifdef HYCAPI_PRINTFNAMES
 	printk(KERN_NOTICE "hycapi_load_firmware\n");
@@ -494,7 +496,7 @@ int hycapi_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
 }
 
 
-char *hycapi_procinfo(struct capi_ctr *ctrl)
+static char *hycapi_procinfo(struct capi_ctr *ctrl)
 {
 	hycapictrl_info *cinfo = (hycapictrl_info *)(ctrl->driverdata);
 #ifdef HYCAPI_PRINTFNAMES
diff --git a/drivers/isdn/hysdn/hysdn_boot.c b/drivers/isdn/hysdn/hysdn_boot.c
index 6c04281e57b8..7bfba196f315 100644
--- a/drivers/isdn/hysdn/hysdn_boot.c
+++ b/drivers/isdn/hysdn/hysdn_boot.c
@@ -53,7 +53,7 @@ struct boot_data {
 /* to be called at start of POF file reading,        */
 /* before starting any decryption on any POF record. */
 /*****************************************************/
-void
+static void
 StartDecryption(struct boot_data *boot)
 {
 	boot->Cryptor = CRYPT_STARTTERM;
@@ -66,7 +66,7 @@ StartDecryption(struct boot_data *boot)
 /* to HI and LO boot loader and (all) seq tags, because       */
 /* global Cryptor is started for whole POF.                   */
 /***************************************************************/
-void
+static void
 DecryptBuf(struct boot_data *boot, int cnt)
 {
 	uchar *bufp = boot->buf.BootBuf;
diff --git a/drivers/isdn/hysdn/hysdn_defs.h b/drivers/isdn/hysdn/hysdn_defs.h
index 4cee26e558ee..432f6f99089e 100644
--- a/drivers/isdn/hysdn/hysdn_defs.h
+++ b/drivers/isdn/hysdn/hysdn_defs.h
@@ -227,7 +227,6 @@ typedef struct hycapictrl_info hycapictrl_info;
 /*****************/
 /* exported vars */
 /*****************/
-extern int cardmax;		/* number of found cards */
 extern hysdn_card *card_root;	/* pointer to first card */
 
 
@@ -244,7 +243,6 @@ extern void hysdn_procconf_release(void); /* deinit proc config filesys */
 /* hysdn_proclog.c */
 extern int hysdn_proclog_init(hysdn_card *);	/* init proc log entry */
 extern void hysdn_proclog_release(hysdn_card *);	/* deinit proc log entry */
-extern void put_log_buffer(hysdn_card *, char *);	/* output log data */
 extern void hysdn_addlog(hysdn_card *, char *,...);	/* output data to log */
 extern void hysdn_card_errlog(hysdn_card *, tErrLogEntry *, int);	/* output card log */
 
@@ -278,16 +276,6 @@ extern unsigned int hycapi_enable;
 extern int hycapi_capi_create(hysdn_card *);	/* create a new capi device */
 extern int hycapi_capi_release(hysdn_card *);	/* delete the device */
 extern int hycapi_capi_stop(hysdn_card *card);	/* suspend */
-extern int hycapi_load_firmware(struct capi_ctr *, capiloaddata *);
-extern void hycapi_reset_ctr(struct capi_ctr *);
-extern void hycapi_remove_ctr(struct capi_ctr *);
-extern void hycapi_register_appl(struct capi_ctr *, __u16 appl,
-				 capi_register_params *);
-extern void hycapi_release_appl(struct capi_ctr *, __u16 appl);
-extern u16 hycapi_send_message(struct capi_ctr *, struct sk_buff *skb);
-extern char *hycapi_procinfo(struct capi_ctr *);
-extern int hycapi_read_proc(char *page, char **start, off_t off,
-			    int count, int *eof, struct capi_ctr *card);
 extern void hycapi_rx_capipkt(hysdn_card * card, uchar * buf, word len);
 extern void hycapi_tx_capiack(hysdn_card * card);
 extern struct sk_buff *hycapi_tx_capiget(hysdn_card *card);
diff --git a/drivers/isdn/hysdn/hysdn_init.c b/drivers/isdn/hysdn/hysdn_init.c
index 5cac2bf5f4b0..12c8137b5161 100644
--- a/drivers/isdn/hysdn/hysdn_init.c
+++ b/drivers/isdn/hysdn/hysdn_init.c
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Werner Cornelius");
 MODULE_LICENSE("GPL");
 
 static char *hysdn_init_revision = "$Revision: 1.6.6.6 $";
-int cardmax;			/* number of found cards */
+static int cardmax;		/* number of found cards */
 hysdn_card *card_root = NULL;	/* pointer to first card */
 
 /**********************************************/
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index 8ef2b7c952a6..4d57011c5737 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -22,6 +22,8 @@
 /* the proc subdir for the interface is defined in the procconf module */
 extern struct proc_dir_entry *hysdn_proc_entry;
 
+static void put_log_buffer(hysdn_card * card, char *cp);
+
 /*************************************************/
 /* structure keeping ascii log for device output */
 /*************************************************/
@@ -93,7 +95,7 @@ hysdn_addlog(hysdn_card * card, char *fmt,...)
93/* opened for read got the contents. */ 95/* opened for read got the contents. */
94/* Flushes buffers not longer in use. */ 96/* Flushes buffers not longer in use. */
95/********************************************/ 97/********************************************/
96void 98static void
97put_log_buffer(hysdn_card * card, char *cp) 99put_log_buffer(hysdn_card * card, char *cp)
98{ 100{
99 struct log_data *ib; 101 struct log_data *ib;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 3802f7a17f16..4a0c57db2b67 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -338,6 +338,7 @@ static int super_written(struct bio *bio, unsigned int bytes_done, int error)
338 338
339 if (atomic_dec_and_test(&rdev->mddev->pending_writes)) 339 if (atomic_dec_and_test(&rdev->mddev->pending_writes))
340 wake_up(&rdev->mddev->sb_wait); 340 wake_up(&rdev->mddev->sb_wait);
341 bio_put(bio);
341 return 0; 342 return 0;
342} 343}
343 344
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile
index 2dc906fdfa55..810e7aac0a53 100644
--- a/drivers/media/video/Makefile
+++ b/drivers/media/video/Makefile
@@ -7,8 +7,7 @@ bttv-objs := bttv-driver.o bttv-cards.o bttv-if.o \
7zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o 7zoran-objs := zr36120.o zr36120_i2c.o zr36120_mem.o
8zr36067-objs := zoran_procfs.o zoran_device.o \ 8zr36067-objs := zoran_procfs.o zoran_device.o \
9 zoran_driver.o zoran_card.o 9 zoran_driver.o zoran_card.o
10tuner-objs := tuner-core.o tuner-simple.o mt20xx.o tda8290.o 10tuner-objs := tuner-core.o tuner-simple.o mt20xx.o tda8290.o tea5767.o
11
12obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o 11obj-$(CONFIG_VIDEO_DEV) += videodev.o v4l2-common.o v4l1-compat.o
13 12
14obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \ 13obj-$(CONFIG_VIDEO_BT848) += bttv.o msp3400.o tvaudio.o \
diff --git a/drivers/media/video/bttv-driver.c b/drivers/media/video/bttv-driver.c
index 290289a99757..7d62b394c509 100644
--- a/drivers/media/video/bttv-driver.c
+++ b/drivers/media/video/bttv-driver.c
@@ -1,5 +1,5 @@
1/* 1/*
2 $Id: bttv-driver.c,v 1.38 2005/06/10 17:20:24 mchehab Exp $ 2 $Id: bttv-driver.c,v 1.40 2005/06/16 21:38:45 nsh Exp $
3 3
4 bttv - Bt848 frame grabber driver 4 bttv - Bt848 frame grabber driver
5 5
@@ -76,6 +76,9 @@ static unsigned int whitecrush_upper = 0xCF;
76static unsigned int whitecrush_lower = 0x7F; 76static unsigned int whitecrush_lower = 0x7F;
77static unsigned int vcr_hack = 0; 77static unsigned int vcr_hack = 0;
78static unsigned int irq_iswitch = 0; 78static unsigned int irq_iswitch = 0;
79static unsigned int uv_ratio = 50;
80static unsigned int full_luma_range = 0;
81static unsigned int coring = 0;
79 82
80/* API features (turn on/off stuff for testing) */ 83/* API features (turn on/off stuff for testing) */
81static unsigned int v4l2 = 1; 84static unsigned int v4l2 = 1;
@@ -106,6 +109,9 @@ module_param(adc_crush, int, 0444);
106module_param(whitecrush_upper, int, 0444); 109module_param(whitecrush_upper, int, 0444);
107module_param(whitecrush_lower, int, 0444); 110module_param(whitecrush_lower, int, 0444);
108module_param(vcr_hack, int, 0444); 111module_param(vcr_hack, int, 0444);
112module_param(uv_ratio, int, 0444);
113module_param(full_luma_range, int, 0444);
114module_param(coring, int, 0444);
109 115
110module_param_array(radio, int, NULL, 0444); 116module_param_array(radio, int, NULL, 0444);
111 117
@@ -124,6 +130,9 @@ MODULE_PARM_DESC(whitecrush_upper,"sets the white crush upper value, default is
124MODULE_PARM_DESC(whitecrush_lower,"sets the white crush lower value, default is 127"); 130MODULE_PARM_DESC(whitecrush_lower,"sets the white crush lower value, default is 127");
125MODULE_PARM_DESC(vcr_hack,"enables the VCR hack (improves synch on poor VCR tapes), default is 0 (no)"); 131MODULE_PARM_DESC(vcr_hack,"enables the VCR hack (improves synch on poor VCR tapes), default is 0 (no)");
126MODULE_PARM_DESC(irq_iswitch,"switch inputs in irq handler"); 132MODULE_PARM_DESC(irq_iswitch,"switch inputs in irq handler");
133MODULE_PARM_DESC(uv_ratio,"ratio between u and v gains, default is 50");
134MODULE_PARM_DESC(full_luma_range,"use the full luma range, default is 0 (no)");
135MODULE_PARM_DESC(coring,"set the luma coring level, default is 0 (no)");
127 136
128MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards"); 137MODULE_DESCRIPTION("bttv - v4l/v4l2 driver module for bt848/878 based cards");
129MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr"); 138MODULE_AUTHOR("Ralph Metzler & Marcus Metzler & Gerd Knorr");
@@ -484,7 +493,10 @@ static const unsigned int BTTV_FORMATS = ARRAY_SIZE(bttv_formats);
484#define V4L2_CID_PRIVATE_VCR_HACK (V4L2_CID_PRIVATE_BASE + 5) 493#define V4L2_CID_PRIVATE_VCR_HACK (V4L2_CID_PRIVATE_BASE + 5)
485#define V4L2_CID_PRIVATE_WHITECRUSH_UPPER (V4L2_CID_PRIVATE_BASE + 6) 494#define V4L2_CID_PRIVATE_WHITECRUSH_UPPER (V4L2_CID_PRIVATE_BASE + 6)
486#define V4L2_CID_PRIVATE_WHITECRUSH_LOWER (V4L2_CID_PRIVATE_BASE + 7) 495#define V4L2_CID_PRIVATE_WHITECRUSH_LOWER (V4L2_CID_PRIVATE_BASE + 7)
487#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 8) 496#define V4L2_CID_PRIVATE_UV_RATIO (V4L2_CID_PRIVATE_BASE + 8)
497#define V4L2_CID_PRIVATE_FULL_LUMA_RANGE (V4L2_CID_PRIVATE_BASE + 9)
498#define V4L2_CID_PRIVATE_CORING (V4L2_CID_PRIVATE_BASE + 10)
499#define V4L2_CID_PRIVATE_LASTP1 (V4L2_CID_PRIVATE_BASE + 11)
488 500
489static const struct v4l2_queryctrl no_ctl = { 501static const struct v4l2_queryctrl no_ctl = {
490 .name = "42", 502 .name = "42",
@@ -618,8 +630,32 @@ static const struct v4l2_queryctrl bttv_ctls[] = {
618 .step = 1, 630 .step = 1,
619 .default_value = 0x7F, 631 .default_value = 0x7F,
620 .type = V4L2_CTRL_TYPE_INTEGER, 632 .type = V4L2_CTRL_TYPE_INTEGER,
633 },{
634 .id = V4L2_CID_PRIVATE_UV_RATIO,
635 .name = "uv ratio",
636 .minimum = 0,
637 .maximum = 100,
638 .step = 1,
639 .default_value = 50,
640 .type = V4L2_CTRL_TYPE_INTEGER,
641 },{
642 .id = V4L2_CID_PRIVATE_FULL_LUMA_RANGE,
643 .name = "full luma range",
644 .minimum = 0,
645 .maximum = 1,
646 .type = V4L2_CTRL_TYPE_BOOLEAN,
647 },{
648 .id = V4L2_CID_PRIVATE_CORING,
649 .name = "coring",
650 .minimum = 0,
651 .maximum = 3,
652 .step = 1,
653 .default_value = 0,
654 .type = V4L2_CTRL_TYPE_INTEGER,
621 } 655 }
622 656
657
658
623}; 659};
624static const int BTTV_CTLS = ARRAY_SIZE(bttv_ctls); 660static const int BTTV_CTLS = ARRAY_SIZE(bttv_ctls);
625 661
@@ -833,8 +869,8 @@ static void bt848_sat(struct bttv *btv, int color)
833 btv->saturation = color; 869 btv->saturation = color;
834 870
835 /* 0-511 for the color */ 871 /* 0-511 for the color */
836 val_u = color >> 7; 872 val_u = ((color * btv->opt_uv_ratio) / 50) >> 7;
837 val_v = ((color>>7)*180L)/254; 873 val_v = (((color * (100 - btv->opt_uv_ratio) / 50) >>7)*180L)/254;
838 hibits = (val_u >> 7) & 2; 874 hibits = (val_u >> 7) & 2;
839 hibits |= (val_v >> 8) & 1; 875 hibits |= (val_v >> 8) & 1;
840 btwrite(val_u & 0xff, BT848_SAT_U_LO); 876 btwrite(val_u & 0xff, BT848_SAT_U_LO);
@@ -1151,6 +1187,15 @@ static int get_control(struct bttv *btv, struct v4l2_control *c)
1151 case V4L2_CID_PRIVATE_WHITECRUSH_LOWER: 1187 case V4L2_CID_PRIVATE_WHITECRUSH_LOWER:
1152 c->value = btv->opt_whitecrush_lower; 1188 c->value = btv->opt_whitecrush_lower;
1153 break; 1189 break;
1190 case V4L2_CID_PRIVATE_UV_RATIO:
1191 c->value = btv->opt_uv_ratio;
1192 break;
1193 case V4L2_CID_PRIVATE_FULL_LUMA_RANGE:
1194 c->value = btv->opt_full_luma_range;
1195 break;
1196 case V4L2_CID_PRIVATE_CORING:
1197 c->value = btv->opt_coring;
1198 break;
1154 default: 1199 default:
1155 return -EINVAL; 1200 return -EINVAL;
1156 } 1201 }
@@ -1247,6 +1292,18 @@ static int set_control(struct bttv *btv, struct v4l2_control *c)
1247 btv->opt_whitecrush_lower = c->value; 1292 btv->opt_whitecrush_lower = c->value;
1248 btwrite(c->value, BT848_WC_DOWN); 1293 btwrite(c->value, BT848_WC_DOWN);
1249 break; 1294 break;
1295 case V4L2_CID_PRIVATE_UV_RATIO:
1296 btv->opt_uv_ratio = c->value;
1297 bt848_sat(btv, btv->saturation);
1298 break;
1299 case V4L2_CID_PRIVATE_FULL_LUMA_RANGE:
1300 btv->opt_full_luma_range = c->value;
1301 btaor((c->value<<7), ~BT848_OFORM_RANGE, BT848_OFORM);
1302 break;
1303 case V4L2_CID_PRIVATE_CORING:
1304 btv->opt_coring = c->value;
1305 btaor((c->value<<5), ~BT848_OFORM_CORE32, BT848_OFORM);
1306 break;
1250 default: 1307 default:
1251 return -EINVAL; 1308 return -EINVAL;
1252 } 1309 }
@@ -3117,11 +3174,6 @@ static int radio_do_ioctl(struct inode *inode, struct file *file,
3117 return -EINVAL; 3174 return -EINVAL;
3118 memset(v,0,sizeof(*v)); 3175 memset(v,0,sizeof(*v));
3119 strcpy(v->name, "Radio"); 3176 strcpy(v->name, "Radio");
3120 /* japan: 76.0 MHz - 89.9 MHz
3121 western europe: 87.5 MHz - 108.0 MHz
3122 russia: 65.0 MHz - 108.0 MHz */
3123 v->rangelow=(int)(65*16);
3124 v->rangehigh=(int)(108*16);
3125 bttv_call_i2c_clients(btv,cmd,v); 3177 bttv_call_i2c_clients(btv,cmd,v);
3126 return 0; 3178 return 0;
3127 } 3179 }
@@ -3876,6 +3928,9 @@ static int __devinit bttv_probe(struct pci_dev *dev,
3876 btv->opt_vcr_hack = vcr_hack; 3928 btv->opt_vcr_hack = vcr_hack;
3877 btv->opt_whitecrush_upper = whitecrush_upper; 3929 btv->opt_whitecrush_upper = whitecrush_upper;
3878 btv->opt_whitecrush_lower = whitecrush_lower; 3930 btv->opt_whitecrush_lower = whitecrush_lower;
3931 btv->opt_uv_ratio = uv_ratio;
3932 btv->opt_full_luma_range = full_luma_range;
3933 btv->opt_coring = coring;
3879 3934
3880 /* fill struct bttv with some useful defaults */ 3935 /* fill struct bttv with some useful defaults */
3881 btv->init.btv = btv; 3936 btv->init.btv = btv;
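
The bt848_sat() change above splits the programmed saturation between the U and V gain registers according to the new uv_ratio control instead of a fixed ratio. Below is a minimal stand-alone sketch of that arithmetic; the register writes are omitted and the sample colour value is chosen purely for illustration.

/*
 * Stand-alone illustration of the U/V gain split introduced in bt848_sat().
 * uv_ratio = 50 reproduces the old fixed behaviour; other values shift gain
 * between the U and V registers.  Build: gcc -o satdemo satdemo.c
 */
#include <stdio.h>

int main(void)
{
	long color = 400;        /* saturation input, 0-511 as in the driver */
	long uv_ratio = 50;      /* new module parameter, 0-100, default 50 */

	long val_u = ((color * uv_ratio) / 50) >> 7;
	long val_v = (((color * (100 - uv_ratio) / 50) >> 7) * 180L) / 254;

	/* the driver folds these two MSBs into the high control bits */
	long hibits = ((val_u >> 7) & 2) | ((val_v >> 8) & 1);

	printf("val_u=%ld val_v=%ld hibits=%ld\n", val_u, val_v, hibits);
	return 0;
}

With uv_ratio left at its default of 50, both expressions reduce to the previous val_u = color >> 7 and val_v = ((color >> 7) * 180L) / 254.
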
diff --git a/drivers/media/video/bttvp.h b/drivers/media/video/bttvp.h
index 7b6f1e856028..f3293e4a15ad 100644
--- a/drivers/media/video/bttvp.h
+++ b/drivers/media/video/bttvp.h
@@ -1,5 +1,5 @@
1/* 1/*
2 $Id: bttvp.h,v 1.17 2005/02/16 12:14:10 kraxel Exp $ 2 $Id: bttvp.h,v 1.19 2005/06/16 21:38:45 nsh Exp $
3 3
4 bttv - Bt848 frame grabber driver 4 bttv - Bt848 frame grabber driver
5 5
@@ -326,6 +326,9 @@ struct bttv {
326 int opt_vcr_hack; 326 int opt_vcr_hack;
327 int opt_whitecrush_upper; 327 int opt_whitecrush_upper;
328 int opt_whitecrush_lower; 328 int opt_whitecrush_lower;
329 int opt_uv_ratio;
330 int opt_full_luma_range;
331 int opt_coring;
329 332
330 /* radio data/state */ 333 /* radio data/state */
331 int has_radio; 334 int has_radio;
diff --git a/drivers/media/video/mt20xx.c b/drivers/media/video/mt20xx.c
index 95ad17b7f38e..9c005cb128d7 100644
--- a/drivers/media/video/mt20xx.c
+++ b/drivers/media/video/mt20xx.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: mt20xx.c,v 1.4 2005/03/04 09:24:56 kraxel Exp $ 2 * $Id: mt20xx.c,v 1.5 2005/06/16 08:29:49 nsh Exp $
3 * 3 *
4 * i2c tv tuner chip device driver 4 * i2c tv tuner chip device driver
5 * controls microtune tuners, mt2032 + mt2050 at the moment. 5 * controls microtune tuners, mt2032 + mt2050 at the moment.
@@ -295,8 +295,8 @@ static void mt2032_set_radio_freq(struct i2c_client *c, unsigned int freq)
295 int if2 = t->radio_if2; 295 int if2 = t->radio_if2;
296 296
297 // per Manual for FM tuning: first if center freq. 1085 MHz 297 // per Manual for FM tuning: first if center freq. 1085 MHz
298 mt2032_set_if_freq(c, freq*62500 /* freq*1000*1000/16 */, 298 mt2032_set_if_freq(c, freq * 1000 / 16,
299 1085*1000*1000,if2,if2,if2); 299 1085*1000*1000,if2,if2,if2);
300} 300}
301 301
302// Initialization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001 302// Initialization as described in "MT203x Programming Procedures", Rev 1.2, Feb.2001
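
The one-line change to mt2032_set_radio_freq() above reflects the tuner core now passing radio frequencies in 62.5 Hz units (V4L2_TUNER_CAP_LOW) rather than in 62.5 kHz units, so the conversion to Hz becomes freq * 1000 / 16 instead of freq * 62500. A small stand-alone check of the two scalings; the 101.1 MHz station is an example value only.

/*
 * Shows why freq*62500 (62.5 kHz units) and freq*1000/16 (62.5 Hz units)
 * describe the same station in Hz.  Build: gcc -o unitdemo unitdemo.c
 */
#include <stdio.h>

int main(void)
{
	unsigned long hz = 101100000UL;           /* 101.1 MHz, example only */
	unsigned long old_units = hz / 62500;     /* 1/16 MHz steps (62.5 kHz) */
	unsigned long new_units = hz * 16 / 1000; /* 1/16 kHz steps (62.5 Hz) */

	printf("old: %lu * 62500   = %lu Hz\n", old_units, old_units * 62500);
	printf("new: %lu * 1000/16 = %lu Hz\n", new_units, new_units * 1000 / 16);
	return 0;
}
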
diff --git a/drivers/media/video/tda8290.c b/drivers/media/video/tda8290.c
index b27cc348d95c..f59d4601cc63 100644
--- a/drivers/media/video/tda8290.c
+++ b/drivers/media/video/tda8290.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: tda8290.c,v 1.7 2005/03/07 12:01:51 kraxel Exp $ 2 * $Id: tda8290.c,v 1.11 2005/06/18 06:09:06 nsh Exp $
3 * 3 *
4 * i2c tv tuner chip device driver 4 * i2c tv tuner chip device driver
5 * controls the philips tda8290+75 tuner chip combo. 5 * controls the philips tda8290+75 tuner chip combo.
@@ -69,7 +69,7 @@ static __u8 get_freq_entry( struct freq_entry* table, __u16 freq)
69static unsigned char i2c_enable_bridge[2] = { 0x21, 0xC0 }; 69static unsigned char i2c_enable_bridge[2] = { 0x21, 0xC0 };
70static unsigned char i2c_disable_bridge[2] = { 0x21, 0x80 }; 70static unsigned char i2c_disable_bridge[2] = { 0x21, 0x80 };
71static unsigned char i2c_init_tda8275[14] = { 0x00, 0x00, 0x00, 0x00, 71static unsigned char i2c_init_tda8275[14] = { 0x00, 0x00, 0x00, 0x00,
72 0x7C, 0x04, 0xA3, 0x3F, 72 0xfC, 0x04, 0xA3, 0x3F,
73 0x2A, 0x04, 0xFF, 0x00, 73 0x2A, 0x04, 0xFF, 0x00,
74 0x00, 0x40 }; 74 0x00, 0x40 };
75static unsigned char i2c_set_VS[2] = { 0x30, 0x6F }; 75static unsigned char i2c_set_VS[2] = { 0x30, 0x6F };
@@ -138,16 +138,24 @@ static int tda8290_tune(struct i2c_client *c)
138 138
139static void set_frequency(struct tuner *t, u16 ifc) 139static void set_frequency(struct tuner *t, u16 ifc)
140{ 140{
141 u32 N = (((t->freq<<3)+ifc)&0x3fffc); 141 u32 freq;
142 u32 N;
142 143
143 N = N >> get_freq_entry(div_table, t->freq); 144 if (t->mode == V4L2_TUNER_RADIO)
145 freq = t->freq / 1000;
146 else
147 freq = t->freq;
148
149 N = (((freq<<3)+ifc)&0x3fffc);
150
151 N = N >> get_freq_entry(div_table, freq);
144 t->i2c_set_freq[0] = 0; 152 t->i2c_set_freq[0] = 0;
145 t->i2c_set_freq[1] = (unsigned char)(N>>8); 153 t->i2c_set_freq[1] = (unsigned char)(N>>8);
146 t->i2c_set_freq[2] = (unsigned char) N; 154 t->i2c_set_freq[2] = (unsigned char) N;
147 t->i2c_set_freq[3] = 0x40; 155 t->i2c_set_freq[3] = 0x40;
148 t->i2c_set_freq[4] = 0x52; 156 t->i2c_set_freq[4] = 0x52;
149 t->i2c_set_freq[5] = get_freq_entry(band_table, t->freq); 157 t->i2c_set_freq[5] = get_freq_entry(band_table, freq);
150 t->i2c_set_freq[6] = get_freq_entry(agc_table, t->freq); 158 t->i2c_set_freq[6] = get_freq_entry(agc_table, freq);
151 t->i2c_set_freq[7] = 0x8f; 159 t->i2c_set_freq[7] = 0x8f;
152} 160}
153 161
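
The tda8290 hunk above scales radio frequencies back down by 1000 so the existing divider math keeps operating in 62.5 kHz units before the PLL word N is formed. A stand-alone sketch of that computation follows; the ifc value and the shift that stands in for the get_freq_entry() table lookup are hypothetical placeholders.

/*
 * Illustrates the N calculation from tda8290.c's set_frequency():
 *   N = (((freq << 3) + ifc) & 0x3fffc) >> shift
 * Build: gcc -o tda8290demo tda8290demo.c
 */
#include <stdio.h>

int main(void)
{
	unsigned int freq = 101100000 / 625 * 10; /* 101.1 MHz in 1/16000 MHz units */
	unsigned int ifc = 88;                    /* hypothetical IF constant */
	unsigned int shift = 2;                   /* stands in for get_freq_entry() */
	unsigned int n;

	freq = freq / 1000;                       /* radio: back to 1/16 MHz units */
	n = (((freq << 3) + ifc) & 0x3fffc) >> shift;

	printf("freq=%u (62.5 kHz units), N=0x%04x\n", freq, n);
	return 0;
}
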
diff --git a/drivers/media/video/tda9887.c b/drivers/media/video/tda9887.c
index 39773633cc3c..ee35562f4d1a 100644
--- a/drivers/media/video/tda9887.c
+++ b/drivers/media/video/tda9887.c
@@ -368,7 +368,7 @@ static int tda9887_set_tvnorm(struct tda9887 *t, char *buf)
368 if (t->radio_mode == V4L2_TUNER_MODE_MONO) 368 if (t->radio_mode == V4L2_TUNER_MODE_MONO)
369 norm = &radio_mono; 369 norm = &radio_mono;
370 else 370 else
371 norm = &radio_stereo; 371 norm = &radio_stereo;
372 } else { 372 } else {
373 for (i = 0; i < ARRAY_SIZE(tvnorms); i++) { 373 for (i = 0; i < ARRAY_SIZE(tvnorms); i++) {
374 if (tvnorms[i].std & t->std) { 374 if (tvnorms[i].std & t->std) {
@@ -566,7 +566,6 @@ static int tda9887_configure(struct tda9887 *t)
566 if (UNSET != t->pinnacle_id) { 566 if (UNSET != t->pinnacle_id) {
567 tda9887_set_pinnacle(t,buf); 567 tda9887_set_pinnacle(t,buf);
568 } 568 }
569
570 tda9887_set_config(t,buf); 569 tda9887_set_config(t,buf);
571 tda9887_set_insmod(t,buf); 570 tda9887_set_insmod(t,buf);
572 571
@@ -615,8 +614,8 @@ static int tda9887_attach(struct i2c_adapter *adap, int addr, int kind)
615 t->pinnacle_id = UNSET; 614 t->pinnacle_id = UNSET;
616 t->radio_mode = V4L2_TUNER_MODE_STEREO; 615 t->radio_mode = V4L2_TUNER_MODE_STEREO;
617 616
618 i2c_set_clientdata(&t->client, t); 617 i2c_set_clientdata(&t->client, t);
619 i2c_attach_client(&t->client); 618 i2c_attach_client(&t->client);
620 619
621 return 0; 620 return 0;
622} 621}
diff --git a/drivers/media/video/tea5767.c b/drivers/media/video/tea5767.c
new file mode 100644
index 000000000000..a29f08f81f63
--- /dev/null
+++ b/drivers/media/video/tea5767.c
@@ -0,0 +1,334 @@
1/*
2 * For Philips TEA5767 FM Chip used on some TV Cards like Prolink Pixelview
3 * I2C address is always 0xC0.
4 *
5 * $Id: tea5767.c,v 1.11 2005/06/21 15:40:33 mchehab Exp $
6 *
7 * Copyright (c) 2005 Mauro Carvalho Chehab (mchehab@brturbo.com.br)
8 * This code is placed under the terms of the GNU General Public License
9 *
10 * tea5767 autodetection thanks to Torsten Seeboth and Atsushi Nakagawa
11 * from their contributions on DScaler.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/sched.h>
18#include <linux/string.h>
19#include <linux/timer.h>
20#include <linux/delay.h>
21#include <linux/errno.h>
22#include <linux/slab.h>
23#include <linux/videodev.h>
24#include <linux/i2c.h>
25#include <linux/i2c-algo-bit.h>
26
27#include <media/tuner.h>
28
29/* Declared at tuner-core.c */
30extern unsigned int tuner_debug;
31
32#define PREFIX "TEA5767 "
33
34/*****************************************************************************/
35
36/******************************
37 * Write mode register values *
38 ******************************/
39
40/* First register */
41#define TEA5767_MUTE 0x80 /* Mutes output */
42#define TEA5767_SEARCH 0x40 /* Activates station search */
43/* Bits 0-5 for divider MSB */
44
45/* Second register */
46/* Bits 0-7 for divider LSB */
47
48/* Third register */
49
50/* Station search from bottom to top */
51#define TEA5767_SEARCH_UP 0x80
52
53/* Searches with ADC output = 10 */
54#define TEA5767_SRCH_HIGH_LVL 0x60
55
56/* Searches with ADC output = 7 */
57#define TEA5767_SRCH_MID_LVL 0x40
58
59/* Searches with ADC output = 5 */
60#define TEA5767_SRCH_LOW_LVL 0x20
61
62/* If on, div=4*(Frf+Fif)/Fref; otherwise, div=4*(Frf-Fif)/Fref */
63#define TEA5767_HIGH_LO_INJECT 0x10
64
65/* Disable stereo */
66#define TEA5767_MONO 0x08
67
68/* Disables the right channel and switches to mono */
69#define TEA5767_MUTE_RIGHT 0x04
70
71/* Disables the left channel and switches to mono */
72#define TEA5767_MUTE_LEFT 0x02
73
74#define TEA5767_PORT1_HIGH 0x01
75
76/* Fourth register */
77#define TEA5767_PORT2_HIGH 0x80
78/* Chip stops working. Only the I2C bus remains on */
79#define TEA5767_STDBY 0x40
80
81/* Japan freq (76-108 MHz). If disabled, 87.5-108 MHz */
82#define TEA5767_JAPAN_BAND 0x20
83
84/* When set, a 32.768 kHz crystal is used as reference; otherwise a 13 MHz crystal */
85#define TEA5767_XTAL_32768 0x10
86
87/* Cuts weak signals */
88#define TEA5767_SOFT_MUTE 0x08
89
90/* Activates high cut control */
91#define TEA5767_HIGH_CUT_CTRL 0x04
92
93/* Activates stereo noise control */
94#define TEA5767_ST_NOISE_CTL 0x02
95
96/* If set, port 1 indicates the search status; otherwise it is used as PORT1 */
97#define TEA5767_SRCH_IND 0x01
98
99/* Fifth register */
100
101/* By activating, it will use Xtal at 13 MHz as reference for divider */
102#define TEA5767_PLLREF_ENABLE 0x80
103
104/* When set, de-emphasis is 75 us; otherwise 50 us */
105#define TEA5767_DEEMPH_75 0X40
106
107/*****************************
108 * Read mode register values *
109 *****************************/
110
111/* First register */
112#define TEA5767_READY_FLAG_MASK 0x80
113#define TEA5767_BAND_LIMIT_MASK 0X40
114/* Bits 0-5 for divider MSB after search or preset */
115
116/* Second register */
117/* Bits 0-7 for divider LSB after search or preset */
118
119/* Third register */
120#define TEA5767_STEREO_MASK 0x80
121#define TEA5767_IF_CNTR_MASK 0x7f
122
123/* Fourth register */
124#define TEA5767_ADC_LEVEL_MASK 0xf0
125
126/* should be 0 */
127#define TEA5767_CHIP_ID_MASK 0x0f
128
129/* Fifth register */
130/* Reserved for future extensions */
131#define TEA5767_RESERVED_MASK 0xff
132
133/*****************************************************************************/
134
135static void set_tv_freq(struct i2c_client *c, unsigned int freq)
136{
137 struct tuner *t = i2c_get_clientdata(c);
138
139 tuner_warn("This tuner doesn't support TV freq.\n");
140}
141
142static void tea5767_status_dump(unsigned char *buffer)
143{
144 unsigned int div, frq;
145
146 if (TEA5767_READY_FLAG_MASK & buffer[0])
147 printk(PREFIX "Ready Flag ON\n");
148 else
149 printk(PREFIX "Ready Flag OFF\n");
150
151 if (TEA5767_BAND_LIMIT_MASK & buffer[0])
152 printk(PREFIX "Tuner at band limit\n");
153 else
154 printk(PREFIX "Tuner not at band limit\n");
155
156 div=((buffer[0]&0x3f)<<8) | buffer[1];
157
158 switch (TEA5767_HIGH_LO_32768) {
159 case TEA5767_HIGH_LO_13MHz:
160 frq = 1000*(div*50-700-225)/4; /* Freq in KHz */
161 break;
162 case TEA5767_LOW_LO_13MHz:
163 frq = 1000*(div*50+700+225)/4; /* Freq in KHz */
164 break;
165 case TEA5767_LOW_LO_32768:
166 frq = 1000*(div*32768/1000+700+225)/4; /* Freq in KHz */
167 break;
168 case TEA5767_HIGH_LO_32768:
169 default:
170 frq = 1000*(div*32768/1000-700-225)/4; /* Freq in KHz */
171 break;
172 }
173 buffer[0] = (div>>8) & 0x3f;
174 buffer[1] = div & 0xff;
175
176 printk(PREFIX "Frequency %d.%03d KHz (divider = 0x%04x)\n",
177 frq/1000,frq%1000,div);
178
179 if (TEA5767_STEREO_MASK & buffer[2])
180 printk(PREFIX "Stereo\n");
181 else
182 printk(PREFIX "Mono\n");
183
184 printk(PREFIX "IF Counter = %d\n",buffer[2] & TEA5767_IF_CNTR_MASK);
185
186 printk(PREFIX "ADC Level = %d\n",(buffer[3] & TEA5767_ADC_LEVEL_MASK)>>4);
187
188 printk(PREFIX "Chip ID = %d\n",(buffer[3] & TEA5767_CHIP_ID_MASK));
189
190 printk(PREFIX "Reserved = 0x%02x\n",(buffer[4] & TEA5767_RESERVED_MASK));
191}
192
193/* Frequency should be specified in 62.5 Hz steps */
194static void set_radio_freq(struct i2c_client *c, unsigned int frq)
195{
196 struct tuner *t = i2c_get_clientdata(c);
197 unsigned char buffer[5];
198 unsigned div;
199 int rc;
200
201 if ( tuner_debug )
202 printk(PREFIX "radio freq counter %d\n",frq);
203
204 /* Rounds freq to next decimal value - for 62.5 KHz step */
205 /* frq = 20*(frq/16)+radio_frq[frq%16]; */
206
207 buffer[2] = TEA5767_PORT1_HIGH;
208 buffer[3] = TEA5767_PORT2_HIGH | TEA5767_HIGH_CUT_CTRL | TEA5767_ST_NOISE_CTL | TEA5767_JAPAN_BAND;
209 buffer[4]=0;
210
211 if (t->audmode == V4L2_TUNER_MODE_MONO) {
212 tuner_dbg("TEA5767 set to mono\n");
213 buffer[2] |= TEA5767_MONO;
214 } else
215 tuner_dbg("TEA5767 set to stereo\n");
216
217 switch (t->type) {
218 case TEA5767_HIGH_LO_13MHz:
219 tuner_dbg("TEA5767 radio HIGH LO inject xtal @ 13 MHz\n");
220 buffer[2] |= TEA5767_HIGH_LO_INJECT;
221 buffer[4] |= TEA5767_PLLREF_ENABLE;
222 div = (frq*4/16+700+225+25)/50;
223 break;
224 case TEA5767_LOW_LO_13MHz:
225 tuner_dbg("TEA5767 radio LOW LO inject xtal @ 13 MHz\n");
226
227 buffer[4] |= TEA5767_PLLREF_ENABLE;
228 div = (frq*4/16-700-225+25)/50;
229 break;
230 case TEA5767_LOW_LO_32768:
231		tuner_dbg("TEA5767 radio LOW LO inject xtal @ 32.768 kHz\n");
232 buffer[3] |= TEA5767_XTAL_32768;
233 /* const 700=4000*175 Khz - to adjust freq to right value */
234 div = (1000*(frq*4/16-700-225)+16384)>>15;
235 break;
236 case TEA5767_HIGH_LO_32768:
237 default:
238		tuner_dbg("TEA5767 radio HIGH LO inject xtal @ 32.768 kHz\n");
239
240 buffer[2] |= TEA5767_HIGH_LO_INJECT;
241 buffer[3] |= TEA5767_XTAL_32768;
242 div = (1000*(frq*4/16+700+225)+16384)>>15;
243 break;
244 }
245 buffer[0] = (div>>8) & 0x3f;
246 buffer[1] = div & 0xff;
247
248 if ( tuner_debug )
249 tea5767_status_dump(buffer);
250
251 if (5 != (rc = i2c_master_send(c,buffer,5)))
252 tuner_warn("i2c i/o error: rc == %d (should be 5)\n",rc);
253}
254
255static int tea5767_signal(struct i2c_client *c)
256{
257 unsigned char buffer[5];
258 int rc;
259 struct tuner *t = i2c_get_clientdata(c);
260
261 memset(buffer,0,sizeof(buffer));
262 if (5 != (rc = i2c_master_recv(c,buffer,5)))
263 tuner_warn ( "i2c i/o error: rc == %d (should be 5)\n",rc);
264
265 return ((buffer[3] & TEA5767_ADC_LEVEL_MASK) <<(13-4));
266}
267
268static int tea5767_stereo(struct i2c_client *c)
269{
270 unsigned char buffer[5];
271 int rc;
272 struct tuner *t = i2c_get_clientdata(c);
273
274 memset(buffer,0,sizeof(buffer));
275 if (5 != (rc = i2c_master_recv(c,buffer,5)))
276 tuner_warn ( "i2c i/o error: rc == %d (should be 5)\n",rc);
277
278 rc = buffer[2] & TEA5767_STEREO_MASK;
279
280 if ( tuner_debug )
281 tuner_dbg("TEA5767 radio ST GET = %02x\n", rc);
282
283 return ( (buffer[2] & TEA5767_STEREO_MASK) ? V4L2_TUNER_SUB_STEREO: 0);
284}
285
286int tea_detection(struct i2c_client *c)
287{
288 unsigned char buffer[5]= { 0xff, 0xff, 0xff, 0xff, 0xff };
289 int rc;
290 struct tuner *t = i2c_get_clientdata(c);
291
292 if (5 != (rc = i2c_master_recv(c,buffer,5))) {
293 tuner_warn ( "it is not a TEA5767. Received %i chars.\n",rc );
294 return EINVAL;
295 }
296
297 /* If all bytes are the same then it's a TV tuner and not a tea5767 chip. */
298 if (buffer[0] == buffer[1] && buffer[0] == buffer[2] &&
299 buffer[0] == buffer[3] && buffer[0] == buffer[4]) {
300 tuner_warn ( "All bytes are equal. It is not a TEA5767\n" );
301 return EINVAL;
302 }
303
304 /* Status bytes:
305 * Byte 4: bit 3:1 : CI (Chip Identification) == 0
306 * bit 0 : internally set to 0
307 * Byte 5: bit 7:0 : == 0
308 */
309
310 if (!((buffer[3] & 0x0f) == 0x00) && (buffer[4] == 0x00)) {
311 tuner_warn ( "Chip ID is not zero. It is not a TEA5767\n" );
312 return EINVAL;
313 }
314 tuner_warn ( "TEA5767 detected.\n" );
315 return 0;
316}
317
318int tea5767_tuner_init(struct i2c_client *c)
319{
320 struct tuner *t = i2c_get_clientdata(c);
321
322 if (tea_detection(c)==EINVAL) return EINVAL;
323
324 tuner_info("type set to %d (%s)\n",
325 t->type, TEA5767_TUNER_NAME);
326 strlcpy(c->name, TEA5767_TUNER_NAME, sizeof(c->name));
327
328 t->tv_freq = set_tv_freq;
329 t->radio_freq = set_radio_freq;
330 t->has_signal = tea5767_signal;
331 t->is_stereo = tea5767_stereo;
332
333 return (0);
334}
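
set_radio_freq() in the new tea5767.c derives the 14-bit PLL divider from the requested frequency (in 62.5 Hz units) plus the fixed 700/225 offsets, with the sign of the offset depending on high- or low-side LO injection and the scaling on the reference crystal. The following stand-alone program reproduces the TEA5767_HIGH_LO_32768 case from the hunk; the 89.1 MHz station is an example value only.

/*
 * Reproduces the divider math of tea5767.c's set_radio_freq() for the
 * TEA5767_HIGH_LO_32768 case:
 *   div = (1000 * (frq*4/16 + 700 + 225) + 16384) >> 15
 * where frq is the V4L2 frequency in 62.5 Hz units.
 * Build: gcc -o teadiv teadiv.c
 */
#include <stdio.h>

int main(void)
{
	unsigned long hz  = 89100000UL;        /* 89.1 MHz, example station */
	unsigned long frq = hz * 16 / 1000;    /* 62.5 Hz units, as passed by V4L2 */
	unsigned long div;
	unsigned char msb, lsb;

	/* 700 and 225 are the offset constants used by the driver;
	 * adding 16384 rounds before the >> 15 divide by 32768. */
	div = (1000UL * (frq * 4 / 16 + 700 + 225) + 16384) >> 15;

	msb = (div >> 8) & 0x3f;               /* goes into write register 1 */
	lsb = div & 0xff;                      /* goes into write register 2 */

	printf("frq=%lu div=%lu regs=0x%02x 0x%02x\n",
	       frq, div, (unsigned)msb, (unsigned)lsb);
	return 0;
}
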
diff --git a/drivers/media/video/tuner-core.c b/drivers/media/video/tuner-core.c
index eaabfc858703..6f6bf4a633fc 100644
--- a/drivers/media/video/tuner-core.c
+++ b/drivers/media/video/tuner-core.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: tuner-core.c,v 1.15 2005/06/12 01:36:14 mchehab Exp $ 2 * $Id: tuner-core.c,v 1.29 2005/06/21 15:40:33 mchehab Exp $
3 * 3 *
4 * i2c tv tuner chip device driver 4 * i2c tv tuner chip device driver
5 * core core, i.e. kernel interfaces, registering and so on 5 * core core, i.e. kernel interfaces, registering and so on
@@ -26,7 +26,6 @@
26/* 26/*
27 * comment line below to return to old behavior, where only one I2C device is supported 27
28 */ 28 */
29#define CONFIG_TUNER_MULTI_I2C /**/
30 29
31#define UNSET (-1U) 30#define UNSET (-1U)
32 31
@@ -58,9 +57,7 @@ MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer");
58MODULE_LICENSE("GPL"); 57MODULE_LICENSE("GPL");
59 58
60static int this_adap; 59static int this_adap;
61#ifdef CONFIG_TUNER_MULTI_I2C
62static unsigned short first_tuner, tv_tuner, radio_tuner; 60static unsigned short first_tuner, tv_tuner, radio_tuner;
63#endif
64 61
65static struct i2c_driver driver; 62static struct i2c_driver driver;
66static struct i2c_client client_template; 63static struct i2c_client client_template;
@@ -81,26 +78,9 @@ static void set_tv_freq(struct i2c_client *c, unsigned int freq)
81 return; 78 return;
82 } 79 }
83 if (freq < tv_range[0]*16 || freq > tv_range[1]*16) { 80 if (freq < tv_range[0]*16 || freq > tv_range[1]*16) {
84
85 if (freq >= tv_range[0]*16364 && freq <= tv_range[1]*16384) {
86 /* V4L2_TUNER_CAP_LOW frequency */
87
88 tuner_dbg("V4L2_TUNER_CAP_LOW freq selected for TV. Tuners yet doesn't support converting it to valid freq.\n");
89
90 t->tv_freq(c,freq>>10);
91
92 return;
93 } else {
94 /* FIXME: better do that chip-specific, but
95 right now we don't have that in the config
96 struct and this way is still better than no
97 check at all */
98 tuner_info("TV freq (%d.%02d) out of range (%d-%d)\n", 81 tuner_info("TV freq (%d.%02d) out of range (%d-%d)\n",
99 freq/16,freq%16*100/16,tv_range[0],tv_range[1]); 82 freq/16,freq%16*100/16,tv_range[0],tv_range[1]);
100 return;
101 }
102 } 83 }
103 tuner_dbg("62.5 Khz freq step selected for TV.\n");
104 t->tv_freq(c,freq); 84 t->tv_freq(c,freq);
105} 85}
106 86
@@ -116,31 +96,18 @@ static void set_radio_freq(struct i2c_client *c, unsigned int freq)
116 tuner_info("no radio tuning for this one, sorry.\n"); 96 tuner_info("no radio tuning for this one, sorry.\n");
117 return; 97 return;
118 } 98 }
119 if (freq < radio_range[0]*16 || freq > radio_range[1]*16) { 99 if (freq >= radio_range[0]*16000 && freq <= radio_range[1]*16000) {
120 if (freq >= tv_range[0]*16364 && freq <= tv_range[1]*16384) { 100 if (tuner_debug)
121 /* V4L2_TUNER_CAP_LOW frequency */ 101 tuner_info("radio freq step 62.5Hz (%d.%06d)\n",
122 if (t->type == TUNER_TEA5767) { 102 freq/16000,freq%16000*1000/16);
123 tuner_info("radio freq step 62.5Hz (%d.%06d)\n",(freq>>14),freq%(1<<14)*10000); 103 t->radio_freq(c,freq);
124 t->radio_freq(c,freq>>10); 104 } else {
125 return; 105 tuner_info("radio freq (%d.%02d) out of range (%d-%d)\n",
126 } 106 freq/16,freq%16*100/16,
127 107 radio_range[0],radio_range[1]);
128 tuner_dbg("V4L2_TUNER_CAP_LOW freq selected for Radio. Tuners yet doesn't support converting it to valid freq.\n");
129
130 tuner_info("radio freq (%d.%06d)\n",(freq>>14),freq%(1<<14)*10000);
131
132 t->radio_freq(c,freq>>10);
133 return;
134
135 } else {
136 tuner_info("radio freq (%d.%02d) out of range (%d-%d)\n",
137 freq/16,freq%16*100/16,
138 radio_range[0],radio_range[1]);
139 return;
140 }
141 } 108 }
142 tuner_dbg("62.5 Khz freq step selected for Radio.\n"); 109
143 t->radio_freq(c,freq); 110 return;
144} 111}
145 112
146static void set_freq(struct i2c_client *c, unsigned long freq) 113static void set_freq(struct i2c_client *c, unsigned long freq)
@@ -166,8 +133,8 @@ static void set_freq(struct i2c_client *c, unsigned long freq)
166static void set_type(struct i2c_client *c, unsigned int type) 133static void set_type(struct i2c_client *c, unsigned int type)
167{ 134{
168 struct tuner *t = i2c_get_clientdata(c); 135 struct tuner *t = i2c_get_clientdata(c);
136 unsigned char buffer[4];
169 137
170 tuner_dbg ("I2C addr 0x%02x with type %d\n",c->addr<<1,type);
171 /* sanity check */ 138 /* sanity check */
172 if (type == UNSET || type == TUNER_ABSENT) 139 if (type == UNSET || type == TUNER_ABSENT)
173 return; 140 return;
@@ -179,8 +146,8 @@ static void set_type(struct i2c_client *c, unsigned int type)
179 t->type = type; 146 t->type = type;
180 return; 147 return;
181 } 148 }
182 if (t->initialized) 149 if ((t->initialized) && (t->type == type))
183 /* run only once */ 150 /* run only once except type change Hac 04/05*/
184 return; 151 return;
185 152
186 t->initialized = 1; 153 t->initialized = 1;
@@ -193,25 +160,42 @@ static void set_type(struct i2c_client *c, unsigned int type)
193 case TUNER_PHILIPS_TDA8290: 160 case TUNER_PHILIPS_TDA8290:
194 tda8290_init(c); 161 tda8290_init(c);
195 break; 162 break;
163 case TUNER_TEA5767:
164 if (tea5767_tuner_init(c)==EINVAL) t->type=TUNER_ABSENT;
165 break;
166 case TUNER_PHILIPS_FMD1216ME_MK3:
167 buffer[0] = 0x0b;
168 buffer[1] = 0xdc;
169 buffer[2] = 0x9c;
170 buffer[3] = 0x60;
171 i2c_master_send(c,buffer,4);
172 mdelay(1);
173 buffer[2] = 0x86;
174 buffer[3] = 0x54;
175 i2c_master_send(c,buffer,4);
176 default_tuner_init(c);
177 break;
196 default: 178 default:
179 /* TEA5767 autodetection code */
180 if (tea5767_tuner_init(c)!=EINVAL) {
181 t->type = TUNER_TEA5767;
182 if (first_tuner == 0x60)
183 first_tuner++;
184 break;
185 }
186
197 default_tuner_init(c); 187 default_tuner_init(c);
198 break; 188 break;
199 } 189 }
190 tuner_dbg ("I2C addr 0x%02x with type %d\n",c->addr<<1,type);
200} 191}
201 192
202#ifdef CONFIG_TUNER_MULTI_I2C
203#define CHECK_ADDR(tp,cmd,tun) if (client->addr!=tp) { \ 193#define CHECK_ADDR(tp,cmd,tun) if (client->addr!=tp) { \
204 return 0; } else \ 194 return 0; } else if (tuner_debug) \
205 tuner_info ("Cmd %s accepted to "tun"\n",cmd); 195 tuner_info ("Cmd %s accepted to "tun"\n",cmd);
206#define CHECK_MODE(cmd) if (t->mode == V4L2_TUNER_RADIO) { \ 196#define CHECK_MODE(cmd) if (t->mode == V4L2_TUNER_RADIO) { \
207 CHECK_ADDR(radio_tuner,cmd,"radio") } else \ 197 CHECK_ADDR(radio_tuner,cmd,"radio") } else \
208 { CHECK_ADDR(tv_tuner,cmd,"TV"); } 198 { CHECK_ADDR(tv_tuner,cmd,"TV"); }
209#else
210#define CHECK_ADDR(tp,cmd,tun) tuner_info ("Cmd %s accepted to "tun"\n",cmd);
211#define CHECK_MODE(cmd) tuner_info ("Cmd %s accepted\n",cmd);
212#endif
213
214#ifdef CONFIG_TUNER_MULTI_I2C
215 199
216static void set_addr(struct i2c_client *c, struct tuner_addr *tun_addr) 200static void set_addr(struct i2c_client *c, struct tuner_addr *tun_addr)
217{ 201{
@@ -242,9 +226,6 @@ static void set_addr(struct i2c_client *c, struct tuner_addr *tun_addr)
242 } 226 }
243 set_type(c,tun_addr->type); 227 set_type(c,tun_addr->type);
244} 228}
245#else
246#define set_addr(c,tun_addr) set_type(c,(tun_addr)->type)
247#endif
248 229
249static char pal[] = "-"; 230static char pal[] = "-";
250module_param_string(pal, pal, sizeof(pal), 0644); 231module_param_string(pal, pal, sizeof(pal), 0644);
@@ -284,17 +265,12 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
284{ 265{
285 struct tuner *t; 266 struct tuner *t;
286 267
287#ifndef CONFIG_TUNER_MULTI_I2C
288 if (this_adap > 0)
289 return -1;
290#else
291 /* by default, first I2C card is both tv and radio tuner */ 268 /* by default, first I2C card is both tv and radio tuner */
292 if (this_adap == 0) { 269 if (this_adap == 0) {
293 first_tuner = addr; 270 first_tuner = addr;
294 tv_tuner = addr; 271 tv_tuner = addr;
295 radio_tuner = addr; 272 radio_tuner = addr;
296 } 273 }
297#endif
298 this_adap++; 274 this_adap++;
299 275
300 client_template.adapter = adap; 276 client_template.adapter = adap;
@@ -308,6 +284,7 @@ static int tuner_attach(struct i2c_adapter *adap, int addr, int kind)
308 i2c_set_clientdata(&t->i2c, t); 284 i2c_set_clientdata(&t->i2c, t);
309 t->type = UNSET; 285 t->type = UNSET;
310 t->radio_if2 = 10700*1000; /* 10.7MHz - FM radio */ 286 t->radio_if2 = 10700*1000; /* 10.7MHz - FM radio */
287 t->audmode = V4L2_TUNER_MODE_STEREO;
311 288
312 i2c_attach_client(&t->i2c); 289 i2c_attach_client(&t->i2c);
313 tuner_info("chip found @ 0x%x (%s)\n", 290 tuner_info("chip found @ 0x%x (%s)\n",
@@ -325,11 +302,9 @@ static int tuner_probe(struct i2c_adapter *adap)
325 } 302 }
326 this_adap = 0; 303 this_adap = 0;
327 304
328#ifdef CONFIG_TUNER_MULTI_I2C
329 first_tuner = 0; 305 first_tuner = 0;
330 tv_tuner = 0; 306 tv_tuner = 0;
331 radio_tuner = 0; 307 radio_tuner = 0;
332#endif
333 308
334 if (adap->class & I2C_CLASS_TV_ANALOG) 309 if (adap->class & I2C_CLASS_TV_ANALOG)
335 return i2c_probe(adap, &addr_data, tuner_attach); 310 return i2c_probe(adap, &addr_data, tuner_attach);
@@ -392,8 +367,7 @@ tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
392 t->radio_if2 = 41300 * 1000; 367 t->radio_if2 = 41300 * 1000;
393 break; 368 break;
394 } 369 }
395 break; 370 break;
396
397 /* --- v4l ioctls --- */ 371 /* --- v4l ioctls --- */
398 /* take care: bttv does userspace copying, we'll get a 372 /* take care: bttv does userspace copying, we'll get a
399 kernel pointer here... */ 373 kernel pointer here... */
@@ -440,11 +414,18 @@ tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
440 vt->signal = t->has_signal(client); 414 vt->signal = t->has_signal(client);
441 if (t->is_stereo) { 415 if (t->is_stereo) {
442 if (t->is_stereo(client)) 416 if (t->is_stereo(client))
443 vt-> flags |= VIDEO_TUNER_STEREO_ON; 417 vt->flags |= VIDEO_TUNER_STEREO_ON;
444 else 418 else
445 vt-> flags &= 0xffff ^ VIDEO_TUNER_STEREO_ON; 419 vt->flags &= ~VIDEO_TUNER_STEREO_ON;
446 } 420 }
447 vt->flags |= V4L2_TUNER_CAP_LOW; /* Allow freqs at 62.5 Hz */ 421 vt->flags |= V4L2_TUNER_CAP_LOW; /* Allow freqs at 62.5 Hz */
422
423 vt->rangelow = radio_range[0] * 16000;
424 vt->rangehigh = radio_range[1] * 16000;
425
426 } else {
427 vt->rangelow = tv_range[0] * 16;
428 vt->rangehigh = tv_range[1] * 16;
448 } 429 }
449 430
450 return 0; 431 return 0;
@@ -510,20 +491,46 @@ tuner_command(struct i2c_client *client, unsigned int cmd, void *arg)
510 tuner -> signal = t->has_signal(client); 491 tuner -> signal = t->has_signal(client);
511 if (t->is_stereo) { 492 if (t->is_stereo) {
512 if (t->is_stereo(client)) { 493 if (t->is_stereo(client)) {
513 tuner -> capability |= V4L2_TUNER_CAP_STEREO; 494 tuner -> rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO;
514 tuner -> rxsubchans |= V4L2_TUNER_SUB_STEREO;
515 } else { 495 } else {
516 tuner -> rxsubchans &= 0xffff ^ V4L2_TUNER_SUB_STEREO; 496 tuner -> rxsubchans = V4L2_TUNER_SUB_MONO;
517 } 497 }
518 } 498 }
499 tuner->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO;
500 tuner->audmode = t->audmode;
501
502 tuner->rangelow = radio_range[0] * 16000;
503 tuner->rangehigh = radio_range[1] * 16000;
504 } else {
505 tuner->rangelow = tv_range[0] * 16;
506 tuner->rangehigh = tv_range[1] * 16;
519 } 507 }
520 /* Wow to deal with V4L2_TUNER_CAP_LOW ? For now, it accepts from low at 62.5KHz step to high at 62.5 Hz */
521 tuner->rangelow = tv_range[0] * 16;
522// tuner->rangehigh = tv_range[1] * 16;
523// tuner->rangelow = tv_range[0] * 16384;
524 tuner->rangehigh = tv_range[1] * 16384;
525 break; 508 break;
526 } 509 }
510 case VIDIOC_S_TUNER: /* Allow changing radio range and audio mode */
511 {
512 struct v4l2_tuner *tuner = arg;
513
514 CHECK_ADDR(radio_tuner,"VIDIOC_S_TUNER","radio");
515 SWITCH_V4L2;
516
517 /* To switch the audio mode, applications initialize the
518 index and audmode fields and the reserved array and
519 call the VIDIOC_S_TUNER ioctl. */
520 /* rxsubchannels: V4L2_TUNER_MODE_MONO, V4L2_TUNER_MODE_STEREO,
521 V4L2_TUNER_MODE_LANG1, V4L2_TUNER_MODE_LANG2,
522 V4L2_TUNER_MODE_SAP */
523
524 if (tuner->audmode == V4L2_TUNER_MODE_MONO)
525 t->audmode = V4L2_TUNER_MODE_MONO;
526 else
527 t->audmode = V4L2_TUNER_MODE_STEREO;
528
529 set_radio_freq(client, t->freq);
530 break;
531 }
532 case TDA9887_SET_CONFIG: /* Nothing to do on tuner-core */
533 break;
527 default: 534 default:
528 tuner_dbg ("Unimplemented IOCTL 0x%08x called to tuner.\n", cmd); 535 tuner_dbg ("Unimplemented IOCTL 0x%08x called to tuner.\n", cmd);
529 /* nothing */ 536 /* nothing */
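
Since the core now reports V4L2_TUNER_CAP_LOW, radio frequencies are handled in 1/16000 MHz (62.5 Hz) units throughout, which is why set_radio_freq() compares against radio_range[x] * 16000 and VIDIOC_G_TUNER scales rangelow/rangehigh by 16000 instead of 16. A stand-alone version of that range check follows; the 65-108 MHz limits mirror the range the removed bttv radio ioctl code used to hard-code and are assumed here to be the driver defaults.

/*
 * Range check equivalent to tuner-core.c's set_radio_freq(): with
 * V4L2_TUNER_CAP_LOW the unit is 1/16000 MHz (62.5 Hz), so the MHz limits
 * are multiplied by 16000 rather than 16.  Build: gcc -o rangedemo rangedemo.c
 */
#include <stdio.h>

static const unsigned int radio_range[2] = { 65, 108 };  /* MHz, assumed defaults */

int main(void)
{
	unsigned int freq = 90 * 16000 + 8000;   /* 90.5 MHz in 62.5 Hz units */

	if (freq >= radio_range[0] * 16000 && freq <= radio_range[1] * 16000)
		printf("radio freq step 62.5Hz (%u.%06u)\n",
		       freq / 16000, freq % 16000 * 1000 / 16);
	else
		printf("radio freq out of range (%u-%u MHz)\n",
		       radio_range[0], radio_range[1]);
	return 0;
}
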
diff --git a/drivers/media/video/tuner-simple.c b/drivers/media/video/tuner-simple.c
index 539f30557317..c39ed6226ee0 100644
--- a/drivers/media/video/tuner-simple.c
+++ b/drivers/media/video/tuner-simple.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * $Id: tuner-simple.c,v 1.21 2005/06/10 19:53:26 nsh Exp $ 2 * $Id: tuner-simple.c,v 1.31 2005/06/21 16:02:25 mkrufky Exp $
3 * 3 *
4 * i2c tv tuner chip device driver 4 * i2c tv tuner chip device driver
5 * controls all those simple 4-control-bytes style tuners. 5 * controls all those simple 4-control-bytes style tuners.
@@ -207,28 +207,27 @@ static struct tunertype tuners[] = {
207 { "LG PAL (TAPE series)", LGINNOTEK, PAL, 207 { "LG PAL (TAPE series)", LGINNOTEK, PAL,
208 16*170.00, 16*450.00, 0x01,0x02,0x08,0xce,623}, 208 16*170.00, 16*450.00, 0x01,0x02,0x08,0xce,623},
209 209
210 { "Philips PAL/SECAM multi (FQ1216AME MK4)", Philips, PAL, 210 { "Philips PAL/SECAM multi (FQ1216AME MK4)", Philips, PAL,
211 16*160.00,16*442.00,0x01,0x02,0x04,0xce,623 }, 211 16*160.00,16*442.00,0x01,0x02,0x04,0xce,623 },
212 { "Philips FQ1236A MK4", Philips, NTSC, 212 { "Philips FQ1236A MK4", Philips, NTSC,
213 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732 }, 213 16*160.00,16*442.00,0x01,0x02,0x04,0x8e,732 },
214 214
215 /* Should work for TVF8531MF, TVF8831MF, TVF8731MF */ 215 /* Should work for TVF8531MF, TVF8831MF, TVF8731MF */
216 { "Ymec TVision TVF-8531MF", Philips, NTSC, 216 { "Ymec TVision TVF-8531MF", Philips, NTSC,
217 16*160.00,16*454.00,0xa0,0x90,0x30,0x8e,732}, 217 16*160.00,16*454.00,0xa0,0x90,0x30,0x8e,732},
218 { "Ymec TVision TVF-5533MF", Philips, NTSC, 218 { "Ymec TVision TVF-5533MF", Philips, NTSC,
219 16*160.00,16*454.00,0x01,0x02,0x04,0x8e,732}, 219 16*160.00,16*454.00,0x01,0x02,0x04,0x8e,732},
220
221 { "Thomson DDT 7611 (ATSC/NTSC)", THOMSON, ATSC, 220 { "Thomson DDT 7611 (ATSC/NTSC)", THOMSON, ATSC,
222 16*157.25,16*454.00,0x39,0x3a,0x3c,0x8e,732}, 221 16*157.25,16*454.00,0x39,0x3a,0x3c,0x8e,732},
223 { "Tena TNF9533-D/IF", LGINNOTEK, PAL, 222 /* Should work for TNF9533-D/IF, TNF9533-B/DF */
224 16*160.25, 16*464.25, 0x01,0x02,0x08,0x8e,623}, 223 { "Tena TNF9533-D/IF", Philips, PAL,
224 16*160.25,16*464.25,0x01,0x02,0x04,0x8e,623},
225 225
226 /* 226 /* This entry is for TEA5767 FM radio only chip used on several boards w/TV tuner */
227 * This entry is for TEA5767 FM radio only chip used on several boards
228 * w/TV tuner
229 */
230 { TEA5767_TUNER_NAME, Philips, RADIO, 227 { TEA5767_TUNER_NAME, Philips, RADIO,
231 -1, -1, 0, 0, 0, TEA5767_LOW_LO_32768,0}, 228 -1, -1, 0, 0, 0, TEA5767_LOW_LO_32768,0},
229 { "Philips FMD1216ME MK3 Hybrid Tuner", Philips, PAL,
230 16*160.00,16*442.00,0x51,0x52,0x54,0x86,623 },
232}; 231};
233 232
234unsigned const int tuner_count = ARRAY_SIZE(tuners); 233unsigned const int tuner_count = ARRAY_SIZE(tuners);
@@ -455,24 +454,24 @@ static void default_set_radio_freq(struct i2c_client *c, unsigned int freq)
455 int rc; 454 int rc;
456 455
457 tun=&tuners[t->type]; 456 tun=&tuners[t->type];
458 div = freq + (int)(16*10.7); 457 div = (freq / 1000) + (int)(16*10.7);
459 buffer[2] = tun->config; 458 buffer[2] = tun->config;
460 459
461 switch (t->type) { 460 switch (t->type) {
462 case TUNER_TENA_9533_DI: 461 case TUNER_TENA_9533_DI:
463 case TUNER_YMEC_TVF_5533MF: 462 case TUNER_YMEC_TVF_5533MF:
464
465 /*These values are empirically determined */ 463 /*These values are empirically determined */
466 div = (freq*122)/16 - 20; 464 div = (freq * 122) / 16000 - 20;
467 buffer[2] = 0x88; /* could be also 0x80 */ 465 buffer[2] = 0x88; /* could be also 0x80 */
468 buffer[3] = 0x19; /* could be also 0x10, 0x18, 0x99 */ 466 buffer[3] = 0x19; /* could be also 0x10, 0x18, 0x99 */
469 break; 467 break;
470 case TUNER_PHILIPS_FM1216ME_MK3: 468 case TUNER_PHILIPS_FM1216ME_MK3:
471 case TUNER_PHILIPS_FM1236_MK3: 469 case TUNER_PHILIPS_FM1236_MK3:
470 case TUNER_PHILIPS_FMD1216ME_MK3:
472 buffer[3] = 0x19; 471 buffer[3] = 0x19;
473 break; 472 break;
474 case TUNER_PHILIPS_FM1256_IH3: 473 case TUNER_PHILIPS_FM1256_IH3:
475 div = (20 * freq)/16 + 333 * 2; 474 div = (20 * freq) / 16000 + 333 * 2;
476 buffer[2] = 0x80; 475 buffer[2] = 0x80;
477 buffer[3] = 0x19; 476 buffer[3] = 0x19;
478 break; 477 break;
@@ -505,6 +504,7 @@ int default_tuner_init(struct i2c_client *c)
505 t->radio_freq = default_set_radio_freq; 504 t->radio_freq = default_set_radio_freq;
506 t->has_signal = tuner_signal; 505 t->has_signal = tuner_signal;
507 t->is_stereo = tuner_stereo; 506 t->is_stereo = tuner_stereo;
507
508 return 0; 508 return 0;
509} 509}
510 510
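
default_set_radio_freq() follows the same unit change: the incoming value arrives in 62.5 Hz steps, so dividing by 1000 puts it back on the 62.5 kHz grid before the 16*10.7 term adds the 10.7 MHz IF to form the divider. A stand-alone sketch of that default branch; the 98.5 MHz station is an example value only.

/*
 * Mirrors the default divider computation in tuner-simple.c:
 *   div = (freq / 1000) + 16 * 10.7
 * i.e. (RF + 10.7 MHz IF) expressed in 62.5 kHz steps.
 * Build: gcc -o simpledemo simpledemo.c
 */
#include <stdio.h>

int main(void)
{
	unsigned long hz   = 98500000UL;        /* 98.5 MHz, example only */
	unsigned long freq = hz * 16 / 1000;    /* 62.5 Hz units from the core */
	unsigned long div  = (freq / 1000) + (int)(16 * 10.7);

	/* div/16 is the LO frequency the PLL will be programmed to, in MHz */
	printf("freq=%lu div=%lu LO=%lu.%04lu MHz\n",
	       freq, div, div / 16, (div % 16) * 10000 / 16);
	return 0;
}
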
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index d8d65397e06e..353deb25e397 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -364,9 +364,7 @@ static struct pci_driver mptfc_driver = {
364 .id_table = mptfc_pci_table, 364 .id_table = mptfc_pci_table,
365 .probe = mptfc_probe, 365 .probe = mptfc_probe,
366 .remove = __devexit_p(mptscsih_remove), 366 .remove = __devexit_p(mptscsih_remove),
367 .driver = { 367 .shutdown = mptscsih_shutdown,
368 .shutdown = mptscsih_shutdown,
369 },
370#ifdef CONFIG_PM 368#ifdef CONFIG_PM
371 .suspend = mptscsih_suspend, 369 .suspend = mptscsih_suspend,
372 .resume = mptscsih_resume, 370 .resume = mptscsih_resume,
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index a0078ae5b9b8..4f973a49be4c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -170,7 +170,7 @@ static void mptscsih_fillbuf(char *buffer, int size, int index, int width);
170#endif 170#endif
171 171
172void mptscsih_remove(struct pci_dev *); 172void mptscsih_remove(struct pci_dev *);
173void mptscsih_shutdown(struct device *); 173void mptscsih_shutdown(struct pci_dev *);
174#ifdef CONFIG_PM 174#ifdef CONFIG_PM
175int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state); 175int mptscsih_suspend(struct pci_dev *pdev, pm_message_t state);
176int mptscsih_resume(struct pci_dev *pdev); 176int mptscsih_resume(struct pci_dev *pdev);
@@ -988,7 +988,7 @@ mptscsih_remove(struct pci_dev *pdev)
988#endif 988#endif
989#endif 989#endif
990 990
991 mptscsih_shutdown(&pdev->dev); 991 mptscsih_shutdown(pdev);
992 992
993 sz1=0; 993 sz1=0;
994 994
@@ -1026,9 +1026,9 @@ mptscsih_remove(struct pci_dev *pdev)
1026 * 1026 *
1027 */ 1027 */
1028void 1028void
1029mptscsih_shutdown(struct device * dev) 1029mptscsih_shutdown(struct pci_dev *pdev)
1030{ 1030{
1031 MPT_ADAPTER *ioc = pci_get_drvdata(to_pci_dev(dev)); 1031 MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
1032 struct Scsi_Host *host = ioc->sh; 1032 struct Scsi_Host *host = ioc->sh;
1033 MPT_SCSI_HOST *hd; 1033 MPT_SCSI_HOST *hd;
1034 1034
@@ -1054,7 +1054,7 @@ mptscsih_shutdown(struct device * dev)
1054int 1054int
1055mptscsih_suspend(struct pci_dev *pdev, pm_message_t state) 1055mptscsih_suspend(struct pci_dev *pdev, pm_message_t state)
1056{ 1056{
1057 mptscsih_shutdown(&pdev->dev); 1057 mptscsih_shutdown(pdev);
1058 return mpt_suspend(pdev,state); 1058 return mpt_suspend(pdev,state);
1059} 1059}
1060 1060
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index d73aec33e16a..5ea89bf0df19 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -82,7 +82,7 @@
82#endif 82#endif
83 83
84extern void mptscsih_remove(struct pci_dev *); 84extern void mptscsih_remove(struct pci_dev *);
85extern void mptscsih_shutdown(struct device *); 85extern void mptscsih_shutdown(struct pci_dev *);
86#ifdef CONFIG_PM 86#ifdef CONFIG_PM
87extern int mptscsih_suspend(struct pci_dev *pdev, u32 state); 87extern int mptscsih_suspend(struct pci_dev *pdev, u32 state);
88extern int mptscsih_resume(struct pci_dev *pdev); 88extern int mptscsih_resume(struct pci_dev *pdev);
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 5f9a61b85b3b..e0c0ee5bc966 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -419,9 +419,7 @@ static struct pci_driver mptspi_driver = {
419 .id_table = mptspi_pci_table, 419 .id_table = mptspi_pci_table,
420 .probe = mptspi_probe, 420 .probe = mptspi_probe,
421 .remove = __devexit_p(mptscsih_remove), 421 .remove = __devexit_p(mptscsih_remove),
422 .driver = { 422 .shutdown = mptscsih_shutdown,
423 .shutdown = mptscsih_shutdown,
424 },
425#ifdef CONFIG_PM 423#ifdef CONFIG_PM
426 .suspend = mptscsih_suspend, 424 .suspend = mptscsih_suspend,
427 .resume = mptscsih_resume, 425 .resume = mptscsih_resume,
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index d272ea36a578..91d1c4c24d9b 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -822,7 +822,7 @@ static int corkscrew_open(struct net_device *dev)
822 break; /* Bad news! */ 822 break; /* Bad news! */
823 skb->dev = dev; /* Mark as being used by this device. */ 823 skb->dev = dev; /* Mark as being used by this device. */
824 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 824 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
825 vp->rx_ring[i].addr = isa_virt_to_bus(skb->tail); 825 vp->rx_ring[i].addr = isa_virt_to_bus(skb->data);
826 } 826 }
827 vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */ 827 vp->rx_ring[i - 1].next = isa_virt_to_bus(&vp->rx_ring[0]); /* Wrap the ring. */
828 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr); 828 outl(isa_virt_to_bus(&vp->rx_ring[0]), ioaddr + UpListPtr);
@@ -1406,7 +1406,7 @@ static int boomerang_rx(struct net_device *dev)
1406 break; /* Bad news! */ 1406 break; /* Bad news! */
1407 skb->dev = dev; /* Mark as being used by this device. */ 1407 skb->dev = dev; /* Mark as being used by this device. */
1408 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1408 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1409 vp->rx_ring[entry].addr = isa_virt_to_bus(skb->tail); 1409 vp->rx_ring[entry].addr = isa_virt_to_bus(skb->data);
1410 vp->rx_skbuff[entry] = skb; 1410 vp->rx_skbuff[entry] = skb;
1411 } 1411 }
1412 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 1412 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 80ec9aa575bb..07746b95fd83 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -1802,7 +1802,7 @@ vortex_open(struct net_device *dev)
1802 break; /* Bad news! */ 1802 break; /* Bad news! */
1803 skb->dev = dev; /* Mark as being used by this device. */ 1803 skb->dev = dev; /* Mark as being used by this device. */
1804 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1804 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1805 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1805 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1806 } 1806 }
1807 if (i != RX_RING_SIZE) { 1807 if (i != RX_RING_SIZE) {
1808 int j; 1808 int j;
@@ -2632,7 +2632,7 @@ boomerang_rx(struct net_device *dev)
2632 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2632 pci_dma_sync_single_for_cpu(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2633 /* 'skb_put()' points to the start of sk_buff data area. */ 2633 /* 'skb_put()' points to the start of sk_buff data area. */
2634 memcpy(skb_put(skb, pkt_len), 2634 memcpy(skb_put(skb, pkt_len),
2635 vp->rx_skbuff[entry]->tail, 2635 vp->rx_skbuff[entry]->data,
2636 pkt_len); 2636 pkt_len);
2637 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 2637 pci_dma_sync_single_for_device(VORTEX_PCI(vp), dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
2638 vp->rx_copy++; 2638 vp->rx_copy++;
@@ -2678,7 +2678,7 @@ boomerang_rx(struct net_device *dev)
2678 } 2678 }
2679 skb->dev = dev; /* Mark as being used by this device. */ 2679 skb->dev = dev; /* Mark as being used by this device. */
2680 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 2680 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
2681 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2681 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2682 vp->rx_skbuff[entry] = skb; 2682 vp->rx_skbuff[entry] = skb;
2683 } 2683 }
2684 vp->rx_ring[entry].status = 0; /* Clear complete bit. */ 2684 vp->rx_ring[entry].status = 0; /* Clear complete bit. */
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index e4b3c5c88542..7b293f01c9ed 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -596,7 +596,7 @@ rx_status_loop:
596 596
597 mapping = 597 mapping =
598 cp->rx_skb[rx_tail].mapping = 598 cp->rx_skb[rx_tail].mapping =
599 pci_map_single(cp->pdev, new_skb->tail, 599 pci_map_single(cp->pdev, new_skb->data,
600 buflen, PCI_DMA_FROMDEVICE); 600 buflen, PCI_DMA_FROMDEVICE);
601 cp->rx_skb[rx_tail].skb = new_skb; 601 cp->rx_skb[rx_tail].skb = new_skb;
602 602
@@ -1101,7 +1101,7 @@ static int cp_refill_rx (struct cp_private *cp)
1101 skb_reserve(skb, RX_OFFSET); 1101 skb_reserve(skb, RX_OFFSET);
1102 1102
1103 cp->rx_skb[i].mapping = pci_map_single(cp->pdev, 1103 cp->rx_skb[i].mapping = pci_map_single(cp->pdev,
1104 skb->tail, cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1104 skb->data, cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1105 cp->rx_skb[i].skb = skb; 1105 cp->rx_skb[i].skb = skb;
1106 1106
1107 cp->rx_ring[i].opts2 = 0; 1107 cp->rx_ring[i].opts2 = 0;
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index 65f97b1dc581..13b745b39667 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -546,11 +546,11 @@ static inline void init_rx_bufs(struct net_device *dev)
546 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1)); 546 rbd->b_next = WSWAPrbd(virt_to_bus(rbd+1));
547 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd)); 547 rbd->b_addr = WSWAPrbd(virt_to_bus(rbd));
548 rbd->skb = skb; 548 rbd->skb = skb;
549 rbd->v_data = skb->tail; 549 rbd->v_data = skb->data;
550 rbd->b_data = WSWAPchar(virt_to_bus(skb->tail)); 550 rbd->b_data = WSWAPchar(virt_to_bus(skb->data));
551 rbd->size = PKT_BUF_SZ; 551 rbd->size = PKT_BUF_SZ;
552#ifdef __mc68000__ 552#ifdef __mc68000__
553 cache_clear(virt_to_phys(skb->tail), PKT_BUF_SZ); 553 cache_clear(virt_to_phys(skb->data), PKT_BUF_SZ);
554#endif 554#endif
555 } 555 }
556 lp->rbd_head = lp->rbds; 556 lp->rbd_head = lp->rbds;
@@ -816,10 +816,10 @@ static inline int i596_rx(struct net_device *dev)
816 rx_in_place = 1; 816 rx_in_place = 1;
817 rbd->skb = newskb; 817 rbd->skb = newskb;
818 newskb->dev = dev; 818 newskb->dev = dev;
819 rbd->v_data = newskb->tail; 819 rbd->v_data = newskb->data;
820 rbd->b_data = WSWAPchar(virt_to_bus(newskb->tail)); 820 rbd->b_data = WSWAPchar(virt_to_bus(newskb->data));
821#ifdef __mc68000__ 821#ifdef __mc68000__
822 cache_clear(virt_to_phys(newskb->tail), PKT_BUF_SZ); 822 cache_clear(virt_to_phys(newskb->data), PKT_BUF_SZ);
823#endif 823#endif
824 } 824 }
825 else 825 else
@@ -840,7 +840,7 @@ memory_squeeze:
840 skb->protocol=eth_type_trans(skb,dev); 840 skb->protocol=eth_type_trans(skb,dev);
841 skb->len = pkt_len; 841 skb->len = pkt_len;
842#ifdef __mc68000__ 842#ifdef __mc68000__
843 cache_clear(virt_to_phys(rbd->skb->tail), 843 cache_clear(virt_to_phys(rbd->skb->data),
844 pkt_len); 844 pkt_len);
845#endif 845#endif
846 netif_rx(skb); 846 netif_rx(skb);
diff --git a/drivers/net/amd8111e.c b/drivers/net/amd8111e.c
index b7dd7260cafb..8618012df06a 100755
--- a/drivers/net/amd8111e.c
+++ b/drivers/net/amd8111e.c
@@ -87,6 +87,7 @@ Revision History:
87#include <linux/if_vlan.h> 87#include <linux/if_vlan.h>
88#include <linux/ctype.h> 88#include <linux/ctype.h>
89#include <linux/crc32.h> 89#include <linux/crc32.h>
90#include <linux/dma-mapping.h>
90 91
91#include <asm/system.h> 92#include <asm/system.h>
92#include <asm/io.h> 93#include <asm/io.h>
@@ -2006,12 +2007,11 @@ static int __devinit amd8111e_probe_one(struct pci_dev *pdev,
2006 } 2007 }
2007 2008
2008 /* Initialize DMA */ 2009 /* Initialize DMA */
2009 if(!pci_dma_supported(pdev, 0xffffffff)){ 2010 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) < 0) {
2010 printk(KERN_ERR "amd8111e: DMA not supported," 2011 printk(KERN_ERR "amd8111e: DMA not supported,"
2011 "exiting.\n"); 2012 "exiting.\n");
2012 goto err_free_reg; 2013 goto err_free_reg;
2013 } else 2014 }
2014 pdev->dma_mask = 0xffffffff;
2015 2015
2016 reg_addr = pci_resource_start(pdev, 0); 2016 reg_addr = pci_resource_start(pdev, 0);
2017 reg_len = pci_resource_len(pdev, 0); 2017 reg_len = pci_resource_len(pdev, 0);
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index b8ab2b6355eb..e613cc289749 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -34,10 +34,6 @@
34 only is it difficult to detect, it also moves around in I/O space in 34 only is it difficult to detect, it also moves around in I/O space in
35 response to inb()s from other device probes! 35 response to inb()s from other device probes!
36*/ 36*/
37/*
38 99/03/03 Allied Telesis RE1000 Plus support by T.Hagawa
39 99/12/30 port to 2.3.35 by K.Takai
40*/
41 37
42#include <linux/config.h> 38#include <linux/config.h>
43#include <linux/errno.h> 39#include <linux/errno.h>
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index aa42b7a27735..430c628279b3 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -547,7 +547,7 @@ rio_timer (unsigned long data)
 			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
-					 (np->pdev, skb->tail, np->rx_buf_sz,
+					 (np->pdev, skb->data, np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE));
 		}
 		np->rx_ring[entry].fraginfo |=
@@ -618,7 +618,7 @@ alloc_list (struct net_device *dev)
 		/* Rubicon now supports 40 bits of addressing space. */
 		np->rx_ring[i].fraginfo =
 		    cpu_to_le64 ( pci_map_single (
-			 	  np->pdev, skb->tail, np->rx_buf_sz,
+			 	  np->pdev, skb->data, np->rx_buf_sz,
 				  PCI_DMA_FROMDEVICE));
 		np->rx_ring[i].fraginfo |= cpu_to_le64 (np->rx_buf_sz) << 48;
 	}
@@ -906,7 +906,7 @@ receive_packet (struct net_device *dev)
 				/* 16 byte align the IP header */
 				skb_reserve (skb, 2);
 				eth_copy_and_sum (skb,
-						  np->rx_skbuff[entry]->tail,
+						  np->rx_skbuff[entry]->data,
 						  pkt_len, 0);
 				skb_put (skb, pkt_len);
 				pci_dma_sync_single_for_device(np->pdev,
@@ -950,7 +950,7 @@ receive_packet (struct net_device *dev)
 			skb_reserve (skb, 2);
 			np->rx_ring[entry].fraginfo =
 			    cpu_to_le64 (pci_map_single
-					 (np->pdev, skb->tail, np->rx_buf_sz,
+					 (np->pdev, skb->data, np->rx_buf_sz,
 					  PCI_DMA_FROMDEVICE));
 		}
 		np->rx_ring[entry].fraginfo |=
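The dl2k changes, like most of the skb->tail to skb->data conversions in this merge, touch freshly allocated receive buffers: immediately after dev_alloc_skb()/skb_reserve() the two pointers coincide, but skb->data is the documented start of packet data and is what should be handed to the DMA API. A sketch of the pattern, with an illustrative helper name:

#include <linux/skbuff.h>
#include <linux/pci.h>

/* Sketch: map the start of an empty receive skb for device DMA. */
static dma_addr_t example_map_rx(struct pci_dev *pdev,
				 struct sk_buff *skb, unsigned int bufsz)
{
	skb_reserve(skb, 2);	/* 16-byte align the IP header */
	return pci_map_single(pdev, skb->data, bufsz, PCI_DMA_FROMDEVICE);
}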
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1e56c8eea35f..d0fa2448761d 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -2447,9 +2447,8 @@ static int e100_resume(struct pci_dev *pdev)
 #endif
 
 
-static void e100_shutdown(struct device *dev)
+static void e100_shutdown(struct pci_dev *pdev)
 {
-	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev);
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct nic *nic = netdev_priv(netdev);
 
@@ -2470,11 +2469,7 @@ static struct pci_driver e100_driver = {
 	.suspend =      e100_suspend,
 	.resume =       e100_resume,
 #endif
-
-	.driver = {
-		.shutdown = e100_shutdown,
-	}
-
+	.shutdown =	e100_shutdown,
 };
 
 static int __init e100_init_module(void)
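The e100 hunks move the shutdown hook from the embedded struct device_driver to the pci_driver itself, so the callback receives the pci_dev directly instead of recovering it with container_of(). A sketch of the resulting shape, using placeholder names:

#include <linux/pci.h>

/* Sketch: declare the shutdown hook on the pci_driver so it gets the
 * pci_dev directly.  "example_" names are illustrative. */
static void example_shutdown(struct pci_dev *pdev)
{
	/* quiesce the device here */
}

static struct pci_driver example_driver = {
	.name     = "example",
	.shutdown = example_shutdown,
};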
diff --git a/drivers/net/eepro100.c b/drivers/net/eepro100.c
index 98b3a2fdce90..1795425f512e 100644
--- a/drivers/net/eepro100.c
+++ b/drivers/net/eepro100.c
@@ -1269,7 +1269,7 @@ speedo_init_rx_ring(struct net_device *dev)
 		if (skb == NULL)
 			break;			/* OK.  Just initially short of Rx bufs. */
 		skb->dev = dev;			/* Mark as being used by this device. */
-		rxf = (struct RxFD *)skb->tail;
+		rxf = (struct RxFD *)skb->data;
 		sp->rx_ringp[i] = rxf;
 		sp->rx_ring_dma[i] =
 			pci_map_single(sp->pdev, rxf,
@@ -1661,7 +1661,7 @@ static inline struct RxFD *speedo_rx_alloc(struct net_device *dev, int entry)
 		sp->rx_ringp[entry] = NULL;
 		return NULL;
 	}
-	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->data;
 	sp->rx_ring_dma[entry] =
 		pci_map_single(sp->pdev, rxf,
 			       PKT_BUF_SZ + sizeof(struct RxFD), PCI_DMA_FROMDEVICE);
@@ -1808,10 +1808,10 @@ speedo_rx(struct net_device *dev)
 
 #if 1 || USE_IP_CSUM
 			/* Packet is in one chunk -- we can copy + cksum. */
-			eth_copy_and_sum(skb, sp->rx_skbuff[entry]->tail, pkt_len, 0);
+			eth_copy_and_sum(skb, sp->rx_skbuff[entry]->data, pkt_len, 0);
 			skb_put(skb, pkt_len);
 #else
-			memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->tail,
+			memcpy(skb_put(skb, pkt_len), sp->rx_skbuff[entry]->data,
 			       pkt_len);
 #endif
 			pci_dma_sync_single_for_device(sp->pdev, sp->rx_ring_dma[entry],
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index 81ebaedaa240..87f522738bfc 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -1003,7 +1003,7 @@ static void epic_init_ring(struct net_device *dev)
 		skb->dev = dev;			/* Mark as being used by this device. */
 		skb_reserve(skb, 2);	/* 16 byte align the IP header. */
 		ep->rx_ring[i].bufaddr = pci_map_single(ep->pci_dev,
-			skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+			skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		ep->rx_ring[i].rxstatus = cpu_to_le32(DescOwn);
 	}
 	ep->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1274,7 +1274,7 @@ static int epic_rx(struct net_device *dev, int budget)
 							    ep->rx_ring[entry].bufaddr,
 							    ep->rx_buf_sz,
 							    PCI_DMA_FROMDEVICE);
-				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->tail, pkt_len, 0);
+				eth_copy_and_sum(skb, ep->rx_skbuff[entry]->data, pkt_len, 0);
 				skb_put(skb, pkt_len);
 				pci_dma_sync_single_for_device(ep->pci_dev,
 							       ep->rx_ring[entry].bufaddr,
@@ -1308,7 +1308,7 @@ static int epic_rx(struct net_device *dev, int budget)
 			skb->dev = dev;			/* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			ep->rx_ring[entry].bufaddr = pci_map_single(ep->pci_dev,
-				skb->tail, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
+				skb->data, ep->rx_buf_sz, PCI_DMA_FROMDEVICE);
 			work_done++;
 		}
 		ep->rx_ring[entry].rxstatus = cpu_to_le32(DescOwn);
diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
index 9e0303f6d73c..55dbe9a3fd56 100644
--- a/drivers/net/fealnx.c
+++ b/drivers/net/fealnx.c
@@ -1107,7 +1107,7 @@ static void allocate_rx_buffers(struct net_device *dev)
 
 		skb->dev = dev;		/* Mark as being used by this device. */
 		np->lack_rxbuf->skbuff = skb;
-		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->tail,
+		np->lack_rxbuf->buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->lack_rxbuf->status = RXOWN;
 		++np->really_rx_count;
@@ -1300,7 +1300,7 @@ static void init_ring(struct net_device *dev)
 		++np->really_rx_count;
 		np->rx_ring[i].skbuff = skb;
 		skb->dev = dev;	/* Mark as being used by this device. */
-		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->tail,
+		np->rx_ring[i].buffer = pci_map_single(np->pci_dev, skb->data,
 			np->rx_buf_sz, PCI_DMA_FROMDEVICE);
 		np->rx_ring[i].status = RXOWN;
 		np->rx_ring[i].control |= RXIC;
@@ -1737,11 +1737,11 @@ static int netdev_rx(struct net_device *dev)
 
 #if ! defined(__alpha__)
 				eth_copy_and_sum(skb,
-					np->cur_rx->skbuff->tail, pkt_len, 0);
+					np->cur_rx->skbuff->data, pkt_len, 0);
 				skb_put(skb, pkt_len);
 #else
 				memcpy(skb_put(skb, pkt_len),
-					np->cur_rx->skbuff->tail, pkt_len);
+					np->cur_rx->skbuff->data, pkt_len);
 #endif
 				pci_dma_sync_single_for_device(np->pci_dev,
 							       np->cur_rx->buffer,
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 3d96714ed3cf..d9df1d9a5739 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1149,7 +1149,7 @@ static void hamachi_tx_timeout(struct net_device *dev)
 		skb->dev = dev;         /* Mark as being used by this device. */
 		skb_reserve(skb, 2); /* 16 byte align the IP header. */
 		hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-			skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 			DescEndPacket | DescIntr | (hmp->rx_buf_sz - 2));
 	}
@@ -1210,7 +1210,7 @@ static void hamachi_init_ring(struct net_device *dev)
 		skb->dev = dev;         /* Mark as being used by this device. */
 		skb_reserve(skb, 2); /* 16 byte align the IP header. */
                 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-			skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+			skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		/* -2 because it doesn't REALLY have that first 2 bytes -KDU */
 		hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
 			DescEndPacket | DescIntr | (hmp->rx_buf_sz -2));
@@ -1509,7 +1509,7 @@ static int hamachi_rx(struct net_device *dev)
 					    desc->addr,
 					    hmp->rx_buf_sz,
 					    PCI_DMA_FROMDEVICE);
-		buf_addr = (u8 *) hmp->rx_skbuff[entry]->tail;
+		buf_addr = (u8 *) hmp->rx_skbuff[entry]->data;
 		frame_status = le32_to_cpu(get_unaligned((s32*)&(buf_addr[data_size - 12])));
 		if (hamachi_debug > 4)
 			printk(KERN_DEBUG "  hamachi_rx() status was %8.8x.\n",
@@ -1678,7 +1678,7 @@ static int hamachi_rx(struct net_device *dev)
 			skb->dev = dev;         /* Mark as being used by this device. */
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			desc->addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
-				skb->tail, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
+				skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 		}
 		desc->status_n_length = cpu_to_le32(hmp->rx_buf_sz);
 		if (entry >= RX_RING_SIZE-1)
@@ -1772,9 +1772,9 @@ static int hamachi_close(struct net_device *dev)
 				readl(ioaddr + RxCurPtr) == (long)&hmp->rx_ring[i] ? '>' : ' ',
 				i, hmp->rx_ring[i].status_n_length, hmp->rx_ring[i].addr);
 			if (hamachi_debug > 6) {
-				if (*(u8*)hmp->rx_skbuff[i]->tail != 0x69) {
+				if (*(u8*)hmp->rx_skbuff[i]->data != 0x69) {
 					u16 *addr = (u16 *)
-						hmp->rx_skbuff[i]->tail;
+						hmp->rx_skbuff[i]->data;
 					int j;
 
 					for (j = 0; j < 0x50; j++)
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index ca90f0d1e4b0..b4929beb33b2 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -862,7 +862,7 @@ lance_init_ring(struct net_device *dev, int gfp)
 		lp->rx_skbuff[i] = skb;
 		if (skb) {
 			skb->dev = dev;
-			rx_buff = skb->tail;
+			rx_buff = skb->data;
 		} else
 			rx_buff = kmalloc(PKT_BUF_SZ, GFP_DMA | gfp);
 		if (rx_buff == NULL)
diff --git a/drivers/net/lasi_82596.c b/drivers/net/lasi_82596.c
index 5e263fcba669..41bad07ac1ac 100644
--- a/drivers/net/lasi_82596.c
+++ b/drivers/net/lasi_82596.c
@@ -553,14 +553,14 @@ static inline void init_rx_bufs(struct net_device *dev)
 		if (skb == NULL)
 			panic("%s: alloc_skb() failed", __FILE__);
 		skb_reserve(skb, 2);
-		dma_addr = dma_map_single(lp->dev, skb->tail,PKT_BUF_SZ,
+		dma_addr = dma_map_single(lp->dev, skb->data,PKT_BUF_SZ,
 					  DMA_FROM_DEVICE);
 		skb->dev = dev;
 		rbd->v_next = rbd+1;
 		rbd->b_next = WSWAPrbd(virt_to_dma(lp,rbd+1));
 		rbd->b_addr = WSWAPrbd(virt_to_dma(lp,rbd));
 		rbd->skb = skb;
-		rbd->v_data = skb->tail;
+		rbd->v_data = skb->data;
 		rbd->b_data = WSWAPchar(dma_addr);
 		rbd->size = PKT_BUF_SZ;
 	}
@@ -783,8 +783,8 @@ static inline int i596_rx(struct net_device *dev)
 				rx_in_place = 1;
 				rbd->skb = newskb;
 				newskb->dev = dev;
-				dma_addr = dma_map_single(lp->dev, newskb->tail, PKT_BUF_SZ, DMA_FROM_DEVICE);
-				rbd->v_data = newskb->tail;
+				dma_addr = dma_map_single(lp->dev, newskb->data, PKT_BUF_SZ, DMA_FROM_DEVICE);
+				rbd->v_data = newskb->data;
 				rbd->b_data = WSWAPchar(dma_addr);
 				CHECK_WBACK_INV(rbd, sizeof(struct i596_rbd));
 			}
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index babb59e146ea..9d6d2548c2d3 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -1926,7 +1926,7 @@ static void refill_rx(struct net_device *dev)
 			break; /* Better luck next round. */
 		skb->dev = dev; /* Mark as being used by this device. */
 		np->rx_dma[entry] = pci_map_single(np->pci_dev,
-			skb->tail, buflen, PCI_DMA_FROMDEVICE);
+			skb->data, buflen, PCI_DMA_FROMDEVICE);
 		np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
 	}
 	np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
@@ -2280,7 +2280,7 @@ static void netdev_rx(struct net_device *dev)
 							     buflen,
 							     PCI_DMA_FROMDEVICE);
 				eth_copy_and_sum(skb,
-					np->rx_skbuff[entry]->tail, pkt_len, 0);
+					np->rx_skbuff[entry]->data, pkt_len, 0);
 				skb_put(skb, pkt_len);
 				pci_dma_sync_single_for_device(np->pci_dev,
 							       np->rx_dma[entry],
diff --git a/drivers/net/ns83820.c b/drivers/net/ns83820.c
index cc7965271778..e64df4d0800b 100644
--- a/drivers/net/ns83820.c
+++ b/drivers/net/ns83820.c
@@ -574,7 +574,7 @@ static inline int ns83820_add_rx_skb(struct ns83820 *dev, struct sk_buff *skb)
 
 	dev->rx_info.next_empty = (next_empty + 1) % NR_RX_DESC;
 	cmdsts = REAL_RX_BUF_SIZE | CMDSTS_INTR;
-	buf = pci_map_single(dev->pci_dev, skb->tail,
+	buf = pci_map_single(dev->pci_dev, skb->data,
 			     REAL_RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 	build_rx_desc(dev, sg, 0, buf, cmdsts, 0);
 	/* update link of previous rx */
@@ -604,7 +604,7 @@ static inline int rx_refill(struct net_device *ndev, int gfp)
 		if (unlikely(!skb))
 			break;
 
-		res = (long)skb->tail & 0xf;
+		res = (long)skb->data & 0xf;
 		res = 0x10 - res;
 		res &= 0xf;
 		skb_reserve(skb, res);
diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
index 3213f3e50487..113b68099216 100644
--- a/drivers/net/pcnet32.c
+++ b/drivers/net/pcnet32.c
@@ -1602,7 +1602,7 @@ pcnet32_init_ring(struct net_device *dev)
 
 	rmb();
 	if (lp->rx_dma_addr[i] == 0)
-	    lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->tail,
+	    lp->rx_dma_addr[i] = pci_map_single(lp->pci_dev, rx_skbuff->data,
 		    PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 	lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
 	lp->rx_ring[i].buf_length = le16_to_cpu(2-PKT_BUF_SZ);
@@ -1983,7 +1983,7 @@ pcnet32_rx(struct net_device *dev)
 			    lp->rx_skbuff[entry] = newskb;
 			    newskb->dev = dev;
 			    lp->rx_dma_addr[entry] =
-				    pci_map_single(lp->pci_dev, newskb->tail,
+				    pci_map_single(lp->pci_dev, newskb->data,
 					    PKT_BUF_SZ-2, PCI_DMA_FROMDEVICE);
 			    lp->rx_ring[entry].base = le32_to_cpu(lp->rx_dma_addr[entry]);
 			    rx_in_place = 1;
@@ -2020,7 +2020,7 @@ pcnet32_rx(struct net_device *dev)
 						       PKT_BUF_SZ-2,
 						       PCI_DMA_FROMDEVICE);
 		eth_copy_and_sum(skb,
-				 (unsigned char *)(lp->rx_skbuff[entry]->tail),
+				 (unsigned char *)(lp->rx_skbuff[entry]->data),
 				 pkt_len,0);
 		pci_dma_sync_single_for_device(lp->pci_dev,
 					       lp->rx_dma_addr[entry],
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index ce449fe90e6d..d5afe05cd826 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -1876,7 +1876,7 @@ static int rtl8169_alloc_rx_skb(struct pci_dev *pdev, struct sk_buff **sk_buff,
 	skb_reserve(skb, NET_IP_ALIGN);
 	*sk_buff = skb;
 
-	mapping = pci_map_single(pdev, skb->tail, rx_buf_sz,
+	mapping = pci_map_single(pdev, skb->data, rx_buf_sz,
 				 PCI_DMA_FROMDEVICE);
 
 	rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
@@ -2336,7 +2336,7 @@ static inline int rtl8169_try_rx_copy(struct sk_buff **sk_buff, int pkt_size,
 		skb = dev_alloc_skb(pkt_size + NET_IP_ALIGN);
 		if (skb) {
 			skb_reserve(skb, NET_IP_ALIGN);
-			eth_copy_and_sum(skb, sk_buff[0]->tail, pkt_size, 0);
+			eth_copy_and_sum(skb, sk_buff[0]->data, pkt_size, 0);
 			*sk_buff = skb;
 			rtl8169_mark_to_asic(desc, rx_buf_sz);
 			ret = 0;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index bb639a8794d4..ea638b162d3f 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -1699,11 +1699,9 @@ static int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
 #else
 		ba = &nic->ba[ring_no][block_no][off];
 		skb_reserve(skb, BUF0_LEN);
-		tmp = (unsigned long) skb->data;
-		tmp += ALIGN_SIZE;
-		tmp &= ~ALIGN_SIZE;
-		skb->data = (void *) tmp;
-		skb->tail = (void *) tmp;
+		tmp = ((unsigned long) skb->data & ALIGN_SIZE);
+		if (tmp)
+			skb_reserve(skb, (ALIGN_SIZE + 1) - tmp);
 
 		memset(rxdp, 0, sizeof(RxD_t));
 		rxdp->Buffer2_ptr = pci_map_single
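The s2io hunk drops direct writes to skb->data and skb->tail and realigns the buffer with skb_reserve() instead. A sketch of that alignment step, assuming ALIGN_SIZE is an alignment-minus-one mask as in the driver:

#include <linux/skbuff.h>

/* Sketch: align skb->data to (align_mask + 1) bytes without touching
 * skb->data/skb->tail directly. */
static void example_align_rx_skb(struct sk_buff *skb, unsigned long align_mask)
{
	unsigned long off = (unsigned long)skb->data & align_mask;

	if (off)
		skb_reserve(skb, (align_mask + 1) - off);
}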
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index fd2e7c374906..7abd55a4fb21 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -963,11 +963,11 @@ static int sbdma_add_rcvbuffer(sbmacdma_t *d,struct sk_buff *sb)
 	/*
 	 * Do not interrupt per DMA transfer.
 	 */
-	dsc->dscr_a = virt_to_phys(sb_new->tail) |
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
 		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 		0;
 #else
-	dsc->dscr_a = virt_to_phys(sb_new->tail) |
+	dsc->dscr_a = virt_to_phys(sb_new->data) |
 		V_DMA_DSCRA_A_SIZE(NUMCACHEBLKS(pktsize+ETHER_ALIGN)) |
 		M_DMA_DSCRA_INTERRUPT;
 #endif
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c
index 127324f014de..23b713c700b3 100644
--- a/drivers/net/sis900.c
+++ b/drivers/net/sis900.c
@@ -1154,7 +1154,7 @@ sis900_init_rx_ring(struct net_device *net_dev)
 		sis_priv->rx_skbuff[i] = skb;
 		sis_priv->rx_ring[i].cmdsts = RX_BUF_SIZE;
                 sis_priv->rx_ring[i].bufptr = pci_map_single(sis_priv->pci_dev,
-                        skb->tail, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
+                        skb->data, RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 	}
 	sis_priv->dirty_rx = (unsigned int) (i - NUM_RX_DESC);
 
@@ -1776,7 +1776,7 @@ static int sis900_rx(struct net_device *net_dev)
 			sis_priv->rx_skbuff[entry] = skb;
 			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
                 	sis_priv->rx_ring[entry].bufptr =
-				pci_map_single(sis_priv->pci_dev, skb->tail,
+				pci_map_single(sis_priv->pci_dev, skb->data,
 					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 			sis_priv->dirty_rx++;
 		}
@@ -1809,7 +1809,7 @@ static int sis900_rx(struct net_device *net_dev)
 			sis_priv->rx_skbuff[entry] = skb;
 			sis_priv->rx_ring[entry].cmdsts = RX_BUF_SIZE;
 			sis_priv->rx_ring[entry].bufptr =
-				pci_map_single(sis_priv->pci_dev, skb->tail,
+				pci_map_single(sis_priv->pci_dev, skb->data,
 					RX_BUF_SIZE, PCI_DMA_FROMDEVICE);
 		}
 	}
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 30e8d589d167..3dbb1cb09ed8 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -7,7 +7,7 @@
  * of the original driver such as link fail-over and link management because
  * those should be done at higher levels.
  *
- * Copyright (C) 2004, Stephen Hemminger <shemminger@osdl.org>
+ * Copyright (C) 2004, 2005 Stephen Hemminger <shemminger@osdl.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -42,19 +42,20 @@
 #include "skge.h"
 
 #define DRV_NAME		"skge"
-#define DRV_VERSION		"0.6"
+#define DRV_VERSION		"0.7"
 #define PFX			DRV_NAME " "
 
 #define DEFAULT_TX_RING_SIZE	128
 #define DEFAULT_RX_RING_SIZE	512
 #define MAX_TX_RING_SIZE	1024
 #define MAX_RX_RING_SIZE	4096
+#define RX_COPY_THRESHOLD	128
+#define RX_BUF_SIZE		1536
 #define PHY_RETRIES	        1000
 #define ETH_JUMBO_MTU		9000
 #define TX_WATCHDOG		(5 * HZ)
 #define NAPI_WEIGHT		64
 #define BLINK_HZ		(HZ/4)
-#define LINK_POLL_HZ		(HZ/10)
 
 MODULE_DESCRIPTION("SysKonnect Gigabit Ethernet driver");
 MODULE_AUTHOR("Stephen Hemminger <shemminger@osdl.org>");
@@ -70,28 +71,17 @@ module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
 
 static const struct pci_device_id skge_id_table[] = {
-	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_SYSKONNECT, 0x9E00, /* SK-9Exx  */
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_MARVELL, 0x4320, /* Gigabit Ethernet Controller */
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_MARVELL, 0x5005, /* Marvell (11ab), Belkin */
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032,
-	  PCI_ANY_ID, PCI_ANY_ID },
-	{ PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064,
-	  PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3C940B) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx  */
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */
+	{ PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1032) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_LINKSYS, PCI_DEVICE_ID_LINKSYS_EG1064) },
 	{ 0 }
 };
 MODULE_DEVICE_TABLE(pci, skge_id_table);
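The skge ID table above is rewritten with the PCI_DEVICE() helper, which fills in .vendor/.device and wildcards the subsystem IDs. A sketch showing the equivalence (the device ID is taken from the table only as an example):

#include <linux/pci.h>
#include <linux/mod_devicetable.h>

/* Sketch: these two entries describe the same match. */
static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) },
	{ PCI_VENDOR_ID_SYSKONNECT, 0x9E00, PCI_ANY_ID, PCI_ANY_ID },
	{ 0 }
};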
@@ -99,19 +89,22 @@ MODULE_DEVICE_TABLE(pci, skge_id_table);
 static int skge_up(struct net_device *dev);
 static int skge_down(struct net_device *dev);
 static void skge_tx_clean(struct skge_port *skge);
-static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
-static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
+static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val);
 static void genesis_get_stats(struct skge_port *skge, u64 *data);
 static void yukon_get_stats(struct skge_port *skge, u64 *data);
 static void yukon_init(struct skge_hw *hw, int port);
 static void yukon_reset(struct skge_hw *hw, int port);
 static void genesis_mac_init(struct skge_hw *hw, int port);
 static void genesis_reset(struct skge_hw *hw, int port);
+static void genesis_link_up(struct skge_port *skge);
 
+/* Avoid conditionals by using array */
 static const int txqaddr[] = { Q_XA1, Q_XA2 };
 static const int rxqaddr[] = { Q_R1, Q_R2 };
 static const u32 rxirqmask[] = { IS_R1_F, IS_R2_F };
 static const u32 txirqmask[] = { IS_XA1_F, IS_XA2_F };
+static const u32 portirqmask[] = { IS_PORT_1, IS_PORT_2 };
 
 /* Don't need to look at whole 16K.
  * last interesting register is descriptor poll timer.
@@ -154,7 +147,7 @@ static void skge_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 static int wol_supported(const struct skge_hw *hw)
 {
 	return !((hw->chip_id == CHIP_ID_GENESIS ||
-		  (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0)));
+		  (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)));
 }
 
 static void skge_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
@@ -170,7 +163,7 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	struct skge_port *skge = netdev_priv(dev);
 	struct skge_hw *hw = skge->hw;
 
-	if(wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
+	if (wol->wolopts != WAKE_MAGIC && wol->wolopts != 0)
 		return -EOPNOTSUPP;
 
 	if (wol->wolopts == WAKE_MAGIC && !wol_supported(hw))
@@ -190,6 +183,36 @@ static int skge_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
+/* Determine supported/advertised modes based on hardware.
+ * Note: ethtool ADVERTISED_xxx == SUPPORTED_xxx
+ */
+static u32 skge_supported_modes(const struct skge_hw *hw)
+{
+	u32 supported;
+
+	if (iscopper(hw)) {
+		supported = SUPPORTED_10baseT_Half
+			| SUPPORTED_10baseT_Full
+			| SUPPORTED_100baseT_Half
+			| SUPPORTED_100baseT_Full
+			| SUPPORTED_1000baseT_Half
+			| SUPPORTED_1000baseT_Full
+			| SUPPORTED_Autoneg| SUPPORTED_TP;
+
+		if (hw->chip_id == CHIP_ID_GENESIS)
+			supported &= ~(SUPPORTED_10baseT_Half
+					     | SUPPORTED_10baseT_Full
+					     | SUPPORTED_100baseT_Half
+					     | SUPPORTED_100baseT_Full);
+
+		else if (hw->chip_id == CHIP_ID_YUKON)
+			supported &= ~SUPPORTED_1000baseT_Half;
+	} else
+		supported = SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE
+			| SUPPORTED_Autoneg;
+
+	return supported;
+}
 
 static int skge_get_settings(struct net_device *dev,
 			     struct ethtool_cmd *ecmd)
@@ -198,38 +221,13 @@ static int skge_get_settings(struct net_device *dev,
 	struct skge_hw *hw = skge->hw;
 
 	ecmd->transceiver = XCVR_INTERNAL;
+	ecmd->supported = skge_supported_modes(hw);
 
 	if (iscopper(hw)) {
-		if (hw->chip_id == CHIP_ID_GENESIS)
-			ecmd->supported = SUPPORTED_1000baseT_Full
-				| SUPPORTED_1000baseT_Half
-				| SUPPORTED_Autoneg | SUPPORTED_TP;
-		else {
-			ecmd->supported = SUPPORTED_10baseT_Half
-				| SUPPORTED_10baseT_Full
-				| SUPPORTED_100baseT_Half
-				| SUPPORTED_100baseT_Full
-				| SUPPORTED_1000baseT_Half
-				| SUPPORTED_1000baseT_Full
-				| SUPPORTED_Autoneg| SUPPORTED_TP;
-
-			if (hw->chip_id == CHIP_ID_YUKON)
-				ecmd->supported &= ~SUPPORTED_1000baseT_Half;
-
-			else if (hw->chip_id == CHIP_ID_YUKON_FE)
-				ecmd->supported &= ~(SUPPORTED_1000baseT_Half
-						     | SUPPORTED_1000baseT_Full);
-		}
-
 		ecmd->port = PORT_TP;
 		ecmd->phy_address = hw->phy_addr;
-	} else {
-		ecmd->supported = SUPPORTED_1000baseT_Full
-			| SUPPORTED_FIBRE
-			| SUPPORTED_Autoneg;
-
+	} else
 		ecmd->port = PORT_FIBRE;
-	}
 
 	ecmd->advertising = skge->advertising;
 	ecmd->autoneg = skge->autoneg;
@@ -238,65 +236,57 @@ static int skge_get_settings(struct net_device *dev,
 	return 0;
 }
 
-static u32 skge_modes(const struct skge_hw *hw)
-{
-	u32 modes = ADVERTISED_Autoneg
-		| ADVERTISED_1000baseT_Full | ADVERTISED_1000baseT_Half
-		| ADVERTISED_100baseT_Full | ADVERTISED_100baseT_Half
-		| ADVERTISED_10baseT_Full | ADVERTISED_10baseT_Half;
-
-	if (iscopper(hw)) {
-		modes |= ADVERTISED_TP;
-		switch(hw->chip_id) {
-		case CHIP_ID_GENESIS:
-			modes &= ~(ADVERTISED_100baseT_Full
-				   | ADVERTISED_100baseT_Half
-				   | ADVERTISED_10baseT_Full
-				   | ADVERTISED_10baseT_Half);
-			break;
-
-		case CHIP_ID_YUKON:
-			modes &= ~ADVERTISED_1000baseT_Half;
-			break;
-
-		case CHIP_ID_YUKON_FE:
-			modes &= ~(ADVERTISED_1000baseT_Half|ADVERTISED_1000baseT_Full);
-			break;
-		}
-	} else {
-		modes |= ADVERTISED_FIBRE;
-		modes &= ~ADVERTISED_1000baseT_Half;
-	}
-	return modes;
-}
-
 static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
 {
 	struct skge_port *skge = netdev_priv(dev);
 	const struct skge_hw *hw = skge->hw;
+	u32 supported = skge_supported_modes(hw);
 
 	if (ecmd->autoneg == AUTONEG_ENABLE) {
-		if (ecmd->advertising & skge_modes(hw))
-			return -EINVAL;
+		ecmd->advertising = supported;
+		skge->duplex = -1;
+		skge->speed = -1;
 	} else {
+		u32 setting;
+
 		switch(ecmd->speed) {
 		case SPEED_1000:
-			if (hw->chip_id == CHIP_ID_YUKON_FE)
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_1000baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_1000baseT_Half;
+			else
 				return -EINVAL;
 			break;
 		case SPEED_100:
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_100baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_100baseT_Half;
+			else
+				return -EINVAL;
+			break;
+
 		case SPEED_10:
-			if (iscopper(hw) || hw->chip_id == CHIP_ID_GENESIS)
+			if (ecmd->duplex == DUPLEX_FULL)
+				setting = SUPPORTED_10baseT_Full;
+			else if (ecmd->duplex == DUPLEX_HALF)
+				setting = SUPPORTED_10baseT_Half;
+			else
 				return -EINVAL;
 			break;
 		default:
 			return -EINVAL;
 		}
+
+		if ((setting & supported) == 0)
+			return -EINVAL;
+
+		skge->speed = ecmd->speed;
+		skge->duplex = ecmd->duplex;
 	}
 
 	skge->autoneg = ecmd->autoneg;
-	skge->speed = ecmd->speed;
-	skge->duplex = ecmd->duplex;
 	skge->advertising = ecmd->advertising;
 
 	if (netif_running(dev)) {
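The rewritten skge_set_settings() validates a forced speed/duplex pair by mapping it onto a single SUPPORTED_* bit and testing that bit against skge_supported_modes(). A condensed sketch of the check, assuming duplex is either DUPLEX_FULL or DUPLEX_HALF (illustrative helper, not the driver's):

#include <linux/ethtool.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: reject a forced mode the hardware does not support. */
static int example_validate_forced(u32 speed, u8 duplex, u32 supported)
{
	u32 setting;

	switch (speed) {
	case SPEED_1000:
		setting = (duplex == DUPLEX_FULL) ?
			SUPPORTED_1000baseT_Full : SUPPORTED_1000baseT_Half;
		break;
	case SPEED_100:
		setting = (duplex == DUPLEX_FULL) ?
			SUPPORTED_100baseT_Full : SUPPORTED_100baseT_Half;
		break;
	case SPEED_10:
		setting = (duplex == DUPLEX_FULL) ?
			SUPPORTED_10baseT_Full : SUPPORTED_10baseT_Half;
		break;
	default:
		return -EINVAL;
	}

	return (setting & supported) ? 0 : -EINVAL;
}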
@@ -393,7 +383,7 @@ static void skge_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 	int i;
 
-	switch(stringset) {
+	switch (stringset) {
 	case ETH_SS_STATS:
 		for (i = 0; i < ARRAY_SIZE(skge_stats); i++)
 			memcpy(data + i * ETH_GSTRING_LEN,
@@ -511,14 +501,6 @@ static int skge_set_rx_csum(struct net_device *dev, u32 data)
 	return 0;
 }
 
-/* Only Yukon II supports TSO (not implemented yet) */
-static int skge_set_tso(struct net_device *dev, u32 data)
-{
-	if (data)
-		return -EOPNOTSUPP;
-	return 0;
-}
-
 static void skge_get_pauseparam(struct net_device *dev,
 				struct ethtool_pauseparam *ecmd)
 {
@@ -540,9 +522,9 @@ static int skge_set_pauseparam(struct net_device *dev,
 	skge->autoneg = ecmd->autoneg;
 	if (ecmd->rx_pause && ecmd->tx_pause)
 		skge->flow_control = FLOW_MODE_SYMMETRIC;
-	else if(ecmd->rx_pause && !ecmd->tx_pause)
+	else if (ecmd->rx_pause && !ecmd->tx_pause)
 		skge->flow_control = FLOW_MODE_REM_SEND;
-	else if(!ecmd->rx_pause && ecmd->tx_pause)
+	else if (!ecmd->rx_pause && ecmd->tx_pause)
 		skge->flow_control = FLOW_MODE_LOC_SEND;
 	else
 		skge->flow_control = FLOW_MODE_NONE;
@@ -559,8 +541,6 @@ static inline u32 hwkhz(const struct skge_hw *hw)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS)
 		return 53215; /* or:  53.125 MHz */
-	else if (hw->chip_id == CHIP_ID_YUKON_EC)
-		return 125000; /* or: 125.000 MHz */
 	else
 		return 78215; /* or:  78.125 MHz */
 }
@@ -643,30 +623,18 @@ static int skge_set_coalesce(struct net_device *dev,
 static void skge_led_on(struct skge_hw *hw, int port)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS) {
-		skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON);
+		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
 		skge_write8(hw, B0_LED, LED_STAT_ON);
 
-		skge_write8(hw, SKGEMAC_REG(port, RX_LED_TST), LED_T_ON);
-		skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 100);
-		skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START);
+		skge_write8(hw, SK_REG(port, RX_LED_TST), LED_T_ON);
+		skge_write32(hw, SK_REG(port, RX_LED_VAL), 100);
+		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
 
-		switch (hw->phy_type) {
-		case SK_PHY_BCOM:
-			skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
-					  PHY_B_PEC_LED_ON);
-			break;
-		case SK_PHY_LONE:
-			skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
-					  0x0800);
-			break;
-		default:
-			skge_write8(hw, SKGEMAC_REG(port, TX_LED_TST), LED_T_ON);
-			skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 100);
-			skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START);
-		}
+		/* For Broadcom Phy only */
+		xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_ON);
 	} else {
-		skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
-		skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
 			     PHY_M_LED_MO_DUP(MO_LED_ON)  |
 			     PHY_M_LED_MO_10(MO_LED_ON)   |
 			     PHY_M_LED_MO_100(MO_LED_ON)  |
@@ -678,28 +646,17 @@ static void skge_led_on(struct skge_hw *hw, int port)
 static void skge_led_off(struct skge_hw *hw, int port)
 {
 	if (hw->chip_id == CHIP_ID_GENESIS) {
-		skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_OFF);
+		skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_OFF);
 		skge_write8(hw, B0_LED, LED_STAT_OFF);
 
-		skge_write32(hw, SKGEMAC_REG(port, RX_LED_VAL), 0);
-		skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_T_OFF);
+		skge_write32(hw, SK_REG(port, RX_LED_VAL), 0);
+		skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_T_OFF);
 
-		switch (hw->phy_type) {
-		case SK_PHY_BCOM:
-			skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL,
-					  PHY_B_PEC_LED_OFF);
-			break;
-		case SK_PHY_LONE:
-			skge_xm_phy_write(hw, port, PHY_LONE_LED_CFG,
-					  PHY_L_LC_LEDT);
-			break;
-		default:
-			skge_write32(hw, SKGEMAC_REG(port, TX_LED_VAL), 0);
-			skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_T_OFF);
-		}
+		/* Broadcom only */
+		xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, PHY_B_PEC_LED_OFF);
 	} else {
-		skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
-		skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER,
+		gm_phy_write(hw, port, PHY_MARV_LED_CTRL, 0);
+		gm_phy_write(hw, port, PHY_MARV_LED_OVER,
 			     PHY_M_LED_MO_DUP(MO_LED_OFF) |
 			     PHY_M_LED_MO_10(MO_LED_OFF) |
 			     PHY_M_LED_MO_100(MO_LED_OFF) |
@@ -730,7 +687,7 @@ static int skge_phys_id(struct net_device *dev, u32 data)
 {
 	struct skge_port *skge = netdev_priv(dev);
 
-	if(!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
+	if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ))
 		data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ);
 
 	/* start blinking */
@@ -763,8 +720,6 @@ static struct ethtool_ops skge_ethtool_ops = {
 	.set_pauseparam = skge_set_pauseparam,
 	.get_coalesce	= skge_get_coalesce,
 	.set_coalesce	= skge_set_coalesce,
-	.get_tso	= ethtool_op_get_tso,
-	.set_tso	= skge_set_tso,
 	.get_sg		= ethtool_op_get_sg,
 	.set_sg		= skge_set_sg,
 	.get_tx_csum	= ethtool_op_get_tx_csum,
@@ -793,6 +748,7 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 
 	for (i = 0, e = ring->start, d = vaddr; i < ring->count; i++, e++, d++) {
 		e->desc = d;
+		e->skb = NULL;
 		if (i == ring->count - 1) {
 			e->next = ring->start;
 			d->next_offset = base;
@@ -806,24 +762,23 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u64 base)
 	return 0;
 }
 
-/* Setup buffer for receiving */
-static inline int skge_rx_alloc(struct skge_port *skge,
-				struct skge_element *e)
+static struct sk_buff *skge_rx_alloc(struct net_device *dev, unsigned int size)
 {
-	unsigned long bufsize = skge->netdev->mtu + ETH_HLEN; /* VLAN? */
-	struct skge_rx_desc *rd = e->desc;
-	struct sk_buff *skb;
-	u64 map;
+	struct sk_buff *skb = dev_alloc_skb(size);
 
-	skb = dev_alloc_skb(bufsize + NET_IP_ALIGN);
-	if (unlikely(!skb)) {
-		printk(KERN_DEBUG PFX "%s: out of memory for receive\n",
-		       skge->netdev->name);
-		return -ENOMEM;
+	if (likely(skb)) {
+		skb->dev = dev;
+		skb_reserve(skb, NET_IP_ALIGN);
 	}
+	return skb;
+}
 
-	skb->dev = skge->netdev;
-	skb_reserve(skb, NET_IP_ALIGN);
+/* Allocate and setup a new buffer for receiving */
+static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
+			  struct sk_buff *skb, unsigned int bufsize)
+{
+	struct skge_rx_desc *rd = e->desc;
+	u64 map;
 
 	map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
 			     PCI_DMA_FROMDEVICE);
@@ -841,55 +796,69 @@ static inline int skge_rx_alloc(struct skge_port *skge,
 	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
 	pci_unmap_addr_set(e, mapaddr, map);
 	pci_unmap_len_set(e, maplen, bufsize);
-	return 0;
 }
 
-/* Free all unused buffers in receive ring, assumes receiver stopped */
+/* Resume receiving using existing skb,
+ * Note: DMA address is not changed by chip.
+ * 	 MTU not changed while receiver active.
+ */
+static void skge_rx_reuse(struct skge_element *e, unsigned int size)
+{
+	struct skge_rx_desc *rd = e->desc;
+
+	rd->csum2 = 0;
+	rd->csum2_start = ETH_HLEN;
+
+	wmb();
+
+	rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | size;
+}
+
+
+/* Free all buffers in receive ring, assumes receiver stopped */
 static void skge_rx_clean(struct skge_port *skge)
 {
 	struct skge_hw *hw = skge->hw;
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
 
-	for (e = ring->to_clean; e != ring->to_use; e = e->next) {
+	e = ring->start;
+	do {
 		struct skge_rx_desc *rd = e->desc;
 		rd->control = 0;
-
-		pci_unmap_single(hw->pdev,
-				 pci_unmap_addr(e, mapaddr),
-				 pci_unmap_len(e, maplen),
-				 PCI_DMA_FROMDEVICE);
-		dev_kfree_skb(e->skb);
-		e->skb = NULL;
-	}
-	ring->to_clean = e;
+		if (e->skb) {
+			pci_unmap_single(hw->pdev,
+					 pci_unmap_addr(e, mapaddr),
+					 pci_unmap_len(e, maplen),
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(e->skb);
+			e->skb = NULL;
+		}
+	} while ((e = e->next) != ring->start);
 }
 
+
 /* Allocate buffers for receive ring
- * For receive: to_use is refill location
- * to_clean is next received frame.
- *
- * if (to_use == to_clean)
- *	then ring all frames in ring need buffers
- * if (to_use->next == to_clean)
- *	then ring all frames in ring have buffers
+ * For receive: to_clean is next received frame.
  */
 static int skge_rx_fill(struct skge_port *skge)
 {
 	struct skge_ring *ring = &skge->rx_ring;
 	struct skge_element *e;
-	int ret = 0;
+	unsigned int bufsize = skge->rx_buf_size;
 
-	for (e = ring->to_use; e->next != ring->to_clean; e = e->next) {
-		if (skge_rx_alloc(skge, e)) {
-			ret = 1;
-			break;
-		}
+	e = ring->start;
+	do {
+		struct sk_buff *skb = skge_rx_alloc(skge->netdev, bufsize);
 
-	}
-	ring->to_use = e;
+		if (!skb)
+			return -ENOMEM;
+
+		skge_rx_setup(skge, e, skb, bufsize);
+	} while ( (e = e->next) != ring->start);
 
-	return ret;
+	ring->to_clean = ring->start;
+	return 0;
 }
 
 static void skge_link_up(struct skge_port *skge)
@@ -919,50 +888,50 @@ static void skge_link_down(struct skge_port *skge)
 	printk(KERN_INFO PFX "%s: Link is down.\n", skge->netdev->name);
 }
 
-static u16 skge_xm_phy_read(struct skge_hw *hw, int port, u16 reg)
+static u16 xm_phy_read(struct skge_hw *hw, int port, u16 reg)
 {
 	int i;
 	u16 v;
 
-	skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
-	v = skge_xm_read16(hw, port, XM_PHY_DATA);
-	if (hw->phy_type != SK_PHY_XMAC) {
-		for (i = 0; i < PHY_RETRIES; i++) {
-			udelay(1);
-			if (skge_xm_read16(hw, port, XM_MMU_CMD)
-			    & XM_MMU_PHY_RDY)
-				goto ready;
-		}
+	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+	v = xm_read16(hw, port, XM_PHY_DATA);
 
-		printk(KERN_WARNING PFX "%s: phy read timed out\n",
-		       hw->dev[port]->name);
-		return 0;
- ready:
-		v = skge_xm_read16(hw, port, XM_PHY_DATA);
+	/* Need to wait for external PHY */
+	for (i = 0; i < PHY_RETRIES; i++) {
+		udelay(1);
+		if (xm_read16(hw, port, XM_MMU_CMD)
+		    & XM_MMU_PHY_RDY)
+			goto ready;
 	}
 
+	printk(KERN_WARNING PFX "%s: phy read timed out\n",
+	       hw->dev[port]->name);
+	return 0;
+ ready:
+	v = xm_read16(hw, port, XM_PHY_DATA);
+
 	return v;
 }
 
-static void skge_xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
+static void xm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
 {
 	int i;
 
-	skge_xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
+	xm_write16(hw, port, XM_PHY_ADDR, reg | hw->phy_addr);
 	for (i = 0; i < PHY_RETRIES; i++) {
-		if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
 			goto ready;
-		cpu_relax();
+		udelay(1);
 	}
 	printk(KERN_WARNING PFX "%s: phy write failed to come ready\n",
 	       hw->dev[port]->name);
 
 
 ready:
-	skge_xm_write16(hw, port, XM_PHY_DATA, val);
+	xm_write16(hw, port, XM_PHY_DATA, val);
 	for (i = 0; i < PHY_RETRIES; i++) {
 		udelay(1);
-		if (!(skge_xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
+		if (!(xm_read16(hw, port, XM_MMU_CMD) & XM_MMU_PHY_BUSY))
 			return;
 	}
 	printk(KERN_WARNING PFX "%s: phy write timed out\n",
@@ -999,34 +968,112 @@ static void genesis_init(struct skge_hw *hw)
 
 static void genesis_reset(struct skge_hw *hw, int port)
 {
-	int i;
-	u64 zero = 0;
+	const u8 zero[8]  = { 0 };
 
 	/* reset the statistics module */
-	skge_xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
-	skge_xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
-	skge_xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
-	skge_xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
-	skge_xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */
+	xm_write32(hw, port, XM_GP_PORT, XM_GP_RES_STAT);
+	xm_write16(hw, port, XM_IMSK, 0xffff);	/* disable XMAC IRQs */
+	xm_write32(hw, port, XM_MODE, 0);		/* clear Mode Reg */
+	xm_write16(hw, port, XM_TX_CMD, 0);	/* reset TX CMD Reg */
+	xm_write16(hw, port, XM_RX_CMD, 0);	/* reset RX CMD Reg */
 
-	/* disable all PHY IRQs */
-	if (hw->phy_type == SK_PHY_BCOM)
-		skge_xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
+	/* disable Broadcom PHY IRQ */
+	xm_write16(hw, port, PHY_BCOM_INT_MASK, 0xffff);
 
-	skge_xm_outhash(hw, port, XM_HSM, (u8 *) &zero);
-	for (i = 0; i < 15; i++)
-		skge_xm_outaddr(hw, port, XM_EXM(i), (u8 *) &zero);
-	skge_xm_outhash(hw, port, XM_SRC_CHK, (u8 *) &zero);
+	xm_outhash(hw, port, XM_HSM, zero);
 }
 
 
-static void genesis_mac_init(struct skge_hw *hw, int port)
+/* Convert mode to MII values  */
+static const u16 phy_pause_map[] = {
+	[FLOW_MODE_NONE] =	0,
+	[FLOW_MODE_LOC_SEND] =	PHY_AN_PAUSE_ASYM,
+	[FLOW_MODE_SYMMETRIC] = PHY_AN_PAUSE_CAP,
+	[FLOW_MODE_REM_SEND]  = PHY_AN_PAUSE_CAP | PHY_AN_PAUSE_ASYM,
+};
+
+
+/* Check status of Broadcom phy link */
+static void bcom_check_link(struct skge_hw *hw, int port)
 {
-	struct skge_port *skge = netdev_priv(hw->dev[port]);
+	struct net_device *dev = hw->dev[port];
+	struct skge_port *skge = netdev_priv(dev);
+	u16 status;
+
+	/* read twice because of latch */
+	(void) xm_phy_read(hw, port, PHY_BCOM_STAT);
+	status = xm_phy_read(hw, port, PHY_BCOM_STAT);
+
+	pr_debug("bcom_check_link status=0x%x\n", status);
+
+	if ((status & PHY_ST_LSYNC) == 0) {
+		u16 cmd = xm_read16(hw, port, XM_MMU_CMD);
+		cmd &= ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX);
+		xm_write16(hw, port, XM_MMU_CMD, cmd);
+		/* dummy read to ensure writing */
+		(void) xm_read16(hw, port, XM_MMU_CMD);
+
+		if (netif_carrier_ok(dev))
+			skge_link_down(skge);
+	} else {
+		if (skge->autoneg == AUTONEG_ENABLE &&
+		    (status & PHY_ST_AN_OVER)) {
+			u16 lpa = xm_phy_read(hw, port, PHY_BCOM_AUNE_LP);
+			u16 aux = xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
+
+			if (lpa & PHY_B_AN_RF) {
+				printk(KERN_NOTICE PFX "%s: remote fault\n",
+				       dev->name);
+				return;
+			}
+
+			/* Check Duplex mismatch */
+			switch(aux & PHY_B_AS_AN_RES_MSK) {
+			case PHY_B_RES_1000FD:
+				skge->duplex = DUPLEX_FULL;
+				break;
+			case PHY_B_RES_1000HD:
+				skge->duplex = DUPLEX_HALF;
+				break;
+			default:
+				printk(KERN_NOTICE PFX "%s: duplex mismatch\n",
+				       dev->name);
+				return;
+			}
+
+
+			/* We are using IEEE 802.3z/D5.0 Table 37-4 */
+			switch (aux & PHY_B_AS_PAUSE_MSK) {
+			case PHY_B_AS_PAUSE_MSK:
+				skge->flow_control = FLOW_MODE_SYMMETRIC;
+				break;
+			case PHY_B_AS_PRR:
+				skge->flow_control = FLOW_MODE_REM_SEND;
+				break;
+			case PHY_B_AS_PRT:
+				skge->flow_control = FLOW_MODE_LOC_SEND;
+				break;
+			default:
+				skge->flow_control = FLOW_MODE_NONE;
+			}
+
+			skge->speed = SPEED_1000;
+		}
+
+		if (!netif_carrier_ok(dev))
+			genesis_link_up(skge);
+	}
+}
+
+/* Broadcom 5400 only supports gigabit! SysKonnect did not put an additional
+ * Phy on for 100 or 10Mbit operation
+ */
+static void bcom_phy_init(struct skge_port *skge, int jumbo)
+{
+	struct skge_hw *hw = skge->hw;
+	int port = skge->port;
 	int i;
-	u32 r;
-	u16 id1;
-	u16 ctrl1, ctrl2, ctrl3, ctrl4, ctrl5;
+	u16 id1, r, ext, ctl;
 
 	/* magic workaround patterns for Broadcom */
 	static const struct {
@@ -1042,16 +1089,120 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1042 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 }, 1089 { 0x17, 0x0013 }, { 0x15, 0x0A04 }, { 0x18, 0x0420 },
1043 }; 1090 };
1044 1091
1092 pr_debug("bcom_phy_init\n");
1093
1094 /* read Id from external PHY (all have the same address) */
1095 id1 = xm_phy_read(hw, port, PHY_XMAC_ID1);
1096
1097 /* Optimize MDIO transfer by suppressing preamble. */
1098 r = xm_read16(hw, port, XM_MMU_CMD);
1099 r |= XM_MMU_NO_PRE;
1100 xm_write16(hw, port, XM_MMU_CMD,r);
1101
1102 switch(id1) {
1103 case PHY_BCOM_ID1_C0:
1104 /*
1105 * Workaround BCOM Errata for the C0 type.
1106 * Write magic patterns to reserved registers.
1107 */
1108 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1109 xm_phy_write(hw, port,
1110 C0hack[i].reg, C0hack[i].val);
1111
1112 break;
1113 case PHY_BCOM_ID1_A1:
1114 /*
1115 * Workaround BCOM Errata for the A1 type.
1116 * Write magic patterns to reserved registers.
1117 */
1118 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1119 xm_phy_write(hw, port,
1120 A1hack[i].reg, A1hack[i].val);
1121 break;
1122 }
1123
1124 /*
1125 * Workaround BCOM Errata (#10523) for all BCom PHYs.
1126 * Disable Power Management after reset.
1127 */
1128 r = xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1129 r |= PHY_B_AC_DIS_PM;
1130 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r);
1131
1132 /* Dummy read */
1133 xm_read16(hw, port, XM_ISRC);
1134
1135 ext = PHY_B_PEC_EN_LTR; /* enable tx led */
1136 ctl = PHY_CT_SP1000; /* always 1000mbit */
1137
1138 if (skge->autoneg == AUTONEG_ENABLE) {
1139 /*
1140 * Workaround BCOM Errata #1 for the C5 type.
1141 * 1000Base-T Link Acquisition Failure in Slave Mode
1142 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1143 */
1144 u16 adv = PHY_B_1000C_RD;
1145 if (skge->advertising & ADVERTISED_1000baseT_Half)
1146 adv |= PHY_B_1000C_AHD;
1147 if (skge->advertising & ADVERTISED_1000baseT_Full)
1148 adv |= PHY_B_1000C_AFD;
1149 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, adv);
1150
1151 ctl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1152 } else {
1153 if (skge->duplex == DUPLEX_FULL)
1154 ctl |= PHY_CT_DUP_MD;
1155 /* Force to slave */
1156 xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, PHY_B_1000C_MSE);
1157 }
1158
1159 /* Set autonegotiation pause parameters */
1160 xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV,
1161 phy_pause_map[skge->flow_control] | PHY_AN_CSMA);
1162
1163 /* Handle Jumbo frames */
1164 if (jumbo) {
1165 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1166 PHY_B_AC_TX_TST | PHY_B_AC_LONG_PACK);
1167
1168 ext |= PHY_B_PEC_HIGH_LA;
1169
1170 }
1171
1172 xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ext);
1173 xm_phy_write(hw, port, PHY_BCOM_CTRL, ctl);
1174
1175	/* Use link status change interrupt */
1176 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1177
1178 bcom_check_link(hw, port);
1179}
1180
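The phy_pause_map[] table introduced at the top of this hunk replaces the per-PHY switch statements that used to translate the driver's flow-control mode into the pause capability bits advertised during autonegotiation. A standalone sketch of the same table-lookup idea; the bit values and the LOC_SEND/SYMMETRIC entries below are illustrative assumptions, only the shape mirrors phy_pause_map[]:

#include <stdint.h>
#include <stdio.h>

/* Flow-control modes as the driver names them. */
enum { FLOW_MODE_NONE, FLOW_MODE_LOC_SEND, FLOW_MODE_SYMMETRIC, FLOW_MODE_REM_SEND };

/* Illustrative advertisement bits; the real PHY_AN_PAUSE_* values differ. */
#define AN_PAUSE_CAP	0x0400	/* symmetric pause supported */
#define AN_PAUSE_ASYM	0x0800	/* asymmetric pause supported */

/* Mode -> advertised pause capability, same shape as phy_pause_map[]. */
static const uint16_t pause_map[] = {
	[FLOW_MODE_NONE]	= 0,
	[FLOW_MODE_LOC_SEND]	= AN_PAUSE_ASYM,
	[FLOW_MODE_SYMMETRIC]	= AN_PAUSE_CAP,
	[FLOW_MODE_REM_SEND]	= AN_PAUSE_CAP | AN_PAUSE_ASYM,
};

int main(void)
{
	for (int mode = FLOW_MODE_NONE; mode <= FLOW_MODE_REM_SEND; mode++)
		printf("flow mode %d advertises 0x%04x\n", mode, pause_map[mode]);
	return 0;
}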
1181static void genesis_mac_init(struct skge_hw *hw, int port)
1182{
1183 struct net_device *dev = hw->dev[port];
1184 struct skge_port *skge = netdev_priv(dev);
1185 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1186 int i;
1187 u32 r;
1188 const u8 zero[6] = { 0 };
1189
1190 /* Clear MIB counters */
1191 xm_write16(hw, port, XM_STAT_CMD,
1192 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1193 /* Clear two times according to Errata #3 */
1194 xm_write16(hw, port, XM_STAT_CMD,
1195 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1045 1196
1046 /* initialize Rx, Tx and Link LED */ 1197 /* initialize Rx, Tx and Link LED */
1047 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_ON); 1198 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_ON);
1048 skge_write8(hw, SKGEMAC_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON); 1199 skge_write8(hw, SK_REG(port, LNK_LED_REG), LINKLED_LINKSYNC_ON);
1049 1200
1050 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_START); 1201 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_START);
1051 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_START); 1202 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_START);
1052 1203
1053 /* Unreset the XMAC. */ 1204 /* Unreset the XMAC. */
1054 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST); 1205 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_CLR_MAC_RST);
1055 1206
1056 /* 1207 /*
1057 * Perform additional initialization for external PHYs, 1208 * Perform additional initialization for external PHYs,
@@ -1059,67 +1210,56 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1059 * GMII mode. 1210 * GMII mode.
1060 */ 1211 */
1061 spin_lock_bh(&hw->phy_lock); 1212 spin_lock_bh(&hw->phy_lock);
1062 if (hw->phy_type != SK_PHY_XMAC) { 1213 /* Take external Phy out of reset */
1063 /* Take PHY out of reset. */ 1214 r = skge_read32(hw, B2_GP_IO);
1064 r = skge_read32(hw, B2_GP_IO); 1215 if (port == 0)
1065 if (port == 0) 1216 r |= GP_DIR_0|GP_IO_0;
1066 r |= GP_DIR_0|GP_IO_0; 1217 else
1067 else 1218 r |= GP_DIR_2|GP_IO_2;
1068 r |= GP_DIR_2|GP_IO_2;
1069
1070 skge_write32(hw, B2_GP_IO, r);
1071 skge_read32(hw, B2_GP_IO);
1072
1073 /* Enable GMII mode on the XMAC. */
1074 skge_xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1075
1076 id1 = skge_xm_phy_read(hw, port, PHY_XMAC_ID1);
1077
1078 /* Optimize MDIO transfer by suppressing preamble. */
1079 skge_xm_write16(hw, port, XM_MMU_CMD,
1080 skge_xm_read16(hw, port, XM_MMU_CMD)
1081 | XM_MMU_NO_PRE);
1082
1083 if (id1 == PHY_BCOM_ID1_C0) {
1084 /*
1085 * Workaround BCOM Errata for the C0 type.
1086 * Write magic patterns to reserved registers.
1087 */
1088 for (i = 0; i < ARRAY_SIZE(C0hack); i++)
1089 skge_xm_phy_write(hw, port,
1090 C0hack[i].reg, C0hack[i].val);
1091
1092 } else if (id1 == PHY_BCOM_ID1_A1) {
1093 /*
1094 * Workaround BCOM Errata for the A1 type.
1095 * Write magic patterns to reserved registers.
1096 */
1097 for (i = 0; i < ARRAY_SIZE(A1hack); i++)
1098 skge_xm_phy_write(hw, port,
1099 A1hack[i].reg, A1hack[i].val);
1100 }
1101 1219
1102 /* 1220 skge_write32(hw, B2_GP_IO, r);
1103 * Workaround BCOM Errata (#10523) for all BCom PHYs. 1221 skge_read32(hw, B2_GP_IO);
1104 * Disable Power Management after reset. 1222 spin_unlock_bh(&hw->phy_lock);
1105 */
1106 r = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL);
1107 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, r | PHY_B_AC_DIS_PM);
1108 }
1109 1223
1110 /* Dummy read */ 1224 /* Enable GMII interface */
1111 skge_xm_read16(hw, port, XM_ISRC); 1225 xm_write16(hw, port, XM_HW_CFG, XM_HW_GMII_MD);
1226
1227 bcom_phy_init(skge, jumbo);
1228
1229 /* Set Station Address */
1230 xm_outaddr(hw, port, XM_SA, dev->dev_addr);
1231
1232 /* We don't use match addresses so clear */
1233 for (i = 1; i < 16; i++)
1234 xm_outaddr(hw, port, XM_EXM(i), zero);
1112 1235
1113 r = skge_xm_read32(hw, port, XM_MODE); 1236 /* configure Rx High Water Mark (XM_RX_HI_WM) */
1114 skge_xm_write32(hw, port, XM_MODE, r|XM_MD_CSA); 1237 xm_write16(hw, port, XM_RX_HI_WM, 1450);
1115 1238
1116 /* We don't need the FCS appended to the packet. */ 1239 /* We don't need the FCS appended to the packet. */
1117 r = skge_xm_read16(hw, port, XM_RX_CMD); 1240 r = XM_RX_LENERR_OK | XM_RX_STRIP_FCS;
1118 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_STRIP_FCS); 1241 if (jumbo)
1242 r |= XM_RX_BIG_PK_OK;
1243
1244 if (skge->duplex == DUPLEX_HALF) {
1245 /*
1246 * If in manual half duplex mode the other side might be in
1247 * full duplex mode, so ignore if a carrier extension is not seen
1248 * on frames received
1249 */
1250 r |= XM_RX_DIS_CEXT;
1251 }
1252 xm_write16(hw, port, XM_RX_CMD, r);
1253
1119 1254
1120 /* We want short frames padded to 60 bytes. */ 1255 /* We want short frames padded to 60 bytes. */
1121 r = skge_xm_read16(hw, port, XM_TX_CMD); 1256 xm_write16(hw, port, XM_TX_CMD, XM_TX_AUTO_PAD);
1122 skge_xm_write16(hw, port, XM_TX_CMD, r | XM_TX_AUTO_PAD); 1257
1258 /*
1259 * Bump up the transmit threshold. This helps hold off transmit
1260 * underruns when we're blasting traffic from both ports at once.
1261 */
1262 xm_write16(hw, port, XM_TX_THR, 512);
1123 1263
1124 /* 1264 /*
1125 * Enable the reception of all error frames. This is 1265 * Enable the reception of all error frames. This is
@@ -1135,19 +1275,22 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1135 * case the XMAC will start transfering frames out of the 1275 * case the XMAC will start transfering frames out of the
1136 * RX FIFO as soon as the FIFO threshold is reached. 1276 * RX FIFO as soon as the FIFO threshold is reached.
1137 */ 1277 */
1138 r = skge_xm_read32(hw, port, XM_MODE); 1278 xm_write32(hw, port, XM_MODE, XM_DEF_MODE);
1139 skge_xm_write32(hw, port, XM_MODE,
1140 XM_MD_RX_CRCE|XM_MD_RX_LONG|XM_MD_RX_RUNT|
1141 XM_MD_RX_ERR|XM_MD_RX_IRLE);
1142 1279
1143 skge_xm_outaddr(hw, port, XM_SA, hw->dev[port]->dev_addr);
1144 skge_xm_outaddr(hw, port, XM_EXM(0), hw->dev[port]->dev_addr);
1145 1280
1146 /* 1281 /*
1147 * Bump up the transmit threshold. This helps hold off transmit 1282 * Initialize the Receive Counter Event Mask (XM_RX_EV_MSK)
1148 * underruns when we're blasting traffic from both ports at once. 1283 * - Enable all bits excepting 'Octets Rx OK Low CntOv'
1284 * and 'Octets Rx OK Hi Cnt Ov'.
1149 */ 1285 */
1150 skge_xm_write16(hw, port, XM_TX_THR, 512); 1286 xm_write32(hw, port, XM_RX_EV_MSK, XMR_DEF_MSK);
1287
1288 /*
1289 * Initialize the Transmit Counter Event Mask (XM_TX_EV_MSK)
1290 * - Enable all bits excepting 'Octets Tx OK Low CntOv'
1291 * and 'Octets Tx OK Hi Cnt Ov'.
1292 */
1293 xm_write32(hw, port, XM_TX_EV_MSK, XMT_DEF_MSK);
1151 1294
1152 /* Configure MAC arbiter */ 1295 /* Configure MAC arbiter */
1153 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR); 1296 skge_write16(hw, B3_MA_TO_CTRL, MA_RST_CLR);
@@ -1164,137 +1307,30 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1164 skge_write8(hw, B3_MA_RCINI_TX2, 0); 1307 skge_write8(hw, B3_MA_RCINI_TX2, 0);
1165 1308
1166 /* Configure Rx MAC FIFO */ 1309 /* Configure Rx MAC FIFO */
1167 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_CLR); 1310 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_CLR);
1168 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT); 1311 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_TIM_PAT);
1169 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD); 1312 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_ENA_OP_MD);
1170 1313
1171 /* Configure Tx MAC FIFO */ 1314 /* Configure Tx MAC FIFO */
1172 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_CLR); 1315 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_CLR);
1173 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF); 1316 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_TX_CTRL_DEF);
1174 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD); 1317 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_ENA_OP_MD);
1175 1318
1176 if (hw->dev[port]->mtu > ETH_DATA_LEN) { 1319 if (jumbo) {
1177 /* Enable frame flushing if jumbo frames used */ 1320 /* Enable frame flushing if jumbo frames used */
1178 skge_write16(hw, SKGEMAC_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH); 1321 skge_write16(hw, SK_REG(port,RX_MFF_CTRL1), MFF_ENA_FLUSH);
1179 } else { 1322 } else {
1180 /* enable timeout timers if normal frames */ 1323 /* enable timeout timers if normal frames */
1181 skge_write16(hw, B3_PA_CTRL, 1324 skge_write16(hw, B3_PA_CTRL,
1182 port == 0 ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2); 1325 (port == 0) ? PA_ENA_TO_TX1 : PA_ENA_TO_TX2);
1183 } 1326 }
1184
1185
1186 r = skge_xm_read16(hw, port, XM_RX_CMD);
1187 if (hw->dev[port]->mtu > ETH_DATA_LEN)
1188 skge_xm_write16(hw, port, XM_RX_CMD, r | XM_RX_BIG_PK_OK);
1189 else
1190 skge_xm_write16(hw, port, XM_RX_CMD, r & ~(XM_RX_BIG_PK_OK));
1191
1192 switch (hw->phy_type) {
1193 case SK_PHY_XMAC:
1194 if (skge->autoneg == AUTONEG_ENABLE) {
1195 ctrl1 = PHY_X_AN_FD | PHY_X_AN_HD;
1196
1197 switch (skge->flow_control) {
1198 case FLOW_MODE_NONE:
1199 ctrl1 |= PHY_X_P_NO_PAUSE;
1200 break;
1201 case FLOW_MODE_LOC_SEND:
1202 ctrl1 |= PHY_X_P_ASYM_MD;
1203 break;
1204 case FLOW_MODE_SYMMETRIC:
1205 ctrl1 |= PHY_X_P_SYM_MD;
1206 break;
1207 case FLOW_MODE_REM_SEND:
1208 ctrl1 |= PHY_X_P_BOTH_MD;
1209 break;
1210 }
1211
1212 skge_xm_phy_write(hw, port, PHY_XMAC_AUNE_ADV, ctrl1);
1213 ctrl2 = PHY_CT_ANE | PHY_CT_RE_CFG;
1214 } else {
1215 ctrl2 = 0;
1216 if (skge->duplex == DUPLEX_FULL)
1217 ctrl2 |= PHY_CT_DUP_MD;
1218 }
1219
1220 skge_xm_phy_write(hw, port, PHY_XMAC_CTRL, ctrl2);
1221 break;
1222
1223 case SK_PHY_BCOM:
1224 ctrl1 = PHY_CT_SP1000;
1225 ctrl2 = 0;
1226 ctrl3 = PHY_SEL_TYPE;
1227 ctrl4 = PHY_B_PEC_EN_LTR;
1228 ctrl5 = PHY_B_AC_TX_TST;
1229
1230 if (skge->autoneg == AUTONEG_ENABLE) {
1231 /*
1232 * Workaround BCOM Errata #1 for the C5 type.
1233 * 1000Base-T Link Acquisition Failure in Slave Mode
1234 * Set Repeater/DTE bit 10 of the 1000Base-T Control Register
1235 */
1236 ctrl2 |= PHY_B_1000C_RD;
1237 if (skge->advertising & ADVERTISED_1000baseT_Half)
1238 ctrl2 |= PHY_B_1000C_AHD;
1239 if (skge->advertising & ADVERTISED_1000baseT_Full)
1240 ctrl2 |= PHY_B_1000C_AFD;
1241
1242 /* Set Flow-control capabilities */
1243 switch (skge->flow_control) {
1244 case FLOW_MODE_NONE:
1245 ctrl3 |= PHY_B_P_NO_PAUSE;
1246 break;
1247 case FLOW_MODE_LOC_SEND:
1248 ctrl3 |= PHY_B_P_ASYM_MD;
1249 break;
1250 case FLOW_MODE_SYMMETRIC:
1251 ctrl3 |= PHY_B_P_SYM_MD;
1252 break;
1253 case FLOW_MODE_REM_SEND:
1254 ctrl3 |= PHY_B_P_BOTH_MD;
1255 break;
1256 }
1257
1258 /* Restart Auto-negotiation */
1259 ctrl1 |= PHY_CT_ANE | PHY_CT_RE_CFG;
1260 } else {
1261 if (skge->duplex == DUPLEX_FULL)
1262 ctrl1 |= PHY_CT_DUP_MD;
1263
1264 ctrl2 |= PHY_B_1000C_MSE; /* set it to Slave */
1265 }
1266
1267 skge_xm_phy_write(hw, port, PHY_BCOM_1000T_CTRL, ctrl2);
1268 skge_xm_phy_write(hw, port, PHY_BCOM_AUNE_ADV, ctrl3);
1269
1270 if (skge->netdev->mtu > ETH_DATA_LEN) {
1271 ctrl4 |= PHY_B_PEC_HIGH_LA;
1272 ctrl5 |= PHY_B_AC_LONG_PACK;
1273
1274 skge_xm_phy_write(hw, port,PHY_BCOM_AUX_CTRL, ctrl5);
1275 }
1276
1277 skge_xm_phy_write(hw, port, PHY_BCOM_P_EXT_CTRL, ctrl4);
1278 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, ctrl1);
1279 break;
1280 }
1281 spin_unlock_bh(&hw->phy_lock);
1282
1283 /* Clear MIB counters */
1284 skge_xm_write16(hw, port, XM_STAT_CMD,
1285 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1286 /* Clear two times according to Errata #3 */
1287 skge_xm_write16(hw, port, XM_STAT_CMD,
1288 XM_SC_CLR_RXC | XM_SC_CLR_TXC);
1289
1290 /* Start polling for link status */
1291 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1292} 1327}
1293 1328
1294static void genesis_stop(struct skge_port *skge) 1329static void genesis_stop(struct skge_port *skge)
1295{ 1330{
1296 struct skge_hw *hw = skge->hw; 1331 struct skge_hw *hw = skge->hw;
1297 int port = skge->port; 1332 int port = skge->port;
1333 u32 reg;
1298 1334
1299 /* Clear Tx packet arbiter timeout IRQ */ 1335 /* Clear Tx packet arbiter timeout IRQ */
1300 skge_write16(hw, B3_PA_CTRL, 1336 skge_write16(hw, B3_PA_CTRL,
@@ -1304,33 +1340,30 @@ static void genesis_stop(struct skge_port *skge)
1304 * If the transfer gets stuck at the MAC the STOP command will not 1340 * If the transfer gets stuck at the MAC the STOP command will not
1305 * terminate if we don't flush the XMAC's transmit FIFO ! 1341 * terminate if we don't flush the XMAC's transmit FIFO !
1306 */ 1342 */
1307 skge_xm_write32(hw, port, XM_MODE, 1343 xm_write32(hw, port, XM_MODE,
1308 skge_xm_read32(hw, port, XM_MODE)|XM_MD_FTF); 1344 xm_read32(hw, port, XM_MODE)|XM_MD_FTF);
1309 1345
1310 1346
1311 /* Reset the MAC */ 1347 /* Reset the MAC */
1312 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST); 1348 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), MFF_SET_MAC_RST);
1313 1349
1314 /* For external PHYs there must be special handling */ 1350 /* For external PHYs there must be special handling */
1315 if (hw->phy_type != SK_PHY_XMAC) { 1351 reg = skge_read32(hw, B2_GP_IO);
1316 u32 reg = skge_read32(hw, B2_GP_IO); 1352 if (port == 0) {
1317 1353 reg |= GP_DIR_0;
1318 if (port == 0) { 1354 reg &= ~GP_IO_0;
1319 reg |= GP_DIR_0; 1355 } else {
1320 reg &= ~GP_IO_0; 1356 reg |= GP_DIR_2;
1321 } else { 1357 reg &= ~GP_IO_2;
1322 reg |= GP_DIR_2;
1323 reg &= ~GP_IO_2;
1324 }
1325 skge_write32(hw, B2_GP_IO, reg);
1326 skge_read32(hw, B2_GP_IO);
1327 } 1358 }
1359 skge_write32(hw, B2_GP_IO, reg);
1360 skge_read32(hw, B2_GP_IO);
1328 1361
1329 skge_xm_write16(hw, port, XM_MMU_CMD, 1362 xm_write16(hw, port, XM_MMU_CMD,
1330 skge_xm_read16(hw, port, XM_MMU_CMD) 1363 xm_read16(hw, port, XM_MMU_CMD)
1331 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX)); 1364 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1332 1365
1333 skge_xm_read16(hw, port, XM_MMU_CMD); 1366 xm_read16(hw, port, XM_MMU_CMD);
1334} 1367}
1335 1368
1336 1369
@@ -1341,11 +1374,11 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1341 int i; 1374 int i;
1342 unsigned long timeout = jiffies + HZ; 1375 unsigned long timeout = jiffies + HZ;
1343 1376
1344 skge_xm_write16(hw, port, 1377 xm_write16(hw, port,
1345 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC); 1378 XM_STAT_CMD, XM_SC_SNP_TXC | XM_SC_SNP_RXC);
1346 1379
1347 /* wait for update to complete */ 1380 /* wait for update to complete */
1348 while (skge_xm_read16(hw, port, XM_STAT_CMD) 1381 while (xm_read16(hw, port, XM_STAT_CMD)
1349 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) { 1382 & (XM_SC_SNP_TXC | XM_SC_SNP_RXC)) {
1350 if (time_after(jiffies, timeout)) 1383 if (time_after(jiffies, timeout))
1351 break; 1384 break;
@@ -1353,68 +1386,60 @@ static void genesis_get_stats(struct skge_port *skge, u64 *data)
1353 } 1386 }
1354 1387
1355 /* special case for 64 bit octet counter */ 1388 /* special case for 64 bit octet counter */
1356 data[0] = (u64) skge_xm_read32(hw, port, XM_TXO_OK_HI) << 32 1389 data[0] = (u64) xm_read32(hw, port, XM_TXO_OK_HI) << 32
1357 | skge_xm_read32(hw, port, XM_TXO_OK_LO); 1390 | xm_read32(hw, port, XM_TXO_OK_LO);
1358 data[1] = (u64) skge_xm_read32(hw, port, XM_RXO_OK_HI) << 32 1391 data[1] = (u64) xm_read32(hw, port, XM_RXO_OK_HI) << 32
1359 | skge_xm_read32(hw, port, XM_RXO_OK_LO); 1392 | xm_read32(hw, port, XM_RXO_OK_LO);
1360 1393
1361 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1394 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1362 data[i] = skge_xm_read32(hw, port, skge_stats[i].xmac_offset); 1395 data[i] = xm_read32(hw, port, skge_stats[i].xmac_offset);
1363} 1396}
1364 1397
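genesis_get_stats() assembles the 64-bit octet counters from two 32-bit register halves, and the cast on the high word matters: shifting a 32-bit value by 32 is undefined, so it must be widened first. A minimal sketch of that assembly, with hard-coded values standing in for the xm_read32() results:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-ins for the XM_TXO_OK_HI / XM_TXO_OK_LO register reads. */
	uint32_t hi = 0x00000002;
	uint32_t lo = 0x80000001;

	/* Widen before shifting: (uint64_t)hi << 32 is well defined,
	 * while shifting a 32-bit value by 32 is not. */
	uint64_t octets = (uint64_t) hi << 32 | lo;

	printf("octets = %" PRIu64 "\n", octets);	/* prints 10737418241 */
	return 0;
}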
1365static void genesis_mac_intr(struct skge_hw *hw, int port) 1398static void genesis_mac_intr(struct skge_hw *hw, int port)
1366{ 1399{
1367 struct skge_port *skge = netdev_priv(hw->dev[port]); 1400 struct skge_port *skge = netdev_priv(hw->dev[port]);
1368 u16 status = skge_xm_read16(hw, port, XM_ISRC); 1401 u16 status = xm_read16(hw, port, XM_ISRC);
1369 1402
1370 pr_debug("genesis_intr status %x\n", status); 1403 if (netif_msg_intr(skge))
1371 if (hw->phy_type == SK_PHY_XMAC) { 1404 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1372 /* Link down, start polling for state change */
1373 if (status & XM_IS_INP_ASS) {
1374 skge_xm_write16(hw, port, XM_IMSK,
1375 skge_xm_read16(hw, port, XM_IMSK) | XM_IS_INP_ASS);
1376 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1377 }
1378 else if (status & XM_IS_AND)
1379 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1380 }
1381 1406
1382 if (status & XM_IS_TXF_UR) { 1407 if (status & XM_IS_TXF_UR) {
1383 skge_xm_write32(hw, port, XM_MODE, XM_MD_FTF); 1408 xm_write32(hw, port, XM_MODE, XM_MD_FTF);
1384 ++skge->net_stats.tx_fifo_errors; 1409 ++skge->net_stats.tx_fifo_errors;
1385 } 1410 }
1386 if (status & XM_IS_RXF_OV) { 1411 if (status & XM_IS_RXF_OV) {
1387 skge_xm_write32(hw, port, XM_MODE, XM_MD_FRF); 1412 xm_write32(hw, port, XM_MODE, XM_MD_FRF);
1388 ++skge->net_stats.rx_fifo_errors; 1413 ++skge->net_stats.rx_fifo_errors;
1389 } 1414 }
1390} 1415}
1391 1416
1392static void skge_gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val) 1417static void gm_phy_write(struct skge_hw *hw, int port, u16 reg, u16 val)
1393{ 1418{
1394 int i; 1419 int i;
1395 1420
1396 skge_gma_write16(hw, port, GM_SMI_DATA, val); 1421 gma_write16(hw, port, GM_SMI_DATA, val);
1397 skge_gma_write16(hw, port, GM_SMI_CTRL, 1422 gma_write16(hw, port, GM_SMI_CTRL,
1398 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg)); 1423 GM_SMI_CT_PHY_AD(hw->phy_addr) | GM_SMI_CT_REG_AD(reg));
1399 for (i = 0; i < PHY_RETRIES; i++) { 1424 for (i = 0; i < PHY_RETRIES; i++) {
1400 udelay(1); 1425 udelay(1);
1401 1426
1402 if (!(skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY)) 1427 if (!(gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_BUSY))
1403 break; 1428 break;
1404 } 1429 }
1405} 1430}
1406 1431
1407static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg) 1432static u16 gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1408{ 1433{
1409 int i; 1434 int i;
1410 1435
1411 skge_gma_write16(hw, port, GM_SMI_CTRL, 1436 gma_write16(hw, port, GM_SMI_CTRL,
1412 GM_SMI_CT_PHY_AD(hw->phy_addr) 1437 GM_SMI_CT_PHY_AD(hw->phy_addr)
1413 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD); 1438 | GM_SMI_CT_REG_AD(reg) | GM_SMI_CT_OP_RD);
1414 1439
1415 for (i = 0; i < PHY_RETRIES; i++) { 1440 for (i = 0; i < PHY_RETRIES; i++) {
1416 udelay(1); 1441 udelay(1);
1417 if (skge_gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL) 1442 if (gma_read16(hw, port, GM_SMI_CTRL) & GM_SMI_CT_RD_VAL)
1418 goto ready; 1443 goto ready;
1419 } 1444 }
1420 1445
@@ -1422,24 +1447,7 @@ static u16 skge_gm_phy_read(struct skge_hw *hw, int port, u16 reg)
1422 hw->dev[port]->name); 1447 hw->dev[port]->name);
1423 return 0; 1448 return 0;
1424 ready: 1449 ready:
1425 return skge_gma_read16(hw, port, GM_SMI_DATA); 1450 return gma_read16(hw, port, GM_SMI_DATA);
1426}
1427
1428static void genesis_link_down(struct skge_port *skge)
1429{
1430 struct skge_hw *hw = skge->hw;
1431 int port = skge->port;
1432
1433 pr_debug("genesis_link_down\n");
1434
1435 skge_xm_write16(hw, port, XM_MMU_CMD,
1436 skge_xm_read16(hw, port, XM_MMU_CMD)
1437 & ~(XM_MMU_ENA_RX | XM_MMU_ENA_TX));
1438
1439 /* dummy read to ensure writing */
1440 (void) skge_xm_read16(hw, port, XM_MMU_CMD);
1441
1442 skge_link_down(skge);
1443} 1451}
1444 1452
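gm_phy_write()/gm_phy_read() poll the GMAC's SMI control register a bounded number of times with a short delay between attempts rather than spinning forever. The shape of that loop reduced to a standalone sketch, where poll_ready() is an assumed stand-in for the GM_SMI_CT_BUSY / GM_SMI_CT_RD_VAL test:

#include <stdio.h>

#define PHY_RETRIES 1000

/* Assumed stand-in for the ready test done via gma_read16(). */
static int poll_ready(int attempt)
{
	return attempt >= 3;	/* pretend the PHY answers on the fourth poll */
}

static int smi_read(void)
{
	for (int i = 0; i < PHY_RETRIES; i++) {
		/* the driver does udelay(1) between polls here */
		if (poll_ready(i))
			return i;	/* ready: the caller would now read GM_SMI_DATA */
	}
	return -1;			/* bounded timeout, like the "phy read timeout" path */
}

int main(void)
{
	printf("smi_read ready after %d polls\n", smi_read());
	return 0;
}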
1445static void genesis_link_up(struct skge_port *skge) 1453static void genesis_link_up(struct skge_port *skge)
@@ -1450,7 +1458,7 @@ static void genesis_link_up(struct skge_port *skge)
1450 u32 mode, msk; 1458 u32 mode, msk;
1451 1459
1452 pr_debug("genesis_link_up\n"); 1460 pr_debug("genesis_link_up\n");
1453 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1461 cmd = xm_read16(hw, port, XM_MMU_CMD);
1454 1462
1455 /* 1463 /*
1456 * enabling pause frame reception is required for 1000BT 1464 * enabling pause frame reception is required for 1000BT
@@ -1458,14 +1466,15 @@ static void genesis_link_up(struct skge_port *skge)
1458 */ 1466 */
1459 if (skge->flow_control == FLOW_MODE_NONE || 1467 if (skge->flow_control == FLOW_MODE_NONE ||
1460 skge->flow_control == FLOW_MODE_LOC_SEND) 1468 skge->flow_control == FLOW_MODE_LOC_SEND)
1469 /* Disable Pause Frame Reception */
1461 cmd |= XM_MMU_IGN_PF; 1470 cmd |= XM_MMU_IGN_PF;
1462 else 1471 else
1463 /* Enable Pause Frame Reception */ 1472 /* Enable Pause Frame Reception */
1464 cmd &= ~XM_MMU_IGN_PF; 1473 cmd &= ~XM_MMU_IGN_PF;
1465 1474
1466 skge_xm_write16(hw, port, XM_MMU_CMD, cmd); 1475 xm_write16(hw, port, XM_MMU_CMD, cmd);
1467 1476
1468 mode = skge_xm_read32(hw, port, XM_MODE); 1477 mode = xm_read32(hw, port, XM_MODE);
1469 if (skge->flow_control == FLOW_MODE_SYMMETRIC || 1478 if (skge->flow_control == FLOW_MODE_SYMMETRIC ||
1470 skge->flow_control == FLOW_MODE_LOC_SEND) { 1479 skge->flow_control == FLOW_MODE_LOC_SEND) {
1471 /* 1480 /*
@@ -1479,10 +1488,10 @@ static void genesis_link_up(struct skge_port *skge)
1479 /* XM_PAUSE_DA = '010000C28001' (default) */ 1488 /* XM_PAUSE_DA = '010000C28001' (default) */
1480 /* XM_MAC_PTIME = 0xffff (maximum) */ 1489 /* XM_MAC_PTIME = 0xffff (maximum) */
1481 /* remember this value is defined in big endian (!) */ 1490 /* remember this value is defined in big endian (!) */
1482 skge_xm_write16(hw, port, XM_MAC_PTIME, 0xffff); 1491 xm_write16(hw, port, XM_MAC_PTIME, 0xffff);
1483 1492
1484 mode |= XM_PAUSE_MODE; 1493 mode |= XM_PAUSE_MODE;
1485 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE); 1494 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_ENA_PAUSE);
1486 } else { 1495 } else {
1487 /* 1496 /*
1488 * disable pause frame generation is required for 1000BT 1497 * disable pause frame generation is required for 1000BT
@@ -1491,125 +1500,68 @@ static void genesis_link_up(struct skge_port *skge)
1491 /* Disable Pause Mode in Mode Register */ 1500 /* Disable Pause Mode in Mode Register */
1492 mode &= ~XM_PAUSE_MODE; 1501 mode &= ~XM_PAUSE_MODE;
1493 1502
1494 skge_write16(hw, SKGEMAC_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE); 1503 skge_write16(hw, SK_REG(port, RX_MFF_CTRL1), MFF_DIS_PAUSE);
1495 } 1504 }
1496 1505
1497 skge_xm_write32(hw, port, XM_MODE, mode); 1506 xm_write32(hw, port, XM_MODE, mode);
1498 1507
1499 msk = XM_DEF_MSK; 1508 msk = XM_DEF_MSK;
1500 if (hw->phy_type != SK_PHY_XMAC) 1509 /* disable GP0 interrupt bit for external Phy */
1501 msk |= XM_IS_INP_ASS; /* disable GP0 interrupt bit */ 1510 msk |= XM_IS_INP_ASS;
1502 1511
1503 skge_xm_write16(hw, port, XM_IMSK, msk); 1512 xm_write16(hw, port, XM_IMSK, msk);
1504 skge_xm_read16(hw, port, XM_ISRC); 1513 xm_read16(hw, port, XM_ISRC);
1505 1514
1506 /* get MMU Command Reg. */ 1515 /* get MMU Command Reg. */
1507 cmd = skge_xm_read16(hw, port, XM_MMU_CMD); 1516 cmd = xm_read16(hw, port, XM_MMU_CMD);
1508 if (hw->phy_type != SK_PHY_XMAC && skge->duplex == DUPLEX_FULL) 1517 if (skge->duplex == DUPLEX_FULL)
1509 cmd |= XM_MMU_GMII_FD; 1518 cmd |= XM_MMU_GMII_FD;
1510 1519
1511 if (hw->phy_type == SK_PHY_BCOM) { 1520 /*
1512 /* 1521 * Workaround BCOM Errata (#10523) for all BCom Phys
1513 * Workaround BCOM Errata (#10523) for all BCom Phys 1522 * Enable Power Management after link up
1514 * Enable Power Management after link up 1523 */
1515 */ 1524 xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL,
1516 skge_xm_phy_write(hw, port, PHY_BCOM_AUX_CTRL, 1525 xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL)
1517 skge_xm_phy_read(hw, port, PHY_BCOM_AUX_CTRL) 1526 & ~PHY_B_AC_DIS_PM);
1518 & ~PHY_B_AC_DIS_PM); 1527 xm_phy_write(hw, port, PHY_BCOM_INT_MASK, PHY_B_DEF_MSK);
1519 skge_xm_phy_write(hw, port, PHY_BCOM_INT_MASK,
1520 PHY_B_DEF_MSK);
1521 }
1522 1528
1523 /* enable Rx/Tx */ 1529 /* enable Rx/Tx */
1524 skge_xm_write16(hw, port, XM_MMU_CMD, 1530 xm_write16(hw, port, XM_MMU_CMD,
1525 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX); 1531 cmd | XM_MMU_ENA_RX | XM_MMU_ENA_TX);
1526 skge_link_up(skge); 1532 skge_link_up(skge);
1527} 1533}
1528 1534
1529 1535
1530static void genesis_bcom_intr(struct skge_port *skge) 1536static inline void bcom_phy_intr(struct skge_port *skge)
1531{ 1537{
1532 struct skge_hw *hw = skge->hw; 1538 struct skge_hw *hw = skge->hw;
1533 int port = skge->port; 1539 int port = skge->port;
1534 u16 stat = skge_xm_phy_read(hw, port, PHY_BCOM_INT_STAT); 1540 u16 isrc;
1541
1542 isrc = xm_phy_read(hw, port, PHY_BCOM_INT_STAT);
1543 if (netif_msg_intr(skge))
1544 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x\n",
1545 skge->netdev->name, isrc);
1535 1546
1536 pr_debug("genesis_bcom intr stat=%x\n", stat); 1547 if (isrc & PHY_B_IS_PSE)
1548 printk(KERN_ERR PFX "%s: uncorrectable pair swap error\n",
1549 hw->dev[port]->name);
1537 1550
1538 /* Workaround BCom Errata: 1551 /* Workaround BCom Errata:
1539 * enable and disable loopback mode if "NO HCD" occurs. 1552 * enable and disable loopback mode if "NO HCD" occurs.
1540 */ 1553 */
1541 if (stat & PHY_B_IS_NO_HDCL) { 1554 if (isrc & PHY_B_IS_NO_HDCL) {
1542 u16 ctrl = skge_xm_phy_read(hw, port, PHY_BCOM_CTRL); 1555 u16 ctrl = xm_phy_read(hw, port, PHY_BCOM_CTRL);
1543 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1556 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1544 ctrl | PHY_CT_LOOP); 1557 ctrl | PHY_CT_LOOP);
1545 skge_xm_phy_write(hw, port, PHY_BCOM_CTRL, 1558 xm_phy_write(hw, port, PHY_BCOM_CTRL,
1546 ctrl & ~PHY_CT_LOOP); 1559 ctrl & ~PHY_CT_LOOP);
1547 } 1560 }
1548 1561
1549 stat = skge_xm_phy_read(hw, port, PHY_BCOM_STAT); 1562 if (isrc & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE))
1550 if (stat & (PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) { 1563 bcom_check_link(hw, port);
1551 u16 aux = skge_xm_phy_read(hw, port, PHY_BCOM_AUX_STAT);
1552 if ( !(aux & PHY_B_AS_LS) && netif_carrier_ok(skge->netdev))
1553 genesis_link_down(skge);
1554
1555 else if (stat & PHY_B_IS_LST_CHANGE) {
1556 if (aux & PHY_B_AS_AN_C) {
1557 switch (aux & PHY_B_AS_AN_RES_MSK) {
1558 case PHY_B_RES_1000FD:
1559 skge->duplex = DUPLEX_FULL;
1560 break;
1561 case PHY_B_RES_1000HD:
1562 skge->duplex = DUPLEX_HALF;
1563 break;
1564 }
1565
1566 switch (aux & PHY_B_AS_PAUSE_MSK) {
1567 case PHY_B_AS_PAUSE_MSK:
1568 skge->flow_control = FLOW_MODE_SYMMETRIC;
1569 break;
1570 case PHY_B_AS_PRR:
1571 skge->flow_control = FLOW_MODE_REM_SEND;
1572 break;
1573 case PHY_B_AS_PRT:
1574 skge->flow_control = FLOW_MODE_LOC_SEND;
1575 break;
1576 default:
1577 skge->flow_control = FLOW_MODE_NONE;
1578 }
1579 skge->speed = SPEED_1000;
1580 }
1581 genesis_link_up(skge);
1582 }
1583 else
1584 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1585 }
1586}
1587 1564
1588/* Periodic poll of phy status to check for link transition */
1589static void skge_link_timer(unsigned long __arg)
1590{
1591 struct skge_port *skge = (struct skge_port *) __arg;
1592 struct skge_hw *hw = skge->hw;
1593 int port = skge->port;
1594
1595 if (hw->chip_id != CHIP_ID_GENESIS || !netif_running(skge->netdev))
1596 return;
1597
1598 spin_lock_bh(&hw->phy_lock);
1599 if (hw->phy_type == SK_PHY_BCOM)
1600 genesis_bcom_intr(skge);
1601 else {
1602 int i;
1603 for (i = 0; i < 3; i++)
1604 if (skge_xm_read16(hw, port, XM_ISRC) & XM_IS_INP_ASS)
1605 break;
1606
1607 if (i == 3)
1608 mod_timer(&skge->link_check, jiffies + LINK_POLL_HZ);
1609 else
1610 genesis_link_up(skge);
1611 }
1612 spin_unlock_bh(&hw->phy_lock);
1613} 1565}
1614 1566
1615/* Marvell Phy Initialization */ 1567/* Marvell Phy Initialization */
@@ -1621,31 +1573,27 @@ static void yukon_init(struct skge_hw *hw, int port)
1621 1573
1622 pr_debug("yukon_init\n"); 1574 pr_debug("yukon_init\n");
1623 if (skge->autoneg == AUTONEG_ENABLE) { 1575 if (skge->autoneg == AUTONEG_ENABLE) {
1624 u16 ectrl = skge_gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); 1576 u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL);
1625 1577
1626 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | 1578 ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK |
1627 PHY_M_EC_MAC_S_MSK); 1579 PHY_M_EC_MAC_S_MSK);
1628 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ); 1580 ectrl |= PHY_M_EC_MAC_S(MAC_TX_CLK_25_MHZ);
1629 1581
1630 /* on PHY 88E1111 there is a change for downshift control */ 1582 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1631 if (hw->chip_id == CHIP_ID_YUKON_EC)
1632 ectrl |= PHY_M_EC_M_DSC_2(0) | PHY_M_EC_DOWN_S_ENA;
1633 else
1634 ectrl |= PHY_M_EC_M_DSC(0) | PHY_M_EC_S_DSC(1);
1635 1583
1636 skge_gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl); 1584 gm_phy_write(hw, port, PHY_MARV_EXT_CTRL, ectrl);
1637 } 1585 }
1638 1586
1639 ctrl = skge_gm_phy_read(hw, port, PHY_MARV_CTRL); 1587 ctrl = gm_phy_read(hw, port, PHY_MARV_CTRL);
1640 if (skge->autoneg == AUTONEG_DISABLE) 1588 if (skge->autoneg == AUTONEG_DISABLE)
1641 ctrl &= ~PHY_CT_ANE; 1589 ctrl &= ~PHY_CT_ANE;
1642 1590
1643 ctrl |= PHY_CT_RESET; 1591 ctrl |= PHY_CT_RESET;
1644 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1592 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1645 1593
1646 ctrl = 0; 1594 ctrl = 0;
1647 ct1000 = 0; 1595 ct1000 = 0;
1648 adv = PHY_SEL_TYPE; 1596 adv = PHY_AN_CSMA;
1649 1597
1650 if (skge->autoneg == AUTONEG_ENABLE) { 1598 if (skge->autoneg == AUTONEG_ENABLE) {
1651 if (iscopper(hw)) { 1599 if (iscopper(hw)) {
@@ -1661,41 +1609,12 @@ static void yukon_init(struct skge_hw *hw, int port)
1661 adv |= PHY_M_AN_10_FD; 1609 adv |= PHY_M_AN_10_FD;
1662 if (skge->advertising & ADVERTISED_10baseT_Half) 1610 if (skge->advertising & ADVERTISED_10baseT_Half)
1663 adv |= PHY_M_AN_10_HD; 1611 adv |= PHY_M_AN_10_HD;
1664 1612 } else /* special defines for FIBER (88E1011S only) */
1665 /* Set Flow-control capabilities */
1666 switch (skge->flow_control) {
1667 case FLOW_MODE_NONE:
1668 adv |= PHY_B_P_NO_PAUSE;
1669 break;
1670 case FLOW_MODE_LOC_SEND:
1671 adv |= PHY_B_P_ASYM_MD;
1672 break;
1673 case FLOW_MODE_SYMMETRIC:
1674 adv |= PHY_B_P_SYM_MD;
1675 break;
1676 case FLOW_MODE_REM_SEND:
1677 adv |= PHY_B_P_BOTH_MD;
1678 break;
1679 }
1680 } else { /* special defines for FIBER (88E1011S only) */
1681 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD; 1613 adv |= PHY_M_AN_1000X_AHD | PHY_M_AN_1000X_AFD;
1682 1614
1683 /* Set Flow-control capabilities */ 1615 /* Set Flow-control capabilities */
1684 switch (skge->flow_control) { 1616 adv |= phy_pause_map[skge->flow_control];
1685 case FLOW_MODE_NONE: 1617
1686 adv |= PHY_M_P_NO_PAUSE_X;
1687 break;
1688 case FLOW_MODE_LOC_SEND:
1689 adv |= PHY_M_P_ASYM_MD_X;
1690 break;
1691 case FLOW_MODE_SYMMETRIC:
1692 adv |= PHY_M_P_SYM_MD_X;
1693 break;
1694 case FLOW_MODE_REM_SEND:
1695 adv |= PHY_M_P_BOTH_MD_X;
1696 break;
1697 }
1698 }
1699 /* Restart Auto-negotiation */ 1618 /* Restart Auto-negotiation */
1700 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG; 1619 ctrl |= PHY_CT_ANE | PHY_CT_RE_CFG;
1701 } else { 1620 } else {
@@ -1717,36 +1636,23 @@ static void yukon_init(struct skge_hw *hw, int port)
1717 ctrl |= PHY_CT_RESET; 1636 ctrl |= PHY_CT_RESET;
1718 } 1637 }
1719 1638
1720 if (hw->chip_id != CHIP_ID_YUKON_FE) 1639 gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1721 skge_gm_phy_write(hw, port, PHY_MARV_1000T_CTRL, ct1000);
1722 1640
1723 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv); 1641 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, adv);
1724 skge_gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl); 1642 gm_phy_write(hw, port, PHY_MARV_CTRL, ctrl);
1725 1643
1726 /* Setup Phy LEDs */ 1644 /* Setup Phy LEDs */
1727 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS); 1645 ledctrl = PHY_M_LED_PULS_DUR(PULS_170MS);
1728 ledover = 0; 1646 ledover = 0;
1729 1647
1730 if (hw->chip_id == CHIP_ID_YUKON_FE) { 1648 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1731 /* on 88E3082 these bits are at 11..9 (shifted left) */
1732 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) << 1;
1733
1734 skge_gm_phy_write(hw, port, PHY_MARV_FE_LED_PAR,
1735 ((skge_gm_phy_read(hw, port, PHY_MARV_FE_LED_PAR)
1736
1737 & ~PHY_M_FELP_LED1_MSK)
1738 | PHY_M_FELP_LED1_CTRL(LED_PAR_CTRL_ACT_BL)));
1739 } else {
1740 /* set Tx LED (LED_TX) to blink mode on Rx OR Tx activity */
1741 ledctrl |= PHY_M_LED_BLINK_RT(BLINK_84MS) | PHY_M_LEDC_TX_CTRL;
1742 1649
1743 /* turn off the Rx LED (LED_RX) */ 1650 /* turn off the Rx LED (LED_RX) */
1744 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF); 1651 ledover |= PHY_M_LED_MO_RX(MO_LED_OFF);
1745 }
1746 1652
1747 /* disable blink mode (LED_DUPLEX) on collisions */ 1653 /* disable blink mode (LED_DUPLEX) on collisions */
1748 ctrl |= PHY_M_LEDC_DP_CTRL; 1654 ctrl |= PHY_M_LEDC_DP_CTRL;
1749 skge_gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl); 1655 gm_phy_write(hw, port, PHY_MARV_LED_CTRL, ledctrl);
1750 1656
1751 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) { 1657 if (skge->autoneg == AUTONEG_DISABLE || skge->speed == SPEED_100) {
1752 /* turn on 100 Mbps LED (LED_LINK100) */ 1658 /* turn on 100 Mbps LED (LED_LINK100) */
@@ -1754,25 +1660,25 @@ static void yukon_init(struct skge_hw *hw, int port)
1754 } 1660 }
1755 1661
1756 if (ledover) 1662 if (ledover)
1757 skge_gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover); 1663 gm_phy_write(hw, port, PHY_MARV_LED_OVER, ledover);
1758 1664
1759 /* Enable phy interrupt on autonegotiation complete (or link up) */ 1665 /* Enable phy interrupt on autonegotiation complete (or link up) */
1760 if (skge->autoneg == AUTONEG_ENABLE) 1666 if (skge->autoneg == AUTONEG_ENABLE)
1761 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL); 1667 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_IS_AN_COMPL);
1762 else 1668 else
1763 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1669 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1764} 1670}
1765 1671
1766static void yukon_reset(struct skge_hw *hw, int port) 1672static void yukon_reset(struct skge_hw *hw, int port)
1767{ 1673{
1768 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */ 1674 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);/* disable PHY IRQs */
1769 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */ 1675 gma_write16(hw, port, GM_MC_ADDR_H1, 0); /* clear MC hash */
1770 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 0); 1676 gma_write16(hw, port, GM_MC_ADDR_H2, 0);
1771 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 0); 1677 gma_write16(hw, port, GM_MC_ADDR_H3, 0);
1772 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 0); 1678 gma_write16(hw, port, GM_MC_ADDR_H4, 0);
1773 1679
1774 skge_gma_write16(hw, port, GM_RX_CTRL, 1680 gma_write16(hw, port, GM_RX_CTRL,
1775 skge_gma_read16(hw, port, GM_RX_CTRL) 1681 gma_read16(hw, port, GM_RX_CTRL)
1776 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA); 1682 | GM_RXCR_UCF_ENA | GM_RXCR_MCF_ENA);
1777} 1683}
1778 1684
@@ -1785,17 +1691,17 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1785 1691
1786 /* WA code for COMA mode -- set PHY reset */ 1692 /* WA code for COMA mode -- set PHY reset */
1787 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1693 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1788 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1694 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1789 skge_write32(hw, B2_GP_IO, 1695 skge_write32(hw, B2_GP_IO,
1790 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9)); 1696 (skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9));
1791 1697
1792 /* hard reset */ 1698 /* hard reset */
1793 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), GPC_RST_SET); 1699 skge_write32(hw, SK_REG(port, GPHY_CTRL), GPC_RST_SET);
1794 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_RST_SET); 1700 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_RST_SET);
1795 1701
1796 /* WA code for COMA mode -- clear PHY reset */ 1702 /* WA code for COMA mode -- clear PHY reset */
1797 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1703 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1798 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1704 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1799 skge_write32(hw, B2_GP_IO, 1705 skge_write32(hw, B2_GP_IO,
1800 (skge_read32(hw, B2_GP_IO) | GP_DIR_9) 1706 (skge_read32(hw, B2_GP_IO) | GP_DIR_9)
1801 & ~GP_IO_9); 1707 & ~GP_IO_9);
@@ -1806,13 +1712,13 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1806 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB; 1712 reg |= iscopper(hw) ? GPC_HWCFG_GMII_COP : GPC_HWCFG_GMII_FIB;
1807 1713
1808 /* Clear GMC reset */ 1714 /* Clear GMC reset */
1809 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_SET); 1715 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_SET);
1810 skge_write32(hw, SKGEMAC_REG(port, GPHY_CTRL), reg | GPC_RST_CLR); 1716 skge_write32(hw, SK_REG(port, GPHY_CTRL), reg | GPC_RST_CLR);
1811 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR); 1717 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON | GMC_RST_CLR);
1812 if (skge->autoneg == AUTONEG_DISABLE) { 1718 if (skge->autoneg == AUTONEG_DISABLE) {
1813 reg = GM_GPCR_AU_ALL_DIS; 1719 reg = GM_GPCR_AU_ALL_DIS;
1814 skge_gma_write16(hw, port, GM_GP_CTRL, 1720 gma_write16(hw, port, GM_GP_CTRL,
1815 skge_gma_read16(hw, port, GM_GP_CTRL) | reg); 1721 gma_read16(hw, port, GM_GP_CTRL) | reg);
1816 1722
1817 switch (skge->speed) { 1723 switch (skge->speed) {
1818 case SPEED_1000: 1724 case SPEED_1000:
@@ -1828,7 +1734,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1828 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL; 1734 reg = GM_GPCR_SPEED_1000 | GM_GPCR_SPEED_100 | GM_GPCR_DUP_FULL;
1829 switch (skge->flow_control) { 1735 switch (skge->flow_control) {
1830 case FLOW_MODE_NONE: 1736 case FLOW_MODE_NONE:
1831 skge_write32(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1737 skge_write32(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
1832 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1738 reg |= GM_GPCR_FC_TX_DIS | GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1833 break; 1739 break;
1834 case FLOW_MODE_LOC_SEND: 1740 case FLOW_MODE_LOC_SEND:
@@ -1836,7 +1742,7 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1836 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS; 1742 reg |= GM_GPCR_FC_RX_DIS | GM_GPCR_AU_FCT_DIS;
1837 } 1743 }
1838 1744
1839 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1745 gma_write16(hw, port, GM_GP_CTRL, reg);
1840 skge_read16(hw, GMAC_IRQ_SRC); 1746 skge_read16(hw, GMAC_IRQ_SRC);
1841 1747
1842 spin_lock_bh(&hw->phy_lock); 1748 spin_lock_bh(&hw->phy_lock);
@@ -1844,25 +1750,25 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1844 spin_unlock_bh(&hw->phy_lock); 1750 spin_unlock_bh(&hw->phy_lock);
1845 1751
1846 /* MIB clear */ 1752 /* MIB clear */
1847 reg = skge_gma_read16(hw, port, GM_PHY_ADDR); 1753 reg = gma_read16(hw, port, GM_PHY_ADDR);
1848 skge_gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR); 1754 gma_write16(hw, port, GM_PHY_ADDR, reg | GM_PAR_MIB_CLR);
1849 1755
1850 for (i = 0; i < GM_MIB_CNT_SIZE; i++) 1756 for (i = 0; i < GM_MIB_CNT_SIZE; i++)
1851 skge_gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i); 1757 gma_read16(hw, port, GM_MIB_CNT_BASE + 8*i);
1852 skge_gma_write16(hw, port, GM_PHY_ADDR, reg); 1758 gma_write16(hw, port, GM_PHY_ADDR, reg);
1853 1759
1854 /* transmit control */ 1760 /* transmit control */
1855 skge_gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF)); 1761 gma_write16(hw, port, GM_TX_CTRL, TX_COL_THR(TX_COL_DEF));
1856 1762
1857 /* receive control reg: unicast + multicast + no FCS */ 1763 /* receive control reg: unicast + multicast + no FCS */
1858 skge_gma_write16(hw, port, GM_RX_CTRL, 1764 gma_write16(hw, port, GM_RX_CTRL,
1859 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA); 1765 GM_RXCR_UCF_ENA | GM_RXCR_CRC_DIS | GM_RXCR_MCF_ENA);
1860 1766
1861 /* transmit flow control */ 1767 /* transmit flow control */
1862 skge_gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff); 1768 gma_write16(hw, port, GM_TX_FLOW_CTRL, 0xffff);
1863 1769
1864 /* transmit parameter */ 1770 /* transmit parameter */
1865 skge_gma_write16(hw, port, GM_TX_PARAM, 1771 gma_write16(hw, port, GM_TX_PARAM,
1866 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) | 1772 TX_JAM_LEN_VAL(TX_JAM_LEN_DEF) |
1867 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) | 1773 TX_JAM_IPG_VAL(TX_JAM_IPG_DEF) |
1868 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF)); 1774 TX_IPG_JAM_DATA(TX_IPG_JAM_DEF));
@@ -1872,33 +1778,33 @@ static void yukon_mac_init(struct skge_hw *hw, int port)
1872 if (hw->dev[port]->mtu > 1500) 1778 if (hw->dev[port]->mtu > 1500)
1873 reg |= GM_SMOD_JUMBO_ENA; 1779 reg |= GM_SMOD_JUMBO_ENA;
1874 1780
1875 skge_gma_write16(hw, port, GM_SERIAL_MODE, reg); 1781 gma_write16(hw, port, GM_SERIAL_MODE, reg);
1876 1782
1877 /* physical address: used for pause frames */ 1783 /* physical address: used for pause frames */
1878 skge_gm_set_addr(hw, port, GM_SRC_ADDR_1L, addr); 1784 gma_set_addr(hw, port, GM_SRC_ADDR_1L, addr);
1879 /* virtual address for data */ 1785 /* virtual address for data */
1880 skge_gm_set_addr(hw, port, GM_SRC_ADDR_2L, addr); 1786 gma_set_addr(hw, port, GM_SRC_ADDR_2L, addr);
1881 1787
1882 /* enable interrupt mask for counter overflows */ 1788 /* enable interrupt mask for counter overflows */
1883 skge_gma_write16(hw, port, GM_TX_IRQ_MSK, 0); 1789 gma_write16(hw, port, GM_TX_IRQ_MSK, 0);
1884 skge_gma_write16(hw, port, GM_RX_IRQ_MSK, 0); 1790 gma_write16(hw, port, GM_RX_IRQ_MSK, 0);
1885 skge_gma_write16(hw, port, GM_TR_IRQ_MSK, 0); 1791 gma_write16(hw, port, GM_TR_IRQ_MSK, 0);
1886 1792
1887 /* Initialize Mac Fifo */ 1793 /* Initialize Mac Fifo */
1888 1794
1889 /* Configure Rx MAC FIFO */ 1795 /* Configure Rx MAC FIFO */
1890 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK); 1796 skge_write16(hw, SK_REG(port, RX_GMF_FL_MSK), RX_FF_FL_DEF_MSK);
1891 reg = GMF_OPER_ON | GMF_RX_F_FL_ON; 1797 reg = GMF_OPER_ON | GMF_RX_F_FL_ON;
1892 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1798 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1893 chip_rev(hw) == CHIP_REV_YU_LITE_A3) 1799 hw->chip_rev == CHIP_REV_YU_LITE_A3)
1894 reg &= ~GMF_RX_F_FL_ON; 1800 reg &= ~GMF_RX_F_FL_ON;
1895 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR); 1801 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_CLR);
1896 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), reg); 1802 skge_write16(hw, SK_REG(port, RX_GMF_CTRL_T), reg);
1897 skge_write16(hw, SKGEMAC_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF); 1803 skge_write16(hw, SK_REG(port, RX_GMF_FL_THR), RX_GMF_FL_THR_DEF);
1898 1804
1899 /* Configure Tx MAC FIFO */ 1805 /* Configure Tx MAC FIFO */
1900 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR); 1806 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_CLR);
1901 skge_write16(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON); 1807 skge_write16(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_OPER_ON);
1902} 1808}
1903 1809
1904static void yukon_stop(struct skge_port *skge) 1810static void yukon_stop(struct skge_port *skge)
@@ -1907,19 +1813,19 @@ static void yukon_stop(struct skge_port *skge)
1907 int port = skge->port; 1813 int port = skge->port;
1908 1814
1909 if (hw->chip_id == CHIP_ID_YUKON_LITE && 1815 if (hw->chip_id == CHIP_ID_YUKON_LITE &&
1910 chip_rev(hw) == CHIP_REV_YU_LITE_A3) { 1816 hw->chip_rev == CHIP_REV_YU_LITE_A3) {
1911 skge_write32(hw, B2_GP_IO, 1817 skge_write32(hw, B2_GP_IO,
1912 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9); 1818 skge_read32(hw, B2_GP_IO) | GP_DIR_9 | GP_IO_9);
1913 } 1819 }
1914 1820
1915 skge_gma_write16(hw, port, GM_GP_CTRL, 1821 gma_write16(hw, port, GM_GP_CTRL,
1916 skge_gma_read16(hw, port, GM_GP_CTRL) 1822 gma_read16(hw, port, GM_GP_CTRL)
1917 & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA)); 1823 & ~(GM_GPCR_RX_ENA|GM_GPCR_RX_ENA));
1918 skge_gma_read16(hw, port, GM_GP_CTRL); 1824 gma_read16(hw, port, GM_GP_CTRL);
1919 1825
1920 /* set GPHY Control reset */ 1826 /* set GPHY Control reset */
1921 skge_gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET); 1827 gma_write32(hw, port, GPHY_CTRL, GPC_RST_SET);
1922 skge_gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET); 1828 gma_write32(hw, port, GMAC_CTRL, GMC_RST_SET);
1923} 1829}
1924 1830
1925static void yukon_get_stats(struct skge_port *skge, u64 *data) 1831static void yukon_get_stats(struct skge_port *skge, u64 *data)
@@ -1928,39 +1834,40 @@ static void yukon_get_stats(struct skge_port *skge, u64 *data)
1928 int port = skge->port; 1834 int port = skge->port;
1929 int i; 1835 int i;
1930 1836
1931 data[0] = (u64) skge_gma_read32(hw, port, GM_TXO_OK_HI) << 32 1837 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32
1932 | skge_gma_read32(hw, port, GM_TXO_OK_LO); 1838 | gma_read32(hw, port, GM_TXO_OK_LO);
1933 data[1] = (u64) skge_gma_read32(hw, port, GM_RXO_OK_HI) << 32 1839 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
1934 | skge_gma_read32(hw, port, GM_RXO_OK_LO); 1840 | gma_read32(hw, port, GM_RXO_OK_LO);
1935 1841
1936 for (i = 2; i < ARRAY_SIZE(skge_stats); i++) 1842 for (i = 2; i < ARRAY_SIZE(skge_stats); i++)
1937 data[i] = skge_gma_read32(hw, port, 1843 data[i] = gma_read32(hw, port,
1938 skge_stats[i].gma_offset); 1844 skge_stats[i].gma_offset);
1939} 1845}
1940 1846
1941static void yukon_mac_intr(struct skge_hw *hw, int port) 1847static void yukon_mac_intr(struct skge_hw *hw, int port)
1942{ 1848{
1943 struct skge_port *skge = netdev_priv(hw->dev[port]); 1849 struct net_device *dev = hw->dev[port];
1944 u8 status = skge_read8(hw, SKGEMAC_REG(port, GMAC_IRQ_SRC)); 1850 struct skge_port *skge = netdev_priv(dev);
1851 u8 status = skge_read8(hw, SK_REG(port, GMAC_IRQ_SRC));
1852
1853 if (netif_msg_intr(skge))
1854 printk(KERN_DEBUG PFX "%s: mac interrupt status 0x%x\n",
1855 dev->name, status);
1945 1856
1946 pr_debug("yukon_intr status %x\n", status);
1947 if (status & GM_IS_RX_FF_OR) { 1857 if (status & GM_IS_RX_FF_OR) {
1948 ++skge->net_stats.rx_fifo_errors; 1858 ++skge->net_stats.rx_fifo_errors;
1949 skge_gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO); 1859 gma_write8(hw, port, RX_GMF_CTRL_T, GMF_CLI_RX_FO);
1950 } 1860 }
1951 if (status & GM_IS_TX_FF_UR) { 1861 if (status & GM_IS_TX_FF_UR) {
1952 ++skge->net_stats.tx_fifo_errors; 1862 ++skge->net_stats.tx_fifo_errors;
1953 skge_gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU); 1863 gma_write8(hw, port, TX_GMF_CTRL_T, GMF_CLI_TX_FU);
1954 } 1864 }
1955 1865
1956} 1866}
1957 1867
1958static u16 yukon_speed(const struct skge_hw *hw, u16 aux) 1868static u16 yukon_speed(const struct skge_hw *hw, u16 aux)
1959{ 1869{
1960 if (hw->chip_id == CHIP_ID_YUKON_FE) 1870 switch (aux & PHY_M_PS_SPEED_MSK) {
1961 return (aux & PHY_M_PS_SPEED_100) ? SPEED_100 : SPEED_10;
1962
1963 switch(aux & PHY_M_PS_SPEED_MSK) {
1964 case PHY_M_PS_SPEED_1000: 1871 case PHY_M_PS_SPEED_1000:
1965 return SPEED_1000; 1872 return SPEED_1000;
1966 case PHY_M_PS_SPEED_100: 1873 case PHY_M_PS_SPEED_100:
@@ -1981,15 +1888,15 @@ static void yukon_link_up(struct skge_port *skge)
1981 /* Enable Transmit FIFO Underrun */ 1888 /* Enable Transmit FIFO Underrun */
1982 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK); 1889 skge_write8(hw, GMAC_IRQ_MSK, GMAC_DEF_MSK);
1983 1890
1984 reg = skge_gma_read16(hw, port, GM_GP_CTRL); 1891 reg = gma_read16(hw, port, GM_GP_CTRL);
1985 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE) 1892 if (skge->duplex == DUPLEX_FULL || skge->autoneg == AUTONEG_ENABLE)
1986 reg |= GM_GPCR_DUP_FULL; 1893 reg |= GM_GPCR_DUP_FULL;
1987 1894
1988 /* enable Rx/Tx */ 1895 /* enable Rx/Tx */
1989 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA; 1896 reg |= GM_GPCR_RX_ENA | GM_GPCR_TX_ENA;
1990 skge_gma_write16(hw, port, GM_GP_CTRL, reg); 1897 gma_write16(hw, port, GM_GP_CTRL, reg);
1991 1898
1992 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK); 1899 gm_phy_write(hw, port, PHY_MARV_INT_MASK, PHY_M_DEF_MSK);
1993 skge_link_up(skge); 1900 skge_link_up(skge);
1994} 1901}
1995 1902
@@ -1999,16 +1906,15 @@ static void yukon_link_down(struct skge_port *skge)
1999 int port = skge->port; 1906 int port = skge->port;
2000 1907
2001 pr_debug("yukon_link_down\n"); 1908 pr_debug("yukon_link_down\n");
2002 skge_gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0); 1909 gm_phy_write(hw, port, PHY_MARV_INT_MASK, 0);
2003 skge_gm_phy_write(hw, port, GM_GP_CTRL, 1910 gm_phy_write(hw, port, GM_GP_CTRL,
2004 skge_gm_phy_read(hw, port, GM_GP_CTRL) 1911 gm_phy_read(hw, port, GM_GP_CTRL)
2005 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA)); 1912 & ~(GM_GPCR_RX_ENA | GM_GPCR_TX_ENA));
2006 1913
2007 if (hw->chip_id != CHIP_ID_YUKON_FE && 1914 if (skge->flow_control == FLOW_MODE_REM_SEND) {
2008 skge->flow_control == FLOW_MODE_REM_SEND) {
2009 /* restore Asymmetric Pause bit */ 1915 /* restore Asymmetric Pause bit */
2010 skge_gm_phy_write(hw, port, PHY_MARV_AUNE_ADV, 1916 gm_phy_write(hw, port, PHY_MARV_AUNE_ADV,
2011 skge_gm_phy_read(hw, port, 1917 gm_phy_read(hw, port,
2012 PHY_MARV_AUNE_ADV) 1918 PHY_MARV_AUNE_ADV)
2013 | PHY_M_AN_ASP); 1919 | PHY_M_AN_ASP);
2014 1920
@@ -2027,20 +1933,21 @@ static void yukon_phy_intr(struct skge_port *skge)
2027 const char *reason = NULL; 1933 const char *reason = NULL;
2028 u16 istatus, phystat; 1934 u16 istatus, phystat;
2029 1935
2030 istatus = skge_gm_phy_read(hw, port, PHY_MARV_INT_STAT); 1936 istatus = gm_phy_read(hw, port, PHY_MARV_INT_STAT);
2031 phystat = skge_gm_phy_read(hw, port, PHY_MARV_PHY_STAT); 1937 phystat = gm_phy_read(hw, port, PHY_MARV_PHY_STAT);
2032 pr_debug("yukon phy intr istat=%x phy_stat=%x\n", istatus, phystat); 1938
1939 if (netif_msg_intr(skge))
1940 printk(KERN_DEBUG PFX "%s: phy interrupt status 0x%x 0x%x\n",
1941 skge->netdev->name, istatus, phystat);
2033 1942
2034 if (istatus & PHY_M_IS_AN_COMPL) { 1943 if (istatus & PHY_M_IS_AN_COMPL) {
2035 if (skge_gm_phy_read(hw, port, PHY_MARV_AUNE_LP) 1944 if (gm_phy_read(hw, port, PHY_MARV_AUNE_LP)
2036 & PHY_M_AN_RF) { 1945 & PHY_M_AN_RF) {
2037 reason = "remote fault"; 1946 reason = "remote fault";
2038 goto failed; 1947 goto failed;
2039 } 1948 }
2040 1949
2041 if (!(hw->chip_id == CHIP_ID_YUKON_FE || hw->chip_id == CHIP_ID_YUKON_EC) 1950 if (gm_phy_read(hw, port, PHY_MARV_1000T_STAT) & PHY_B_1000S_MSF) {
2042 && (skge_gm_phy_read(hw, port, PHY_MARV_1000T_STAT)
2043 & PHY_B_1000S_MSF)) {
2044 reason = "master/slave fault"; 1951 reason = "master/slave fault";
2045 goto failed; 1952 goto failed;
2046 } 1953 }
@@ -2054,10 +1961,6 @@ static void yukon_phy_intr(struct skge_port *skge)
2054 ? DUPLEX_FULL : DUPLEX_HALF; 1961 ? DUPLEX_FULL : DUPLEX_HALF;
2055 skge->speed = yukon_speed(hw, phystat); 1962 skge->speed = yukon_speed(hw, phystat);
2056 1963
2057 /* Tx & Rx Pause Enabled bits are at 9..8 */
2058 if (hw->chip_id == CHIP_ID_YUKON_XL)
2059 phystat >>= 6;
2060
2061 /* We are using IEEE 802.3z/D5.0 Table 37-4 */ 1964 /* We are using IEEE 802.3z/D5.0 Table 37-4 */
2062 switch (phystat & PHY_M_PS_PAUSE_MSK) { 1965 switch (phystat & PHY_M_PS_PAUSE_MSK) {
2063 case PHY_M_PS_PAUSE_MSK: 1966 case PHY_M_PS_PAUSE_MSK:
@@ -2075,9 +1978,9 @@ static void yukon_phy_intr(struct skge_port *skge)
2075 1978
2076 if (skge->flow_control == FLOW_MODE_NONE || 1979 if (skge->flow_control == FLOW_MODE_NONE ||
2077 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF)) 1980 (skge->speed < SPEED_1000 && skge->duplex == DUPLEX_HALF))
2078 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_OFF); 1981 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_OFF);
2079 else 1982 else
2080 skge_write8(hw, SKGEMAC_REG(port, GMAC_CTRL), GMC_PAUSE_ON); 1983 skge_write8(hw, SK_REG(port, GMAC_CTRL), GMC_PAUSE_ON);
2081 yukon_link_up(skge); 1984 yukon_link_up(skge);
2082 return; 1985 return;
2083 } 1986 }
@@ -2161,6 +2064,12 @@ static int skge_up(struct net_device *dev)
2161 if (netif_msg_ifup(skge)) 2064 if (netif_msg_ifup(skge))
2162 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); 2065 printk(KERN_INFO PFX "%s: enabling interface\n", dev->name);
2163 2066
2067 if (dev->mtu > RX_BUF_SIZE)
2068 skge->rx_buf_size = dev->mtu + ETH_HLEN + NET_IP_ALIGN;
2069 else
2070 skge->rx_buf_size = RX_BUF_SIZE;
2071
2072
2164 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc); 2073 rx_size = skge->rx_ring.count * sizeof(struct skge_rx_desc);
2165 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc); 2074 tx_size = skge->tx_ring.count * sizeof(struct skge_tx_desc);
2166 skge->mem_size = tx_size + rx_size; 2075 skge->mem_size = tx_size + rx_size;
@@ -2173,7 +2082,8 @@ static int skge_up(struct net_device *dev)
2173 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma))) 2082 if ((err = skge_ring_alloc(&skge->rx_ring, skge->mem, skge->dma)))
2174 goto free_pci_mem; 2083 goto free_pci_mem;
2175 2084
2176 if (skge_rx_fill(skge)) 2085 err = skge_rx_fill(skge);
2086 if (err)
2177 goto free_rx_ring; 2087 goto free_rx_ring;
2178 2088
2179 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size, 2089 if ((err = skge_ring_alloc(&skge->tx_ring, skge->mem + rx_size,
@@ -2182,6 +2092,10 @@ static int skge_up(struct net_device *dev)
2182 2092
2183 skge->tx_avail = skge->tx_ring.count - 1; 2093 skge->tx_avail = skge->tx_ring.count - 1;
2184 2094
2095 /* Enable IRQ from port */
2096 hw->intr_mask |= portirqmask[port];
2097 skge_write32(hw, B0_IMSK, hw->intr_mask);
2098
2185 /* Initialize MAC */ 2099 /* Initialize MAC */
2186 if (hw->chip_id == CHIP_ID_GENESIS) 2100 if (hw->chip_id == CHIP_ID_GENESIS)
2187 genesis_mac_init(hw, port); 2101 genesis_mac_init(hw, port);
@@ -2189,7 +2103,7 @@ static int skge_up(struct net_device *dev)
2189 yukon_mac_init(hw, port); 2103 yukon_mac_init(hw, port);
2190 2104
2191 /* Configure RAMbuffers */ 2105 /* Configure RAMbuffers */
2192 chunk = hw->ram_size / (isdualport(hw) ? 4 : 2); 2106 chunk = hw->ram_size / ((hw->ports + 1)*2);
2193 ram_addr = hw->ram_offset + 2 * chunk * port; 2107 ram_addr = hw->ram_offset + 2 * chunk * port;
2194 2108
2195 skge_ramset(hw, rxqaddr[port], ram_addr, chunk); 2109 skge_ramset(hw, rxqaddr[port], ram_addr, chunk);
@@ -2227,7 +2141,6 @@ static int skge_down(struct net_device *dev)
2227 netif_stop_queue(dev); 2141 netif_stop_queue(dev);
2228 2142
2229 del_timer_sync(&skge->led_blink); 2143 del_timer_sync(&skge->led_blink);
2230 del_timer_sync(&skge->link_check);
2231 2144
2232 /* Stop transmitter */ 2145 /* Stop transmitter */
2233 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP); 2146 skge_write8(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_STOP);
@@ -2240,12 +2153,12 @@ static int skge_down(struct net_device *dev)
2240 yukon_stop(skge); 2153 yukon_stop(skge);
2241 2154
2242 /* Disable Force Sync bit and Enable Alloc bit */ 2155 /* Disable Force Sync bit and Enable Alloc bit */
2243 skge_write8(hw, SKGEMAC_REG(port, TXA_CTRL), 2156 skge_write8(hw, SK_REG(port, TXA_CTRL),
2244 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC); 2157 TXA_DIS_FSYNC | TXA_DIS_ALLOC | TXA_STOP_RC);
2245 2158
2246 /* Stop Interval Timer and Limit Counter of Tx Arbiter */ 2159 /* Stop Interval Timer and Limit Counter of Tx Arbiter */
2247 skge_write32(hw, SKGEMAC_REG(port, TXA_ITI_INI), 0L); 2160 skge_write32(hw, SK_REG(port, TXA_ITI_INI), 0L);
2248 skge_write32(hw, SKGEMAC_REG(port, TXA_LIM_INI), 0L); 2161 skge_write32(hw, SK_REG(port, TXA_LIM_INI), 0L);
2249 2162
2250 /* Reset PCI FIFO */ 2163 /* Reset PCI FIFO */
2251 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET); 2164 skge_write32(hw, Q_ADDR(txqaddr[port], Q_CSR), CSR_SET_RESET);
@@ -2260,13 +2173,13 @@ static int skge_down(struct net_device *dev)
2260 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET); 2173 skge_write32(hw, Q_ADDR(rxqaddr[port], Q_CSR), CSR_SET_RESET);
2261 2174
2262 if (hw->chip_id == CHIP_ID_GENESIS) { 2175 if (hw->chip_id == CHIP_ID_GENESIS) {
2263 skge_write8(hw, SKGEMAC_REG(port, TX_MFF_CTRL2), MFF_RST_SET); 2176 skge_write8(hw, SK_REG(port, TX_MFF_CTRL2), MFF_RST_SET);
2264 skge_write8(hw, SKGEMAC_REG(port, RX_MFF_CTRL2), MFF_RST_SET); 2177 skge_write8(hw, SK_REG(port, RX_MFF_CTRL2), MFF_RST_SET);
2265 skge_write8(hw, SKGEMAC_REG(port, TX_LED_CTRL), LED_STOP); 2178 skge_write8(hw, SK_REG(port, TX_LED_CTRL), LED_STOP);
2266 skge_write8(hw, SKGEMAC_REG(port, RX_LED_CTRL), LED_STOP); 2179 skge_write8(hw, SK_REG(port, RX_LED_CTRL), LED_STOP);
2267 } else { 2180 } else {
2268 skge_write8(hw, SKGEMAC_REG(port, RX_GMF_CTRL_T), GMF_RST_SET); 2181 skge_write8(hw, SK_REG(port, RX_GMF_CTRL_T), GMF_RST_SET);
2269 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), GMF_RST_SET); 2182 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T), GMF_RST_SET);
2270 } 2183 }
2271 2184
2272 /* turn off LEDs */ 2185 /* turn off LEDs */
@@ -2299,10 +2212,10 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2299 2212
2300 local_irq_save(flags); 2213 local_irq_save(flags);
2301 if (!spin_trylock(&skge->tx_lock)) { 2214 if (!spin_trylock(&skge->tx_lock)) {
2302 /* Collision - tell upper layer to requeue */ 2215 /* Collision - tell upper layer to requeue */
2303 local_irq_restore(flags); 2216 local_irq_restore(flags);
2304 return NETDEV_TX_LOCKED; 2217 return NETDEV_TX_LOCKED;
2305 } 2218 }
2306 2219
2307 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) { 2220 if (unlikely(skge->tx_avail < skb_shinfo(skb)->nr_frags +1)) {
2308 netif_stop_queue(dev); 2221 netif_stop_queue(dev);
@@ -2333,7 +2246,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2333 * does. Looks like hardware is wrong? 2246 * does. Looks like hardware is wrong?
2334 */ 2247 */
2335 if (ip->protocol == IPPROTO_UDP 2248 if (ip->protocol == IPPROTO_UDP
2336 && chip_rev(hw) == 0 && hw->chip_id == CHIP_ID_YUKON) 2249 && hw->chip_rev == 0 && hw->chip_id == CHIP_ID_YUKON)
2337 control = BMU_TCP_CHECK; 2250 control = BMU_TCP_CHECK;
2338 else 2251 else
2339 control = BMU_UDP_CHECK; 2252 control = BMU_UDP_CHECK;
@@ -2394,6 +2307,7 @@ static int skge_xmit_frame(struct sk_buff *skb, struct net_device *dev)
2394 2307
2395static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e) 2308static inline void skge_tx_free(struct skge_hw *hw, struct skge_element *e)
2396{ 2309{
2310 /* This ring element can be skb or fragment */
2397 if (e->skb) { 2311 if (e->skb) {
2398 pci_unmap_single(hw->pdev, 2312 pci_unmap_single(hw->pdev,
2399 pci_unmap_addr(e, mapaddr), 2313 pci_unmap_addr(e, mapaddr),
@@ -2438,16 +2352,17 @@ static void skge_tx_timeout(struct net_device *dev)
2438static int skge_change_mtu(struct net_device *dev, int new_mtu) 2352static int skge_change_mtu(struct net_device *dev, int new_mtu)
2439{ 2353{
2440 int err = 0; 2354 int err = 0;
2355 int running = netif_running(dev);
2441 2356
2442 if(new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU) 2357 if (new_mtu < ETH_ZLEN || new_mtu > ETH_JUMBO_MTU)
2443 return -EINVAL; 2358 return -EINVAL;
2444 2359
2445 dev->mtu = new_mtu;
2446 2360
2447 if (netif_running(dev)) { 2361 if (running)
2448 skge_down(dev); 2362 skge_down(dev);
2363 dev->mtu = new_mtu;
2364 if (running)
2449 skge_up(dev); 2365 skge_up(dev);
2450 }
2451 2366
2452 return err; 2367 return err;
2453} 2368}
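
The reworked skge_change_mtu() above only bounces the interface when it is actually running, and it writes dev->mtu while the port is quiesced so that skge_up() sizes its receive buffers from the new value. Below is a minimal, self-contained C sketch of that ordering; stop_port()/start_port() are stand-ins invented for the example rather than driver functions, and the buffer-size line simply echoes the mtu + header + alignment arithmetic used at the top of this diff.

#include <stdio.h>

struct fake_port {
	int mtu;
	int running;
	int rx_buf_size;
};

/* Stand-in for skge_down(): quiesce the port. */
static void stop_port(struct fake_port *p)
{
	p->running = 0;
}

/* Stand-in for skge_up(): the buffer size is derived from the MTU that is
 * current *now*, which is why the MTU write sits between stop and start. */
static void start_port(struct fake_port *p)
{
	p->rx_buf_size = p->mtu + 14 + 2;	/* ~ETH_HLEN + NET_IP_ALIGN */
	p->running = 1;
}

static int change_mtu(struct fake_port *p, int new_mtu)
{
	int was_running = p->running;

	if (new_mtu < 60 || new_mtu > 9000)	/* roughly ETH_ZLEN..ETH_JUMBO_MTU */
		return -1;
	if (was_running)
		stop_port(p);
	p->mtu = new_mtu;			/* updated only while the port is quiet */
	if (was_running)
		start_port(p);
	return 0;
}

int main(void)
{
	struct fake_port p = { .mtu = 1500, .running = 1, .rx_buf_size = 1516 };

	change_mtu(&p, 9000);
	printf("mtu=%d rx_buf_size=%d running=%d\n", p.mtu, p.rx_buf_size, p.running);
	return 0;
}
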
@@ -2462,7 +2377,9 @@ static void genesis_set_multicast(struct net_device *dev)
2462 u32 mode; 2377 u32 mode;
2463 u8 filter[8]; 2378 u8 filter[8];
2464 2379
2465 mode = skge_xm_read32(hw, port, XM_MODE); 2380 pr_debug("genesis_set_multicast flags=%x count=%d\n", dev->flags, dev->mc_count);
2381
2382 mode = xm_read32(hw, port, XM_MODE);
2466 mode |= XM_MD_ENA_HASH; 2383 mode |= XM_MD_ENA_HASH;
2467 if (dev->flags & IFF_PROMISC) 2384 if (dev->flags & IFF_PROMISC)
2468 mode |= XM_MD_ENA_PROM; 2385 mode |= XM_MD_ENA_PROM;
@@ -2473,17 +2390,16 @@ static void genesis_set_multicast(struct net_device *dev)
2473 memset(filter, 0xff, sizeof(filter)); 2390 memset(filter, 0xff, sizeof(filter));
2474 else { 2391 else {
2475 memset(filter, 0, sizeof(filter)); 2392 memset(filter, 0, sizeof(filter));
2476 for(i = 0; list && i < count; i++, list = list->next) { 2393 for (i = 0; list && i < count; i++, list = list->next) {
2477 u32 crc = crc32_le(~0, list->dmi_addr, ETH_ALEN); 2394 u32 crc, bit;
2478 u8 bit = 63 - (crc & 63); 2395 crc = ether_crc_le(ETH_ALEN, list->dmi_addr);
2479 2396 bit = ~crc & 0x3f;
2480 filter[bit/8] |= 1 << (bit%8); 2397 filter[bit/8] |= 1 << (bit%8);
2481 } 2398 }
2482 } 2399 }
2483 2400
2484 skge_xm_outhash(hw, port, XM_HSM, filter); 2401 xm_write32(hw, port, XM_MODE, mode);
2485 2402 xm_outhash(hw, port, XM_HSM, filter);
2486 skge_xm_write32(hw, port, XM_MODE, mode);
2487} 2403}
2488 2404
2489static void yukon_set_multicast(struct net_device *dev) 2405static void yukon_set_multicast(struct net_device *dev)
@@ -2497,7 +2413,7 @@ static void yukon_set_multicast(struct net_device *dev)
2497 2413
2498 memset(filter, 0, sizeof(filter)); 2414 memset(filter, 0, sizeof(filter));
2499 2415
2500 reg = skge_gma_read16(hw, port, GM_RX_CTRL); 2416 reg = gma_read16(hw, port, GM_RX_CTRL);
2501 reg |= GM_RXCR_UCF_ENA; 2417 reg |= GM_RXCR_UCF_ENA;
2502 2418
2503 if (dev->flags & IFF_PROMISC) /* promiscuous */ 2419 if (dev->flags & IFF_PROMISC) /* promiscuous */
@@ -2510,23 +2426,23 @@ static void yukon_set_multicast(struct net_device *dev)
2510 int i; 2426 int i;
2511 reg |= GM_RXCR_MCF_ENA; 2427 reg |= GM_RXCR_MCF_ENA;
2512 2428
2513 for(i = 0; list && i < dev->mc_count; i++, list = list->next) { 2429 for (i = 0; list && i < dev->mc_count; i++, list = list->next) {
2514 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f; 2430 u32 bit = ether_crc(ETH_ALEN, list->dmi_addr) & 0x3f;
2515 filter[bit/8] |= 1 << (bit%8); 2431 filter[bit/8] |= 1 << (bit%8);
2516 } 2432 }
2517 } 2433 }
2518 2434
2519 2435
2520 skge_gma_write16(hw, port, GM_MC_ADDR_H1, 2436 gma_write16(hw, port, GM_MC_ADDR_H1,
2521 (u16)filter[0] | ((u16)filter[1] << 8)); 2437 (u16)filter[0] | ((u16)filter[1] << 8));
2522 skge_gma_write16(hw, port, GM_MC_ADDR_H2, 2438 gma_write16(hw, port, GM_MC_ADDR_H2,
2523 (u16)filter[2] | ((u16)filter[3] << 8)); 2439 (u16)filter[2] | ((u16)filter[3] << 8));
2524 skge_gma_write16(hw, port, GM_MC_ADDR_H3, 2440 gma_write16(hw, port, GM_MC_ADDR_H3,
2525 (u16)filter[4] | ((u16)filter[5] << 8)); 2441 (u16)filter[4] | ((u16)filter[5] << 8));
2526 skge_gma_write16(hw, port, GM_MC_ADDR_H4, 2442 gma_write16(hw, port, GM_MC_ADDR_H4,
2527 (u16)filter[6] | ((u16)filter[7] << 8)); 2443 (u16)filter[6] | ((u16)filter[7] << 8));
2528 2444
2529 skge_gma_write16(hw, port, GM_RX_CTRL, reg); 2445 gma_write16(hw, port, GM_RX_CTRL, reg);
2530} 2446}
2531 2447
2532static inline int bad_phy_status(const struct skge_hw *hw, u32 status) 2448static inline int bad_phy_status(const struct skge_hw *hw, u32 status)
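
Both set_multicast hunks above reduce each multicast address to a 6-bit hash and set that bit in a 64-bit filter: the GENESIS/XMAC path now takes the complement of the low six bits of the little-endian Ethernet CRC (ether_crc_le), while the Yukon/GMAC path keeps the low six bits of the big-endian CRC (ether_crc). The userspace sketch below mirrors those two computations; the CRC helpers are reimplemented here only so the example stands alone and are believed to match the usual kernel ether_crc/ether_crc_le definitions.

#include <stdio.h>
#include <stdint.h>

/* Little-endian Ethernet CRC-32, in the style of ether_crc_le(). */
static uint32_t crc32_le_eth(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t byte = *data++;
		for (int bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc >> 1) ^ (((crc ^ byte) & 1) ? 0xedb88320 : 0);
	}
	return crc;
}

/* Big-endian Ethernet CRC-32, in the style of ether_crc(). */
static uint32_t crc32_be_eth(const uint8_t *data, int len)
{
	uint32_t crc = 0xffffffff;

	while (len-- > 0) {
		uint8_t byte = *data++;
		for (int bit = 0; bit < 8; bit++, byte >>= 1)
			crc = (crc << 1) ^
			      ((((crc >> 31) ^ (byte & 1)) & 1) ? 0x04c11db7 : 0);
	}
	return crc;
}

int main(void)
{
	const uint8_t mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };

	/* GENESIS (XMAC): complement of the low 6 bits of the LE CRC. */
	unsigned bit = ~crc32_le_eth(mac, 6) & 0x3f;
	filter[bit / 8] |= 1 << (bit % 8);

	/* Yukon (GMAC): low 6 bits of the BE CRC. */
	unsigned ybit = crc32_be_eth(mac, 6) & 0x3f;

	printf("genesis bit=%u yukon bit=%u filter[%u]=0x%02x\n",
	       bit, ybit, bit / 8, filter[bit / 8]);
	return 0;
}

Running it for a sample address such as 01:00:5e:00:00:01 shows which filter byte and bit that MAC would occupy on either chip family.
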
@@ -2545,28 +2461,76 @@ static void skge_rx_error(struct skge_port *skge, int slot,
2545 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n", 2461 printk(KERN_DEBUG PFX "%s: rx err, slot %d control 0x%x status 0x%x\n",
2546 skge->netdev->name, slot, control, status); 2462 skge->netdev->name, slot, control, status);
2547 2463
2548 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2464 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF))
2549 || (control & BMU_BBC) > skge->netdev->mtu + VLAN_ETH_HLEN)
2550 skge->net_stats.rx_length_errors++; 2465 skge->net_stats.rx_length_errors++;
2551 else { 2466 else if (skge->hw->chip_id == CHIP_ID_GENESIS) {
2552 if (skge->hw->chip_id == CHIP_ID_GENESIS) { 2467 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR))
2553 if (status & (XMR_FS_RUNT|XMR_FS_LNG_ERR)) 2468 skge->net_stats.rx_length_errors++;
2554 skge->net_stats.rx_length_errors++; 2469 if (status & XMR_FS_FRA_ERR)
2555 if (status & XMR_FS_FRA_ERR) 2470 skge->net_stats.rx_frame_errors++;
2556 skge->net_stats.rx_frame_errors++; 2471 if (status & XMR_FS_FCS_ERR)
2557 if (status & XMR_FS_FCS_ERR) 2472 skge->net_stats.rx_crc_errors++;
2558 skge->net_stats.rx_crc_errors++; 2473 } else {
2559 } else { 2474 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE))
2560 if (status & (GMR_FS_LONG_ERR|GMR_FS_UN_SIZE)) 2475 skge->net_stats.rx_length_errors++;
2561 skge->net_stats.rx_length_errors++; 2476 if (status & GMR_FS_FRAGMENT)
2562 if (status & GMR_FS_FRAGMENT) 2477 skge->net_stats.rx_frame_errors++;
2563 skge->net_stats.rx_frame_errors++; 2478 if (status & GMR_FS_CRC_ERR)
2564 if (status & GMR_FS_CRC_ERR) 2479 skge->net_stats.rx_crc_errors++;
2565 skge->net_stats.rx_crc_errors++; 2480 }
2481}
2482
2483/* Get receive buffer from descriptor.
2484 * Handles copy of small buffers and reallocation failures
2485 */
2486static inline struct sk_buff *skge_rx_get(struct skge_port *skge,
2487 struct skge_element *e,
2488 unsigned int len)
2489{
2490 struct sk_buff *nskb, *skb;
2491
2492 if (len < RX_COPY_THRESHOLD) {
2493 nskb = skge_rx_alloc(skge->netdev, len + NET_IP_ALIGN);
2494 if (unlikely(!nskb))
2495 return NULL;
2496
2497 pci_dma_sync_single_for_cpu(skge->hw->pdev,
2498 pci_unmap_addr(e, mapaddr),
2499 len, PCI_DMA_FROMDEVICE);
2500 memcpy(nskb->data, e->skb->data, len);
2501 pci_dma_sync_single_for_device(skge->hw->pdev,
2502 pci_unmap_addr(e, mapaddr),
2503 len, PCI_DMA_FROMDEVICE);
2504
2505 if (skge->rx_csum) {
2506 struct skge_rx_desc *rd = e->desc;
2507 nskb->csum = le16_to_cpu(rd->csum2);
2508 nskb->ip_summed = CHECKSUM_HW;
2566 } 2509 }
2510 skge_rx_reuse(e, skge->rx_buf_size);
2511 return nskb;
2512 } else {
2513 nskb = skge_rx_alloc(skge->netdev, skge->rx_buf_size);
2514 if (unlikely(!nskb))
2515 return NULL;
2516
2517 pci_unmap_single(skge->hw->pdev,
2518 pci_unmap_addr(e, mapaddr),
2519 pci_unmap_len(e, maplen),
2520 PCI_DMA_FROMDEVICE);
2521 skb = e->skb;
2522 if (skge->rx_csum) {
2523 struct skge_rx_desc *rd = e->desc;
2524 skb->csum = le16_to_cpu(rd->csum2);
2525 skb->ip_summed = CHECKSUM_HW;
2526 }
2527
2528 skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
2529 return skb;
2567 } 2530 }
2568} 2531}
2569 2532
2533
2570static int skge_poll(struct net_device *dev, int *budget) 2534static int skge_poll(struct net_device *dev, int *budget)
2571{ 2535{
2572 struct skge_port *skge = netdev_priv(dev); 2536 struct skge_port *skge = netdev_priv(dev);
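
skge_rx_get() above is a copy-break receive path: frames shorter than RX_COPY_THRESHOLD are copied into a freshly allocated small skb so the original DMA buffer can stay in the ring (skge_rx_reuse), while larger frames are handed up as-is and the descriptor is refilled with a new full-size buffer (skge_rx_setup). Here is a minimal userspace model of that decision, with malloc/memcpy standing in for skb allocation and DMA mapping; every name in it is illustrative rather than driver API.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

#define COPY_THRESHOLD	128	/* stand-in for RX_COPY_THRESHOLD */
#define BUF_SIZE	1536	/* stand-in for the ring buffer size */

struct rx_slot {
	unsigned char *buf;	/* buffer currently owned by the ring */
};

/* Returns the buffer to hand to the stack, or NULL if allocation failed
 * (in which case the slot keeps its buffer and the frame is dropped). */
static unsigned char *rx_get(struct rx_slot *slot, size_t len)
{
	if (len < COPY_THRESHOLD) {
		/* Small frame: copy it out, keep the big buffer in the ring. */
		unsigned char *copy = malloc(len);
		if (!copy)
			return NULL;
		memcpy(copy, slot->buf, len);
		return copy;			/* slot->buf is reused as-is */
	} else {
		/* Large frame: pass the ring buffer up, refill the slot. */
		unsigned char *fresh = malloc(BUF_SIZE);
		unsigned char *frame;
		if (!fresh)
			return NULL;		/* keep old buffer, drop frame */
		frame = slot->buf;
		slot->buf = fresh;
		return frame;
	}
}

int main(void)
{
	struct rx_slot slot = { .buf = malloc(BUF_SIZE) };
	unsigned char *frame;

	if (!slot.buf)
		return 1;
	memset(slot.buf, 0, BUF_SIZE);

	frame = rx_get(&slot, 64);
	printf("small frame copied to %p, ring buffer %p kept\n",
	       (void *)frame, (void *)slot.buf);
	free(frame);
	free(slot.buf);
	return 0;
}

The pay-off is that small packets avoid churning large, DMA-mapped buffers while large packets avoid an extra copy.
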
@@ -2575,13 +2539,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2575 struct skge_element *e; 2539 struct skge_element *e;
2576 unsigned int to_do = min(dev->quota, *budget); 2540 unsigned int to_do = min(dev->quota, *budget);
2577 unsigned int work_done = 0; 2541 unsigned int work_done = 0;
2578 int done;
2579 static const u32 irqmask[] = { IS_PORT_1, IS_PORT_2 };
2580 2542
2581 for (e = ring->to_clean; e != ring->to_use && work_done < to_do; 2543 pr_debug("skge_poll\n");
2582 e = e->next) { 2544
2545 for (e = ring->to_clean; work_done < to_do; e = e->next) {
2583 struct skge_rx_desc *rd = e->desc; 2546 struct skge_rx_desc *rd = e->desc;
2584 struct sk_buff *skb = e->skb; 2547 struct sk_buff *skb;
2585 u32 control, len, status; 2548 u32 control, len, status;
2586 2549
2587 rmb(); 2550 rmb();
@@ -2590,19 +2553,12 @@ static int skge_poll(struct net_device *dev, int *budget)
2590 break; 2553 break;
2591 2554
2592 len = control & BMU_BBC; 2555 len = control & BMU_BBC;
2593 e->skb = NULL;
2594
2595 pci_unmap_single(hw->pdev,
2596 pci_unmap_addr(e, mapaddr),
2597 pci_unmap_len(e, maplen),
2598 PCI_DMA_FROMDEVICE);
2599
2600 status = rd->status; 2556 status = rd->status;
2601 if ((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF) 2557
2602 || len > dev->mtu + VLAN_ETH_HLEN 2558 if (unlikely((control & (BMU_EOF|BMU_STF)) != (BMU_STF|BMU_EOF)
2603 || bad_phy_status(hw, status)) { 2559 || bad_phy_status(hw, status))) {
2604 skge_rx_error(skge, e - ring->start, control, status); 2560 skge_rx_error(skge, e - ring->start, control, status);
2605 dev_kfree_skb(skb); 2561 skge_rx_reuse(e, skge->rx_buf_size);
2606 continue; 2562 continue;
2607 } 2563 }
2608 2564
@@ -2610,43 +2566,37 @@ static int skge_poll(struct net_device *dev, int *budget)
2610 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n", 2566 printk(KERN_DEBUG PFX "%s: rx slot %td status 0x%x len %d\n",
2611 dev->name, e - ring->start, rd->status, len); 2567 dev->name, e - ring->start, rd->status, len);
2612 2568
2613 skb_put(skb, len); 2569 skb = skge_rx_get(skge, e, len);
2614 skb->protocol = eth_type_trans(skb, dev); 2570 if (likely(skb)) {
2615 2571 skb_put(skb, len);
2616 if (skge->rx_csum) { 2572 skb->protocol = eth_type_trans(skb, dev);
2617 skb->csum = le16_to_cpu(rd->csum2);
2618 skb->ip_summed = CHECKSUM_HW;
2619 }
2620 2573
2621 dev->last_rx = jiffies; 2574 dev->last_rx = jiffies;
2622 netif_receive_skb(skb); 2575 netif_receive_skb(skb);
2623 2576
2624 ++work_done; 2577 ++work_done;
2578 } else
2579 skge_rx_reuse(e, skge->rx_buf_size);
2625 } 2580 }
2626 ring->to_clean = e; 2581 ring->to_clean = e;
2627 2582
2628 *budget -= work_done;
2629 dev->quota -= work_done;
2630 done = work_done < to_do;
2631
2632 if (skge_rx_fill(skge))
2633 done = 0;
2634
2635 /* restart receiver */ 2583 /* restart receiver */
2636 wmb(); 2584 wmb();
2637 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR), 2585 skge_write8(hw, Q_ADDR(rxqaddr[skge->port], Q_CSR),
2638 CSR_START | CSR_IRQ_CL_F); 2586 CSR_START | CSR_IRQ_CL_F);
2639 2587
2640 if (done) { 2588 *budget -= work_done;
2641 local_irq_disable(); 2589 dev->quota -= work_done;
2642 hw->intr_mask |= irqmask[skge->port];
2643 /* Order is important since data can get interrupted */
2644 skge_write32(hw, B0_IMSK, hw->intr_mask);
2645 __netif_rx_complete(dev);
2646 local_irq_enable();
2647 }
2648 2590
2649 return !done; 2591 if (work_done >= to_do)
2592 return 1; /* not done */
2593
2594 local_irq_disable();
2595 __netif_rx_complete(dev);
2596 hw->intr_mask |= portirqmask[skge->port];
2597 skge_write32(hw, B0_IMSK, hw->intr_mask);
2598 local_irq_enable();
2599 return 0;
2650} 2600}
2651 2601
2652static inline void skge_tx_intr(struct net_device *dev) 2602static inline void skge_tx_intr(struct net_device *dev)
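
The rewritten skge_poll() above always restarts the receiver, and only re-enables the port's bit in the interrupt mask (portirqmask) once it has processed fewer frames than its budget; the interrupt-handler hunks further down mask that same bit before scheduling the poll. The schematic C model below walks through that mask, schedule, poll, unmask handshake; the helpers and the single mask bit are invented for the sketch and are not kernel API.

#include <stdio.h>

#define PORT_IRQ_BIT	0x1	/* stand-in for portirqmask[port] */

static unsigned intr_mask = PORT_IRQ_BIT;	/* shadow of the hardware mask */
static int rx_pending = 25;			/* frames waiting in the ring */

/* Interrupt-handler side: mask the port's rx bit, then schedule polling. */
static int irq_handler(void)
{
	intr_mask &= ~PORT_IRQ_BIT;		/* no more rx IRQs until poll is done */
	printf("irq: masked rx, scheduling poll\n");
	return 1;				/* "netif_rx_schedule" */
}

/* Poll side: process up to 'budget' frames; re-enable the IRQ only when
 * the ring was drained within budget. Returns nonzero if more work remains
 * (the poll routine will be called again). */
static int poll(int budget)
{
	int done = 0;

	while (done < budget && rx_pending > 0) {
		rx_pending--;
		done++;
	}
	printf("poll: handled %d, %d left\n", done, rx_pending);

	if (done >= budget)
		return 1;			/* keep polling, IRQ stays masked */

	intr_mask |= PORT_IRQ_BIT;		/* drained: turn the rx IRQ back on */
	printf("poll: complete, rx IRQ unmasked (mask=0x%x)\n", intr_mask);
	return 0;
}

int main(void)
{
	irq_handler();
	while (poll(16))			/* budget of 16 frames per pass */
		;
	return 0;
}
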
@@ -2657,7 +2607,7 @@ static inline void skge_tx_intr(struct net_device *dev)
2657 struct skge_element *e; 2607 struct skge_element *e;
2658 2608
2659 spin_lock(&skge->tx_lock); 2609 spin_lock(&skge->tx_lock);
2660 for(e = ring->to_clean; e != ring->to_use; e = e->next) { 2610 for (e = ring->to_clean; e != ring->to_use; e = e->next) {
2661 struct skge_tx_desc *td = e->desc; 2611 struct skge_tx_desc *td = e->desc;
2662 u32 control; 2612 u32 control;
2663 2613
@@ -2690,12 +2640,12 @@ static void skge_mac_parity(struct skge_hw *hw, int port)
2690 : (port == 0 ? "(port A)" : "(port B)")); 2640 : (port == 0 ? "(port A)" : "(port B)"));
2691 2641
2692 if (hw->chip_id == CHIP_ID_GENESIS) 2642 if (hw->chip_id == CHIP_ID_GENESIS)
2693 skge_write16(hw, SKGEMAC_REG(port, TX_MFF_CTRL1), 2643 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
2694 MFF_CLR_PERR); 2644 MFF_CLR_PERR);
2695 else 2645 else
2696 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */ 2646 /* HW-Bug #8: cleared by GMF_CLI_TX_FC instead of GMF_CLI_TX_PE */
2697 skge_write8(hw, SKGEMAC_REG(port, TX_GMF_CTRL_T), 2647 skge_write8(hw, SK_REG(port, TX_GMF_CTRL_T),
2698 (hw->chip_id == CHIP_ID_YUKON && chip_rev(hw) == 0) 2648 (hw->chip_id == CHIP_ID_YUKON && hw->chip_rev == 0)
2699 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE); 2649 ? GMF_CLI_TX_FC : GMF_CLI_TX_PE);
2700} 2650}
2701 2651
@@ -2703,16 +2653,16 @@ static void skge_pci_clear(struct skge_hw *hw)
2703{ 2653{
2704 u16 status; 2654 u16 status;
2705 2655
2706 status = skge_read16(hw, SKGEPCI_REG(PCI_STATUS)); 2656 pci_read_config_word(hw->pdev, PCI_STATUS, &status);
2707 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON); 2657 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
2708 skge_write16(hw, SKGEPCI_REG(PCI_STATUS), 2658 pci_write_config_word(hw->pdev, PCI_STATUS,
2709 status | PCI_STATUS_ERROR_BITS); 2659 status | PCI_STATUS_ERROR_BITS);
2710 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF); 2660 skge_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
2711} 2661}
2712 2662
2713static void skge_mac_intr(struct skge_hw *hw, int port) 2663static void skge_mac_intr(struct skge_hw *hw, int port)
2714{ 2664{
2715 if (hw->chip_id == CHIP_ID_GENESIS) 2665 if (hw->chip_id == CHIP_ID_GENESIS)
2716 genesis_mac_intr(hw, port); 2666 genesis_mac_intr(hw, port);
2717 else 2667 else
2718 yukon_mac_intr(hw, port); 2668 yukon_mac_intr(hw, port);
@@ -2726,9 +2676,9 @@ static void skge_error_irq(struct skge_hw *hw)
2726 if (hw->chip_id == CHIP_ID_GENESIS) { 2676 if (hw->chip_id == CHIP_ID_GENESIS) {
2727 /* clear xmac errors */ 2677 /* clear xmac errors */
2728 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1)) 2678 if (hwstatus & (IS_NO_STAT_M1|IS_NO_TIST_M1))
2729 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT); 2679 skge_write16(hw, SK_REG(0, RX_MFF_CTRL1), MFF_CLR_INSTAT);
2730 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2)) 2680 if (hwstatus & (IS_NO_STAT_M2|IS_NO_TIST_M2))
2731 skge_write16(hw, SKGEMAC_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT); 2681 skge_write16(hw, SK_REG(0, RX_MFF_CTRL2), MFF_CLR_INSTAT);
2732 } else { 2682 } else {
2733 /* Timestamp (unused) overflow */ 2683 /* Timestamp (unused) overflow */
2734 if (hwstatus & IS_IRQ_TIST_OV) 2684 if (hwstatus & IS_IRQ_TIST_OV)
@@ -2803,8 +2753,8 @@ static void skge_extirq(unsigned long data)
2803 2753
2804 if (hw->chip_id != CHIP_ID_GENESIS) 2754 if (hw->chip_id != CHIP_ID_GENESIS)
2805 yukon_phy_intr(skge); 2755 yukon_phy_intr(skge);
2806 else if (hw->phy_type == SK_PHY_BCOM) 2756 else
2807 genesis_bcom_intr(skge); 2757 bcom_phy_intr(skge);
2808 } 2758 }
2809 } 2759 }
2810 spin_unlock(&hw->phy_lock); 2760 spin_unlock(&hw->phy_lock);
@@ -2824,19 +2774,14 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2824 return IRQ_NONE; 2774 return IRQ_NONE;
2825 2775
2826 status &= hw->intr_mask; 2776 status &= hw->intr_mask;
2827 2777 if (status & IS_R1_F) {
2828 if ((status & IS_R1_F) && netif_rx_schedule_prep(hw->dev[0])) {
2829 status &= ~IS_R1_F;
2830 hw->intr_mask &= ~IS_R1_F; 2778 hw->intr_mask &= ~IS_R1_F;
2831 skge_write32(hw, B0_IMSK, hw->intr_mask); 2779 netif_rx_schedule(hw->dev[0]);
2832 __netif_rx_schedule(hw->dev[0]);
2833 } 2780 }
2834 2781
2835 if ((status & IS_R2_F) && netif_rx_schedule_prep(hw->dev[1])) { 2782 if (status & IS_R2_F) {
2836 status &= ~IS_R2_F;
2837 hw->intr_mask &= ~IS_R2_F; 2783 hw->intr_mask &= ~IS_R2_F;
2838 skge_write32(hw, B0_IMSK, hw->intr_mask); 2784 netif_rx_schedule(hw->dev[1]);
2839 __netif_rx_schedule(hw->dev[1]);
2840 } 2785 }
2841 2786
2842 if (status & IS_XA1_F) 2787 if (status & IS_XA1_F)
@@ -2845,9 +2790,27 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2845 if (status & IS_XA2_F) 2790 if (status & IS_XA2_F)
2846 skge_tx_intr(hw->dev[1]); 2791 skge_tx_intr(hw->dev[1]);
2847 2792
2793 if (status & IS_PA_TO_RX1) {
2794 struct skge_port *skge = netdev_priv(hw->dev[0]);
2795 ++skge->net_stats.rx_over_errors;
2796 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX1);
2797 }
2798
2799 if (status & IS_PA_TO_RX2) {
2800 struct skge_port *skge = netdev_priv(hw->dev[1]);
2801 ++skge->net_stats.rx_over_errors;
2802 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_RX2);
2803 }
2804
2805 if (status & IS_PA_TO_TX1)
2806 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX1);
2807
2808 if (status & IS_PA_TO_TX2)
2809 skge_write16(hw, B3_PA_CTRL, PA_CLR_TO_TX2);
2810
2848 if (status & IS_MAC1) 2811 if (status & IS_MAC1)
2849 skge_mac_intr(hw, 0); 2812 skge_mac_intr(hw, 0);
2850 2813
2851 if (status & IS_MAC2) 2814 if (status & IS_MAC2)
2852 skge_mac_intr(hw, 1); 2815 skge_mac_intr(hw, 1);
2853 2816
@@ -2859,8 +2822,7 @@ static irqreturn_t skge_intr(int irq, void *dev_id, struct pt_regs *regs)
2859 tasklet_schedule(&hw->ext_tasklet); 2822 tasklet_schedule(&hw->ext_tasklet);
2860 } 2823 }
2861 2824
2862 if (status) 2825 skge_write32(hw, B0_IMSK, hw->intr_mask);
2863 skge_write32(hw, B0_IMSK, hw->intr_mask);
2864 2826
2865 return IRQ_HANDLED; 2827 return IRQ_HANDLED;
2866} 2828}
@@ -2904,9 +2866,6 @@ static const struct {
2904 { CHIP_ID_YUKON, "Yukon" }, 2866 { CHIP_ID_YUKON, "Yukon" },
2905 { CHIP_ID_YUKON_LITE, "Yukon-Lite"}, 2867 { CHIP_ID_YUKON_LITE, "Yukon-Lite"},
2906 { CHIP_ID_YUKON_LP, "Yukon-LP"}, 2868 { CHIP_ID_YUKON_LP, "Yukon-LP"},
2907 { CHIP_ID_YUKON_XL, "Yukon-2 XL"},
2908 { CHIP_ID_YUKON_EC, "YUKON-2 EC"},
2909 { CHIP_ID_YUKON_FE, "YUKON-2 FE"},
2910}; 2869};
2911 2870
2912static const char *skge_board_name(const struct skge_hw *hw) 2871static const char *skge_board_name(const struct skge_hw *hw)
@@ -2930,8 +2889,8 @@ static const char *skge_board_name(const struct skge_hw *hw)
2930static int skge_reset(struct skge_hw *hw) 2889static int skge_reset(struct skge_hw *hw)
2931{ 2890{
2932 u16 ctst; 2891 u16 ctst;
2933 u8 t8; 2892 u8 t8, mac_cfg;
2934 int i, ports; 2893 int i;
2935 2894
2936 ctst = skge_read16(hw, B0_CTST); 2895 ctst = skge_read16(hw, B0_CTST);
2937 2896
@@ -2952,12 +2911,9 @@ static int skge_reset(struct skge_hw *hw)
2952 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf; 2911 hw->phy_type = skge_read8(hw, B2_E_1) & 0xf;
2953 hw->pmd_type = skge_read8(hw, B2_PMD_TYP); 2912 hw->pmd_type = skge_read8(hw, B2_PMD_TYP);
2954 2913
2955 switch(hw->chip_id) { 2914 switch (hw->chip_id) {
2956 case CHIP_ID_GENESIS: 2915 case CHIP_ID_GENESIS:
2957 switch (hw->phy_type) { 2916 switch (hw->phy_type) {
2958 case SK_PHY_XMAC:
2959 hw->phy_addr = PHY_ADDR_XMAC;
2960 break;
2961 case SK_PHY_BCOM: 2917 case SK_PHY_BCOM:
2962 hw->phy_addr = PHY_ADDR_BCOM; 2918 hw->phy_addr = PHY_ADDR_BCOM;
2963 break; 2919 break;
@@ -2986,8 +2942,9 @@ static int skge_reset(struct skge_hw *hw)
2986 return -EOPNOTSUPP; 2942 return -EOPNOTSUPP;
2987 } 2943 }
2988 2944
2989 hw->mac_cfg = skge_read8(hw, B2_MAC_CFG); 2945 mac_cfg = skge_read8(hw, B2_MAC_CFG);
2990 ports = isdualport(hw) ? 2 : 1; 2946 hw->ports = (mac_cfg & CFG_SNG_MAC) ? 1 : 2;
2947 hw->chip_rev = (mac_cfg & CFG_CHIP_R_MSK) >> 4;
2991 2948
2992 /* read the adapters RAM size */ 2949 /* read the adapters RAM size */
2993 t8 = skge_read8(hw, B2_E_0); 2950 t8 = skge_read8(hw, B2_E_0);
@@ -3010,9 +2967,9 @@ static int skge_reset(struct skge_hw *hw)
3010 /* switch power to VCC (WA for VAUX problem) */ 2967 /* switch power to VCC (WA for VAUX problem) */
3011 skge_write8(hw, B0_POWER_CTRL, 2968 skge_write8(hw, B0_POWER_CTRL,
3012 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON); 2969 PC_VAUX_ENA | PC_VCC_ENA | PC_VAUX_OFF | PC_VCC_ON);
3013 for (i = 0; i < ports; i++) { 2970 for (i = 0; i < hw->ports; i++) {
3014 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET); 2971 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_SET);
3015 skge_write16(hw, SKGEMAC_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR); 2972 skge_write16(hw, SK_REG(i, GMAC_LINK_CTRL), GMLC_RST_CLR);
3016 } 2973 }
3017 } 2974 }
3018 2975
@@ -3022,8 +2979,8 @@ static int skge_reset(struct skge_hw *hw)
3022 skge_write8(hw, B0_LED, LED_STAT_ON); 2979 skge_write8(hw, B0_LED, LED_STAT_ON);
3023 2980
3024 /* enable the Tx Arbiters */ 2981 /* enable the Tx Arbiters */
3025 for (i = 0; i < ports; i++) 2982 for (i = 0; i < hw->ports; i++)
3026 skge_write8(hw, SKGEMAC_REG(i, TXA_CTRL), TXA_ENA_ARB); 2983 skge_write8(hw, SK_REG(i, TXA_CTRL), TXA_ENA_ARB);
3027 2984
3028 /* Initialize ram interface */ 2985 /* Initialize ram interface */
3029 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR); 2986 skge_write16(hw, B3_RI_CTRL, RI_RST_CLR);
@@ -3050,16 +3007,14 @@ static int skge_reset(struct skge_hw *hw)
3050 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100)); 3007 skge_write32(hw, B2_IRQM_INI, skge_usecs2clk(hw, 100));
3051 skge_write32(hw, B2_IRQM_CTRL, TIM_START); 3008 skge_write32(hw, B2_IRQM_CTRL, TIM_START);
3052 3009
3053 hw->intr_mask = IS_HW_ERR | IS_EXT_REG | IS_PORT_1; 3010 hw->intr_mask = IS_HW_ERR | IS_EXT_REG;
3054 if (isdualport(hw))
3055 hw->intr_mask |= IS_PORT_2;
3056 skge_write32(hw, B0_IMSK, hw->intr_mask); 3011 skge_write32(hw, B0_IMSK, hw->intr_mask);
3057 3012
3058 if (hw->chip_id != CHIP_ID_GENESIS) 3013 if (hw->chip_id != CHIP_ID_GENESIS)
3059 skge_write8(hw, GMAC_IRQ_MSK, 0); 3014 skge_write8(hw, GMAC_IRQ_MSK, 0);
3060 3015
3061 spin_lock_bh(&hw->phy_lock); 3016 spin_lock_bh(&hw->phy_lock);
3062 for (i = 0; i < ports; i++) { 3017 for (i = 0; i < hw->ports; i++) {
3063 if (hw->chip_id == CHIP_ID_GENESIS) 3018 if (hw->chip_id == CHIP_ID_GENESIS)
3064 genesis_reset(hw, i); 3019 genesis_reset(hw, i);
3065 else 3020 else
@@ -3071,7 +3026,8 @@ static int skge_reset(struct skge_hw *hw)
3071} 3026}
3072 3027
3073/* Initialize network device */ 3028/* Initialize network device */
3074static struct net_device *skge_devinit(struct skge_hw *hw, int port) 3029static struct net_device *skge_devinit(struct skge_hw *hw, int port,
3030 int highmem)
3075{ 3031{
3076 struct skge_port *skge; 3032 struct skge_port *skge;
3077 struct net_device *dev = alloc_etherdev(sizeof(*skge)); 3033 struct net_device *dev = alloc_etherdev(sizeof(*skge));
@@ -3104,6 +3060,8 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3104#endif 3060#endif
3105 dev->irq = hw->pdev->irq; 3061 dev->irq = hw->pdev->irq;
3106 dev->features = NETIF_F_LLTX; 3062 dev->features = NETIF_F_LLTX;
3063 if (highmem)
3064 dev->features |= NETIF_F_HIGHDMA;
3107 3065
3108 skge = netdev_priv(dev); 3066 skge = netdev_priv(dev);
3109 skge->netdev = dev; 3067 skge->netdev = dev;
@@ -3117,7 +3075,7 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3117 skge->flow_control = FLOW_MODE_SYMMETRIC; 3075 skge->flow_control = FLOW_MODE_SYMMETRIC;
3118 skge->duplex = -1; 3076 skge->duplex = -1;
3119 skge->speed = -1; 3077 skge->speed = -1;
3120 skge->advertising = skge_modes(hw); 3078 skge->advertising = skge_supported_modes(hw);
3121 3079
3122 hw->dev[port] = dev; 3080 hw->dev[port] = dev;
3123 3081
@@ -3125,10 +3083,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port)
3125 3083
3126 spin_lock_init(&skge->tx_lock); 3084 spin_lock_init(&skge->tx_lock);
3127 3085
3128 init_timer(&skge->link_check);
3129 skge->link_check.function = skge_link_timer;
3130 skge->link_check.data = (unsigned long) skge;
3131
3132 init_timer(&skge->led_blink); 3086 init_timer(&skge->led_blink);
3133 skge->led_blink.function = skge_blink_timer; 3087 skge->led_blink.function = skge_blink_timer;
3134 skge->led_blink.data = (unsigned long) skge; 3088 skge->led_blink.data = (unsigned long) skge;
@@ -3232,14 +3186,11 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3232 3186
3233 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n", 3187 printk(KERN_INFO PFX "addr 0x%lx irq %d chip %s rev %d\n",
3234 pci_resource_start(pdev, 0), pdev->irq, 3188 pci_resource_start(pdev, 0), pdev->irq,
3235 skge_board_name(hw), chip_rev(hw)); 3189 skge_board_name(hw), hw->chip_rev);
3236 3190
3237 if ((dev = skge_devinit(hw, 0)) == NULL) 3191 if ((dev = skge_devinit(hw, 0, using_dac)) == NULL)
3238 goto err_out_led_off; 3192 goto err_out_led_off;
3239 3193
3240 if (using_dac)
3241 dev->features |= NETIF_F_HIGHDMA;
3242
3243 if ((err = register_netdev(dev))) { 3194 if ((err = register_netdev(dev))) {
3244 printk(KERN_ERR PFX "%s: cannot register net device\n", 3195 printk(KERN_ERR PFX "%s: cannot register net device\n",
3245 pci_name(pdev)); 3196 pci_name(pdev));
@@ -3248,10 +3199,7 @@ static int __devinit skge_probe(struct pci_dev *pdev,
3248 3199
3249 skge_show_addr(dev); 3200 skge_show_addr(dev);
3250 3201
3251 if (isdualport(hw) && (dev1 = skge_devinit(hw, 1))) { 3202 if (hw->ports > 1 && (dev1 = skge_devinit(hw, 1, using_dac))) {
3252 if (using_dac)
3253 dev1->features |= NETIF_F_HIGHDMA;
3254
3255 if (register_netdev(dev1) == 0) 3203 if (register_netdev(dev1) == 0)
3256 skge_show_addr(dev1); 3204 skge_show_addr(dev1);
3257 else { 3205 else {
@@ -3288,7 +3236,7 @@ static void __devexit skge_remove(struct pci_dev *pdev)
3288 struct skge_hw *hw = pci_get_drvdata(pdev); 3236 struct skge_hw *hw = pci_get_drvdata(pdev);
3289 struct net_device *dev0, *dev1; 3237 struct net_device *dev0, *dev1;
3290 3238
3291 if(!hw) 3239 if (!hw)
3292 return; 3240 return;
3293 3241
3294 if ((dev1 = hw->dev[1])) 3242 if ((dev1 = hw->dev[1]))
@@ -3316,7 +3264,7 @@ static int skge_suspend(struct pci_dev *pdev, u32 state)
3316 struct skge_hw *hw = pci_get_drvdata(pdev); 3264 struct skge_hw *hw = pci_get_drvdata(pdev);
3317 int i, wol = 0; 3265 int i, wol = 0;
3318 3266
3319 for(i = 0; i < 2; i++) { 3267 for (i = 0; i < 2; i++) {
3320 struct net_device *dev = hw->dev[i]; 3268 struct net_device *dev = hw->dev[i];
3321 3269
3322 if (dev) { 3270 if (dev) {
@@ -3349,11 +3297,11 @@ static int skge_resume(struct pci_dev *pdev)
3349 3297
3350 skge_reset(hw); 3298 skge_reset(hw);
3351 3299
3352 for(i = 0; i < 2; i++) { 3300 for (i = 0; i < 2; i++) {
3353 struct net_device *dev = hw->dev[i]; 3301 struct net_device *dev = hw->dev[i];
3354 if (dev) { 3302 if (dev) {
3355 netif_device_attach(dev); 3303 netif_device_attach(dev);
3356 if(netif_running(dev)) 3304 if (netif_running(dev))
3357 skge_up(dev); 3305 skge_up(dev);
3358 } 3306 }
3359 } 3307 }
diff --git a/drivers/net/skge.h b/drivers/net/skge.h
index 36c62b68fab4..14d0cc01fb9a 100644
--- a/drivers/net/skge.h
+++ b/drivers/net/skge.h
@@ -7,31 +7,6 @@
7/* PCI config registers */ 7/* PCI config registers */
8#define PCI_DEV_REG1 0x40 8#define PCI_DEV_REG1 0x40
9#define PCI_DEV_REG2 0x44 9#define PCI_DEV_REG2 0x44
10#ifndef PCI_VPD
11#define PCI_VPD 0x50
12#endif
13
14/* PCI_OUR_REG_2 32 bit Our Register 2 */
15enum {
16 PCI_VPD_WR_THR = 0xff<<24, /* Bit 31..24: VPD Write Threshold */
17 PCI_DEV_SEL = 0x7f<<17, /* Bit 23..17: EEPROM Device Select */
18 PCI_VPD_ROM_SZ = 7 <<14, /* Bit 16..14: VPD ROM Size */
19 /* Bit 13..12: reserved */
20 PCI_EN_DUMMY_RD = 1<<3, /* Enable Dummy Read */
21 PCI_REV_DESC = 1<<2, /* Reverse Desc. Bytes */
22 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
23};
24
25/* PCI_VPD_ADR_REG 16 bit VPD Address Register */
26enum {
27 PCI_VPD_FLAG = 1<<15, /* starts VPD rd/wr cycle */
28 PCI_VPD_ADR_MSK =0x7fffL, /* Bit 14.. 0: VPD Address Mask */
29 VPD_RES_ID = 0x82,
30 VPD_RES_READ = 0x90,
31 VPD_RES_WRITE = 0x81,
32 VPD_RES_END = 0x78,
33};
34
35 10
36#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 11#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
37 PCI_STATUS_SIG_SYSTEM_ERROR | \ 12 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -39,7 +14,6 @@ enum {
39 PCI_STATUS_REC_TARGET_ABORT | \ 14 PCI_STATUS_REC_TARGET_ABORT | \
40 PCI_STATUS_PARITY) 15 PCI_STATUS_PARITY)
41 16
42
43enum csr_regs { 17enum csr_regs {
44 B0_RAP = 0x0000, 18 B0_RAP = 0x0000,
45 B0_CTST = 0x0004, 19 B0_CTST = 0x0004,
@@ -229,8 +203,11 @@ enum {
229 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */ 203 IS_XA2_F = 1<<1, /* Q_XA2 End of Frame */
230 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */ 204 IS_XA2_C = 1<<0, /* Q_XA2 Encoding Error */
231 205
232 IS_PORT_1 = IS_XA1_F| IS_R1_F| IS_MAC1, 206 IS_TO_PORT1 = IS_PA_TO_RX1 | IS_PA_TO_TX1,
233 IS_PORT_2 = IS_XA2_F| IS_R2_F| IS_MAC2, 207 IS_TO_PORT2 = IS_PA_TO_RX2 | IS_PA_TO_TX2,
208
209 IS_PORT_1 = IS_XA1_F| IS_R1_F | IS_TO_PORT1 | IS_MAC1,
210 IS_PORT_2 = IS_XA2_F| IS_R2_F | IS_TO_PORT2 | IS_MAC2,
234}; 211};
235 212
236 213
@@ -288,14 +265,6 @@ enum {
288 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */ 265 CHIP_REV_YU_LITE_A3 = 7, /* Chip Rev. for YUKON-Lite A3 */
289}; 266};
290 267
291/* B2_LD_TEST 8 bit EPROM loader test register */
292enum {
293 LD_T_ON = 1<<3, /* Loader Test mode on */
294 LD_T_OFF = 1<<2, /* Loader Test mode off */
295 LD_T_STEP = 1<<1, /* Decrement FPROM addr. Counter */
296 LD_START = 1<<0, /* Start loading FPROM */
297};
298
299/* B2_TI_CTRL 8 bit Timer control */ 268/* B2_TI_CTRL 8 bit Timer control */
300/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */ 269/* B2_IRQM_CTRL 8 bit IRQ Moderation Timer Control */
301enum { 270enum {
@@ -313,16 +282,6 @@ enum {
313 TIM_T_STEP = 1<<0, /* Test step */ 282 TIM_T_STEP = 1<<0, /* Test step */
314}; 283};
315 284
316/* B28_DPT_INI 32 bit Descriptor Poll Timer Init Val */
317/* B28_DPT_VAL 32 bit Descriptor Poll Timer Curr Val */
318/* B28_DPT_CTRL 8 bit Descriptor Poll Timer Ctrl Reg */
319enum {
320 DPT_MSK = 0x00ffffffL, /* Bit 23.. 0: Desc Poll Timer Bits */
321
322 DPT_START = 1<<1, /* Start Descriptor Poll Timer */
323 DPT_STOP = 1<<0, /* Stop Descriptor Poll Timer */
324};
325
326/* B2_GP_IO 32 bit General Purpose I/O Register */ 285/* B2_GP_IO 32 bit General Purpose I/O Register */
327enum { 286enum {
328 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */ 287 GP_DIR_9 = 1<<25, /* IO_9 direct, 0=In/1=Out */
@@ -348,30 +307,6 @@ enum {
348 GP_IO_0 = 1<<0, /* IO_0 pin */ 307 GP_IO_0 = 1<<0, /* IO_0 pin */
349}; 308};
350 309
351/* Rx/Tx Path related Arbiter Test Registers */
352/* B3_MA_TO_TEST 16 bit MAC Arbiter Timeout Test Reg */
353/* B3_MA_RC_TEST 16 bit MAC Arbiter Recovery Test Reg */
354/* B3_PA_TEST 16 bit Packet Arbiter Test Register */
355/* Bit 15, 11, 7, and 3 are reserved in B3_PA_TEST */
356enum {
357 TX2_T_EV = 1<<15,/* TX2 Timeout/Recv Event occured */
358 TX2_T_ON = 1<<14,/* TX2 Timeout/Recv Timer Test On */
359 TX2_T_OFF = 1<<13,/* TX2 Timeout/Recv Timer Tst Off */
360 TX2_T_STEP = 1<<12,/* TX2 Timeout/Recv Timer Step */
361 TX1_T_EV = 1<<11,/* TX1 Timeout/Recv Event occured */
362 TX1_T_ON = 1<<10,/* TX1 Timeout/Recv Timer Test On */
363 TX1_T_OFF = 1<<9, /* TX1 Timeout/Recv Timer Tst Off */
364 TX1_T_STEP = 1<<8, /* TX1 Timeout/Recv Timer Step */
365 RX2_T_EV = 1<<7, /* RX2 Timeout/Recv Event occured */
366 RX2_T_ON = 1<<6, /* RX2 Timeout/Recv Timer Test On */
367 RX2_T_OFF = 1<<5, /* RX2 Timeout/Recv Timer Tst Off */
368 RX2_T_STEP = 1<<4, /* RX2 Timeout/Recv Timer Step */
369 RX1_T_EV = 1<<3, /* RX1 Timeout/Recv Event occured */
370 RX1_T_ON = 1<<2, /* RX1 Timeout/Recv Timer Test On */
371 RX1_T_OFF = 1<<1, /* RX1 Timeout/Recv Timer Tst Off */
372 RX1_T_STEP = 1<<0, /* RX1 Timeout/Recv Timer Step */
373};
374
375/* Descriptor Bit Definition */ 310/* Descriptor Bit Definition */
376/* TxCtrl Transmit Buffer Control Field */ 311/* TxCtrl Transmit Buffer Control Field */
377/* RxCtrl Receive Buffer Control Field */ 312/* RxCtrl Receive Buffer Control Field */
@@ -428,14 +363,6 @@ enum {
428 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */ 363 RI_RST_SET = 1<<0, /* Set RAM Interface Reset */
429}; 364};
430 365
431/* B3_RI_TEST 8 bit RAM Iface Test Register */
432enum {
433 RI_T_EV = 1<<3, /* Timeout Event occured */
434 RI_T_ON = 1<<2, /* Timeout Timer Test On */
435 RI_T_OFF = 1<<1, /* Timeout Timer Test Off */
436 RI_T_STEP = 1<<0, /* Timeout Timer Step */
437};
438
439/* MAC Arbiter Registers */ 366/* MAC Arbiter Registers */
440/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */ 367/* B3_MA_TO_CTRL 16 bit MAC Arbiter Timeout Ctrl Reg */
441enum { 368enum {
@@ -452,19 +379,6 @@ enum {
452#define SK_PKT_TO_MAX 0xffff /* Maximum value */ 379#define SK_PKT_TO_MAX 0xffff /* Maximum value */
453#define SK_RI_TO_53 36 /* RAM interface timeout */ 380#define SK_RI_TO_53 36 /* RAM interface timeout */
454 381
455
456/* B3_MA_RC_CTRL 16 bit MAC Arbiter Recovery Ctrl Reg */
457enum {
458 MA_ENA_REC_TX2 = 1<<7, /* Enable Recovery Timer TX2 */
459 MA_DIS_REC_TX2 = 1<<6, /* Disable Recovery Timer TX2 */
460 MA_ENA_REC_TX1 = 1<<5, /* Enable Recovery Timer TX1 */
461 MA_DIS_REC_TX1 = 1<<4, /* Disable Recovery Timer TX1 */
462 MA_ENA_REC_RX2 = 1<<3, /* Enable Recovery Timer RX2 */
463 MA_DIS_REC_RX2 = 1<<2, /* Disable Recovery Timer RX2 */
464 MA_ENA_REC_RX1 = 1<<1, /* Enable Recovery Timer RX1 */
465 MA_DIS_REC_RX1 = 1<<0, /* Disable Recovery Timer RX1 */
466};
467
468/* Packet Arbiter Registers */ 382/* Packet Arbiter Registers */
469/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */ 383/* B3_PA_CTRL 16 bit Packet Arbiter Ctrl Register */
470enum { 384enum {
@@ -488,7 +402,7 @@ enum {
488 PA_ENA_TO_TX1 | PA_ENA_TO_TX2) 402 PA_ENA_TO_TX1 | PA_ENA_TO_TX2)
489 403
490 404
491/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 405/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
492/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */ 406/* TXA_ITI_INI 32 bit Tx Arb Interval Timer Init Val */
493/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */ 407/* TXA_ITI_VAL 32 bit Tx Arb Interval Timer Value */
494/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */ 408/* TXA_LIM_INI 32 bit Tx Arb Limit Counter Init Val */
@@ -511,7 +425,7 @@ enum {
511/* 425/*
512 * Bank 4 - 5 426 * Bank 4 - 5
513 */ 427 */
514/* Transmit Arbiter Registers MAC 1 and 2, use MR_ADDR() to access */ 428/* Transmit Arbiter Registers MAC 1 and 2, use SK_REG() to access */
515enum { 429enum {
516 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/ 430 TXA_ITI_INI = 0x0200,/* 32 bit Tx Arb Interval Timer Init Val*/
517 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */ 431 TXA_ITI_VAL = 0x0204,/* 32 bit Tx Arb Interval Timer Value */
@@ -537,7 +451,7 @@ enum {
537 451
538/* Queue Register Offsets, use Q_ADDR() to access */ 452/* Queue Register Offsets, use Q_ADDR() to access */
539enum { 453enum {
540 B8_Q_REGS = 0x0400, /* base of Queue registers */ 454 B8_Q_REGS = 0x0400, /* base of Queue registers */
541 Q_D = 0x00, /* 8*32 bit Current Descriptor */ 455 Q_D = 0x00, /* 8*32 bit Current Descriptor */
542 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */ 456 Q_DA_L = 0x20, /* 32 bit Current Descriptor Address Low dWord */
543 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */ 457 Q_DA_H = 0x24, /* 32 bit Current Descriptor Address High dWord */
@@ -618,8 +532,7 @@ enum {
618enum { 532enum {
619 PHY_ADDR_XMAC = 0<<8, 533 PHY_ADDR_XMAC = 0<<8,
620 PHY_ADDR_BCOM = 1<<8, 534 PHY_ADDR_BCOM = 1<<8,
621 PHY_ADDR_LONE = 3<<8, 535
622 PHY_ADDR_NAT = 0<<8,
623/* GPHY address (bits 15..11 of SMI control reg) */ 536/* GPHY address (bits 15..11 of SMI control reg) */
624 PHY_ADDR_MARV = 0, 537 PHY_ADDR_MARV = 0,
625}; 538};
@@ -986,7 +899,7 @@ enum {
986 LINKLED_BLINK_OFF = 0x10, 899 LINKLED_BLINK_OFF = 0x10,
987 LINKLED_BLINK_ON = 0x20, 900 LINKLED_BLINK_ON = 0x20,
988}; 901};
989 902
990/* GMAC and GPHY Control Registers (YUKON only) */ 903/* GMAC and GPHY Control Registers (YUKON only) */
991enum { 904enum {
992 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */ 905 GMAC_CTRL = 0x0f00,/* 32 bit GMAC Control Reg */
@@ -1151,54 +1064,6 @@ enum {
1151 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */ 1064 PHY_MARV_FE_SPEC_2 = 0x1c,/* 16 bit r/w Specific Control Reg. 2 */
1152}; 1065};
1153 1066
1154/* Level One-PHY Registers, indirect addressed over XMAC */
1155enum {
1156 PHY_LONE_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1157 PHY_LONE_STAT = 0x01,/* 16 bit r/o PHY Status Register */
1158 PHY_LONE_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1159 PHY_LONE_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1160 PHY_LONE_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1161 PHY_LONE_AUNE_LP = 0x05,/* 16 bit r/o Link Part Ability Reg */
1162 PHY_LONE_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1163 PHY_LONE_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1164 PHY_LONE_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner */
1165 /* Level One-specific registers */
1166 PHY_LONE_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1167 PHY_LONE_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1168 PHY_LONE_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Reg */
1169 PHY_LONE_PORT_CFG = 0x10,/* 16 bit r/w Port Configuration Reg*/
1170 PHY_LONE_Q_STAT = 0x11,/* 16 bit r/o Quick Status Reg */
1171 PHY_LONE_INT_ENAB = 0x12,/* 16 bit r/w Interrupt Enable Reg */
1172 PHY_LONE_INT_STAT = 0x13,/* 16 bit r/o Interrupt Status Reg */
1173 PHY_LONE_LED_CFG = 0x14,/* 16 bit r/w LED Configuration Reg */
1174 PHY_LONE_PORT_CTRL = 0x15,/* 16 bit r/w Port Control Reg */
1175 PHY_LONE_CIM = 0x16,/* 16 bit r/o CIM Reg */
1176};
1177
1178/* National-PHY Registers, indirect addressed over XMAC */
1179enum {
1180 PHY_NAT_CTRL = 0x00,/* 16 bit r/w PHY Control Register */
1181 PHY_NAT_STAT = 0x01,/* 16 bit r/w PHY Status Register */
1182 PHY_NAT_ID0 = 0x02,/* 16 bit r/o PHY ID0 Register */
1183 PHY_NAT_ID1 = 0x03,/* 16 bit r/o PHY ID1 Register */
1184 PHY_NAT_AUNE_ADV = 0x04,/* 16 bit r/w Auto-Neg. Advertisement */
1185 PHY_NAT_AUNE_LP = 0x05,/* 16 bit r/o Link Partner Ability Reg */
1186 PHY_NAT_AUNE_EXP = 0x06,/* 16 bit r/o Auto-Neg. Expansion Reg */
1187 PHY_NAT_NEPG = 0x07,/* 16 bit r/w Next Page Register */
1188 PHY_NAT_NEPG_LP = 0x08,/* 16 bit r/o Next Page Link Partner Reg */
1189 /* National-specific registers */
1190 PHY_NAT_1000T_CTRL = 0x09,/* 16 bit r/w 1000Base-T Control Reg */
1191 PHY_NAT_1000T_STAT = 0x0a,/* 16 bit r/o 1000Base-T Status Reg */
1192 PHY_NAT_EXT_STAT = 0x0f,/* 16 bit r/o Extended Status Register */
1193 PHY_NAT_EXT_CTRL1 = 0x10,/* 16 bit r/o Extended Control Reg1 */
1194 PHY_NAT_Q_STAT1 = 0x11,/* 16 bit r/o Quick Status Reg1 */
1195 PHY_NAT_10B_OP = 0x12,/* 16 bit r/o 10Base-T Operations Reg */
1196 PHY_NAT_EXT_CTRL2 = 0x13,/* 16 bit r/o Extended Control Reg1 */
1197 PHY_NAT_Q_STAT2 = 0x14,/* 16 bit r/o Quick Status Reg2 */
1198
1199 PHY_NAT_PHY_ADDR = 0x19,/* 16 bit r/o PHY Address Register */
1200};
1201
1202enum { 1067enum {
1203 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */ 1068 PHY_CT_RESET = 1<<15, /* Bit 15: (sc) clear all PHY related regs */
1204 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */ 1069 PHY_CT_LOOP = 1<<14, /* Bit 14: enable Loopback over PHY */
@@ -1253,8 +1118,29 @@ enum {
1253 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */ 1118 PHY_MARV_ID1_Y2 = 0x0C91, /* Yukon-2 (PHY 88E1112) */
1254}; 1119};
1255 1120
1121/* Advertisement register bits */
1256enum { 1122enum {
1257 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */ 1123 PHY_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1124 PHY_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1125 PHY_AN_RF = 1<<13, /* Bit 13: Remote Fault Bits */
1126
1127 PHY_AN_PAUSE_ASYM = 1<<11,/* Bit 11: Try for asymmetric */
1128 PHY_AN_PAUSE_CAP = 1<<10, /* Bit 10: Try for pause */
1129 PHY_AN_100BASE4 = 1<<9, /* Bit 9: Try for 100mbps 4k packets */
1130 PHY_AN_100FULL = 1<<8, /* Bit 8: Try for 100mbps full-duplex */
1131 PHY_AN_100HALF = 1<<7, /* Bit 7: Try for 100mbps half-duplex */
1132 PHY_AN_10FULL = 1<<6, /* Bit 6: Try for 10mbps full-duplex */
1133 PHY_AN_10HALF = 1<<5, /* Bit 5: Try for 10mbps half-duplex */
1134 PHY_AN_CSMA = 1<<0, /* Bit 0: Only selector supported */
1135 PHY_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1136 PHY_AN_FULL = PHY_AN_100FULL | PHY_AN_10FULL | PHY_AN_CSMA,
1137 PHY_AN_ALL = PHY_AN_10HALF | PHY_AN_10FULL |
1138 PHY_AN_100HALF | PHY_AN_100FULL,
1139};
1140
1141/* Xmac Specific */
1142enum {
1143 PHY_X_AN_NXT_PG = 1<<15, /* Bit 15: Request Next Page */
1258 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */ 1144 PHY_X_AN_ACK = 1<<14, /* Bit 14: (ro) Acknowledge Received */
1259 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */ 1145 PHY_X_AN_RFB = 3<<12,/* Bit 13..12: Remote Fault Bits */
1260 1146
@@ -1263,82 +1149,6 @@ enum {
1263 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */ 1149 PHY_X_AN_FD = 1<<5, /* Bit 5: Full Duplex */
1264}; 1150};
1265 1151
1266enum {
1267 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1268
1269 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1270 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1271 PHY_B_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1272};
1273
1274enum {
1275 PHY_L_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1276 /* Bit 12: reserved */
1277 PHY_L_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1278 PHY_L_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1279
1280 PHY_L_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1281};
1282
1283/* PHY_NAT_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement */
1284/* PHY_NAT_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1285/* PHY_AN_NXT_PG (see XMAC) Bit 15: Request Next Page */
1286enum {
1287 PHY_N_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1288
1289 PHY_N_AN_100F = 1<<11, /* Bit 11: 100Base-T2 FD Support */
1290 PHY_N_AN_100H = 1<<10, /* Bit 10: 100Base-T2 HD Support */
1291
1292 PHY_N_AN_SEL = 0x1f, /* Bit 4..0: Selector Field, 00001=Ethernet*/
1293};
1294
1295/* field type definition for PHY_x_AN_SEL */
1296enum {
1297 PHY_SEL_TYPE = 1, /* 00001 = Ethernet */
1298};
1299
1300enum {
1301 PHY_ANE_LP_NP = 1<<3, /* Bit 3: Link Partner can Next Page */
1302 PHY_ANE_LOC_NP = 1<<2, /* Bit 2: Local PHY can Next Page */
1303 PHY_ANE_RX_PG = 1<<1, /* Bit 1: Page Received */
1304};
1305
1306enum {
1307 PHY_ANE_PAR_DF = 1<<4, /* Bit 4: Parallel Detection Fault */
1308
1309 PHY_ANE_LP_CAP = 1<<0, /* Bit 0: Link Partner Auto-Neg. Cap. */
1310};
1311
1312enum {
1313 PHY_NP_MORE = 1<<15, /* Bit 15: More, Next Pages to follow */
1314 PHY_NP_ACK1 = 1<<14, /* Bit 14: (ro) Ack1, for receiving a message */
1315 PHY_NP_MSG_VAL = 1<<13, /* Bit 13: Message Page valid */
1316 PHY_NP_ACK2 = 1<<12, /* Bit 12: Ack2, comply with msg content */
1317 PHY_NP_TOG = 1<<11, /* Bit 11: Toggle Bit, ensure sync */
1318 PHY_NP_MSG = 0x07ff, /* Bit 10..0: Message from/to Link Partner */
1319};
1320
1321enum {
1322 PHY_X_EX_FD = 1<<15, /* Bit 15: Device Supports Full Duplex */
1323 PHY_X_EX_HD = 1<<14, /* Bit 14: Device Supports Half Duplex */
1324};
1325
1326enum {
1327 PHY_X_RS_PAUSE = 3<<7,/* Bit 8..7: selected Pause Mode */
1328 PHY_X_RS_HD = 1<<6, /* Bit 6: Half Duplex Mode selected */
1329 PHY_X_RS_FD = 1<<5, /* Bit 5: Full Duplex Mode selected */
1330 PHY_X_RS_ABLMIS = 1<<4, /* Bit 4: duplex or pause cap mismatch */
1331 PHY_X_RS_PAUMIS = 1<<3, /* Bit 3: pause capability mismatch */
1332};
1333
1334/** Remote Fault Bits (PHY_X_AN_RFB) encoding */
1335enum {
1336 X_RFB_OK = 0<<12,/* Bit 13..12 No errors, Link OK */
1337 X_RFB_LF = 1<<12, /* Bit 13..12 Link Failure */
1338 X_RFB_OFF = 2<<12,/* Bit 13..12 Offline */
1339 X_RFB_AN_ERR = 3<<12,/* Bit 13..12 Auto-Negotiation Error */
1340};
1341
1342/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */ 1152/* Pause Bits (PHY_X_AN_PAUSE and PHY_X_RS_PAUSE) encoding */
1343enum { 1153enum {
1344 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */ 1154 PHY_X_P_NO_PAUSE = 0<<7,/* Bit 8..7: no Pause Mode */
@@ -1418,6 +1228,16 @@ enum {
1418 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */ 1228 PHY_B_PES_MLT3_ER = 1<<0, /* Bit 0: MLT3 code Error */
1419}; 1229};
1420 1230
1231/* PHY_BCOM_AUNE_ADV 16 bit r/w Auto-Negotiation Advertisement *****/
1232/* PHY_BCOM_AUNE_LP 16 bit r/o Link Partner Ability Reg *****/
1233enum {
1234 PHY_B_AN_RF = 1<<13, /* Bit 13: Remote Fault */
1235
1236 PHY_B_AN_ASP = 1<<11, /* Bit 11: Asymmetric Pause */
1237 PHY_B_AN_PC = 1<<10, /* Bit 10: Pause Capable */
1238};
1239
1240
1421/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/ 1241/***** PHY_BCOM_FC_CTR 16 bit r/w False Carrier Counter *****/
1422enum { 1242enum {
1423 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */ 1243 PHY_B_FC_CTR = 0xff, /* Bit 7..0: False Carrier Counter */
@@ -1478,7 +1298,9 @@ enum {
1478 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */ 1298 PHY_B_IS_LST_CHANGE = 1<<1, /* Bit 1: Link Status Changed */
1479 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */ 1299 PHY_B_IS_CRC_ER = 1<<0, /* Bit 0: CRC Error */
1480}; 1300};
1481#define PHY_B_DEF_MSK (~(PHY_B_IS_AN_PR | PHY_B_IS_LST_CHANGE)) 1301#define PHY_B_DEF_MSK \
1302 (~(PHY_B_IS_PSE | PHY_B_IS_AN_PR | PHY_B_IS_DUP_CHANGE | \
1303 PHY_B_IS_LSP_CHANGE | PHY_B_IS_LST_CHANGE))
1482 1304
1483/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */ 1305/* Pause Bits (PHY_B_AN_ASP and PHY_B_AN_PC) encoding */
1484enum { 1306enum {
@@ -1495,166 +1317,6 @@ enum {
1495 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */ 1317 PHY_B_RES_1000HD = 6<<8,/* Bit 10..8: 1000Base-T Half Dup. */
1496}; 1318};
1497 1319
1498/*
1499 * Level One-Specific
1500 */
1501/***** PHY_LONE_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1502enum {
1503 PHY_L_1000C_TEST = 7<<13,/* Bit 15..13: Test Modes */
1504 PHY_L_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1505 PHY_L_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1506 PHY_L_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1507 PHY_L_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1508 PHY_L_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1509};
1510
1511/***** PHY_LONE_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1512enum {
1513 PHY_L_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1514 PHY_L_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1515 PHY_L_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1516 PHY_L_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status */
1517 PHY_L_1000S_LP_FD = 1<<11, /* Bit 11: Link Partner can FD */
1518 PHY_L_1000S_LP_HD = 1<<10, /* Bit 10: Link Partner can HD */
1519
1520 PHY_L_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1521
1522/***** PHY_LONE_EXT_STAT 16 bit r/o Extended Status Register *****/
1523 PHY_L_ES_X_FD_CAP = 1<<15, /* Bit 15: 1000Base-X FD capable */
1524 PHY_L_ES_X_HD_CAP = 1<<14, /* Bit 14: 1000Base-X HD capable */
1525 PHY_L_ES_T_FD_CAP = 1<<13, /* Bit 13: 1000Base-T FD capable */
1526 PHY_L_ES_T_HD_CAP = 1<<12, /* Bit 12: 1000Base-T HD capable */
1527};
1528
1529/***** PHY_LONE_PORT_CFG 16 bit r/w Port Configuration Reg *****/
1530enum {
1531 PHY_L_PC_REP_MODE = 1<<15, /* Bit 15: Repeater Mode */
1532
1533 PHY_L_PC_TX_DIS = 1<<13, /* Bit 13: Tx output Disabled */
1534 PHY_L_PC_BY_SCR = 1<<12, /* Bit 12: Bypass Scrambler */
1535 PHY_L_PC_BY_45 = 1<<11, /* Bit 11: Bypass 4B5B-Decoder */
1536 PHY_L_PC_JAB_DIS = 1<<10, /* Bit 10: Jabber Disabled */
1537 PHY_L_PC_SQE = 1<<9, /* Bit 9: Enable Heartbeat */
1538 PHY_L_PC_TP_LOOP = 1<<8, /* Bit 8: TP Loopback */
1539 PHY_L_PC_SSS = 1<<7, /* Bit 7: Smart Speed Selection */
1540 PHY_L_PC_FIFO_SIZE = 1<<6, /* Bit 6: FIFO Size */
1541 PHY_L_PC_PRE_EN = 1<<5, /* Bit 5: Preamble Enable */
1542 PHY_L_PC_CIM = 1<<4, /* Bit 4: Carrier Integrity Mon */
1543 PHY_L_PC_10_SER = 1<<3, /* Bit 3: Use Serial Output */
1544 PHY_L_PC_ANISOL = 1<<2, /* Bit 2: Unisolate Port */
1545 PHY_L_PC_TEN_BIT = 1<<1, /* Bit 1: 10bit iface mode on */
1546 PHY_L_PC_ALTCLOCK = 1<<0, /* Bit 0: (ro) ALTCLOCK Mode on */
1547};
1548
1549/***** PHY_LONE_Q_STAT 16 bit r/o Quick Status Reg *****/
1550enum {
1551 PHY_L_QS_D_RATE = 3<<14,/* Bit 15..14: Data Rate */
1552 PHY_L_QS_TX_STAT = 1<<13, /* Bit 13: Transmitting */
1553 PHY_L_QS_RX_STAT = 1<<12, /* Bit 12: Receiving */
1554 PHY_L_QS_COL_STAT = 1<<11, /* Bit 11: Collision */
1555 PHY_L_QS_L_STAT = 1<<10, /* Bit 10: Link is up */
1556 PHY_L_QS_DUP_MOD = 1<<9, /* Bit 9: Full/Half Duplex */
1557 PHY_L_QS_AN = 1<<8, /* Bit 8: AutoNeg is On */
1558 PHY_L_QS_AN_C = 1<<7, /* Bit 7: AN is Complete */
1559 PHY_L_QS_LLE = 7<<4,/* Bit 6..4: Line Length Estim. */
1560 PHY_L_QS_PAUSE = 1<<3, /* Bit 3: LP advertised Pause */
1561 PHY_L_QS_AS_PAUSE = 1<<2, /* Bit 2: LP adv. asym. Pause */
1562 PHY_L_QS_ISOLATE = 1<<1, /* Bit 1: CIM Isolated */
1563 PHY_L_QS_EVENT = 1<<0, /* Bit 0: Event has occurred */
1564};
1565
1566/***** PHY_LONE_INT_ENAB 16 bit r/w Interrupt Enable Reg *****/
1567/***** PHY_LONE_INT_STAT 16 bit r/o Interrupt Status Reg *****/
1568enum {
1569 PHY_L_IS_AN_F = 1<<13, /* Bit 13: Auto-Negotiation fault */
1570 PHY_L_IS_CROSS = 1<<11, /* Bit 11: Crossover used */
1571 PHY_L_IS_POL = 1<<10, /* Bit 10: Polarity correct. used */
1572 PHY_L_IS_SS = 1<<9, /* Bit 9: Smart Speed Downgrade */
1573 PHY_L_IS_CFULL = 1<<8, /* Bit 8: Counter Full */
1574 PHY_L_IS_AN_C = 1<<7, /* Bit 7: AutoNeg Complete */
1575 PHY_L_IS_SPEED = 1<<6, /* Bit 6: Speed Changed */
1576 PHY_L_IS_DUP = 1<<5, /* Bit 5: Duplex Changed */
1577 PHY_L_IS_LS = 1<<4, /* Bit 4: Link Status Changed */
1578 PHY_L_IS_ISOL = 1<<3, /* Bit 3: Isolate Occured */
1579 PHY_L_IS_MDINT = 1<<2, /* Bit 2: (ro) STAT: MII Int Pending */
1580 PHY_L_IS_INTEN = 1<<1, /* Bit 1: ENAB: Enable IRQs */
1581 PHY_L_IS_FORCE = 1<<0, /* Bit 0: ENAB: Force Interrupt */
1582};
1583
1584/* int. mask */
1585#define PHY_L_DEF_MSK (PHY_L_IS_LS | PHY_L_IS_ISOL | PHY_L_IS_INTEN)
1586
1587/***** PHY_LONE_LED_CFG 16 bit r/w LED Configuration Reg *****/
1588enum {
1589 PHY_L_LC_LEDC = 3<<14,/* Bit 15..14: Col/Blink/On/Off */
1590 PHY_L_LC_LEDR = 3<<12,/* Bit 13..12: Rx/Blink/On/Off */
1591 PHY_L_LC_LEDT = 3<<10,/* Bit 11..10: Tx/Blink/On/Off */
1592 PHY_L_LC_LEDG = 3<<8,/* Bit 9..8: Giga/Blink/On/Off */
1593 PHY_L_LC_LEDS = 3<<6,/* Bit 7..6: 10-100/Blink/On/Off */
1594 PHY_L_LC_LEDL = 3<<4,/* Bit 5..4: Link/Blink/On/Off */
1595 PHY_L_LC_LEDF = 3<<2,/* Bit 3..2: Duplex/Blink/On/Off */
1596 PHY_L_LC_PSTRECH= 1<<1, /* Bit 1: Strech LED Pulses */
1597 PHY_L_LC_FREQ = 1<<0, /* Bit 0: 30/100 ms */
1598};
1599
1600/***** PHY_LONE_PORT_CTRL 16 bit r/w Port Control Reg *****/
1601enum {
1602 PHY_L_PC_TX_TCLK = 1<<15, /* Bit 15: Enable TX_TCLK */
1603 PHY_L_PC_ALT_NP = 1<<13, /* Bit 14: Alternate Next Page */
1604 PHY_L_PC_GMII_ALT= 1<<12, /* Bit 13: Alternate GMII driver */
1605 PHY_L_PC_TEN_CRS = 1<<10, /* Bit 10: Extend CRS*/
1606};
1607
1608/***** PHY_LONE_CIM 16 bit r/o CIM Reg *****/
1609enum {
1610 PHY_L_CIM_ISOL = 0xff<<8,/* Bit 15..8: Isolate Count */
1611 PHY_L_CIM_FALSE_CAR = 0xff, /* Bit 7..0: False Carrier Count */
1612};
1613
1614/*
1615 * Pause Bits (PHY_L_AN_ASP and PHY_L_AN_PC) encoding
1616 */
1617enum {
1618 PHY_L_P_NO_PAUSE= 0<<10,/* Bit 11..10: no Pause Mode */
1619 PHY_L_P_SYM_MD = 1<<10, /* Bit 11..10: symmetric Pause Mode */
1620 PHY_L_P_ASYM_MD = 2<<10,/* Bit 11..10: asymmetric Pause Mode */
1621 PHY_L_P_BOTH_MD = 3<<10,/* Bit 11..10: both Pause Mode */
1622};
1623
1624/*
1625 * National-Specific
1626 */
1627/***** PHY_NAT_1000T_CTRL 16 bit r/w 1000Base-T Control Reg *****/
1628enum {
1629 PHY_N_1000C_TEST= 7<<13,/* Bit 15..13: Test Modes */
1630 PHY_N_1000C_MSE = 1<<12, /* Bit 12: Master/Slave Enable */
1631 PHY_N_1000C_MSC = 1<<11, /* Bit 11: M/S Configuration */
1632 PHY_N_1000C_RD = 1<<10, /* Bit 10: Repeater/DTE */
1633 PHY_N_1000C_AFD = 1<<9, /* Bit 9: Advertise Full Duplex */
1634 PHY_N_1000C_AHD = 1<<8, /* Bit 8: Advertise Half Duplex */
1635 PHY_N_1000C_APC = 1<<7, /* Bit 7: Asymmetric Pause Cap. */};
1636
1637
1638/***** PHY_NAT_1000T_STAT 16 bit r/o 1000Base-T Status Reg *****/
1639enum {
1640 PHY_N_1000S_MSF = 1<<15, /* Bit 15: Master/Slave Fault */
1641 PHY_N_1000S_MSR = 1<<14, /* Bit 14: Master/Slave Result */
1642 PHY_N_1000S_LRS = 1<<13, /* Bit 13: Local Receiver Status */
1643 PHY_N_1000S_RRS = 1<<12, /* Bit 12: Remote Receiver Status*/
1644 PHY_N_1000S_LP_FD= 1<<11, /* Bit 11: Link Partner can FD */
1645 PHY_N_1000S_LP_HD= 1<<10, /* Bit 10: Link Partner can HD */
1646 PHY_N_1000C_LP_APC= 1<<9, /* Bit 9: LP Asym. Pause Cap. */
1647 PHY_N_1000S_IEC = 0xff, /* Bit 7..0: Idle Error Count */
1648};
1649
1650/***** PHY_NAT_EXT_STAT 16 bit r/o Extended Status Register *****/
1651enum {
1652 PHY_N_ES_X_FD_CAP= 1<<15, /* Bit 15: 1000Base-X FD capable */
1653 PHY_N_ES_X_HD_CAP= 1<<14, /* Bit 14: 1000Base-X HD capable */
1654 PHY_N_ES_T_FD_CAP= 1<<13, /* Bit 13: 1000Base-T FD capable */
1655 PHY_N_ES_T_HD_CAP= 1<<12, /* Bit 12: 1000Base-T HD capable */
1656};
1657
1658/** Marvell-Specific */ 1320/** Marvell-Specific */
1659enum { 1321enum {
1660 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */ 1322 PHY_M_AN_NXT_PG = 1<<15, /* Request Next Page */
@@ -1718,7 +1380,7 @@ enum {
1718 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */ 1380 PHY_M_PC_EN_DET_PLUS = 3<<8, /* Energy Detect Plus (Mode 2) */
1719}; 1381};
1720 1382
1721#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK) 1383#define PHY_M_PC_MDI_XMODE(x) (((x)<<5) & PHY_M_PC_MDIX_MSK)
1722 1384
1723enum { 1385enum {
1724 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */ 1386 PHY_M_PC_MAN_MDI = 0, /* 00 = Manual MDI configuration */
@@ -2105,7 +1767,7 @@ enum {
2105 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */ 1767 GM_GPSR_FC_RX_DIS = 1<<2, /* Bit 2: Rx Flow-Control Mode Disabled */
2106 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */ 1768 GM_GPSR_PROM_EN = 1<<1, /* Bit 1: Promiscuous Mode Enabled */
2107}; 1769};
2108 1770
2109/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */ 1771/* GM_GP_CTRL 16 bit r/w General Purpose Control Register */
2110enum { 1772enum {
2111 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */ 1773 GM_GPCR_PROM_ENA = 1<<14, /* Bit 14: Enable Promiscuous Mode */
@@ -2127,7 +1789,7 @@ enum {
2127 1789
2128#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100) 1790#define GM_GPCR_SPEED_1000 (GM_GPCR_GIGS_ENA | GM_GPCR_SPEED_100)
2129#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS) 1791#define GM_GPCR_AU_ALL_DIS (GM_GPCR_AU_DUP_DIS | GM_GPCR_AU_FCT_DIS|GM_GPCR_AU_SPD_DIS)
2130 1792
2131/* GM_TX_CTRL 16 bit r/w Transmit Control Register */ 1793/* GM_TX_CTRL 16 bit r/w Transmit Control Register */
2132enum { 1794enum {
2133 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */ 1795 GM_TXCR_FORCE_JAM = 1<<15, /* Bit 15: Force Jam / Flow-Control */
@@ -2138,7 +1800,7 @@ enum {
2138 1800
2139#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK) 1801#define TX_COL_THR(x) (((x)<<10) & GM_TXCR_COL_THR_MSK)
2140#define TX_COL_DEF 0x04 1802#define TX_COL_DEF 0x04
2141 1803
2142/* GM_RX_CTRL 16 bit r/w Receive Control Register */ 1804/* GM_RX_CTRL 16 bit r/w Receive Control Register */
2143enum { 1805enum {
2144 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */ 1806 GM_RXCR_UCF_ENA = 1<<15, /* Bit 15: Enable Unicast filtering */
@@ -2146,7 +1808,7 @@ enum {
2146 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */ 1808 GM_RXCR_CRC_DIS = 1<<13, /* Bit 13: Remove 4-byte CRC */
2147 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */ 1809 GM_RXCR_PASS_FC = 1<<12, /* Bit 12: Pass FC packets to FIFO */
2148}; 1810};
2149 1811
2150/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */ 1812/* GM_TX_PARAM 16 bit r/w Transmit Parameter Register */
2151enum { 1813enum {
2152 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */ 1814 GM_TXPA_JAMLEN_MSK = 0x03<<14, /* Bit 15..14: Jam Length */
@@ -2171,7 +1833,7 @@ enum {
2171 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */ 1833 GM_SMOD_JUMBO_ENA = 1<<8, /* Bit 8: Enable Jumbo (Max. Frame Len) */
2172 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */ 1834 GM_SMOD_IPG_MSK = 0x1f /* Bit 4..0: Inter-Packet Gap (IPG) */
2173}; 1835};
2174 1836
2175#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK) 1837#define DATA_BLIND_VAL(x) (((x)<<11) & GM_SMOD_DATABL_MSK)
2176#define DATA_BLIND_DEF 0x04 1838#define DATA_BLIND_DEF 0x04
2177 1839
@@ -2186,7 +1848,7 @@ enum {
2186 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */ 1848 GM_SMI_CT_RD_VAL = 1<<4, /* Bit 4: Read Valid (Read completed) */
2187 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */ 1849 GM_SMI_CT_BUSY = 1<<3, /* Bit 3: Busy (Operation in progress) */
2188}; 1850};
2189 1851
2190#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK) 1852#define GM_SMI_CT_PHY_AD(x) (((x)<<11) & GM_SMI_CT_PHY_A_MSK)
2191#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK) 1853#define GM_SMI_CT_REG_AD(x) (((x)<<6) & GM_SMI_CT_REG_A_MSK)
2192 1854
@@ -2195,7 +1857,7 @@ enum {
2195 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */ 1857 GM_PAR_MIB_CLR = 1<<5, /* Bit 5: Set MIB Clear Counter Mode */
2196 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */ 1858 GM_PAR_MIB_TST = 1<<4, /* Bit 4: MIB Load Counter (Test Mode) */
2197}; 1859};
2198 1860
2199/* Receive Frame Status Encoding */ 1861/* Receive Frame Status Encoding */
2200enum { 1862enum {
2201 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */ 1863 GMR_FS_LEN = 0xffff<<16, /* Bit 31..16: Rx Frame Length */
@@ -2217,12 +1879,12 @@ enum {
2217/* 1879/*
2218 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR) 1880 * GMR_FS_ANY_ERR (analogous to XMR_FS_ANY_ERR)
2219 */ 1881 */
2220 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR | 1882 GMR_FS_ANY_ERR = GMR_FS_CRC_ERR | GMR_FS_LONG_ERR |
2221 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC | 1883 GMR_FS_MII_ERR | GMR_FS_BAD_FC | GMR_FS_GOOD_FC |
2222 GMR_FS_JABBER, 1884 GMR_FS_JABBER,
2223/* Rx GMAC FIFO Flush Mask (default) */ 1885/* Rx GMAC FIFO Flush Mask (default) */
2224 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR | 1886 RX_FF_FL_DEF_MSK = GMR_FS_CRC_ERR | GMR_FS_RX_FF_OV |GMR_FS_MII_ERR |
2225 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE | 1887 GMR_FS_BAD_FC | GMR_FS_GOOD_FC | GMR_FS_UN_SIZE |
2226 GMR_FS_JABBER, 1888 GMR_FS_JABBER,
2227}; 1889};
2228 1890
@@ -2540,10 +2202,6 @@ enum {
2540}; 2202};
2541 2203
2542 2204
2543/* XM_PHY_ADDR 16 bit r/w PHY Address Register */
2544#define XM_PHY_ADDR_SZ 0x1f /* Bit 4..0: PHY Address bits */
2545
2546
2547/* XM_GP_PORT 32 bit r/w General Purpose Port Register */ 2205/* XM_GP_PORT 32 bit r/w General Purpose Port Register */
2548enum { 2206enum {
2549 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */ 2207 XM_GP_ANIP = 1<<6, /* Bit 6: (ro) Auto-Neg. in progress */
@@ -2662,8 +2320,8 @@ enum {
2662}; 2320};
2663 2321
2664#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I) 2322#define XM_PAUSE_MODE (XM_MD_SPOE_E | XM_MD_SPOL_I | XM_MD_SPOH_I)
2665#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\ 2323#define XM_DEF_MODE (XM_MD_RX_RUNT | XM_MD_RX_IRLE | XM_MD_RX_LONG |\
2666 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA | XM_MD_CAA) 2324 XM_MD_RX_CRCE | XM_MD_RX_ERR | XM_MD_CSA)
2667 2325
2668/* XM_STAT_CMD 16 bit r/w Statistics Command Register */ 2326/* XM_STAT_CMD 16 bit r/w Statistics Command Register */
2669enum { 2327enum {
@@ -2793,28 +2451,20 @@ struct skge_hw {
2793 u32 intr_mask; 2451 u32 intr_mask;
2794 struct net_device *dev[2]; 2452 struct net_device *dev[2];
2795 2453
2796 u8 mac_cfg;
2797 u8 chip_id; 2454 u8 chip_id;
2455 u8 chip_rev;
2798 u8 phy_type; 2456 u8 phy_type;
2799 u8 pmd_type; 2457 u8 pmd_type;
2800 u16 phy_addr; 2458 u16 phy_addr;
2459 u8 ports;
2801 2460
2802 u32 ram_size; 2461 u32 ram_size;
2803 u32 ram_offset; 2462 u32 ram_offset;
2804 2463
2805 struct tasklet_struct ext_tasklet; 2464 struct tasklet_struct ext_tasklet;
2806 spinlock_t phy_lock; 2465 spinlock_t phy_lock;
2807}; 2466};
2808 2467
2809static inline int isdualport(const struct skge_hw *hw)
2810{
2811 return !(hw->mac_cfg & CFG_SNG_MAC);
2812}
2813
2814static inline u8 chip_rev(const struct skge_hw *hw)
2815{
2816 return (hw->mac_cfg & CFG_CHIP_R_MSK) >> 4;
2817}
2818 2468
2819static inline int iscopper(const struct skge_hw *hw) 2469static inline int iscopper(const struct skge_hw *hw)
2820{ 2470{
@@ -2827,7 +2477,7 @@ enum {
2827 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */ 2477 FLOW_MODE_REM_SEND = 2, /* Symmetric or just remote */
2828 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */ 2478 FLOW_MODE_SYMMETRIC = 3, /* Both stations may send PAUSE */
2829}; 2479};
2830 2480
2831struct skge_port { 2481struct skge_port {
2832 u32 msg_enable; 2482 u32 msg_enable;
2833 struct skge_hw *hw; 2483 struct skge_hw *hw;
@@ -2853,8 +2503,8 @@ struct skge_port {
2853 void *mem; /* PCI memory for rings */ 2503 void *mem; /* PCI memory for rings */
2854 dma_addr_t dma; 2504 dma_addr_t dma;
2855 unsigned long mem_size; 2505 unsigned long mem_size;
2506 unsigned int rx_buf_size;
2856 2507
2857 struct timer_list link_check;
2858 struct timer_list led_blink; 2508 struct timer_list led_blink;
2859}; 2509};
2860 2510
@@ -2863,7 +2513,6 @@ struct skge_port {
2863static inline u32 skge_read32(const struct skge_hw *hw, int reg) 2513static inline u32 skge_read32(const struct skge_hw *hw, int reg)
2864{ 2514{
2865 return readl(hw->regs + reg); 2515 return readl(hw->regs + reg);
2866
2867} 2516}
2868 2517
2869static inline u16 skge_read16(const struct skge_hw *hw, int reg) 2518static inline u16 skge_read16(const struct skge_hw *hw, int reg)
@@ -2892,114 +2541,87 @@ static inline void skge_write8(const struct skge_hw *hw, int reg, u8 val)
2892} 2541}
2893 2542
2894/* MAC Related Registers inside the device. */ 2543/* MAC Related Registers inside the device. */
2895#define SKGEMAC_REG(port,reg) (((port)<<7)+(reg)) 2544#define SK_REG(port,reg) (((port)<<7)+(reg))
2896 2545#define SK_XMAC_REG(port, reg) \
2897/* PCI config space can be accessed via memory mapped space */
2898#define SKGEPCI_REG(reg) ((reg)+ 0x380)
2899
2900#define SKGEXM_REG(port, reg) \
2901 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1) 2546 ((BASE_XMAC_1 + (port) * (BASE_XMAC_2 - BASE_XMAC_1)) | (reg) << 1)
2902 2547
2903static inline u32 skge_xm_read32(const struct skge_hw *hw, int port, int reg) 2548static inline u32 xm_read32(const struct skge_hw *hw, int port, int reg)
2904{
2905 return skge_read32(hw, SKGEXM_REG(port,reg));
2906}
2907
2908static inline u16 skge_xm_read16(const struct skge_hw *hw, int port, int reg)
2909{ 2549{
2910 return skge_read16(hw, SKGEXM_REG(port,reg)); 2550 u32 v;
2551 v = skge_read16(hw, SK_XMAC_REG(port, reg));
2552 v |= (u32)skge_read16(hw, SK_XMAC_REG(port, reg+2)) << 16;
2553 return v;
2911} 2554}
2912 2555
2913static inline u8 skge_xm_read8(const struct skge_hw *hw, int port, int reg) 2556static inline u16 xm_read16(const struct skge_hw *hw, int port, int reg)
2914{ 2557{
2915 return skge_read8(hw, SKGEXM_REG(port,reg)); 2558 return skge_read16(hw, SK_XMAC_REG(port,reg));
2916} 2559}
2917 2560
2918static inline void skge_xm_write32(const struct skge_hw *hw, int port, int r, u32 v) 2561static inline void xm_write32(const struct skge_hw *hw, int port, int r, u32 v)
2919{ 2562{
2920 skge_write32(hw, SKGEXM_REG(port,r), v); 2563 skge_write16(hw, SK_XMAC_REG(port,r), v & 0xffff);
2564 skge_write16(hw, SK_XMAC_REG(port,r+2), v >> 16);
2921} 2565}
2922 2566
2923static inline void skge_xm_write16(const struct skge_hw *hw, int port, int r, u16 v) 2567static inline void xm_write16(const struct skge_hw *hw, int port, int r, u16 v)
2924{ 2568{
2925 skge_write16(hw, SKGEXM_REG(port,r), v); 2569 skge_write16(hw, SK_XMAC_REG(port,r), v);
2926} 2570}
2927 2571
2928static inline void skge_xm_write8(const struct skge_hw *hw, int port, int r, u8 v) 2572static inline void xm_outhash(const struct skge_hw *hw, int port, int reg,
2929{
2930 skge_write8(hw, SKGEXM_REG(port,r), v);
2931}
2932
2933static inline void skge_xm_outhash(const struct skge_hw *hw, int port, int reg,
2934 const u8 *hash) 2573 const u8 *hash)
2935{ 2574{
2936 skge_xm_write16(hw, port, reg, 2575 xm_write16(hw, port, reg, (u16)hash[0] | ((u16)hash[1] << 8));
2937 (u16)hash[0] | ((u16)hash[1] << 8)); 2576 xm_write16(hw, port, reg+2, (u16)hash[2] | ((u16)hash[3] << 8));
2938 skge_xm_write16(hw, port, reg+2, 2577 xm_write16(hw, port, reg+4, (u16)hash[4] | ((u16)hash[5] << 8));
2939 (u16)hash[2] | ((u16)hash[3] << 8)); 2578 xm_write16(hw, port, reg+6, (u16)hash[6] | ((u16)hash[7] << 8));
2940 skge_xm_write16(hw, port, reg+4,
2941 (u16)hash[4] | ((u16)hash[5] << 8));
2942 skge_xm_write16(hw, port, reg+6,
2943 (u16)hash[6] | ((u16)hash[7] << 8));
2944} 2579}
2945 2580
2946static inline void skge_xm_outaddr(const struct skge_hw *hw, int port, int reg, 2581static inline void xm_outaddr(const struct skge_hw *hw, int port, int reg,
2947 const u8 *addr) 2582 const u8 *addr)
2948{ 2583{
2949 skge_xm_write16(hw, port, reg, 2584 xm_write16(hw, port, reg, (u16)addr[0] | ((u16)addr[1] << 8));
2950 (u16)addr[0] | ((u16)addr[1] << 8)); 2585 xm_write16(hw, port, reg+2, (u16)addr[2] | ((u16)addr[3] << 8));
2951 skge_xm_write16(hw, port, reg, 2586 xm_write16(hw, port, reg+4, (u16)addr[4] | ((u16)addr[5] << 8));
2952 (u16)addr[2] | ((u16)addr[3] << 8));
2953 skge_xm_write16(hw, port, reg,
2954 (u16)addr[4] | ((u16)addr[5] << 8));
2955} 2587}
2956 2588
2589#define SK_GMAC_REG(port,reg) \
2590 (BASE_GMAC_1 + (port) * (BASE_GMAC_2-BASE_GMAC_1) + (reg))
2957 2591
2958#define SKGEGMA_REG(port,reg) \ 2592static inline u16 gma_read16(const struct skge_hw *hw, int port, int reg)
2959 ((reg) + BASE_GMAC_1 + \
2960 (port) * (BASE_GMAC_2-BASE_GMAC_1))
2961
2962static inline u16 skge_gma_read16(const struct skge_hw *hw, int port, int reg)
2963{ 2593{
2964 return skge_read16(hw, SKGEGMA_REG(port,reg)); 2594 return skge_read16(hw, SK_GMAC_REG(port,reg));
2965} 2595}
2966 2596
2967static inline u32 skge_gma_read32(const struct skge_hw *hw, int port, int reg) 2597static inline u32 gma_read32(const struct skge_hw *hw, int port, int reg)
2968{ 2598{
2969 return (u32) skge_read16(hw, SKGEGMA_REG(port,reg)) 2599 return (u32) skge_read16(hw, SK_GMAC_REG(port,reg))
2970 | ((u32)skge_read16(hw, SKGEGMA_REG(port,reg+4)) << 16); 2600 | ((u32)skge_read16(hw, SK_GMAC_REG(port,reg+4)) << 16);
2971} 2601}
2972 2602
2973static inline u8 skge_gma_read8(const struct skge_hw *hw, int port, int reg) 2603static inline void gma_write16(const struct skge_hw *hw, int port, int r, u16 v)
2974{ 2604{
2975 return skge_read8(hw, SKGEGMA_REG(port,reg)); 2605 skge_write16(hw, SK_GMAC_REG(port,r), v);
2976} 2606}
2977 2607
2978static inline void skge_gma_write16(const struct skge_hw *hw, int port, int r, u16 v) 2608static inline void gma_write32(const struct skge_hw *hw, int port, int r, u32 v)
2979{ 2609{
2980 skge_write16(hw, SKGEGMA_REG(port,r), v); 2610 skge_write16(hw, SK_GMAC_REG(port, r), (u16) v);
2611 skge_write32(hw, SK_GMAC_REG(port, r+4), (u16)(v >> 16));
2981} 2612}
2982 2613
2983static inline void skge_gma_write32(const struct skge_hw *hw, int port, int r, u32 v) 2614static inline void gma_write8(const struct skge_hw *hw, int port, int r, u8 v)
2984{ 2615{
2985 skge_write16(hw, SKGEGMA_REG(port, r), (u16) v); 2616 skge_write8(hw, SK_GMAC_REG(port,r), v);
2986 skge_write32(hw, SKGEGMA_REG(port, r+4), (u16)(v >> 16));
2987} 2617}
2988 2618
2989static inline void skge_gma_write8(const struct skge_hw *hw, int port, int r, u8 v) 2619static inline void gma_set_addr(struct skge_hw *hw, int port, int reg,
2990{
2991 skge_write8(hw, SKGEGMA_REG(port,r), v);
2992}
2993
2994static inline void skge_gm_set_addr(struct skge_hw *hw, int port, int reg,
2995 const u8 *addr) 2620 const u8 *addr)
2996{ 2621{
2997 skge_gma_write16(hw, port, reg, 2622 gma_write16(hw, port, reg, (u16) addr[0] | ((u16) addr[1] << 8));
2998 (u16) addr[0] | ((u16) addr[1] << 8)); 2623 gma_write16(hw, port, reg+4,(u16) addr[2] | ((u16) addr[3] << 8));
2999 skge_gma_write16(hw, port, reg+4, 2624 gma_write16(hw, port, reg+8,(u16) addr[4] | ((u16) addr[5] << 8));
3000 (u16) addr[2] | ((u16) addr[3] << 8));
3001 skge_gma_write16(hw, port, reg+8,
3002 (u16) addr[4] | ((u16) addr[5] << 8));
3003} 2625}
3004 2626
3005#endif 2627#endif
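The skge.h hunks above shorten the MAC register helpers (SKGEXM_REG/skge_xm_* become SK_XMAC_REG/xm_*, SKGEGMA_REG/skge_gma_* become SK_GMAC_REG/gma_*) and rebuild the 32-bit XMAC accessors out of two 16-bit operations, low half at reg and high half at reg+2. A minimal userspace sketch of that composition follows; fake_read16()/fake_write16() are inventions standing in for skge_read16()/skge_write16(), not driver functions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative userspace sketch (not driver code): composing a 32-bit
 * XMAC access from two 16-bit halves, the way the new xm_read32() and
 * xm_write32() helpers do. fake_read16()/fake_write16() stand in for
 * skge_read16()/skge_write16(). */
static uint16_t fake_regs[8];

static uint16_t fake_read16(int reg)
{
	return fake_regs[reg / 2];
}

static void fake_write16(int reg, uint16_t val)
{
	fake_regs[reg / 2] = val;
}

static uint32_t xm_read32_sketch(int reg)
{
	uint32_t v = fake_read16(reg);             /* low half at reg    */
	v |= (uint32_t)fake_read16(reg + 2) << 16; /* high half at reg+2 */
	return v;
}

static void xm_write32_sketch(int reg, uint32_t v)
{
	fake_write16(reg, v & 0xffff);             /* low 16 bits  */
	fake_write16(reg + 2, v >> 16);            /* high 16 bits */
}

int main(void)
{
	xm_write32_sketch(0, 0xdeadbeef);
	printf("read back 0x%08x\n", xm_read32_sketch(0)); /* 0xdeadbeef */
	return 0;
}

Presumably the split reflects the XMAC's 16-bit register interface; the GMAC helpers keep their existing 16-bit-pair layout (reg and reg+4) and only lose the skge_ prefix.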
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index 16363b5c6f56..404ea4297e32 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -74,6 +74,7 @@
74#include <linux/rtnetlink.h> 74#include <linux/rtnetlink.h>
75#include <linux/if_arp.h> 75#include <linux/if_arp.h>
76#include <linux/if_slip.h> 76#include <linux/if_slip.h>
77#include <linux/delay.h>
77#include <linux/init.h> 78#include <linux/init.h>
78#include "slip.h" 79#include "slip.h"
79#ifdef CONFIG_INET 80#ifdef CONFIG_INET
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index cfb9d3cdb04a..1438fdd20826 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1998,7 +1998,7 @@ static int __init smc_probe(struct net_device *dev, void __iomem *ioaddr)
1998 if (retval) 1998 if (retval)
1999 goto err_out; 1999 goto err_out;
2000 2000
2001 set_irq_type(dev->irq, IRQT_RISING); 2001 set_irq_type(dev->irq, SMC_IRQ_TRIGGER_TYPE);
2002 2002
2003#ifdef SMC_USE_PXA_DMA 2003#ifdef SMC_USE_PXA_DMA
2004 { 2004 {
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h
index 946528e6b742..7089d86e857a 100644
--- a/drivers/net/smc91x.h
+++ b/drivers/net/smc91x.h
@@ -182,6 +182,16 @@ SMC_outw(u16 val, void __iomem *ioaddr, int reg)
182#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l) 182#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
183#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) 183#define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l)
184 184
185#include <asm/mach-types.h>
186#include <asm/arch/cpu.h>
187
188#define SMC_IRQ_TRIGGER_TYPE (( \
189 machine_is_omap_h2() \
190 || machine_is_omap_h3() \
191 || (machine_is_omap_innovator() && !cpu_is_omap150()) \
192 ) ? IRQT_FALLING : IRQT_RISING)
193
194
185#elif defined(CONFIG_SH_SH4202_MICRODEV) 195#elif defined(CONFIG_SH_SH4202_MICRODEV)
186 196
187#define SMC_CAN_USE_8BIT 0 197#define SMC_CAN_USE_8BIT 0
@@ -300,6 +310,9 @@ static inline void SMC_outsw (unsigned long a, int r, unsigned char* p, int l)
300 310
301#endif 311#endif
302 312
313#ifndef SMC_IRQ_TRIGGER_TYPE
314#define SMC_IRQ_TRIGGER_TYPE IRQT_RISING
315#endif
303 316
304#ifdef SMC_USE_PXA_DMA 317#ifdef SMC_USE_PXA_DMA
305/* 318/*
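The smc91x changes above make the interrupt trigger polarity board-selectable: the listed OMAP boards get IRQT_FALLING, and a new #ifndef block keeps IRQT_RISING as the default everywhere else, which is the value smc_probe() now passes to set_irq_type(). A small standalone sketch of that fallback-default idiom; the IRQT_* values below are placeholders for this example only, not the real <asm/irq.h> constants.

#include <stdio.h>

/* Standalone sketch of the #ifndef fallback idiom used for
 * SMC_IRQ_TRIGGER_TYPE: a platform block may define the trigger first,
 * otherwise the generic default applies. Placeholder values only. */
#define IRQT_RISING  1
#define IRQT_FALLING 2

/* A platform section could do:
 * #define SMC_IRQ_TRIGGER_TYPE IRQT_FALLING
 */

#ifndef SMC_IRQ_TRIGGER_TYPE
#define SMC_IRQ_TRIGGER_TYPE IRQT_RISING	/* generic default */
#endif

int main(void)
{
	printf("SMC_IRQ_TRIGGER_TYPE = %d\n", SMC_IRQ_TRIGGER_TYPE);
	return 0;
}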
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 12e2b6826fa3..88b89dc95c77 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -1286,7 +1286,7 @@ static void init_ring(struct net_device *dev)
1286 np->rx_info[i].skb = skb; 1286 np->rx_info[i].skb = skb;
1287 if (skb == NULL) 1287 if (skb == NULL)
1288 break; 1288 break;
1289 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1289 np->rx_info[i].mapping = pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1290 skb->dev = dev; /* Mark as being used by this device. */ 1290 skb->dev = dev; /* Mark as being used by this device. */
1291 /* Grrr, we cannot offset to correctly align the IP header. */ 1291 /* Grrr, we cannot offset to correctly align the IP header. */
1292 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid); 1292 np->rx_ring[i].rxaddr = cpu_to_dma(np->rx_info[i].mapping | RxDescValid);
@@ -1572,7 +1572,7 @@ static int __netdev_rx(struct net_device *dev, int *quota)
1572 pci_dma_sync_single_for_cpu(np->pci_dev, 1572 pci_dma_sync_single_for_cpu(np->pci_dev,
1573 np->rx_info[entry].mapping, 1573 np->rx_info[entry].mapping,
1574 pkt_len, PCI_DMA_FROMDEVICE); 1574 pkt_len, PCI_DMA_FROMDEVICE);
1575 eth_copy_and_sum(skb, np->rx_info[entry].skb->tail, pkt_len, 0); 1575 eth_copy_and_sum(skb, np->rx_info[entry].skb->data, pkt_len, 0);
1576 pci_dma_sync_single_for_device(np->pci_dev, 1576 pci_dma_sync_single_for_device(np->pci_dev,
1577 np->rx_info[entry].mapping, 1577 np->rx_info[entry].mapping,
1578 pkt_len, PCI_DMA_FROMDEVICE); 1578 pkt_len, PCI_DMA_FROMDEVICE);
@@ -1696,7 +1696,7 @@ static void refill_rx_ring(struct net_device *dev)
1696 if (skb == NULL) 1696 if (skb == NULL)
1697 break; /* Better luck next round. */ 1697 break; /* Better luck next round. */
1698 np->rx_info[entry].mapping = 1698 np->rx_info[entry].mapping =
1699 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, PCI_DMA_FROMDEVICE); 1699 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz, PCI_DMA_FROMDEVICE);
1700 skb->dev = dev; /* Mark as being used by this device. */ 1700 skb->dev = dev; /* Mark as being used by this device. */
1701 np->rx_ring[entry].rxaddr = 1701 np->rx_ring[entry].rxaddr =
1702 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid); 1702 cpu_to_dma(np->rx_info[entry].mapping | RxDescValid);
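The skb->tail to skb->data substitution seen at starfire's receive-buffer mapping and copy sites repeats in the sundance, tulip, typhoon, via-rhine, via-velocity, hdlc_cisco and yellowfin hunks that follow. For a freshly allocated receive skb the two pointers are equal until skb_put() adds data, so mapping ->data is behaviourally the same while naming the intended thing, the start of packet data. A minimal userspace mock of that invariant; mock_skb and the mock_* helpers are illustrations, not the kernel's struct sk_buff API.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal mock of an sk_buff's buffer pointers (not the kernel struct),
 * showing why pci_map_single(skb->tail, ...) and
 * pci_map_single(skb->data, ...) are equivalent for a freshly allocated
 * receive buffer: until skb_put() adds data, data == tail. */
struct mock_skb {
	unsigned char *head, *data, *tail, *end;
};

static struct mock_skb *mock_alloc_skb(size_t size)
{
	struct mock_skb *skb = malloc(sizeof(*skb));
	skb->head = malloc(size);
	skb->data = skb->tail = skb->head;
	skb->end  = skb->head + size;
	return skb;
}

static void mock_skb_reserve(struct mock_skb *skb, int len)
{
	skb->data += len;	/* reserve moves data and tail together */
	skb->tail += len;
}

int main(void)
{
	struct mock_skb *skb = mock_alloc_skb(1536);

	mock_skb_reserve(skb, 2);	/* align the IP header, as the drivers do */
	assert(skb->data == skb->tail);	/* so mapping ->data is equivalent */
	printf("data == tail before skb_put(): yes\n");

	free(skb->head);
	free(skb);
	return 0;
}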
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 08cb7177a175..d500a5771dbc 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -1028,7 +1028,7 @@ static void init_ring(struct net_device *dev)
1028 skb->dev = dev; /* Mark as being used by this device. */ 1028 skb->dev = dev; /* Mark as being used by this device. */
1029 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 1029 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1030 np->rx_ring[i].frag[0].addr = cpu_to_le32( 1030 np->rx_ring[i].frag[0].addr = cpu_to_le32(
1031 pci_map_single(np->pci_dev, skb->tail, np->rx_buf_sz, 1031 pci_map_single(np->pci_dev, skb->data, np->rx_buf_sz,
1032 PCI_DMA_FROMDEVICE)); 1032 PCI_DMA_FROMDEVICE));
1033 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); 1033 np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
1034 } 1034 }
@@ -1341,7 +1341,7 @@ static void rx_poll(unsigned long data)
1341 np->rx_buf_sz, 1341 np->rx_buf_sz,
1342 PCI_DMA_FROMDEVICE); 1342 PCI_DMA_FROMDEVICE);
1343 1343
1344 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0); 1344 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1345 pci_dma_sync_single_for_device(np->pci_dev, 1345 pci_dma_sync_single_for_device(np->pci_dev,
1346 desc->frag[0].addr, 1346 desc->frag[0].addr,
1347 np->rx_buf_sz, 1347 np->rx_buf_sz,
@@ -1400,7 +1400,7 @@ static void refill_rx (struct net_device *dev)
1400 skb->dev = dev; /* Mark as being used by this device. */ 1400 skb->dev = dev; /* Mark as being used by this device. */
1401 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1401 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1402 np->rx_ring[entry].frag[0].addr = cpu_to_le32( 1402 np->rx_ring[entry].frag[0].addr = cpu_to_le32(
1403 pci_map_single(np->pci_dev, skb->tail, 1403 pci_map_single(np->pci_dev, skb->data,
1404 np->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1404 np->rx_buf_sz, PCI_DMA_FROMDEVICE));
1405 } 1405 }
1406 /* Perhaps we need not reset this field. */ 1406 /* Perhaps we need not reset this field. */
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index dd357dd8c370..fc353e348f9a 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -446,13 +446,13 @@ static void de_rx (struct de_private *de)
446 446
447 mapping = 447 mapping =
448 de->rx_skb[rx_tail].mapping = 448 de->rx_skb[rx_tail].mapping =
449 pci_map_single(de->pdev, copy_skb->tail, 449 pci_map_single(de->pdev, copy_skb->data,
450 buflen, PCI_DMA_FROMDEVICE); 450 buflen, PCI_DMA_FROMDEVICE);
451 de->rx_skb[rx_tail].skb = copy_skb; 451 de->rx_skb[rx_tail].skb = copy_skb;
452 } else { 452 } else {
453 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); 453 pci_dma_sync_single_for_cpu(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
454 skb_reserve(copy_skb, RX_OFFSET); 454 skb_reserve(copy_skb, RX_OFFSET);
455 memcpy(skb_put(copy_skb, len), skb->tail, len); 455 memcpy(skb_put(copy_skb, len), skb->data, len);
456 456
457 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE); 457 pci_dma_sync_single_for_device(de->pdev, mapping, len, PCI_DMA_FROMDEVICE);
458 458
@@ -1269,7 +1269,7 @@ static int de_refill_rx (struct de_private *de)
1269 skb->dev = de->dev; 1269 skb->dev = de->dev;
1270 1270
1271 de->rx_skb[i].mapping = pci_map_single(de->pdev, 1271 de->rx_skb[i].mapping = pci_map_single(de->pdev,
1272 skb->tail, de->rx_buf_sz, PCI_DMA_FROMDEVICE); 1272 skb->data, de->rx_buf_sz, PCI_DMA_FROMDEVICE);
1273 de->rx_skb[i].skb = skb; 1273 de->rx_skb[i].skb = skb;
1274 1274
1275 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn); 1275 de->rx_ring[i].opts1 = cpu_to_le32(DescOwn);
diff --git a/drivers/net/tulip/dmfe.c b/drivers/net/tulip/dmfe.c
index 7b899702ceb9..74e9075d9c48 100644
--- a/drivers/net/tulip/dmfe.c
+++ b/drivers/net/tulip/dmfe.c
@@ -945,8 +945,8 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
945 945
946 /* Received Packet CRC check need or not */ 946 /* Received Packet CRC check need or not */
947 if ( (db->dm910x_chk_mode & 1) && 947 if ( (db->dm910x_chk_mode & 1) &&
948 (cal_CRC(skb->tail, rxlen, 1) != 948 (cal_CRC(skb->data, rxlen, 1) !=
949 (*(u32 *) (skb->tail+rxlen) ))) { /* FIXME (?) */ 949 (*(u32 *) (skb->data+rxlen) ))) { /* FIXME (?) */
950 /* Found a error received packet */ 950 /* Found a error received packet */
951 dmfe_reuse_skb(db, rxptr->rx_skb_ptr); 951 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
952 db->dm910x_chk_mode = 3; 952 db->dm910x_chk_mode = 3;
@@ -959,7 +959,7 @@ static void dmfe_rx_packet(struct DEVICE *dev, struct dmfe_board_info * db)
959 /* size less than COPY_SIZE, allocate a rxlen SKB */ 959 /* size less than COPY_SIZE, allocate a rxlen SKB */
960 skb->dev = dev; 960 skb->dev = dev;
961 skb_reserve(skb, 2); /* 16byte align */ 961 skb_reserve(skb, 2); /* 16byte align */
962 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->tail, rxlen); 962 memcpy(skb_put(skb, rxlen), rxptr->rx_skb_ptr->data, rxlen);
963 dmfe_reuse_skb(db, rxptr->rx_skb_ptr); 963 dmfe_reuse_skb(db, rxptr->rx_skb_ptr);
964 } else { 964 } else {
965 skb->dev = dev; 965 skb->dev = dev;
@@ -1252,7 +1252,7 @@ static void dmfe_reuse_skb(struct dmfe_board_info *db, struct sk_buff * skb)
1252 1252
1253 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) { 1253 if (!(rxptr->rdes0 & cpu_to_le32(0x80000000))) {
1254 rxptr->rx_skb_ptr = skb; 1254 rxptr->rx_skb_ptr = skb;
1255 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1255 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1256 wmb(); 1256 wmb();
1257 rxptr->rdes0 = cpu_to_le32(0x80000000); 1257 rxptr->rdes0 = cpu_to_le32(0x80000000);
1258 db->rx_avail_cnt++; 1258 db->rx_avail_cnt++;
@@ -1463,7 +1463,7 @@ static void allocate_rx_buffer(struct dmfe_board_info *db)
1463 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL ) 1463 if ( ( skb = dev_alloc_skb(RX_ALLOC_SIZE) ) == NULL )
1464 break; 1464 break;
1465 rxptr->rx_skb_ptr = skb; /* FIXME (?) */ 1465 rxptr->rx_skb_ptr = skb; /* FIXME (?) */
1466 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->tail, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) ); 1466 rxptr->rdes2 = cpu_to_le32( pci_map_single(db->pdev, skb->data, RX_ALLOC_SIZE, PCI_DMA_FROMDEVICE) );
1467 wmb(); 1467 wmb();
1468 rxptr->rdes0 = cpu_to_le32(0x80000000); 1468 rxptr->rdes0 = cpu_to_le32(0x80000000);
1469 rxptr = rxptr->next_rx_desc; 1469 rxptr = rxptr->next_rx_desc;
diff --git a/drivers/net/tulip/interrupt.c b/drivers/net/tulip/interrupt.c
index afb5cda9d8e1..bb3558164a5b 100644
--- a/drivers/net/tulip/interrupt.c
+++ b/drivers/net/tulip/interrupt.c
@@ -78,7 +78,7 @@ int tulip_refill_rx(struct net_device *dev)
78 if (skb == NULL) 78 if (skb == NULL)
79 break; 79 break;
80 80
81 mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ, 81 mapping = pci_map_single(tp->pdev, skb->data, PKT_BUF_SZ,
82 PCI_DMA_FROMDEVICE); 82 PCI_DMA_FROMDEVICE);
83 tp->rx_buffers[entry].mapping = mapping; 83 tp->rx_buffers[entry].mapping = mapping;
84 84
@@ -199,12 +199,12 @@ int tulip_poll(struct net_device *dev, int *budget)
199 tp->rx_buffers[entry].mapping, 199 tp->rx_buffers[entry].mapping,
200 pkt_len, PCI_DMA_FROMDEVICE); 200 pkt_len, PCI_DMA_FROMDEVICE);
201#if ! defined(__alpha__) 201#if ! defined(__alpha__)
202 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail, 202 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
203 pkt_len, 0); 203 pkt_len, 0);
204 skb_put(skb, pkt_len); 204 skb_put(skb, pkt_len);
205#else 205#else
206 memcpy(skb_put(skb, pkt_len), 206 memcpy(skb_put(skb, pkt_len),
207 tp->rx_buffers[entry].skb->tail, 207 tp->rx_buffers[entry].skb->data,
208 pkt_len); 208 pkt_len);
209#endif 209#endif
210 pci_dma_sync_single_for_device(tp->pdev, 210 pci_dma_sync_single_for_device(tp->pdev,
@@ -423,12 +423,12 @@ static int tulip_rx(struct net_device *dev)
423 tp->rx_buffers[entry].mapping, 423 tp->rx_buffers[entry].mapping,
424 pkt_len, PCI_DMA_FROMDEVICE); 424 pkt_len, PCI_DMA_FROMDEVICE);
425#if ! defined(__alpha__) 425#if ! defined(__alpha__)
426 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail, 426 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
427 pkt_len, 0); 427 pkt_len, 0);
428 skb_put(skb, pkt_len); 428 skb_put(skb, pkt_len);
429#else 429#else
430 memcpy(skb_put(skb, pkt_len), 430 memcpy(skb_put(skb, pkt_len),
431 tp->rx_buffers[entry].skb->tail, 431 tp->rx_buffers[entry].skb->data,
432 pkt_len); 432 pkt_len);
433#endif 433#endif
434 pci_dma_sync_single_for_device(tp->pdev, 434 pci_dma_sync_single_for_device(tp->pdev,
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 08e0f80f89d5..d45d8f56e5b4 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -625,7 +625,7 @@ static void tulip_init_ring(struct net_device *dev)
625 tp->rx_buffers[i].skb = skb; 625 tp->rx_buffers[i].skb = skb;
626 if (skb == NULL) 626 if (skb == NULL)
627 break; 627 break;
628 mapping = pci_map_single(tp->pdev, skb->tail, 628 mapping = pci_map_single(tp->pdev, skb->data,
629 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 629 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
630 tp->rx_buffers[i].mapping = mapping; 630 tp->rx_buffers[i].mapping = mapping;
631 skb->dev = dev; /* Mark as being used by this device. */ 631 skb->dev = dev; /* Mark as being used by this device. */
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index db4b32c2369a..5b1af3986abf 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -849,7 +849,7 @@ static void init_rxtx_rings(struct net_device *dev)
849 if (skb == NULL) 849 if (skb == NULL)
850 break; 850 break;
851 skb->dev = dev; /* Mark as being used by this device. */ 851 skb->dev = dev; /* Mark as being used by this device. */
852 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->tail, 852 np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data,
853 skb->len,PCI_DMA_FROMDEVICE); 853 skb->len,PCI_DMA_FROMDEVICE);
854 854
855 np->rx_ring[i].buffer1 = np->rx_addr[i]; 855 np->rx_ring[i].buffer1 = np->rx_addr[i];
@@ -1269,7 +1269,7 @@ static int netdev_rx(struct net_device *dev)
1269 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry], 1269 pci_dma_sync_single_for_cpu(np->pci_dev,np->rx_addr[entry],
1270 np->rx_skbuff[entry]->len, 1270 np->rx_skbuff[entry]->len,
1271 PCI_DMA_FROMDEVICE); 1271 PCI_DMA_FROMDEVICE);
1272 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0); 1272 eth_copy_and_sum(skb, np->rx_skbuff[entry]->data, pkt_len, 0);
1273 skb_put(skb, pkt_len); 1273 skb_put(skb, pkt_len);
1274 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry], 1274 pci_dma_sync_single_for_device(np->pci_dev,np->rx_addr[entry],
1275 np->rx_skbuff[entry]->len, 1275 np->rx_skbuff[entry]->len,
@@ -1315,7 +1315,7 @@ static int netdev_rx(struct net_device *dev)
1315 break; /* Better luck next round. */ 1315 break; /* Better luck next round. */
1316 skb->dev = dev; /* Mark as being used by this device. */ 1316 skb->dev = dev; /* Mark as being used by this device. */
1317 np->rx_addr[entry] = pci_map_single(np->pci_dev, 1317 np->rx_addr[entry] = pci_map_single(np->pci_dev,
1318 skb->tail, 1318 skb->data,
1319 skb->len, PCI_DMA_FROMDEVICE); 1319 skb->len, PCI_DMA_FROMDEVICE);
1320 np->rx_ring[entry].buffer1 = np->rx_addr[entry]; 1320 np->rx_ring[entry].buffer1 = np->rx_addr[entry];
1321 } 1321 }
diff --git a/drivers/net/tulip/xircom_tulip_cb.c b/drivers/net/tulip/xircom_tulip_cb.c
index b8a9b395c5ea..887d7245fe7b 100644
--- a/drivers/net/tulip/xircom_tulip_cb.c
+++ b/drivers/net/tulip/xircom_tulip_cb.c
@@ -899,7 +899,7 @@ static void xircom_init_ring(struct net_device *dev)
899 break; 899 break;
900 skb->dev = dev; /* Mark as being used by this device. */ 900 skb->dev = dev; /* Mark as being used by this device. */
901 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */ 901 tp->rx_ring[i].status = Rx0DescOwned; /* Owned by Xircom chip */
902 tp->rx_ring[i].buffer1 = virt_to_bus(skb->tail); 902 tp->rx_ring[i].buffer1 = virt_to_bus(skb->data);
903 } 903 }
904 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 904 tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
905 905
@@ -1291,7 +1291,7 @@ xircom_rx(struct net_device *dev)
1291 if (skb == NULL) 1291 if (skb == NULL)
1292 break; 1292 break;
1293 skb->dev = dev; /* Mark as being used by this device. */ 1293 skb->dev = dev; /* Mark as being used by this device. */
1294 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->tail); 1294 tp->rx_ring[entry].buffer1 = virt_to_bus(skb->data);
1295 work_done++; 1295 work_done++;
1296 } 1296 }
1297 tp->rx_ring[entry].status = Rx0DescOwned; 1297 tp->rx_ring[entry].status = Rx0DescOwned;
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 8f3392989a06..0b5ca2537963 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1661,7 +1661,7 @@ typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
1661#endif 1661#endif
1662 1662
1663 skb->dev = tp->dev; 1663 skb->dev = tp->dev;
1664 dma_addr = pci_map_single(tp->pdev, skb->tail, 1664 dma_addr = pci_map_single(tp->pdev, skb->data,
1665 PKT_BUF_SZ, PCI_DMA_FROMDEVICE); 1665 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
1666 1666
1667 /* Since no card does 64 bit DAC, the high bits will never 1667 /* Since no card does 64 bit DAC, the high bits will never
@@ -1721,7 +1721,7 @@ typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
1721 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, 1721 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr,
1722 PKT_BUF_SZ, 1722 PKT_BUF_SZ,
1723 PCI_DMA_FROMDEVICE); 1723 PCI_DMA_FROMDEVICE);
1724 eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0); 1724 eth_copy_and_sum(new_skb, skb->data, pkt_len, 0);
1725 pci_dma_sync_single_for_device(tp->pdev, dma_addr, 1725 pci_dma_sync_single_for_device(tp->pdev, dma_addr,
1726 PKT_BUF_SZ, 1726 PKT_BUF_SZ,
1727 PCI_DMA_FROMDEVICE); 1727 PCI_DMA_FROMDEVICE);
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index be1c1047b9ba..fc7738ffbfff 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -507,7 +507,7 @@ static struct net_device_stats *rhine_get_stats(struct net_device *dev);
507static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 507static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
508static struct ethtool_ops netdev_ethtool_ops; 508static struct ethtool_ops netdev_ethtool_ops;
509static int rhine_close(struct net_device *dev); 509static int rhine_close(struct net_device *dev);
510static void rhine_shutdown (struct device *gdev); 510static void rhine_shutdown (struct pci_dev *pdev);
511 511
512#define RHINE_WAIT_FOR(condition) do { \ 512#define RHINE_WAIT_FOR(condition) do { \
513 int i=1024; \ 513 int i=1024; \
@@ -990,7 +990,7 @@ static void alloc_rbufs(struct net_device *dev)
990 skb->dev = dev; /* Mark as being used by this device. */ 990 skb->dev = dev; /* Mark as being used by this device. */
991 991
992 rp->rx_skbuff_dma[i] = 992 rp->rx_skbuff_dma[i] =
993 pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz, 993 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
994 PCI_DMA_FROMDEVICE); 994 PCI_DMA_FROMDEVICE);
995 995
996 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]); 996 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
@@ -1518,7 +1518,7 @@ static void rhine_rx(struct net_device *dev)
1518 PCI_DMA_FROMDEVICE); 1518 PCI_DMA_FROMDEVICE);
1519 1519
1520 eth_copy_and_sum(skb, 1520 eth_copy_and_sum(skb,
1521 rp->rx_skbuff[entry]->tail, 1521 rp->rx_skbuff[entry]->data,
1522 pkt_len, 0); 1522 pkt_len, 0);
1523 skb_put(skb, pkt_len); 1523 skb_put(skb, pkt_len);
1524 pci_dma_sync_single_for_device(rp->pdev, 1524 pci_dma_sync_single_for_device(rp->pdev,
@@ -1561,7 +1561,7 @@ static void rhine_rx(struct net_device *dev)
1561 break; /* Better luck next round. */ 1561 break; /* Better luck next round. */
1562 skb->dev = dev; /* Mark as being used by this device. */ 1562 skb->dev = dev; /* Mark as being used by this device. */
1563 rp->rx_skbuff_dma[entry] = 1563 rp->rx_skbuff_dma[entry] =
1564 pci_map_single(rp->pdev, skb->tail, 1564 pci_map_single(rp->pdev, skb->data,
1565 rp->rx_buf_sz, 1565 rp->rx_buf_sz,
1566 PCI_DMA_FROMDEVICE); 1566 PCI_DMA_FROMDEVICE);
1567 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]); 1567 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
@@ -1895,9 +1895,8 @@ static void __devexit rhine_remove_one(struct pci_dev *pdev)
1895 pci_set_drvdata(pdev, NULL); 1895 pci_set_drvdata(pdev, NULL);
1896} 1896}
1897 1897
1898static void rhine_shutdown (struct device *gendev) 1898static void rhine_shutdown (struct pci_dev *pdev)
1899{ 1899{
1900 struct pci_dev *pdev = to_pci_dev(gendev);
1901 struct net_device *dev = pci_get_drvdata(pdev); 1900 struct net_device *dev = pci_get_drvdata(pdev);
1902 struct rhine_private *rp = netdev_priv(dev); 1901 struct rhine_private *rp = netdev_priv(dev);
1903 void __iomem *ioaddr = rp->base; 1902 void __iomem *ioaddr = rp->base;
@@ -1956,7 +1955,7 @@ static int rhine_suspend(struct pci_dev *pdev, pm_message_t state)
1956 pci_save_state(pdev); 1955 pci_save_state(pdev);
1957 1956
1958 spin_lock_irqsave(&rp->lock, flags); 1957 spin_lock_irqsave(&rp->lock, flags);
1959 rhine_shutdown(&pdev->dev); 1958 rhine_shutdown(pdev);
1960 spin_unlock_irqrestore(&rp->lock, flags); 1959 spin_unlock_irqrestore(&rp->lock, flags);
1961 1960
1962 free_irq(dev->irq, dev); 1961 free_irq(dev->irq, dev);
@@ -2010,9 +2009,7 @@ static struct pci_driver rhine_driver = {
2010 .suspend = rhine_suspend, 2009 .suspend = rhine_suspend,
2011 .resume = rhine_resume, 2010 .resume = rhine_resume,
2012#endif /* CONFIG_PM */ 2011#endif /* CONFIG_PM */
2013 .driver = { 2012 .shutdown = rhine_shutdown,
2014 .shutdown = rhine_shutdown,
2015 }
2016}; 2013};
2017 2014
2018 2015
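The via-rhine hunks above move the shutdown hook off the embedded struct device_driver, where it took a struct device * and needed a to_pci_dev() conversion, onto the pci_driver itself, where it receives the struct pci_dev * directly; rhine_suspend() now calls it with pdev as well. A standalone mock of the new hook shape; struct pci_dev and struct pci_driver below are cut-down stand-ins, not the kernel definitions.

#include <stdio.h>

/* Userspace mock (not kernel code) of the interface change: the shutdown
 * callback now lives directly in the PCI driver and takes a
 * struct pci_dev *, instead of hanging off .driver and taking a
 * struct device *. */
struct pci_dev { const char *name; };

struct pci_driver {
	const char *name;
	void (*shutdown)(struct pci_dev *pdev);	/* new-style hook */
};

static void rhine_shutdown_sketch(struct pci_dev *pdev)
{
	printf("shutting down %s\n", pdev->name);
}

static struct pci_driver rhine_driver_sketch = {
	.name     = "via-rhine",
	.shutdown = rhine_shutdown_sketch,
};

int main(void)
{
	struct pci_dev dev = { "0000:00:12.0" };

	/* The PCI core would invoke this at system shutdown;
	 * rhine_suspend() now also calls it with the pci_dev directly. */
	rhine_driver_sketch.shutdown(&dev);
	return 0;
}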
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 15e710283493..abc5cee6eedc 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1335,7 +1335,7 @@ static inline int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1335 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN) 1335 if (vptr->flags & VELOCITY_FLAGS_IP_ALIGN)
1336 skb_reserve(new_skb, 2); 1336 skb_reserve(new_skb, 2);
1337 1337
1338 memcpy(new_skb->data, rx_skb[0]->tail, pkt_size); 1338 memcpy(new_skb->data, rx_skb[0]->data, pkt_size);
1339 *rx_skb = new_skb; 1339 *rx_skb = new_skb;
1340 ret = 0; 1340 ret = 0;
1341 } 1341 }
@@ -1456,9 +1456,9 @@ static int velocity_alloc_rx_buf(struct velocity_info *vptr, int idx)
1456 * Do the gymnastics to get the buffer head for data at 1456 * Do the gymnastics to get the buffer head for data at
1457 * 64byte alignment. 1457 * 64byte alignment.
1458 */ 1458 */
1459 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->tail & 63); 1459 skb_reserve(rd_info->skb, (unsigned long) rd_info->skb->data & 63);
1460 rd_info->skb->dev = vptr->dev; 1460 rd_info->skb->dev = vptr->dev;
1461 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->tail, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE); 1461 rd_info->skb_dma = pci_map_single(vptr->pdev, rd_info->skb->data, vptr->rx_buf_sz, PCI_DMA_FROMDEVICE);
1462 1462
1463 /* 1463 /*
1464 * Fill in the descriptor to match 1464 * Fill in the descriptor to match
diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c
index c1b6896d7007..87496843681a 100644
--- a/drivers/net/wan/hdlc_cisco.c
+++ b/drivers/net/wan/hdlc_cisco.c
@@ -72,7 +72,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type,
72 } 72 }
73 skb_reserve(skb, 4); 73 skb_reserve(skb, 4);
74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0); 74 cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
75 data = (cisco_packet*)skb->tail; 75 data = (cisco_packet*)skb->data;
76 76
77 data->type = htonl(type); 77 data->type = htonl(type);
78 data->par1 = htonl(par1); 78 data->par1 = htonl(par1);
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 9da925430109..1c2506535f7e 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -786,7 +786,7 @@ static void yellowfin_init_ring(struct net_device *dev)
786 skb->dev = dev; /* Mark as being used by this device. */ 786 skb->dev = dev; /* Mark as being used by this device. */
787 skb_reserve(skb, 2); /* 16 byte align the IP header. */ 787 skb_reserve(skb, 2); /* 16 byte align the IP header. */
788 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, 788 yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
789 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 789 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
790 } 790 }
791 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); 791 yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
792 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); 792 yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
@@ -1111,7 +1111,7 @@ static int yellowfin_rx(struct net_device *dev)
1111 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr, 1111 pci_dma_sync_single_for_cpu(yp->pci_dev, desc->addr,
1112 yp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1112 yp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1113 desc_status = le32_to_cpu(desc->result_status) >> 16; 1113 desc_status = le32_to_cpu(desc->result_status) >> 16;
1114 buf_addr = rx_skb->tail; 1114 buf_addr = rx_skb->data;
1115 data_size = (le32_to_cpu(desc->dbdma_cmd) - 1115 data_size = (le32_to_cpu(desc->dbdma_cmd) -
1116 le32_to_cpu(desc->result_status)) & 0xffff; 1116 le32_to_cpu(desc->result_status)) & 0xffff;
1117 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2]))); 1117 frame_status = le16_to_cpu(get_unaligned((s16*)&(buf_addr[data_size - 2])));
@@ -1185,7 +1185,7 @@ static int yellowfin_rx(struct net_device *dev)
1185 break; 1185 break;
1186 skb->dev = dev; 1186 skb->dev = dev;
1187 skb_reserve(skb, 2); /* 16 byte align the IP header */ 1187 skb_reserve(skb, 2); /* 16 byte align the IP header */
1188 eth_copy_and_sum(skb, rx_skb->tail, pkt_len, 0); 1188 eth_copy_and_sum(skb, rx_skb->data, pkt_len, 0);
1189 skb_put(skb, pkt_len); 1189 skb_put(skb, pkt_len);
1190 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr, 1190 pci_dma_sync_single_for_device(yp->pci_dev, desc->addr,
1191 yp->rx_buf_sz, 1191 yp->rx_buf_sz,
@@ -1211,7 +1211,7 @@ static int yellowfin_rx(struct net_device *dev)
1211 skb->dev = dev; /* Mark as being used by this device. */ 1211 skb->dev = dev; /* Mark as being used by this device. */
1212 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1212 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
1213 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, 1213 yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
1214 skb->tail, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1214 skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1215 } 1215 }
1216 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); 1216 yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP);
1217 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ 1217 yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */
diff --git a/drivers/parisc/dino.c b/drivers/parisc/dino.c
index b0d2a73d1d47..2f2dbef2c3b7 100644
--- a/drivers/parisc/dino.c
+++ b/drivers/parisc/dino.c
@@ -993,6 +993,7 @@ dino_driver_callback(struct parisc_device *dev)
993 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus, 993 bus = pci_scan_bus_parented(&dev->dev, dino_current_bus,
994 &dino_cfg_ops, NULL); 994 &dino_cfg_ops, NULL);
995 if(bus) { 995 if(bus) {
996 pci_bus_add_devices(bus);
996 /* This code *depends* on scanning being single threaded 997 /* This code *depends* on scanning being single threaded
997 * if it isn't, this global bus number count will fail 998 * if it isn't, this global bus number count will fail
998 */ 999 */
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index dc838804c0dd..7fdd80b7eb47 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -1570,6 +1570,8 @@ lba_driver_probe(struct parisc_device *dev)
1570 lba_bus = lba_dev->hba.hba_bus = 1570 lba_bus = lba_dev->hba.hba_bus =
1571 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start, 1571 pci_scan_bus_parented(&dev->dev, lba_dev->hba.bus_num.start,
1572 cfg_ops, NULL); 1572 cfg_ops, NULL);
1573 if (lba_bus)
1574 pci_bus_add_devices(lba_bus);
1573 1575
1574 /* This is in lieu of calling pci_assign_unassigned_resources() */ 1576 /* This is in lieu of calling pci_assign_unassigned_resources() */
1575 if (is_pdc_pat()) { 1577 if (is_pdc_pat()) {
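Both parisc host-bridge drivers above, dino and lba_pci, now follow a successful pci_scan_bus_parented() with an explicit pci_bus_add_devices() call, which appears to leave driver-core registration of the scanned devices to the caller rather than to the scan itself. The sketch below is a userspace mock of that two-phase scan-then-register pattern; mock_scan_bus() and mock_bus_add_devices() are stand-ins, not the PCI core API.

#include <stdio.h>

/* Userspace mock (not the kernel API) of the two-phase pattern the
 * parisc changes adopt: scanning discovers devices, and a separate
 * add-devices step registers them. */
struct mock_bus { int ndevs; int added; };

static struct mock_bus *mock_scan_bus(void)
{
	static struct mock_bus bus = { .ndevs = 3, .added = 0 };
	return &bus;		/* discovery only, nothing registered yet */
}

static void mock_bus_add_devices(struct mock_bus *bus)
{
	bus->added = bus->ndevs;	/* registration happens here */
}

int main(void)
{
	struct mock_bus *bus = mock_scan_bus();

	if (bus)			/* same guard as the lba_pci.c hunk */
		mock_bus_add_devices(bus);

	printf("%d of %d devices registered\n", bus->added, bus->ndevs);
	return 0;
}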
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index dbd33605cc10..fedae89d8f7d 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -121,10 +121,13 @@ void __devinit pci_bus_add_devices(struct pci_bus *bus)
121 * If there is an unattached subordinate bus, attach 121 * If there is an unattached subordinate bus, attach
122 * it and then scan for unattached PCI devices. 122 * it and then scan for unattached PCI devices.
123 */ 123 */
124 if (dev->subordinate && list_empty(&dev->subordinate->node)) { 124 if (dev->subordinate) {
125 spin_lock(&pci_bus_lock); 125 if (list_empty(&dev->subordinate->node)) {
126 list_add_tail(&dev->subordinate->node, &dev->bus->children); 126 spin_lock(&pci_bus_lock);
127 spin_unlock(&pci_bus_lock); 127 list_add_tail(&dev->subordinate->node,
128 &dev->bus->children);
129 spin_unlock(&pci_bus_lock);
130 }
128 pci_bus_add_devices(dev->subordinate); 131 pci_bus_add_devices(dev->subordinate);
129 132
130 sysfs_create_link(&dev->subordinate->class_dev.kobj, &dev->dev.kobj, "bridge"); 133 sysfs_create_link(&dev->subordinate->class_dev.kobj, &dev->dev.kobj, "bridge");
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 93c120ddbd39..3e632ff8c717 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -36,9 +36,7 @@ ibmphp-objs := ibmphp_core.o \
36 ibmphp_hpc.o 36 ibmphp_hpc.o
37 37
38acpiphp-objs := acpiphp_core.o \ 38acpiphp-objs := acpiphp_core.o \
39 acpiphp_glue.o \ 39 acpiphp_glue.o
40 acpiphp_pci.o \
41 acpiphp_res.o
42 40
43rpaphp-objs := rpaphp_core.o \ 41rpaphp-objs := rpaphp_core.o \
44 rpaphp_pci.o \ 42 rpaphp_pci.o \
diff --git a/drivers/pci/hotplug/acpiphp.h b/drivers/pci/hotplug/acpiphp.h
index d9499874c8a9..293603e1b7c3 100644
--- a/drivers/pci/hotplug/acpiphp.h
+++ b/drivers/pci/hotplug/acpiphp.h
@@ -7,6 +7,8 @@
7 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com) 7 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
8 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com) 8 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
9 * Copyright (C) 2002,2003 NEC Corporation 9 * Copyright (C) 2002,2003 NEC Corporation
10 * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
11 * Copyright (C) 2003-2005 Hewlett Packard
10 * 12 *
11 * All rights reserved. 13 * All rights reserved.
12 * 14 *
@@ -52,7 +54,6 @@
52 54
53struct acpiphp_bridge; 55struct acpiphp_bridge;
54struct acpiphp_slot; 56struct acpiphp_slot;
55struct pci_resource;
56 57
57/* 58/*
58 * struct slot - slot information for each *physical* slot 59 * struct slot - slot information for each *physical* slot
@@ -65,15 +66,6 @@ struct slot {
65 struct acpiphp_slot *acpi_slot; 66 struct acpiphp_slot *acpi_slot;
66}; 67};
67 68
68/*
69 * struct pci_resource - describes pci resource (mem, pfmem, io, bus)
70 */
71struct pci_resource {
72 struct pci_resource * next;
73 u64 base;
74 u32 length;
75};
76
77/** 69/**
78 * struct hpp_param - ACPI 2.0 _HPP Hot Plug Parameters 70 * struct hpp_param - ACPI 2.0 _HPP Hot Plug Parameters
79 * @cache_line_size in DWORD 71 * @cache_line_size in DWORD
@@ -101,10 +93,6 @@ struct acpiphp_bridge {
101 int type; 93 int type;
102 int nr_slots; 94 int nr_slots;
103 95
104 u8 seg;
105 u8 bus;
106 u8 sub;
107
108 u32 flags; 96 u32 flags;
109 97
110 /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */ 98 /* This bus (host bridge) or Secondary bus (PCI-to-PCI bridge) */
@@ -117,12 +105,6 @@ struct acpiphp_bridge {
117 struct hpp_param hpp; 105 struct hpp_param hpp;
118 106
119 spinlock_t res_lock; 107 spinlock_t res_lock;
120
121 /* available resources on this bus */
122 struct pci_resource *mem_head;
123 struct pci_resource *p_mem_head;
124 struct pci_resource *io_head;
125 struct pci_resource *bus_head;
126}; 108};
127 109
128 110
@@ -163,12 +145,6 @@ struct acpiphp_func {
163 145
164 u8 function; /* pci function# */ 146 u8 function; /* pci function# */
165 u32 flags; /* see below */ 147 u32 flags; /* see below */
166
167 /* resources used for this function */
168 struct pci_resource *mem_head;
169 struct pci_resource *p_mem_head;
170 struct pci_resource *io_head;
171 struct pci_resource *bus_head;
172}; 148};
173 149
174/** 150/**
@@ -243,25 +219,6 @@ extern u8 acpiphp_get_latch_status (struct acpiphp_slot *slot);
243extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot); 219extern u8 acpiphp_get_adapter_status (struct acpiphp_slot *slot);
244extern u32 acpiphp_get_address (struct acpiphp_slot *slot); 220extern u32 acpiphp_get_address (struct acpiphp_slot *slot);
245 221
246/* acpiphp_pci.c */
247extern struct pci_dev *acpiphp_allocate_pcidev (struct pci_bus *pbus, int dev, int fn);
248extern int acpiphp_configure_slot (struct acpiphp_slot *slot);
249extern int acpiphp_configure_function (struct acpiphp_func *func);
250extern void acpiphp_unconfigure_function (struct acpiphp_func *func);
251extern int acpiphp_detect_pci_resource (struct acpiphp_bridge *bridge);
252extern int acpiphp_init_func_resource (struct acpiphp_func *func);
253
254/* acpiphp_res.c */
255extern struct pci_resource *acpiphp_get_io_resource (struct pci_resource **head, u32 size);
256extern struct pci_resource *acpiphp_get_resource (struct pci_resource **head, u32 size);
257extern struct pci_resource *acpiphp_get_resource_with_base (struct pci_resource **head, u64 base, u32 size);
258extern int acpiphp_resource_sort_and_combine (struct pci_resource **head);
259extern struct pci_resource *acpiphp_make_resource (u64 base, u32 length);
260extern void acpiphp_move_resource (struct pci_resource **from, struct pci_resource **to);
261extern void acpiphp_free_resource (struct pci_resource **res);
262extern void acpiphp_dump_resource (struct acpiphp_bridge *bridge); /* debug */
263extern void acpiphp_dump_func_resource (struct acpiphp_func *func); /* debug */
264
265/* variables */ 222/* variables */
266extern int acpiphp_debug; 223extern int acpiphp_debug;
267 224
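With acpiphp_pci.c and acpiphp_res.c dropped from the build, acpiphp.h above loses its private struct pci_resource lists on the bridge and function structures; the glue code that follows tracks devices through counted pci_get_slot()/pci_dev_get() references instead, as spelled out by the lifetime-rules comment added to acpiphp_glue.c below. A userspace mock of that get/put lifetime; mock_pci_dev and the mock_* helpers are illustrations, not the kernel refcounting API.

#include <assert.h>
#include <stdio.h>

/* Userspace mock (not the kernel API) of pci_get_slot()/pci_dev_put()
 * style refcounting: the func and bridge structures each hold a counted
 * reference for as long as the device is known to them, and drop it on
 * eject or bridge removal. */
struct mock_pci_dev { int refcnt; };

static struct mock_pci_dev *mock_pci_dev_get(struct mock_pci_dev *dev)
{
	dev->refcnt++;
	return dev;
}

static void mock_pci_dev_put(struct mock_pci_dev *dev)
{
	dev->refcnt--;
}

int main(void)
{
	struct mock_pci_dev dev = { .refcnt = 1 };	/* core's reference */

	struct mock_pci_dev *func_ref = mock_pci_dev_get(&dev);	/* slot scan */
	struct mock_pci_dev *bridge_ref = mock_pci_dev_get(&dev);	/* bridge scan */
	assert(dev.refcnt == 3);

	mock_pci_dev_put(func_ref);	/* device ejected */
	mock_pci_dev_put(bridge_ref);	/* bridge removed */
	assert(dev.refcnt == 1);	/* only the core's reference remains */

	printf("refcount back to %d\n", dev.refcnt);
	return 0;
}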
diff --git a/drivers/pci/hotplug/acpiphp_core.c b/drivers/pci/hotplug/acpiphp_core.c
index 4539e61a3dc1..60c4c38047a3 100644
--- a/drivers/pci/hotplug/acpiphp_core.c
+++ b/drivers/pci/hotplug/acpiphp_core.c
@@ -7,6 +7,8 @@
7 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com) 7 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
8 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com) 8 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
9 * Copyright (C) 2002,2003 NEC Corporation 9 * Copyright (C) 2002,2003 NEC Corporation
10 * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
11 * Copyright (C) 2003-2005 Hewlett Packard
10 * 12 *
11 * All rights reserved. 13 * All rights reserved.
12 * 14 *
@@ -53,8 +55,8 @@ int acpiphp_debug;
53static int num_slots; 55static int num_slots;
54static struct acpiphp_attention_info *attention_info; 56static struct acpiphp_attention_info *attention_info;
55 57
56#define DRIVER_VERSION "0.4" 58#define DRIVER_VERSION "0.5"
57#define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@us.ibm.com>, Takayoshi Kochi <t-kochi@bq.jp.nec.com>" 59#define DRIVER_AUTHOR "Greg Kroah-Hartman <gregkh@us.ibm.com>, Takayoshi Kochi <t-kochi@bq.jp.nec.com>, Matthew Wilcox <willy@hp.com>"
58#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver" 60#define DRIVER_DESC "ACPI Hot Plug PCI Controller Driver"
59 61
60MODULE_AUTHOR(DRIVER_AUTHOR); 62MODULE_AUTHOR(DRIVER_AUTHOR);
@@ -281,8 +283,7 @@ static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
281/** 283/**
282 * get_address - get pci address of a slot 284 * get_address - get pci address of a slot
283 * @hotplug_slot: slot to get status 285 * @hotplug_slot: slot to get status
284 * @busdev: pointer to struct pci_busdev (seg, bus, dev) 286 * @value: pointer to struct pci_busdev (seg, bus, dev)
285 *
286 */ 287 */
287static int get_address(struct hotplug_slot *hotplug_slot, u32 *value) 288static int get_address(struct hotplug_slot *hotplug_slot, u32 *value)
288{ 289{
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index e7f41294f811..424e7de181ae 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -4,6 +4,10 @@
4 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com) 4 * Copyright (C) 2002,2003 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
5 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com) 5 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
6 * Copyright (C) 2002,2003 NEC Corporation 6 * Copyright (C) 2002,2003 NEC Corporation
7 * Copyright (C) 2003-2005 Matthew Wilcox (matthew.wilcox@hp.com)
8 * Copyright (C) 2003-2005 Hewlett Packard
9 * Copyright (C) 2005 Rajesh Shah (rajesh.shah@intel.com)
10 * Copyright (C) 2005 Intel Corporation
7 * 11 *
8 * All rights reserved. 12 * All rights reserved.
9 * 13 *
@@ -26,6 +30,16 @@
26 * 30 *
27 */ 31 */
28 32
33/*
34 * Lifetime rules for pci_dev:
35 * - The one in acpiphp_func has its refcount elevated by pci_get_slot()
36 * when the driver is loaded or when an insertion event occurs. It loses
37 * a refcount when its ejected or the driver unloads.
38 * - The one in acpiphp_bridge has its refcount elevated by pci_get_slot()
39 * when the bridge is scanned and it loses a refcount when the bridge
40 * is removed.
41 */
42
29#include <linux/init.h> 43#include <linux/init.h>
30#include <linux/module.h> 44#include <linux/module.h>
31 45
@@ -178,21 +192,18 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
178 192
179 bridge->nr_slots++; 193 bridge->nr_slots++;
180 194
181 dbg("found ACPI PCI Hotplug slot at PCI %02x:%02x Slot:%d\n", 195 dbg("found ACPI PCI Hotplug slot %d at PCI %04x:%02x:%02x\n",
182 slot->bridge->bus, slot->device, slot->sun); 196 slot->sun, pci_domain_nr(bridge->pci_bus),
197 bridge->pci_bus->number, slot->device);
183 } 198 }
184 199
185 newfunc->slot = slot; 200 newfunc->slot = slot;
186 list_add_tail(&newfunc->sibling, &slot->funcs); 201 list_add_tail(&newfunc->sibling, &slot->funcs);
187 202
188 /* associate corresponding pci_dev */ 203 /* associate corresponding pci_dev */
189 newfunc->pci_dev = pci_find_slot(bridge->bus, 204 newfunc->pci_dev = pci_get_slot(bridge->pci_bus,
190 PCI_DEVFN(device, function)); 205 PCI_DEVFN(device, function));
191 if (newfunc->pci_dev) { 206 if (newfunc->pci_dev) {
192 if (acpiphp_init_func_resource(newfunc) < 0) {
193 kfree(newfunc);
194 return AE_ERROR;
195 }
196 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON); 207 slot->flags |= (SLOT_ENABLED | SLOT_POWEREDON);
197 } 208 }
198 209
@@ -227,62 +238,6 @@ static int detect_ejectable_slots(acpi_handle *bridge_handle)
227} 238}
228 239
229 240
230/* decode ACPI _CRS data and convert into our internal resource list
231 * TBD: _TRA, etc.
232 */
233static acpi_status
234decode_acpi_resource(struct acpi_resource *resource, void *context)
235{
236 struct acpiphp_bridge *bridge = (struct acpiphp_bridge *) context;
237 struct acpi_resource_address64 address;
238 struct pci_resource *res;
239
240 if (resource->id != ACPI_RSTYPE_ADDRESS16 &&
241 resource->id != ACPI_RSTYPE_ADDRESS32 &&
242 resource->id != ACPI_RSTYPE_ADDRESS64)
243 return AE_OK;
244
245 acpi_resource_to_address64(resource, &address);
246
247 if (address.producer_consumer == ACPI_PRODUCER && address.address_length > 0) {
248 dbg("resource type: %d: 0x%llx - 0x%llx\n", address.resource_type,
249 (unsigned long long)address.min_address_range,
250 (unsigned long long)address.max_address_range);
251 res = acpiphp_make_resource(address.min_address_range,
252 address.address_length);
253 if (!res) {
254 err("out of memory\n");
255 return AE_OK;
256 }
257
258 switch (address.resource_type) {
259 case ACPI_MEMORY_RANGE:
260 if (address.attribute.memory.cache_attribute == ACPI_PREFETCHABLE_MEMORY) {
261 res->next = bridge->p_mem_head;
262 bridge->p_mem_head = res;
263 } else {
264 res->next = bridge->mem_head;
265 bridge->mem_head = res;
266 }
267 break;
268 case ACPI_IO_RANGE:
269 res->next = bridge->io_head;
270 bridge->io_head = res;
271 break;
272 case ACPI_BUS_NUMBER_RANGE:
273 res->next = bridge->bus_head;
274 bridge->bus_head = res;
275 break;
276 default:
277 /* invalid type */
278 kfree(res);
279 break;
280 }
281 }
282
283 return AE_OK;
284}
285
286/* decode ACPI 2.0 _HPP hot plug parameters */ 241/* decode ACPI 2.0 _HPP hot plug parameters */
287static void decode_hpp(struct acpiphp_bridge *bridge) 242static void decode_hpp(struct acpiphp_bridge *bridge)
288{ 243{
@@ -346,34 +301,29 @@ static void init_bridge_misc(struct acpiphp_bridge *bridge)
346 /* decode ACPI 2.0 _HPP (hot plug parameters) */ 301 /* decode ACPI 2.0 _HPP (hot plug parameters) */
347 decode_hpp(bridge); 302 decode_hpp(bridge);
348 303
349 /* subtract all resources already allocated */
350 acpiphp_detect_pci_resource(bridge);
351
352 /* register all slot objects under this bridge */ 304 /* register all slot objects under this bridge */
353 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1, 305 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, bridge->handle, (u32)1,
354 register_slot, bridge, NULL); 306 register_slot, bridge, NULL);
355 307
356 /* install notify handler */ 308 /* install notify handler */
357 status = acpi_install_notify_handler(bridge->handle, 309 if (bridge->type != BRIDGE_TYPE_HOST) {
310 status = acpi_install_notify_handler(bridge->handle,
358 ACPI_SYSTEM_NOTIFY, 311 ACPI_SYSTEM_NOTIFY,
359 handle_hotplug_event_bridge, 312 handle_hotplug_event_bridge,
360 bridge); 313 bridge);
361 314
362 if (ACPI_FAILURE(status)) { 315 if (ACPI_FAILURE(status)) {
363 err("failed to register interrupt notify handler\n"); 316 err("failed to register interrupt notify handler\n");
317 }
364 } 318 }
365 319
366 list_add(&bridge->list, &bridge_list); 320 list_add(&bridge->list, &bridge_list);
367
368 dbg("Bridge resource:\n");
369 acpiphp_dump_resource(bridge);
370} 321}
371 322
372 323
373/* allocate and initialize host bridge data structure */ 324/* allocate and initialize host bridge data structure */
374static void add_host_bridge(acpi_handle *handle, int seg, int bus) 325static void add_host_bridge(acpi_handle *handle, struct pci_bus *pci_bus)
375{ 326{
376 acpi_status status;
377 struct acpiphp_bridge *bridge; 327 struct acpiphp_bridge *bridge;
378 328
379 bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); 329 bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
@@ -384,52 +334,19 @@ static void add_host_bridge(acpi_handle *handle, int seg, int bus)
384 334
385 bridge->type = BRIDGE_TYPE_HOST; 335 bridge->type = BRIDGE_TYPE_HOST;
386 bridge->handle = handle; 336 bridge->handle = handle;
387 bridge->seg = seg;
388 bridge->bus = bus;
389 337
390 bridge->pci_bus = pci_find_bus(seg, bus); 338 bridge->pci_bus = pci_bus;
391 339
392 spin_lock_init(&bridge->res_lock); 340 spin_lock_init(&bridge->res_lock);
393 341
394 /* to be overridden when we decode _CRS */
395 bridge->sub = bridge->bus;
396
397 /* decode resources */
398
399 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
400 decode_acpi_resource, bridge);
401
402 if (ACPI_FAILURE(status)) {
403 err("failed to decode bridge resources\n");
404 kfree(bridge);
405 return;
406 }
407
408 acpiphp_resource_sort_and_combine(&bridge->io_head);
409 acpiphp_resource_sort_and_combine(&bridge->mem_head);
410 acpiphp_resource_sort_and_combine(&bridge->p_mem_head);
411 acpiphp_resource_sort_and_combine(&bridge->bus_head);
412
413 dbg("ACPI _CRS resource:\n");
414 acpiphp_dump_resource(bridge);
415
416 if (bridge->bus_head) {
417 bridge->bus = bridge->bus_head->base;
418 bridge->sub = bridge->bus_head->base + bridge->bus_head->length - 1;
419 }
420
421 init_bridge_misc(bridge); 342 init_bridge_misc(bridge);
422} 343}
423 344
424 345
425/* allocate and initialize PCI-to-PCI bridge data structure */ 346/* allocate and initialize PCI-to-PCI bridge data structure */
426static void add_p2p_bridge(acpi_handle *handle, int seg, int bus, int dev, int fn) 347static void add_p2p_bridge(acpi_handle *handle, struct pci_dev *pci_dev)
427{ 348{
428 struct acpiphp_bridge *bridge; 349 struct acpiphp_bridge *bridge;
429 u8 tmp8;
430 u16 tmp16;
431 u64 base64, limit64;
432 u32 base, limit, base32u, limit32u;
433 350
434 bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL); 351 bridge = kmalloc(sizeof(struct acpiphp_bridge), GFP_KERNEL);
435 if (bridge == NULL) { 352 if (bridge == NULL) {
@@ -441,133 +358,22 @@ static void add_p2p_bridge(acpi_handle *handle, int seg, int bus, int dev, int f
441 358
442 bridge->type = BRIDGE_TYPE_P2P; 359 bridge->type = BRIDGE_TYPE_P2P;
443 bridge->handle = handle; 360 bridge->handle = handle;
444 bridge->seg = seg;
445
446 bridge->pci_dev = pci_find_slot(bus, PCI_DEVFN(dev, fn));
447 if (!bridge->pci_dev) {
448 err("Can't get pci_dev\n");
449 kfree(bridge);
450 return;
451 }
452 361
453 bridge->pci_bus = bridge->pci_dev->subordinate; 362 bridge->pci_dev = pci_dev_get(pci_dev);
363 bridge->pci_bus = pci_dev->subordinate;
454 if (!bridge->pci_bus) { 364 if (!bridge->pci_bus) {
455 err("This is not a PCI-to-PCI bridge!\n"); 365 err("This is not a PCI-to-PCI bridge!\n");
456 kfree(bridge); 366 goto err;
457 return;
458 } 367 }
459 368
460 spin_lock_init(&bridge->res_lock); 369 spin_lock_init(&bridge->res_lock);
461 370
462 bridge->bus = bridge->pci_bus->number;
463 bridge->sub = bridge->pci_bus->subordinate;
464
465 /*
466 * decode resources under this P2P bridge
467 */
468
469 /* I/O resources */
470 pci_read_config_byte(bridge->pci_dev, PCI_IO_BASE, &tmp8);
471 base = tmp8;
472 pci_read_config_byte(bridge->pci_dev, PCI_IO_LIMIT, &tmp8);
473 limit = tmp8;
474
475 switch (base & PCI_IO_RANGE_TYPE_MASK) {
476 case PCI_IO_RANGE_TYPE_16:
477 base = (base << 8) & 0xf000;
478 limit = ((limit << 8) & 0xf000) + 0xfff;
479 bridge->io_head = acpiphp_make_resource((u64)base, limit - base + 1);
480 if (!bridge->io_head) {
481 err("out of memory\n");
482 kfree(bridge);
483 return;
484 }
485 dbg("16bit I/O range: %04x-%04x\n",
486 (u32)bridge->io_head->base,
487 (u32)(bridge->io_head->base + bridge->io_head->length - 1));
488 break;
489 case PCI_IO_RANGE_TYPE_32:
490 pci_read_config_word(bridge->pci_dev, PCI_IO_BASE_UPPER16, &tmp16);
491 base = ((u32)tmp16 << 16) | ((base << 8) & 0xf000);
492 pci_read_config_word(bridge->pci_dev, PCI_IO_LIMIT_UPPER16, &tmp16);
493 limit = (((u32)tmp16 << 16) | ((limit << 8) & 0xf000)) + 0xfff;
494 bridge->io_head = acpiphp_make_resource((u64)base, limit - base + 1);
495 if (!bridge->io_head) {
496 err("out of memory\n");
497 kfree(bridge);
498 return;
499 }
500 dbg("32bit I/O range: %08x-%08x\n",
501 (u32)bridge->io_head->base,
502 (u32)(bridge->io_head->base + bridge->io_head->length - 1));
503 break;
504 case 0x0f:
505 dbg("I/O space unsupported\n");
506 break;
507 default:
508 warn("Unknown I/O range type\n");
509 }
510
511 /* Memory resources (mandatory for P2P bridge) */
512 pci_read_config_word(bridge->pci_dev, PCI_MEMORY_BASE, &tmp16);
513 base = (tmp16 & 0xfff0) << 16;
514 pci_read_config_word(bridge->pci_dev, PCI_MEMORY_LIMIT, &tmp16);
515 limit = ((tmp16 & 0xfff0) << 16) | 0xfffff;
516 bridge->mem_head = acpiphp_make_resource((u64)base, limit - base + 1);
517 if (!bridge->mem_head) {
518 err("out of memory\n");
519 kfree(bridge);
520 return;
521 }
522 dbg("32bit Memory range: %08x-%08x\n",
523 (u32)bridge->mem_head->base,
524 (u32)(bridge->mem_head->base + bridge->mem_head->length-1));
525
526 /* Prefetchable Memory resources (optional) */
527 pci_read_config_word(bridge->pci_dev, PCI_PREF_MEMORY_BASE, &tmp16);
528 base = tmp16;
529 pci_read_config_word(bridge->pci_dev, PCI_PREF_MEMORY_LIMIT, &tmp16);
530 limit = tmp16;
531
532 switch (base & PCI_MEMORY_RANGE_TYPE_MASK) {
533 case PCI_PREF_RANGE_TYPE_32:
534 base = (base & 0xfff0) << 16;
535 limit = ((limit & 0xfff0) << 16) | 0xfffff;
536 bridge->p_mem_head = acpiphp_make_resource((u64)base, limit - base + 1);
537 if (!bridge->p_mem_head) {
538 err("out of memory\n");
539 kfree(bridge);
540 return;
541 }
542 dbg("32bit Prefetchable memory range: %08x-%08x\n",
543 (u32)bridge->p_mem_head->base,
544 (u32)(bridge->p_mem_head->base + bridge->p_mem_head->length - 1));
545 break;
546 case PCI_PREF_RANGE_TYPE_64:
547 pci_read_config_dword(bridge->pci_dev, PCI_PREF_BASE_UPPER32, &base32u);
548 pci_read_config_dword(bridge->pci_dev, PCI_PREF_LIMIT_UPPER32, &limit32u);
549 base64 = ((u64)base32u << 32) | ((base & 0xfff0) << 16);
550 limit64 = (((u64)limit32u << 32) | ((limit & 0xfff0) << 16)) + 0xfffff;
551
552 bridge->p_mem_head = acpiphp_make_resource(base64, limit64 - base64 + 1);
553 if (!bridge->p_mem_head) {
554 err("out of memory\n");
555 kfree(bridge);
556 return;
557 }
558 dbg("64bit Prefetchable memory range: %08x%08x-%08x%08x\n",
559 (u32)(bridge->p_mem_head->base >> 32),
560 (u32)(bridge->p_mem_head->base & 0xffffffff),
561 (u32)((bridge->p_mem_head->base + bridge->p_mem_head->length - 1) >> 32),
562 (u32)((bridge->p_mem_head->base + bridge->p_mem_head->length - 1) & 0xffffffff));
563 break;
564 case 0x0f:
565 break;
566 default:
567 warn("Unknown prefetchale memory type\n");
568 }
569
570 init_bridge_misc(bridge); 371 init_bridge_misc(bridge);
372 return;
373 err:
374 pci_dev_put(pci_dev);
375 kfree(bridge);
376 return;
571} 377}
572 378
573 379
@@ -577,14 +383,10 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
577{ 383{
578 acpi_status status; 384 acpi_status status;
579 acpi_handle dummy_handle; 385 acpi_handle dummy_handle;
580 unsigned long *segbus = context;
581 unsigned long tmp; 386 unsigned long tmp;
582 int seg, bus, device, function; 387 int device, function;
583 struct pci_dev *dev; 388 struct pci_dev *dev;
584 389 struct pci_bus *pci_bus = context;
585 /* get PCI address */
586 seg = (*segbus >> 8) & 0xff;
587 bus = *segbus & 0xff;
588 390
589 status = acpi_get_handle(handle, "_ADR", &dummy_handle); 391 status = acpi_get_handle(handle, "_ADR", &dummy_handle);
590 if (ACPI_FAILURE(status)) 392 if (ACPI_FAILURE(status))
@@ -599,20 +401,19 @@ find_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
599 device = (tmp >> 16) & 0xffff; 401 device = (tmp >> 16) & 0xffff;
600 function = tmp & 0xffff; 402 function = tmp & 0xffff;
601 403
602 dev = pci_find_slot(bus, PCI_DEVFN(device, function)); 404 dev = pci_get_slot(pci_bus, PCI_DEVFN(device, function));
603 405
604 if (!dev) 406 if (!dev || !dev->subordinate)
605 return AE_OK; 407 goto out;
606
607 if (!dev->subordinate)
608 return AE_OK;
609 408
610 /* check if this bridge has ejectable slots */ 409 /* check if this bridge has ejectable slots */
611 if (detect_ejectable_slots(handle) > 0) { 410 if (detect_ejectable_slots(handle) > 0) {
612 dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev)); 411 dbg("found PCI-to-PCI bridge at PCI %s\n", pci_name(dev));
613 add_p2p_bridge(handle, seg, bus, device, function); 412 add_p2p_bridge(handle, dev);
614 } 413 }
615 414
415 out:
416 pci_dev_put(dev);
616 return AE_OK; 417 return AE_OK;
617} 418}
618 419
@@ -624,6 +425,7 @@ static int add_bridge(acpi_handle handle)
624 unsigned long tmp; 425 unsigned long tmp;
625 int seg, bus; 426 int seg, bus;
626 acpi_handle dummy_handle; 427 acpi_handle dummy_handle;
428 struct pci_bus *pci_bus;
627 429
628 /* if the bridge doesn't have _STA, we assume it is always there */ 430 /* if the bridge doesn't have _STA, we assume it is always there */
629 status = acpi_get_handle(handle, "_STA", &dummy_handle); 431 status = acpi_get_handle(handle, "_STA", &dummy_handle);
@@ -653,18 +455,22 @@ static int add_bridge(acpi_handle handle)
653 bus = 0; 455 bus = 0;
654 } 456 }
655 457
458 pci_bus = pci_find_bus(seg, bus);
459 if (!pci_bus) {
460 err("Can't find bus %04x:%02x\n", seg, bus);
461 return 0;
462 }
463
656 /* check if this bridge has ejectable slots */ 464 /* check if this bridge has ejectable slots */
657 if (detect_ejectable_slots(handle) > 0) { 465 if (detect_ejectable_slots(handle) > 0) {
658 dbg("found PCI host-bus bridge with hot-pluggable slots\n"); 466 dbg("found PCI host-bus bridge with hot-pluggable slots\n");
659 add_host_bridge(handle, seg, bus); 467 add_host_bridge(handle, pci_bus);
660 return 0; 468 return 0;
661 } 469 }
662 470
663 tmp = seg << 8 | bus;
664
665 /* search P2P bridges under this host bridge */ 471 /* search P2P bridges under this host bridge */
666 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1, 472 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, (u32)1,
667 find_p2p_bridge, &tmp, NULL); 473 find_p2p_bridge, pci_bus, NULL);
668 474
669 if (ACPI_FAILURE(status)) 475 if (ACPI_FAILURE(status))
670 warn("find_p2p_bridge faied (error code = 0x%x)\n",status); 476 warn("find_p2p_bridge faied (error code = 0x%x)\n",status);
@@ -672,12 +478,205 @@ static int add_bridge(acpi_handle handle)
672 return 0; 478 return 0;
673} 479}
674 480
481static struct acpiphp_bridge *acpiphp_handle_to_bridge(acpi_handle handle)
482{
483 struct list_head *head;
484 list_for_each(head, &bridge_list) {
485 struct acpiphp_bridge *bridge = list_entry(head,
486 struct acpiphp_bridge, list);
487 if (bridge->handle == handle)
488 return bridge;
489 }
490
491 return NULL;
492}
493
494static void cleanup_bridge(struct acpiphp_bridge *bridge)
495{
496 struct list_head *list, *tmp;
497 struct acpiphp_slot *slot;
498 acpi_status status;
499 acpi_handle handle = bridge->handle;
500
501 status = acpi_remove_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
502 handle_hotplug_event_bridge);
503 if (ACPI_FAILURE(status))
504 err("failed to remove notify handler\n");
505
506 slot = bridge->slots;
507 while (slot) {
508 struct acpiphp_slot *next = slot->next;
509 list_for_each_safe (list, tmp, &slot->funcs) {
510 struct acpiphp_func *func;
511 func = list_entry(list, struct acpiphp_func, sibling);
512 status = acpi_remove_notify_handler(func->handle,
513 ACPI_SYSTEM_NOTIFY,
514 handle_hotplug_event_func);
515 if (ACPI_FAILURE(status))
516 err("failed to remove notify handler\n");
517 pci_dev_put(func->pci_dev);
518 list_del(list);
519 kfree(func);
520 }
521 kfree(slot);
522 slot = next;
523 }
524
525 pci_dev_put(bridge->pci_dev);
526 list_del(&bridge->list);
527 kfree(bridge);
528}
529
530static acpi_status
531cleanup_p2p_bridge(acpi_handle handle, u32 lvl, void *context, void **rv)
532{
533 struct acpiphp_bridge *bridge;
534
535 if (!(bridge = acpiphp_handle_to_bridge(handle)))
536 return AE_OK;
537 cleanup_bridge(bridge);
538 return AE_OK;
539}
675 540
676static void remove_bridge(acpi_handle handle) 541static void remove_bridge(acpi_handle handle)
677{ 542{
678 /* No-op for now .. */ 543 struct acpiphp_bridge *bridge;
544
545 bridge = acpiphp_handle_to_bridge(handle);
546 if (bridge) {
547 cleanup_bridge(bridge);
548 } else {
549 /* clean-up p2p bridges under this host bridge */
550 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
551 (u32)1, cleanup_p2p_bridge, NULL, NULL);
552 }
553}
554
555static struct pci_dev * get_apic_pci_info(acpi_handle handle)
556{
557 struct acpi_pci_id id;
558 struct pci_bus *bus;
559 struct pci_dev *dev;
560
561 if (ACPI_FAILURE(acpi_get_pci_id(handle, &id)))
562 return NULL;
563
564 bus = pci_find_bus(id.segment, id.bus);
565 if (!bus)
566 return NULL;
567
568 dev = pci_get_slot(bus, PCI_DEVFN(id.device, id.function));
569 if (!dev)
570 return NULL;
571
572 if ((dev->class != PCI_CLASS_SYSTEM_PIC_IOAPIC) &&
573 (dev->class != PCI_CLASS_SYSTEM_PIC_IOXAPIC))
574 {
575 pci_dev_put(dev);
576 return NULL;
577 }
578
579 return dev;
580}
581
582static int get_gsi_base(acpi_handle handle, u32 *gsi_base)
583{
584 acpi_status status;
585 int result = -1;
586 unsigned long gsb;
587 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
588 union acpi_object *obj;
589 void *table;
590
591 status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
592 if (ACPI_SUCCESS(status)) {
593 *gsi_base = (u32)gsb;
594 return 0;
595 }
596
597 status = acpi_evaluate_object(handle, "_MAT", NULL, &buffer);
598 if (ACPI_FAILURE(status) || !buffer.length || !buffer.pointer)
599 return -1;
600
601 obj = buffer.pointer;
602 if (obj->type != ACPI_TYPE_BUFFER)
603 goto out;
604
605 table = obj->buffer.pointer;
606 switch (((acpi_table_entry_header *)table)->type) {
607 case ACPI_MADT_IOSAPIC:
608 *gsi_base = ((struct acpi_table_iosapic *)table)->global_irq_base;
609 result = 0;
610 break;
611 case ACPI_MADT_IOAPIC:
612 *gsi_base = ((struct acpi_table_ioapic *)table)->global_irq_base;
613 result = 0;
614 break;
615 default:
616 break;
617 }
618 out:
619 acpi_os_free(buffer.pointer);
620 return result;
621}
622
623static acpi_status
624ioapic_add(acpi_handle handle, u32 lvl, void *context, void **rv)
625{
626 acpi_status status;
627 unsigned long sta;
628 acpi_handle tmp;
629 struct pci_dev *pdev;
630 u32 gsi_base;
631 u64 phys_addr;
632
633 /* Evaluate _STA if present */
634 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
635 if (ACPI_SUCCESS(status) && sta != ACPI_STA_ALL)
636 return AE_CTRL_DEPTH;
637
638 /* Scan only PCI bus scope */
639 status = acpi_get_handle(handle, "_HID", &tmp);
640 if (ACPI_SUCCESS(status))
641 return AE_CTRL_DEPTH;
642
643 if (get_gsi_base(handle, &gsi_base))
644 return AE_OK;
645
646 pdev = get_apic_pci_info(handle);
647 if (!pdev)
648 return AE_OK;
649
650 if (pci_enable_device(pdev)) {
651 pci_dev_put(pdev);
652 return AE_OK;
653 }
654
655 pci_set_master(pdev);
656
657 if (pci_request_region(pdev, 0, "I/O APIC(acpiphp)")) {
658 pci_disable_device(pdev);
659 pci_dev_put(pdev);
660 return AE_OK;
661 }
662
663 phys_addr = pci_resource_start(pdev, 0);
664 if (acpi_register_ioapic(handle, phys_addr, gsi_base)) {
665 pci_release_region(pdev, 0);
666 pci_disable_device(pdev);
667 pci_dev_put(pdev);
668 return AE_OK;
669 }
670
671 return AE_OK;
679} 672}
680 673
674static int acpiphp_configure_ioapics(acpi_handle handle)
675{
676 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
677 ACPI_UINT32_MAX, ioapic_add, NULL, NULL);
678 return 0;
679}
681 680
682static int power_on_slot(struct acpiphp_slot *slot) 681static int power_on_slot(struct acpiphp_slot *slot)
683{ 682{
@@ -719,8 +718,6 @@ static int power_off_slot(struct acpiphp_slot *slot)
719 acpi_status status; 718 acpi_status status;
720 struct acpiphp_func *func; 719 struct acpiphp_func *func;
721 struct list_head *l; 720 struct list_head *l;
722 struct acpi_object_list arg_list;
723 union acpi_object arg;
724 721
725 int retval = 0; 722 int retval = 0;
726 723
@@ -731,7 +728,7 @@ static int power_off_slot(struct acpiphp_slot *slot)
731 list_for_each (l, &slot->funcs) { 728 list_for_each (l, &slot->funcs) {
732 func = list_entry(l, struct acpiphp_func, sibling); 729 func = list_entry(l, struct acpiphp_func, sibling);
733 730
734 if (func->pci_dev && (func->flags & FUNC_HAS_PS3)) { 731 if (func->flags & FUNC_HAS_PS3) {
735 status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL); 732 status = acpi_evaluate_object(func->handle, "_PS3", NULL, NULL);
736 if (ACPI_FAILURE(status)) { 733 if (ACPI_FAILURE(status)) {
737 warn("%s: _PS3 failed\n", __FUNCTION__); 734 warn("%s: _PS3 failed\n", __FUNCTION__);
@@ -742,27 +739,6 @@ static int power_off_slot(struct acpiphp_slot *slot)
742 } 739 }
743 } 740 }
744 741
745 list_for_each (l, &slot->funcs) {
746 func = list_entry(l, struct acpiphp_func, sibling);
747
748 /* We don't want to call _EJ0 on non-existing functions. */
749 if (func->pci_dev && (func->flags & FUNC_HAS_EJ0)) {
750 /* _EJ0 method take one argument */
751 arg_list.count = 1;
752 arg_list.pointer = &arg;
753 arg.type = ACPI_TYPE_INTEGER;
754 arg.integer.value = 1;
755
756 status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL);
757 if (ACPI_FAILURE(status)) {
758 warn("%s: _EJ0 failed\n", __FUNCTION__);
759 retval = -1;
760 goto err_exit;
761 } else
762 break;
763 }
764 }
765
766 /* TBD: evaluate _STA to check if the slot is disabled */ 742 /* TBD: evaluate _STA to check if the slot is disabled */
767 743
768 slot->flags &= (~SLOT_POWEREDON); 744 slot->flags &= (~SLOT_POWEREDON);
@@ -782,70 +758,56 @@ static int power_off_slot(struct acpiphp_slot *slot)
782 */ 758 */
783static int enable_device(struct acpiphp_slot *slot) 759static int enable_device(struct acpiphp_slot *slot)
784{ 760{
785 u8 bus;
786 struct pci_dev *dev; 761 struct pci_dev *dev;
787 struct pci_bus *child; 762 struct pci_bus *bus = slot->bridge->pci_bus;
788 struct list_head *l; 763 struct list_head *l;
789 struct acpiphp_func *func; 764 struct acpiphp_func *func;
790 int retval = 0; 765 int retval = 0;
791 int num; 766 int num, max, pass;
792 767
793 if (slot->flags & SLOT_ENABLED) 768 if (slot->flags & SLOT_ENABLED)
794 goto err_exit; 769 goto err_exit;
795 770
796 /* sanity check: dev should be NULL when hot-plugged in */ 771 /* sanity check: dev should be NULL when hot-plugged in */
797 dev = pci_find_slot(slot->bridge->bus, PCI_DEVFN(slot->device, 0)); 772 dev = pci_get_slot(bus, PCI_DEVFN(slot->device, 0));
798 if (dev) { 773 if (dev) {
799 /* This case shouldn't happen */ 774 /* This case shouldn't happen */
800 err("pci_dev structure already exists.\n"); 775 err("pci_dev structure already exists.\n");
776 pci_dev_put(dev);
801 retval = -1; 777 retval = -1;
802 goto err_exit; 778 goto err_exit;
803 } 779 }
804 780
805 /* allocate resources to device */ 781 num = pci_scan_slot(bus, PCI_DEVFN(slot->device, 0));
806 retval = acpiphp_configure_slot(slot); 782 if (num == 0) {
807 if (retval)
808 goto err_exit;
809
810 /* returned `dev' is the *first function* only! */
811 num = pci_scan_slot(slot->bridge->pci_bus, PCI_DEVFN(slot->device, 0));
812 if (num)
813 pci_bus_add_devices(slot->bridge->pci_bus);
814 dev = pci_find_slot(slot->bridge->bus, PCI_DEVFN(slot->device, 0));
815
816 if (!dev) {
817 err("No new device found\n"); 783 err("No new device found\n");
818 retval = -1; 784 retval = -1;
819 goto err_exit; 785 goto err_exit;
820 } 786 }
821 787
822 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { 788 max = bus->secondary;
823 pci_read_config_byte(dev, PCI_SECONDARY_BUS, &bus); 789 for (pass = 0; pass < 2; pass++) {
824 child = (struct pci_bus*) pci_add_new_bus(dev->bus, dev, bus); 790 list_for_each_entry(dev, &bus->devices, bus_list) {
825 pci_do_scan_bus(child); 791 if (PCI_SLOT(dev->devfn) != slot->device)
792 continue;
793 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
794 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
795 max = pci_scan_bridge(bus, dev, max, pass);
796 }
826 } 797 }
827 798
799 pci_bus_assign_resources(bus);
800 pci_bus_add_devices(bus);
801
828 /* associate pci_dev to our representation */ 802 /* associate pci_dev to our representation */
829 list_for_each (l, &slot->funcs) { 803 list_for_each (l, &slot->funcs) {
830 func = list_entry(l, struct acpiphp_func, sibling); 804 func = list_entry(l, struct acpiphp_func, sibling);
831 805 func->pci_dev = pci_get_slot(bus, PCI_DEVFN(slot->device,
832 func->pci_dev = pci_find_slot(slot->bridge->bus,
833 PCI_DEVFN(slot->device,
834 func->function)); 806 func->function));
835 if (!func->pci_dev)
836 continue;
837
838 /* configure device */
839 retval = acpiphp_configure_function(func);
840 if (retval)
841 goto err_exit;
842 } 807 }
843 808
844 slot->flags |= SLOT_ENABLED; 809 slot->flags |= SLOT_ENABLED;
845 810
846 dbg("Available resources:\n");
847 acpiphp_dump_resource(slot->bridge);
848
849 err_exit: 811 err_exit:
850 return retval; 812 return retval;
851} 813}
@@ -866,9 +828,12 @@ static int disable_device(struct acpiphp_slot *slot)
866 828
867 list_for_each (l, &slot->funcs) { 829 list_for_each (l, &slot->funcs) {
868 func = list_entry(l, struct acpiphp_func, sibling); 830 func = list_entry(l, struct acpiphp_func, sibling);
831 if (!func->pci_dev)
832 continue;
869 833
870 if (func->pci_dev) 834 pci_remove_bus_device(func->pci_dev);
871 acpiphp_unconfigure_function(func); 835 pci_dev_put(func->pci_dev);
836 func->pci_dev = NULL;
872 } 837 }
873 838
874 slot->flags &= (~SLOT_ENABLED); 839 slot->flags &= (~SLOT_ENABLED);
@@ -920,6 +885,39 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
920} 885}
921 886
922/** 887/**
888 * acpiphp_eject_slot - physically eject the slot
889 */
890static int acpiphp_eject_slot(struct acpiphp_slot *slot)
891{
892 acpi_status status;
893 struct acpiphp_func *func;
894 struct list_head *l;
895 struct acpi_object_list arg_list;
896 union acpi_object arg;
897
898 list_for_each (l, &slot->funcs) {
899 func = list_entry(l, struct acpiphp_func, sibling);
900
901 /* We don't want to call _EJ0 on non-existing functions. */
902 if ((func->flags & FUNC_HAS_EJ0)) {
903 /* _EJ0 method take one argument */
904 arg_list.count = 1;
905 arg_list.pointer = &arg;
906 arg.type = ACPI_TYPE_INTEGER;
907 arg.integer.value = 1;
908
909 status = acpi_evaluate_object(func->handle, "_EJ0", &arg_list, NULL);
910 if (ACPI_FAILURE(status)) {
911 warn("%s: _EJ0 failed\n", __FUNCTION__);
912 return -1;
913 } else
914 break;
915 }
916 }
917 return 0;
918}
919
920/**
923 * acpiphp_check_bridge - re-enumerate devices 921 * acpiphp_check_bridge - re-enumerate devices
924 * 922 *
925 * Iterate over all slots under this bridge and make sure that if a 923 * Iterate over all slots under this bridge and make sure that if a
@@ -942,6 +940,8 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
942 if (retval) { 940 if (retval) {
943 err("Error occurred in disabling\n"); 941 err("Error occurred in disabling\n");
944 goto err_exit; 942 goto err_exit;
943 } else {
944 acpiphp_eject_slot(slot);
945 } 945 }
946 disabled++; 946 disabled++;
947 } else { 947 } else {
@@ -962,6 +962,144 @@ static int acpiphp_check_bridge(struct acpiphp_bridge *bridge)
962 return retval; 962 return retval;
963} 963}
964 964
965static void program_hpp(struct pci_dev *dev, struct acpiphp_bridge *bridge)
966{
967 u16 pci_cmd, pci_bctl;
968 struct pci_dev *cdev;
969
970 /* Program hpp values for this device */
971 if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
972 (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
973 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
974 return;
975 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
976 bridge->hpp.cache_line_size);
977 pci_write_config_byte(dev, PCI_LATENCY_TIMER,
978 bridge->hpp.latency_timer);
979 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
980 if (bridge->hpp.enable_SERR)
981 pci_cmd |= PCI_COMMAND_SERR;
982 else
983 pci_cmd &= ~PCI_COMMAND_SERR;
984 if (bridge->hpp.enable_PERR)
985 pci_cmd |= PCI_COMMAND_PARITY;
986 else
987 pci_cmd &= ~PCI_COMMAND_PARITY;
988 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
989
990 /* Program bridge control value and child devices */
991 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
992 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
993 bridge->hpp.latency_timer);
994 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
995 if (bridge->hpp.enable_SERR)
996 pci_bctl |= PCI_BRIDGE_CTL_SERR;
997 else
998 pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
999 if (bridge->hpp.enable_PERR)
1000 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1001 else
1002 pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
1003 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1004 if (dev->subordinate) {
1005 list_for_each_entry(cdev, &dev->subordinate->devices,
1006 bus_list)
1007 program_hpp(cdev, bridge);
1008 }
1009 }
1010}
1011
1012static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus)
1013{
1014 struct acpiphp_bridge bridge;
1015 struct pci_dev *dev;
1016
1017 memset(&bridge, 0, sizeof(bridge));
1018 bridge.handle = handle;
1019 decode_hpp(&bridge);
1020 list_for_each_entry(dev, &bus->devices, bus_list)
1021 program_hpp(dev, &bridge);
1022
1023}
1024
1025/*
1026 * Remove devices for which we could not assign resources, call
1027 * arch specific code to fix-up the bus
1028 */
1029static void acpiphp_sanitize_bus(struct pci_bus *bus)
1030{
1031 struct pci_dev *dev;
1032 int i;
1033 unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
1034
1035 list_for_each_entry(dev, &bus->devices, bus_list) {
1036 for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
1037 struct resource *res = &dev->resource[i];
1038 if ((res->flags & type_mask) && !res->start &&
1039 res->end) {
1040 /* Could not assign the required resources
1041 * for this device; remove it */
1042 pci_remove_bus_device(dev);
1043 break;
1044 }
1045 }
1046 }
1047}
1048
1049/* Program resources in newly inserted bridge */
1050static int acpiphp_configure_bridge (acpi_handle handle)
1051{
1052 struct acpi_pci_id pci_id;
1053 struct pci_bus *bus;
1054
1055 if (ACPI_FAILURE(acpi_get_pci_id(handle, &pci_id))) {
1056 err("cannot get PCI domain and bus number for bridge\n");
1057 return -EINVAL;
1058 }
1059 bus = pci_find_bus(pci_id.segment, pci_id.bus);
1060 if (!bus) {
1061 err("cannot find bus %d:%d\n",
1062 pci_id.segment, pci_id.bus);
1063 return -EINVAL;
1064 }
1065
1066 pci_bus_size_bridges(bus);
1067 pci_bus_assign_resources(bus);
1068 acpiphp_sanitize_bus(bus);
1069 acpiphp_set_hpp_values(handle, bus);
1070 pci_enable_bridges(bus);
1071 acpiphp_configure_ioapics(handle);
1072 return 0;
1073}
1074
1075static void handle_bridge_insertion(acpi_handle handle, u32 type)
1076{
1077 struct acpi_device *device, *pdevice;
1078 acpi_handle phandle;
1079
1080 if ((type != ACPI_NOTIFY_BUS_CHECK) &&
1081 (type != ACPI_NOTIFY_DEVICE_CHECK)) {
1082 err("unexpected notification type %d\n", type);
1083 return;
1084 }
1085
1086 acpi_get_parent(handle, &phandle);
1087 if (acpi_bus_get_device(phandle, &pdevice)) {
1088 dbg("no parent device, assuming NULL\n");
1089 pdevice = NULL;
1090 }
1091 if (acpi_bus_add(&device, pdevice, handle, ACPI_BUS_TYPE_DEVICE)) {
1092 err("cannot add bridge to acpi list\n");
1093 return;
1094 }
1095 if (!acpiphp_configure_bridge(handle) &&
1096 !acpi_bus_start(device))
1097 add_bridge(handle);
1098 else
1099 err("cannot configure and start bridge\n");
1100
1101}
1102
965/* 1103/*
966 * ACPI event handlers 1104 * ACPI event handlers
967 */ 1105 */
@@ -982,8 +1120,19 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
982 char objname[64]; 1120 char objname[64];
983 struct acpi_buffer buffer = { .length = sizeof(objname), 1121 struct acpi_buffer buffer = { .length = sizeof(objname),
984 .pointer = objname }; 1122 .pointer = objname };
1123 struct acpi_device *device;
985 1124
986 bridge = (struct acpiphp_bridge *)context; 1125 if (acpi_bus_get_device(handle, &device)) {
1126 /* This bridge must have just been physically inserted */
1127 handle_bridge_insertion(handle, type);
1128 return;
1129 }
1130
1131 bridge = acpiphp_handle_to_bridge(handle);
1132 if (!bridge) {
1133 err("cannot get bridge info\n");
1134 return;
1135 }
987 1136
988 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer); 1137 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
989 1138
@@ -1031,7 +1180,6 @@ static void handle_hotplug_event_bridge(acpi_handle handle, u32 type, void *cont
1031 } 1180 }
1032} 1181}
1033 1182
1034
1035/** 1183/**
1036 * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots) 1184 * handle_hotplug_event_func - handle ACPI event on functions (i.e. slots)
1037 * 1185 *
@@ -1074,7 +1222,8 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex
1074 case ACPI_NOTIFY_EJECT_REQUEST: 1222 case ACPI_NOTIFY_EJECT_REQUEST:
1075 /* request device eject */ 1223 /* request device eject */
1076 dbg("%s: Device eject notify on %s\n", __FUNCTION__, objname); 1224 dbg("%s: Device eject notify on %s\n", __FUNCTION__, objname);
1077 acpiphp_disable_slot(func->slot); 1225 if (!(acpiphp_disable_slot(func->slot)))
1226 acpiphp_eject_slot(func->slot);
1078 break; 1227 break;
1079 1228
1080 default: 1229 default:
@@ -1083,6 +1232,47 @@ static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *contex
1083 } 1232 }
1084} 1233}
1085 1234
1235static int is_root_bridge(acpi_handle handle)
1236{
1237 acpi_status status;
1238 struct acpi_device_info *info;
1239 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
1240 int i;
1241
1242 status = acpi_get_object_info(handle, &buffer);
1243 if (ACPI_SUCCESS(status)) {
1244 info = buffer.pointer;
1245 if ((info->valid & ACPI_VALID_HID) &&
1246 !strcmp(PCI_ROOT_HID_STRING,
1247 info->hardware_id.value)) {
1248 acpi_os_free(buffer.pointer);
1249 return 1;
1250 }
1251 if (info->valid & ACPI_VALID_CID) {
1252 for (i=0; i < info->compatibility_id.count; i++) {
1253 if (!strcmp(PCI_ROOT_HID_STRING,
1254 info->compatibility_id.id[i].value)) {
1255 acpi_os_free(buffer.pointer);
1256 return 1;
1257 }
1258 }
1259 }
1260 }
1261 return 0;
1262}
1263
1264static acpi_status
1265find_root_bridges(acpi_handle handle, u32 lvl, void *context, void **rv)
1266{
1267 int *count = (int *)context;
1268
1269 if (is_root_bridge(handle)) {
1270 acpi_install_notify_handler(handle, ACPI_SYSTEM_NOTIFY,
1271 handle_hotplug_event_bridge, NULL);
1272 (*count)++;
1273 }
1274 return AE_OK ;
1275}
1086 1276
1087static struct acpi_pci_driver acpi_pci_hp_driver = { 1277static struct acpi_pci_driver acpi_pci_hp_driver = {
1088 .add = add_bridge, 1278 .add = add_bridge,
@@ -1095,15 +1285,15 @@ static struct acpi_pci_driver acpi_pci_hp_driver = {
1095 */ 1285 */
1096int __init acpiphp_glue_init(void) 1286int __init acpiphp_glue_init(void)
1097{ 1287{
1098 int num; 1288 int num = 0;
1099
1100 if (list_empty(&pci_root_buses))
1101 return -1;
1102 1289
1103 num = acpi_pci_register_driver(&acpi_pci_hp_driver); 1290 acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
1291 ACPI_UINT32_MAX, find_root_bridges, &num, NULL);
1104 1292
1105 if (num <= 0) 1293 if (num <= 0)
1106 return -1; 1294 return -1;
1295 else
1296 acpi_pci_register_driver(&acpi_pci_hp_driver);
1107 1297
1108 return 0; 1298 return 0;
1109} 1299}
@@ -1116,46 +1306,6 @@ int __init acpiphp_glue_init(void)
1116 */ 1306 */
1117void __exit acpiphp_glue_exit(void) 1307void __exit acpiphp_glue_exit(void)
1118{ 1308{
1119 struct list_head *l1, *l2, *n1, *n2;
1120 struct acpiphp_bridge *bridge;
1121 struct acpiphp_slot *slot, *next;
1122 struct acpiphp_func *func;
1123 acpi_status status;
1124
1125 list_for_each_safe (l1, n1, &bridge_list) {
1126 bridge = (struct acpiphp_bridge *)l1;
1127 slot = bridge->slots;
1128 while (slot) {
1129 next = slot->next;
1130 list_for_each_safe (l2, n2, &slot->funcs) {
1131 func = list_entry(l2, struct acpiphp_func, sibling);
1132 acpiphp_free_resource(&func->io_head);
1133 acpiphp_free_resource(&func->mem_head);
1134 acpiphp_free_resource(&func->p_mem_head);
1135 acpiphp_free_resource(&func->bus_head);
1136 status = acpi_remove_notify_handler(func->handle,
1137 ACPI_SYSTEM_NOTIFY,
1138 handle_hotplug_event_func);
1139 if (ACPI_FAILURE(status))
1140 err("failed to remove notify handler\n");
1141 kfree(func);
1142 }
1143 kfree(slot);
1144 slot = next;
1145 }
1146 status = acpi_remove_notify_handler(bridge->handle, ACPI_SYSTEM_NOTIFY,
1147 handle_hotplug_event_bridge);
1148 if (ACPI_FAILURE(status))
1149 err("failed to remove notify handler\n");
1150
1151 acpiphp_free_resource(&bridge->io_head);
1152 acpiphp_free_resource(&bridge->mem_head);
1153 acpiphp_free_resource(&bridge->p_mem_head);
1154 acpiphp_free_resource(&bridge->bus_head);
1155
1156 kfree(bridge);
1157 }
1158
1159 acpi_pci_unregister_driver(&acpi_pci_hp_driver); 1309 acpi_pci_unregister_driver(&acpi_pci_hp_driver);
1160} 1310}
1161 1311
@@ -1173,11 +1323,14 @@ int __init acpiphp_get_num_slots(void)
1173 1323
1174 list_for_each (node, &bridge_list) { 1324 list_for_each (node, &bridge_list) {
1175 bridge = (struct acpiphp_bridge *)node; 1325 bridge = (struct acpiphp_bridge *)node;
1176 dbg("Bus%d %dslot(s)\n", bridge->bus, bridge->nr_slots); 1326 dbg("Bus %04x:%02x has %d slot%s\n",
1327 pci_domain_nr(bridge->pci_bus),
1328 bridge->pci_bus->number, bridge->nr_slots,
1329 bridge->nr_slots == 1 ? "" : "s");
1177 num_slots += bridge->nr_slots; 1330 num_slots += bridge->nr_slots;
1178 } 1331 }
1179 1332
1180 dbg("Total %dslots\n", num_slots); 1333 dbg("Total %d slots\n", num_slots);
1181 return num_slots; 1334 return num_slots;
1182} 1335}
1183 1336
@@ -1254,7 +1407,6 @@ int acpiphp_enable_slot(struct acpiphp_slot *slot)
1254 return retval; 1407 return retval;
1255} 1408}
1256 1409
1257
1258/** 1410/**
1259 * acpiphp_disable_slot - power off slot 1411 * acpiphp_disable_slot - power off slot
1260 */ 1412 */
@@ -1274,13 +1426,6 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
1274 if (retval) 1426 if (retval)
1275 goto err_exit; 1427 goto err_exit;
1276 1428
1277 acpiphp_resource_sort_and_combine(&slot->bridge->io_head);
1278 acpiphp_resource_sort_and_combine(&slot->bridge->mem_head);
1279 acpiphp_resource_sort_and_combine(&slot->bridge->p_mem_head);
1280 acpiphp_resource_sort_and_combine(&slot->bridge->bus_head);
1281 dbg("Available resources:\n");
1282 acpiphp_dump_resource(slot->bridge);
1283
1284 err_exit: 1429 err_exit:
1285 up(&slot->crit_sect); 1430 up(&slot->crit_sect);
1286 return retval; 1431 return retval;
@@ -1293,11 +1438,7 @@ int acpiphp_disable_slot(struct acpiphp_slot *slot)
1293 */ 1438 */
1294u8 acpiphp_get_power_status(struct acpiphp_slot *slot) 1439u8 acpiphp_get_power_status(struct acpiphp_slot *slot)
1295{ 1440{
1296 unsigned int sta; 1441 return (slot->flags & SLOT_POWEREDON);
1297
1298 sta = get_slot_status(slot);
1299
1300 return (sta & ACPI_STA_ENABLED) ? 1 : 0;
1301} 1442}
1302 1443
1303 1444
@@ -1335,9 +1476,10 @@ u8 acpiphp_get_adapter_status(struct acpiphp_slot *slot)
1335u32 acpiphp_get_address(struct acpiphp_slot *slot) 1476u32 acpiphp_get_address(struct acpiphp_slot *slot)
1336{ 1477{
1337 u32 address; 1478 u32 address;
1479 struct pci_bus *pci_bus = slot->bridge->pci_bus;
1338 1480
1339 address = ((slot->bridge->seg) << 16) | 1481 address = (pci_domain_nr(pci_bus) << 16) |
1340 ((slot->bridge->bus) << 8) | 1482 (pci_bus->number << 8) |
1341 slot->device; 1483 slot->device;
1342 1484
1343 return address; 1485 return address;
diff --git a/drivers/pci/hotplug/acpiphp_pci.c b/drivers/pci/hotplug/acpiphp_pci.c
deleted file mode 100644
index 54d97c9d1dff..000000000000
--- a/drivers/pci/hotplug/acpiphp_pci.c
+++ /dev/null
@@ -1,449 +0,0 @@
1/*
2 * ACPI PCI HotPlug PCI configuration space management
3 *
4 * Copyright (C) 1995,2001 Compaq Computer Corporation
5 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
6 * Copyright (C) 2001,2002 IBM Corp.
7 * Copyright (C) 2002 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
8 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
9 * Copyright (C) 2002 NEC Corporation
10 *
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or (at
16 * your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
21 * NON INFRINGEMENT. See the GNU General Public License for more
22 * details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Send feedback to <t-kochi@bq.jp.nec.com>
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34
35#include <linux/kernel.h>
36#include <linux/pci.h>
37#include <linux/acpi.h>
38#include "../pci.h"
39#include "pci_hotplug.h"
40#include "acpiphp.h"
41
42#define MY_NAME "acpiphp_pci"
43
44
45/* allocate mem/pmem/io resource to a new function */
46static int init_config_space (struct acpiphp_func *func)
47{
48 u32 bar, len;
49 u32 address[] = {
50 PCI_BASE_ADDRESS_0,
51 PCI_BASE_ADDRESS_1,
52 PCI_BASE_ADDRESS_2,
53 PCI_BASE_ADDRESS_3,
54 PCI_BASE_ADDRESS_4,
55 PCI_BASE_ADDRESS_5,
56 0
57 };
58 int count;
59 struct acpiphp_bridge *bridge;
60 struct pci_resource *res;
61 struct pci_bus *pbus;
62 int bus, device, function;
63 unsigned int devfn;
64 u16 tmp;
65
66 bridge = func->slot->bridge;
67 pbus = bridge->pci_bus;
68 bus = bridge->bus;
69 device = func->slot->device;
70 function = func->function;
71 devfn = PCI_DEVFN(device, function);
72
73 for (count = 0; address[count]; count++) { /* for 6 BARs */
74 pci_bus_write_config_dword(pbus, devfn,
75 address[count], 0xFFFFFFFF);
76 pci_bus_read_config_dword(pbus, devfn, address[count], &bar);
77
78 if (!bar) /* This BAR is not implemented */
79 continue;
80
81 dbg("Device %02x.%02x BAR %d wants %x\n", device, function, count, bar);
82
83 if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
84 /* This is IO */
85
86 len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
87 len = len & ~(len - 1);
88
89 dbg("len in IO %x, BAR %d\n", len, count);
90
91 spin_lock(&bridge->res_lock);
92 res = acpiphp_get_io_resource(&bridge->io_head, len);
93 spin_unlock(&bridge->res_lock);
94
95 if (!res) {
96 err("cannot allocate requested io for %02x:%02x.%d len %x\n",
97 bus, device, function, len);
98 return -1;
99 }
100 pci_bus_write_config_dword(pbus, devfn,
101 address[count],
102 (u32)res->base);
103 res->next = func->io_head;
104 func->io_head = res;
105
106 } else {
107 /* This is Memory */
108 if (bar & PCI_BASE_ADDRESS_MEM_PREFETCH) {
109 /* pfmem */
110
111 len = bar & 0xFFFFFFF0;
112 len = ~len + 1;
113
114 dbg("len in PFMEM %x, BAR %d\n", len, count);
115
116 spin_lock(&bridge->res_lock);
117 res = acpiphp_get_resource(&bridge->p_mem_head, len);
118 spin_unlock(&bridge->res_lock);
119
120 if (!res) {
121 err("cannot allocate requested pfmem for %02x:%02x.%d len %x\n",
122 bus, device, function, len);
123 return -1;
124 }
125
126 pci_bus_write_config_dword(pbus, devfn,
127 address[count],
128 (u32)res->base);
129
130 if (bar & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */
131 dbg("inside the pfmem 64 case, count %d\n", count);
132 count += 1;
133 pci_bus_write_config_dword(pbus, devfn,
134 address[count],
135 (u32)(res->base >> 32));
136 }
137
138 res->next = func->p_mem_head;
139 func->p_mem_head = res;
140
141 } else {
142 /* regular memory */
143
144 len = bar & 0xFFFFFFF0;
145 len = ~len + 1;
146
147 dbg("len in MEM %x, BAR %d\n", len, count);
148
149 spin_lock(&bridge->res_lock);
150 res = acpiphp_get_resource(&bridge->mem_head, len);
151 spin_unlock(&bridge->res_lock);
152
153 if (!res) {
154 err("cannot allocate requested pfmem for %02x:%02x.%d len %x\n",
155 bus, device, function, len);
156 return -1;
157 }
158
159 pci_bus_write_config_dword(pbus, devfn,
160 address[count],
161 (u32)res->base);
162
163 if (bar & PCI_BASE_ADDRESS_MEM_TYPE_64) {
164 /* takes up another dword */
165 dbg("inside mem 64 case, reg. mem, count %d\n", count);
166 count += 1;
167 pci_bus_write_config_dword(pbus, devfn,
168 address[count],
169 (u32)(res->base >> 32));
170 }
171
172 res->next = func->mem_head;
173 func->mem_head = res;
174
175 }
176 }
177 }
178
179 /* disable expansion rom */
180 pci_bus_write_config_dword(pbus, devfn, PCI_ROM_ADDRESS, 0x00000000);
181
182 /* set PCI parameters from _HPP */
183 pci_bus_write_config_byte(pbus, devfn, PCI_CACHE_LINE_SIZE,
184 bridge->hpp.cache_line_size);
185 pci_bus_write_config_byte(pbus, devfn, PCI_LATENCY_TIMER,
186 bridge->hpp.latency_timer);
187
188 pci_bus_read_config_word(pbus, devfn, PCI_COMMAND, &tmp);
189 if (bridge->hpp.enable_SERR)
190 tmp |= PCI_COMMAND_SERR;
191 if (bridge->hpp.enable_PERR)
192 tmp |= PCI_COMMAND_PARITY;
193 pci_bus_write_config_word(pbus, devfn, PCI_COMMAND, tmp);
194
195 return 0;
196}
197
198/* detect_used_resource - subtract resource under dev from bridge */
199static int detect_used_resource (struct acpiphp_bridge *bridge, struct pci_dev *dev)
200{
201 int count;
202
203 dbg("Device %s\n", pci_name(dev));
204
205 for (count = 0; count < DEVICE_COUNT_RESOURCE; count++) {
206 struct pci_resource *res;
207 struct pci_resource **head;
208 unsigned long base = dev->resource[count].start;
209 unsigned long len = dev->resource[count].end - base + 1;
210 unsigned long flags = dev->resource[count].flags;
211
212 if (!flags)
213 continue;
214
215 dbg("BAR[%d] 0x%lx - 0x%lx (0x%lx)\n", count, base,
216 base + len - 1, flags);
217
218 if (flags & IORESOURCE_IO) {
219 head = &bridge->io_head;
220 } else if (flags & IORESOURCE_PREFETCH) {
221 head = &bridge->p_mem_head;
222 } else {
223 head = &bridge->mem_head;
224 }
225
226 spin_lock(&bridge->res_lock);
227 res = acpiphp_get_resource_with_base(head, base, len);
228 spin_unlock(&bridge->res_lock);
229 if (res)
230 kfree(res);
231 }
232
233 return 0;
234}
235
236
237/**
238 * acpiphp_detect_pci_resource - detect resources under bridge
239 * @bridge: detect all resources already used under this bridge
240 *
241 * collect all resources already allocated for all devices under a bridge.
242 */
243int acpiphp_detect_pci_resource (struct acpiphp_bridge *bridge)
244{
245 struct list_head *l;
246 struct pci_dev *dev;
247
248 list_for_each (l, &bridge->pci_bus->devices) {
249 dev = pci_dev_b(l);
250 detect_used_resource(bridge, dev);
251 }
252
253 return 0;
254}
255
256
257/**
258 * acpiphp_init_slot_resource - gather resource usage information of a slot
259 * @slot: ACPI slot object to be checked, should have valid pci_dev member
260 *
261 * TBD: PCI-to-PCI bridge case
262 * use pci_dev->resource[]
263 */
264int acpiphp_init_func_resource (struct acpiphp_func *func)
265{
266 u64 base;
267 u32 bar, len;
268 u32 address[] = {
269 PCI_BASE_ADDRESS_0,
270 PCI_BASE_ADDRESS_1,
271 PCI_BASE_ADDRESS_2,
272 PCI_BASE_ADDRESS_3,
273 PCI_BASE_ADDRESS_4,
274 PCI_BASE_ADDRESS_5,
275 0
276 };
277 int count;
278 struct pci_resource *res;
279 struct pci_dev *dev;
280
281 dev = func->pci_dev;
282 dbg("Hot-pluggable device %s\n", pci_name(dev));
283
284 for (count = 0; address[count]; count++) { /* for 6 BARs */
285 pci_read_config_dword(dev, address[count], &bar);
286
287 if (!bar) /* This BAR is not implemented */
288 continue;
289
290 pci_write_config_dword(dev, address[count], 0xFFFFFFFF);
291 pci_read_config_dword(dev, address[count], &len);
292
293 if (len & PCI_BASE_ADDRESS_SPACE_IO) {
294 /* This is IO */
295 base = bar & 0xFFFFFFFC;
296 len = len & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
297 len = len & ~(len - 1);
298
299 dbg("BAR[%d] %08x - %08x (IO)\n", count, (u32)base, (u32)base + len - 1);
300
301 res = acpiphp_make_resource(base, len);
302 if (!res)
303 goto no_memory;
304
305 res->next = func->io_head;
306 func->io_head = res;
307
308 } else {
309 /* This is Memory */
310 base = bar & 0xFFFFFFF0;
311 if (len & PCI_BASE_ADDRESS_MEM_PREFETCH) {
312 /* pfmem */
313
314 len &= 0xFFFFFFF0;
315 len = ~len + 1;
316
317 if (len & PCI_BASE_ADDRESS_MEM_TYPE_64) { /* takes up another dword */
318 dbg("prefetch mem 64\n");
319 count += 1;
320 }
321 dbg("BAR[%d] %08x - %08x (PMEM)\n", count, (u32)base, (u32)base + len - 1);
322 res = acpiphp_make_resource(base, len);
323 if (!res)
324 goto no_memory;
325
326 res->next = func->p_mem_head;
327 func->p_mem_head = res;
328
329 } else {
330 /* regular memory */
331
332 len &= 0xFFFFFFF0;
333 len = ~len + 1;
334
335 if (len & PCI_BASE_ADDRESS_MEM_TYPE_64) {
336 /* takes up another dword */
337 dbg("mem 64\n");
338 count += 1;
339 }
340 dbg("BAR[%d] %08x - %08x (MEM)\n", count, (u32)base, (u32)base + len - 1);
341 res = acpiphp_make_resource(base, len);
342 if (!res)
343 goto no_memory;
344
345 res->next = func->mem_head;
346 func->mem_head = res;
347
348 }
349 }
350
351 pci_write_config_dword(dev, address[count], bar);
352 }
353#if 1
354 acpiphp_dump_func_resource(func);
355#endif
356
357 return 0;
358
359 no_memory:
360 err("out of memory\n");
361 acpiphp_free_resource(&func->io_head);
362 acpiphp_free_resource(&func->mem_head);
363 acpiphp_free_resource(&func->p_mem_head);
364
365 return -1;
366}
367
368
369/**
370 * acpiphp_configure_slot - allocate PCI resources
371 * @slot: slot to be configured
372 *
373 * initializes a PCI functions on a device inserted
374 * into the slot
375 *
376 */
377int acpiphp_configure_slot (struct acpiphp_slot *slot)
378{
379 struct acpiphp_func *func;
380 struct list_head *l;
381 u8 hdr;
382 u32 dvid;
383 int retval = 0;
384 int is_multi = 0;
385
386 pci_bus_read_config_byte(slot->bridge->pci_bus,
387 PCI_DEVFN(slot->device, 0),
388 PCI_HEADER_TYPE, &hdr);
389
390 if (hdr & 0x80)
391 is_multi = 1;
392
393 list_for_each (l, &slot->funcs) {
394 func = list_entry(l, struct acpiphp_func, sibling);
395 if (is_multi || func->function == 0) {
396 pci_bus_read_config_dword(slot->bridge->pci_bus,
397 PCI_DEVFN(slot->device,
398 func->function),
399 PCI_VENDOR_ID, &dvid);
400 if (dvid != 0xffffffff) {
401 retval = init_config_space(func);
402 if (retval)
403 break;
404 }
405 }
406 }
407
408 return retval;
409}
410
411/**
412 * acpiphp_configure_function - configure PCI function
413 * @func: function to be configured
414 *
415 * initializes a PCI functions on a device inserted
416 * into the slot
417 *
418 */
419int acpiphp_configure_function (struct acpiphp_func *func)
420{
421 /* all handled by the pci core now */
422 return 0;
423}
424
425/**
426 * acpiphp_unconfigure_function - unconfigure PCI function
427 * @func: function to be unconfigured
428 *
429 */
430void acpiphp_unconfigure_function (struct acpiphp_func *func)
431{
432 struct acpiphp_bridge *bridge;
433
434 /* if pci_dev is NULL, ignore it */
435 if (!func->pci_dev)
436 return;
437
438 pci_remove_bus_device(func->pci_dev);
439
440 /* free all resources */
441 bridge = func->slot->bridge;
442
443 spin_lock(&bridge->res_lock);
444 acpiphp_move_resource(&func->io_head, &bridge->io_head);
445 acpiphp_move_resource(&func->mem_head, &bridge->mem_head);
446 acpiphp_move_resource(&func->p_mem_head, &bridge->p_mem_head);
447 acpiphp_move_resource(&func->bus_head, &bridge->bus_head);
448 spin_unlock(&bridge->res_lock);
449}
diff --git a/drivers/pci/hotplug/acpiphp_res.c b/drivers/pci/hotplug/acpiphp_res.c
deleted file mode 100644
index f54b1fa7b75a..000000000000
--- a/drivers/pci/hotplug/acpiphp_res.c
+++ /dev/null
@@ -1,700 +0,0 @@
1/*
2 * ACPI PCI HotPlug Utility functions
3 *
4 * Copyright (C) 1995,2001 Compaq Computer Corporation
5 * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
6 * Copyright (C) 2001 IBM Corp.
7 * Copyright (C) 2002 Hiroshi Aono (h-aono@ap.jp.nec.com)
8 * Copyright (C) 2002 Takayoshi Kochi (t-kochi@bq.jp.nec.com)
9 * Copyright (C) 2002 NEC Corporation
10 *
11 * All rights reserved.
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or (at
16 * your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful, but
19 * WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
21 * NON INFRINGEMENT. See the GNU General Public License for more
22 * details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 *
28 * Send feedback to <gregkh@us.ibm.com>, <t-kochi@bq.jp.nec.com>
29 *
30 */
31
32#include <linux/init.h>
33#include <linux/module.h>
34
35#include <linux/kernel.h>
36#include <linux/types.h>
37#include <linux/proc_fs.h>
38#include <linux/sysctl.h>
39#include <linux/pci.h>
40#include <linux/smp.h>
41#include <linux/smp_lock.h>
42
43#include <linux/string.h>
44#include <linux/mm.h>
45#include <linux/errno.h>
46#include <linux/ioport.h>
47#include <linux/slab.h>
48#include <linux/interrupt.h>
49#include <linux/timer.h>
50
51#include <linux/ioctl.h>
52#include <linux/fcntl.h>
53
54#include <linux/list.h>
55
56#include "pci_hotplug.h"
57#include "acpiphp.h"
58
59#define MY_NAME "acpiphp_res"
60
61
62/*
63 * sort_by_size - sort nodes by their length, smallest first
64 */
65static int sort_by_size(struct pci_resource **head)
66{
67 struct pci_resource *current_res;
68 struct pci_resource *next_res;
69 int out_of_order = 1;
70
71 if (!(*head))
72 return 1;
73
74 if (!((*head)->next))
75 return 0;
76
77 while (out_of_order) {
78 out_of_order = 0;
79
80 /* Special case for swapping list head */
81 if (((*head)->next) &&
82 ((*head)->length > (*head)->next->length)) {
83 out_of_order++;
84 current_res = *head;
85 *head = (*head)->next;
86 current_res->next = (*head)->next;
87 (*head)->next = current_res;
88 }
89
90 current_res = *head;
91
92 while (current_res->next && current_res->next->next) {
93 if (current_res->next->length > current_res->next->next->length) {
94 out_of_order++;
95 next_res = current_res->next;
96 current_res->next = current_res->next->next;
97 current_res = current_res->next;
98 next_res->next = current_res->next;
99 current_res->next = next_res;
100 } else
101 current_res = current_res->next;
102 }
103 } /* End of out_of_order loop */
104
105 return 0;
106}
107
108#if 0
109/*
110 * sort_by_max_size - sort nodes by their length, largest first
111 */
112static int sort_by_max_size(struct pci_resource **head)
113{
114 struct pci_resource *current_res;
115 struct pci_resource *next_res;
116 int out_of_order = 1;
117
118 if (!(*head))
119 return 1;
120
121 if (!((*head)->next))
122 return 0;
123
124 while (out_of_order) {
125 out_of_order = 0;
126
127 /* Special case for swapping list head */
128 if (((*head)->next) &&
129 ((*head)->length < (*head)->next->length)) {
130 out_of_order++;
131 current_res = *head;
132 *head = (*head)->next;
133 current_res->next = (*head)->next;
134 (*head)->next = current_res;
135 }
136
137 current_res = *head;
138
139 while (current_res->next && current_res->next->next) {
140 if (current_res->next->length < current_res->next->next->length) {
141 out_of_order++;
142 next_res = current_res->next;
143 current_res->next = current_res->next->next;
144 current_res = current_res->next;
145 next_res->next = current_res->next;
146 current_res->next = next_res;
147 } else
148 current_res = current_res->next;
149 }
150 } /* End of out_of_order loop */
151
152 return 0;
153}
154#endif
155
156/**
157 * get_io_resource - get resource for I/O ports
158 *
159 * this function sorts the resource list by size and then
160 * returns the first node of "size" length that is not in the
161 * ISA aliasing window. If it finds a node larger than "size"
162 * it will split it up.
163 *
164 * size must be a power of two.
165 *
166 * difference from get_resource is handling of ISA aliasing space.
167 *
168 */
169struct pci_resource *acpiphp_get_io_resource (struct pci_resource **head, u32 size)
170{
171 struct pci_resource *prevnode;
172 struct pci_resource *node;
173 struct pci_resource *split_node;
174 u64 temp_qword;
175
176 if (!(*head))
177 return NULL;
178
179 if (acpiphp_resource_sort_and_combine(head))
180 return NULL;
181
182 if (sort_by_size(head))
183 return NULL;
184
185 for (node = *head; node; node = node->next) {
186 if (node->length < size)
187 continue;
188
189 if (node->base & (size - 1)) {
190 /* this one isn't base aligned properly
191 so we'll make a new entry and split it up */
192 temp_qword = (node->base | (size-1)) + 1;
193
194 /* Short circuit if adjusted size is too small */
195 if ((node->length - (temp_qword - node->base)) < size)
196 continue;
197
198 split_node = acpiphp_make_resource(node->base, temp_qword - node->base);
199
200 if (!split_node)
201 return NULL;
202
203 node->base = temp_qword;
204 node->length -= split_node->length;
205
206 /* Put it in the list */
207 split_node->next = node->next;
208 node->next = split_node;
209 } /* End of non-aligned base */
210
211 /* Don't need to check if too small since we already did */
212 if (node->length > size) {
213 /* this one is longer than we need
214 so we'll make a new entry and split it up */
215 split_node = acpiphp_make_resource(node->base + size, node->length - size);
216
217 if (!split_node)
218 return NULL;
219
220 node->length = size;
221
222 /* Put it in the list */
223 split_node->next = node->next;
224 node->next = split_node;
225 } /* End of too big on top end */
226
227 /* For IO make sure it's not in the ISA aliasing space */
228 if ((node->base & 0x300L) && !(node->base & 0xfffff000))
229 continue;
230
231 /* If we got here, then it is the right size
232 Now take it out of the list */
233 if (*head == node) {
234 *head = node->next;
235 } else {
236 prevnode = *head;
237 while (prevnode->next != node)
238 prevnode = prevnode->next;
239
240 prevnode->next = node->next;
241 }
242 node->next = NULL;
243 /* Stop looping */
244 break;
245 }
246
247 return node;
248}
249
250
251#if 0
252/**
253 * get_max_resource - get the largest resource
254 *
255 * Gets the largest node that is at least "size" big from the
256 * list pointed to by head. It aligns the node on top and bottom
257 * to "size" alignment before returning it.
258 */
259static struct pci_resource *acpiphp_get_max_resource (struct pci_resource **head, u32 size)
260{
261 struct pci_resource *max;
262 struct pci_resource *temp;
263 struct pci_resource *split_node;
264 u64 temp_qword;
265
266 if (!(*head))
267 return NULL;
268
269 if (acpiphp_resource_sort_and_combine(head))
270 return NULL;
271
272 if (sort_by_max_size(head))
273 return NULL;
274
275 for (max = *head;max; max = max->next) {
276
277 /* If not big enough we could probably just bail,
278 instead we'll continue to the next. */
279 if (max->length < size)
280 continue;
281
282 if (max->base & (size - 1)) {
283 /* this one isn't base aligned properly
284 so we'll make a new entry and split it up */
285 temp_qword = (max->base | (size-1)) + 1;
286
287 /* Short circuit if adjusted size is too small */
288 if ((max->length - (temp_qword - max->base)) < size)
289 continue;
290
291 split_node = acpiphp_make_resource(max->base, temp_qword - max->base);
292
293 if (!split_node)
294 return NULL;
295
296 max->base = temp_qword;
297 max->length -= split_node->length;
298
299 /* Put it next in the list */
300 split_node->next = max->next;
301 max->next = split_node;
302 }
303
304 if ((max->base + max->length) & (size - 1)) {
305 /* this one isn't end aligned properly at the top
306 so we'll make a new entry and split it up */
307 temp_qword = ((max->base + max->length) & ~(size - 1));
308
309 split_node = acpiphp_make_resource(temp_qword,
310 max->length + max->base - temp_qword);
311
312 if (!split_node)
313 return NULL;
314
315 max->length -= split_node->length;
316
317 /* Put it in the list */
318 split_node->next = max->next;
319 max->next = split_node;
320 }
321
322 /* Make sure it didn't shrink too much when we aligned it */
323 if (max->length < size)
324 continue;
325
326 /* Now take it out of the list */
327 temp = (struct pci_resource*) *head;
328 if (temp == max) {
329 *head = max->next;
330 } else {
331 while (temp && temp->next != max) {
332 temp = temp->next;
333 }
334
335 temp->next = max->next;
336 }
337
338 max->next = NULL;
339 return max;
340 }
341
342 /* If we get here, we couldn't find one */
343 return NULL;
344}
345#endif
346
347/**
348 * acpiphp_get_resource - get resource (mem, pfmem)
349 *
350 * This function sorts the resource list by size and then
351 * returns the first node of "size" length. If it finds a node
352 * larger than "size", it will split it up.
353 *
354 * size must be a power of two.
355 *
356 */
357struct pci_resource *acpiphp_get_resource (struct pci_resource **head, u32 size)
358{
359 struct pci_resource *prevnode;
360 struct pci_resource *node;
361 struct pci_resource *split_node;
362 u64 temp_qword;
363
364 if (!(*head))
365 return NULL;
366
367 if (acpiphp_resource_sort_and_combine(head))
368 return NULL;
369
370 if (sort_by_size(head))
371 return NULL;
372
373 for (node = *head; node; node = node->next) {
374 dbg("%s: req_size =%x node=%p, base=%x, length=%x\n",
375 __FUNCTION__, size, node, (u32)node->base, node->length);
376 if (node->length < size)
377 continue;
378
379 if (node->base & (size - 1)) {
380 dbg("%s: not aligned\n", __FUNCTION__);
381 /* this one isn't base aligned properly
382 so we'll make a new entry and split it up */
383 temp_qword = (node->base | (size-1)) + 1;
384
385 /* Short circuit if adjusted size is too small */
386 if ((node->length - (temp_qword - node->base)) < size)
387 continue;
388
389 split_node = acpiphp_make_resource(node->base, temp_qword - node->base);
390
391 if (!split_node)
392 return NULL;
393
394 node->base = temp_qword;
395 node->length -= split_node->length;
396
397 /* Put it in the list */
398 split_node->next = node->next;
399 node->next = split_node;
400 } /* End of non-aligned base */
401
402 /* Don't need to check if too small since we already did */
403 if (node->length > size) {
404 dbg("%s: too big\n", __FUNCTION__);
405 /* this one is longer than we need
406 so we'll make a new entry and split it up */
407 split_node = acpiphp_make_resource(node->base + size, node->length - size);
408
409 if (!split_node)
410 return NULL;
411
412 node->length = size;
413
414 /* Put it in the list */
415 split_node->next = node->next;
416 node->next = split_node;
417 } /* End of too big on top end */
418
419 dbg("%s: got one!!!\n", __FUNCTION__);
420		/* If we got here, then it is the right size.
421		   Now take it out of the list */
422 if (*head == node) {
423 *head = node->next;
424 } else {
425 prevnode = *head;
426 while (prevnode->next != node)
427 prevnode = prevnode->next;
428
429 prevnode->next = node->next;
430 }
431 node->next = NULL;
432 /* Stop looping */
433 break;
434 }
435 return node;
436}
437
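The splitting logic above hinges on rounding the base up to the next "size"-aligned boundary and then trimming the surplus tail into a new node. Below is a minimal user-space sketch of that arithmetic, assuming "size" is a power of two; the helper name align_up and the base/length values are hypothetical, not taken from any real bridge.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Round base up to the next multiple of size (size must be a power of two),
 * the same (base | (size - 1)) + 1 trick acpiphp_get_resource() uses when
 * the base is not already aligned. */
static uint64_t align_up(uint64_t base, uint32_t size)
{
	if (base & (size - 1))
		return (base | (uint64_t)(size - 1)) + 1;
	return base;
}

int main(void)
{
	uint64_t base = 0x10000c00;	/* not 4 KiB aligned */
	uint64_t length = 0x3000;	/* node covers 12 KiB */
	uint32_t size = 0x1000;		/* request: 4 KiB */

	uint64_t new_base = align_up(base, size);
	uint64_t head_split = new_base - base;			/* leading fragment */
	uint64_t tail_split = length - head_split - size;	/* trailing fragment */

	assert((new_base & (size - 1)) == 0);
	printf("allocate [%#llx, %#llx), head split %#llx bytes, tail split %#llx bytes\n",
	       (unsigned long long)new_base,
	       (unsigned long long)(new_base + size),
	       (unsigned long long)head_split,
	       (unsigned long long)tail_split);
	return 0;
}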
438/**
439 * acpiphp_get_resource_with_base - get resource with a specific base address
440 *
441 * This function returns the first node of "size" length
442 * located at the specified base address.
443 * If it finds a node larger than "size", it will split it up.
444 *
445 * size must be a power of two.
446 *
447 */
448struct pci_resource *acpiphp_get_resource_with_base (struct pci_resource **head, u64 base, u32 size)
449{
450 struct pci_resource *prevnode;
451 struct pci_resource *node;
452 struct pci_resource *split_node;
453 u64 temp_qword;
454
455 if (!(*head))
456 return NULL;
457
458 if (acpiphp_resource_sort_and_combine(head))
459 return NULL;
460
461 for (node = *head; node; node = node->next) {
462 dbg(": 1st req_base=%x req_size =%x node=%p, base=%x, length=%x\n",
463 (u32)base, size, node, (u32)node->base, node->length);
464 if (node->base > base)
465 continue;
466
467 if ((node->base + node->length) < (base + size))
468 continue;
469
470 if (node->base < base) {
471 dbg(": split 1\n");
472			/* this node starts before the requested base
473			   so we'll split off the leading part into a new entry */
474 temp_qword = base;
475
476 /* Short circuit if adjusted size is too small */
477 if ((node->length - (temp_qword - node->base)) < size)
478 continue;
479
480 split_node = acpiphp_make_resource(node->base, temp_qword - node->base);
481
482 if (!split_node)
483 return NULL;
484
485 node->base = temp_qword;
486 node->length -= split_node->length;
487
488 /* Put it in the list */
489 split_node->next = node->next;
490 node->next = split_node;
491 }
492
493 dbg(": 2nd req_base=%x req_size =%x node=%p, base=%x, length=%x\n",
494 (u32)base, size, node, (u32)node->base, node->length);
495
496 /* Don't need to check if too small since we already did */
497 if (node->length > size) {
498 dbg(": split 2\n");
499 /* this one is longer than we need
500 so we'll make a new entry and split it up */
501 split_node = acpiphp_make_resource(node->base + size, node->length - size);
502
503 if (!split_node)
504 return NULL;
505
506 node->length = size;
507
508 /* Put it in the list */
509 split_node->next = node->next;
510 node->next = split_node;
511 } /* End of too big on top end */
512
513 dbg(": got one!!!\n");
514		/* If we got here, then it is the right size.
515		   Now take it out of the list */
516 if (*head == node) {
517 *head = node->next;
518 } else {
519 prevnode = *head;
520 while (prevnode->next != node)
521 prevnode = prevnode->next;
522
523 prevnode->next = node->next;
524 }
525 node->next = NULL;
526 /* Stop looping */
527 break;
528 }
529 return node;
530}
531
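To make the two splits above concrete: the covering node is first cut at the requested base, then trimmed to the requested length, so a leading and a trailing fragment stay on the free list. A hedged user-space sketch with hypothetical values (this is not driver code; struct range and the variable names are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t base;
	uint32_t length;
};

int main(void)
{
	struct range node = { 0x8000, 0x4000 };	/* free window [0x8000, 0xc000) */
	uint64_t want_base = 0x9000;
	uint32_t want_size = 0x1000;

	/* split 1: carve off the leading fragment [node.base, want_base) */
	struct range head = { node.base, (uint32_t)(want_base - node.base) };
	node.base = want_base;
	node.length -= head.length;

	/* split 2: carve off the trailing fragment [want_base + want_size, end) */
	struct range tail = { want_base + want_size, node.length - want_size };
	node.length = want_size;

	printf("head   [%#llx, %#llx)\n", (unsigned long long)head.base,
	       (unsigned long long)(head.base + head.length));
	printf("result [%#llx, %#llx)\n", (unsigned long long)node.base,
	       (unsigned long long)(node.base + node.length));
	printf("tail   [%#llx, %#llx)\n", (unsigned long long)tail.base,
	       (unsigned long long)(tail.base + tail.length));
	return 0;
}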
532
533/**
534 * acpiphp_resource_sort_and_combine - sort list by base and merge adjacent nodes
535 *
536 * Sorts all of the nodes in the list in ascending order by
537 * their base addresses. Also does garbage collection by
538 * combining adjacent nodes.
539 *
540 * Returns 0 on success.
541 */
542int acpiphp_resource_sort_and_combine (struct pci_resource **head)
543{
544 struct pci_resource *node1;
545 struct pci_resource *node2;
546 int out_of_order = 1;
547
548 if (!(*head))
549 return 1;
550
551 dbg("*head->next = %p\n",(*head)->next);
552
553 if (!(*head)->next)
554 return 0; /* only one item on the list, already sorted! */
555
556 dbg("*head->base = 0x%x\n",(u32)(*head)->base);
557 dbg("*head->next->base = 0x%x\n", (u32)(*head)->next->base);
558 while (out_of_order) {
559 out_of_order = 0;
560
561 /* Special case for swapping list head */
562 if (((*head)->next) &&
563 ((*head)->base > (*head)->next->base)) {
564 node1 = *head;
565 (*head) = (*head)->next;
566 node1->next = (*head)->next;
567 (*head)->next = node1;
568 out_of_order++;
569 }
570
571 node1 = (*head);
572
573 while (node1->next && node1->next->next) {
574 if (node1->next->base > node1->next->next->base) {
575 out_of_order++;
576 node2 = node1->next;
577 node1->next = node1->next->next;
578 node1 = node1->next;
579 node2->next = node1->next;
580 node1->next = node2;
581 } else
582 node1 = node1->next;
583 }
584 } /* End of out_of_order loop */
585
586 node1 = *head;
587
588 while (node1 && node1->next) {
589 if ((node1->base + node1->length) == node1->next->base) {
590 /* Combine */
591 dbg("8..\n");
592 node1->length += node1->next->length;
593 node2 = node1->next;
594 node1->next = node1->next->next;
595 kfree(node2);
596 } else
597 node1 = node1->next;
598 }
599
600 return 0;
601}
602
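The pass above is a bubble sort by base address followed by a merge of ranges that sit back to back. Below is a hedged, stand-alone user-space sketch of the same idea, not the driver code: it uses a pointer-to-pointer walk instead of the explicit head special case, malloc/free instead of the kernel allocator, and hypothetical input ranges. The names (struct res, mk, sort_and_combine) are illustrative only.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct res {
	uint64_t base;
	uint32_t length;
	struct res *next;
};

/* Build a node; allocation failure handling is omitted for brevity. */
static struct res *mk(uint64_t base, uint32_t length, struct res *next)
{
	struct res *r = malloc(sizeof(*r));

	r->base = base;
	r->length = length;
	r->next = next;
	return r;
}

static void sort_and_combine(struct res **head)
{
	struct res **pp, *a, *b, *r, *dead;
	int swapped = 1;

	/* bubble sort by base address: swap adjacent out-of-order nodes */
	while (swapped) {
		swapped = 0;
		for (pp = head; *pp && (*pp)->next; pp = &(*pp)->next) {
			a = *pp;
			b = a->next;
			if (a->base > b->base) {
				a->next = b->next;
				b->next = a;
				*pp = b;
				swapped = 1;
			}
		}
	}

	/* combine nodes that sit back to back */
	r = *head;
	while (r && r->next) {
		if (r->base + r->length == r->next->base) {
			dead = r->next;
			r->length += dead->length;
			r->next = dead->next;
			free(dead);
		} else {
			r = r->next;
		}
	}
}

int main(void)
{
	struct res *head, *r;

	/* three 4 KiB ranges given out of order; they are contiguous once sorted */
	head = mk(0x3000, 0x1000, mk(0x1000, 0x1000, mk(0x2000, 0x1000, NULL)));

	sort_and_combine(&head);
	for (r = head; r; r = r->next)
		printf("[%#llx, %#llx)\n", (unsigned long long)r->base,
		       (unsigned long long)(r->base + r->length));
	return 0;	/* prints a single combined range [0x1000, 0x4000) */
}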
603
604/**
605 * acpiphp_make_resource - make resource structure
606 * @base: base address of a resource
607 * @length: length of a resource
608 */
609struct pci_resource *acpiphp_make_resource (u64 base, u32 length)
610{
611 struct pci_resource *res;
612
613 res = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
614 if (res) {
615 memset(res, 0, sizeof(struct pci_resource));
616 res->base = base;
617 res->length = length;
618 }
619
620 return res;
621}
622
623
624/**
625 * acpiphp_move_resource - move linked resources from one to another
626 * @from: head of linked resource list
627 * @to: head of linked resource list
628 */
629void acpiphp_move_resource (struct pci_resource **from, struct pci_resource **to)
630{
631 struct pci_resource *tmp;
632
633 while (*from) {
634 tmp = (*from)->next;
635 (*from)->next = *to;
636 *to = *from;
637 *from = tmp;
638 }
639
640 /* *from = NULL is guaranteed */
641}
642
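One property worth noting: because each node is pushed onto the front of the destination list, the moved nodes end up in reverse order, ahead of anything already on @to. A small user-space sketch of that behaviour follows; struct node, move_list and the node ids are hypothetical, only the loop shape mirrors acpiphp_move_resource().

#include <stdio.h>

struct node {
	int id;
	struct node *next;
};

/* Pop the head of @from and push it onto the front of @to until @from
 * is empty, the same loop shape as acpiphp_move_resource(). */
static void move_list(struct node **from, struct node **to)
{
	struct node *tmp;

	while (*from) {
		tmp = (*from)->next;
		(*from)->next = *to;
		*to = *from;
		*from = tmp;
	}
}

int main(void)
{
	struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
	struct node *from = &a, *to = NULL, *n;

	move_list(&from, &to);
	for (n = to; n; n = n->next)
		printf("%d ", n->id);	/* prints "3 2 1": order is reversed */
	printf("\n");
	return 0;
}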
643
644/**
645 * acpiphp_free_resource - free all linked resources
646 * @res: head of linked resource list
647 */
648void acpiphp_free_resource (struct pci_resource **res)
649{
650 struct pci_resource *tmp;
651
652 while (*res) {
653 tmp = (*res)->next;
654 kfree(*res);
655 *res = tmp;
656 }
657
658 /* *res = NULL is guaranteed */
659}
660
661
662/* debug support functions; will go away sometime :) */
663static void dump_resource(struct pci_resource *head)
664{
665 struct pci_resource *p;
666 int cnt;
667
668 p = head;
669 cnt = 0;
670
671 while (p) {
672 dbg("[%02d] %08x - %08x\n",
673 cnt++, (u32)p->base, (u32)p->base + p->length - 1);
674 p = p->next;
675 }
676}
677
678void acpiphp_dump_resource(struct acpiphp_bridge *bridge)
679{
680 dbg("I/O resource:\n");
681 dump_resource(bridge->io_head);
682 dbg("MEM resource:\n");
683 dump_resource(bridge->mem_head);
684 dbg("PMEM resource:\n");
685 dump_resource(bridge->p_mem_head);
686 dbg("BUS resource:\n");
687 dump_resource(bridge->bus_head);
688}
689
690void acpiphp_dump_func_resource(struct acpiphp_func *func)
691{
692 dbg("I/O resource:\n");
693 dump_resource(func->io_head);
694 dbg("MEM resource:\n");
695 dump_resource(func->mem_head);
696 dbg("PMEM resource:\n");
697 dump_resource(func->p_mem_head);
698 dbg("BUS resource:\n");
699 dump_resource(func->bus_head);
700}
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index afbccfa5217d..8c6d3987d461 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -60,6 +60,7 @@ static void __iomem *smbios_start;
60static void __iomem *cpqhp_rom_start; 60static void __iomem *cpqhp_rom_start;
61static int power_mode; 61static int power_mode;
62static int debug; 62static int debug;
63static int initialized;
63 64
64#define DRIVER_VERSION "0.9.8" 65#define DRIVER_VERSION "0.9.8"
65#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>" 66#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>"
@@ -1271,7 +1272,6 @@ static int one_time_init(void)
1271{ 1272{
1272 int loop; 1273 int loop;
1273 int retval = 0; 1274 int retval = 0;
1274 static int initialized = 0;
1275 1275
1276 if (initialized) 1276 if (initialized)
1277 return 0; 1277 return 0;
@@ -1441,7 +1441,8 @@ static void __exit unload_cpqphpd(void)
1441 } 1441 }
1442 1442
1443 // Stop the notification mechanism 1443 // Stop the notification mechanism
1444 cpqhp_event_stop_thread(); 1444 if (initialized)
1445 cpqhp_event_stop_thread();
1445 1446
1446 //unmap the rom address 1447 //unmap the rom address
1447 if (cpqhp_rom_start) 1448 if (cpqhp_rom_start)
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 30206ac43c44..b5ab9aa6ff7c 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -28,10 +28,10 @@ static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
28static kmem_cache_t* msi_cachep; 28static kmem_cache_t* msi_cachep;
29 29
30static int pci_msi_enable = 1; 30static int pci_msi_enable = 1;
31static int last_alloc_vector = 0; 31static int last_alloc_vector;
32static int nr_released_vectors = 0; 32static int nr_released_vectors;
33static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS; 33static int nr_reserved_vectors = NR_HP_RESERVED_VECTORS;
34static int nr_msix_devices = 0; 34static int nr_msix_devices;
35 35
36#ifndef CONFIG_X86_IO_APIC 36#ifndef CONFIG_X86_IO_APIC
37int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1}; 37int vector_irq[NR_VECTORS] = { [0 ... NR_VECTORS - 1] = -1};
@@ -170,44 +170,30 @@ static unsigned int startup_msi_irq_wo_maskbit(unsigned int vector)
170 return 0; /* never anything pending */ 170 return 0; /* never anything pending */
171} 171}
172 172
173static void release_msi(unsigned int vector); 173static unsigned int startup_msi_irq_w_maskbit(unsigned int vector)
174static void shutdown_msi_irq(unsigned int vector)
175{
176 release_msi(vector);
177}
178
179#define shutdown_msi_irq_wo_maskbit shutdown_msi_irq
180static void enable_msi_irq_wo_maskbit(unsigned int vector) {}
181static void disable_msi_irq_wo_maskbit(unsigned int vector) {}
182static void ack_msi_irq_wo_maskbit(unsigned int vector) {}
183static void end_msi_irq_wo_maskbit(unsigned int vector)
184{ 174{
185 move_msi(vector); 175 startup_msi_irq_wo_maskbit(vector);
186 ack_APIC_irq(); 176 unmask_MSI_irq(vector);
177 return 0; /* never anything pending */
187} 178}
188 179
189static unsigned int startup_msi_irq_w_maskbit(unsigned int vector) 180static void shutdown_msi_irq(unsigned int vector)
190{ 181{
191 struct msi_desc *entry; 182 struct msi_desc *entry;
192 unsigned long flags; 183 unsigned long flags;
193 184
194 spin_lock_irqsave(&msi_lock, flags); 185 spin_lock_irqsave(&msi_lock, flags);
195 entry = msi_desc[vector]; 186 entry = msi_desc[vector];
196 if (!entry || !entry->dev) { 187 if (entry && entry->dev)
197 spin_unlock_irqrestore(&msi_lock, flags); 188 entry->msi_attrib.state = 0; /* Mark it not active */
198 return 0;
199 }
200 entry->msi_attrib.state = 1; /* Mark it active */
201 spin_unlock_irqrestore(&msi_lock, flags); 189 spin_unlock_irqrestore(&msi_lock, flags);
202
203 unmask_MSI_irq(vector);
204 return 0; /* never anything pending */
205} 190}
206 191
207#define shutdown_msi_irq_w_maskbit shutdown_msi_irq 192static void end_msi_irq_wo_maskbit(unsigned int vector)
208#define enable_msi_irq_w_maskbit unmask_MSI_irq 193{
209#define disable_msi_irq_w_maskbit mask_MSI_irq 194 move_msi(vector);
210#define ack_msi_irq_w_maskbit mask_MSI_irq 195 ack_APIC_irq();
196}
211 197
212static void end_msi_irq_w_maskbit(unsigned int vector) 198static void end_msi_irq_w_maskbit(unsigned int vector)
213{ 199{
@@ -216,6 +202,10 @@ static void end_msi_irq_w_maskbit(unsigned int vector)
216 ack_APIC_irq(); 202 ack_APIC_irq();
217} 203}
218 204
205static void do_nothing(unsigned int vector)
206{
207}
208
219/* 209/*
220 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices, 210 * Interrupt Type for MSI-X PCI/PCI-X/PCI-Express Devices,
221 * which implement the MSI-X Capability Structure. 211 * which implement the MSI-X Capability Structure.
@@ -223,10 +213,10 @@ static void end_msi_irq_w_maskbit(unsigned int vector)
223static struct hw_interrupt_type msix_irq_type = { 213static struct hw_interrupt_type msix_irq_type = {
224 .typename = "PCI-MSI-X", 214 .typename = "PCI-MSI-X",
225 .startup = startup_msi_irq_w_maskbit, 215 .startup = startup_msi_irq_w_maskbit,
226 .shutdown = shutdown_msi_irq_w_maskbit, 216 .shutdown = shutdown_msi_irq,
227 .enable = enable_msi_irq_w_maskbit, 217 .enable = unmask_MSI_irq,
228 .disable = disable_msi_irq_w_maskbit, 218 .disable = mask_MSI_irq,
229 .ack = ack_msi_irq_w_maskbit, 219 .ack = mask_MSI_irq,
230 .end = end_msi_irq_w_maskbit, 220 .end = end_msi_irq_w_maskbit,
231 .set_affinity = set_msi_irq_affinity 221 .set_affinity = set_msi_irq_affinity
232}; 222};
@@ -239,10 +229,10 @@ static struct hw_interrupt_type msix_irq_type = {
239static struct hw_interrupt_type msi_irq_w_maskbit_type = { 229static struct hw_interrupt_type msi_irq_w_maskbit_type = {
240 .typename = "PCI-MSI", 230 .typename = "PCI-MSI",
241 .startup = startup_msi_irq_w_maskbit, 231 .startup = startup_msi_irq_w_maskbit,
242 .shutdown = shutdown_msi_irq_w_maskbit, 232 .shutdown = shutdown_msi_irq,
243 .enable = enable_msi_irq_w_maskbit, 233 .enable = unmask_MSI_irq,
244 .disable = disable_msi_irq_w_maskbit, 234 .disable = mask_MSI_irq,
245 .ack = ack_msi_irq_w_maskbit, 235 .ack = mask_MSI_irq,
246 .end = end_msi_irq_w_maskbit, 236 .end = end_msi_irq_w_maskbit,
247 .set_affinity = set_msi_irq_affinity 237 .set_affinity = set_msi_irq_affinity
248}; 238};
@@ -255,10 +245,10 @@ static struct hw_interrupt_type msi_irq_w_maskbit_type = {
255static struct hw_interrupt_type msi_irq_wo_maskbit_type = { 245static struct hw_interrupt_type msi_irq_wo_maskbit_type = {
256 .typename = "PCI-MSI", 246 .typename = "PCI-MSI",
257 .startup = startup_msi_irq_wo_maskbit, 247 .startup = startup_msi_irq_wo_maskbit,
258 .shutdown = shutdown_msi_irq_wo_maskbit, 248 .shutdown = shutdown_msi_irq,
259 .enable = enable_msi_irq_wo_maskbit, 249 .enable = do_nothing,
260 .disable = disable_msi_irq_wo_maskbit, 250 .disable = do_nothing,
261 .ack = ack_msi_irq_wo_maskbit, 251 .ack = do_nothing,
262 .end = end_msi_irq_wo_maskbit, 252 .end = end_msi_irq_wo_maskbit,
263 .set_affinity = set_msi_irq_affinity 253 .set_affinity = set_msi_irq_affinity
264}; 254};
@@ -407,7 +397,7 @@ static struct msi_desc* alloc_msi_entry(void)
407{ 397{
408 struct msi_desc *entry; 398 struct msi_desc *entry;
409 399
410 entry = (struct msi_desc*) kmem_cache_alloc(msi_cachep, SLAB_KERNEL); 400 entry = kmem_cache_alloc(msi_cachep, SLAB_KERNEL);
411 if (!entry) 401 if (!entry)
412 return NULL; 402 return NULL;
413 403
@@ -796,18 +786,6 @@ void pci_disable_msi(struct pci_dev* dev)
796 } 786 }
797} 787}
798 788
799static void release_msi(unsigned int vector)
800{
801 struct msi_desc *entry;
802 unsigned long flags;
803
804 spin_lock_irqsave(&msi_lock, flags);
805 entry = msi_desc[vector];
806 if (entry && entry->dev)
807 entry->msi_attrib.state = 0; /* Mark it not active */
808 spin_unlock_irqrestore(&msi_lock, flags);
809}
810
811static int msi_free_vector(struct pci_dev* dev, int vector, int reassign) 789static int msi_free_vector(struct pci_dev* dev, int vector, int reassign)
812{ 790{
813 struct msi_desc *entry; 791 struct msi_desc *entry;
@@ -924,7 +902,7 @@ static int reroute_msix_table(int head, struct msix_entry *entries, int *nvec)
924/** 902/**
925 * pci_enable_msix - configure device's MSI-X capability structure 903 * pci_enable_msix - configure device's MSI-X capability structure
926 * @dev: pointer to the pci_dev data structure of MSI-X device function 904 * @dev: pointer to the pci_dev data structure of MSI-X device function
927 * @data: pointer to an array of MSI-X entries 905 * @entries: pointer to an array of MSI-X entries
928 * @nvec: number of MSI-X vectors requested for allocation by device driver 906 * @nvec: number of MSI-X vectors requested for allocation by device driver
929 * 907 *
930 * Setup the MSI-X capability structure of device function with the number 908 * Setup the MSI-X capability structure of device function with the number
diff --git a/drivers/pci/msi.h b/drivers/pci/msi.h
index bef21ae3cbd0..390f1851c0f1 100644
--- a/drivers/pci/msi.h
+++ b/drivers/pci/msi.h
@@ -41,11 +41,11 @@ static inline void move_msi(int vector) {}
41#define PCI_MSIX_FLAGS_BIRMASK (7 << 0) 41#define PCI_MSIX_FLAGS_BIRMASK (7 << 0)
42#define PCI_MSIX_FLAGS_BITMASK (1 << 0) 42#define PCI_MSIX_FLAGS_BITMASK (1 << 0)
43 43
44#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
45#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
46#define PCI_MSIX_ENTRY_DATA_OFFSET 8
47#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12
48#define PCI_MSIX_ENTRY_SIZE 16 44#define PCI_MSIX_ENTRY_SIZE 16
45#define PCI_MSIX_ENTRY_LOWER_ADDR_OFFSET 0
46#define PCI_MSIX_ENTRY_UPPER_ADDR_OFFSET 4
47#define PCI_MSIX_ENTRY_DATA_OFFSET 8
48#define PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET 12
49 49
50#define msi_control_reg(base) (base + PCI_MSI_FLAGS) 50#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
51#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO) 51#define msi_lower_address_reg(base) (base + PCI_MSI_ADDRESS_LO)
@@ -64,7 +64,6 @@ static inline void move_msi(int vector) {}
64#define msi_enable(control, num) multi_msi_enable(control, num); \ 64#define msi_enable(control, num) multi_msi_enable(control, num); \
65 control |= PCI_MSI_FLAGS_ENABLE 65 control |= PCI_MSI_FLAGS_ENABLE
66 66
67#define msix_control_reg msi_control_reg
68#define msix_table_offset_reg(base) (base + 0x04) 67#define msix_table_offset_reg(base) (base + 0x04)
69#define msix_pba_offset_reg(base) (base + 0x08) 68#define msix_pba_offset_reg(base) (base + 0x08)
70#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE 69#define msix_enable(control) control |= PCI_MSIX_FLAGS_ENABLE
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index a15f94072a6f..cc9d65388e62 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -60,15 +60,18 @@ resource_show(struct device * dev, struct device_attribute *attr, char * buf)
60 char * str = buf; 60 char * str = buf;
61 int i; 61 int i;
62 int max = 7; 62 int max = 7;
63 u64 start, end;
63 64
64 if (pci_dev->subordinate) 65 if (pci_dev->subordinate)
65 max = DEVICE_COUNT_RESOURCE; 66 max = DEVICE_COUNT_RESOURCE;
66 67
67 for (i = 0; i < max; i++) { 68 for (i = 0; i < max; i++) {
68 str += sprintf(str,"0x%016lx 0x%016lx 0x%016lx\n", 69 struct resource *res = &pci_dev->resource[i];
69 pci_resource_start(pci_dev,i), 70 pci_resource_to_user(pci_dev, i, res, &start, &end);
70 pci_resource_end(pci_dev,i), 71 str += sprintf(str,"0x%016llx 0x%016llx 0x%016llx\n",
71 pci_resource_flags(pci_dev,i)); 72 (unsigned long long)start,
73 (unsigned long long)end,
74 (unsigned long long)res->flags);
72 } 75 }
73 return (str - buf); 76 return (str - buf);
74} 77}
@@ -313,8 +316,21 @@ pci_mmap_resource(struct kobject *kobj, struct bin_attribute *attr,
313 struct device, kobj)); 316 struct device, kobj));
314 struct resource *res = (struct resource *)attr->private; 317 struct resource *res = (struct resource *)attr->private;
315 enum pci_mmap_state mmap_type; 318 enum pci_mmap_state mmap_type;
319 u64 start, end;
320 int i;
316 321
317 vma->vm_pgoff += res->start >> PAGE_SHIFT; 322 for (i = 0; i < PCI_ROM_RESOURCE; i++)
323 if (res == &pdev->resource[i])
324 break;
325 if (i >= PCI_ROM_RESOURCE)
326 return -ENODEV;
327
328 /* pci_mmap_page_range() expects the same kind of entry as coming
329 * from /proc/bus/pci/ which is a "user visible" value. If this is
330 * different from the resource itself, arch will do necessary fixup.
331 */
332 pci_resource_to_user(pdev, i, res, &start, &end);
333 vma->vm_pgoff += start >> PAGE_SHIFT;
318 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io; 334 mmap_type = res->flags & IORESOURCE_MEM ? pci_mmap_mem : pci_mmap_io;
319 335
320 return pci_mmap_page_range(pdev, vma, mmap_type, 0); 336 return pci_mmap_page_range(pdev, vma, mmap_type, 0);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index fd48b201eb53..6a0a82f0508b 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -374,8 +374,11 @@ struct pci_bus * __devinit pci_add_new_bus(struct pci_bus *parent, struct pci_de
374 struct pci_bus *child; 374 struct pci_bus *child;
375 375
376 child = pci_alloc_child_bus(parent, dev, busnr); 376 child = pci_alloc_child_bus(parent, dev, busnr);
377 if (child) 377 if (child) {
378 spin_lock(&pci_bus_lock);
378 list_add_tail(&child->node, &parent->children); 379 list_add_tail(&child->node, &parent->children);
380 spin_unlock(&pci_bus_lock);
381 }
379 return child; 382 return child;
380} 383}
381 384
@@ -411,7 +414,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
411{ 414{
412 struct pci_bus *child; 415 struct pci_bus *child;
413 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS); 416 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
414 u32 buses; 417 u32 buses, i;
415 u16 bctl; 418 u16 bctl;
416 419
417 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses); 420 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
@@ -447,7 +450,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
447 return max; 450 return max;
448 } 451 }
449 452
450 child = pci_alloc_child_bus(bus, dev, busnr); 453 child = pci_add_new_bus(bus, dev, busnr);
451 if (!child) 454 if (!child)
452 return max; 455 return max;
453 child->primary = buses & 0xFF; 456 child->primary = buses & 0xFF;
@@ -470,7 +473,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
470 /* Clear errors */ 473 /* Clear errors */
471 pci_write_config_word(dev, PCI_STATUS, 0xffff); 474 pci_write_config_word(dev, PCI_STATUS, 0xffff);
472 475
473 child = pci_alloc_child_bus(bus, dev, ++max); 476 /* Prevent assigning a bus number that already exists.
477 * This can happen when a bridge is hot-plugged */
478 if (pci_find_bus(pci_domain_nr(bus), max+1))
479 return max;
480 child = pci_add_new_bus(bus, dev, ++max);
474 buses = (buses & 0xff000000) 481 buses = (buses & 0xff000000)
475 | ((unsigned int)(child->primary) << 0) 482 | ((unsigned int)(child->primary) << 0)
476 | ((unsigned int)(child->secondary) << 8) 483 | ((unsigned int)(child->secondary) << 8)
@@ -501,7 +508,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
501 * as cards with a PCI-to-PCI bridge can be 508 * as cards with a PCI-to-PCI bridge can be
502 * inserted later. 509 * inserted later.
503 */ 510 */
504 max += CARDBUS_RESERVE_BUSNR; 511 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++)
512 if (pci_find_bus(pci_domain_nr(bus),
513 max+i+1))
514 break;
515 max += i;
505 } 516 }
506 /* 517 /*
507 * Set the subordinate bus number to its real value. 518 * Set the subordinate bus number to its real value.
@@ -757,7 +768,9 @@ pci_scan_single_device(struct pci_bus *bus, int devfn)
757 * and the bus list for fixup functions, etc. 768 * and the bus list for fixup functions, etc.
758 */ 769 */
759 INIT_LIST_HEAD(&dev->global_list); 770 INIT_LIST_HEAD(&dev->global_list);
771 spin_lock(&pci_bus_lock);
760 list_add_tail(&dev->bus_list, &bus->devices); 772 list_add_tail(&dev->bus_list, &bus->devices);
773 spin_unlock(&pci_bus_lock);
761 774
762 return dev; 775 return dev;
763} 776}
@@ -878,7 +891,9 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, int bus,
878 pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus); 891 pr_debug("PCI: Bus %04x:%02x already known\n", pci_domain_nr(b), bus);
879 goto err_out; 892 goto err_out;
880 } 893 }
894 spin_lock(&pci_bus_lock);
881 list_add_tail(&b->node, &pci_root_buses); 895 list_add_tail(&b->node, &pci_root_buses);
896 spin_unlock(&pci_bus_lock);
882 897
883 memset(dev, 0, sizeof(*dev)); 898 memset(dev, 0, sizeof(*dev));
884 dev->parent = parent; 899 dev->parent = parent;
@@ -911,8 +926,6 @@ struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent, int bus,
911 926
912 b->subordinate = pci_scan_child_bus(b); 927 b->subordinate = pci_scan_child_bus(b);
913 928
914 pci_bus_add_devices(b);
915
916 return b; 929 return b;
917 930
918sys_create_link_err: 931sys_create_link_err:
@@ -922,7 +935,9 @@ class_dev_create_file_err:
922class_dev_reg_err: 935class_dev_reg_err:
923 device_unregister(dev); 936 device_unregister(dev);
924dev_reg_err: 937dev_reg_err:
938 spin_lock(&pci_bus_lock);
925 list_del(&b->node); 939 list_del(&b->node);
940 spin_unlock(&pci_bus_lock);
926err_out: 941err_out:
927 kfree(dev); 942 kfree(dev);
928 kfree(b); 943 kfree(b);
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
index e68bbfb1e7c3..7988fc8df3fd 100644
--- a/drivers/pci/proc.c
+++ b/drivers/pci/proc.c
@@ -355,14 +355,20 @@ static int show_device(struct seq_file *m, void *v)
355 dev->device, 355 dev->device,
356 dev->irq); 356 dev->irq);
357 /* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */ 357 /* Here should be 7 and not PCI_NUM_RESOURCES as we need to preserve compatibility */
358 for(i=0; i<7; i++) 358 for (i=0; i<7; i++) {
359 u64 start, end;
360 pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
359 seq_printf(m, LONG_FORMAT, 361 seq_printf(m, LONG_FORMAT,
360 dev->resource[i].start | 362 ((unsigned long)start) |
361 (dev->resource[i].flags & PCI_REGION_FLAG_MASK)); 363 (dev->resource[i].flags & PCI_REGION_FLAG_MASK));
362 for(i=0; i<7; i++) 364 }
365 for (i=0; i<7; i++) {
366 u64 start, end;
367 pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
363 seq_printf(m, LONG_FORMAT, 368 seq_printf(m, LONG_FORMAT,
364 dev->resource[i].start < dev->resource[i].end ? 369 dev->resource[i].start < dev->resource[i].end ?
365 dev->resource[i].end - dev->resource[i].start + 1 : 0); 370 (unsigned long)(end - start) + 1 : 0);
371 }
366 seq_putc(m, '\t'); 372 seq_putc(m, '\t');
367 if (drv) 373 if (drv)
368 seq_printf(m, "%s", drv->name); 374 seq_printf(m, "%s", drv->name);
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 96f077f9a659..27a294b6965d 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -18,17 +18,21 @@ static void pci_free_resources(struct pci_dev *dev)
18 18
19static void pci_destroy_dev(struct pci_dev *dev) 19static void pci_destroy_dev(struct pci_dev *dev)
20{ 20{
21 pci_proc_detach_device(dev); 21 if (!list_empty(&dev->global_list)) {
22 pci_remove_sysfs_dev_files(dev); 22 pci_proc_detach_device(dev);
23 device_unregister(&dev->dev); 23 pci_remove_sysfs_dev_files(dev);
24 device_unregister(&dev->dev);
25 spin_lock(&pci_bus_lock);
26 list_del(&dev->global_list);
27 dev->global_list.next = dev->global_list.prev = NULL;
28 spin_unlock(&pci_bus_lock);
29 }
24 30
25 /* Remove the device from the device lists, and prevent any further 31 /* Remove the device from the device lists, and prevent any further
26 * list accesses from this device */ 32 * list accesses from this device */
27 spin_lock(&pci_bus_lock); 33 spin_lock(&pci_bus_lock);
28 list_del(&dev->bus_list); 34 list_del(&dev->bus_list);
29 list_del(&dev->global_list);
30 dev->bus_list.next = dev->bus_list.prev = NULL; 35 dev->bus_list.next = dev->bus_list.prev = NULL;
31 dev->global_list.next = dev->global_list.prev = NULL;
32 spin_unlock(&pci_bus_lock); 36 spin_unlock(&pci_bus_lock);
33 37
34 pci_free_resources(dev); 38 pci_free_resources(dev);
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 1ba84be0b4c0..6b628de948af 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -72,7 +72,10 @@ pbus_assign_resources_sorted(struct pci_bus *bus)
72 for (list = head.next; list;) { 72 for (list = head.next; list;) {
73 res = list->res; 73 res = list->res;
74 idx = res - &list->dev->resource[0]; 74 idx = res - &list->dev->resource[0];
75 pci_assign_resource(list->dev, idx); 75 if (pci_assign_resource(list->dev, idx)) {
76 res->start = 0;
77 res->flags = 0;
78 }
76 tmp = list; 79 tmp = list;
77 list = list->next; 80 list = list->next;
78 kfree(tmp); 81 kfree(tmp);
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 34dbc37a79d4..bc6e4627c7a1 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1916,9 +1916,9 @@ static void __twa_shutdown(TW_Device_Extension *tw_dev)
1916} /* End __twa_shutdown() */ 1916} /* End __twa_shutdown() */
1917 1917
1918/* Wrapper for __twa_shutdown */ 1918/* Wrapper for __twa_shutdown */
1919static void twa_shutdown(struct device *dev) 1919static void twa_shutdown(struct pci_dev *pdev)
1920{ 1920{
1921 struct Scsi_Host *host = pci_get_drvdata(to_pci_dev(dev)); 1921 struct Scsi_Host *host = pci_get_drvdata(pdev);
1922 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; 1922 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1923 1923
1924 __twa_shutdown(tw_dev); 1924 __twa_shutdown(tw_dev);
@@ -2140,9 +2140,7 @@ static struct pci_driver twa_driver = {
2140 .id_table = twa_pci_tbl, 2140 .id_table = twa_pci_tbl,
2141 .probe = twa_probe, 2141 .probe = twa_probe,
2142 .remove = twa_remove, 2142 .remove = twa_remove,
2143 .driver = { 2143 .shutdown = twa_shutdown
2144 .shutdown = twa_shutdown
2145 }
2146}; 2144};
2147 2145
2148/* This function is called on driver initialization */ 2146/* This function is called on driver initialization */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index b6dc576da430..973c51fb0fe2 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -2264,9 +2264,9 @@ static void __tw_shutdown(TW_Device_Extension *tw_dev)
2264} /* End __tw_shutdown() */ 2264} /* End __tw_shutdown() */
2265 2265
2266/* Wrapper for __tw_shutdown */ 2266/* Wrapper for __tw_shutdown */
2267static void tw_shutdown(struct device *dev) 2267static void tw_shutdown(struct pci_dev *pdev)
2268{ 2268{
2269 struct Scsi_Host *host = pci_get_drvdata(to_pci_dev(dev)); 2269 struct Scsi_Host *host = pci_get_drvdata(pdev);
2270 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata; 2270 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2271 2271
2272 __tw_shutdown(tw_dev); 2272 __tw_shutdown(tw_dev);
@@ -2451,9 +2451,7 @@ static struct pci_driver tw_driver = {
2451 .id_table = tw_pci_tbl, 2451 .id_table = tw_pci_tbl,
2452 .probe = tw_probe, 2452 .probe = tw_probe,
2453 .remove = tw_remove, 2453 .remove = tw_remove,
2454 .driver = { 2454 .shutdown = tw_shutdown,
2455 .shutdown = tw_shutdown
2456 }
2457}; 2455};
2458 2456
2459/* This function is called on driver initialization */ 2457/* This function is called on driver initialization */
diff --git a/drivers/scsi/ahci.c b/drivers/scsi/ahci.c
index 9a547ca9c864..c5623694d10f 100644
--- a/drivers/scsi/ahci.c
+++ b/drivers/scsi/ahci.c
@@ -304,26 +304,19 @@ static int ahci_port_start(struct ata_port *ap)
304 struct device *dev = ap->host_set->dev; 304 struct device *dev = ap->host_set->dev;
305 struct ahci_host_priv *hpriv = ap->host_set->private_data; 305 struct ahci_host_priv *hpriv = ap->host_set->private_data;
306 struct ahci_port_priv *pp; 306 struct ahci_port_priv *pp;
307 int rc;
308 void *mem, *mmio = ap->host_set->mmio_base; 307 void *mem, *mmio = ap->host_set->mmio_base;
309 void *port_mmio = ahci_port_base(mmio, ap->port_no); 308 void *port_mmio = ahci_port_base(mmio, ap->port_no);
310 dma_addr_t mem_dma; 309 dma_addr_t mem_dma;
311 310
312 rc = ata_port_start(ap);
313 if (rc)
314 return rc;
315
316 pp = kmalloc(sizeof(*pp), GFP_KERNEL); 311 pp = kmalloc(sizeof(*pp), GFP_KERNEL);
317 if (!pp) { 312 if (!pp)
318 rc = -ENOMEM; 313 return -ENOMEM;
319 goto err_out;
320 }
321 memset(pp, 0, sizeof(*pp)); 314 memset(pp, 0, sizeof(*pp));
322 315
323 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL); 316 mem = dma_alloc_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, &mem_dma, GFP_KERNEL);
324 if (!mem) { 317 if (!mem) {
325 rc = -ENOMEM; 318 kfree(pp);
326 goto err_out_kfree; 319 return -ENOMEM;
327 } 320 }
328 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ); 321 memset(mem, 0, AHCI_PORT_PRIV_DMA_SZ);
329 322
@@ -373,12 +366,6 @@ static int ahci_port_start(struct ata_port *ap)
373 readl(port_mmio + PORT_CMD); /* flush */ 366 readl(port_mmio + PORT_CMD); /* flush */
374 367
375 return 0; 368 return 0;
376
377err_out_kfree:
378 kfree(pp);
379err_out:
380 ata_port_stop(ap);
381 return rc;
382} 369}
383 370
384 371
@@ -404,7 +391,6 @@ static void ahci_port_stop(struct ata_port *ap)
404 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ, 391 dma_free_coherent(dev, AHCI_PORT_PRIV_DMA_SZ,
405 pp->cmd_slot, pp->cmd_slot_dma); 392 pp->cmd_slot, pp->cmd_slot_dma);
406 kfree(pp); 393 kfree(pp);
407 ata_port_stop(ap);
408} 394}
409 395
410static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in) 396static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 80d022625c82..babd48363402 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -6012,7 +6012,7 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
6012 6012
6013/** 6013/**
6014 * ipr_shutdown - Shutdown handler. 6014 * ipr_shutdown - Shutdown handler.
6015 * @dev: device struct 6015 * @pdev: pci device struct
6016 * 6016 *
6017 * This function is invoked upon system shutdown/reboot. It will issue 6017 * This function is invoked upon system shutdown/reboot. It will issue
6018 * an adapter shutdown to the adapter to flush the write cache. 6018 * an adapter shutdown to the adapter to flush the write cache.
@@ -6020,9 +6020,9 @@ static int __devinit ipr_probe(struct pci_dev *pdev,
6020 * Return value: 6020 * Return value:
6021 * none 6021 * none
6022 **/ 6022 **/
6023static void ipr_shutdown(struct device *dev) 6023static void ipr_shutdown(struct pci_dev *pdev)
6024{ 6024{
6025 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(to_pci_dev(dev)); 6025 struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
6026 unsigned long lock_flags = 0; 6026 unsigned long lock_flags = 0;
6027 6027
6028 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 6028 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
@@ -6068,9 +6068,7 @@ static struct pci_driver ipr_driver = {
6068 .id_table = ipr_pci_table, 6068 .id_table = ipr_pci_table,
6069 .probe = ipr_probe, 6069 .probe = ipr_probe,
6070 .remove = ipr_remove, 6070 .remove = ipr_remove,
6071 .driver = { 6071 .shutdown = ipr_shutdown,
6072 .shutdown = ipr_shutdown,
6073 },
6074}; 6072};
6075 6073
6076/** 6074/**
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 36b401fee1f1..cb535fa185b9 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -1408,7 +1408,9 @@ void __sata_phy_reset(struct ata_port *ap)
1408 if (ap->flags & ATA_FLAG_SATA_RESET) { 1408 if (ap->flags & ATA_FLAG_SATA_RESET) {
1409 /* issue phy wake/reset */ 1409 /* issue phy wake/reset */
1410 scr_write_flush(ap, SCR_CONTROL, 0x301); 1410 scr_write_flush(ap, SCR_CONTROL, 0x301);
1411 udelay(400); /* FIXME: a guess */ 1411 /* Couldn't find anything in SATA I/II specs, but
1412 * AHCI-1.1 10.4.2 says at least 1 ms. */
1413 mdelay(1);
1412 } 1414 }
1413 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1415 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */
1414 1416
@@ -1920,6 +1922,7 @@ static const char * ata_dma_blacklist [] = {
1920 "HITACHI CDR-8335", 1922 "HITACHI CDR-8335",
1921 "HITACHI CDR-8435", 1923 "HITACHI CDR-8435",
1922 "Toshiba CD-ROM XM-6202B", 1924 "Toshiba CD-ROM XM-6202B",
1925 "TOSHIBA CD-ROM XM-1702BC",
1923 "CD-532E-A", 1926 "CD-532E-A",
1924 "E-IDE CD-ROM CR-840", 1927 "E-IDE CD-ROM CR-840",
1925 "CD-ROM Drive/F5A", 1928 "CD-ROM Drive/F5A",
@@ -1927,7 +1930,6 @@ static const char * ata_dma_blacklist [] = {
1927 "SAMSUNG CD-ROM SC-148C", 1930 "SAMSUNG CD-ROM SC-148C",
1928 "SAMSUNG CD-ROM SC", 1931 "SAMSUNG CD-ROM SC",
1929 "SanDisk SDP3B-64", 1932 "SanDisk SDP3B-64",
1930 "SAMSUNG CD-ROM SN-124",
1931 "ATAPI CD-ROM DRIVE 40X MAXIMUM", 1933 "ATAPI CD-ROM DRIVE 40X MAXIMUM",
1932 "_NEC DV5800A", 1934 "_NEC DV5800A",
1933}; 1935};
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index ec81532eb845..a70cdf31311c 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -5036,9 +5036,9 @@ megaraid_remove_one(struct pci_dev *pdev)
5036} 5036}
5037 5037
5038static void 5038static void
5039megaraid_shutdown(struct device *dev) 5039megaraid_shutdown(struct pci_dev *pdev)
5040{ 5040{
5041 struct Scsi_Host *host = pci_get_drvdata(to_pci_dev(dev)); 5041 struct Scsi_Host *host = pci_get_drvdata(pdev);
5042 adapter_t *adapter = (adapter_t *)host->hostdata; 5042 adapter_t *adapter = (adapter_t *)host->hostdata;
5043 5043
5044 __megaraid_shutdown(adapter); 5044 __megaraid_shutdown(adapter);
@@ -5070,9 +5070,7 @@ static struct pci_driver megaraid_pci_driver = {
5070 .id_table = megaraid_pci_tbl, 5070 .id_table = megaraid_pci_tbl,
5071 .probe = megaraid_probe_one, 5071 .probe = megaraid_probe_one,
5072 .remove = __devexit_p(megaraid_remove_one), 5072 .remove = __devexit_p(megaraid_remove_one),
5073 .driver = { 5073 .shutdown = megaraid_shutdown,
5074 .shutdown = megaraid_shutdown,
5075 },
5076}; 5074};
5077 5075
5078static int __init megaraid_init(void) 5076static int __init megaraid_init(void)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 621dee8b8cb2..10506f9cd0c9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -632,7 +632,7 @@ static void scsi_free_sgtable(struct scatterlist *sgl, int index)
632{ 632{
633 struct scsi_host_sg_pool *sgp; 633 struct scsi_host_sg_pool *sgp;
634 634
635 BUG_ON(index > SG_MEMPOOL_NR); 635 BUG_ON(index >= SG_MEMPOOL_NR);
636 636
637 sgp = scsi_sg_pools + index; 637 sgp = scsi_sg_pools + index;
638 mempool_free(sgl, sgp->pool); 638 mempool_free(sgl, sgp->pool);