334 files changed, 16759 insertions, 9072 deletions
diff --git a/Documentation/cachetlb.txt b/Documentation/cachetlb.txt
index 4ae418889b88..53245c429f7d 100644
--- a/Documentation/cachetlb.txt
+++ b/Documentation/cachetlb.txt
@@ -362,6 +362,27 @@ maps this page at its virtual address. | |||
362 | likely that you will need to flush the instruction cache | 362 | likely that you will need to flush the instruction cache |
363 | for copy_to_user_page(). | 363 | for copy_to_user_page(). |
364 | 364 | ||
365 | void flush_anon_page(struct page *page, unsigned long vmaddr) | ||
366 | When the kernel needs to access the contents of an anonymous | ||
367 | page, it calls this function (currently only | ||
368 | get_user_pages()). Note: flush_dcache_page() deliberately | ||
369 | doesn't work for an anonymous page. The default | ||
370 | implementation is a nop (and should remain so for all coherent | ||
371 | architectures). For incoherent architectures, it should flush | ||
372 | the cache of the page at vmaddr in the current user process. | ||
373 | |||
374 | void flush_kernel_dcache_page(struct page *page) | ||
375 | When the kernel needs to modify a user page it has obtained | ||
376 | with kmap, it calls this function after all modifications are | ||
377 | complete (but before kunmapping it) to bring the underlying | ||
378 | page up to date. It is assumed here that the user has no | ||
379 | incoherent cached copies (i.e. the original page was obtained | ||
380 | from a mechanism like get_user_pages()). The default | ||
381 | implementation is a nop and should remain so on all coherent | ||
382 | architectures. On incoherent architectures, this should flush | ||
383 | the kernel cache for page (using page_address(page)). | ||
384 | |||
385 | |||
365 | void flush_icache_range(unsigned long start, unsigned long end) | 386 | void flush_icache_range(unsigned long start, unsigned long end) |
366 | When the kernel stores into addresses that it will execute | 387 | When the kernel stores into addresses that it will execute |
367 | out of (eg when loading modules), this function is called. | 388 | out of (eg when loading modules), this function is called. |
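
As a rough illustration of the flush_kernel_dcache_page() rule documented above, a caller on a cache-incoherent architecture modifies the kmap()ed page first and flushes before kunmapping. This is only a sketch under the documented assumptions (page pinned via get_user_pages(), no incoherent user copies); the helper name is invented.

    #include <linux/highmem.h>      /* kmap(), kunmap(), flush_kernel_dcache_page() */
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Sketch: fill a user page that was pinned with get_user_pages().  On an
     * incoherent architecture the flush before kunmap() is what pushes the
     * kernel's dirty cache lines out so the underlying page is up to date. */
    static void fill_pinned_page(struct page *page, const void *src, size_t len)
    {
            void *vaddr = kmap(page);               /* kernel-side alias of the page */

            memcpy(vaddr, src, len);                /* all modifications first...        */
            flush_kernel_dcache_page(page);         /* ...then bring the page up to date */
            kunmap(page);                           /* only now drop the mapping         */
    }
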
diff --git a/Documentation/drivers/edac/edac.txt b/Documentation/drivers/edac/edac.txt
index d37191fe5681..70d96a62e5e1 100644
--- a/Documentation/drivers/edac/edac.txt
+++ b/Documentation/drivers/edac/edac.txt
@@ -21,7 +21,7 @@ within the computer system. In the initial release, memory Correctable Errors | |||
21 | 21 | ||
22 | Detecting CE events, then harvesting those events and reporting them, | 22 | Detecting CE events, then harvesting those events and reporting them, |
23 | CAN be a predictor of future UE events. With CE events, the system can | 23 | CAN be a predictor of future UE events. With CE events, the system can |
24 | continue to operate, but with less safety. Preventive maintainence and | 24 | continue to operate, but with less safety. Preventive maintenance and |
25 | proactive part replacement of memory DIMMs exhibiting CEs can reduce | 25 | proactive part replacement of memory DIMMs exhibiting CEs can reduce |
26 | the likelihood of the dreaded UE events and system 'panics'. | 26 | the likelihood of the dreaded UE events and system 'panics'. |
27 | 27 | ||
@@ -29,13 +29,13 @@ the likelihood of the dreaded UE events and system 'panics'. | |||
29 | In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices | 29 | In addition, PCI Bus Parity and SERR Errors are scanned for on PCI devices |
30 | in order to determine if errors are occurring on data transfers. | 30 | in order to determine if errors are occurring on data transfers. |
31 | The presence of PCI Parity errors must be examined with a grain of salt. | 31 | The presence of PCI Parity errors must be examined with a grain of salt. |
32 | There are several addin adapters that do NOT follow the PCI specification | 32 | There are several add-in adapters that do NOT follow the PCI specification |
33 | with regards to Parity generation and reporting. The specification says | 33 | with regards to Parity generation and reporting. The specification says |
34 | the vendor should tie the parity status bits to 0 if they do not intend | 34 | the vendor should tie the parity status bits to 0 if they do not intend |
35 | to generate parity. Some vendors do not do this, and thus the parity bit | 35 | to generate parity. Some vendors do not do this, and thus the parity bit |
36 | can "float" giving false positives. | 36 | can "float" giving false positives. |
37 | 37 | ||
38 | The PCI Parity EDAC device has the ability to "skip" known flakey | 38 | The PCI Parity EDAC device has the ability to "skip" known flaky |
39 | cards during the parity scan. These are set by the parity "blacklist" | 39 | cards during the parity scan. These are set by the parity "blacklist" |
40 | interface in the sysfs for PCI Parity. (See the PCI section in the sysfs | 40 | interface in the sysfs for PCI Parity. (See the PCI section in the sysfs |
41 | section below.) There is also a parity "whitelist" which is used as | 41 | section below.) There is also a parity "whitelist" which is used as |
@@ -101,7 +101,7 @@ Memory Controller (mc) Model | |||
101 | 101 | ||
102 | First a background on the memory controller's model abstracted in EDAC. | 102 | First a background on the memory controller's model abstracted in EDAC. |
103 | Each mc device controls a set of DIMM memory modules. These modules are | 103 | Each mc device controls a set of DIMM memory modules. These modules are |
104 | layed out in a Chip-Select Row (csrowX) and Channel table (chX). There can | 104 | laid out in a Chip-Select Row (csrowX) and Channel table (chX). There can |
105 | be multiple csrows and two channels. | 105 | be multiple csrows and two channels. |
106 | 106 | ||
107 | Memory controllers allow for several csrows, with 8 csrows being a typical value. | 107 | Memory controllers allow for several csrows, with 8 csrows being a typical value. |
@@ -131,7 +131,7 @@ for memory DIMMs: | |||
131 | DIMM_B1 | 131 | DIMM_B1 |
132 | 132 | ||
133 | Labels for these slots are usually silk screened on the motherboard. Slots | 133 | Labels for these slots are usually silk screened on the motherboard. Slots |
134 | labeled 'A' are channel 0 in this example. Slots labled 'B' | 134 | labeled 'A' are channel 0 in this example. Slots labeled 'B' |
135 | are channel 1. Notice that there are two csrows possible on a | 135 | are channel 1. Notice that there are two csrows possible on a |
136 | physical DIMM. These csrows are allocated their csrow assignment | 136 | physical DIMM. These csrows are allocated their csrow assignment |
137 | based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM | 137 | based on the slot into which the memory DIMM is placed. Thus, when 1 DIMM |
@@ -140,7 +140,7 @@ is placed in each Channel, the csrows cross both DIMMs. | |||
140 | Memory DIMMs come single or dual "ranked". A rank is a populated csrow. | 140 | Memory DIMMs come single or dual "ranked". A rank is a populated csrow. |
141 | Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above | 141 | Thus, 2 single ranked DIMMs, placed in slots DIMM_A0 and DIMM_B0 above |
142 | will have 1 csrow, csrow0. csrow1 will be empty. On the other hand, | 142 | will have 1 csrow, csrow0. csrow1 will be empty. On the other hand, |
143 | when 2 dual ranked DIMMs are similiaryly placed, then both csrow0 and | 143 | when 2 dual ranked DIMMs are similarly placed, then both csrow0 and |
144 | csrow1 will be populated. The pattern repeats itself for csrow2 and | 144 | csrow1 will be populated. The pattern repeats itself for csrow2 and |
145 | csrow3. | 145 | csrow3. |
146 | 146 | ||
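
To make the csrow/channel geometry above concrete, here is the DIMM_A0/A1/B0/B1 example expressed as a small lookup table (dual-ranked DIMMs assumed, so each physical DIMM contributes two csrows; purely illustrative, not EDAC code):

    /* (csrow, channel) -> silk-screened slot label for the example above. */
    static const char *slot_label[4][2] = {
            /*        channel 0   channel 1                             */
            [0] = { "DIMM_A0", "DIMM_B0" }, /* csrow0: rank 0 of slot 0 */
            [1] = { "DIMM_A0", "DIMM_B0" }, /* csrow1: rank 1 of slot 0 */
            [2] = { "DIMM_A1", "DIMM_B1" }, /* csrow2: rank 0 of slot 1 */
            [3] = { "DIMM_A1", "DIMM_B1" }, /* csrow3: rank 1 of slot 1 */
    };
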
@@ -246,7 +246,7 @@ Module Version read-only attribute file: | |||
246 | 246 | ||
247 | 'mc_version' | 247 | 'mc_version' |
248 | 248 | ||
249 | The EDAC CORE modules's version and compile date are shown here to | 249 | The EDAC CORE module's version and compile date are shown here to |
250 | indicate what EDAC is running. | 250 | indicate what EDAC is running. |
251 | 251 | ||
252 | 252 | ||
@@ -423,7 +423,7 @@ Total memory managed by this csrow attribute file: | |||
423 | 'size_mb' | 423 | 'size_mb' |
424 | 424 | ||
425 | This attribute file displays, in count of megabytes, of memory | 425 | This attribute file displays, in count of megabytes, of memory |
426 | that this csrow contatins. | 426 | that this csrow contains. |
427 | 427 | ||
428 | 428 | ||
429 | Memory Type attribute file: | 429 | Memory Type attribute file: |
@@ -557,7 +557,7 @@ On Header Type 00 devices the primary status is looked at | |||
557 | for any parity error regardless of whether Parity is enabled on the | 557 | for any parity error regardless of whether Parity is enabled on the |
558 | device. (The spec indicates parity is generated in some cases). | 558 | device. (The spec indicates parity is generated in some cases). |
559 | On Header Type 01 bridges, the secondary status register is also | 559 | On Header Type 01 bridges, the secondary status register is also |
560 | looked at to see if parity ocurred on the bus on the other side of | 560 | looked at to see if parity occurred on the bus on the other side of |
561 | the bridge. | 561 | the bridge. |
562 | 562 | ||
563 | 563 | ||
@@ -588,7 +588,7 @@ Panic on PCI PARITY Error: | |||
588 | 'panic_on_pci_parity' | 588 | 'panic_on_pci_parity' |
589 | 589 | ||
590 | 590 | ||
591 | This control files enables or disables panic'ing when a parity | 591 | This control files enables or disables panicking when a parity |
592 | error has been detected. | 592 | error has been detected. |
593 | 593 | ||
594 | 594 | ||
@@ -616,12 +616,12 @@ PCI Device Whitelist: | |||
616 | 616 | ||
617 | This control file allows for an explicit list of PCI devices to be | 617 | This control file allows for an explicit list of PCI devices to be |
618 | scanned for parity errors. Only devices found on this list will | 618 | scanned for parity errors. Only devices found on this list will |
619 | be examined. The list is a line of hexadecimel VENDOR and DEVICE | 619 | be examined. The list is a line of hexadecimal VENDOR and DEVICE |
620 | ID tuples: | 620 | ID tuples: |
621 | 621 | ||
622 | 1022:7450,1434:16a6 | 622 | 1022:7450,1434:16a6 |
623 | 623 | ||
624 | One or more can be inserted, seperated by a comma. | 624 | One or more can be inserted, separated by a comma. |
625 | 625 | ||
626 | To write the above list doing the following as one command line: | 626 | To write the above list doing the following as one command line: |
627 | 627 | ||
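
The same whitelist write can be issued from a small user-space program instead of echo; a minimal sketch using the sysfs path shown further below (error handling kept to the bare minimum):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
            static const char list[] = "1022:7450,1434:16a6";
            int fd = open("/sys/devices/system/edac/pci/pci_parity_whitelist",
                          O_WRONLY);

            /* One write() delivers the whole tuple list, like the single
             * echo command the text describes. */
            if (fd < 0 || write(fd, list, strlen(list)) != (ssize_t)strlen(list)) {
                    perror("pci_parity_whitelist");
                    return 1;
            }
            close(fd);
            return 0;
    }
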
@@ -639,11 +639,11 @@ PCI Device Blacklist: | |||
639 | 639 | ||
640 | This control file allows for a list of PCI devices to be | 640 | This control file allows for a list of PCI devices to be |
641 | skipped for scanning. | 641 | skipped for scanning. |
642 | The list is a line of hexadecimel VENDOR and DEVICE ID tuples: | 642 | The list is a line of hexadecimal VENDOR and DEVICE ID tuples: |
643 | 643 | ||
644 | 1022:7450,1434:16a6 | 644 | 1022:7450,1434:16a6 |
645 | 645 | ||
646 | One or more can be inserted, seperated by a comma. | 646 | One or more can be inserted, separated by a comma. |
647 | 647 | ||
648 | To write the above list doing the following as one command line: | 648 | To write the above list doing the following as one command line: |
649 | 649 | ||
@@ -651,14 +651,14 @@ PCI Device Blacklist: | |||
651 | > /sys/devices/system/edac/pci/pci_parity_blacklist | 651 | > /sys/devices/system/edac/pci/pci_parity_blacklist |
652 | 652 | ||
653 | 653 | ||
654 | To display what the whitelist current contatins, | 654 | To display what the whitelist currently contains, |
655 | simply 'cat' the same file. | 655 | simply 'cat' the same file. |
656 | 656 | ||
657 | ======================================================================= | 657 | ======================================================================= |
658 | 658 | ||
659 | PCI Vendor and Devices IDs can be obtained with the lspci command. Using | 659 | PCI Vendor and Devices IDs can be obtained with the lspci command. Using |
660 | the -n option lspci will display the vendor and device IDs. The system | 660 | the -n option lspci will display the vendor and device IDs. The system |
661 | adminstrator will have to determine which devices should be scanned or | 661 | administrator will have to determine which devices should be scanned or |
662 | skipped. | 662 | skipped. |
663 | 663 | ||
664 | 664 | ||
@@ -669,5 +669,5 @@ Turn OFF a whitelist by an empty echo command: | |||
669 | 669 | ||
670 | echo > /sys/devices/system/edac/pci/pci_parity_whitelist | 670 | echo > /sys/devices/system/edac/pci/pci_parity_whitelist |
671 | 671 | ||
672 | and any previous blacklist will be utililzed. | 672 | and any previous blacklist will be utilized. |
673 | 673 | ||
diff --git a/Documentation/networking/vortex.txt b/Documentation/networking/vortex.txt
index 3759acf95b29..6091e5f6794f 100644
--- a/Documentation/networking/vortex.txt
+++ b/Documentation/networking/vortex.txt
@@ -24,36 +24,44 @@ Since kernel 2.3.99-pre6, this driver incorporates the support for the | |||
24 | 24 | ||
25 | This driver supports the following hardware: | 25 | This driver supports the following hardware: |
26 | 26 | ||
27 | 3c590 Vortex 10Mbps | 27 | 3c590 Vortex 10Mbps |
28 | 3c592 EISA 10mbps Demon/Vortex | 28 | 3c592 EISA 10Mbps Demon/Vortex |
29 | 3c597 EISA Fast Demon/Vortex | 29 | 3c597 EISA Fast Demon/Vortex |
30 | 3c595 Vortex 100baseTx | 30 | 3c595 Vortex 100baseTx |
31 | 3c595 Vortex 100baseT4 | 31 | 3c595 Vortex 100baseT4 |
32 | 3c595 Vortex 100base-MII | 32 | 3c595 Vortex 100base-MII |
33 | 3Com Vortex | 33 | 3c900 Boomerang 10baseT |
34 | 3c900 Boomerang 10baseT | 34 | 3c900 Boomerang 10Mbps Combo |
35 | 3c900 Boomerang 10Mbps Combo | 35 | 3c900 Cyclone 10Mbps TPO |
36 | 3c900 Cyclone 10Mbps TPO | 36 | 3c900 Cyclone 10Mbps Combo |
37 | 3c900B Cyclone 10Mbps T | 37 | 3c900 Cyclone 10Mbps TPC |
38 | 3c900 Cyclone 10Mbps Combo | 38 | 3c900B-FL Cyclone 10base-FL |
39 | 3c900 Cyclone 10Mbps TPC | 39 | 3c905 Boomerang 100baseTx |
40 | 3c900B-FL Cyclone 10base-FL | 40 | 3c905 Boomerang 100baseT4 |
41 | 3c905 Boomerang 100baseTx | 41 | 3c905B Cyclone 100baseTx |
42 | 3c905 Boomerang 100baseT4 | 42 | 3c905B Cyclone 10/100/BNC |
43 | 3c905B Cyclone 100baseTx | 43 | 3c905B-FX Cyclone 100baseFx |
44 | 3c905B Cyclone 10/100/BNC | 44 | 3c905C Tornado |
45 | 3c905B-FX Cyclone 100baseFx | 45 | 3c920B-EMB-WNM (ATI Radeon 9100 IGP) |
46 | 3c905C Tornado | 46 | 3c980 Cyclone |
47 | 3c980 Cyclone | 47 | 3c980C Python-T |
48 | 3cSOHO100-TX Hurricane | 48 | 3cSOHO100-TX Hurricane |
49 | 3c555 Laptop Hurricane | 49 | 3c555 Laptop Hurricane |
50 | 3c575 Boomerang CardBus | 50 | 3c556 Laptop Tornado |
51 | 3CCFE575 Cyclone CardBus | 51 | 3c556B Laptop Hurricane |
52 | 3CCFE575CT Cyclone CardBus | 52 | 3c575 [Megahertz] 10/100 LAN CardBus |
53 | 3CCFE656 Cyclone CardBus | 53 | 3c575 Boomerang CardBus |
54 | 3CCFEM656 Cyclone CardBus | 54 | 3CCFE575BT Cyclone CardBus |
55 | 3c450 Cyclone/unknown | 55 | 3CCFE575CT Tornado CardBus |
56 | 56 | 3CCFE656 Cyclone CardBus | |
57 | 3CCFEM656B Cyclone+Winmodem CardBus | ||
58 | 3CXFEM656C Tornado+Winmodem CardBus | ||
59 | 3c450 HomePNA Tornado | ||
60 | 3c920 Tornado | ||
61 | 3c982 Hydra Dual Port A | ||
62 | 3c982 Hydra Dual Port B | ||
63 | 3c905B-T4 | ||
64 | 3c920B-EMB-WNM Tornado | ||
57 | 65 | ||
58 | Module parameters | 66 | Module parameters |
59 | ================= | 67 | ================= |
@@ -293,11 +301,6 @@ Donald's wake-on-LAN page: | |||
293 | 301 | ||
294 | http://www.scyld.com/wakeonlan.html | 302 | http://www.scyld.com/wakeonlan.html |
295 | 303 | ||
296 | 3Com's documentation for many NICs, including the ones supported by | ||
297 | this driver is available at | ||
298 | |||
299 | http://support.3com.com/partners/developer/developer_form.html | ||
300 | |||
301 | 3Com's DOS-based application for setting up the NICs EEPROMs: | 304 | 3Com's DOS-based application for setting up the NICs EEPROMs: |
302 | 305 | ||
303 | ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe | 306 | ftp://ftp.3com.com/pub/nic/3c90x/3c90xx2.exe |
@@ -312,10 +315,10 @@ Autonegotiation notes | |||
312 | --------------------- | 315 | --------------------- |
313 | 316 | ||
314 | The driver uses a one-minute heartbeat for adapting to changes in | 317 | The driver uses a one-minute heartbeat for adapting to changes in |
315 | the external LAN environment. This means that when, for example, a | 318 | the external LAN environment if link is up and 5 seconds if link is down. |
316 | machine is unplugged from a hubbed 10baseT LAN plugged into a | 319 | This means that when, for example, a machine is unplugged from a hubbed |
317 | switched 100baseT LAN, the throughput will be quite dreadful for up | 320 | 10baseT LAN plugged into a switched 100baseT LAN, the throughput |
318 | to sixty seconds. Be patient. | 321 | will be quite dreadful for up to sixty seconds. Be patient. |
319 | 322 | ||
320 | Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>: | 323 | Cisco interoperability note from Walter Wong <wcw+@CMU.EDU>: |
321 | 324 | ||
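
Schematically, the adaptive heartbeat described above just re-arms the media-check timer with a link-dependent interval; a sketch only (not the driver's actual code, and the function and parameter names are invented):

    #include <linux/jiffies.h>
    #include <linux/netdevice.h>
    #include <linux/timer.h>

    /* Poll every 5 seconds while the link is down, once a minute when it is up. */
    static void rearm_media_timer(struct net_device *dev, struct timer_list *t)
    {
            unsigned long next = netif_carrier_ok(dev) ? 60 * HZ : 5 * HZ;

            mod_timer(t, jiffies + next);
    }
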
diff --git a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
index 6dc9d9f622ca..6feef9e82b63 100644
--- a/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/sound/alsa/DocBook/writing-an-alsa-driver.tmpl
@@ -2836,7 +2836,7 @@ struct _snd_pcm_runtime { | |||
2836 | 2836 | ||
2837 | <para> | 2837 | <para> |
2838 | Note that this callback became non-atomic since the recent version. | 2838 | Note that this callback became non-atomic since the recent version. |
2839 | You can use schedule-related fucntions safely in this callback now. | 2839 | You can use schedule-related functions safely in this callback now. |
2840 | </para> | 2840 | </para> |
2841 | 2841 | ||
2842 | <para> | 2842 | <para> |
diff --git a/MAINTAINERS b/MAINTAINERS
index 4e8fbbc5566d..f27846734b06 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -882,13 +882,34 @@ W: http://ebtables.sourceforge.net/ | |||
882 | S: Maintained | 882 | S: Maintained |
883 | 883 | ||
884 | EDAC-CORE | 884 | EDAC-CORE |
885 | P: Doug Thompson | 885 | P: Doug Thompson |
886 | M: norsk5@xmission.com, dthompson@linuxnetworx.com | 886 | M: norsk5@xmission.com, dthompson@linuxnetworx.com |
887 | P: Dave Peterson | 887 | P: Dave Peterson |
888 | M: dsp@llnl.gov, dave_peterson@pobox.com | 888 | M: dsp@llnl.gov, dave_peterson@pobox.com |
889 | L: bluesmoke-devel@lists.sourceforge.net | 889 | L: bluesmoke-devel@lists.sourceforge.net |
890 | W: bluesmoke.sourceforge.net | 890 | W: bluesmoke.sourceforge.net |
891 | S: Maintained | 891 | S: Maintained |
892 | |||
893 | EDAC-E752X | ||
894 | P: Dave Peterson | ||
895 | M: dsp@llnl.gov, dave_peterson@pobox.com | ||
896 | L: bluesmoke-devel@lists.sourceforge.net | ||
897 | W: bluesmoke.sourceforge.net | ||
898 | S: Maintained | ||
899 | |||
900 | EDAC-E7XXX | ||
901 | P: Dave Peterson | ||
902 | M: dsp@llnl.gov, dave_peterson@pobox.com | ||
903 | L: bluesmoke-devel@lists.sourceforge.net | ||
904 | W: bluesmoke.sourceforge.net | ||
905 | S: Maintained | ||
906 | |||
907 | EDAC-R82600 | ||
908 | P: Tim Small | ||
909 | M: tim@buttersideup.com | ||
910 | L: bluesmoke-devel@lists.sourceforge.net | ||
911 | W: bluesmoke.sourceforge.net | ||
912 | S: Maintained | ||
892 | 913 | ||
893 | EEPRO100 NETWORK DRIVER | 914 | EEPRO100 NETWORK DRIVER |
894 | P: Andrey V. Savochkin | 915 | P: Andrey V. Savochkin |
@@ -1039,6 +1060,15 @@ M: khc@pm.waw.pl | |||
1039 | W: http://www.kernel.org/pub/linux/utils/net/hdlc/ | 1060 | W: http://www.kernel.org/pub/linux/utils/net/hdlc/ |
1040 | S: Maintained | 1061 | S: Maintained |
1041 | 1062 | ||
1063 | GIGASET ISDN DRIVERS | ||
1064 | P: Hansjoerg Lipp | ||
1065 | M: hjlipp@web.de | ||
1066 | P: Tilman Schmidt | ||
1067 | M: tilman@imap.cc | ||
1068 | L: gigaset307x-common@lists.sourceforge.net | ||
1069 | W: http://gigaset307x.sourceforge.net/ | ||
1070 | S: Maintained | ||
1071 | |||
1042 | HARDWARE MONITORING | 1072 | HARDWARE MONITORING |
1043 | P: Jean Delvare | 1073 | P: Jean Delvare |
1044 | M: khali@linux-fr.org | 1074 | M: khali@linux-fr.org |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index eedf41bf7057..9bef61b30367 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -25,6 +25,10 @@ config RWSEM_XCHGADD_ALGORITHM | |||
25 | bool | 25 | bool |
26 | default y | 26 | default y |
27 | 27 | ||
28 | config GENERIC_FIND_NEXT_BIT | ||
29 | bool | ||
30 | default y | ||
31 | |||
28 | config GENERIC_CALIBRATE_DELAY | 32 | config GENERIC_CALIBRATE_DELAY |
29 | bool | 33 | bool |
30 | default y | 34 | default y |
@@ -447,6 +451,10 @@ config ALPHA_IRONGATE | |||
447 | depends on ALPHA_NAUTILUS | 451 | depends on ALPHA_NAUTILUS |
448 | default y | 452 | default y |
449 | 453 | ||
454 | config GENERIC_HWEIGHT | ||
455 | bool | ||
456 | default y if !ALPHA_EV6 && !ALPHA_EV67 | ||
457 | |||
450 | config ALPHA_AVANTI | 458 | config ALPHA_AVANTI |
451 | bool | 459 | bool |
452 | depends on ALPHA_XL || ALPHA_AVANTI_CH | 460 | depends on ALPHA_XL || ALPHA_AVANTI_CH |
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 7fb14f42a125..31afe3d91ac6 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -821,7 +821,6 @@ osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes, | |||
821 | affects all sorts of things, like timeval and itimerval. */ | 821 | affects all sorts of things, like timeval and itimerval. */ |
822 | 822 | ||
823 | extern struct timezone sys_tz; | 823 | extern struct timezone sys_tz; |
824 | extern int do_adjtimex(struct timex *); | ||
825 | 824 | ||
826 | struct timeval32 | 825 | struct timeval32 |
827 | { | 826 | { |
diff --git a/arch/alpha/lib/ev6-memchr.S b/arch/alpha/lib/ev6-memchr.S
index a8e843dbcc23..1a5f71b9d8b1 100644
--- a/arch/alpha/lib/ev6-memchr.S
+++ b/arch/alpha/lib/ev6-memchr.S
@@ -84,7 +84,7 @@ $last_quad: | |||
84 | beq $2, $not_found # U : U L U L | 84 | beq $2, $not_found # U : U L U L |
85 | 85 | ||
86 | $found_it: | 86 | $found_it: |
87 | #if defined(__alpha_fix__) && defined(__alpha_cix__) | 87 | #ifdef CONFIG_ALPHA_EV67 |
88 | /* | 88 | /* |
89 | * Since we are guaranteed to have set one of the bits, we don't | 89 | * Since we are guaranteed to have set one of the bits, we don't |
90 | * have to worry about coming back with a 0x40 out of cttz... | 90 | * have to worry about coming back with a 0x40 out of cttz... |
diff --git a/arch/alpha/lib/fpreg.c b/arch/alpha/lib/fpreg.c
index 97c4d9d7a4d5..05017ba34c3c 100644
--- a/arch/alpha/lib/fpreg.c
+++ b/arch/alpha/lib/fpreg.c
@@ -4,7 +4,7 @@ | |||
4 | * (C) Copyright 1998 Linus Torvalds | 4 | * (C) Copyright 1998 Linus Torvalds |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 7 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
8 | #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); | 8 | #define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val)); |
9 | #else | 9 | #else |
10 | #define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); | 10 | #define STT(reg,val) asm volatile ("stt $f"#reg",%0" : "=m"(val)); |
@@ -53,7 +53,7 @@ alpha_read_fp_reg (unsigned long reg) | |||
53 | return val; | 53 | return val; |
54 | } | 54 | } |
55 | 55 | ||
56 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 56 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
57 | #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); | 57 | #define LDT(reg,val) asm volatile ("itoft %0,$f"#reg : : "r"(val)); |
58 | #else | 58 | #else |
59 | #define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); | 59 | #define LDT(reg,val) asm volatile ("ldt $f"#reg",%0" : : "m"(val)); |
@@ -98,7 +98,7 @@ alpha_write_fp_reg (unsigned long reg, unsigned long val) | |||
98 | } | 98 | } |
99 | } | 99 | } |
100 | 100 | ||
101 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 101 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
102 | #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); | 102 | #define STS(reg,val) asm volatile ("ftois $f"#reg",%0" : "=r"(val)); |
103 | #else | 103 | #else |
104 | #define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); | 104 | #define STS(reg,val) asm volatile ("sts $f"#reg",%0" : "=m"(val)); |
@@ -147,7 +147,7 @@ alpha_read_fp_reg_s (unsigned long reg) | |||
147 | return val; | 147 | return val; |
148 | } | 148 | } |
149 | 149 | ||
150 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 150 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
151 | #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); | 151 | #define LDS(reg,val) asm volatile ("itofs %0,$f"#reg : : "r"(val)); |
152 | #else | 152 | #else |
153 | #define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); | 153 | #define LDS(reg,val) asm volatile ("lds $f"#reg",%0" : : "m"(val)); |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0dd24ebdf6ac..bf2e72698d02 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -53,6 +53,10 @@ config RWSEM_GENERIC_SPINLOCK | |||
53 | config RWSEM_XCHGADD_ALGORITHM | 53 | config RWSEM_XCHGADD_ALGORITHM |
54 | bool | 54 | bool |
55 | 55 | ||
56 | config GENERIC_HWEIGHT | ||
57 | bool | ||
58 | default y | ||
59 | |||
56 | config GENERIC_CALIBRATE_DELAY | 60 | config GENERIC_CALIBRATE_DELAY |
57 | bool | 61 | bool |
58 | default y | 62 | default y |
diff --git a/arch/arm26/Kconfig b/arch/arm26/Kconfig
index dee23d87fc5a..cf4ebf4c274d 100644
--- a/arch/arm26/Kconfig
+++ b/arch/arm26/Kconfig
@@ -41,6 +41,10 @@ config RWSEM_GENERIC_SPINLOCK | |||
41 | config RWSEM_XCHGADD_ALGORITHM | 41 | config RWSEM_XCHGADD_ALGORITHM |
42 | bool | 42 | bool |
43 | 43 | ||
44 | config GENERIC_HWEIGHT | ||
45 | bool | ||
46 | default y | ||
47 | |||
44 | config GENERIC_CALIBRATE_DELAY | 48 | config GENERIC_CALIBRATE_DELAY |
45 | bool | 49 | bool |
46 | default y | 50 | default y |
diff --git a/arch/arm26/kernel/traps.c b/arch/arm26/kernel/traps.c
index 5847ea5d7747..a79de041b50e 100644
--- a/arch/arm26/kernel/traps.c
+++ b/arch/arm26/kernel/traps.c
@@ -34,7 +34,7 @@ | |||
34 | #include <asm/system.h> | 34 | #include <asm/system.h> |
35 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
36 | #include <asm/unistd.h> | 36 | #include <asm/unistd.h> |
37 | #include <asm/semaphore.h> | 37 | #include <linux/mutex.h> |
38 | 38 | ||
39 | #include "ptrace.h" | 39 | #include "ptrace.h" |
40 | 40 | ||
@@ -207,19 +207,19 @@ void die_if_kernel(const char *str, struct pt_regs *regs, int err) | |||
207 | die(str, regs, err); | 207 | die(str, regs, err); |
208 | } | 208 | } |
209 | 209 | ||
210 | static DECLARE_MUTEX(undef_sem); | 210 | static DEFINE_MUTEX(undef_mutex); |
211 | static int (*undef_hook)(struct pt_regs *); | 211 | static int (*undef_hook)(struct pt_regs *); |
212 | 212 | ||
213 | int request_undef_hook(int (*fn)(struct pt_regs *)) | 213 | int request_undef_hook(int (*fn)(struct pt_regs *)) |
214 | { | 214 | { |
215 | int ret = -EBUSY; | 215 | int ret = -EBUSY; |
216 | 216 | ||
217 | down(&undef_sem); | 217 | mutex_lock(&undef_mutex); |
218 | if (undef_hook == NULL) { | 218 | if (undef_hook == NULL) { |
219 | undef_hook = fn; | 219 | undef_hook = fn; |
220 | ret = 0; | 220 | ret = 0; |
221 | } | 221 | } |
222 | up(&undef_sem); | 222 | mutex_unlock(&undef_mutex); |
223 | 223 | ||
224 | return ret; | 224 | return ret; |
225 | } | 225 | } |
@@ -228,12 +228,12 @@ int release_undef_hook(int (*fn)(struct pt_regs *)) | |||
228 | { | 228 | { |
229 | int ret = -EINVAL; | 229 | int ret = -EINVAL; |
230 | 230 | ||
231 | down(&undef_sem); | 231 | mutex_lock(&undef_mutex); |
232 | if (undef_hook == fn) { | 232 | if (undef_hook == fn) { |
233 | undef_hook = NULL; | 233 | undef_hook = NULL; |
234 | ret = 0; | 234 | ret = 0; |
235 | } | 235 | } |
236 | up(&undef_sem); | 236 | mutex_unlock(&undef_mutex); |
237 | 237 | ||
238 | return ret; | 238 | return ret; |
239 | } | 239 | } |
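
The arm26 change above, like the powernow-k8 and MTRR changes further down in this diff, is the same mechanical conversion: a semaphore that was only ever used for mutual exclusion becomes a mutex. The pattern, with an invented name:

    #include <linux/mutex.h>

    /* Before:  static DECLARE_MUTEX(foo_sem);  ...  down(&foo_sem); ... up(&foo_sem);
     * After:   the dedicated mutex primitive.                                         */
    static DEFINE_MUTEX(foo_mutex);

    static void foo_do_something(void)
    {
            mutex_lock(&foo_mutex);
            /* ... critical section ... */
            mutex_unlock(&foo_mutex);
    }
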
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index b83261949737..856b665020e7 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -16,6 +16,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
16 | config RWSEM_XCHGADD_ALGORITHM | 16 | config RWSEM_XCHGADD_ALGORITHM |
17 | bool | 17 | bool |
18 | 18 | ||
19 | config GENERIC_FIND_NEXT_BIT | ||
20 | bool | ||
21 | default y | ||
22 | |||
23 | config GENERIC_HWEIGHT | ||
24 | bool | ||
25 | default y | ||
26 | |||
19 | config GENERIC_CALIBRATE_DELAY | 27 | config GENERIC_CALIBRATE_DELAY |
20 | bool | 28 | bool |
21 | default y | 29 | default y |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index e08383712370..95a3892b8d1b 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -17,6 +17,10 @@ config GENERIC_FIND_NEXT_BIT | |||
17 | bool | 17 | bool |
18 | default y | 18 | default y |
19 | 19 | ||
20 | config GENERIC_HWEIGHT | ||
21 | bool | ||
22 | default y | ||
23 | |||
20 | config GENERIC_CALIBRATE_DELAY | 24 | config GENERIC_CALIBRATE_DELAY |
21 | bool | 25 | bool |
22 | default n | 26 | default n |
diff --git a/arch/frv/mm/mmu-context.c b/arch/frv/mm/mmu-context.c
index f2c6866fc88b..1530a4111e6d 100644
--- a/arch/frv/mm/mmu-context.c
+++ b/arch/frv/mm/mmu-context.c
@@ -54,9 +54,9 @@ static unsigned get_cxn(mm_context_t *ctx) | |||
54 | /* find the first unallocated context number | 54 | /* find the first unallocated context number |
55 | * - 0 is reserved for the kernel | 55 | * - 0 is reserved for the kernel |
56 | */ | 56 | */ |
57 | cxn = find_next_zero_bit(&cxn_bitmap, NR_CXN, 1); | 57 | cxn = find_next_zero_bit(cxn_bitmap, NR_CXN, 1); |
58 | if (cxn < NR_CXN) { | 58 | if (cxn < NR_CXN) { |
59 | set_bit(cxn, &cxn_bitmap); | 59 | set_bit(cxn, cxn_bitmap); |
60 | } | 60 | } |
61 | else { | 61 | else { |
62 | /* none remaining - need to steal someone else's cxn */ | 62 | /* none remaining - need to steal someone else's cxn */ |
@@ -138,7 +138,7 @@ void destroy_context(struct mm_struct *mm) | |||
138 | cxn_pinned = -1; | 138 | cxn_pinned = -1; |
139 | 139 | ||
140 | list_del_init(&ctx->id_link); | 140 | list_del_init(&ctx->id_link); |
141 | clear_bit(ctx->id, &cxn_bitmap); | 141 | clear_bit(ctx->id, cxn_bitmap); |
142 | __flush_tlb_mm(ctx->id); | 142 | __flush_tlb_mm(ctx->id); |
143 | ctx->id = 0; | 143 | ctx->id = 0; |
144 | } | 144 | } |
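
The frv fix above drops the '&' because the bitops helpers already take a pointer to the bitmap's unsigned long words. A minimal sketch of the intended usage (size and names invented), mirroring the "slot 0 is reserved" allocation in the hunk above:

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define NR_IDS 64                               /* illustrative size */

    static DECLARE_BITMAP(id_bitmap, NR_IDS);       /* an array of unsigned long */

    static int alloc_id(void)
    {
            int id = find_next_zero_bit(id_bitmap, NR_IDS, 1); /* 0 is reserved */

            if (id >= NR_IDS)
                    return -1;                      /* none free */
            set_bit(id, id_bitmap);                 /* no '&': already a pointer */
            return id;
    }
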
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index 98308b018a35..cabf0bfffc53 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -29,6 +29,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
29 | bool | 29 | bool |
30 | default n | 30 | default n |
31 | 31 | ||
32 | config GENERIC_FIND_NEXT_BIT | ||
33 | bool | ||
34 | default y | ||
35 | |||
36 | config GENERIC_HWEIGHT | ||
37 | bool | ||
38 | default y | ||
39 | |||
32 | config GENERIC_CALIBRATE_DELAY | 40 | config GENERIC_CALIBRATE_DELAY |
33 | bool | 41 | bool |
34 | default y | 42 | default y |
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b008fb0cd7b7..f7db71d0b913 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -37,6 +37,10 @@ config GENERIC_IOMAP | |||
37 | bool | 37 | bool |
38 | default y | 38 | default y |
39 | 39 | ||
40 | config GENERIC_HWEIGHT | ||
41 | bool | ||
42 | default y | ||
43 | |||
40 | config ARCH_MAY_HAVE_PC_FDC | 44 | config ARCH_MAY_HAVE_PC_FDC |
41 | bool | 45 | bool |
42 | default y | 46 | default y |
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index c848a5b30391..3e4adb1e2244 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -103,7 +103,7 @@ AFLAGS += $(mflags-y) | |||
103 | boot := arch/i386/boot | 103 | boot := arch/i386/boot |
104 | 104 | ||
105 | PHONY += zImage bzImage compressed zlilo bzlilo \ | 105 | PHONY += zImage bzImage compressed zlilo bzlilo \ |
106 | zdisk bzdisk fdimage fdimage144 fdimage288 install | 106 | zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install |
107 | 107 | ||
108 | all: bzImage | 108 | all: bzImage |
109 | 109 | ||
@@ -122,7 +122,7 @@ zlilo bzlilo: vmlinux | |||
122 | zdisk bzdisk: vmlinux | 122 | zdisk bzdisk: vmlinux |
123 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk | 123 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) zdisk |
124 | 124 | ||
125 | fdimage fdimage144 fdimage288: vmlinux | 125 | fdimage fdimage144 fdimage288 isoimage: vmlinux |
126 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ | 126 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@ |
127 | 127 | ||
128 | install: | 128 | install: |
@@ -139,6 +139,9 @@ define archhelp | |||
139 | echo ' install to $$(INSTALL_PATH) and run lilo' | 139 | echo ' install to $$(INSTALL_PATH) and run lilo' |
140 | echo ' bzdisk - Create a boot floppy in /dev/fd0' | 140 | echo ' bzdisk - Create a boot floppy in /dev/fd0' |
141 | echo ' fdimage - Create a boot floppy image' | 141 | echo ' fdimage - Create a boot floppy image' |
142 | echo ' isoimage - Create a boot CD-ROM image' | ||
142 | endef | 143 | endef |
143 | 144 | ||
144 | CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf | 145 | CLEAN_FILES += arch/$(ARCH)/boot/fdimage \ |
146 | arch/$(ARCH)/boot/image.iso \ | ||
147 | arch/$(ARCH)/boot/mtools.conf | ||
diff --git a/arch/i386/boot/Makefile b/arch/i386/boot/Makefile
index f136752563b1..33e55476381b 100644
--- a/arch/i386/boot/Makefile
+++ b/arch/i386/boot/Makefile
@@ -62,8 +62,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE | |||
62 | $(obj)/compressed/vmlinux: FORCE | 62 | $(obj)/compressed/vmlinux: FORCE |
63 | $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ | 63 | $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ |
64 | 64 | ||
65 | # Set this if you want to pass append arguments to the zdisk/fdimage kernel | 65 | # Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel |
66 | FDARGS = | 66 | FDARGS = |
67 | # Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel | ||
68 | FDINITRD = | ||
69 | |||
70 | image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,) | ||
67 | 71 | ||
68 | $(obj)/mtools.conf: $(src)/mtools.conf.in | 72 | $(obj)/mtools.conf: $(src)/mtools.conf.in |
69 | sed -e 's|@OBJ@|$(obj)|g' < $< > $@ | 73 | sed -e 's|@OBJ@|$(obj)|g' < $< > $@ |
@@ -72,8 +76,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in | |||
72 | zdisk: $(BOOTIMAGE) $(obj)/mtools.conf | 76 | zdisk: $(BOOTIMAGE) $(obj)/mtools.conf |
73 | MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync | 77 | MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync |
74 | syslinux /dev/fd0 ; sync | 78 | syslinux /dev/fd0 ; sync |
75 | echo 'default linux $(FDARGS)' | \ | 79 | echo '$(image_cmdline)' | \ |
76 | MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg | 80 | MTOOLSRC=$(src)/mtools.conf mcopy - a:syslinux.cfg |
81 | if [ -f '$(FDINITRD)' ] ; then \ | ||
82 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \ | ||
83 | fi | ||
77 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync | 84 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync |
78 | 85 | ||
79 | # These require being root or having syslinux 2.02 or higher installed | 86 | # These require being root or having syslinux 2.02 or higher installed |
@@ -81,18 +88,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf | |||
81 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 | 88 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 |
82 | MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync | 89 | MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync |
83 | syslinux $(obj)/fdimage ; sync | 90 | syslinux $(obj)/fdimage ; sync |
84 | echo 'default linux $(FDARGS)' | \ | 91 | echo '$(image_cmdline)' | \ |
85 | MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg | 92 | MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg |
93 | if [ -f '$(FDINITRD)' ] ; then \ | ||
94 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \ | ||
95 | fi | ||
86 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync | 96 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync |
87 | 97 | ||
88 | fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf | 98 | fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf |
89 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 | 99 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 |
90 | MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync | 100 | MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync |
91 | syslinux $(obj)/fdimage ; sync | 101 | syslinux $(obj)/fdimage ; sync |
92 | echo 'default linux $(FDARGS)' | \ | 102 | echo '$(image_cmdline)' | \ |
93 | MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg | 103 | MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg |
104 | if [ -f '$(FDINITRD)' ] ; then \ | ||
105 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \ | ||
106 | fi | ||
94 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync | 107 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync |
95 | 108 | ||
109 | isoimage: $(BOOTIMAGE) | ||
110 | -rm -rf $(obj)/isoimage | ||
111 | mkdir $(obj)/isoimage | ||
112 | cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ | ||
113 | $(obj)/isoimage | ||
114 | cp $(BOOTIMAGE) $(obj)/isoimage/linux | ||
115 | echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg | ||
116 | if [ -f '$(FDINITRD)' ] ; then \ | ||
117 | cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \ | ||
118 | fi | ||
119 | mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \ | ||
120 | -no-emul-boot -boot-load-size 4 -boot-info-table \ | ||
121 | $(obj)/isoimage | ||
122 | rm -rf $(obj)/isoimage | ||
123 | |||
96 | zlilo: $(BOOTIMAGE) | 124 | zlilo: $(BOOTIMAGE) |
97 | if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi | 125 | if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi |
98 | if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi | 126 | if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi |
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index f1a21945963d..033066176b3e 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -668,10 +668,10 @@ unsigned long __init acpi_find_rsdp(void) | |||
668 | unsigned long rsdp_phys = 0; | 668 | unsigned long rsdp_phys = 0; |
669 | 669 | ||
670 | if (efi_enabled) { | 670 | if (efi_enabled) { |
671 | if (efi.acpi20) | 671 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
672 | return __pa(efi.acpi20); | 672 | return efi.acpi20; |
673 | else if (efi.acpi) | 673 | else if (efi.acpi != EFI_INVALID_TABLE_ADDR) |
674 | return __pa(efi.acpi); | 674 | return efi.acpi; |
675 | } | 675 | } |
676 | /* | 676 | /* |
677 | * Scan memory looking for the RSDP signature. First search EBDA (low | 677 | * Scan memory looking for the RSDP signature. First search EBDA (low |
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index e5bc06480ff9..1e70823e1cb5 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 41 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
42 | #include <linux/acpi.h> | 42 | #include <linux/acpi.h> |
43 | #include <linux/mutex.h> | ||
43 | #include <acpi/processor.h> | 44 | #include <acpi/processor.h> |
44 | #endif | 45 | #endif |
45 | 46 | ||
@@ -49,7 +50,7 @@ | |||
49 | #include "powernow-k8.h" | 50 | #include "powernow-k8.h" |
50 | 51 | ||
51 | /* serialize freq changes */ | 52 | /* serialize freq changes */ |
52 | static DECLARE_MUTEX(fidvid_sem); | 53 | static DEFINE_MUTEX(fidvid_mutex); |
53 | 54 | ||
54 | static struct powernow_k8_data *powernow_data[NR_CPUS]; | 55 | static struct powernow_k8_data *powernow_data[NR_CPUS]; |
55 | 56 | ||
@@ -943,17 +944,17 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
943 | if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) | 944 | if (cpufreq_frequency_table_target(pol, data->powernow_table, targfreq, relation, &newstate)) |
944 | goto err_out; | 945 | goto err_out; |
945 | 946 | ||
946 | down(&fidvid_sem); | 947 | mutex_lock(&fidvid_mutex); |
947 | 948 | ||
948 | powernow_k8_acpi_pst_values(data, newstate); | 949 | powernow_k8_acpi_pst_values(data, newstate); |
949 | 950 | ||
950 | if (transition_frequency(data, newstate)) { | 951 | if (transition_frequency(data, newstate)) { |
951 | printk(KERN_ERR PFX "transition frequency failed\n"); | 952 | printk(KERN_ERR PFX "transition frequency failed\n"); |
952 | ret = 1; | 953 | ret = 1; |
953 | up(&fidvid_sem); | 954 | mutex_unlock(&fidvid_mutex); |
954 | goto err_out; | 955 | goto err_out; |
955 | } | 956 | } |
956 | up(&fidvid_sem); | 957 | mutex_unlock(&fidvid_mutex); |
957 | 958 | ||
958 | pol->cur = find_khz_freq_from_fid(data->currfid); | 959 | pol->cur = find_khz_freq_from_fid(data->currfid); |
959 | ret = 0; | 960 | ret = 0; |
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index 3b4618bed70d..fff90bda4733 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/pci.h> | 36 | #include <linux/pci.h> |
37 | #include <linux/smp.h> | 37 | #include <linux/smp.h> |
38 | #include <linux/cpu.h> | 38 | #include <linux/cpu.h> |
39 | #include <linux/mutex.h> | ||
39 | 40 | ||
40 | #include <asm/mtrr.h> | 41 | #include <asm/mtrr.h> |
41 | 42 | ||
@@ -47,7 +48,7 @@ | |||
47 | u32 num_var_ranges = 0; | 48 | u32 num_var_ranges = 0; |
48 | 49 | ||
49 | unsigned int *usage_table; | 50 | unsigned int *usage_table; |
50 | static DECLARE_MUTEX(mtrr_sem); | 51 | static DEFINE_MUTEX(mtrr_mutex); |
51 | 52 | ||
52 | u32 size_or_mask, size_and_mask; | 53 | u32 size_or_mask, size_and_mask; |
53 | 54 | ||
@@ -333,7 +334,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
333 | /* No CPU hotplug when we change MTRR entries */ | 334 | /* No CPU hotplug when we change MTRR entries */ |
334 | lock_cpu_hotplug(); | 335 | lock_cpu_hotplug(); |
335 | /* Search for existing MTRR */ | 336 | /* Search for existing MTRR */ |
336 | down(&mtrr_sem); | 337 | mutex_lock(&mtrr_mutex); |
337 | for (i = 0; i < num_var_ranges; ++i) { | 338 | for (i = 0; i < num_var_ranges; ++i) { |
338 | mtrr_if->get(i, &lbase, &lsize, &ltype); | 339 | mtrr_if->get(i, &lbase, &lsize, &ltype); |
339 | if (base >= lbase + lsize) | 340 | if (base >= lbase + lsize) |
@@ -371,7 +372,7 @@ int mtrr_add_page(unsigned long base, unsigned long size, | |||
371 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); | 372 | printk(KERN_INFO "mtrr: no more MTRRs available\n"); |
372 | error = i; | 373 | error = i; |
373 | out: | 374 | out: |
374 | up(&mtrr_sem); | 375 | mutex_unlock(&mtrr_mutex); |
375 | unlock_cpu_hotplug(); | 376 | unlock_cpu_hotplug(); |
376 | return error; | 377 | return error; |
377 | } | 378 | } |
@@ -464,7 +465,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
464 | max = num_var_ranges; | 465 | max = num_var_ranges; |
465 | /* No CPU hotplug when we change MTRR entries */ | 466 | /* No CPU hotplug when we change MTRR entries */ |
466 | lock_cpu_hotplug(); | 467 | lock_cpu_hotplug(); |
467 | down(&mtrr_sem); | 468 | mutex_lock(&mtrr_mutex); |
468 | if (reg < 0) { | 469 | if (reg < 0) { |
469 | /* Search for existing MTRR */ | 470 | /* Search for existing MTRR */ |
470 | for (i = 0; i < max; ++i) { | 471 | for (i = 0; i < max; ++i) { |
@@ -503,7 +504,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size) | |||
503 | set_mtrr(reg, 0, 0, 0); | 504 | set_mtrr(reg, 0, 0, 0); |
504 | error = reg; | 505 | error = reg; |
505 | out: | 506 | out: |
506 | up(&mtrr_sem); | 507 | mutex_unlock(&mtrr_mutex); |
507 | unlock_cpu_hotplug(); | 508 | unlock_cpu_hotplug(); |
508 | return error; | 509 | return error; |
509 | } | 510 | } |
@@ -685,7 +686,7 @@ void mtrr_ap_init(void) | |||
685 | if (!mtrr_if || !use_intel()) | 686 | if (!mtrr_if || !use_intel()) |
686 | return; | 687 | return; |
687 | /* | 688 | /* |
688 | * Ideally we should hold mtrr_sem here to avoid mtrr entries changed, | 689 | * Ideally we should hold mtrr_mutex here to avoid mtrr entries changed, |
689 | * but this routine will be called in cpu boot time, holding the lock | 690 | * but this routine will be called in cpu boot time, holding the lock |
690 | * breaks it. This routine is called in two cases: 1.very earily time | 691 | * breaks it. This routine is called in two cases: 1.very earily time |
691 | * of software resume, when there absolutely isn't mtrr entry changes; | 692 | * of software resume, when there absolutely isn't mtrr entry changes; |
diff --git a/arch/i386/kernel/dmi_scan.c b/arch/i386/kernel/dmi_scan.c
index ebc8dc116c43..5efceebc48dc 100644
--- a/arch/i386/kernel/dmi_scan.c
+++ b/arch/i386/kernel/dmi_scan.c
@@ -3,6 +3,7 @@ | |||
3 | #include <linux/init.h> | 3 | #include <linux/init.h> |
4 | #include <linux/module.h> | 4 | #include <linux/module.h> |
5 | #include <linux/dmi.h> | 5 | #include <linux/dmi.h> |
6 | #include <linux/efi.h> | ||
6 | #include <linux/bootmem.h> | 7 | #include <linux/bootmem.h> |
7 | #include <linux/slab.h> | 8 | #include <linux/slab.h> |
8 | #include <asm/dmi.h> | 9 | #include <asm/dmi.h> |
@@ -185,47 +186,72 @@ static void __init dmi_decode(struct dmi_header *dm) | |||
185 | } | 186 | } |
186 | } | 187 | } |
187 | 188 | ||
188 | void __init dmi_scan_machine(void) | 189 | static int __init dmi_present(char __iomem *p) |
189 | { | 190 | { |
190 | u8 buf[15]; | 191 | u8 buf[15]; |
191 | char __iomem *p, *q; | 192 | memcpy_fromio(buf, p, 15); |
193 | if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { | ||
194 | u16 num = (buf[13] << 8) | buf[12]; | ||
195 | u16 len = (buf[7] << 8) | buf[6]; | ||
196 | u32 base = (buf[11] << 24) | (buf[10] << 16) | | ||
197 | (buf[9] << 8) | buf[8]; | ||
192 | 198 | ||
193 | /* | 199 | /* |
194 | * no iounmap() for that ioremap(); it would be a no-op, but it's | 200 | * DMI version 0.0 means that the real version is taken from |
195 | * so early in setup that sucker gets confused into doing what | 201 | * the SMBIOS version, which we don't know at this point. |
196 | * it shouldn't if we actually call it. | 202 | */ |
197 | */ | 203 | if (buf[14] != 0) |
198 | p = ioremap(0xF0000, 0x10000); | 204 | printk(KERN_INFO "DMI %d.%d present.\n", |
199 | if (p == NULL) | 205 | buf[14] >> 4, buf[14] & 0xF); |
200 | goto out; | 206 | else |
201 | 207 | printk(KERN_INFO "DMI present.\n"); | |
202 | for (q = p; q < p + 0x10000; q += 16) { | 208 | if (dmi_table(base,len, num, dmi_decode) == 0) |
203 | memcpy_fromio(buf, q, 15); | 209 | return 0; |
204 | if ((memcmp(buf, "_DMI_", 5) == 0) && dmi_checksum(buf)) { | 210 | } |
205 | u16 num = (buf[13] << 8) | buf[12]; | 211 | return 1; |
206 | u16 len = (buf[7] << 8) | buf[6]; | 212 | } |
207 | u32 base = (buf[11] << 24) | (buf[10] << 16) | | ||
208 | (buf[9] << 8) | buf[8]; | ||
209 | |||
210 | /* | ||
211 | * DMI version 0.0 means that the real version is taken from | ||
212 | * the SMBIOS version, which we don't know at this point. | ||
213 | */ | ||
214 | if (buf[14] != 0) | ||
215 | printk(KERN_INFO "DMI %d.%d present.\n", | ||
216 | buf[14] >> 4, buf[14] & 0xF); | ||
217 | else | ||
218 | printk(KERN_INFO "DMI present.\n"); | ||
219 | 213 | ||
220 | if (dmi_table(base,len, num, dmi_decode) == 0) | 214 | void __init dmi_scan_machine(void) |
215 | { | ||
216 | char __iomem *p, *q; | ||
217 | int rc; | ||
218 | |||
219 | if (efi_enabled) { | ||
220 | if (efi.smbios == EFI_INVALID_TABLE_ADDR) | ||
221 | goto out; | ||
222 | |||
223 | /* This is called as a core_initcall() because it isn't | ||
224 | * needed during early boot. This also means we can | ||
225 | * iounmap the space when we're done with it. | ||
226 | */ | ||
227 | p = dmi_ioremap(efi.smbios, 32); | ||
228 | if (p == NULL) | ||
229 | goto out; | ||
230 | |||
231 | rc = dmi_present(p + 0x10); /* offset of _DMI_ string */ | ||
232 | dmi_iounmap(p, 32); | ||
233 | if (!rc) | ||
234 | return; | ||
235 | } | ||
236 | else { | ||
237 | /* | ||
238 | * no iounmap() for that ioremap(); it would be a no-op, but | ||
239 | * it's so early in setup that sucker gets confused into doing | ||
240 | * what it shouldn't if we actually call it. | ||
241 | */ | ||
242 | p = dmi_ioremap(0xF0000, 0x10000); | ||
243 | if (p == NULL) | ||
244 | goto out; | ||
245 | |||
246 | for (q = p; q < p + 0x10000; q += 16) { | ||
247 | rc = dmi_present(q); | ||
248 | if (!rc) | ||
221 | return; | 249 | return; |
222 | } | 250 | } |
223 | } | 251 | } |
224 | 252 | out: printk(KERN_INFO "DMI not present or invalid.\n"); | |
225 | out: printk(KERN_INFO "DMI not present or invalid.\n"); | ||
226 | } | 253 | } |
227 | 254 | ||
228 | |||
229 | /** | 255 | /** |
230 | * dmi_check_system - check system DMI data | 256 | * dmi_check_system - check system DMI data |
231 | * @list: array of dmi_system_id structures to match against | 257 | * @list: array of dmi_system_id structures to match against |
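
For reference, the 15-byte "_DMI_" anchor that the new dmi_present() decodes above has this layout when written as a packed little-endian struct (shown only to document the offsets; the code itself keeps the open-coded byte arithmetic):

    #include <linux/types.h>

    struct dmi_eps {                        /* 15 bytes total               */
            u8  anchor[5];                  /* "_DMI_"                      */
            u8  checksum;                   /* all 15 bytes sum to zero     */
            u16 table_len;                  /* buf[6..7]:  table length     */
            u32 table_base;                 /* buf[8..11]: table phys addr  */
            u16 num_structs;                /* buf[12..13]: structure count */
            u8  bcd_rev;                    /* buf[14]: 0 => use SMBIOS rev */
    } __attribute__((packed));
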
diff --git a/arch/i386/kernel/efi.c b/arch/i386/kernel/efi.c
index 7ec6cfa01fb3..9202b67c4b2e 100644
--- a/arch/i386/kernel/efi.c
+++ b/arch/i386/kernel/efi.c
@@ -361,7 +361,7 @@ void __init efi_init(void) | |||
361 | */ | 361 | */ |
362 | c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2); | 362 | c16 = (efi_char16_t *) boot_ioremap(efi.systab->fw_vendor, 2); |
363 | if (c16) { | 363 | if (c16) { |
364 | for (i = 0; i < sizeof(vendor) && *c16; ++i) | 364 | for (i = 0; i < (sizeof(vendor) - 1) && *c16; ++i) |
365 | vendor[i] = *c16++; | 365 | vendor[i] = *c16++; |
366 | vendor[i] = '\0'; | 366 | vendor[i] = '\0'; |
367 | } else | 367 | } else |
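
The one-character change above is a classic off-by-one: if the firmware vendor string fills the whole buffer, the old loop could leave i equal to sizeof(vendor), so the terminating store landed one byte past the array. A stand-alone illustration of the fixed pattern (names invented):

    #include <stddef.h>
    #include <stdio.h>

    /* Copy a 16-bit-character string into a fixed buffer and NUL-terminate.
     * Bounding the loop at bufsz - 1 keeps i <= bufsz - 1, so the terminator
     * always lands inside the array, however long the source is. */
    static void copy_c16(char *buf, size_t bufsz, const unsigned short *c16)
    {
            size_t i;

            for (i = 0; i < bufsz - 1 && *c16; ++i)
                    buf[i] = (char)*c16++;
            buf[i] = '\0';
    }

    int main(void)
    {
            static const unsigned short fw[] = { 'A','C','M','E',' ','B','I','O','S', 0 };
            char vendor[8];

            copy_c16(vendor, sizeof(vendor), fw);
            printf("%s\n", vendor);         /* prints "ACME BI" */
            return 0;
    }
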
@@ -381,29 +381,38 @@ void __init efi_init(void) | |||
381 | if (config_tables == NULL) | 381 | if (config_tables == NULL) |
382 | printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n"); | 382 | printk(KERN_ERR PFX "Could not map EFI Configuration Table!\n"); |
383 | 383 | ||
384 | efi.mps = EFI_INVALID_TABLE_ADDR; | ||
385 | efi.acpi = EFI_INVALID_TABLE_ADDR; | ||
386 | efi.acpi20 = EFI_INVALID_TABLE_ADDR; | ||
387 | efi.smbios = EFI_INVALID_TABLE_ADDR; | ||
388 | efi.sal_systab = EFI_INVALID_TABLE_ADDR; | ||
389 | efi.boot_info = EFI_INVALID_TABLE_ADDR; | ||
390 | efi.hcdp = EFI_INVALID_TABLE_ADDR; | ||
391 | efi.uga = EFI_INVALID_TABLE_ADDR; | ||
392 | |||
384 | for (i = 0; i < num_config_tables; i++) { | 393 | for (i = 0; i < num_config_tables; i++) { |
385 | if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { | 394 | if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { |
386 | efi.mps = (void *)config_tables[i].table; | 395 | efi.mps = config_tables[i].table; |
387 | printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table); | 396 | printk(KERN_INFO " MPS=0x%lx ", config_tables[i].table); |
388 | } else | 397 | } else |
389 | if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { | 398 | if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { |
390 | efi.acpi20 = __va(config_tables[i].table); | 399 | efi.acpi20 = config_tables[i].table; |
391 | printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table); | 400 | printk(KERN_INFO " ACPI 2.0=0x%lx ", config_tables[i].table); |
392 | } else | 401 | } else |
393 | if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { | 402 | if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { |
394 | efi.acpi = __va(config_tables[i].table); | 403 | efi.acpi = config_tables[i].table; |
395 | printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table); | 404 | printk(KERN_INFO " ACPI=0x%lx ", config_tables[i].table); |
396 | } else | 405 | } else |
397 | if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { | 406 | if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { |
398 | efi.smbios = (void *) config_tables[i].table; | 407 | efi.smbios = config_tables[i].table; |
399 | printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table); | 408 | printk(KERN_INFO " SMBIOS=0x%lx ", config_tables[i].table); |
400 | } else | 409 | } else |
401 | if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { | 410 | if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { |
402 | efi.hcdp = (void *)config_tables[i].table; | 411 | efi.hcdp = config_tables[i].table; |
403 | printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table); | 412 | printk(KERN_INFO " HCDP=0x%lx ", config_tables[i].table); |
404 | } else | 413 | } else |
405 | if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) { | 414 | if (efi_guidcmp(config_tables[i].guid, UGA_IO_PROTOCOL_GUID) == 0) { |
406 | efi.uga = (void *)config_tables[i].table; | 415 | efi.uga = config_tables[i].table; |
407 | printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table); | 416 | printk(KERN_INFO " UGA=0x%lx ", config_tables[i].table); |
408 | } | 417 | } |
409 | } | 418 | } |
diff --git a/arch/i386/kernel/kprobes.c b/arch/i386/kernel/kprobes.c
index 7a59050242a7..f19768789e8a 100644
--- a/arch/i386/kernel/kprobes.c
+++ b/arch/i386/kernel/kprobes.c
@@ -35,12 +35,56 @@ | |||
35 | #include <asm/cacheflush.h> | 35 | #include <asm/cacheflush.h> |
36 | #include <asm/kdebug.h> | 36 | #include <asm/kdebug.h> |
37 | #include <asm/desc.h> | 37 | #include <asm/desc.h> |
38 | #include <asm/uaccess.h> | ||
38 | 39 | ||
39 | void jprobe_return_end(void); | 40 | void jprobe_return_end(void); |
40 | 41 | ||
41 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 42 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
42 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 43 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
43 | 44 | ||
45 | /* insert a jmp code */ | ||
46 | static inline void set_jmp_op(void *from, void *to) | ||
47 | { | ||
48 | struct __arch_jmp_op { | ||
49 | char op; | ||
50 | long raddr; | ||
51 | } __attribute__((packed)) *jop; | ||
52 | jop = (struct __arch_jmp_op *)from; | ||
53 | jop->raddr = (long)(to) - ((long)(from) + 5); | ||
54 | jop->op = RELATIVEJUMP_INSTRUCTION; | ||
55 | } | ||
56 | |||
57 | /* | ||
58 | * returns non-zero if opcodes can be boosted. | ||
59 | */ | ||
60 | static inline int can_boost(kprobe_opcode_t opcode) | ||
61 | { | ||
62 | switch (opcode & 0xf0 ) { | ||
63 | case 0x70: | ||
64 | return 0; /* can't boost conditional jump */ | ||
65 | case 0x90: | ||
66 | /* can't boost call and pushf */ | ||
67 | return opcode != 0x9a && opcode != 0x9c; | ||
68 | case 0xc0: | ||
69 | /* can't boost undefined opcodes and soft-interruptions */ | ||
70 | return (0xc1 < opcode && opcode < 0xc6) || | ||
71 | (0xc7 < opcode && opcode < 0xcc) || opcode == 0xcf; | ||
72 | case 0xd0: | ||
73 | /* can boost AA* and XLAT */ | ||
74 | return (opcode == 0xd4 || opcode == 0xd5 || opcode == 0xd7); | ||
75 | case 0xe0: | ||
76 | /* can boost in/out and (may be) jmps */ | ||
77 | return (0xe3 < opcode && opcode != 0xe8); | ||
78 | case 0xf0: | ||
79 | /* clear and set flags can be boost */ | ||
80 | return (opcode == 0xf5 || (0xf7 < opcode && opcode < 0xfe)); | ||
81 | default: | ||
82 | /* currently, can't boost 2 bytes opcodes */ | ||
83 | return opcode != 0x0f; | ||
84 | } | ||
85 | } | ||
86 | |||
87 | |||
44 | /* | 88 | /* |
45 | * returns non-zero if opcode modifies the interrupt flag. | 89 | * returns non-zero if opcode modifies the interrupt flag. |
46 | */ | 90 | */ |
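
set_jmp_op() above emits a 5-byte "jmp rel32"; the displacement is measured from the end of that instruction, which is why 5 is added to the source address. A quick stand-alone check of the arithmetic:

    #include <assert.h>

    /* The displacement of a 5-byte jmp rel32 is relative to the next
     * instruction, i.e. from + 5 -- exactly what set_jmp_op() computes. */
    static long jmp_rel32(long from, long to)
    {
            return to - (from + 5);
    }

    int main(void)
    {
            assert(jmp_rel32(0x1000, 0x0ff0) == -0x15);  /* short backward jump   */
            assert(jmp_rel32(0x2000, 0x2010) ==  0x0b);  /* forward over 11 bytes */
            return 0;
    }
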
@@ -65,6 +109,11 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) | |||
65 | 109 | ||
66 | memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 110 | memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
67 | p->opcode = *p->addr; | 111 | p->opcode = *p->addr; |
112 | if (can_boost(p->opcode)) { | ||
113 | p->ainsn.boostable = 0; | ||
114 | } else { | ||
115 | p->ainsn.boostable = -1; | ||
116 | } | ||
68 | return 0; | 117 | return 0; |
69 | } | 118 | } |
70 | 119 | ||
@@ -155,9 +204,13 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
155 | { | 204 | { |
156 | struct kprobe *p; | 205 | struct kprobe *p; |
157 | int ret = 0; | 206 | int ret = 0; |
158 | kprobe_opcode_t *addr = NULL; | 207 | kprobe_opcode_t *addr; |
159 | unsigned long *lp; | ||
160 | struct kprobe_ctlblk *kcb; | 208 | struct kprobe_ctlblk *kcb; |
209 | #ifdef CONFIG_PREEMPT | ||
210 | unsigned pre_preempt_count = preempt_count(); | ||
211 | #endif /* CONFIG_PREEMPT */ | ||
212 | |||
213 | addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); | ||
161 | 214 | ||
162 | /* | 215 | /* |
163 | * We don't want to be preempted for the entire | 216 | * We don't want to be preempted for the entire |
@@ -166,17 +219,6 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
166 | preempt_disable(); | 219 | preempt_disable(); |
167 | kcb = get_kprobe_ctlblk(); | 220 | kcb = get_kprobe_ctlblk(); |
168 | 221 | ||
169 | /* Check if the application is using LDT entry for its code segment and | ||
170 | * calculate the address by reading the base address from the LDT entry. | ||
171 | */ | ||
172 | if ((regs->xcs & 4) && (current->mm)) { | ||
173 | lp = (unsigned long *) ((unsigned long)((regs->xcs >> 3) * 8) | ||
174 | + (char *) current->mm->context.ldt); | ||
175 | addr = (kprobe_opcode_t *) (get_desc_base(lp) + regs->eip - | ||
176 | sizeof(kprobe_opcode_t)); | ||
177 | } else { | ||
178 | addr = (kprobe_opcode_t *)(regs->eip - sizeof(kprobe_opcode_t)); | ||
179 | } | ||
180 | /* Check we're not actually recursing */ | 222 | /* Check we're not actually recursing */ |
181 | if (kprobe_running()) { | 223 | if (kprobe_running()) { |
182 | p = get_kprobe(addr); | 224 | p = get_kprobe(addr); |
@@ -252,6 +294,21 @@ static int __kprobes kprobe_handler(struct pt_regs *regs) | |||
252 | /* handler has already set things up, so skip ss setup */ | 294 | /* handler has already set things up, so skip ss setup */ |
253 | return 1; | 295 | return 1; |
254 | 296 | ||
297 | if (p->ainsn.boostable == 1 && | ||
298 | #ifdef CONFIG_PREEMPT | ||
299 | !(pre_preempt_count) && /* | ||
300 | * This enables the booster when the direct | ||
301 | * execution path isn't preempted. | ||
302 | */ | ||
303 | #endif /* CONFIG_PREEMPT */ | ||
304 | !p->post_handler && !p->break_handler ) { | ||
305 | /* Boost up -- we can execute copied instructions directly */ | ||
306 | reset_current_kprobe(); | ||
307 | regs->eip = (unsigned long)p->ainsn.insn; | ||
308 | preempt_enable_no_resched(); | ||
309 | return 1; | ||
310 | } | ||
311 | |||
255 | ss_probe: | 312 | ss_probe: |
256 | prepare_singlestep(p, regs); | 313 | prepare_singlestep(p, regs); |
257 | kcb->kprobe_status = KPROBE_HIT_SS; | 314 | kcb->kprobe_status = KPROBE_HIT_SS; |
@@ -267,17 +324,44 @@ no_kprobe: | |||
267 | * here. When a retprobed function returns, this probe is hit and | 324 | * here. When a retprobed function returns, this probe is hit and |
268 | * trampoline_probe_handler() runs, calling the kretprobe's handler. | 325 | * trampoline_probe_handler() runs, calling the kretprobe's handler. |
269 | */ | 326 | */ |
270 | void kretprobe_trampoline_holder(void) | 327 | void __kprobes kretprobe_trampoline_holder(void) |
271 | { | 328 | { |
272 | asm volatile ( ".global kretprobe_trampoline\n" | 329 | asm volatile ( ".global kretprobe_trampoline\n" |
273 | "kretprobe_trampoline: \n" | 330 | "kretprobe_trampoline: \n" |
274 | "nop\n"); | 331 | " pushf\n" |
275 | } | 332 | /* skip cs, eip, orig_eax, es, ds */ |
333 | " subl $20, %esp\n" | ||
334 | " pushl %eax\n" | ||
335 | " pushl %ebp\n" | ||
336 | " pushl %edi\n" | ||
337 | " pushl %esi\n" | ||
338 | " pushl %edx\n" | ||
339 | " pushl %ecx\n" | ||
340 | " pushl %ebx\n" | ||
341 | " movl %esp, %eax\n" | ||
342 | " call trampoline_handler\n" | ||
343 | /* move eflags to cs */ | ||
344 | " movl 48(%esp), %edx\n" | ||
345 | " movl %edx, 44(%esp)\n" | ||
346 | /* save true return address on eflags */ | ||
347 | " movl %eax, 48(%esp)\n" | ||
348 | " popl %ebx\n" | ||
349 | " popl %ecx\n" | ||
350 | " popl %edx\n" | ||
351 | " popl %esi\n" | ||
352 | " popl %edi\n" | ||
353 | " popl %ebp\n" | ||
354 | " popl %eax\n" | ||
355 | /* skip eip, orig_eax, es, ds */ | ||
356 | " addl $16, %esp\n" | ||
357 | " popf\n" | ||
358 | " ret\n"); | ||
359 | } | ||
276 | 360 | ||
277 | /* | 361 | /* |
278 | * Called when we hit the probe point at kretprobe_trampoline | 362 | * Called from kretprobe_trampoline |
279 | */ | 363 | */ |
280 | int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | 364 | fastcall void *__kprobes trampoline_handler(struct pt_regs *regs) |
281 | { | 365 | { |
282 | struct kretprobe_instance *ri = NULL; | 366 | struct kretprobe_instance *ri = NULL; |
283 | struct hlist_head *head; | 367 | struct hlist_head *head; |
@@ -306,8 +390,11 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
306 | /* another task is sharing our hash bucket */ | 390 | /* another task is sharing our hash bucket */ |
307 | continue; | 391 | continue; |
308 | 392 | ||
309 | if (ri->rp && ri->rp->handler) | 393 | if (ri->rp && ri->rp->handler){ |
394 | __get_cpu_var(current_kprobe) = &ri->rp->kp; | ||
310 | ri->rp->handler(ri, regs); | 395 | ri->rp->handler(ri, regs); |
396 | __get_cpu_var(current_kprobe) = NULL; | ||
397 | } | ||
311 | 398 | ||
312 | orig_ret_address = (unsigned long)ri->ret_addr; | 399 | orig_ret_address = (unsigned long)ri->ret_addr; |
313 | recycle_rp_inst(ri); | 400 | recycle_rp_inst(ri); |
@@ -322,18 +409,10 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
322 | } | 409 | } |
323 | 410 | ||
324 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | 411 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); |
325 | regs->eip = orig_ret_address; | ||
326 | 412 | ||
327 | reset_current_kprobe(); | ||
328 | spin_unlock_irqrestore(&kretprobe_lock, flags); | 413 | spin_unlock_irqrestore(&kretprobe_lock, flags); |
329 | preempt_enable_no_resched(); | ||
330 | 414 | ||
331 | /* | 415 | return (void*)orig_ret_address; |
332 | * By returning a non-zero value, we are telling | ||
333 | * kprobe_handler() that we don't want the post_handler | ||
334 | * to run (and have re-enabled preemption) | ||
335 | */ | ||
336 | return 1; | ||
337 | } | 416 | } |
338 | 417 | ||
339 | /* | 418 | /* |
@@ -357,15 +436,17 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | |||
357 | * 2) If the single-stepped instruction was a call, the return address | 436 | * 2) If the single-stepped instruction was a call, the return address |
358 | * that is atop the stack is the address following the copied instruction. | 437 | * that is atop the stack is the address following the copied instruction. |
359 | * We need to make it the address following the original instruction. | 438 | * We need to make it the address following the original instruction. |
439 | * | ||
440 | * This function also checks the instruction size to prepare for direct execution. | ||
360 | */ | 441 | */ |
361 | static void __kprobes resume_execution(struct kprobe *p, | 442 | static void __kprobes resume_execution(struct kprobe *p, |
362 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) | 443 | struct pt_regs *regs, struct kprobe_ctlblk *kcb) |
363 | { | 444 | { |
364 | unsigned long *tos = (unsigned long *)®s->esp; | 445 | unsigned long *tos = (unsigned long *)®s->esp; |
365 | unsigned long next_eip = 0; | ||
366 | unsigned long copy_eip = (unsigned long)p->ainsn.insn; | 446 | unsigned long copy_eip = (unsigned long)p->ainsn.insn; |
367 | unsigned long orig_eip = (unsigned long)p->addr; | 447 | unsigned long orig_eip = (unsigned long)p->addr; |
368 | 448 | ||
449 | regs->eflags &= ~TF_MASK; | ||
369 | switch (p->ainsn.insn[0]) { | 450 | switch (p->ainsn.insn[0]) { |
370 | case 0x9c: /* pushfl */ | 451 | case 0x9c: /* pushfl */ |
371 | *tos &= ~(TF_MASK | IF_MASK); | 452 | *tos &= ~(TF_MASK | IF_MASK); |
@@ -375,37 +456,51 @@ static void __kprobes resume_execution(struct kprobe *p, | |||
375 | case 0xcb: | 456 | case 0xcb: |
376 | case 0xc2: | 457 | case 0xc2: |
377 | case 0xca: | 458 | case 0xca: |
378 | regs->eflags &= ~TF_MASK; | 459 | case 0xea: /* jmp absolute -- eip is correct */ |
379 | /* eip is already adjusted, no more changes required*/ | 460 | /* eip is already adjusted, no more changes required */ |
380 | return; | 461 | p->ainsn.boostable = 1; |
462 | goto no_change; | ||
381 | case 0xe8: /* call relative - Fix return addr */ | 463 | case 0xe8: /* call relative - Fix return addr */ |
382 | *tos = orig_eip + (*tos - copy_eip); | 464 | *tos = orig_eip + (*tos - copy_eip); |
383 | break; | 465 | break; |
384 | case 0xff: | 466 | case 0xff: |
385 | if ((p->ainsn.insn[1] & 0x30) == 0x10) { | 467 | if ((p->ainsn.insn[1] & 0x30) == 0x10) { |
386 | /* call absolute, indirect */ | 468 | /* call absolute, indirect */ |
387 | /* Fix return addr; eip is correct. */ | 469 | /* |
388 | next_eip = regs->eip; | 470 | * Fix return addr; eip is correct. |
471 | * But this is not boostable | ||
472 | */ | ||
389 | *tos = orig_eip + (*tos - copy_eip); | 473 | *tos = orig_eip + (*tos - copy_eip); |
474 | goto no_change; | ||
390 | } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ | 475 | } else if (((p->ainsn.insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ |
391 | ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ | 476 | ((p->ainsn.insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ |
392 | /* eip is correct. */ | 477 | /* eip is correct. And this is boostable */ |
393 | next_eip = regs->eip; | 478 | p->ainsn.boostable = 1; |
479 | goto no_change; | ||
394 | } | 480 | } |
395 | break; | ||
396 | case 0xea: /* jmp absolute -- eip is correct */ | ||
397 | next_eip = regs->eip; | ||
398 | break; | ||
399 | default: | 481 | default: |
400 | break; | 482 | break; |
401 | } | 483 | } |
402 | 484 | ||
403 | regs->eflags &= ~TF_MASK; | 485 | if (p->ainsn.boostable == 0) { |
404 | if (next_eip) { | 486 | if ((regs->eip > copy_eip) && |
405 | regs->eip = next_eip; | 487 | (regs->eip - copy_eip) + 5 < MAX_INSN_SIZE) { |
406 | } else { | 488 | /* |
407 | regs->eip = orig_eip + (regs->eip - copy_eip); | 489 | * This instruction can be executed directly if it |
490 | * jumps back to the correct address. | ||
491 | */ | ||
492 | set_jmp_op((void *)regs->eip, | ||
493 | (void *)orig_eip + (regs->eip - copy_eip)); | ||
494 | p->ainsn.boostable = 1; | ||
495 | } else { | ||
496 | p->ainsn.boostable = -1; | ||
497 | } | ||
408 | } | 498 | } |
499 | |||
500 | regs->eip = orig_eip + (regs->eip - copy_eip); | ||
501 | |||
502 | no_change: | ||
503 | return; | ||
409 | } | 504 | } |
410 | 505 | ||
411 | /* | 506 | /* |
@@ -453,15 +548,57 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
453 | struct kprobe *cur = kprobe_running(); | 548 | struct kprobe *cur = kprobe_running(); |
454 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 549 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
455 | 550 | ||
456 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | 551 | switch(kcb->kprobe_status) { |
457 | return 1; | 552 | case KPROBE_HIT_SS: |
458 | 553 | case KPROBE_REENTER: | |
459 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | 554 | /* |
460 | resume_execution(cur, regs, kcb); | 555 | * We are here because the instruction being single |
556 | * stepped caused a page fault. We reset the current | ||
557 | * kprobe, point the eip back to the probe address | ||
558 | * and allow the page fault handler to continue as a | ||
559 | * normal page fault. | ||
560 | */ | ||
561 | regs->eip = (unsigned long)cur->addr; | ||
461 | regs->eflags |= kcb->kprobe_old_eflags; | 562 | regs->eflags |= kcb->kprobe_old_eflags; |
462 | 563 | if (kcb->kprobe_status == KPROBE_REENTER) | |
463 | reset_current_kprobe(); | 564 | restore_previous_kprobe(kcb); |
565 | else | ||
566 | reset_current_kprobe(); | ||
464 | preempt_enable_no_resched(); | 567 | preempt_enable_no_resched(); |
568 | break; | ||
569 | case KPROBE_HIT_ACTIVE: | ||
570 | case KPROBE_HIT_SSDONE: | ||
571 | /* | ||
572 | * We increment the nmissed count for accounting, | ||
573 | * we can also use npre/npostfault count for accounting | ||
574 | * these specific fault cases. | ||
575 | */ | ||
576 | kprobes_inc_nmissed_count(cur); | ||
577 | |||
578 | /* | ||
579 | * We come here because instructions in the pre/post | ||
580 | * handler caused the page fault; this could happen | ||
581 | * if the handler tries to access user space via | ||
582 | * copy_from_user(), get_user() etc. Let the | ||
583 | * user-specified handler try to fix it first. | ||
584 | */ | ||
585 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
586 | return 1; | ||
587 | |||
588 | /* | ||
589 | * In case the user-specified fault handler returned | ||
590 | * zero, try to fix up. | ||
591 | */ | ||
592 | if (fixup_exception(regs)) | ||
593 | return 1; | ||
594 | |||
595 | /* | ||
596 | * fixup_exception() could not handle it; | ||
597 | * let do_page_fault() fix it. | ||
598 | */ | ||
599 | break; | ||
600 | default: | ||
601 | break; | ||
465 | } | 602 | } |
466 | return 0; | 603 | return 0; |
467 | } | 604 | } |
@@ -475,6 +612,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
475 | struct die_args *args = (struct die_args *)data; | 612 | struct die_args *args = (struct die_args *)data; |
476 | int ret = NOTIFY_DONE; | 613 | int ret = NOTIFY_DONE; |
477 | 614 | ||
615 | if (args->regs && user_mode(args->regs)) | ||
616 | return ret; | ||
617 | |||
478 | switch (val) { | 618 | switch (val) { |
479 | case DIE_INT3: | 619 | case DIE_INT3: |
480 | if (kprobe_handler(args->regs)) | 620 | if (kprobe_handler(args->regs)) |
@@ -564,12 +704,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
564 | return 0; | 704 | return 0; |
565 | } | 705 | } |
566 | 706 | ||
567 | static struct kprobe trampoline_p = { | ||
568 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, | ||
569 | .pre_handler = trampoline_probe_handler | ||
570 | }; | ||
571 | |||
572 | int __init arch_init_kprobes(void) | 707 | int __init arch_init_kprobes(void) |
573 | { | 708 | { |
574 | return register_kprobe(&trampoline_p); | 709 | return 0; |
575 | } | 710 | } |
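Aside on the kprobe booster introduced above: the direct-execution path is only taken for probes that register neither a post_handler nor a break_handler, since those still need the single-step trap. Below is a minimal sketch of such a probe; the module, the target function and all names are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>

/* hypothetical probe target, defined here only so the example is self-contained */
static noinline int example_target(int x)
{
	return x + 1;
}

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;			/* let the probed instruction run */
}

static struct kprobe example_kp = {
	.pre_handler = example_pre,
	/* no post_handler and no break_handler, so the copied instruction
	 * may be executed directly ("boosted") instead of single-stepped */
};

static int __init example_init(void)
{
	int ret;

	example_kp.addr = (kprobe_opcode_t *)example_target;
	ret = register_kprobe(&example_kp);
	if (!ret)
		example_target(1);	/* trigger the probe once */
	return ret;
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");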
diff --git a/arch/i386/kernel/microcode.c b/arch/i386/kernel/microcode.c index 55bc365b8753..dd780a00553f 100644 --- a/arch/i386/kernel/microcode.c +++ b/arch/i386/kernel/microcode.c | |||
@@ -81,6 +81,7 @@ | |||
81 | #include <linux/miscdevice.h> | 81 | #include <linux/miscdevice.h> |
82 | #include <linux/spinlock.h> | 82 | #include <linux/spinlock.h> |
83 | #include <linux/mm.h> | 83 | #include <linux/mm.h> |
84 | #include <linux/mutex.h> | ||
84 | 85 | ||
85 | #include <asm/msr.h> | 86 | #include <asm/msr.h> |
86 | #include <asm/uaccess.h> | 87 | #include <asm/uaccess.h> |
@@ -114,7 +115,7 @@ MODULE_LICENSE("GPL"); | |||
114 | static DEFINE_SPINLOCK(microcode_update_lock); | 115 | static DEFINE_SPINLOCK(microcode_update_lock); |
115 | 116 | ||
116 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ | 117 | /* no concurrent ->write()s are allowed on /dev/cpu/microcode */ |
117 | static DECLARE_MUTEX(microcode_sem); | 118 | static DEFINE_MUTEX(microcode_mutex); |
118 | 119 | ||
119 | static void __user *user_buffer; /* user area microcode data buffer */ | 120 | static void __user *user_buffer; /* user area microcode data buffer */ |
120 | static unsigned int user_buffer_size; /* it's size */ | 121 | static unsigned int user_buffer_size; /* it's size */ |
@@ -444,7 +445,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ | |||
444 | return -EINVAL; | 445 | return -EINVAL; |
445 | } | 446 | } |
446 | 447 | ||
447 | down(µcode_sem); | 448 | mutex_lock(µcode_mutex); |
448 | 449 | ||
449 | user_buffer = (void __user *) buf; | 450 | user_buffer = (void __user *) buf; |
450 | user_buffer_size = (int) len; | 451 | user_buffer_size = (int) len; |
@@ -453,7 +454,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_ | |||
453 | if (!ret) | 454 | if (!ret) |
454 | ret = (ssize_t)len; | 455 | ret = (ssize_t)len; |
455 | 456 | ||
456 | up(µcode_sem); | 457 | mutex_unlock(µcode_mutex); |
457 | 458 | ||
458 | return ret; | 459 | return ret; |
459 | } | 460 | } |
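The microcode change above follows the stock semaphore-to-mutex conversion: a binary semaphore used purely for mutual exclusion becomes a struct mutex. A hedged sketch of the pattern, with made-up names:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

static int example_shared;

static int guarded_update(int val)
{
	int ret = 0;

	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	if (val < 0)
		ret = -EINVAL;
	else
		example_shared = val;	/* critical section */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */

	return ret;
}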
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c index 299e61674084..24b3e745478b 100644 --- a/arch/i386/kernel/process.c +++ b/arch/i386/kernel/process.c | |||
@@ -38,7 +38,6 @@ | |||
38 | #include <linux/kallsyms.h> | 38 | #include <linux/kallsyms.h> |
39 | #include <linux/ptrace.h> | 39 | #include <linux/ptrace.h> |
40 | #include <linux/random.h> | 40 | #include <linux/random.h> |
41 | #include <linux/kprobes.h> | ||
42 | 41 | ||
43 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
44 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
@@ -364,13 +363,6 @@ void exit_thread(void) | |||
364 | struct task_struct *tsk = current; | 363 | struct task_struct *tsk = current; |
365 | struct thread_struct *t = &tsk->thread; | 364 | struct thread_struct *t = &tsk->thread; |
366 | 365 | ||
367 | /* | ||
368 | * Remove function-return probe instances associated with this task | ||
369 | * and put them back on the free list. Do not insert an exit probe for | ||
370 | * this function, it will be disabled by kprobe_flush_task if you do. | ||
371 | */ | ||
372 | kprobe_flush_task(tsk); | ||
373 | |||
374 | /* The process may have allocated an io port bitmap... nuke it. */ | 366 | /* The process may have allocated an io port bitmap... nuke it. */ |
375 | if (unlikely(NULL != t->io_bitmap_ptr)) { | 367 | if (unlikely(NULL != t->io_bitmap_ptr)) { |
376 | int cpu = get_cpu(); | 368 | int cpu = get_cpu(); |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index d313a11acafa..6917daa159ab 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
@@ -1058,10 +1058,10 @@ static int __init | |||
1058 | free_available_memory(unsigned long start, unsigned long end, void *arg) | 1058 | free_available_memory(unsigned long start, unsigned long end, void *arg) |
1059 | { | 1059 | { |
1060 | /* check max_low_pfn */ | 1060 | /* check max_low_pfn */ |
1061 | if (start >= ((max_low_pfn + 1) << PAGE_SHIFT)) | 1061 | if (start >= (max_low_pfn << PAGE_SHIFT)) |
1062 | return 0; | 1062 | return 0; |
1063 | if (end >= ((max_low_pfn + 1) << PAGE_SHIFT)) | 1063 | if (end >= (max_low_pfn << PAGE_SHIFT)) |
1064 | end = (max_low_pfn + 1) << PAGE_SHIFT; | 1064 | end = max_low_pfn << PAGE_SHIFT; |
1065 | if (start < end) | 1065 | if (start < end) |
1066 | free_bootmem(start, end - start); | 1066 | free_bootmem(start, end - start); |
1067 | 1067 | ||
@@ -1286,8 +1286,6 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat | |||
1286 | probe_roms(); | 1286 | probe_roms(); |
1287 | for (i = 0; i < e820.nr_map; i++) { | 1287 | for (i = 0; i < e820.nr_map; i++) { |
1288 | struct resource *res; | 1288 | struct resource *res; |
1289 | if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) | ||
1290 | continue; | ||
1291 | res = kzalloc(sizeof(struct resource), GFP_ATOMIC); | 1289 | res = kzalloc(sizeof(struct resource), GFP_ATOMIC); |
1292 | switch (e820.map[i].type) { | 1290 | switch (e820.map[i].type) { |
1293 | case E820_RAM: res->name = "System RAM"; break; | 1291 | case E820_RAM: res->name = "System RAM"; break; |
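On the free_available_memory() fix in the first hunk above: max_low_pfn is a count of low-memory pages, so the exclusive byte limit is max_low_pfn << PAGE_SHIFT; the old "+ 1" let the range run one page past lowmem. A minimal sketch of the arithmetic (values illustrative):

#include <asm/page.h>

/* e.g. with PAGE_SHIFT == 12 and 0x38000 low pages (896 MB),
 * valid low-memory byte addresses are [0, 0x38000000) */
static inline unsigned long lowmem_end_bytes(unsigned long nr_low_pages)
{
	return nr_low_pages << PAGE_SHIFT;	/* exclusive upper bound */
}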
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index de5386b01d38..4624f8ca2459 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -386,8 +386,12 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
386 | #endif | 386 | #endif |
387 | if (nl) | 387 | if (nl) |
388 | printk("\n"); | 388 | printk("\n"); |
389 | notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); | 389 | if (notify_die(DIE_OOPS, str, regs, err, |
390 | show_registers(regs); | 390 | current->thread.trap_no, SIGSEGV) != |
391 | NOTIFY_STOP) | ||
392 | show_registers(regs); | ||
393 | else | ||
394 | regs = NULL; | ||
391 | } else | 395 | } else |
392 | printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); | 396 | printk(KERN_EMERG "Recursive die() failure, output suppressed\n"); |
393 | 397 | ||
@@ -395,6 +399,9 @@ void die(const char * str, struct pt_regs * regs, long err) | |||
395 | die.lock_owner = -1; | 399 | die.lock_owner = -1; |
396 | spin_unlock_irqrestore(&die.lock, flags); | 400 | spin_unlock_irqrestore(&die.lock, flags); |
397 | 401 | ||
402 | if (!regs) | ||
403 | return; | ||
404 | |||
398 | if (kexec_should_crash(current)) | 405 | if (kexec_should_crash(current)) |
399 | crash_kexec(regs); | 406 | crash_kexec(regs); |
400 | 407 | ||
@@ -623,7 +630,7 @@ static DEFINE_SPINLOCK(nmi_print_lock); | |||
623 | 630 | ||
624 | void die_nmi (struct pt_regs *regs, const char *msg) | 631 | void die_nmi (struct pt_regs *regs, const char *msg) |
625 | { | 632 | { |
626 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 0, SIGINT) == | 633 | if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == |
627 | NOTIFY_STOP) | 634 | NOTIFY_STOP) |
628 | return; | 635 | return; |
629 | 636 | ||
@@ -662,7 +669,7 @@ static void default_do_nmi(struct pt_regs * regs) | |||
662 | reason = get_nmi_reason(); | 669 | reason = get_nmi_reason(); |
663 | 670 | ||
664 | if (!(reason & 0xc0)) { | 671 | if (!(reason & 0xc0)) { |
665 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 0, SIGINT) | 672 | if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT) |
666 | == NOTIFY_STOP) | 673 | == NOTIFY_STOP) |
667 | return; | 674 | return; |
668 | #ifdef CONFIG_X86_LOCAL_APIC | 675 | #ifdef CONFIG_X86_LOCAL_APIC |
@@ -678,7 +685,7 @@ static void default_do_nmi(struct pt_regs * regs) | |||
678 | unknown_nmi_error(reason, regs); | 685 | unknown_nmi_error(reason, regs); |
679 | return; | 686 | return; |
680 | } | 687 | } |
681 | if (notify_die(DIE_NMI, "nmi", regs, reason, 0, SIGINT) == NOTIFY_STOP) | 688 | if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP) |
682 | return; | 689 | return; |
683 | if (reason & 0x80) | 690 | if (reason & 0x80) |
684 | mem_parity_error(reason, regs); | 691 | mem_parity_error(reason, regs); |
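For context on the die()/notify_die() change above: a notifier on the die chain can now return NOTIFY_STOP for DIE_OOPS and thereby suppress the register dump and the rest of die(). A hedged sketch of such a consumer (names are illustrative, not from this patch):

#include <linux/notifier.h>
#include <linux/kernel.h>
#include <asm/kdebug.h>

static int example_die_event(struct notifier_block *nb, unsigned long val,
			     void *data)
{
	struct die_args *args = data;

	if (val == DIE_OOPS && args->regs) {
		/* e.g. hand args->regs off to a crash or debug agent here */
		return NOTIFY_STOP;	/* die() then skips show_registers() */
	}
	return NOTIFY_DONE;
}

static struct notifier_block example_die_nb = {
	.notifier_call = example_die_event,
};

/* registered from init code with: register_die_notifier(&example_die_nb); */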
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 10b6b9e7716b..edffe25a477a 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -34,6 +34,10 @@ config RWSEM_XCHGADD_ALGORITHM | |||
34 | bool | 34 | bool |
35 | default y | 35 | default y |
36 | 36 | ||
37 | config GENERIC_FIND_NEXT_BIT | ||
38 | bool | ||
39 | default y | ||
40 | |||
37 | config GENERIC_CALIBRATE_DELAY | 41 | config GENERIC_CALIBRATE_DELAY |
38 | bool | 42 | bool |
39 | default y | 43 | default y |
@@ -42,6 +46,10 @@ config TIME_INTERPOLATION | |||
42 | bool | 46 | bool |
43 | default y | 47 | default y |
44 | 48 | ||
49 | config DMI | ||
50 | bool | ||
51 | default y | ||
52 | |||
45 | config EFI | 53 | config EFI |
46 | bool | 54 | bool |
47 | default y | 55 | default y |
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h index 68ceb4e690c7..ccb98ed48e58 100644 --- a/arch/ia64/ia32/ia32priv.h +++ b/arch/ia64/ia32/ia32priv.h | |||
@@ -29,9 +29,9 @@ | |||
29 | struct partial_page { | 29 | struct partial_page { |
30 | struct partial_page *next; /* linked list, sorted by address */ | 30 | struct partial_page *next; /* linked list, sorted by address */ |
31 | struct rb_node pp_rb; | 31 | struct rb_node pp_rb; |
32 | /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*32 | 32 | /* 64K is the largest "normal" page supported by ia64 ABI. So 4K*64 |
33 | * should suffice.*/ | 33 | * should suffice.*/ |
34 | unsigned int bitmap; | 34 | unsigned long bitmap; |
35 | unsigned int base; | 35 | unsigned int base; |
36 | }; | 36 | }; |
37 | 37 | ||
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 13e739e4c84d..5366b3b23d09 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/resource.h> | 25 | #include <linux/resource.h> |
26 | #include <linux/times.h> | 26 | #include <linux/times.h> |
27 | #include <linux/utsname.h> | 27 | #include <linux/utsname.h> |
28 | #include <linux/timex.h> | ||
29 | #include <linux/smp.h> | 28 | #include <linux/smp.h> |
30 | #include <linux/smp_lock.h> | 29 | #include <linux/smp_lock.h> |
31 | #include <linux/sem.h> | 30 | #include <linux/sem.h> |
@@ -2591,78 +2590,4 @@ sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, | |||
2591 | ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); | 2590 | ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); |
2592 | return sys_setresgid(srgid, segid, ssgid); | 2591 | return sys_setresgid(srgid, segid, ssgid); |
2593 | } | 2592 | } |
2594 | |||
2595 | /* Handle adjtimex compatibility. */ | ||
2596 | |||
2597 | struct timex32 { | ||
2598 | u32 modes; | ||
2599 | s32 offset, freq, maxerror, esterror; | ||
2600 | s32 status, constant, precision, tolerance; | ||
2601 | struct compat_timeval time; | ||
2602 | s32 tick; | ||
2603 | s32 ppsfreq, jitter, shift, stabil; | ||
2604 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
2605 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
2606 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
2607 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
2608 | }; | ||
2609 | |||
2610 | extern int do_adjtimex(struct timex *); | ||
2611 | |||
2612 | asmlinkage long | ||
2613 | sys32_adjtimex(struct timex32 *utp) | ||
2614 | { | ||
2615 | struct timex txc; | ||
2616 | int ret; | ||
2617 | |||
2618 | memset(&txc, 0, sizeof(struct timex)); | ||
2619 | |||
2620 | if(get_user(txc.modes, &utp->modes) || | ||
2621 | __get_user(txc.offset, &utp->offset) || | ||
2622 | __get_user(txc.freq, &utp->freq) || | ||
2623 | __get_user(txc.maxerror, &utp->maxerror) || | ||
2624 | __get_user(txc.esterror, &utp->esterror) || | ||
2625 | __get_user(txc.status, &utp->status) || | ||
2626 | __get_user(txc.constant, &utp->constant) || | ||
2627 | __get_user(txc.precision, &utp->precision) || | ||
2628 | __get_user(txc.tolerance, &utp->tolerance) || | ||
2629 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
2630 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
2631 | __get_user(txc.tick, &utp->tick) || | ||
2632 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
2633 | __get_user(txc.jitter, &utp->jitter) || | ||
2634 | __get_user(txc.shift, &utp->shift) || | ||
2635 | __get_user(txc.stabil, &utp->stabil) || | ||
2636 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
2637 | __get_user(txc.calcnt, &utp->calcnt) || | ||
2638 | __get_user(txc.errcnt, &utp->errcnt) || | ||
2639 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
2640 | return -EFAULT; | ||
2641 | |||
2642 | ret = do_adjtimex(&txc); | ||
2643 | |||
2644 | if(put_user(txc.modes, &utp->modes) || | ||
2645 | __put_user(txc.offset, &utp->offset) || | ||
2646 | __put_user(txc.freq, &utp->freq) || | ||
2647 | __put_user(txc.maxerror, &utp->maxerror) || | ||
2648 | __put_user(txc.esterror, &utp->esterror) || | ||
2649 | __put_user(txc.status, &utp->status) || | ||
2650 | __put_user(txc.constant, &utp->constant) || | ||
2651 | __put_user(txc.precision, &utp->precision) || | ||
2652 | __put_user(txc.tolerance, &utp->tolerance) || | ||
2653 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
2654 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
2655 | __put_user(txc.tick, &utp->tick) || | ||
2656 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
2657 | __put_user(txc.jitter, &utp->jitter) || | ||
2658 | __put_user(txc.shift, &utp->shift) || | ||
2659 | __put_user(txc.stabil, &utp->stabil) || | ||
2660 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
2661 | __put_user(txc.calcnt, &utp->calcnt) || | ||
2662 | __put_user(txc.errcnt, &utp->errcnt) || | ||
2663 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
2664 | ret = -EFAULT; | ||
2665 | |||
2666 | return ret; | ||
2667 | } | ||
2668 | #endif /* NOTYET */ | 2593 | #endif /* NOTYET */ |
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 09a0dbc17fb6..59e871dae742 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile | |||
@@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds | |||
7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ | 7 | obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ |
8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ | 8 | irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ |
9 | salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ | 9 | salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ |
10 | unwind.o mca.o mca_asm.o topology.o | 10 | unwind.o mca.o mca_asm.o topology.o dmi_scan.o |
11 | 11 | ||
12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o | 12 | obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o |
13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o | 13 | obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o |
@@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o | |||
30 | obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o | 30 | obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o |
31 | obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o | 31 | obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o |
32 | mca_recovery-y += mca_drv.o mca_drv_asm.o | 32 | mca_recovery-y += mca_drv.o mca_drv_asm.o |
33 | dmi_scan-y += ../../i386/kernel/dmi_scan.o | ||
33 | 34 | ||
34 | # The gate DSO image is built using a special linker script. | 35 | # The gate DSO image is built using a special linker script. |
35 | targets += gate.so gate-syms.o | 36 | targets += gate.so gate-syms.o |
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index a4e218ce2edb..58c93a30348c 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c | |||
@@ -651,9 +651,9 @@ unsigned long __init acpi_find_rsdp(void) | |||
651 | { | 651 | { |
652 | unsigned long rsdp_phys = 0; | 652 | unsigned long rsdp_phys = 0; |
653 | 653 | ||
654 | if (efi.acpi20) | 654 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
655 | rsdp_phys = __pa(efi.acpi20); | 655 | rsdp_phys = efi.acpi20; |
656 | else if (efi.acpi) | 656 | else if (efi.acpi != EFI_INVALID_TABLE_ADDR) |
657 | printk(KERN_WARNING PREFIX | 657 | printk(KERN_WARNING PREFIX |
658 | "v1.0/r0.71 tables no longer supported\n"); | 658 | "v1.0/r0.71 tables no longer supported\n"); |
659 | return rsdp_phys; | 659 | return rsdp_phys; |
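The acpi_find_rsdp() change above reflects the new efi.<table> convention used throughout this series: the fields hold physical addresses, with EFI_INVALID_TABLE_ADDR meaning "not provided", so callers must test for that sentinel and convert with __va() themselves. A short usage sketch (the SMBIOS consumer is hypothetical):

#include <linux/efi.h>
#include <asm/page.h>

static void __init example_use_smbios(void)
{
	void *smbios;

	if (efi.smbios == EFI_INVALID_TABLE_ADDR)
		return;			/* firmware did not supply the table */

	smbios = __va(efi.smbios);	/* physical -> kernel virtual */
	/* ... parse the SMBIOS entry point at smbios ... */
}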
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 9990320b6f9a..12cfedce73b1 100644 --- a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c | |||
@@ -458,24 +458,33 @@ efi_init (void) | |||
458 | printk(KERN_INFO "EFI v%u.%.02u by %s:", | 458 | printk(KERN_INFO "EFI v%u.%.02u by %s:", |
459 | efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); | 459 | efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); |
460 | 460 | ||
461 | efi.mps = EFI_INVALID_TABLE_ADDR; | ||
462 | efi.acpi = EFI_INVALID_TABLE_ADDR; | ||
463 | efi.acpi20 = EFI_INVALID_TABLE_ADDR; | ||
464 | efi.smbios = EFI_INVALID_TABLE_ADDR; | ||
465 | efi.sal_systab = EFI_INVALID_TABLE_ADDR; | ||
466 | efi.boot_info = EFI_INVALID_TABLE_ADDR; | ||
467 | efi.hcdp = EFI_INVALID_TABLE_ADDR; | ||
468 | efi.uga = EFI_INVALID_TABLE_ADDR; | ||
469 | |||
461 | for (i = 0; i < (int) efi.systab->nr_tables; i++) { | 470 | for (i = 0; i < (int) efi.systab->nr_tables; i++) { |
462 | if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { | 471 | if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { |
463 | efi.mps = __va(config_tables[i].table); | 472 | efi.mps = config_tables[i].table; |
464 | printk(" MPS=0x%lx", config_tables[i].table); | 473 | printk(" MPS=0x%lx", config_tables[i].table); |
465 | } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { | 474 | } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { |
466 | efi.acpi20 = __va(config_tables[i].table); | 475 | efi.acpi20 = config_tables[i].table; |
467 | printk(" ACPI 2.0=0x%lx", config_tables[i].table); | 476 | printk(" ACPI 2.0=0x%lx", config_tables[i].table); |
468 | } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { | 477 | } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { |
469 | efi.acpi = __va(config_tables[i].table); | 478 | efi.acpi = config_tables[i].table; |
470 | printk(" ACPI=0x%lx", config_tables[i].table); | 479 | printk(" ACPI=0x%lx", config_tables[i].table); |
471 | } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { | 480 | } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { |
472 | efi.smbios = __va(config_tables[i].table); | 481 | efi.smbios = config_tables[i].table; |
473 | printk(" SMBIOS=0x%lx", config_tables[i].table); | 482 | printk(" SMBIOS=0x%lx", config_tables[i].table); |
474 | } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { | 483 | } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { |
475 | efi.sal_systab = __va(config_tables[i].table); | 484 | efi.sal_systab = config_tables[i].table; |
476 | printk(" SALsystab=0x%lx", config_tables[i].table); | 485 | printk(" SALsystab=0x%lx", config_tables[i].table); |
477 | } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { | 486 | } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { |
478 | efi.hcdp = __va(config_tables[i].table); | 487 | efi.hcdp = config_tables[i].table; |
479 | printk(" HCDP=0x%lx", config_tables[i].table); | 488 | printk(" HCDP=0x%lx", config_tables[i].table); |
480 | } | 489 | } |
481 | } | 490 | } |
@@ -677,27 +686,34 @@ EXPORT_SYMBOL(efi_mem_attributes); | |||
677 | /* | 686 | /* |
678 | * Determines whether the memory at phys_addr supports the desired | 687 | * Determines whether the memory at phys_addr supports the desired |
679 | * attribute (WB, UC, etc). If this returns 1, the caller can safely | 688 | * attribute (WB, UC, etc). If this returns 1, the caller can safely |
680 | * access *size bytes at phys_addr with the specified attribute. | 689 | * access size bytes at phys_addr with the specified attribute. |
681 | */ | 690 | */ |
682 | static int | 691 | int |
683 | efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) | 692 | efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr) |
684 | { | 693 | { |
694 | unsigned long end = phys_addr + size; | ||
685 | efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); | 695 | efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); |
686 | unsigned long md_end; | ||
687 | 696 | ||
688 | if (!md || (md->attribute & attr) != attr) | 697 | /* |
698 | * Some firmware doesn't report MMIO regions in the EFI memory | ||
699 | * map. The Intel BigSur (a.k.a. HP i2000) has this problem. | ||
700 | * On those platforms, we have to assume UC is valid everywhere. | ||
701 | */ | ||
702 | if (!md || (md->attribute & attr) != attr) { | ||
703 | if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio()) | ||
704 | return 1; | ||
689 | return 0; | 705 | return 0; |
706 | } | ||
690 | 707 | ||
691 | do { | 708 | do { |
692 | md_end = efi_md_end(md); | 709 | unsigned long md_end = efi_md_end(md); |
693 | if (phys_addr + *size <= md_end) | 710 | |
711 | if (end <= md_end) | ||
694 | return 1; | 712 | return 1; |
695 | 713 | ||
696 | md = efi_memory_descriptor(md_end); | 714 | md = efi_memory_descriptor(md_end); |
697 | if (!md || (md->attribute & attr) != attr) { | 715 | if (!md || (md->attribute & attr) != attr) |
698 | *size = md_end - phys_addr; | 716 | return 0; |
699 | return 1; | ||
700 | } | ||
701 | } while (md); | 717 | } while (md); |
702 | return 0; | 718 | return 0; |
703 | } | 719 | } |
@@ -708,7 +724,7 @@ efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) | |||
708 | * control access size. | 724 | * control access size. |
709 | */ | 725 | */ |
710 | int | 726 | int |
711 | valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) | 727 | valid_phys_addr_range (unsigned long phys_addr, unsigned long size) |
712 | { | 728 | { |
713 | return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); | 729 | return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); |
714 | } | 730 | } |
@@ -723,7 +739,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) | |||
723 | * because that doesn't appear in the boot-time EFI memory map. | 739 | * because that doesn't appear in the boot-time EFI memory map. |
724 | */ | 740 | */ |
725 | int | 741 | int |
726 | valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) | 742 | valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size) |
727 | { | 743 | { |
728 | if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) | 744 | if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) |
729 | return 1; | 745 | return 1; |
@@ -731,14 +747,6 @@ valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) | |||
731 | if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) | 747 | if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) |
732 | return 1; | 748 | return 1; |
733 | 749 | ||
734 | /* | ||
735 | * Some firmware doesn't report MMIO regions in the EFI memory map. | ||
736 | * The Intel BigSur (a.k.a. HP i2000) has this problem. In this | ||
737 | * case, we can't use the EFI memory map to validate mmap requests. | ||
738 | */ | ||
739 | if (!efi_memmap_has_mmio()) | ||
740 | return 1; | ||
741 | |||
742 | return 0; | 750 | return 0; |
743 | } | 751 | } |
744 | 752 | ||
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 50ae8c7d453d..789881ca83d4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <asm/pgtable.h> | 34 | #include <asm/pgtable.h> |
35 | #include <asm/kdebug.h> | 35 | #include <asm/kdebug.h> |
36 | #include <asm/sections.h> | 36 | #include <asm/sections.h> |
37 | #include <asm/uaccess.h> | ||
37 | 38 | ||
38 | extern void jprobe_inst_return(void); | 39 | extern void jprobe_inst_return(void); |
39 | 40 | ||
@@ -722,13 +723,50 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) | |||
722 | struct kprobe *cur = kprobe_running(); | 723 | struct kprobe *cur = kprobe_running(); |
723 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 724 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
724 | 725 | ||
725 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
726 | return 1; | ||
727 | 726 | ||
728 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | 727 | switch(kcb->kprobe_status) { |
729 | resume_execution(cur, regs); | 728 | case KPROBE_HIT_SS: |
730 | reset_current_kprobe(); | 729 | case KPROBE_REENTER: |
730 | /* | ||
731 | * We are here because the instruction being single | ||
732 | * stepped caused a page fault. We reset the current | ||
733 | * kprobe, point the instruction pointer back to | ||
734 | * the probe address and allow the page fault handler | ||
735 | * to continue as a normal page fault. | ||
736 | */ | ||
737 | regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; | ||
738 | ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; | ||
739 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
740 | restore_previous_kprobe(kcb); | ||
741 | else | ||
742 | reset_current_kprobe(); | ||
731 | preempt_enable_no_resched(); | 743 | preempt_enable_no_resched(); |
744 | break; | ||
745 | case KPROBE_HIT_ACTIVE: | ||
746 | case KPROBE_HIT_SSDONE: | ||
747 | /* | ||
748 | * We increment the nmissed count for accounting, | ||
749 | * we can also use npre/npostfault count for accounting | ||
750 | * these specific fault cases. | ||
751 | */ | ||
752 | kprobes_inc_nmissed_count(cur); | ||
753 | |||
754 | /* | ||
755 | * We come here because instructions in the pre/post | ||
756 | * handler caused the page fault; this could happen | ||
757 | * if the handler tries to access user space via | ||
758 | * copy_from_user(), get_user() etc. Let the | ||
759 | * user-specified handler try to fix it first. | ||
760 | */ | ||
761 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
762 | return 1; | ||
763 | |||
764 | /* | ||
765 | * Let ia64_do_page_fault() fix it. | ||
766 | */ | ||
767 | break; | ||
768 | default: | ||
769 | break; | ||
732 | } | 770 | } |
733 | 771 | ||
734 | return 0; | 772 | return 0; |
@@ -740,6 +778,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
740 | struct die_args *args = (struct die_args *)data; | 778 | struct die_args *args = (struct die_args *)data; |
741 | int ret = NOTIFY_DONE; | 779 | int ret = NOTIFY_DONE; |
742 | 780 | ||
781 | if (args->regs && user_mode(args->regs)) | ||
782 | return ret; | ||
783 | |||
743 | switch(val) { | 784 | switch(val) { |
744 | case DIE_BREAK: | 785 | case DIE_BREAK: |
745 | /* err is break number from ia64_bad_break() */ | 786 | /* err is break number from ia64_bad_break() */ |
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 87ff7fe33cfb..8963171788d5 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c | |||
@@ -69,6 +69,7 @@ | |||
69 | #include <linux/kernel.h> | 69 | #include <linux/kernel.h> |
70 | #include <linux/smp.h> | 70 | #include <linux/smp.h> |
71 | #include <linux/workqueue.h> | 71 | #include <linux/workqueue.h> |
72 | #include <linux/cpumask.h> | ||
72 | 73 | ||
73 | #include <asm/delay.h> | 74 | #include <asm/delay.h> |
74 | #include <asm/kdebug.h> | 75 | #include <asm/kdebug.h> |
@@ -1505,7 +1506,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, | |||
1505 | ti->cpu = cpu; | 1506 | ti->cpu = cpu; |
1506 | p->thread_info = ti; | 1507 | p->thread_info = ti; |
1507 | p->state = TASK_UNINTERRUPTIBLE; | 1508 | p->state = TASK_UNINTERRUPTIBLE; |
1508 | __set_bit(cpu, &p->cpus_allowed); | 1509 | cpu_set(cpu, p->cpus_allowed); |
1509 | INIT_LIST_HEAD(&p->tasks); | 1510 | INIT_LIST_HEAD(&p->tasks); |
1510 | p->parent = p->real_parent = p->group_leader = p; | 1511 | p->parent = p->real_parent = p->group_leader = p; |
1511 | INIT_LIST_HEAD(&p->children); | 1512 | INIT_LIST_HEAD(&p->children); |
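The one-liner above swaps a raw bitop on &p->cpus_allowed for the cpumask accessor, which is the safe form now that cpumask_t may be wider than one long. A small sketch of the accessor pattern (the helper name is made up):

#include <linux/sched.h>
#include <linux/cpumask.h>

static void pin_task_to_cpu(struct task_struct *p, int cpu)
{
	cpus_clear(p->cpus_allowed);	/* start from an empty mask */
	cpu_set(cpu, p->cpus_allowed);	/* allow exactly one CPU */
}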
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 309d59658e5f..355d57970ba3 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/efi.h> | 30 | #include <linux/efi.h> |
31 | #include <linux/interrupt.h> | 31 | #include <linux/interrupt.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/kprobes.h> | ||
34 | 33 | ||
35 | #include <asm/cpu.h> | 34 | #include <asm/cpu.h> |
36 | #include <asm/delay.h> | 35 | #include <asm/delay.h> |
@@ -738,13 +737,6 @@ void | |||
738 | exit_thread (void) | 737 | exit_thread (void) |
739 | { | 738 | { |
740 | 739 | ||
741 | /* | ||
742 | * Remove function-return probe instances associated with this task | ||
743 | * and put them back on the free list. Do not insert an exit probe for | ||
744 | * this function, it will be disabled by kprobe_flush_task if you do. | ||
745 | */ | ||
746 | kprobe_flush_task(current); | ||
747 | |||
748 | ia64_drop_fpu(current); | 740 | ia64_drop_fpu(current); |
749 | #ifdef CONFIG_PERFMON | 741 | #ifdef CONFIG_PERFMON |
750 | /* if needed, stop monitoring and flush state to perfmon context */ | 742 | /* if needed, stop monitoring and flush state to perfmon context */ |
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index eb388e271b2b..e4dfda1eb7dd 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
38 | #include <linux/threads.h> | 38 | #include <linux/threads.h> |
39 | #include <linux/tty.h> | 39 | #include <linux/tty.h> |
40 | #include <linux/dmi.h> | ||
40 | #include <linux/serial.h> | 41 | #include <linux/serial.h> |
41 | #include <linux/serial_core.h> | 42 | #include <linux/serial_core.h> |
42 | #include <linux/efi.h> | 43 | #include <linux/efi.h> |
@@ -433,7 +434,7 @@ setup_arch (char **cmdline_p) | |||
433 | find_memory(); | 434 | find_memory(); |
434 | 435 | ||
435 | /* process SAL system table: */ | 436 | /* process SAL system table: */ |
436 | ia64_sal_init(efi.sal_systab); | 437 | ia64_sal_init(__va(efi.sal_systab)); |
437 | 438 | ||
438 | ia64_setup_printk_clock(); | 439 | ia64_setup_printk_clock(); |
439 | 440 | ||
@@ -887,3 +888,10 @@ check_bugs (void) | |||
887 | ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, | 888 | ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, |
888 | (unsigned long) __end___mckinley_e9_bundles); | 889 | (unsigned long) __end___mckinley_e9_bundles); |
889 | } | 890 | } |
891 | |||
892 | static int __init run_dmi_scan(void) | ||
893 | { | ||
894 | dmi_scan_machine(); | ||
895 | return 0; | ||
896 | } | ||
897 | core_initcall(run_dmi_scan); | ||
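With dmi_scan_machine() now run from an ia64 initcall, generic consumers of the DMI tables work here too. A hedged sketch of one such consumer (the initcall and message are illustrative):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>

static int __init example_report_board(void)
{
	char *vendor = dmi_get_system_info(DMI_SYS_VENDOR);
	char *product = dmi_get_system_info(DMI_PRODUCT_NAME);

	if (vendor && product)
		printk(KERN_INFO "DMI: %s %s\n", vendor, product);
	return 0;
}
late_initcall(example_report_board);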
diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index ac64664a1807..d8536a2c22a9 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile | |||
@@ -6,7 +6,7 @@ obj-y := io.o | |||
6 | 6 | ||
7 | lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ | 7 | lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ |
8 | __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ | 8 | __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ |
9 | bitop.o checksum.o clear_page.o csum_partial_copy.o \ | 9 | checksum.o clear_page.o csum_partial_copy.o \ |
10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ | 10 | clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ |
11 | flush.o ip_fast_csum.o do_csum.o \ | 11 | flush.o ip_fast_csum.o do_csum.o \ |
12 | memset.o strlen.o | 12 | memset.o strlen.o |
diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c deleted file mode 100644 index 82e299c8464e..000000000000 --- a/arch/ia64/lib/bitop.c +++ /dev/null | |||
@@ -1,88 +0,0 @@ | |||
1 | #include <linux/compiler.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <asm/intrinsics.h> | ||
4 | #include <linux/module.h> | ||
5 | #include <linux/bitops.h> | ||
6 | |||
7 | /* | ||
8 | * Find next zero bit in a bitmap reasonably efficiently.. | ||
9 | */ | ||
10 | |||
11 | int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset) | ||
12 | { | ||
13 | unsigned long *p = ((unsigned long *) addr) + (offset >> 6); | ||
14 | unsigned long result = offset & ~63UL; | ||
15 | unsigned long tmp; | ||
16 | |||
17 | if (offset >= size) | ||
18 | return size; | ||
19 | size -= result; | ||
20 | offset &= 63UL; | ||
21 | if (offset) { | ||
22 | tmp = *(p++); | ||
23 | tmp |= ~0UL >> (64-offset); | ||
24 | if (size < 64) | ||
25 | goto found_first; | ||
26 | if (~tmp) | ||
27 | goto found_middle; | ||
28 | size -= 64; | ||
29 | result += 64; | ||
30 | } | ||
31 | while (size & ~63UL) { | ||
32 | if (~(tmp = *(p++))) | ||
33 | goto found_middle; | ||
34 | result += 64; | ||
35 | size -= 64; | ||
36 | } | ||
37 | if (!size) | ||
38 | return result; | ||
39 | tmp = *p; | ||
40 | found_first: | ||
41 | tmp |= ~0UL << size; | ||
42 | if (tmp == ~0UL) /* any bits zero? */ | ||
43 | return result + size; /* nope */ | ||
44 | found_middle: | ||
45 | return result + ffz(tmp); | ||
46 | } | ||
47 | EXPORT_SYMBOL(__find_next_zero_bit); | ||
48 | |||
49 | /* | ||
50 | * Find next bit in a bitmap reasonably efficiently.. | ||
51 | */ | ||
52 | int __find_next_bit(const void *addr, unsigned long size, unsigned long offset) | ||
53 | { | ||
54 | unsigned long *p = ((unsigned long *) addr) + (offset >> 6); | ||
55 | unsigned long result = offset & ~63UL; | ||
56 | unsigned long tmp; | ||
57 | |||
58 | if (offset >= size) | ||
59 | return size; | ||
60 | size -= result; | ||
61 | offset &= 63UL; | ||
62 | if (offset) { | ||
63 | tmp = *(p++); | ||
64 | tmp &= ~0UL << offset; | ||
65 | if (size < 64) | ||
66 | goto found_first; | ||
67 | if (tmp) | ||
68 | goto found_middle; | ||
69 | size -= 64; | ||
70 | result += 64; | ||
71 | } | ||
72 | while (size & ~63UL) { | ||
73 | if ((tmp = *(p++))) | ||
74 | goto found_middle; | ||
75 | result += 64; | ||
76 | size -= 64; | ||
77 | } | ||
78 | if (!size) | ||
79 | return result; | ||
80 | tmp = *p; | ||
81 | found_first: | ||
82 | tmp &= ~0UL >> (64-size); | ||
83 | if (tmp == 0UL) /* Are any bits set? */ | ||
84 | return result + size; /* Nope. */ | ||
85 | found_middle: | ||
86 | return result + __ffs(tmp); | ||
87 | } | ||
88 | EXPORT_SYMBOL(__find_next_bit); | ||
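The ia64-private bit searchers deleted above are replaced by the generic versions selected via CONFIG_GENERIC_FIND_NEXT_BIT; callers keep the same interface. A usage sketch (sizes and names are illustrative):

#include <linux/bitmap.h>
#include <linux/bitops.h>

#define EXAMPLE_NBITS	256

static DECLARE_BITMAP(example_map, EXAMPLE_NBITS);

static void walk_set_bits(void)
{
	unsigned long bit;

	for (bit = find_next_bit(example_map, EXAMPLE_NBITS, 0);
	     bit < EXAMPLE_NBITS;
	     bit = find_next_bit(example_map, EXAMPLE_NBITS, bit + 1)) {
		/* ... handle set bit 'bit' ... */
	}
}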
diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index d78d20f0a0f0..bb0a01a81878 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile | |||
@@ -2,7 +2,7 @@ | |||
2 | # Makefile for the ia64-specific parts of the memory manager. | 2 | # Makefile for the ia64-specific parts of the memory manager. |
3 | # | 3 | # |
4 | 4 | ||
5 | obj-y := init.o fault.o tlb.o extable.o | 5 | obj-y := init.o fault.o tlb.o extable.o ioremap.o |
6 | 6 | ||
7 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 7 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
8 | obj-$(CONFIG_NUMA) += numa.o | 8 | obj-$(CONFIG_NUMA) += numa.o |
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c new file mode 100644 index 000000000000..62328621f99c --- /dev/null +++ b/arch/ia64/mm/ioremap.c | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. | ||
3 | * Bjorn Helgaas <bjorn.helgaas@hp.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #include <linux/compiler.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/efi.h> | ||
13 | #include <asm/io.h> | ||
14 | |||
15 | static inline void __iomem * | ||
16 | __ioremap (unsigned long offset, unsigned long size) | ||
17 | { | ||
18 | return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset); | ||
19 | } | ||
20 | |||
21 | void __iomem * | ||
22 | ioremap (unsigned long offset, unsigned long size) | ||
23 | { | ||
24 | if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC)) | ||
25 | return __ioremap(offset, size); | ||
26 | |||
27 | if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB)) | ||
28 | return phys_to_virt(offset); | ||
29 | |||
30 | /* | ||
31 | * Someday this should check ACPI resources so we | ||
32 | * can do the right thing for hot-plugged regions. | ||
33 | */ | ||
34 | return __ioremap(offset, size); | ||
35 | } | ||
36 | EXPORT_SYMBOL(ioremap); | ||
37 | |||
38 | void __iomem * | ||
39 | ioremap_nocache (unsigned long offset, unsigned long size) | ||
40 | { | ||
41 | return __ioremap(offset, size); | ||
42 | } | ||
43 | EXPORT_SYMBOL(ioremap_nocache); | ||
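The new ioremap.c above keys the mapping type off the EFI attribute check reworked earlier in this patch. A hedged sketch of a caller (device addresses and register offsets are invented):

#include <linux/errno.h>
#include <asm/io.h>

#define EXAMPLE_MMIO_BASE	0x80000000UL	/* hypothetical device */
#define EXAMPLE_MMIO_SIZE	0x1000UL

static void __iomem *example_regs;

static int example_map_device(void)
{
	example_regs = ioremap(EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;

	writel(1, example_regs + 0x10);	/* hypothetical enable register */
	return 0;
}

static void example_unmap_device(void)
{
	iounmap(example_regs);
}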
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 8b6d5c844708..30988dfbddff 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c | |||
@@ -327,10 +327,11 @@ sn_scan_pcdp(void) | |||
327 | struct pcdp_interface_pci if_pci; | 327 | struct pcdp_interface_pci if_pci; |
328 | extern struct efi efi; | 328 | extern struct efi efi; |
329 | 329 | ||
330 | pcdp = efi.hcdp; | 330 | if (efi.hcdp == EFI_INVALID_TABLE_ADDR) |
331 | if (! pcdp) | ||
332 | return; /* no hcdp/pcdp table */ | 331 | return; /* no hcdp/pcdp table */ |
333 | 332 | ||
333 | pcdp = __va(efi.hcdp); | ||
334 | |||
334 | if (pcdp->rev < 3) | 335 | if (pcdp->rev < 3) |
335 | return; /* only support PCDP (rev >= 3) */ | 336 | return; /* only support PCDP (rev >= 3) */ |
336 | 337 | ||
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index a3dcc3fab4b7..05c864c6c2d9 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -214,6 +214,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
214 | bool | 214 | bool |
215 | default n | 215 | default n |
216 | 216 | ||
217 | config GENERIC_FIND_NEXT_BIT | ||
218 | bool | ||
219 | default y | ||
220 | |||
221 | config GENERIC_HWEIGHT | ||
222 | bool | ||
223 | default y | ||
224 | |||
217 | config GENERIC_CALIBRATE_DELAY | 225 | config GENERIC_CALIBRATE_DELAY |
218 | bool | 226 | bool |
219 | default y | 227 | default y |
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 8849439e88dd..805b81fedf80 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig | |||
@@ -17,6 +17,10 @@ config RWSEM_GENERIC_SPINLOCK | |||
17 | config RWSEM_XCHGADD_ALGORITHM | 17 | config RWSEM_XCHGADD_ALGORITHM |
18 | bool | 18 | bool |
19 | 19 | ||
20 | config GENERIC_HWEIGHT | ||
21 | bool | ||
22 | default y | ||
23 | |||
20 | config GENERIC_CALIBRATE_DELAY | 24 | config GENERIC_CALIBRATE_DELAY |
21 | bool | 25 | bool |
22 | default y | 26 | default y |
diff --git a/arch/m68k/bvme6000/config.c b/arch/m68k/bvme6000/config.c index 3ffc84f9c291..c90cb5fcc8ef 100644 --- a/arch/m68k/bvme6000/config.c +++ b/arch/m68k/bvme6000/config.c | |||
@@ -142,7 +142,7 @@ void __init config_bvme6000(void) | |||
142 | /* Now do the PIT configuration */ | 142 | /* Now do the PIT configuration */ |
143 | 143 | ||
144 | pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */ | 144 | pit->pgcr = 0x00; /* Unidirectional 8 bit, no handshake for now */ |
145 | pit->psrr = 0x18; /* PIACK and PIRQ fucntions enabled */ | 145 | pit->psrr = 0x18; /* PIACK and PIRQ functions enabled */ |
146 | pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */ | 146 | pit->pacr = 0x00; /* Sub Mode 00, H2 i/p, no DMA */ |
147 | pit->padr = 0x00; /* Just to be tidy! */ | 147 | pit->padr = 0x00; /* Just to be tidy! */ |
148 | pit->paddr = 0x00; /* All inputs for now (safest) */ | 148 | pit->paddr = 0x00; /* All inputs for now (safest) */ |
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig index e50858dbc237..3cde6822ead1 100644 --- a/arch/m68knommu/Kconfig +++ b/arch/m68knommu/Kconfig | |||
@@ -25,6 +25,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
25 | bool | 25 | bool |
26 | default n | 26 | default n |
27 | 27 | ||
28 | config GENERIC_FIND_NEXT_BIT | ||
29 | bool | ||
30 | default y | ||
31 | |||
32 | config GENERIC_HWEIGHT | ||
33 | bool | ||
34 | default y | ||
35 | |||
28 | config GENERIC_CALIBRATE_DELAY | 36 | config GENERIC_CALIBRATE_DELAY |
29 | bool | 37 | bool |
30 | default y | 38 | default y |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index ac2012f033d6..5080ea1799a4 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -801,6 +801,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
801 | config RWSEM_XCHGADD_ALGORITHM | 801 | config RWSEM_XCHGADD_ALGORITHM |
802 | bool | 802 | bool |
803 | 803 | ||
804 | config GENERIC_FIND_NEXT_BIT | ||
805 | bool | ||
806 | default y | ||
807 | |||
808 | config GENERIC_HWEIGHT | ||
809 | bool | ||
810 | default y | ||
811 | |||
804 | config GENERIC_CALIBRATE_DELAY | 812 | config GENERIC_CALIBRATE_DELAY |
805 | bool | 813 | bool |
806 | default y | 814 | default y |
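Several architectures above now select GENERIC_FIND_NEXT_BIT and GENERIC_HWEIGHT instead of carrying private copies; callers keep using the same lib helpers. A small usage sketch for the hweight side (the function name is made up):

#include <linux/bitops.h>

/* population count over a word, provided by lib/hweight.c when
 * CONFIG_GENERIC_HWEIGHT is selected */
static unsigned int count_set_bits(unsigned long mask)
{
	return hweight_long(mask);
}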
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c index 013bc93688e8..3f40c37a9ee6 100644 --- a/arch/mips/kernel/linux32.c +++ b/arch/mips/kernel/linux32.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/utime.h> | 30 | #include <linux/utime.h> |
31 | #include <linux/utsname.h> | 31 | #include <linux/utsname.h> |
32 | #include <linux/personality.h> | 32 | #include <linux/personality.h> |
33 | #include <linux/timex.h> | ||
34 | #include <linux/dnotify.h> | 33 | #include <linux/dnotify.h> |
35 | #include <linux/module.h> | 34 | #include <linux/module.h> |
36 | #include <linux/binfmts.h> | 35 | #include <linux/binfmts.h> |
@@ -1157,79 +1156,6 @@ out: | |||
1157 | return err; | 1156 | return err; |
1158 | } | 1157 | } |
1159 | 1158 | ||
1160 | /* Handle adjtimex compatibility. */ | ||
1161 | |||
1162 | struct timex32 { | ||
1163 | u32 modes; | ||
1164 | s32 offset, freq, maxerror, esterror; | ||
1165 | s32 status, constant, precision, tolerance; | ||
1166 | struct compat_timeval time; | ||
1167 | s32 tick; | ||
1168 | s32 ppsfreq, jitter, shift, stabil; | ||
1169 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
1170 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
1171 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
1172 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
1173 | }; | ||
1174 | |||
1175 | extern int do_adjtimex(struct timex *); | ||
1176 | |||
1177 | asmlinkage int sys32_adjtimex(struct timex32 __user *utp) | ||
1178 | { | ||
1179 | struct timex txc; | ||
1180 | int ret; | ||
1181 | |||
1182 | memset(&txc, 0, sizeof(struct timex)); | ||
1183 | |||
1184 | if (get_user(txc.modes, &utp->modes) || | ||
1185 | __get_user(txc.offset, &utp->offset) || | ||
1186 | __get_user(txc.freq, &utp->freq) || | ||
1187 | __get_user(txc.maxerror, &utp->maxerror) || | ||
1188 | __get_user(txc.esterror, &utp->esterror) || | ||
1189 | __get_user(txc.status, &utp->status) || | ||
1190 | __get_user(txc.constant, &utp->constant) || | ||
1191 | __get_user(txc.precision, &utp->precision) || | ||
1192 | __get_user(txc.tolerance, &utp->tolerance) || | ||
1193 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
1194 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
1195 | __get_user(txc.tick, &utp->tick) || | ||
1196 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
1197 | __get_user(txc.jitter, &utp->jitter) || | ||
1198 | __get_user(txc.shift, &utp->shift) || | ||
1199 | __get_user(txc.stabil, &utp->stabil) || | ||
1200 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
1201 | __get_user(txc.calcnt, &utp->calcnt) || | ||
1202 | __get_user(txc.errcnt, &utp->errcnt) || | ||
1203 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
1204 | return -EFAULT; | ||
1205 | |||
1206 | ret = do_adjtimex(&txc); | ||
1207 | |||
1208 | if (put_user(txc.modes, &utp->modes) || | ||
1209 | __put_user(txc.offset, &utp->offset) || | ||
1210 | __put_user(txc.freq, &utp->freq) || | ||
1211 | __put_user(txc.maxerror, &utp->maxerror) || | ||
1212 | __put_user(txc.esterror, &utp->esterror) || | ||
1213 | __put_user(txc.status, &utp->status) || | ||
1214 | __put_user(txc.constant, &utp->constant) || | ||
1215 | __put_user(txc.precision, &utp->precision) || | ||
1216 | __put_user(txc.tolerance, &utp->tolerance) || | ||
1217 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
1218 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
1219 | __put_user(txc.tick, &utp->tick) || | ||
1220 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
1221 | __put_user(txc.jitter, &utp->jitter) || | ||
1222 | __put_user(txc.shift, &utp->shift) || | ||
1223 | __put_user(txc.stabil, &utp->stabil) || | ||
1224 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
1225 | __put_user(txc.calcnt, &utp->calcnt) || | ||
1226 | __put_user(txc.errcnt, &utp->errcnt) || | ||
1227 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
1228 | ret = -EFAULT; | ||
1229 | |||
1230 | return ret; | ||
1231 | } | ||
1232 | |||
1233 | asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, | 1159 | asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, |
1234 | s32 count) | 1160 | s32 count) |
1235 | { | 1161 | { |
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index 02c8267e45e7..05a2c0567dae 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -273,7 +273,7 @@ EXPORT(sysn32_call_table) | |||
273 | PTR sys_pivot_root | 273 | PTR sys_pivot_root |
274 | PTR sys32_sysctl | 274 | PTR sys32_sysctl |
275 | PTR sys_prctl | 275 | PTR sys_prctl |
276 | PTR sys32_adjtimex | 276 | PTR compat_sys_adjtimex |
277 | PTR compat_sys_setrlimit /* 6155 */ | 277 | PTR compat_sys_setrlimit /* 6155 */ |
278 | PTR sys_chroot | 278 | PTR sys_chroot |
279 | PTR sys_sync | 279 | PTR sys_sync |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 797e0d874889..19c4ca481b02 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -328,7 +328,7 @@ sys_call_table: | |||
328 | PTR sys_setdomainname | 328 | PTR sys_setdomainname |
329 | PTR sys32_newuname | 329 | PTR sys32_newuname |
330 | PTR sys_ni_syscall /* sys_modify_ldt */ | 330 | PTR sys_ni_syscall /* sys_modify_ldt */ |
331 | PTR sys32_adjtimex | 331 | PTR compat_sys_adjtimex |
332 | PTR sys_mprotect /* 4125 */ | 332 | PTR sys_mprotect /* 4125 */ |
333 | PTR compat_sys_sigprocmask | 333 | PTR compat_sys_sigprocmask |
334 | PTR sys_ni_syscall /* was creat_module */ | 334 | PTR sys_ni_syscall /* was creat_module */ |
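With the per-arch sys32_adjtimex() copy above deleted, the 32-bit syscall table entries are rewired to a single compat handler. A minimal sketch of the pattern such a handler follows is shown below, assuming the usual linux/compat.h and linux/timex.h includes; the struct layout mirrors the 32-bit timex that each arch used to carry, the field list is abridged, and the names compat_timex_sketch / compat_adjtimex_sketch are invented for illustration -- this is not the actual common-code implementation:

        struct compat_timex_sketch {            /* 32-bit layout, abridged */
                u32 modes;
                s32 offset, freq, maxerror, esterror;
                struct compat_timeval time;
                /* ... remaining s32 fields and padding ... */
        };

        asmlinkage long compat_adjtimex_sketch(struct compat_timex_sketch __user *utp)
        {
                struct timex txc;
                int ret;

                memset(&txc, 0, sizeof(txc));
                if (get_user(txc.modes, &utp->modes) ||         /* widen each field */
                    __get_user(txc.offset, &utp->offset))
                        return -EFAULT;
                /* ... copy in the remaining fields the same way ... */

                ret = do_adjtimex(&txc);                        /* native implementation */

                if (put_user(txc.modes, &utp->modes) ||         /* narrow results back */
                    __put_user(txc.offset, &utp->offset))
                        ret = -EFAULT;
                /* ... copy out the remaining fields the same way ... */
                return ret;
        }

Doing this once in common code is what lets the duplicated mips, parisc, powerpc, s390 and sparc64 versions in this patch be removed.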
diff --git a/arch/mips/lasat/sysctl.c b/arch/mips/lasat/sysctl.c index 8ff43a1c1e99..e3d5aaa90f0d 100644 --- a/arch/mips/lasat/sysctl.c +++ b/arch/mips/lasat/sysctl.c | |||
@@ -30,12 +30,13 @@ | |||
30 | #include <linux/string.h> | 30 | #include <linux/string.h> |
31 | #include <linux/net.h> | 31 | #include <linux/net.h> |
32 | #include <linux/inet.h> | 32 | #include <linux/inet.h> |
33 | #include <linux/mutex.h> | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | 35 | ||
35 | #include "sysctl.h" | 36 | #include "sysctl.h" |
36 | #include "ds1603.h" | 37 | #include "ds1603.h" |
37 | 38 | ||
38 | static DECLARE_MUTEX(lasat_info_sem); | 39 | static DEFINE_MUTEX(lasat_info_mutex); |
39 | 40 | ||
40 | /* Strategy function to write EEPROM after changing string entry */ | 41 | /* Strategy function to write EEPROM after changing string entry */ |
41 | int sysctl_lasatstring(ctl_table *table, int *name, int nlen, | 42 | int sysctl_lasatstring(ctl_table *table, int *name, int nlen, |
@@ -43,17 +44,17 @@ int sysctl_lasatstring(ctl_table *table, int *name, int nlen, | |||
43 | void *newval, size_t newlen, void **context) | 44 | void *newval, size_t newlen, void **context) |
44 | { | 45 | { |
45 | int r; | 46 | int r; |
46 | down(&lasat_info_sem); | 47 | mutex_lock(&lasat_info_mutex); |
47 | r = sysctl_string(table, name, | 48 | r = sysctl_string(table, name, |
48 | nlen, oldval, oldlenp, newval, newlen, context); | 49 | nlen, oldval, oldlenp, newval, newlen, context); |
49 | if (r < 0) { | 50 | if (r < 0) { |
50 | up(&lasat_info_sem); | 51 | mutex_unlock(&lasat_info_mutex); |
51 | return r; | 52 | return r; |
52 | } | 53 | } |
53 | if (newval && newlen) { | 54 | if (newval && newlen) { |
54 | lasat_write_eeprom_info(); | 55 | lasat_write_eeprom_info(); |
55 | } | 56 | } |
56 | up(&lasat_info_sem); | 57 | mutex_unlock(&lasat_info_mutex); |
57 | return 1; | 58 | return 1; |
58 | } | 59 | } |
59 | 60 | ||
@@ -63,14 +64,14 @@ int proc_dolasatstring(ctl_table *table, int write, struct file *filp, | |||
63 | void *buffer, size_t *lenp, loff_t *ppos) | 64 | void *buffer, size_t *lenp, loff_t *ppos) |
64 | { | 65 | { |
65 | int r; | 66 | int r; |
66 | down(&lasat_info_sem); | 67 | mutex_lock(&lasat_info_mutex); |
67 | r = proc_dostring(table, write, filp, buffer, lenp, ppos); | 68 | r = proc_dostring(table, write, filp, buffer, lenp, ppos); |
68 | if ( (!write) || r) { | 69 | if ( (!write) || r) { |
69 | up(&lasat_info_sem); | 70 | mutex_unlock(&lasat_info_mutex); |
70 | return r; | 71 | return r; |
71 | } | 72 | } |
72 | lasat_write_eeprom_info(); | 73 | lasat_write_eeprom_info(); |
73 | up(&lasat_info_sem); | 74 | mutex_unlock(&lasat_info_mutex); |
74 | return 0; | 75 | return 0; |
75 | } | 76 | } |
76 | 77 | ||
@@ -79,14 +80,14 @@ int proc_dolasatint(ctl_table *table, int write, struct file *filp, | |||
79 | void *buffer, size_t *lenp, loff_t *ppos) | 80 | void *buffer, size_t *lenp, loff_t *ppos) |
80 | { | 81 | { |
81 | int r; | 82 | int r; |
82 | down(&lasat_info_sem); | 83 | mutex_lock(&lasat_info_mutex); |
83 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); | 84 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); |
84 | if ( (!write) || r) { | 85 | if ( (!write) || r) { |
85 | up(&lasat_info_sem); | 86 | mutex_unlock(&lasat_info_mutex); |
86 | return r; | 87 | return r; |
87 | } | 88 | } |
88 | lasat_write_eeprom_info(); | 89 | lasat_write_eeprom_info(); |
89 | up(&lasat_info_sem); | 90 | mutex_unlock(&lasat_info_mutex); |
90 | return 0; | 91 | return 0; |
91 | } | 92 | } |
92 | 93 | ||
@@ -98,7 +99,7 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, | |||
98 | void *buffer, size_t *lenp, loff_t *ppos) | 99 | void *buffer, size_t *lenp, loff_t *ppos) |
99 | { | 100 | { |
100 | int r; | 101 | int r; |
101 | down(&lasat_info_sem); | 102 | mutex_lock(&lasat_info_mutex); |
102 | if (!write) { | 103 | if (!write) { |
103 | rtctmp = ds1603_read(); | 104 | rtctmp = ds1603_read(); |
104 | /* check for time < 0 and set to 0 */ | 105 | /* check for time < 0 and set to 0 */ |
@@ -107,11 +108,11 @@ int proc_dolasatrtc(ctl_table *table, int write, struct file *filp, | |||
107 | } | 108 | } |
108 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); | 109 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); |
109 | if ( (!write) || r) { | 110 | if ( (!write) || r) { |
110 | up(&lasat_info_sem); | 111 | mutex_unlock(&lasat_info_mutex); |
111 | return r; | 112 | return r; |
112 | } | 113 | } |
113 | ds1603_set(rtctmp); | 114 | ds1603_set(rtctmp); |
114 | up(&lasat_info_sem); | 115 | mutex_unlock(&lasat_info_mutex); |
115 | return 0; | 116 | return 0; |
116 | } | 117 | } |
117 | #endif | 118 | #endif |
@@ -122,16 +123,16 @@ int sysctl_lasat_intvec(ctl_table *table, int *name, int nlen, | |||
122 | void *newval, size_t newlen, void **context) | 123 | void *newval, size_t newlen, void **context) |
123 | { | 124 | { |
124 | int r; | 125 | int r; |
125 | down(&lasat_info_sem); | 126 | mutex_lock(&lasat_info_mutex); |
126 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); | 127 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); |
127 | if (r < 0) { | 128 | if (r < 0) { |
128 | up(&lasat_info_sem); | 129 | mutex_unlock(&lasat_info_mutex); |
129 | return r; | 130 | return r; |
130 | } | 131 | } |
131 | if (newval && newlen) { | 132 | if (newval && newlen) { |
132 | lasat_write_eeprom_info(); | 133 | lasat_write_eeprom_info(); |
133 | } | 134 | } |
134 | up(&lasat_info_sem); | 135 | mutex_unlock(&lasat_info_mutex); |
135 | return 1; | 136 | return 1; |
136 | } | 137 | } |
137 | 138 | ||
@@ -142,19 +143,19 @@ int sysctl_lasat_rtc(ctl_table *table, int *name, int nlen, | |||
142 | void *newval, size_t newlen, void **context) | 143 | void *newval, size_t newlen, void **context) |
143 | { | 144 | { |
144 | int r; | 145 | int r; |
145 | down(&lasat_info_sem); | 146 | mutex_lock(&lasat_info_mutex); |
146 | rtctmp = ds1603_read(); | 147 | rtctmp = ds1603_read(); |
147 | if (rtctmp < 0) | 148 | if (rtctmp < 0) |
148 | rtctmp = 0; | 149 | rtctmp = 0; |
149 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); | 150 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); |
150 | if (r < 0) { | 151 | if (r < 0) { |
151 | up(&lasat_info_sem); | 152 | mutex_unlock(&lasat_info_mutex); |
152 | return r; | 153 | return r; |
153 | } | 154 | } |
154 | if (newval && newlen) { | 155 | if (newval && newlen) { |
155 | ds1603_set(rtctmp); | 156 | ds1603_set(rtctmp); |
156 | } | 157 | } |
157 | up(&lasat_info_sem); | 158 | mutex_unlock(&lasat_info_mutex); |
158 | return 1; | 159 | return 1; |
159 | } | 160 | } |
160 | #endif | 161 | #endif |
@@ -192,13 +193,13 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, | |||
192 | return 0; | 193 | return 0; |
193 | } | 194 | } |
194 | 195 | ||
195 | down(&lasat_info_sem); | 196 | mutex_lock(&lasat_info_mutex); |
196 | if (write) { | 197 | if (write) { |
197 | len = 0; | 198 | len = 0; |
198 | p = buffer; | 199 | p = buffer; |
199 | while (len < *lenp) { | 200 | while (len < *lenp) { |
200 | if(get_user(c, p++)) { | 201 | if(get_user(c, p++)) { |
201 | up(&lasat_info_sem); | 202 | mutex_unlock(&lasat_info_mutex); |
202 | return -EFAULT; | 203 | return -EFAULT; |
203 | } | 204 | } |
204 | if (c == 0 || c == '\n') | 205 | if (c == 0 || c == '\n') |
@@ -209,7 +210,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, | |||
209 | len = sizeof(proc_lasat_ipbuf) - 1; | 210 | len = sizeof(proc_lasat_ipbuf) - 1; |
210 | if (copy_from_user(proc_lasat_ipbuf, buffer, len)) | 211 | if (copy_from_user(proc_lasat_ipbuf, buffer, len)) |
211 | { | 212 | { |
212 | up(&lasat_info_sem); | 213 | mutex_unlock(&lasat_info_mutex); |
213 | return -EFAULT; | 214 | return -EFAULT; |
214 | } | 215 | } |
215 | proc_lasat_ipbuf[len] = 0; | 216 | proc_lasat_ipbuf[len] = 0; |
@@ -230,12 +231,12 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, | |||
230 | len = *lenp; | 231 | len = *lenp; |
231 | if (len) | 232 | if (len) |
232 | if(copy_to_user(buffer, proc_lasat_ipbuf, len)) { | 233 | if(copy_to_user(buffer, proc_lasat_ipbuf, len)) { |
233 | up(&lasat_info_sem); | 234 | mutex_unlock(&lasat_info_mutex); |
234 | return -EFAULT; | 235 | return -EFAULT; |
235 | } | 236 | } |
236 | if (len < *lenp) { | 237 | if (len < *lenp) { |
237 | if(put_user('\n', ((char *) buffer) + len)) { | 238 | if(put_user('\n', ((char *) buffer) + len)) { |
238 | up(&lasat_info_sem); | 239 | mutex_unlock(&lasat_info_mutex); |
239 | return -EFAULT; | 240 | return -EFAULT; |
240 | } | 241 | } |
241 | len++; | 242 | len++; |
@@ -244,7 +245,7 @@ int proc_lasat_ip(ctl_table *table, int write, struct file *filp, | |||
244 | *ppos += len; | 245 | *ppos += len; |
245 | } | 246 | } |
246 | update_bcastaddr(); | 247 | update_bcastaddr(); |
247 | up(&lasat_info_sem); | 248 | mutex_unlock(&lasat_info_mutex); |
248 | return 0; | 249 | return 0; |
249 | } | 250 | } |
250 | #endif /* defined(CONFIG_INET) */ | 251 | #endif /* defined(CONFIG_INET) */ |
@@ -256,10 +257,10 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, | |||
256 | { | 257 | { |
257 | int r; | 258 | int r; |
258 | 259 | ||
259 | down(&lasat_info_sem); | 260 | mutex_lock(&lasat_info_mutex); |
260 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); | 261 | r = sysctl_intvec(table, name, nlen, oldval, oldlenp, newval, newlen, context); |
261 | if (r < 0) { | 262 | if (r < 0) { |
262 | up(&lasat_info_sem); | 263 | mutex_unlock(&lasat_info_mutex); |
263 | return r; | 264 | return r; |
264 | } | 265 | } |
265 | 266 | ||
@@ -271,7 +272,7 @@ static int sysctl_lasat_eeprom_value(ctl_table *table, int *name, int nlen, | |||
271 | lasat_write_eeprom_info(); | 272 | lasat_write_eeprom_info(); |
272 | lasat_init_board_info(); | 273 | lasat_init_board_info(); |
273 | } | 274 | } |
274 | up(&lasat_info_sem); | 275 | mutex_unlock(&lasat_info_mutex); |
275 | 276 | ||
276 | return 0; | 277 | return 0; |
277 | } | 278 | } |
@@ -280,10 +281,10 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp, | |||
280 | void *buffer, size_t *lenp, loff_t *ppos) | 281 | void *buffer, size_t *lenp, loff_t *ppos) |
281 | { | 282 | { |
282 | int r; | 283 | int r; |
283 | down(&lasat_info_sem); | 284 | mutex_lock(&lasat_info_mutex); |
284 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); | 285 | r = proc_dointvec(table, write, filp, buffer, lenp, ppos); |
285 | if ( (!write) || r) { | 286 | if ( (!write) || r) { |
286 | up(&lasat_info_sem); | 287 | mutex_unlock(&lasat_info_mutex); |
287 | return r; | 288 | return r; |
288 | } | 289 | } |
289 | if (filp && filp->f_dentry) | 290 | if (filp && filp->f_dentry) |
@@ -294,7 +295,7 @@ int proc_lasat_eeprom_value(ctl_table *table, int write, struct file *filp, | |||
294 | lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess; | 295 | lasat_board_info.li_eeprom_info.debugaccess = lasat_board_info.li_debugaccess; |
295 | } | 296 | } |
296 | lasat_write_eeprom_info(); | 297 | lasat_write_eeprom_info(); |
297 | up(&lasat_info_sem); | 298 | mutex_unlock(&lasat_info_mutex); |
298 | return 0; | 299 | return 0; |
299 | } | 300 | } |
300 | 301 | ||
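The sysctl.c changes above are a mechanical conversion from a semaphore used as a sleeping lock to the dedicated mutex type: DECLARE_MUTEX()/down()/up() become DEFINE_MUTEX()/mutex_lock()/mutex_unlock(), and linux/mutex.h replaces asm/semaphore.h. A minimal before/after sketch of the pattern, with lock and function names invented for illustration:

        /* before: counting semaphore initialised to 1, used as a lock */
        static DECLARE_MUTEX(example_sem);

        static void touch_shared_state_old(void)
        {
                down(&example_sem);
                /* ... critical section ... */
                up(&example_sem);
        }

        /* after: a real mutex, which can gain owner and debug checking */
        static DEFINE_MUTEX(example_mutex);

        static void touch_shared_state_new(void)
        {
                mutex_lock(&example_mutex);
                /* ... critical section ... */
                mutex_unlock(&example_mutex);
        }

The same substitution is applied to imlist_sem, spu_mutex and g5_switch_mutex later in this patch.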
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index eca33cfa8a4c..6b3c50964ca9 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -25,6 +25,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
25 | config RWSEM_XCHGADD_ALGORITHM | 25 | config RWSEM_XCHGADD_ALGORITHM |
26 | bool | 26 | bool |
27 | 27 | ||
28 | config GENERIC_FIND_NEXT_BIT | ||
29 | bool | ||
30 | default y | ||
31 | |||
32 | config GENERIC_HWEIGHT | ||
33 | bool | ||
34 | default y | ||
35 | |||
28 | config GENERIC_CALIBRATE_DELAY | 36 | config GENERIC_CALIBRATE_DELAY |
29 | bool | 37 | bool |
30 | default y | 38 | default y |
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c index 613569018410..d286f68a3d3a 100644 --- a/arch/parisc/kernel/sys_parisc32.c +++ b/arch/parisc/kernel/sys_parisc32.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/times.h> | 21 | #include <linux/times.h> |
22 | #include <linux/utsname.h> | 22 | #include <linux/utsname.h> |
23 | #include <linux/time.h> | 23 | #include <linux/time.h> |
24 | #include <linux/timex.h> | ||
25 | #include <linux/smp.h> | 24 | #include <linux/smp.h> |
26 | #include <linux/smp_lock.h> | 25 | #include <linux/smp_lock.h> |
27 | #include <linux/sem.h> | 26 | #include <linux/sem.h> |
@@ -567,63 +566,6 @@ asmlinkage int sys32_sendfile64(int out_fd, int in_fd, compat_loff_t __user *off | |||
567 | } | 566 | } |
568 | 567 | ||
569 | 568 | ||
570 | struct timex32 { | ||
571 | unsigned int modes; /* mode selector */ | ||
572 | int offset; /* time offset (usec) */ | ||
573 | int freq; /* frequency offset (scaled ppm) */ | ||
574 | int maxerror; /* maximum error (usec) */ | ||
575 | int esterror; /* estimated error (usec) */ | ||
576 | int status; /* clock command/status */ | ||
577 | int constant; /* pll time constant */ | ||
578 | int precision; /* clock precision (usec) (read only) */ | ||
579 | int tolerance; /* clock frequency tolerance (ppm) | ||
580 | * (read only) | ||
581 | */ | ||
582 | struct compat_timeval time; /* (read only) */ | ||
583 | int tick; /* (modified) usecs between clock ticks */ | ||
584 | |||
585 | int ppsfreq; /* pps frequency (scaled ppm) (ro) */ | ||
586 | int jitter; /* pps jitter (us) (ro) */ | ||
587 | int shift; /* interval duration (s) (shift) (ro) */ | ||
588 | int stabil; /* pps stability (scaled ppm) (ro) */ | ||
589 | int jitcnt; /* jitter limit exceeded (ro) */ | ||
590 | int calcnt; /* calibration intervals (ro) */ | ||
591 | int errcnt; /* calibration errors (ro) */ | ||
592 | int stbcnt; /* stability limit exceeded (ro) */ | ||
593 | |||
594 | int :32; int :32; int :32; int :32; | ||
595 | int :32; int :32; int :32; int :32; | ||
596 | int :32; int :32; int :32; int :32; | ||
597 | }; | ||
598 | |||
599 | asmlinkage long sys32_adjtimex(struct timex32 __user *txc_p32) | ||
600 | { | ||
601 | struct timex txc; | ||
602 | struct timex32 t32; | ||
603 | int ret; | ||
604 | extern int do_adjtimex(struct timex *txc); | ||
605 | |||
606 | if(copy_from_user(&t32, txc_p32, sizeof(struct timex32))) | ||
607 | return -EFAULT; | ||
608 | #undef CP | ||
609 | #define CP(x) txc.x = t32.x | ||
610 | CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror); | ||
611 | CP(status); CP(constant); CP(precision); CP(tolerance); | ||
612 | CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter); | ||
613 | CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt); | ||
614 | CP(stbcnt); | ||
615 | ret = do_adjtimex(&txc); | ||
616 | #undef CP | ||
617 | #define CP(x) t32.x = txc.x | ||
618 | CP(modes); CP(offset); CP(freq); CP(maxerror); CP(esterror); | ||
619 | CP(status); CP(constant); CP(precision); CP(tolerance); | ||
620 | CP(time.tv_sec); CP(time.tv_usec); CP(tick); CP(ppsfreq); CP(jitter); | ||
621 | CP(shift); CP(stabil); CP(jitcnt); CP(calcnt); CP(errcnt); | ||
622 | CP(stbcnt); | ||
623 | return copy_to_user(txc_p32, &t32, sizeof(struct timex32)) ? -EFAULT : ret; | ||
624 | } | ||
625 | |||
626 | |||
627 | struct sysinfo32 { | 569 | struct sysinfo32 { |
628 | s32 uptime; | 570 | s32 uptime; |
629 | u32 loads[3]; | 571 | u32 loads[3]; |
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 71011eadb872..89b6c56ea0a8 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S | |||
@@ -207,7 +207,7 @@ | |||
207 | /* struct sockaddr... */ | 207 | /* struct sockaddr... */ |
208 | ENTRY_SAME(recvfrom) | 208 | ENTRY_SAME(recvfrom) |
209 | /* struct timex contains longs */ | 209 | /* struct timex contains longs */ |
210 | ENTRY_DIFF(adjtimex) | 210 | ENTRY_COMP(adjtimex) |
211 | ENTRY_SAME(mprotect) /* 125 */ | 211 | ENTRY_SAME(mprotect) /* 125 */ |
212 | /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */ | 212 | /* old_sigset_t forced to 32 bits. Beware glibc sigset_t */ |
213 | ENTRY_COMP(sigprocmask) | 213 | ENTRY_COMP(sigprocmask) |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index fae42da7468d..a433b7126d33 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -37,6 +37,10 @@ config RWSEM_XCHGADD_ALGORITHM | |||
37 | bool | 37 | bool |
38 | default y | 38 | default y |
39 | 39 | ||
40 | config GENERIC_HWEIGHT | ||
41 | bool | ||
42 | default y | ||
43 | |||
40 | config GENERIC_CALIBRATE_DELAY | 44 | config GENERIC_CALIBRATE_DELAY |
41 | bool | 45 | bool |
42 | default y | 46 | default y |
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c index cb1fe5878e8b..ad7a90212204 100644 --- a/arch/powerpc/kernel/kprobes.c +++ b/arch/powerpc/kernel/kprobes.c | |||
@@ -30,9 +30,11 @@ | |||
30 | #include <linux/kprobes.h> | 30 | #include <linux/kprobes.h> |
31 | #include <linux/ptrace.h> | 31 | #include <linux/ptrace.h> |
32 | #include <linux/preempt.h> | 32 | #include <linux/preempt.h> |
33 | #include <linux/module.h> | ||
33 | #include <asm/cacheflush.h> | 34 | #include <asm/cacheflush.h> |
34 | #include <asm/kdebug.h> | 35 | #include <asm/kdebug.h> |
35 | #include <asm/sstep.h> | 36 | #include <asm/sstep.h> |
37 | #include <asm/uaccess.h> | ||
36 | 38 | ||
37 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; | 39 | DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL; |
38 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | 40 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); |
@@ -372,17 +374,62 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
372 | { | 374 | { |
373 | struct kprobe *cur = kprobe_running(); | 375 | struct kprobe *cur = kprobe_running(); |
374 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 376 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
375 | 377 | const struct exception_table_entry *entry; | |
376 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | 378 | |
377 | return 1; | 379 | switch(kcb->kprobe_status) { |
378 | 380 | case KPROBE_HIT_SS: | |
379 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | 381 | case KPROBE_REENTER: |
380 | resume_execution(cur, regs); | 382 | /* |
383 | * We are here because the instruction being single | ||
384 | * stepped caused a page fault. We reset the current | ||
385 | * kprobe and the nip points back to the probe address | ||
386 | * and allow the page fault handler to continue as a | ||
387 | * normal page fault. | ||
388 | */ | ||
389 | regs->nip = (unsigned long)cur->addr; | ||
381 | regs->msr &= ~MSR_SE; | 390 | regs->msr &= ~MSR_SE; |
382 | regs->msr |= kcb->kprobe_saved_msr; | 391 | regs->msr |= kcb->kprobe_saved_msr; |
383 | 392 | if (kcb->kprobe_status == KPROBE_REENTER) | |
384 | reset_current_kprobe(); | 393 | restore_previous_kprobe(kcb); |
394 | else | ||
395 | reset_current_kprobe(); | ||
385 | preempt_enable_no_resched(); | 396 | preempt_enable_no_resched(); |
397 | break; | ||
398 | case KPROBE_HIT_ACTIVE: | ||
399 | case KPROBE_HIT_SSDONE: | ||
400 | /* | ||
401 | * We increment the nmissed count for accounting, | ||
402 | * we can also use npre/npostfault count for accounting | ||
403 | * these specific fault cases. | ||
404 | */ | ||
405 | kprobes_inc_nmissed_count(cur); | ||
406 | |||
407 | /* | ||
408 | * We come here because instructions in the pre/post | ||
409 | * handler caused the page_fault, this could happen | ||
410 | * if handler tries to access user space by | ||
411 | * copy_from_user(), get_user() etc. Let the | ||
412 | * user-specified handler try to fix it first. | ||
413 | */ | ||
414 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
415 | return 1; | ||
416 | |||
417 | /* | ||
418 | * In case the user-specified fault handler returned | ||
419 | * zero, try to fix up. | ||
420 | */ | ||
421 | if ((entry = search_exception_tables(regs->nip)) != NULL) { | ||
422 | regs->nip = entry->fixup; | ||
423 | return 1; | ||
424 | } | ||
425 | |||
426 | /* | ||
427 | * fixup_exception() could not handle it, | ||
428 | * Let do_page_fault() fix it. | ||
429 | */ | ||
430 | break; | ||
431 | default: | ||
432 | break; | ||
386 | } | 433 | } |
387 | return 0; | 434 | return 0; |
388 | } | 435 | } |
@@ -396,6 +443,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
396 | struct die_args *args = (struct die_args *)data; | 443 | struct die_args *args = (struct die_args *)data; |
397 | int ret = NOTIFY_DONE; | 444 | int ret = NOTIFY_DONE; |
398 | 445 | ||
446 | if (args->regs && user_mode(args->regs)) | ||
447 | return ret; | ||
448 | |||
399 | switch (val) { | 449 | switch (val) { |
400 | case DIE_BPT: | 450 | case DIE_BPT: |
401 | if (kprobe_handler(args->regs)) | 451 | if (kprobe_handler(args->regs)) |
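The reworked fault handler above distinguishes a fault taken while single-stepping the probed instruction (KPROBE_HIT_SS / KPROBE_REENTER, where the probe is reset and the fault is replayed at the original address) from a fault raised by the user's pre/post handler (KPROBE_HIT_ACTIVE / KPROBE_HIT_SSDONE, where the exception tables get a chance to fix things up before do_page_fault() is involved). The exception-table step in isolation looks roughly like the sketch below, assuming linux/module.h for search_exception_tables(); the wrapper name fixup_kernel_fault_sketch() is invented for illustration:

        static int fixup_kernel_fault_sketch(struct pt_regs *regs)
        {
                const struct exception_table_entry *entry;

                entry = search_exception_tables(regs->nip);
                if (entry) {
                        regs->nip = entry->fixup;       /* resume at the fixup stub */
                        return 1;                       /* fault handled here */
                }
                return 0;                               /* fall through to do_page_fault() */
        }

The sparc64 version further down follows the same shape, with regs->tpc/tnpc in place of regs->nip.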
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 1770a066c217..f698aa77127e 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -35,7 +35,6 @@ | |||
35 | #include <linux/mqueue.h> | 35 | #include <linux/mqueue.h> |
36 | #include <linux/hardirq.h> | 36 | #include <linux/hardirq.h> |
37 | #include <linux/utsname.h> | 37 | #include <linux/utsname.h> |
38 | #include <linux/kprobes.h> | ||
39 | 38 | ||
40 | #include <asm/pgtable.h> | 39 | #include <asm/pgtable.h> |
41 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
@@ -460,7 +459,6 @@ void show_regs(struct pt_regs * regs) | |||
460 | 459 | ||
461 | void exit_thread(void) | 460 | void exit_thread(void) |
462 | { | 461 | { |
463 | kprobe_flush_task(current); | ||
464 | discard_lazy_cpu_state(); | 462 | discard_lazy_cpu_state(); |
465 | } | 463 | } |
466 | 464 | ||
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index cd75ab2908fa..ec274e688816 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/resource.h> | 24 | #include <linux/resource.h> |
25 | #include <linux/times.h> | 25 | #include <linux/times.h> |
26 | #include <linux/utsname.h> | 26 | #include <linux/utsname.h> |
27 | #include <linux/timex.h> | ||
28 | #include <linux/smp.h> | 27 | #include <linux/smp.h> |
29 | #include <linux/smp_lock.h> | 28 | #include <linux/smp_lock.h> |
30 | #include <linux/sem.h> | 29 | #include <linux/sem.h> |
@@ -161,78 +160,6 @@ asmlinkage long compat_sys_sysfs(u32 option, u32 arg1, u32 arg2) | |||
161 | return sys_sysfs((int)option, arg1, arg2); | 160 | return sys_sysfs((int)option, arg1, arg2); |
162 | } | 161 | } |
163 | 162 | ||
164 | /* Handle adjtimex compatibility. */ | ||
165 | struct timex32 { | ||
166 | u32 modes; | ||
167 | s32 offset, freq, maxerror, esterror; | ||
168 | s32 status, constant, precision, tolerance; | ||
169 | struct compat_timeval time; | ||
170 | s32 tick; | ||
171 | s32 ppsfreq, jitter, shift, stabil; | ||
172 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
173 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
174 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
175 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
176 | }; | ||
177 | |||
178 | extern int do_adjtimex(struct timex *); | ||
179 | |||
180 | asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp) | ||
181 | { | ||
182 | struct timex txc; | ||
183 | int ret; | ||
184 | |||
185 | memset(&txc, 0, sizeof(struct timex)); | ||
186 | |||
187 | if(get_user(txc.modes, &utp->modes) || | ||
188 | __get_user(txc.offset, &utp->offset) || | ||
189 | __get_user(txc.freq, &utp->freq) || | ||
190 | __get_user(txc.maxerror, &utp->maxerror) || | ||
191 | __get_user(txc.esterror, &utp->esterror) || | ||
192 | __get_user(txc.status, &utp->status) || | ||
193 | __get_user(txc.constant, &utp->constant) || | ||
194 | __get_user(txc.precision, &utp->precision) || | ||
195 | __get_user(txc.tolerance, &utp->tolerance) || | ||
196 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
197 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
198 | __get_user(txc.tick, &utp->tick) || | ||
199 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
200 | __get_user(txc.jitter, &utp->jitter) || | ||
201 | __get_user(txc.shift, &utp->shift) || | ||
202 | __get_user(txc.stabil, &utp->stabil) || | ||
203 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
204 | __get_user(txc.calcnt, &utp->calcnt) || | ||
205 | __get_user(txc.errcnt, &utp->errcnt) || | ||
206 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
207 | return -EFAULT; | ||
208 | |||
209 | ret = do_adjtimex(&txc); | ||
210 | |||
211 | if(put_user(txc.modes, &utp->modes) || | ||
212 | __put_user(txc.offset, &utp->offset) || | ||
213 | __put_user(txc.freq, &utp->freq) || | ||
214 | __put_user(txc.maxerror, &utp->maxerror) || | ||
215 | __put_user(txc.esterror, &utp->esterror) || | ||
216 | __put_user(txc.status, &utp->status) || | ||
217 | __put_user(txc.constant, &utp->constant) || | ||
218 | __put_user(txc.precision, &utp->precision) || | ||
219 | __put_user(txc.tolerance, &utp->tolerance) || | ||
220 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
221 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
222 | __put_user(txc.tick, &utp->tick) || | ||
223 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
224 | __put_user(txc.jitter, &utp->jitter) || | ||
225 | __put_user(txc.shift, &utp->shift) || | ||
226 | __put_user(txc.stabil, &utp->stabil) || | ||
227 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
228 | __put_user(txc.calcnt, &utp->calcnt) || | ||
229 | __put_user(txc.errcnt, &utp->errcnt) || | ||
230 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
231 | ret = -EFAULT; | ||
232 | |||
233 | return ret; | ||
234 | } | ||
235 | |||
236 | asmlinkage long compat_sys_pause(void) | 163 | asmlinkage long compat_sys_pause(void) |
237 | { | 164 | { |
238 | current->state = TASK_INTERRUPTIBLE; | 165 | current->state = TASK_INTERRUPTIBLE; |
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c index 8b0c132bc163..add8c1a9af68 100644 --- a/arch/powerpc/mm/imalloc.c +++ b/arch/powerpc/mm/imalloc.c | |||
@@ -13,12 +13,12 @@ | |||
13 | #include <asm/uaccess.h> | 13 | #include <asm/uaccess.h> |
14 | #include <asm/pgalloc.h> | 14 | #include <asm/pgalloc.h> |
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | #include <asm/semaphore.h> | 16 | #include <linux/mutex.h> |
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | 18 | ||
19 | #include "mmu_decl.h" | 19 | #include "mmu_decl.h" |
20 | 20 | ||
21 | static DECLARE_MUTEX(imlist_sem); | 21 | static DEFINE_MUTEX(imlist_mutex); |
22 | struct vm_struct * imlist = NULL; | 22 | struct vm_struct * imlist = NULL; |
23 | 23 | ||
24 | static int get_free_im_addr(unsigned long size, unsigned long *im_addr) | 24 | static int get_free_im_addr(unsigned long size, unsigned long *im_addr) |
@@ -257,7 +257,7 @@ struct vm_struct * im_get_free_area(unsigned long size) | |||
257 | struct vm_struct *area; | 257 | struct vm_struct *area; |
258 | unsigned long addr; | 258 | unsigned long addr; |
259 | 259 | ||
260 | down(&imlist_sem); | 260 | mutex_lock(&imlist_mutex); |
261 | if (get_free_im_addr(size, &addr)) { | 261 | if (get_free_im_addr(size, &addr)) { |
262 | printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n", | 262 | printk(KERN_ERR "%s() cannot obtain addr for size 0x%lx\n", |
263 | __FUNCTION__, size); | 263 | __FUNCTION__, size); |
@@ -272,7 +272,7 @@ struct vm_struct * im_get_free_area(unsigned long size) | |||
272 | __FUNCTION__, addr, size); | 272 | __FUNCTION__, addr, size); |
273 | } | 273 | } |
274 | next_im_done: | 274 | next_im_done: |
275 | up(&imlist_sem); | 275 | mutex_unlock(&imlist_mutex); |
276 | return area; | 276 | return area; |
277 | } | 277 | } |
278 | 278 | ||
@@ -281,9 +281,9 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size, | |||
281 | { | 281 | { |
282 | struct vm_struct *area; | 282 | struct vm_struct *area; |
283 | 283 | ||
284 | down(&imlist_sem); | 284 | mutex_lock(&imlist_mutex); |
285 | area = __im_get_area(v_addr, size, criteria); | 285 | area = __im_get_area(v_addr, size, criteria); |
286 | up(&imlist_sem); | 286 | mutex_unlock(&imlist_mutex); |
287 | return area; | 287 | return area; |
288 | } | 288 | } |
289 | 289 | ||
@@ -297,17 +297,17 @@ void im_free(void * addr) | |||
297 | printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); | 297 | printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); |
298 | return; | 298 | return; |
299 | } | 299 | } |
300 | down(&imlist_sem); | 300 | mutex_lock(&imlist_mutex); |
301 | for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { | 301 | for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { |
302 | if (tmp->addr == addr) { | 302 | if (tmp->addr == addr) { |
303 | *p = tmp->next; | 303 | *p = tmp->next; |
304 | unmap_vm_area(tmp); | 304 | unmap_vm_area(tmp); |
305 | kfree(tmp); | 305 | kfree(tmp); |
306 | up(&imlist_sem); | 306 | mutex_unlock(&imlist_mutex); |
307 | return; | 307 | return; |
308 | } | 308 | } |
309 | } | 309 | } |
310 | up(&imlist_sem); | 310 | mutex_unlock(&imlist_mutex); |
311 | printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, | 311 | printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, |
312 | addr); | 312 | addr); |
313 | } | 313 | } |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index d75ae03df686..a8fa1eeeb174 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -32,7 +32,7 @@ | |||
32 | 32 | ||
33 | #include <asm/io.h> | 33 | #include <asm/io.h> |
34 | #include <asm/prom.h> | 34 | #include <asm/prom.h> |
35 | #include <asm/semaphore.h> | 35 | #include <linux/mutex.h> |
36 | #include <asm/spu.h> | 36 | #include <asm/spu.h> |
37 | #include <asm/mmu_context.h> | 37 | #include <asm/mmu_context.h> |
38 | 38 | ||
@@ -342,7 +342,7 @@ spu_free_irqs(struct spu *spu) | |||
342 | } | 342 | } |
343 | 343 | ||
344 | static LIST_HEAD(spu_list); | 344 | static LIST_HEAD(spu_list); |
345 | static DECLARE_MUTEX(spu_mutex); | 345 | static DEFINE_MUTEX(spu_mutex); |
346 | 346 | ||
347 | static void spu_init_channels(struct spu *spu) | 347 | static void spu_init_channels(struct spu *spu) |
348 | { | 348 | { |
@@ -382,7 +382,7 @@ struct spu *spu_alloc(void) | |||
382 | { | 382 | { |
383 | struct spu *spu; | 383 | struct spu *spu; |
384 | 384 | ||
385 | down(&spu_mutex); | 385 | mutex_lock(&spu_mutex); |
386 | if (!list_empty(&spu_list)) { | 386 | if (!list_empty(&spu_list)) { |
387 | spu = list_entry(spu_list.next, struct spu, list); | 387 | spu = list_entry(spu_list.next, struct spu, list); |
388 | list_del_init(&spu->list); | 388 | list_del_init(&spu->list); |
@@ -391,7 +391,7 @@ struct spu *spu_alloc(void) | |||
391 | pr_debug("No SPU left\n"); | 391 | pr_debug("No SPU left\n"); |
392 | spu = NULL; | 392 | spu = NULL; |
393 | } | 393 | } |
394 | up(&spu_mutex); | 394 | mutex_unlock(&spu_mutex); |
395 | 395 | ||
396 | if (spu) | 396 | if (spu) |
397 | spu_init_channels(spu); | 397 | spu_init_channels(spu); |
@@ -402,9 +402,9 @@ EXPORT_SYMBOL_GPL(spu_alloc); | |||
402 | 402 | ||
403 | void spu_free(struct spu *spu) | 403 | void spu_free(struct spu *spu) |
404 | { | 404 | { |
405 | down(&spu_mutex); | 405 | mutex_lock(&spu_mutex); |
406 | list_add_tail(&spu->list, &spu_list); | 406 | list_add_tail(&spu->list, &spu_list); |
407 | up(&spu_mutex); | 407 | mutex_unlock(&spu_mutex); |
408 | } | 408 | } |
409 | EXPORT_SYMBOL_GPL(spu_free); | 409 | EXPORT_SYMBOL_GPL(spu_free); |
410 | 410 | ||
@@ -633,14 +633,14 @@ static int __init create_spu(struct device_node *spe) | |||
633 | spu->wbox_callback = NULL; | 633 | spu->wbox_callback = NULL; |
634 | spu->stop_callback = NULL; | 634 | spu->stop_callback = NULL; |
635 | 635 | ||
636 | down(&spu_mutex); | 636 | mutex_lock(&spu_mutex); |
637 | spu->number = number++; | 637 | spu->number = number++; |
638 | ret = spu_request_irqs(spu); | 638 | ret = spu_request_irqs(spu); |
639 | if (ret) | 639 | if (ret) |
640 | goto out_unmap; | 640 | goto out_unmap; |
641 | 641 | ||
642 | list_add(&spu->list, &spu_list); | 642 | list_add(&spu->list, &spu_list); |
643 | up(&spu_mutex); | 643 | mutex_unlock(&spu_mutex); |
644 | 644 | ||
645 | pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n", | 645 | pr_debug(KERN_DEBUG "Using SPE %s %02x %p %p %p %p %d\n", |
646 | spu->name, spu->isrc, spu->local_store, | 646 | spu->name, spu->isrc, spu->local_store, |
@@ -648,7 +648,7 @@ static int __init create_spu(struct device_node *spe) | |||
648 | goto out; | 648 | goto out; |
649 | 649 | ||
650 | out_unmap: | 650 | out_unmap: |
651 | up(&spu_mutex); | 651 | mutex_unlock(&spu_mutex); |
652 | spu_unmap(spu); | 652 | spu_unmap(spu); |
653 | out_free: | 653 | out_free: |
654 | kfree(spu); | 654 | kfree(spu); |
@@ -668,10 +668,10 @@ static void destroy_spu(struct spu *spu) | |||
668 | static void cleanup_spu_base(void) | 668 | static void cleanup_spu_base(void) |
669 | { | 669 | { |
670 | struct spu *spu, *tmp; | 670 | struct spu *spu, *tmp; |
671 | down(&spu_mutex); | 671 | mutex_lock(&spu_mutex); |
672 | list_for_each_entry_safe(spu, tmp, &spu_list, list) | 672 | list_for_each_entry_safe(spu, tmp, &spu_list, list) |
673 | destroy_spu(spu); | 673 | destroy_spu(spu); |
674 | up(&spu_mutex); | 674 | mutex_unlock(&spu_mutex); |
675 | } | 675 | } |
676 | module_exit(cleanup_spu_base); | 676 | module_exit(cleanup_spu_base); |
677 | 677 | ||
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index a415e8d2f7af..b57e465a1b71 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/cpufreq.h> | 21 | #include <linux/cpufreq.h> |
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/mutex.h> | ||
24 | #include <asm/prom.h> | 25 | #include <asm/prom.h> |
25 | #include <asm/machdep.h> | 26 | #include <asm/machdep.h> |
26 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
@@ -90,7 +91,7 @@ static void (*g5_switch_volt)(int speed_mode); | |||
90 | static int (*g5_switch_freq)(int speed_mode); | 91 | static int (*g5_switch_freq)(int speed_mode); |
91 | static int (*g5_query_freq)(void); | 92 | static int (*g5_query_freq)(void); |
92 | 93 | ||
93 | static DECLARE_MUTEX(g5_switch_mutex); | 94 | static DEFINE_MUTEX(g5_switch_mutex); |
94 | 95 | ||
95 | 96 | ||
96 | static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ | 97 | static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ |
@@ -327,7 +328,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, | |||
327 | if (g5_pmode_cur == newstate) | 328 | if (g5_pmode_cur == newstate) |
328 | return 0; | 329 | return 0; |
329 | 330 | ||
330 | down(&g5_switch_mutex); | 331 | mutex_lock(&g5_switch_mutex); |
331 | 332 | ||
332 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; | 333 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; |
333 | freqs.new = g5_cpu_freqs[newstate].frequency; | 334 | freqs.new = g5_cpu_freqs[newstate].frequency; |
@@ -337,7 +338,7 @@ static int g5_cpufreq_target(struct cpufreq_policy *policy, | |||
337 | rc = g5_switch_freq(newstate); | 338 | rc = g5_switch_freq(newstate); |
338 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 339 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); |
339 | 340 | ||
340 | up(&g5_switch_mutex); | 341 | mutex_unlock(&g5_switch_mutex); |
341 | 342 | ||
342 | return rc; | 343 | return rc; |
343 | } | 344 | } |
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 54a0a9bb12dd..3a3e302b4ea2 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig | |||
@@ -19,6 +19,10 @@ config RWSEM_XCHGADD_ALGORITHM | |||
19 | bool | 19 | bool |
20 | default y | 20 | default y |
21 | 21 | ||
22 | config GENERIC_HWEIGHT | ||
23 | bool | ||
24 | default y | ||
25 | |||
22 | config GENERIC_CALIBRATE_DELAY | 26 | config GENERIC_CALIBRATE_DELAY |
23 | bool | 27 | bool |
24 | default y | 28 | default y |
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 2b7364ed23bc..01c5c082f970 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -14,6 +14,10 @@ config RWSEM_XCHGADD_ALGORITHM | |||
14 | bool | 14 | bool |
15 | default y | 15 | default y |
16 | 16 | ||
17 | config GENERIC_HWEIGHT | ||
18 | bool | ||
19 | default y | ||
20 | |||
17 | config GENERIC_CALIBRATE_DELAY | 21 | config GENERIC_CALIBRATE_DELAY |
18 | bool | 22 | bool |
19 | default y | 23 | default y |
diff --git a/arch/s390/crypto/crypt_s390_query.c b/arch/s390/crypto/crypt_s390_query.c index def02bdc44a4..54fb11d7fadd 100644 --- a/arch/s390/crypto/crypt_s390_query.c +++ b/arch/s390/crypto/crypt_s390_query.c | |||
@@ -55,7 +55,7 @@ static void query_available_functions(void) | |||
55 | printk(KERN_INFO "KMC_AES_256: %d\n", | 55 | printk(KERN_INFO "KMC_AES_256: %d\n", |
56 | crypt_s390_func_available(KMC_AES_256_ENCRYPT)); | 56 | crypt_s390_func_available(KMC_AES_256_ENCRYPT)); |
57 | 57 | ||
58 | /* query available KIMD fucntions */ | 58 | /* query available KIMD functions */ |
59 | printk(KERN_INFO "KIMD_QUERY: %d\n", | 59 | printk(KERN_INFO "KIMD_QUERY: %d\n", |
60 | crypt_s390_func_available(KIMD_QUERY)); | 60 | crypt_s390_func_available(KIMD_QUERY)); |
61 | printk(KERN_INFO "KIMD_SHA_1: %d\n", | 61 | printk(KERN_INFO "KIMD_SHA_1: %d\n", |
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c index cc058dc3bc8b..5e14de37c17b 100644 --- a/arch/s390/kernel/compat_linux.c +++ b/arch/s390/kernel/compat_linux.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/resource.h> | 26 | #include <linux/resource.h> |
27 | #include <linux/times.h> | 27 | #include <linux/times.h> |
28 | #include <linux/utsname.h> | 28 | #include <linux/utsname.h> |
29 | #include <linux/timex.h> | ||
30 | #include <linux/smp.h> | 29 | #include <linux/smp.h> |
31 | #include <linux/smp_lock.h> | 30 | #include <linux/smp_lock.h> |
32 | #include <linux/sem.h> | 31 | #include <linux/sem.h> |
@@ -705,79 +704,6 @@ asmlinkage long sys32_sendfile64(int out_fd, int in_fd, | |||
705 | return ret; | 704 | return ret; |
706 | } | 705 | } |
707 | 706 | ||
708 | /* Handle adjtimex compatibility. */ | ||
709 | |||
710 | struct timex32 { | ||
711 | u32 modes; | ||
712 | s32 offset, freq, maxerror, esterror; | ||
713 | s32 status, constant, precision, tolerance; | ||
714 | struct compat_timeval time; | ||
715 | s32 tick; | ||
716 | s32 ppsfreq, jitter, shift, stabil; | ||
717 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
718 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
719 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
720 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
721 | }; | ||
722 | |||
723 | extern int do_adjtimex(struct timex *); | ||
724 | |||
725 | asmlinkage long sys32_adjtimex(struct timex32 __user *utp) | ||
726 | { | ||
727 | struct timex txc; | ||
728 | int ret; | ||
729 | |||
730 | memset(&txc, 0, sizeof(struct timex)); | ||
731 | |||
732 | if(get_user(txc.modes, &utp->modes) || | ||
733 | __get_user(txc.offset, &utp->offset) || | ||
734 | __get_user(txc.freq, &utp->freq) || | ||
735 | __get_user(txc.maxerror, &utp->maxerror) || | ||
736 | __get_user(txc.esterror, &utp->esterror) || | ||
737 | __get_user(txc.status, &utp->status) || | ||
738 | __get_user(txc.constant, &utp->constant) || | ||
739 | __get_user(txc.precision, &utp->precision) || | ||
740 | __get_user(txc.tolerance, &utp->tolerance) || | ||
741 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
742 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
743 | __get_user(txc.tick, &utp->tick) || | ||
744 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
745 | __get_user(txc.jitter, &utp->jitter) || | ||
746 | __get_user(txc.shift, &utp->shift) || | ||
747 | __get_user(txc.stabil, &utp->stabil) || | ||
748 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
749 | __get_user(txc.calcnt, &utp->calcnt) || | ||
750 | __get_user(txc.errcnt, &utp->errcnt) || | ||
751 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
752 | return -EFAULT; | ||
753 | |||
754 | ret = do_adjtimex(&txc); | ||
755 | |||
756 | if(put_user(txc.modes, &utp->modes) || | ||
757 | __put_user(txc.offset, &utp->offset) || | ||
758 | __put_user(txc.freq, &utp->freq) || | ||
759 | __put_user(txc.maxerror, &utp->maxerror) || | ||
760 | __put_user(txc.esterror, &utp->esterror) || | ||
761 | __put_user(txc.status, &utp->status) || | ||
762 | __put_user(txc.constant, &utp->constant) || | ||
763 | __put_user(txc.precision, &utp->precision) || | ||
764 | __put_user(txc.tolerance, &utp->tolerance) || | ||
765 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
766 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
767 | __put_user(txc.tick, &utp->tick) || | ||
768 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
769 | __put_user(txc.jitter, &utp->jitter) || | ||
770 | __put_user(txc.shift, &utp->shift) || | ||
771 | __put_user(txc.stabil, &utp->stabil) || | ||
772 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
773 | __put_user(txc.calcnt, &utp->calcnt) || | ||
774 | __put_user(txc.errcnt, &utp->errcnt) || | ||
775 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
776 | ret = -EFAULT; | ||
777 | |||
778 | return ret; | ||
779 | } | ||
780 | |||
781 | #ifdef CONFIG_SYSCTL | 707 | #ifdef CONFIG_SYSCTL |
782 | struct __sysctl_args32 { | 708 | struct __sysctl_args32 { |
783 | u32 name; | 709 | u32 name; |
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index 50e80138e7ad..199da68bd7be 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -551,10 +551,10 @@ sys32_newuname_wrapper: | |||
551 | llgtr %r2,%r2 # struct new_utsname * | 551 | llgtr %r2,%r2 # struct new_utsname * |
552 | jg s390x_newuname # branch to system call | 552 | jg s390x_newuname # branch to system call |
553 | 553 | ||
554 | .globl sys32_adjtimex_wrapper | 554 | .globl compat_sys_adjtimex_wrapper |
555 | sys32_adjtimex_wrapper: | 555 | compat_sys_adjtimex_wrapper: |
556 | llgtr %r2,%r2 # struct timex_emu31 * | 556 | llgtr %r2,%r2 # struct compat_timex * |
557 | jg sys32_adjtimex # branch to system call | 557 | jg compat_sys_adjtimex # branch to system call |
558 | 558 | ||
559 | .globl sys32_mprotect_wrapper | 559 | .globl sys32_mprotect_wrapper |
560 | sys32_mprotect_wrapper: | 560 | sys32_mprotect_wrapper: |
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 7c88d85c3597..2f56654da821 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -132,7 +132,7 @@ SYSCALL(sys_clone_glue,sys_clone_glue,sys32_clone_glue) /* 120 */ | |||
132 | SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) | 132 | SYSCALL(sys_setdomainname,sys_setdomainname,sys32_setdomainname_wrapper) |
133 | SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) | 133 | SYSCALL(sys_newuname,s390x_newuname,sys32_newuname_wrapper) |
134 | NI_SYSCALL /* modify_ldt for i386 */ | 134 | NI_SYSCALL /* modify_ldt for i386 */ |
135 | SYSCALL(sys_adjtimex,sys_adjtimex,sys32_adjtimex_wrapper) | 135 | SYSCALL(sys_adjtimex,sys_adjtimex,compat_sys_adjtimex_wrapper) |
136 | SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ | 136 | SYSCALL(sys_mprotect,sys_mprotect,sys32_mprotect_wrapper) /* 125 */ |
137 | SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) | 137 | SYSCALL(sys_sigprocmask,sys_sigprocmask,compat_sys_sigprocmask_wrapper) |
138 | NI_SYSCALL /* old "create module" */ | 138 | NI_SYSCALL /* old "create module" */ |
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index e9b275d90737..58583f459471 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
21 | config RWSEM_XCHGADD_ALGORITHM | 21 | config RWSEM_XCHGADD_ALGORITHM |
22 | bool | 22 | bool |
23 | 23 | ||
24 | config GENERIC_FIND_NEXT_BIT | ||
25 | bool | ||
26 | default y | ||
27 | |||
28 | config GENERIC_HWEIGHT | ||
29 | bool | ||
30 | default y | ||
31 | |||
24 | config GENERIC_HARDIRQS | 32 | config GENERIC_HARDIRQS |
25 | bool | 33 | bool |
26 | default y | 34 | default y |
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig index 07b172deb872..58c678e06667 100644 --- a/arch/sh64/Kconfig +++ b/arch/sh64/Kconfig | |||
@@ -21,6 +21,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
21 | bool | 21 | bool |
22 | default y | 22 | default y |
23 | 23 | ||
24 | config GENERIC_FIND_NEXT_BIT | ||
25 | bool | ||
26 | default y | ||
27 | |||
28 | config GENERIC_HWEIGHT | ||
29 | bool | ||
30 | default y | ||
31 | |||
24 | config GENERIC_CALIBRATE_DELAY | 32 | config GENERIC_CALIBRATE_DELAY |
25 | bool | 33 | bool |
26 | default y | 34 | default y |
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 7c58fc1a39c4..9431e967aa45 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -150,6 +150,14 @@ config RWSEM_GENERIC_SPINLOCK | |||
150 | config RWSEM_XCHGADD_ALGORITHM | 150 | config RWSEM_XCHGADD_ALGORITHM |
151 | bool | 151 | bool |
152 | 152 | ||
153 | config GENERIC_FIND_NEXT_BIT | ||
154 | bool | ||
155 | default y | ||
156 | |||
157 | config GENERIC_HWEIGHT | ||
158 | bool | ||
159 | default y | ||
160 | |||
153 | config GENERIC_CALIBRATE_DELAY | 161 | config GENERIC_CALIBRATE_DELAY |
154 | bool | 162 | bool |
155 | default y | 163 | default y |
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 267afddf63cf..d1e2fc566486 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig | |||
@@ -162,6 +162,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
162 | bool | 162 | bool |
163 | default y | 163 | default y |
164 | 164 | ||
165 | config GENERIC_FIND_NEXT_BIT | ||
166 | bool | ||
167 | default y | ||
168 | |||
169 | config GENERIC_HWEIGHT | ||
170 | bool | ||
171 | default y if !ULTRA_HAS_POPULATION_COUNT | ||
172 | |||
165 | config GENERIC_CALIBRATE_DELAY | 173 | config GENERIC_CALIBRATE_DELAY |
166 | bool | 174 | bool |
167 | default y | 175 | default y |
diff --git a/arch/sparc64/kernel/kprobes.c b/arch/sparc64/kernel/kprobes.c index b9a9ce70e55c..ffc7309e9f22 100644 --- a/arch/sparc64/kernel/kprobes.c +++ b/arch/sparc64/kernel/kprobes.c | |||
@@ -6,9 +6,11 @@ | |||
6 | #include <linux/config.h> | 6 | #include <linux/config.h> |
7 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
8 | #include <linux/kprobes.h> | 8 | #include <linux/kprobes.h> |
9 | #include <linux/module.h> | ||
9 | #include <asm/kdebug.h> | 10 | #include <asm/kdebug.h> |
10 | #include <asm/signal.h> | 11 | #include <asm/signal.h> |
11 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
13 | #include <asm/uaccess.h> | ||
12 | 14 | ||
13 | /* We do not have hardware single-stepping on sparc64. | 15 | /* We do not have hardware single-stepping on sparc64. |
14 | * So we implement software single-stepping with breakpoint | 16 | * So we implement software single-stepping with breakpoint |
@@ -302,16 +304,68 @@ static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
302 | { | 304 | { |
303 | struct kprobe *cur = kprobe_running(); | 305 | struct kprobe *cur = kprobe_running(); |
304 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 306 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
307 | const struct exception_table_entry *entry; | ||
308 | |||
309 | switch(kcb->kprobe_status) { | ||
310 | case KPROBE_HIT_SS: | ||
311 | case KPROBE_REENTER: | ||
312 | /* | ||
313 | * We are here because the instruction being single | ||
314 | * stepped caused a page fault. We reset the current | ||
315 | * kprobe and the tpc points back to the probe address | ||
316 | * and allow the page fault handler to continue as a | ||
317 | * normal page fault. | ||
318 | */ | ||
319 | regs->tpc = (unsigned long)cur->addr; | ||
320 | regs->tnpc = kcb->kprobe_orig_tnpc; | ||
321 | regs->tstate = ((regs->tstate & ~TSTATE_PIL) | | ||
322 | kcb->kprobe_orig_tstate_pil); | ||
323 | if (kcb->kprobe_status == KPROBE_REENTER) | ||
324 | restore_previous_kprobe(kcb); | ||
325 | else | ||
326 | reset_current_kprobe(); | ||
327 | preempt_enable_no_resched(); | ||
328 | break; | ||
329 | case KPROBE_HIT_ACTIVE: | ||
330 | case KPROBE_HIT_SSDONE: | ||
331 | /* | ||
332 | * We increment the nmissed count for accounting, | ||
333 | * we can also use npre/npostfault count for accouting | ||
334 | * these specific fault cases. | ||
335 | */ | ||
336 | kprobes_inc_nmissed_count(cur); | ||
337 | |||
338 | /* | ||
339 | * We come here because instructions in the pre/post | ||
340 | * handler caused the page_fault, this could happen | ||
341 | * if handler tries to access user space by | ||
342 | * copy_from_user(), get_user() etc. Let the | ||
343 | * user-specified handler try to fix it first. | ||
344 | */ | ||
345 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
346 | return 1; | ||
305 | 347 | ||
306 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | 348 | /* |
307 | return 1; | 349 | * In case the user-specified fault handler returned |
350 | * zero, try to fix up. | ||
351 | */ | ||
308 | 352 | ||
309 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | 353 | entry = search_exception_tables(regs->tpc); |
310 | resume_execution(cur, regs, kcb); | 354 | if (entry) { |
355 | regs->tpc = entry->fixup; | ||
356 | regs->tnpc = regs->tpc + 4; | ||
357 | return 1; | ||
358 | } | ||
311 | 359 | ||
312 | reset_current_kprobe(); | 360 | /* |
313 | preempt_enable_no_resched(); | 361 | * fixup_exception() could not handle it, |
362 | * Let do_page_fault() fix it. | ||
363 | */ | ||
364 | break; | ||
365 | default: | ||
366 | break; | ||
314 | } | 367 | } |
368 | |||
315 | return 0; | 369 | return 0; |
316 | } | 370 | } |
317 | 371 | ||
@@ -324,6 +378,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
324 | struct die_args *args = (struct die_args *)data; | 378 | struct die_args *args = (struct die_args *)data; |
325 | int ret = NOTIFY_DONE; | 379 | int ret = NOTIFY_DONE; |
326 | 380 | ||
381 | if (args->regs && user_mode(args->regs)) | ||
382 | return ret; | ||
383 | |||
327 | switch (val) { | 384 | switch (val) { |
328 | case DIE_DEBUG: | 385 | case DIE_DEBUG: |
329 | if (kprobe_handler(args->regs)) | 386 | if (kprobe_handler(args->regs)) |
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c index 9914a17651b4..c7fbbcfce824 100644 --- a/arch/sparc64/kernel/sparc64_ksyms.c +++ b/arch/sparc64/kernel/sparc64_ksyms.c | |||
@@ -175,11 +175,6 @@ EXPORT_SYMBOL(set_bit); | |||
175 | EXPORT_SYMBOL(clear_bit); | 175 | EXPORT_SYMBOL(clear_bit); |
176 | EXPORT_SYMBOL(change_bit); | 176 | EXPORT_SYMBOL(change_bit); |
177 | 177 | ||
178 | /* Bit searching */ | ||
179 | EXPORT_SYMBOL(find_next_bit); | ||
180 | EXPORT_SYMBOL(find_next_zero_bit); | ||
181 | EXPORT_SYMBOL(find_next_zero_le_bit); | ||
182 | |||
183 | EXPORT_SYMBOL(ivector_table); | 178 | EXPORT_SYMBOL(ivector_table); |
184 | EXPORT_SYMBOL(enable_irq); | 179 | EXPORT_SYMBOL(enable_irq); |
185 | EXPORT_SYMBOL(disable_irq); | 180 | EXPORT_SYMBOL(disable_irq); |
diff --git a/arch/sparc64/kernel/sys_sparc32.c b/arch/sparc64/kernel/sys_sparc32.c index 0e41df024489..2e906bad56fa 100644 --- a/arch/sparc64/kernel/sys_sparc32.c +++ b/arch/sparc64/kernel/sys_sparc32.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/resource.h> | 19 | #include <linux/resource.h> |
20 | #include <linux/times.h> | 20 | #include <linux/times.h> |
21 | #include <linux/utsname.h> | 21 | #include <linux/utsname.h> |
22 | #include <linux/timex.h> | ||
23 | #include <linux/smp.h> | 22 | #include <linux/smp.h> |
24 | #include <linux/smp_lock.h> | 23 | #include <linux/smp_lock.h> |
25 | #include <linux/sem.h> | 24 | #include <linux/sem.h> |
@@ -945,79 +944,6 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, | |||
945 | return ret; | 944 | return ret; |
946 | } | 945 | } |
947 | 946 | ||
948 | /* Handle adjtimex compatibility. */ | ||
949 | |||
950 | struct timex32 { | ||
951 | u32 modes; | ||
952 | s32 offset, freq, maxerror, esterror; | ||
953 | s32 status, constant, precision, tolerance; | ||
954 | struct compat_timeval time; | ||
955 | s32 tick; | ||
956 | s32 ppsfreq, jitter, shift, stabil; | ||
957 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
958 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
959 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
960 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
961 | }; | ||
962 | |||
963 | extern int do_adjtimex(struct timex *); | ||
964 | |||
965 | asmlinkage long sys32_adjtimex(struct timex32 __user *utp) | ||
966 | { | ||
967 | struct timex txc; | ||
968 | int ret; | ||
969 | |||
970 | memset(&txc, 0, sizeof(struct timex)); | ||
971 | |||
972 | if (get_user(txc.modes, &utp->modes) || | ||
973 | __get_user(txc.offset, &utp->offset) || | ||
974 | __get_user(txc.freq, &utp->freq) || | ||
975 | __get_user(txc.maxerror, &utp->maxerror) || | ||
976 | __get_user(txc.esterror, &utp->esterror) || | ||
977 | __get_user(txc.status, &utp->status) || | ||
978 | __get_user(txc.constant, &utp->constant) || | ||
979 | __get_user(txc.precision, &utp->precision) || | ||
980 | __get_user(txc.tolerance, &utp->tolerance) || | ||
981 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
982 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
983 | __get_user(txc.tick, &utp->tick) || | ||
984 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
985 | __get_user(txc.jitter, &utp->jitter) || | ||
986 | __get_user(txc.shift, &utp->shift) || | ||
987 | __get_user(txc.stabil, &utp->stabil) || | ||
988 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
989 | __get_user(txc.calcnt, &utp->calcnt) || | ||
990 | __get_user(txc.errcnt, &utp->errcnt) || | ||
991 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
992 | return -EFAULT; | ||
993 | |||
994 | ret = do_adjtimex(&txc); | ||
995 | |||
996 | if (put_user(txc.modes, &utp->modes) || | ||
997 | __put_user(txc.offset, &utp->offset) || | ||
998 | __put_user(txc.freq, &utp->freq) || | ||
999 | __put_user(txc.maxerror, &utp->maxerror) || | ||
1000 | __put_user(txc.esterror, &utp->esterror) || | ||
1001 | __put_user(txc.status, &utp->status) || | ||
1002 | __put_user(txc.constant, &utp->constant) || | ||
1003 | __put_user(txc.precision, &utp->precision) || | ||
1004 | __put_user(txc.tolerance, &utp->tolerance) || | ||
1005 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
1006 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
1007 | __put_user(txc.tick, &utp->tick) || | ||
1008 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
1009 | __put_user(txc.jitter, &utp->jitter) || | ||
1010 | __put_user(txc.shift, &utp->shift) || | ||
1011 | __put_user(txc.stabil, &utp->stabil) || | ||
1012 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
1013 | __put_user(txc.calcnt, &utp->calcnt) || | ||
1014 | __put_user(txc.errcnt, &utp->errcnt) || | ||
1015 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
1016 | ret = -EFAULT; | ||
1017 | |||
1018 | return ret; | ||
1019 | } | ||
1020 | |||
1021 | /* This is just a version for 32-bit applications which does | 947 | /* This is just a version for 32-bit applications which does |
1022 | * not force O_LARGEFILE on. | 948 | * not force O_LARGEFILE on. |
1023 | */ | 949 | */ |
diff --git a/arch/sparc64/kernel/systbls.S b/arch/sparc64/kernel/systbls.S index c3adb7ac167d..3b250f2318fd 100644 --- a/arch/sparc64/kernel/systbls.S +++ b/arch/sparc64/kernel/systbls.S | |||
@@ -63,7 +63,7 @@ sys_call_table32: | |||
63 | /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir | 63 | /*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir |
64 | .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 | 64 | .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 |
65 | /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo | 65 | /*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, sys32_sysinfo |
66 | .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, sys32_adjtimex | 66 | .word sys32_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex |
67 | /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid | 67 | /*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid |
68 | .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 | 68 | .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys32_setfsuid16, sys32_setfsgid16 |
69 | /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 | 69 | /*230*/ .word sys32_select, compat_sys_time, sys_nis_syscall, compat_sys_stime, compat_sys_statfs64 |
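Editor's note: the sparc64 hunks above (and the x86_64 ia32 hunks further down) drop the per-architecture struct timex32 marshalling and point the adjtimex slot in the 32-bit syscall table at the shared compat_sys_adjtimex() instead. The sketch below is only a hedged illustration of the shape of that consolidation, abridged to three fields and not the actual kernel/compat.c source; the get_compat_timex()/put_compat_timex() helper names are illustrative.

    #include <linux/timex.h>
    #include <linux/compat.h>
    #include <linux/string.h>
    #include <asm/uaccess.h>

    /* Abridged sketch: one shared compat wrapper instead of a copy per
     * 64-bit architecture. Only three representative fields are shown;
     * the real code converts every member of the 32-bit layout seen in
     * the removed sys32_adjtimex() above. */
    struct timex32 {
            u32 modes;
            s32 offset, freq;
            /* ... remaining fields as in the removed code above ... */
    };

    extern int do_adjtimex(struct timex *);

    static int get_compat_timex(struct timex *txc, struct timex32 __user *utp)
    {
            memset(txc, 0, sizeof(*txc));
            if (get_user(txc->modes, &utp->modes) ||
                __get_user(txc->offset, &utp->offset) ||
                __get_user(txc->freq, &utp->freq))
                    return -EFAULT;
            return 0;
    }

    static int put_compat_timex(struct timex32 __user *utp, struct timex *txc)
    {
            if (put_user(txc->modes, &utp->modes) ||
                __put_user(txc->offset, &utp->offset) ||
                __put_user(txc->freq, &utp->freq))
                    return -EFAULT;
            return 0;
    }

    asmlinkage long compat_sys_adjtimex(struct timex32 __user *utp)
    {
            struct timex txc;
            int ret;

            if (get_compat_timex(&txc, utp))
                    return -EFAULT;
            ret = do_adjtimex(&txc);
            if (put_compat_timex(utp, &txc))
                    return -EFAULT;
            return ret;
    }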
diff --git a/arch/sparc64/lib/Makefile b/arch/sparc64/lib/Makefile index 8812ded19f01..4a725d8985f1 100644 --- a/arch/sparc64/lib/Makefile +++ b/arch/sparc64/lib/Makefile | |||
@@ -14,6 +14,6 @@ lib-y := PeeCeeI.o copy_page.o clear_page.o strlen.o strncmp.o \ | |||
14 | NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ | 14 | NGmemcpy.o NGcopy_from_user.o NGcopy_to_user.o NGpatch.o \ |
15 | NGpage.o NGbzero.o \ | 15 | NGpage.o NGbzero.o \ |
16 | copy_in_user.o user_fixup.o memmove.o \ | 16 | copy_in_user.o user_fixup.o memmove.o \ |
17 | mcount.o ipcsum.o rwsem.o xor.o find_bit.o delay.o | 17 | mcount.o ipcsum.o rwsem.o xor.o delay.o |
18 | 18 | ||
19 | obj-y += iomap.o | 19 | obj-y += iomap.o |
diff --git a/arch/sparc64/lib/find_bit.c b/arch/sparc64/lib/find_bit.c deleted file mode 100644 index 6059557067b4..000000000000 --- a/arch/sparc64/lib/find_bit.c +++ /dev/null | |||
@@ -1,127 +0,0 @@ | |||
1 | #include <linux/bitops.h> | ||
2 | |||
3 | /** | ||
4 | * find_next_bit - find the next set bit in a memory region | ||
5 | * @addr: The address to base the search on | ||
6 | * @offset: The bitnumber to start searching at | ||
7 | * @size: The maximum size to search | ||
8 | */ | ||
9 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | ||
10 | unsigned long offset) | ||
11 | { | ||
12 | const unsigned long *p = addr + (offset >> 6); | ||
13 | unsigned long result = offset & ~63UL; | ||
14 | unsigned long tmp; | ||
15 | |||
16 | if (offset >= size) | ||
17 | return size; | ||
18 | size -= result; | ||
19 | offset &= 63UL; | ||
20 | if (offset) { | ||
21 | tmp = *(p++); | ||
22 | tmp &= (~0UL << offset); | ||
23 | if (size < 64) | ||
24 | goto found_first; | ||
25 | if (tmp) | ||
26 | goto found_middle; | ||
27 | size -= 64; | ||
28 | result += 64; | ||
29 | } | ||
30 | while (size & ~63UL) { | ||
31 | if ((tmp = *(p++))) | ||
32 | goto found_middle; | ||
33 | result += 64; | ||
34 | size -= 64; | ||
35 | } | ||
36 | if (!size) | ||
37 | return result; | ||
38 | tmp = *p; | ||
39 | |||
40 | found_first: | ||
41 | tmp &= (~0UL >> (64 - size)); | ||
42 | if (tmp == 0UL) /* Are any bits set? */ | ||
43 | return result + size; /* Nope. */ | ||
44 | found_middle: | ||
45 | return result + __ffs(tmp); | ||
46 | } | ||
47 | |||
48 | /* find_next_zero_bit() finds the first zero bit in a bit string of length | ||
49 | * 'size' bits, starting the search at bit 'offset'. This is largely based | ||
50 | * on Linus's ALPHA routines, which are pretty portable BTW. | ||
51 | */ | ||
52 | |||
53 | unsigned long find_next_zero_bit(const unsigned long *addr, | ||
54 | unsigned long size, unsigned long offset) | ||
55 | { | ||
56 | const unsigned long *p = addr + (offset >> 6); | ||
57 | unsigned long result = offset & ~63UL; | ||
58 | unsigned long tmp; | ||
59 | |||
60 | if (offset >= size) | ||
61 | return size; | ||
62 | size -= result; | ||
63 | offset &= 63UL; | ||
64 | if (offset) { | ||
65 | tmp = *(p++); | ||
66 | tmp |= ~0UL >> (64-offset); | ||
67 | if (size < 64) | ||
68 | goto found_first; | ||
69 | if (~tmp) | ||
70 | goto found_middle; | ||
71 | size -= 64; | ||
72 | result += 64; | ||
73 | } | ||
74 | while (size & ~63UL) { | ||
75 | if (~(tmp = *(p++))) | ||
76 | goto found_middle; | ||
77 | result += 64; | ||
78 | size -= 64; | ||
79 | } | ||
80 | if (!size) | ||
81 | return result; | ||
82 | tmp = *p; | ||
83 | |||
84 | found_first: | ||
85 | tmp |= ~0UL << size; | ||
86 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
87 | return result + size; /* Nope. */ | ||
88 | found_middle: | ||
89 | return result + ffz(tmp); | ||
90 | } | ||
91 | |||
92 | unsigned long find_next_zero_le_bit(unsigned long *addr, unsigned long size, unsigned long offset) | ||
93 | { | ||
94 | unsigned long *p = addr + (offset >> 6); | ||
95 | unsigned long result = offset & ~63UL; | ||
96 | unsigned long tmp; | ||
97 | |||
98 | if (offset >= size) | ||
99 | return size; | ||
100 | size -= result; | ||
101 | offset &= 63UL; | ||
102 | if(offset) { | ||
103 | tmp = __swab64p(p++); | ||
104 | tmp |= (~0UL >> (64-offset)); | ||
105 | if(size < 64) | ||
106 | goto found_first; | ||
107 | if(~tmp) | ||
108 | goto found_middle; | ||
109 | size -= 64; | ||
110 | result += 64; | ||
111 | } | ||
112 | while(size & ~63) { | ||
113 | if(~(tmp = __swab64p(p++))) | ||
114 | goto found_middle; | ||
115 | result += 64; | ||
116 | size -= 64; | ||
117 | } | ||
118 | if(!size) | ||
119 | return result; | ||
120 | tmp = __swab64p(p); | ||
121 | found_first: | ||
122 | tmp |= (~0UL << size); | ||
123 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
124 | return result + size; /* Nope. */ | ||
125 | found_middle: | ||
126 | return result + ffz(tmp); | ||
127 | } | ||
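Editor's note: with the private sparc64 bit-search routines deleted above (and their exports removed from sparc64_ksyms.c), callers rely on the generic lib/ implementation, selected elsewhere in this series via CONFIG_GENERIC_FIND_NEXT_BIT as the v850 and xtensa Kconfig hunks below show. A hedged usage sketch of the interface the deleted file implemented; the mask/nbits names are illustrative.

    #include <linux/bitops.h>

    /* Walk every set bit in a bitmap with the generic helper whose
     * sparc64-private copy is removed above. */
    static void walk_bits(const unsigned long *mask, unsigned long nbits)
    {
            unsigned long bit;

            for (bit = find_next_bit(mask, nbits, 0);
                 bit < nbits;
                 bit = find_next_bit(mask, nbits, bit + 1)) {
                    /* handle set bit 'bit' here */
            }
    }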
diff --git a/arch/um/Kconfig.i386 b/arch/um/Kconfig.i386 index ef79ed25aecd..85e6a55b3b59 100644 --- a/arch/um/Kconfig.i386 +++ b/arch/um/Kconfig.i386 | |||
@@ -52,3 +52,8 @@ config ARCH_HAS_SC_SIGNALS | |||
52 | config ARCH_REUSE_HOST_VSYSCALL_AREA | 52 | config ARCH_REUSE_HOST_VSYSCALL_AREA |
53 | bool | 53 | bool |
54 | default y | 54 | default y |
55 | |||
56 | config GENERIC_HWEIGHT | ||
57 | bool | ||
58 | default y | ||
59 | |||
diff --git a/arch/um/Kconfig.x86_64 b/arch/um/Kconfig.x86_64 index aae19bc4b06a..f60e9e506424 100644 --- a/arch/um/Kconfig.x86_64 +++ b/arch/um/Kconfig.x86_64 | |||
@@ -46,3 +46,8 @@ config ARCH_REUSE_HOST_VSYSCALL_AREA | |||
46 | config SMP_BROKEN | 46 | config SMP_BROKEN |
47 | bool | 47 | bool |
48 | default y | 48 | default y |
49 | |||
50 | config GENERIC_HWEIGHT | ||
51 | bool | ||
52 | default y | ||
53 | |||
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig index e7fc3e500342..37ec644603ab 100644 --- a/arch/v850/Kconfig +++ b/arch/v850/Kconfig | |||
@@ -16,6 +16,12 @@ config RWSEM_GENERIC_SPINLOCK | |||
16 | config RWSEM_XCHGADD_ALGORITHM | 16 | config RWSEM_XCHGADD_ALGORITHM |
17 | bool | 17 | bool |
18 | default n | 18 | default n |
19 | config GENERIC_FIND_NEXT_BIT | ||
20 | bool | ||
21 | default y | ||
22 | config GENERIC_HWEIGHT | ||
23 | bool | ||
24 | default y | ||
19 | config GENERIC_CALIBRATE_DELAY | 25 | config GENERIC_CALIBRATE_DELAY |
20 | bool | 26 | bool |
21 | default y | 27 | default y |
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig index 6420baeb8c1f..45efe0ca88f8 100644 --- a/arch/x86_64/Kconfig +++ b/arch/x86_64/Kconfig | |||
@@ -45,6 +45,10 @@ config RWSEM_GENERIC_SPINLOCK | |||
45 | config RWSEM_XCHGADD_ALGORITHM | 45 | config RWSEM_XCHGADD_ALGORITHM |
46 | bool | 46 | bool |
47 | 47 | ||
48 | config GENERIC_HWEIGHT | ||
49 | bool | ||
50 | default y | ||
51 | |||
48 | config GENERIC_CALIBRATE_DELAY | 52 | config GENERIC_CALIBRATE_DELAY |
49 | bool | 53 | bool |
50 | default y | 54 | default y |
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile index 0fbc0283609c..585fd4a559c8 100644 --- a/arch/x86_64/Makefile +++ b/arch/x86_64/Makefile | |||
@@ -70,7 +70,7 @@ drivers-$(CONFIG_OPROFILE) += arch/x86_64/oprofile/ | |||
70 | boot := arch/x86_64/boot | 70 | boot := arch/x86_64/boot |
71 | 71 | ||
72 | PHONY += bzImage bzlilo install archmrproper \ | 72 | PHONY += bzImage bzlilo install archmrproper \ |
73 | fdimage fdimage144 fdimage288 archclean | 73 | fdimage fdimage144 fdimage288 isoimage archclean |
74 | 74 | ||
75 | #Default target when executing "make" | 75 | #Default target when executing "make" |
76 | all: bzImage | 76 | all: bzImage |
@@ -87,7 +87,7 @@ bzlilo: vmlinux | |||
87 | bzdisk: vmlinux | 87 | bzdisk: vmlinux |
88 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk | 88 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zdisk |
89 | 89 | ||
90 | fdimage fdimage144 fdimage288: vmlinux | 90 | fdimage fdimage144 fdimage288 isoimage: vmlinux |
91 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ | 91 | $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) $@ |
92 | 92 | ||
93 | install: | 93 | install: |
@@ -99,11 +99,16 @@ archclean: | |||
99 | define archhelp | 99 | define archhelp |
100 | echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)' | 100 | echo '* bzImage - Compressed kernel image (arch/$(ARCH)/boot/bzImage)' |
101 | echo ' install - Install kernel using' | 101 | echo ' install - Install kernel using' |
102 | echo ' (your) ~/bin/installkernel or' | 102 | echo ' (your) ~/bin/installkernel or' |
103 | echo ' (distribution) /sbin/installkernel or' | 103 | echo ' (distribution) /sbin/installkernel or' |
104 | echo ' install to $$(INSTALL_PATH) and run lilo' | 104 | echo ' install to $$(INSTALL_PATH) and run lilo' |
105 | echo ' bzdisk - Create a boot floppy in /dev/fd0' | ||
106 | echo ' fdimage - Create a boot floppy image' | ||
107 | echo ' isoimage - Create a boot CD-ROM image' | ||
105 | endef | 108 | endef |
106 | 109 | ||
107 | CLEAN_FILES += arch/$(ARCH)/boot/fdimage arch/$(ARCH)/boot/mtools.conf | 110 | CLEAN_FILES += arch/$(ARCH)/boot/fdimage \ |
111 | arch/$(ARCH)/boot/image.iso \ | ||
112 | arch/$(ARCH)/boot/mtools.conf | ||
108 | 113 | ||
109 | 114 | ||
diff --git a/arch/x86_64/boot/Makefile b/arch/x86_64/boot/Makefile index 29f8396ed151..43ee6c50c277 100644 --- a/arch/x86_64/boot/Makefile +++ b/arch/x86_64/boot/Makefile | |||
@@ -60,8 +60,12 @@ $(obj)/setup $(obj)/bootsect: %: %.o FORCE | |||
60 | $(obj)/compressed/vmlinux: FORCE | 60 | $(obj)/compressed/vmlinux: FORCE |
61 | $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ | 61 | $(Q)$(MAKE) $(build)=$(obj)/compressed IMAGE_OFFSET=$(IMAGE_OFFSET) $@ |
62 | 62 | ||
63 | # Set this if you want to pass append arguments to the zdisk/fdimage kernel | 63 | # Set this if you want to pass append arguments to the zdisk/fdimage/isoimage kernel |
64 | FDARGS = | 64 | FDARGS = |
65 | # Set this if you want an initrd included with the zdisk/fdimage/isoimage kernel | ||
66 | FDINITRD = | ||
67 | |||
68 | image_cmdline = default linux $(FDARGS) $(if $(FDINITRD),initrd=initrd.img,) | ||
65 | 69 | ||
66 | $(obj)/mtools.conf: $(src)/mtools.conf.in | 70 | $(obj)/mtools.conf: $(src)/mtools.conf.in |
67 | sed -e 's|@OBJ@|$(obj)|g' < $< > $@ | 71 | sed -e 's|@OBJ@|$(obj)|g' < $< > $@ |
@@ -70,8 +74,11 @@ $(obj)/mtools.conf: $(src)/mtools.conf.in | |||
70 | zdisk: $(BOOTIMAGE) $(obj)/mtools.conf | 74 | zdisk: $(BOOTIMAGE) $(obj)/mtools.conf |
71 | MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync | 75 | MTOOLSRC=$(obj)/mtools.conf mformat a: ; sync |
72 | syslinux /dev/fd0 ; sync | 76 | syslinux /dev/fd0 ; sync |
73 | echo 'default linux $(FDARGS)' | \ | 77 | echo '$(image_cmdline)' | \ |
74 | MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg | 78 | MTOOLSRC=$(obj)/mtools.conf mcopy - a:syslinux.cfg |
79 | if [ -f '$(FDINITRD)' ] ; then \ | ||
80 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' a:initrd.img ; \ | ||
81 | fi | ||
75 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync | 82 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) a:linux ; sync |
76 | 83 | ||
77 | # These require being root or having syslinux 2.02 or higher installed | 84 | # These require being root or having syslinux 2.02 or higher installed |
@@ -79,18 +86,39 @@ fdimage fdimage144: $(BOOTIMAGE) $(obj)/mtools.conf | |||
79 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 | 86 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=1440 |
80 | MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync | 87 | MTOOLSRC=$(obj)/mtools.conf mformat v: ; sync |
81 | syslinux $(obj)/fdimage ; sync | 88 | syslinux $(obj)/fdimage ; sync |
82 | echo 'default linux $(FDARGS)' | \ | 89 | echo '$(image_cmdline)' | \ |
83 | MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg | 90 | MTOOLSRC=$(obj)/mtools.conf mcopy - v:syslinux.cfg |
91 | if [ -f '$(FDINITRD)' ] ; then \ | ||
92 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' v:initrd.img ; \ | ||
93 | fi | ||
84 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync | 94 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) v:linux ; sync |
85 | 95 | ||
86 | fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf | 96 | fdimage288: $(BOOTIMAGE) $(obj)/mtools.conf |
87 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 | 97 | dd if=/dev/zero of=$(obj)/fdimage bs=1024 count=2880 |
88 | MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync | 98 | MTOOLSRC=$(obj)/mtools.conf mformat w: ; sync |
89 | syslinux $(obj)/fdimage ; sync | 99 | syslinux $(obj)/fdimage ; sync |
90 | echo 'default linux $(FDARGS)' | \ | 100 | echo '$(image_cmdline)' | \ |
91 | MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg | 101 | MTOOLSRC=$(obj)/mtools.conf mcopy - w:syslinux.cfg |
102 | if [ -f '$(FDINITRD)' ] ; then \ | ||
103 | MTOOLSRC=$(obj)/mtools.conf mcopy '$(FDINITRD)' w:initrd.img ; \ | ||
104 | fi | ||
92 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync | 105 | MTOOLSRC=$(obj)/mtools.conf mcopy $(BOOTIMAGE) w:linux ; sync |
93 | 106 | ||
107 | isoimage: $(BOOTIMAGE) | ||
108 | -rm -rf $(obj)/isoimage | ||
109 | mkdir $(obj)/isoimage | ||
110 | cp `echo /usr/lib*/syslinux/isolinux.bin | awk '{ print $1; }'` \ | ||
111 | $(obj)/isoimage | ||
112 | cp $(BOOTIMAGE) $(obj)/isoimage/linux | ||
113 | echo '$(image_cmdline)' > $(obj)/isoimage/isolinux.cfg | ||
114 | if [ -f '$(FDINITRD)' ] ; then \ | ||
115 | cp '$(FDINITRD)' $(obj)/isoimage/initrd.img ; \ | ||
116 | fi | ||
117 | mkisofs -J -r -o $(obj)/image.iso -b isolinux.bin -c boot.cat \ | ||
118 | -no-emul-boot -boot-load-size 4 -boot-info-table \ | ||
119 | $(obj)/isoimage | ||
120 | rm -rf $(obj)/isoimage | ||
121 | |||
94 | zlilo: $(BOOTIMAGE) | 122 | zlilo: $(BOOTIMAGE) |
95 | if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi | 123 | if [ -f $(INSTALL_PATH)/vmlinuz ]; then mv $(INSTALL_PATH)/vmlinuz $(INSTALL_PATH)/vmlinuz.old; fi |
96 | if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi | 124 | if [ -f $(INSTALL_PATH)/System.map ]; then mv $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi |
diff --git a/arch/x86_64/ia32/ia32entry.S b/arch/x86_64/ia32/ia32entry.S index 00dee176c08e..7549a4389fbf 100644 --- a/arch/x86_64/ia32/ia32entry.S +++ b/arch/x86_64/ia32/ia32entry.S | |||
@@ -501,7 +501,7 @@ ia32_sys_call_table: | |||
501 | .quad sys_setdomainname | 501 | .quad sys_setdomainname |
502 | .quad sys_uname | 502 | .quad sys_uname |
503 | .quad sys_modify_ldt | 503 | .quad sys_modify_ldt |
504 | .quad sys32_adjtimex | 504 | .quad compat_sys_adjtimex |
505 | .quad sys32_mprotect /* 125 */ | 505 | .quad sys32_mprotect /* 125 */ |
506 | .quad compat_sys_sigprocmask | 506 | .quad compat_sys_sigprocmask |
507 | .quad quiet_ni_syscall /* create_module */ | 507 | .quad quiet_ni_syscall /* create_module */ |
diff --git a/arch/x86_64/ia32/sys_ia32.c b/arch/x86_64/ia32/sys_ia32.c index 2b2d029f477c..f182b20858e2 100644 --- a/arch/x86_64/ia32/sys_ia32.c +++ b/arch/x86_64/ia32/sys_ia32.c | |||
@@ -30,7 +30,6 @@ | |||
30 | #include <linux/resource.h> | 30 | #include <linux/resource.h> |
31 | #include <linux/times.h> | 31 | #include <linux/times.h> |
32 | #include <linux/utsname.h> | 32 | #include <linux/utsname.h> |
33 | #include <linux/timex.h> | ||
34 | #include <linux/smp.h> | 33 | #include <linux/smp.h> |
35 | #include <linux/smp_lock.h> | 34 | #include <linux/smp_lock.h> |
36 | #include <linux/sem.h> | 35 | #include <linux/sem.h> |
@@ -767,82 +766,6 @@ sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset, s32 count) | |||
767 | return ret; | 766 | return ret; |
768 | } | 767 | } |
769 | 768 | ||
770 | /* Handle adjtimex compatibility. */ | ||
771 | |||
772 | struct timex32 { | ||
773 | u32 modes; | ||
774 | s32 offset, freq, maxerror, esterror; | ||
775 | s32 status, constant, precision, tolerance; | ||
776 | struct compat_timeval time; | ||
777 | s32 tick; | ||
778 | s32 ppsfreq, jitter, shift, stabil; | ||
779 | s32 jitcnt, calcnt, errcnt, stbcnt; | ||
780 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
781 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
782 | s32 :32; s32 :32; s32 :32; s32 :32; | ||
783 | }; | ||
784 | |||
785 | extern int do_adjtimex(struct timex *); | ||
786 | |||
787 | asmlinkage long | ||
788 | sys32_adjtimex(struct timex32 __user *utp) | ||
789 | { | ||
790 | struct timex txc; | ||
791 | int ret; | ||
792 | |||
793 | memset(&txc, 0, sizeof(struct timex)); | ||
794 | |||
795 | if (!access_ok(VERIFY_READ, utp, sizeof(struct timex32)) || | ||
796 | __get_user(txc.modes, &utp->modes) || | ||
797 | __get_user(txc.offset, &utp->offset) || | ||
798 | __get_user(txc.freq, &utp->freq) || | ||
799 | __get_user(txc.maxerror, &utp->maxerror) || | ||
800 | __get_user(txc.esterror, &utp->esterror) || | ||
801 | __get_user(txc.status, &utp->status) || | ||
802 | __get_user(txc.constant, &utp->constant) || | ||
803 | __get_user(txc.precision, &utp->precision) || | ||
804 | __get_user(txc.tolerance, &utp->tolerance) || | ||
805 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
806 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
807 | __get_user(txc.tick, &utp->tick) || | ||
808 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
809 | __get_user(txc.jitter, &utp->jitter) || | ||
810 | __get_user(txc.shift, &utp->shift) || | ||
811 | __get_user(txc.stabil, &utp->stabil) || | ||
812 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
813 | __get_user(txc.calcnt, &utp->calcnt) || | ||
814 | __get_user(txc.errcnt, &utp->errcnt) || | ||
815 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
816 | return -EFAULT; | ||
817 | |||
818 | ret = do_adjtimex(&txc); | ||
819 | |||
820 | if (!access_ok(VERIFY_WRITE, utp, sizeof(struct timex32)) || | ||
821 | __put_user(txc.modes, &utp->modes) || | ||
822 | __put_user(txc.offset, &utp->offset) || | ||
823 | __put_user(txc.freq, &utp->freq) || | ||
824 | __put_user(txc.maxerror, &utp->maxerror) || | ||
825 | __put_user(txc.esterror, &utp->esterror) || | ||
826 | __put_user(txc.status, &utp->status) || | ||
827 | __put_user(txc.constant, &utp->constant) || | ||
828 | __put_user(txc.precision, &utp->precision) || | ||
829 | __put_user(txc.tolerance, &utp->tolerance) || | ||
830 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
831 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
832 | __put_user(txc.tick, &utp->tick) || | ||
833 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
834 | __put_user(txc.jitter, &utp->jitter) || | ||
835 | __put_user(txc.shift, &utp->shift) || | ||
836 | __put_user(txc.stabil, &utp->stabil) || | ||
837 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
838 | __put_user(txc.calcnt, &utp->calcnt) || | ||
839 | __put_user(txc.errcnt, &utp->errcnt) || | ||
840 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
841 | ret = -EFAULT; | ||
842 | |||
843 | return ret; | ||
844 | } | ||
845 | |||
846 | asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, | 769 | asmlinkage long sys32_mmap2(unsigned long addr, unsigned long len, |
847 | unsigned long prot, unsigned long flags, | 770 | unsigned long prot, unsigned long flags, |
848 | unsigned long fd, unsigned long pgoff) | 771 | unsigned long fd, unsigned long pgoff) |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 14f0ced613b6..accbff3fec49 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
@@ -37,10 +37,12 @@ | |||
37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/preempt.h> | 39 | #include <linux/preempt.h> |
40 | #include <linux/module.h> | ||
40 | 41 | ||
41 | #include <asm/cacheflush.h> | 42 | #include <asm/cacheflush.h> |
42 | #include <asm/pgtable.h> | 43 | #include <asm/pgtable.h> |
43 | #include <asm/kdebug.h> | 44 | #include <asm/kdebug.h> |
45 | #include <asm/uaccess.h> | ||
44 | 46 | ||
45 | void jprobe_return_end(void); | 47 | void jprobe_return_end(void); |
46 | static void __kprobes arch_copy_kprobe(struct kprobe *p); | 48 | static void __kprobes arch_copy_kprobe(struct kprobe *p); |
@@ -578,16 +580,62 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) | |||
578 | { | 580 | { |
579 | struct kprobe *cur = kprobe_running(); | 581 | struct kprobe *cur = kprobe_running(); |
580 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | 582 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); |
583 | const struct exception_table_entry *fixup; | ||
581 | 584 | ||
582 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | 585 | switch(kcb->kprobe_status) { |
583 | return 1; | 586 | case KPROBE_HIT_SS: |
584 | 587 | case KPROBE_REENTER: | |
585 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | 588 | /* |
586 | resume_execution(cur, regs, kcb); | 589 | * We are here because the instruction being single |
590 | * stepped caused a page fault. We reset the current | ||
591 | * kprobe and the rip points back to the probe address | ||
592 | * and allow the page fault handler to continue as a | ||
593 | * normal page fault. | ||
594 | */ | ||
595 | regs->rip = (unsigned long)cur->addr; | ||
587 | regs->eflags |= kcb->kprobe_old_rflags; | 596 | regs->eflags |= kcb->kprobe_old_rflags; |
588 | 597 | if (kcb->kprobe_status == KPROBE_REENTER) | |
589 | reset_current_kprobe(); | 598 | restore_previous_kprobe(kcb); |
599 | else | ||
600 | reset_current_kprobe(); | ||
590 | preempt_enable_no_resched(); | 601 | preempt_enable_no_resched(); |
602 | break; | ||
603 | case KPROBE_HIT_ACTIVE: | ||
604 | case KPROBE_HIT_SSDONE: | ||
605 | /* | ||
606 | * We increment the nmissed count for accounting, | ||
607 | * we can also use npre/npostfault count for accounting | ||
608 | * these specific fault cases. | ||
609 | */ | ||
610 | kprobes_inc_nmissed_count(cur); | ||
611 | |||
612 | /* | ||
613 | * We come here because instructions in the pre/post | ||
614 | * handler caused the page_fault, this could happen | ||
615 | * if handler tries to access user space by | ||
616 | * copy_from_user(), get_user() etc. Let the | ||
617 | * user-specified handler try to fix it first. | ||
618 | */ | ||
619 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
620 | return 1; | ||
621 | |||
622 | /* | ||
623 | * In case the user-specified fault handler returned | ||
624 | * zero, try to fix up. | ||
625 | */ | ||
626 | fixup = search_exception_tables(regs->rip); | ||
627 | if (fixup) { | ||
628 | regs->rip = fixup->fixup; | ||
629 | return 1; | ||
630 | } | ||
631 | |||
632 | /* | ||
633 | * fixup() could not handle it, | ||
634 | * Let do_page_fault() fix it. | ||
635 | */ | ||
636 | break; | ||
637 | default: | ||
638 | break; | ||
591 | } | 639 | } |
592 | return 0; | 640 | return 0; |
593 | } | 641 | } |
@@ -601,6 +649,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | |||
601 | struct die_args *args = (struct die_args *)data; | 649 | struct die_args *args = (struct die_args *)data; |
602 | int ret = NOTIFY_DONE; | 650 | int ret = NOTIFY_DONE; |
603 | 651 | ||
652 | if (args->regs && user_mode(args->regs)) | ||
653 | return ret; | ||
654 | |||
604 | switch (val) { | 655 | switch (val) { |
605 | case DIE_INT3: | 656 | case DIE_INT3: |
606 | if (kprobe_handler(args->regs)) | 657 | if (kprobe_handler(args->regs)) |
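Editor's note: the reworked kprobe_fault_handler() above separates faults taken while single-stepping (reset the probe and replay the fault) from faults raised by the pre/post handlers themselves, where the user-supplied fault_handler gets the first chance before the exception tables are consulted. Below is a hedged sketch of a module exercising that second path; the probed symbol and the kallsyms_lookup_name() call are illustrative assumptions, not part of the patch, and its availability to modules varies.

    #include <linux/module.h>
    #include <linux/kernel.h>
    #include <linux/kprobes.h>
    #include <linux/kallsyms.h>

    static int demo_pre(struct kprobe *p, struct pt_regs *regs)
    {
            /* A handler that touched user memory here could fault; the
             * kprobe core would then call demo_fault() below. */
            return 0;
    }

    static int demo_fault(struct kprobe *p, struct pt_regs *regs, int trapnr)
    {
            printk(KERN_INFO "kprobe fault, trap %d\n", trapnr);
            return 0;       /* 0: let the core try fixup, then do_page_fault() */
    }

    static struct kprobe demo_kp = {
            .pre_handler   = demo_pre,
            .fault_handler = demo_fault,
    };

    static int __init demo_init(void)
    {
            /* Illustrative target; assumes kallsyms_lookup_name() is usable. */
            demo_kp.addr = (kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
            if (!demo_kp.addr)
                    return -ENOENT;
            return register_kprobe(&demo_kp);
    }

    static void __exit demo_exit(void)
    {
            unregister_kprobe(&demo_kp);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");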
diff --git a/arch/x86_64/kernel/process.c b/arch/x86_64/kernel/process.c index 81111835722d..0370720515f1 100644 --- a/arch/x86_64/kernel/process.c +++ b/arch/x86_64/kernel/process.c | |||
@@ -35,8 +35,8 @@ | |||
35 | #include <linux/ptrace.h> | 35 | #include <linux/ptrace.h> |
36 | #include <linux/utsname.h> | 36 | #include <linux/utsname.h> |
37 | #include <linux/random.h> | 37 | #include <linux/random.h> |
38 | #include <linux/kprobes.h> | ||
39 | #include <linux/notifier.h> | 38 | #include <linux/notifier.h> |
39 | #include <linux/kprobes.h> | ||
40 | 40 | ||
41 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
42 | #include <asm/pgtable.h> | 42 | #include <asm/pgtable.h> |
@@ -353,13 +353,6 @@ void exit_thread(void) | |||
353 | struct task_struct *me = current; | 353 | struct task_struct *me = current; |
354 | struct thread_struct *t = &me->thread; | 354 | struct thread_struct *t = &me->thread; |
355 | 355 | ||
356 | /* | ||
357 | * Remove function-return probe instances associated with this task | ||
358 | * and put them back on the free list. Do not insert an exit probe for | ||
359 | * this function, it will be disabled by kprobe_flush_task if you do. | ||
360 | */ | ||
361 | kprobe_flush_task(me); | ||
362 | |||
363 | if (me->thread.io_bitmap_ptr) { | 356 | if (me->thread.io_bitmap_ptr) { |
364 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); | 357 | struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); |
365 | 358 | ||
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index e90ef5db8913..dbeb3504c3c8 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -22,6 +22,14 @@ config RWSEM_XCHGADD_ALGORITHM | |||
22 | bool | 22 | bool |
23 | default y | 23 | default y |
24 | 24 | ||
25 | config GENERIC_FIND_NEXT_BIT | ||
26 | bool | ||
27 | default y | ||
28 | |||
29 | config GENERIC_HWEIGHT | ||
30 | bool | ||
31 | default y | ||
32 | |||
25 | config GENERIC_HARDIRQS | 33 | config GENERIC_HARDIRQS |
26 | bool | 34 | bool |
27 | default y | 35 | default y |
diff --git a/block/Kconfig b/block/Kconfig index 96783645092d..43ca070dc0f8 100644 --- a/block/Kconfig +++ b/block/Kconfig | |||
@@ -23,4 +23,13 @@ config BLK_DEV_IO_TRACE | |||
23 | 23 | ||
24 | git://brick.kernel.dk/data/git/blktrace.git | 24 | git://brick.kernel.dk/data/git/blktrace.git |
25 | 25 | ||
26 | config LSF | ||
27 | bool "Support for Large Single Files" | ||
28 | depends on X86 || (MIPS && 32BIT) || PPC32 || ARCH_S390_31 || SUPERH || UML | ||
29 | default n | ||
30 | help | ||
31 | When CONFIG_LBD is disabled, say Y here if you want to | ||
32 | handle large files (bigger than 2TB); otherwise say N. | ||
33 | When CONFIG_LBD is enabled, Y is set automatically. | ||
34 | |||
26 | source block/Kconfig.iosched | 35 | source block/Kconfig.iosched |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index c4a0d5d8d7f0..bde40a6ae665 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -2191,7 +2191,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e) | |||
2191 | if (!cfqd->cfq_hash) | 2191 | if (!cfqd->cfq_hash) |
2192 | goto out_cfqhash; | 2192 | goto out_cfqhash; |
2193 | 2193 | ||
2194 | cfqd->crq_pool = mempool_create(BLKDEV_MIN_RQ, mempool_alloc_slab, mempool_free_slab, crq_pool); | 2194 | cfqd->crq_pool = mempool_create_slab_pool(BLKDEV_MIN_RQ, crq_pool); |
2195 | if (!cfqd->crq_pool) | 2195 | if (!cfqd->crq_pool) |
2196 | goto out_crqpool; | 2196 | goto out_crqpool; |
2197 | 2197 | ||
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index ac5bbaedac1b..13b5fd5854a8 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c | |||
@@ -156,12 +156,10 @@ acpi_status acpi_os_get_root_pointer(u32 flags, struct acpi_pointer *addr) | |||
156 | { | 156 | { |
157 | if (efi_enabled) { | 157 | if (efi_enabled) { |
158 | addr->pointer_type = ACPI_PHYSICAL_POINTER; | 158 | addr->pointer_type = ACPI_PHYSICAL_POINTER; |
159 | if (efi.acpi20) | 159 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
160 | addr->pointer.physical = | 160 | addr->pointer.physical = efi.acpi20; |
161 | (acpi_physical_address) virt_to_phys(efi.acpi20); | 161 | else if (efi.acpi != EFI_INVALID_TABLE_ADDR) |
162 | else if (efi.acpi) | 162 | addr->pointer.physical = efi.acpi; |
163 | addr->pointer.physical = | ||
164 | (acpi_physical_address) virt_to_phys(efi.acpi); | ||
165 | else { | 163 | else { |
166 | printk(KERN_ERR PREFIX | 164 | printk(KERN_ERR PREFIX |
167 | "System description tables not found\n"); | 165 | "System description tables not found\n"); |
@@ -182,22 +180,14 @@ acpi_status | |||
182 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size, | 180 | acpi_os_map_memory(acpi_physical_address phys, acpi_size size, |
183 | void __iomem ** virt) | 181 | void __iomem ** virt) |
184 | { | 182 | { |
185 | if (efi_enabled) { | 183 | if (phys > ULONG_MAX) { |
186 | if (EFI_MEMORY_WB & efi_mem_attributes(phys)) { | 184 | printk(KERN_ERR PREFIX "Cannot map memory that high\n"); |
187 | *virt = (void __iomem *)phys_to_virt(phys); | 185 | return AE_BAD_PARAMETER; |
188 | } else { | ||
189 | *virt = ioremap(phys, size); | ||
190 | } | ||
191 | } else { | ||
192 | if (phys > ULONG_MAX) { | ||
193 | printk(KERN_ERR PREFIX "Cannot map memory that high\n"); | ||
194 | return AE_BAD_PARAMETER; | ||
195 | } | ||
196 | /* | ||
197 | * ioremap checks to ensure this is in reserved space | ||
198 | */ | ||
199 | *virt = ioremap((unsigned long)phys, size); | ||
200 | } | 186 | } |
187 | /* | ||
188 | * ioremap checks to ensure this is in reserved space | ||
189 | */ | ||
190 | *virt = ioremap((unsigned long)phys, size); | ||
201 | 191 | ||
202 | if (!*virt) | 192 | if (!*virt) |
203 | return AE_NO_MEMORY; | 193 | return AE_NO_MEMORY; |
@@ -409,18 +399,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
409 | { | 399 | { |
410 | u32 dummy; | 400 | u32 dummy; |
411 | void __iomem *virt_addr; | 401 | void __iomem *virt_addr; |
412 | int iomem = 0; | ||
413 | 402 | ||
414 | if (efi_enabled) { | 403 | virt_addr = ioremap(phys_addr, width); |
415 | if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { | ||
416 | /* HACK ALERT! We can use readb/w/l on real memory too.. */ | ||
417 | virt_addr = (void __iomem *)phys_to_virt(phys_addr); | ||
418 | } else { | ||
419 | iomem = 1; | ||
420 | virt_addr = ioremap(phys_addr, width); | ||
421 | } | ||
422 | } else | ||
423 | virt_addr = (void __iomem *)phys_to_virt(phys_addr); | ||
424 | if (!value) | 404 | if (!value) |
425 | value = &dummy; | 405 | value = &dummy; |
426 | 406 | ||
@@ -438,10 +418,7 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) | |||
438 | BUG(); | 418 | BUG(); |
439 | } | 419 | } |
440 | 420 | ||
441 | if (efi_enabled) { | 421 | iounmap(virt_addr); |
442 | if (iomem) | ||
443 | iounmap(virt_addr); | ||
444 | } | ||
445 | 422 | ||
446 | return AE_OK; | 423 | return AE_OK; |
447 | } | 424 | } |
@@ -450,18 +427,8 @@ acpi_status | |||
450 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | 427 | acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) |
451 | { | 428 | { |
452 | void __iomem *virt_addr; | 429 | void __iomem *virt_addr; |
453 | int iomem = 0; | ||
454 | 430 | ||
455 | if (efi_enabled) { | 431 | virt_addr = ioremap(phys_addr, width); |
456 | if (EFI_MEMORY_WB & efi_mem_attributes(phys_addr)) { | ||
457 | /* HACK ALERT! We can use writeb/w/l on real memory too */ | ||
458 | virt_addr = (void __iomem *)phys_to_virt(phys_addr); | ||
459 | } else { | ||
460 | iomem = 1; | ||
461 | virt_addr = ioremap(phys_addr, width); | ||
462 | } | ||
463 | } else | ||
464 | virt_addr = (void __iomem *)phys_to_virt(phys_addr); | ||
465 | 432 | ||
466 | switch (width) { | 433 | switch (width) { |
467 | case 8: | 434 | case 8: |
@@ -477,8 +444,7 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) | |||
477 | BUG(); | 444 | BUG(); |
478 | } | 445 | } |
479 | 446 | ||
480 | if (iomem) | 447 | iounmap(virt_addr); |
481 | iounmap(virt_addr); | ||
482 | 448 | ||
483 | return AE_OK; | 449 | return AE_OK; |
484 | } | 450 | } |
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 99a3a28594da..713b763884a9 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -246,7 +246,7 @@ static int acpi_processor_errata(struct acpi_processor *pr) | |||
246 | } | 246 | } |
247 | 247 | ||
248 | /* -------------------------------------------------------------------------- | 248 | /* -------------------------------------------------------------------------- |
249 | Common ACPI processor fucntions | 249 | Common ACPI processor functions |
250 | -------------------------------------------------------------------------- */ | 250 | -------------------------------------------------------------------------- */ |
251 | 251 | ||
252 | /* | 252 | /* |
diff --git a/drivers/acpi/tables.c b/drivers/acpi/tables.c index 31d4f3ffc265..7f37c7cc5ef1 100644 --- a/drivers/acpi/tables.c +++ b/drivers/acpi/tables.c | |||
@@ -587,7 +587,8 @@ int __init acpi_table_init(void) | |||
587 | return -ENODEV; | 587 | return -ENODEV; |
588 | } | 588 | } |
589 | 589 | ||
590 | rsdp = (struct acpi_table_rsdp *)__va(rsdp_phys); | 590 | rsdp = (struct acpi_table_rsdp *)__acpi_map_table(rsdp_phys, |
591 | sizeof(struct acpi_table_rsdp)); | ||
591 | if (!rsdp) { | 592 | if (!rsdp) { |
592 | printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); | 593 | printk(KERN_WARNING PREFIX "Unable to map RSDP\n"); |
593 | return -ENODEV; | 594 | return -ENODEV; |
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index e57ac5a43246..875ae7699025 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig | |||
@@ -400,13 +400,16 @@ config BLK_DEV_RAM_SIZE | |||
400 | 8192. | 400 | 8192. |
401 | 401 | ||
402 | config BLK_DEV_INITRD | 402 | config BLK_DEV_INITRD |
403 | bool "Initial RAM disk (initrd) support" | 403 | bool "Initial RAM filesystem and RAM disk (initramfs/initrd) support" |
404 | help | 404 | help |
405 | The initial RAM disk is a RAM disk that is loaded by the boot loader | 405 | The initial RAM filesystem is a ramfs which is loaded by the |
406 | (loadlin or lilo) and that is mounted as root before the normal boot | 406 | boot loader (loadlin or lilo) and that is mounted as root |
407 | procedure. It is typically used to load modules needed to mount the | 407 | before the normal boot procedure. It is typically used to |
408 | "real" root file system, etc. See <file:Documentation/initrd.txt> | 408 | load modules needed to mount the "real" root file system, |
409 | for details. | 409 | etc. See <file:Documentation/initrd.txt> for details. |
410 | |||
411 | If RAM disk support (BLK_DEV_RAM) is also included, this | ||
412 | also enables initial RAM disk (initrd) support. | ||
410 | 413 | ||
411 | 414 | ||
412 | config CDROM_PKTCDVD | 415 | config CDROM_PKTCDVD |
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 32fea55fac48..393b86a3dbf8 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c | |||
@@ -211,9 +211,7 @@ aoeblk_gdalloc(void *vp) | |||
211 | return; | 211 | return; |
212 | } | 212 | } |
213 | 213 | ||
214 | d->bufpool = mempool_create(MIN_BUFS, | 214 | d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); |
215 | mempool_alloc_slab, mempool_free_slab, | ||
216 | buf_pool_cache); | ||
217 | if (d->bufpool == NULL) { | 215 | if (d->bufpool == NULL) { |
218 | printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool " | 216 | printk(KERN_ERR "aoe: aoeblk_gdalloc: cannot allocate bufpool " |
219 | "for %ld.%ld\n", d->aoemajor, d->aoeminor); | 217 | "for %ld.%ld\n", d->aoemajor, d->aoeminor); |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 840919bba76c..d3ad9081697e 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -250,6 +250,18 @@ static int irqdma_allocated; | |||
250 | #include <linux/cdrom.h> /* for the compatibility eject ioctl */ | 250 | #include <linux/cdrom.h> /* for the compatibility eject ioctl */ |
251 | #include <linux/completion.h> | 251 | #include <linux/completion.h> |
252 | 252 | ||
253 | /* | ||
254 | * Interrupt freeing also means /proc VFS work - don't do it | ||
255 | * from interrupt context. We push this work into keventd: | ||
256 | */ | ||
257 | static void fd_free_irq_fn(void *data) | ||
258 | { | ||
259 | fd_free_irq(); | ||
260 | } | ||
261 | |||
262 | static DECLARE_WORK(fd_free_irq_work, fd_free_irq_fn, NULL); | ||
263 | |||
264 | |||
253 | static struct request *current_req; | 265 | static struct request *current_req; |
254 | static struct request_queue *floppy_queue; | 266 | static struct request_queue *floppy_queue; |
255 | static void do_fd_request(request_queue_t * q); | 267 | static void do_fd_request(request_queue_t * q); |
@@ -4433,6 +4445,13 @@ static int floppy_grab_irq_and_dma(void) | |||
4433 | return 0; | 4445 | return 0; |
4434 | } | 4446 | } |
4435 | spin_unlock_irqrestore(&floppy_usage_lock, flags); | 4447 | spin_unlock_irqrestore(&floppy_usage_lock, flags); |
4448 | |||
4449 | /* | ||
4450 | * We might have scheduled a free_irq(), wait for it to | ||
4451 | * drain first: | ||
4452 | */ | ||
4453 | flush_scheduled_work(); | ||
4454 | |||
4436 | if (fd_request_irq()) { | 4455 | if (fd_request_irq()) { |
4437 | DPRINT("Unable to grab IRQ%d for the floppy driver\n", | 4456 | DPRINT("Unable to grab IRQ%d for the floppy driver\n", |
4438 | FLOPPY_IRQ); | 4457 | FLOPPY_IRQ); |
@@ -4522,7 +4541,7 @@ static void floppy_release_irq_and_dma(void) | |||
4522 | if (irqdma_allocated) { | 4541 | if (irqdma_allocated) { |
4523 | fd_disable_dma(); | 4542 | fd_disable_dma(); |
4524 | fd_free_dma(); | 4543 | fd_free_dma(); |
4525 | fd_free_irq(); | 4544 | schedule_work(&fd_free_irq_work); |
4526 | irqdma_allocated = 0; | 4545 | irqdma_allocated = 0; |
4527 | } | 4546 | } |
4528 | set_dor(0, ~0, 8); | 4547 | set_dor(0, ~0, 8); |
@@ -4633,6 +4652,8 @@ void cleanup_module(void) | |||
4633 | /* eject disk, if any */ | 4652 | /* eject disk, if any */ |
4634 | fd_eject(0); | 4653 | fd_eject(0); |
4635 | 4654 | ||
4655 | flush_scheduled_work(); /* fd_free_irq() might be pending */ | ||
4656 | |||
4636 | wait_for_completion(&device_release); | 4657 | wait_for_completion(&device_release); |
4637 | } | 4658 | } |
4638 | 4659 | ||
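Editor's note: the floppy change above defers fd_free_irq() to keventd because freeing an interrupt also does /proc VFS work, which must not run in interrupt context, and then flushes the work before the IRQ is re-requested or the module unloads. A hedged, generic sketch of that pattern using the pre-2.6.20 workqueue API seen in the hunk; all names are illustrative.

    #include <linux/workqueue.h>

    static void deferred_release(void *data)
    {
            /* Runs in keventd's process context; safe to sleep and to do
             * /proc bookkeeping, unlike the interrupt-time caller. */
    }

    static DECLARE_WORK(release_work, deferred_release, NULL);

    static void release_from_atomic_context(void)
    {
            schedule_work(&release_work);   /* hand the slow part to keventd */
    }

    static void before_reacquire_or_unload(void)
    {
            flush_scheduled_work();         /* make sure the release has run */
    }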
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 74bf0255e98f..9c3b94e8f03b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -839,7 +839,9 @@ static int loop_set_fd(struct loop_device *lo, struct file *lo_file, | |||
839 | 839 | ||
840 | set_blocksize(bdev, lo_blocksize); | 840 | set_blocksize(bdev, lo_blocksize); |
841 | 841 | ||
842 | kernel_thread(loop_thread, lo, CLONE_KERNEL); | 842 | error = kernel_thread(loop_thread, lo, CLONE_KERNEL); |
843 | if (error < 0) | ||
844 | goto out_putf; | ||
843 | wait_for_completion(&lo->lo_done); | 845 | wait_for_completion(&lo->lo_done); |
844 | return 0; | 846 | return 0; |
845 | 847 | ||
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 1d261f985f31..a04f60693c39 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -230,16 +230,6 @@ static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) | |||
230 | return 1; | 230 | return 1; |
231 | } | 231 | } |
232 | 232 | ||
233 | static void *pkt_rb_alloc(gfp_t gfp_mask, void *data) | ||
234 | { | ||
235 | return kmalloc(sizeof(struct pkt_rb_node), gfp_mask); | ||
236 | } | ||
237 | |||
238 | static void pkt_rb_free(void *ptr, void *data) | ||
239 | { | ||
240 | kfree(ptr); | ||
241 | } | ||
242 | |||
243 | static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) | 233 | static inline struct pkt_rb_node *pkt_rbtree_next(struct pkt_rb_node *node) |
244 | { | 234 | { |
245 | struct rb_node *n = rb_next(&node->rb_node); | 235 | struct rb_node *n = rb_next(&node->rb_node); |
@@ -2073,16 +2063,6 @@ static int pkt_close(struct inode *inode, struct file *file) | |||
2073 | } | 2063 | } |
2074 | 2064 | ||
2075 | 2065 | ||
2076 | static void *psd_pool_alloc(gfp_t gfp_mask, void *data) | ||
2077 | { | ||
2078 | return kmalloc(sizeof(struct packet_stacked_data), gfp_mask); | ||
2079 | } | ||
2080 | |||
2081 | static void psd_pool_free(void *ptr, void *data) | ||
2082 | { | ||
2083 | kfree(ptr); | ||
2084 | } | ||
2085 | |||
2086 | static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) | 2066 | static int pkt_end_io_read_cloned(struct bio *bio, unsigned int bytes_done, int err) |
2087 | { | 2067 | { |
2088 | struct packet_stacked_data *psd = bio->bi_private; | 2068 | struct packet_stacked_data *psd = bio->bi_private; |
@@ -2475,7 +2455,8 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
2475 | if (!pd) | 2455 | if (!pd) |
2476 | return ret; | 2456 | return ret; |
2477 | 2457 | ||
2478 | pd->rb_pool = mempool_create(PKT_RB_POOL_SIZE, pkt_rb_alloc, pkt_rb_free, NULL); | 2458 | pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, |
2459 | sizeof(struct pkt_rb_node)); | ||
2479 | if (!pd->rb_pool) | 2460 | if (!pd->rb_pool) |
2480 | goto out_mem; | 2461 | goto out_mem; |
2481 | 2462 | ||
@@ -2639,7 +2620,8 @@ static int __init pkt_init(void) | |||
2639 | { | 2620 | { |
2640 | int ret; | 2621 | int ret; |
2641 | 2622 | ||
2642 | psd_pool = mempool_create(PSD_POOL_SIZE, psd_pool_alloc, psd_pool_free, NULL); | 2623 | psd_pool = mempool_create_kmalloc_pool(PSD_POOL_SIZE, |
2624 | sizeof(struct packet_stacked_data)); | ||
2643 | if (!psd_pool) | 2625 | if (!psd_pool) |
2644 | return -ENOMEM; | 2626 | return -ENOMEM; |
2645 | 2627 | ||
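Editor's note: the cfq, aoe and pktcdvd hunks above all replace open-coded mempool_create() calls, and in pktcdvd its private kmalloc/kfree wrappers, with the mempool_create_slab_pool() and mempool_create_kmalloc_pool() convenience helpers. A hedged sketch of both helpers in use; the cache, struct and function names are illustrative.

    #include <linux/mempool.h>
    #include <linux/slab.h>

    struct demo_node {
            int payload;
    };

    static struct kmem_cache *demo_cache;   /* set up via kmem_cache_create() */
    static mempool_t *demo_slab_pool;
    static mempool_t *demo_kmalloc_pool;

    static int demo_pools_init(void)
    {
            /* Shorthand for mempool_create(16, mempool_alloc_slab,
             * mempool_free_slab, demo_cache). */
            demo_slab_pool = mempool_create_slab_pool(16, demo_cache);
            if (!demo_slab_pool)
                    return -ENOMEM;

            /* Replaces a hand-written kmalloc()/kfree() pair such as the
             * pkt_rb_alloc()/pkt_rb_free() helpers deleted above. */
            demo_kmalloc_pool = mempool_create_kmalloc_pool(16,
                                            sizeof(struct demo_node));
            if (!demo_kmalloc_pool) {
                    mempool_destroy(demo_slab_pool);
                    return -ENOMEM;
            }
            return 0;
    }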
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 5980f3e886fc..facc3f1d9e37 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -187,6 +187,7 @@ config MOXA_SMARTIO | |||
187 | config ISI | 187 | config ISI |
188 | tristate "Multi-Tech multiport card support (EXPERIMENTAL)" | 188 | tristate "Multi-Tech multiport card support (EXPERIMENTAL)" |
189 | depends on SERIAL_NONSTANDARD | 189 | depends on SERIAL_NONSTANDARD |
190 | select FW_LOADER | ||
190 | help | 191 | help |
191 | This is a driver for the Multi-Tech cards which provide several | 192 | This is a driver for the Multi-Tech cards which provide several |
192 | serial ports. The driver is experimental and can currently only be | 193 | serial ports. The driver is experimental and can currently only be |
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 7c0684deea06..932feedda262 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c | |||
@@ -90,7 +90,7 @@ static unsigned int ipmi_poll(struct file *file, poll_table *wait) | |||
90 | 90 | ||
91 | spin_lock_irqsave(&priv->recv_msg_lock, flags); | 91 | spin_lock_irqsave(&priv->recv_msg_lock, flags); |
92 | 92 | ||
93 | if (! list_empty(&(priv->recv_msgs))) | 93 | if (!list_empty(&(priv->recv_msgs))) |
94 | mask |= (POLLIN | POLLRDNORM); | 94 | mask |= (POLLIN | POLLRDNORM); |
95 | 95 | ||
96 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); | 96 | spin_unlock_irqrestore(&priv->recv_msg_lock, flags); |
@@ -789,21 +789,53 @@ MODULE_PARM_DESC(ipmi_major, "Sets the major number of the IPMI device. By" | |||
789 | " interface. Other values will set the major device number" | 789 | " interface. Other values will set the major device number" |
790 | " to that value."); | 790 | " to that value."); |
791 | 791 | ||
792 | /* Keep track of the devices that are registered. */ | ||
793 | struct ipmi_reg_list { | ||
794 | dev_t dev; | ||
795 | struct list_head link; | ||
796 | }; | ||
797 | static LIST_HEAD(reg_list); | ||
798 | static DEFINE_MUTEX(reg_list_mutex); | ||
799 | |||
792 | static struct class *ipmi_class; | 800 | static struct class *ipmi_class; |
793 | 801 | ||
794 | static void ipmi_new_smi(int if_num) | 802 | static void ipmi_new_smi(int if_num, struct device *device) |
795 | { | 803 | { |
796 | dev_t dev = MKDEV(ipmi_major, if_num); | 804 | dev_t dev = MKDEV(ipmi_major, if_num); |
805 | struct ipmi_reg_list *entry; | ||
797 | 806 | ||
798 | devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, | 807 | devfs_mk_cdev(dev, S_IFCHR | S_IRUSR | S_IWUSR, |
799 | "ipmidev/%d", if_num); | 808 | "ipmidev/%d", if_num); |
800 | 809 | ||
801 | class_device_create(ipmi_class, NULL, dev, NULL, "ipmi%d", if_num); | 810 | entry = kmalloc(sizeof(*entry), GFP_KERNEL); |
811 | if (!entry) { | ||
812 | printk(KERN_ERR "ipmi_devintf: Unable to create the" | ||
813 | " ipmi class device link\n"); | ||
814 | return; | ||
815 | } | ||
816 | entry->dev = dev; | ||
817 | |||
818 | mutex_lock(®_list_mutex); | ||
819 | class_device_create(ipmi_class, NULL, dev, device, "ipmi%d", if_num); | ||
820 | list_add(&entry->link, ®_list); | ||
821 | mutex_unlock(®_list_mutex); | ||
802 | } | 822 | } |
803 | 823 | ||
804 | static void ipmi_smi_gone(int if_num) | 824 | static void ipmi_smi_gone(int if_num) |
805 | { | 825 | { |
806 | class_device_destroy(ipmi_class, MKDEV(ipmi_major, if_num)); | 826 | dev_t dev = MKDEV(ipmi_major, if_num); |
827 | struct ipmi_reg_list *entry; | ||
828 | |||
829 | mutex_lock(®_list_mutex); | ||
830 | list_for_each_entry(entry, ®_list, link) { | ||
831 | if (entry->dev == dev) { | ||
832 | list_del(&entry->link); | ||
833 | kfree(entry); | ||
834 | break; | ||
835 | } | ||
836 | } | ||
837 | class_device_destroy(ipmi_class, dev); | ||
838 | mutex_unlock(®_list_mutex); | ||
807 | devfs_remove("ipmidev/%d", if_num); | 839 | devfs_remove("ipmidev/%d", if_num); |
808 | } | 840 | } |
809 | 841 | ||
@@ -856,6 +888,14 @@ module_init(init_ipmi_devintf); | |||
856 | 888 | ||
857 | static __exit void cleanup_ipmi(void) | 889 | static __exit void cleanup_ipmi(void) |
858 | { | 890 | { |
891 | struct ipmi_reg_list *entry, *entry2; | ||
892 | mutex_lock(®_list_mutex); | ||
893 | list_for_each_entry_safe(entry, entry2, ®_list, link) { | ||
894 | list_del(&entry->link); | ||
895 | class_device_destroy(ipmi_class, entry->dev); | ||
896 | kfree(entry); | ||
897 | } | ||
898 | mutex_unlock(®_list_mutex); | ||
859 | class_destroy(ipmi_class); | 899 | class_destroy(ipmi_class); |
860 | ipmi_smi_watcher_unregister(&smi_watcher); | 900 | ipmi_smi_watcher_unregister(&smi_watcher); |
861 | devfs_remove(DEVICE_NAME); | 901 | devfs_remove(DEVICE_NAME); |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index abd4c5118a1b..b8fb87c6c29f 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -48,7 +48,7 @@ | |||
48 | 48 | ||
49 | #define PFX "IPMI message handler: " | 49 | #define PFX "IPMI message handler: " |
50 | 50 | ||
51 | #define IPMI_DRIVER_VERSION "38.0" | 51 | #define IPMI_DRIVER_VERSION "39.0" |
52 | 52 | ||
53 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); | 53 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); |
54 | static int ipmi_init_msghandler(void); | 54 | static int ipmi_init_msghandler(void); |
@@ -162,6 +162,28 @@ struct ipmi_proc_entry | |||
162 | }; | 162 | }; |
163 | #endif | 163 | #endif |
164 | 164 | ||
165 | struct bmc_device | ||
166 | { | ||
167 | struct platform_device *dev; | ||
168 | struct ipmi_device_id id; | ||
169 | unsigned char guid[16]; | ||
170 | int guid_set; | ||
171 | |||
172 | struct kref refcount; | ||
173 | |||
174 | /* bmc device attributes */ | ||
175 | struct device_attribute device_id_attr; | ||
176 | struct device_attribute provides_dev_sdrs_attr; | ||
177 | struct device_attribute revision_attr; | ||
178 | struct device_attribute firmware_rev_attr; | ||
179 | struct device_attribute version_attr; | ||
180 | struct device_attribute add_dev_support_attr; | ||
181 | struct device_attribute manufacturer_id_attr; | ||
182 | struct device_attribute product_id_attr; | ||
183 | struct device_attribute guid_attr; | ||
184 | struct device_attribute aux_firmware_rev_attr; | ||
185 | }; | ||
186 | |||
165 | #define IPMI_IPMB_NUM_SEQ 64 | 187 | #define IPMI_IPMB_NUM_SEQ 64 |
166 | #define IPMI_MAX_CHANNELS 16 | 188 | #define IPMI_MAX_CHANNELS 16 |
167 | struct ipmi_smi | 189 | struct ipmi_smi |
@@ -178,9 +200,8 @@ struct ipmi_smi | |||
178 | /* Used for wake ups at startup. */ | 200 | /* Used for wake ups at startup. */ |
179 | wait_queue_head_t waitq; | 201 | wait_queue_head_t waitq; |
180 | 202 | ||
181 | /* The IPMI version of the BMC on the other end. */ | 203 | struct bmc_device *bmc; |
182 | unsigned char version_major; | 204 | char *my_dev_name; |
183 | unsigned char version_minor; | ||
184 | 205 | ||
185 | /* This is the lower-layer's sender routine. */ | 206 | /* This is the lower-layer's sender routine. */ |
186 | struct ipmi_smi_handlers *handlers; | 207 | struct ipmi_smi_handlers *handlers; |
@@ -194,6 +215,9 @@ struct ipmi_smi | |||
194 | struct ipmi_proc_entry *proc_entries; | 215 | struct ipmi_proc_entry *proc_entries; |
195 | #endif | 216 | #endif |
196 | 217 | ||
218 | /* Driver-model device for the system interface. */ | ||
219 | struct device *si_dev; | ||
220 | |||
197 | /* A table of sequence numbers for this interface. We use the | 221 | /* A table of sequence numbers for this interface. We use the |
198 | sequence numbers for IPMB messages that go out of the | 222 | sequence numbers for IPMB messages that go out of the |
199 | interface to match them up with their responses. A routine | 223 | interface to match them up with their responses. A routine |
@@ -312,6 +336,7 @@ struct ipmi_smi | |||
312 | /* Events that were received with the proper format. */ | 336 | /* Events that were received with the proper format. */ |
313 | unsigned int events; | 337 | unsigned int events; |
314 | }; | 338 | }; |
339 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) | ||
315 | 340 | ||
316 | /* Used to mark an interface entry that cannot be used but is not a | 341 | /* Used to mark an interface entry that cannot be used but is not a |
317 | * free entry, either, primarily used at creation and deletion time so | 342 | * free entry, either, primarily used at creation and deletion time so |
@@ -320,6 +345,15 @@ struct ipmi_smi | |||
320 | #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ | 345 | #define IPMI_INVALID_INTERFACE(i) (((i) == NULL) \ |
321 | || (i == IPMI_INVALID_INTERFACE_ENTRY)) | 346 | || (i == IPMI_INVALID_INTERFACE_ENTRY)) |
322 | 347 | ||
348 | /** | ||
349 | * The driver model view of the IPMI messaging driver. | ||
350 | */ | ||
351 | static struct device_driver ipmidriver = { | ||
352 | .name = "ipmi", | ||
353 | .bus = &platform_bus_type | ||
354 | }; | ||
355 | static DEFINE_MUTEX(ipmidriver_mutex); | ||
356 | |||
323 | #define MAX_IPMI_INTERFACES 4 | 357 | #define MAX_IPMI_INTERFACES 4 |
324 | static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; | 358 | static ipmi_smi_t ipmi_interfaces[MAX_IPMI_INTERFACES]; |
325 | 359 | ||
@@ -393,7 +427,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | |||
393 | if (IPMI_INVALID_INTERFACE(intf)) | 427 | if (IPMI_INVALID_INTERFACE(intf)) |
394 | continue; | 428 | continue; |
395 | spin_unlock_irqrestore(&interfaces_lock, flags); | 429 | spin_unlock_irqrestore(&interfaces_lock, flags); |
396 | watcher->new_smi(i); | 430 | watcher->new_smi(i, intf->si_dev); |
397 | spin_lock_irqsave(&interfaces_lock, flags); | 431 | spin_lock_irqsave(&interfaces_lock, flags); |
398 | } | 432 | } |
399 | spin_unlock_irqrestore(&interfaces_lock, flags); | 433 | spin_unlock_irqrestore(&interfaces_lock, flags); |
@@ -409,14 +443,14 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | |||
409 | } | 443 | } |
410 | 444 | ||
411 | static void | 445 | static void |
412 | call_smi_watchers(int i) | 446 | call_smi_watchers(int i, struct device *dev) |
413 | { | 447 | { |
414 | struct ipmi_smi_watcher *w; | 448 | struct ipmi_smi_watcher *w; |
415 | 449 | ||
416 | down_read(&smi_watchers_sem); | 450 | down_read(&smi_watchers_sem); |
417 | list_for_each_entry(w, &smi_watchers, link) { | 451 | list_for_each_entry(w, &smi_watchers, link) { |
418 | if (try_module_get(w->owner)) { | 452 | if (try_module_get(w->owner)) { |
419 | w->new_smi(i); | 453 | w->new_smi(i, dev); |
420 | module_put(w->owner); | 454 | module_put(w->owner); |
421 | } | 455 | } |
422 | } | 456 | } |
@@ -844,8 +878,8 @@ void ipmi_get_version(ipmi_user_t user, | |||
844 | unsigned char *major, | 878 | unsigned char *major, |
845 | unsigned char *minor) | 879 | unsigned char *minor) |
846 | { | 880 | { |
847 | *major = user->intf->version_major; | 881 | *major = ipmi_version_major(&user->intf->bmc->id); |
848 | *minor = user->intf->version_minor; | 882 | *minor = ipmi_version_minor(&user->intf->bmc->id); |
849 | } | 883 | } |
850 | 884 | ||
851 | int ipmi_set_my_address(ipmi_user_t user, | 885 | int ipmi_set_my_address(ipmi_user_t user, |
@@ -1553,7 +1587,8 @@ static int version_file_read_proc(char *page, char **start, off_t off, | |||
1553 | ipmi_smi_t intf = data; | 1587 | ipmi_smi_t intf = data; |
1554 | 1588 | ||
1555 | return sprintf(out, "%d.%d\n", | 1589 | return sprintf(out, "%d.%d\n", |
1556 | intf->version_major, intf->version_minor); | 1590 | ipmi_version_major(&intf->bmc->id), |
1591 | ipmi_version_minor(&intf->bmc->id)); | ||
1557 | } | 1592 | } |
1558 | 1593 | ||
1559 | static int stat_file_read_proc(char *page, char **start, off_t off, | 1594 | static int stat_file_read_proc(char *page, char **start, off_t off, |
@@ -1712,6 +1747,470 @@ static void remove_proc_entries(ipmi_smi_t smi) | |||
1712 | #endif /* CONFIG_PROC_FS */ | 1747 | #endif /* CONFIG_PROC_FS */ |
1713 | } | 1748 | } |
1714 | 1749 | ||
1750 | static int __find_bmc_guid(struct device *dev, void *data) | ||
1751 | { | ||
1752 | unsigned char *id = data; | ||
1753 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1754 | return memcmp(bmc->guid, id, 16) == 0; | ||
1755 | } | ||
1756 | |||
1757 | static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv, | ||
1758 | unsigned char *guid) | ||
1759 | { | ||
1760 | struct device *dev; | ||
1761 | |||
1762 | dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); | ||
1763 | if (dev) | ||
1764 | return dev_get_drvdata(dev); | ||
1765 | else | ||
1766 | return NULL; | ||
1767 | } | ||
1768 | |||
1769 | struct prod_dev_id { | ||
1770 | unsigned int product_id; | ||
1771 | unsigned char device_id; | ||
1772 | }; | ||
1773 | |||
1774 | static int __find_bmc_prod_dev_id(struct device *dev, void *data) | ||
1775 | { | ||
1776 | struct prod_dev_id *id = data; | ||
1777 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1778 | |||
1779 | return (bmc->id.product_id == id->product_id | ||
1781 | && bmc->id.device_id == id->device_id); | ||
1782 | } | ||
1783 | |||
1784 | static struct bmc_device *ipmi_find_bmc_prod_dev_id( | ||
1785 | struct device_driver *drv, | ||
1786 | unsigned int product_id, unsigned char device_id) | ||
1787 | { | ||
1788 | struct prod_dev_id id = { | ||
1789 | .product_id = product_id, | ||
1790 | .device_id = device_id, | ||
1791 | }; | ||
1792 | struct device *dev; | ||
1793 | |||
1794 | dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); | ||
1795 | if (dev) | ||
1796 | return dev_get_drvdata(dev); | ||
1797 | else | ||
1798 | return NULL; | ||
1799 | } | ||
1800 | |||
1801 | static ssize_t device_id_show(struct device *dev, | ||
1802 | struct device_attribute *attr, | ||
1803 | char *buf) | ||
1804 | { | ||
1805 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1806 | |||
1807 | return snprintf(buf, 10, "%u\n", bmc->id.device_id); | ||
1808 | } | ||
1809 | |||
1810 | static ssize_t provides_dev_sdrs_show(struct device *dev, | ||
1811 | struct device_attribute *attr, | ||
1812 | char *buf) | ||
1813 | { | ||
1814 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1815 | |||
1816 | return snprintf(buf, 10, "%u\n", | ||
1817 | (bmc->id.device_revision & 0x80) >> 7); | ||
1818 | } | ||
1819 | |||
1820 | static ssize_t revision_show(struct device *dev, struct device_attribute *attr, | ||
1821 | char *buf) | ||
1822 | { | ||
1823 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1824 | |||
1825 | return snprintf(buf, 20, "%u\n", | ||
1826 | bmc->id.device_revision & 0x0F); | ||
1827 | } | ||
1828 | |||
1829 | static ssize_t firmware_rev_show(struct device *dev, | ||
1830 | struct device_attribute *attr, | ||
1831 | char *buf) | ||
1832 | { | ||
1833 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1834 | |||
1835 | return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, | ||
1836 | bmc->id.firmware_revision_2); | ||
1837 | } | ||
1838 | |||
1839 | static ssize_t ipmi_version_show(struct device *dev, | ||
1840 | struct device_attribute *attr, | ||
1841 | char *buf) | ||
1842 | { | ||
1843 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1844 | |||
1845 | return snprintf(buf, 20, "%u.%u\n", | ||
1846 | ipmi_version_major(&bmc->id), | ||
1847 | ipmi_version_minor(&bmc->id)); | ||
1848 | } | ||
1849 | |||
1850 | static ssize_t add_dev_support_show(struct device *dev, | ||
1851 | struct device_attribute *attr, | ||
1852 | char *buf) | ||
1853 | { | ||
1854 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1855 | |||
1856 | return snprintf(buf, 10, "0x%02x\n", | ||
1857 | bmc->id.additional_device_support); | ||
1858 | } | ||
1859 | |||
1860 | static ssize_t manufacturer_id_show(struct device *dev, | ||
1861 | struct device_attribute *attr, | ||
1862 | char *buf) | ||
1863 | { | ||
1864 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1865 | |||
1866 | return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); | ||
1867 | } | ||
1868 | |||
1869 | static ssize_t product_id_show(struct device *dev, | ||
1870 | struct device_attribute *attr, | ||
1871 | char *buf) | ||
1872 | { | ||
1873 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1874 | |||
1875 | return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); | ||
1876 | } | ||
1877 | |||
1878 | static ssize_t aux_firmware_rev_show(struct device *dev, | ||
1879 | struct device_attribute *attr, | ||
1880 | char *buf) | ||
1881 | { | ||
1882 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1883 | |||
1884 | return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", | ||
1885 | bmc->id.aux_firmware_revision[3], | ||
1886 | bmc->id.aux_firmware_revision[2], | ||
1887 | bmc->id.aux_firmware_revision[1], | ||
1888 | bmc->id.aux_firmware_revision[0]); | ||
1889 | } | ||
1890 | |||
1891 | static ssize_t guid_show(struct device *dev, struct device_attribute *attr, | ||
1892 | char *buf) | ||
1893 | { | ||
1894 | struct bmc_device *bmc = dev_get_drvdata(dev); | ||
1895 | |||
1896 | return snprintf(buf, 100, "%Lx%Lx\n", | ||
1897 | (long long) bmc->guid[0], | ||
1898 | (long long) bmc->guid[8]); | ||
1899 | } | ||
1900 | |||
1901 | static void | ||
1902 | cleanup_bmc_device(struct kref *ref) | ||
1903 | { | ||
1904 | struct bmc_device *bmc; | ||
1905 | |||
1906 | bmc = container_of(ref, struct bmc_device, refcount); | ||
1907 | |||
1908 | device_remove_file(&bmc->dev->dev, | ||
1909 | &bmc->device_id_attr); | ||
1910 | device_remove_file(&bmc->dev->dev, | ||
1911 | &bmc->provides_dev_sdrs_attr); | ||
1912 | device_remove_file(&bmc->dev->dev, | ||
1913 | &bmc->revision_attr); | ||
1914 | device_remove_file(&bmc->dev->dev, | ||
1915 | &bmc->firmware_rev_attr); | ||
1916 | device_remove_file(&bmc->dev->dev, | ||
1917 | &bmc->version_attr); | ||
1918 | device_remove_file(&bmc->dev->dev, | ||
1919 | &bmc->add_dev_support_attr); | ||
1920 | device_remove_file(&bmc->dev->dev, | ||
1921 | &bmc->manufacturer_id_attr); | ||
1922 | device_remove_file(&bmc->dev->dev, | ||
1923 | &bmc->product_id_attr); | ||
1924 | if (bmc->id.aux_firmware_revision_set) | ||
1925 | device_remove_file(&bmc->dev->dev, | ||
1926 | &bmc->aux_firmware_rev_attr); | ||
1927 | if (bmc->guid_set) | ||
1928 | device_remove_file(&bmc->dev->dev, | ||
1929 | &bmc->guid_attr); | ||
1930 | platform_device_unregister(bmc->dev); | ||
1931 | kfree(bmc); | ||
1932 | } | ||
1933 | |||
1934 | static void ipmi_bmc_unregister(ipmi_smi_t intf) | ||
1935 | { | ||
1936 | struct bmc_device *bmc = intf->bmc; | ||
1937 | |||
1938 | sysfs_remove_link(&intf->si_dev->kobj, "bmc"); | ||
1939 | if (intf->my_dev_name) { | ||
1940 | sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); | ||
1941 | kfree(intf->my_dev_name); | ||
1942 | intf->my_dev_name = NULL; | ||
1943 | } | ||
1944 | |||
1945 | mutex_lock(&ipmidriver_mutex); | ||
1946 | kref_put(&bmc->refcount, cleanup_bmc_device); | ||
1947 | mutex_unlock(&ipmidriver_mutex); | ||
1948 | } | ||
1949 | |||
1950 | static int ipmi_bmc_register(ipmi_smi_t intf) | ||
1951 | { | ||
1952 | int rv; | ||
1953 | struct bmc_device *bmc = intf->bmc; | ||
1954 | struct bmc_device *old_bmc; | ||
1955 | int size; | ||
1956 | char dummy[1]; | ||
1957 | |||
1958 | mutex_lock(&ipmidriver_mutex); | ||
1959 | |||
1960 | /* | ||
1961 | * Try to find if there is already a bmc_device struct | ||
1962 | * representing the interfaced BMC. | ||
1963 | */ | ||
1964 | if (bmc->guid_set) | ||
1965 | old_bmc = ipmi_find_bmc_guid(&ipmidriver, bmc->guid); | ||
1966 | else | ||
1967 | old_bmc = ipmi_find_bmc_prod_dev_id(&ipmidriver, | ||
1968 | bmc->id.product_id, | ||
1969 | bmc->id.device_id); | ||
1970 | |||
1971 | /* | ||
1972 | * If there is already a bmc_device, free the new one, | ||
1973 | * otherwise register the new BMC device. | ||
1974 | */ | ||
1975 | if (old_bmc) { | ||
1976 | kfree(bmc); | ||
1977 | intf->bmc = old_bmc; | ||
1978 | bmc = old_bmc; | ||
1979 | |||
1980 | kref_get(&bmc->refcount); | ||
1981 | mutex_unlock(&ipmidriver_mutex); | ||
1982 | |||
1983 | printk(KERN_INFO | ||
1984 | "ipmi: interfacing existing BMC (man_id: 0x%6.6x," | ||
1985 | " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", | ||
1986 | bmc->id.manufacturer_id, | ||
1987 | bmc->id.product_id, | ||
1988 | bmc->id.device_id); | ||
1989 | } else { | ||
1990 | bmc->dev = platform_device_alloc("ipmi_bmc", | ||
1991 | bmc->id.device_id); | ||
1992 | if (! bmc->dev) { | ||
1993 | printk(KERN_ERR "ipmi_msghandler:" | ||
1994 | " Unable to allocate platform device\n"); | ||
1995 | mutex_unlock(&ipmidriver_mutex); | ||
1996 | return -ENOMEM; | ||
1997 | } | ||
1998 | bmc->dev->dev.driver = &ipmidriver; | ||
1999 | dev_set_drvdata(&bmc->dev->dev, bmc); | ||
2000 | kref_init(&bmc->refcount); | ||
2001 | |||
2002 | rv = platform_device_register(bmc->dev); | ||
2003 | mutex_unlock(&ipmidriver_mutex); | ||
2004 | if (rv) { | ||
2005 | printk(KERN_ERR | ||
2006 | "ipmi_msghandler:" | ||
2007 | " Unable to register bmc device: %d\n", | ||
2008 | rv); | ||
2009 | /* Don't go to out_err, you can only do that if | ||
2010 | the device is registered already. */ | ||
2011 | return rv; | ||
2012 | } | ||
2013 | |||
2014 | bmc->device_id_attr.attr.name = "device_id"; | ||
2015 | bmc->device_id_attr.attr.owner = THIS_MODULE; | ||
2016 | bmc->device_id_attr.attr.mode = S_IRUGO; | ||
2017 | bmc->device_id_attr.show = device_id_show; | ||
2018 | |||
2019 | bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs"; | ||
2020 | bmc->provides_dev_sdrs_attr.attr.owner = THIS_MODULE; | ||
2021 | bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO; | ||
2022 | bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show; | ||
2023 | |||
2024 | |||
2025 | bmc->revision_attr.attr.name = "revision"; | ||
2026 | bmc->revision_attr.attr.owner = THIS_MODULE; | ||
2027 | bmc->revision_attr.attr.mode = S_IRUGO; | ||
2028 | bmc->revision_attr.show = revision_show; | ||
2029 | |||
2030 | bmc->firmware_rev_attr.attr.name = "firmware_revision"; | ||
2031 | bmc->firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2032 | bmc->firmware_rev_attr.attr.mode = S_IRUGO; | ||
2033 | bmc->firmware_rev_attr.show = firmware_rev_show; | ||
2034 | |||
2035 | bmc->version_attr.attr.name = "ipmi_version"; | ||
2036 | bmc->version_attr.attr.owner = THIS_MODULE; | ||
2037 | bmc->version_attr.attr.mode = S_IRUGO; | ||
2038 | bmc->version_attr.show = ipmi_version_show; | ||
2039 | |||
2040 | bmc->add_dev_support_attr.attr.name | ||
2041 | = "additional_device_support"; | ||
2042 | bmc->add_dev_support_attr.attr.owner = THIS_MODULE; | ||
2043 | bmc->add_dev_support_attr.attr.mode = S_IRUGO; | ||
2044 | bmc->add_dev_support_attr.show = add_dev_support_show; | ||
2045 | |||
2046 | bmc->manufacturer_id_attr.attr.name = "manufacturer_id"; | ||
2047 | bmc->manufacturer_id_attr.attr.owner = THIS_MODULE; | ||
2048 | bmc->manufacturer_id_attr.attr.mode = S_IRUGO; | ||
2049 | bmc->manufacturer_id_attr.show = manufacturer_id_show; | ||
2050 | |||
2051 | bmc->product_id_attr.attr.name = "product_id"; | ||
2052 | bmc->product_id_attr.attr.owner = THIS_MODULE; | ||
2053 | bmc->product_id_attr.attr.mode = S_IRUGO; | ||
2054 | bmc->product_id_attr.show = product_id_show; | ||
2055 | |||
2056 | bmc->guid_attr.attr.name = "guid"; | ||
2057 | bmc->guid_attr.attr.owner = THIS_MODULE; | ||
2058 | bmc->guid_attr.attr.mode = S_IRUGO; | ||
2059 | bmc->guid_attr.show = guid_show; | ||
2060 | |||
2061 | bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision"; | ||
2062 | bmc->aux_firmware_rev_attr.attr.owner = THIS_MODULE; | ||
2063 | bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO; | ||
2064 | bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show; | ||
2065 | |||
2066 | device_create_file(&bmc->dev->dev, | ||
2067 | &bmc->device_id_attr); | ||
2068 | device_create_file(&bmc->dev->dev, | ||
2069 | &bmc->provides_dev_sdrs_attr); | ||
2070 | device_create_file(&bmc->dev->dev, | ||
2071 | &bmc->revision_attr); | ||
2072 | device_create_file(&bmc->dev->dev, | ||
2073 | &bmc->firmware_rev_attr); | ||
2074 | device_create_file(&bmc->dev->dev, | ||
2075 | &bmc->version_attr); | ||
2076 | device_create_file(&bmc->dev->dev, | ||
2077 | &bmc->add_dev_support_attr); | ||
2078 | device_create_file(&bmc->dev->dev, | ||
2079 | &bmc->manufacturer_id_attr); | ||
2080 | device_create_file(&bmc->dev->dev, | ||
2081 | &bmc->product_id_attr); | ||
2082 | if (bmc->id.aux_firmware_revision_set) | ||
2083 | device_create_file(&bmc->dev->dev, | ||
2084 | &bmc->aux_firmware_rev_attr); | ||
2085 | if (bmc->guid_set) | ||
2086 | device_create_file(&bmc->dev->dev, | ||
2087 | &bmc->guid_attr); | ||
2088 | |||
2089 | printk(KERN_INFO | ||
2090 | "ipmi: Found new BMC (man_id: 0x%6.6x, " | ||
2091 | " prod_id: 0x%4.4x, dev_id: 0x%2.2x)\n", | ||
2092 | bmc->id.manufacturer_id, | ||
2093 | bmc->id.product_id, | ||
2094 | bmc->id.device_id); | ||
2095 | } | ||
2096 | |||
2097 | /* | ||
2098 | * create symlink from system interface device to bmc device | ||
2099 | * and back. | ||
2100 | */ | ||
2101 | rv = sysfs_create_link(&intf->si_dev->kobj, | ||
2102 | &bmc->dev->dev.kobj, "bmc"); | ||
2103 | if (rv) { | ||
2104 | printk(KERN_ERR | ||
2105 | "ipmi_msghandler: Unable to create bmc symlink: %d\n", | ||
2106 | rv); | ||
2107 | goto out_err; | ||
2108 | } | ||
2109 | |||
2110 | size = snprintf(dummy, 0, "ipmi%d", intf->intf_num); | ||
2111 | intf->my_dev_name = kmalloc(size+1, GFP_KERNEL); | ||
2112 | if (!intf->my_dev_name) { | ||
2113 | rv = -ENOMEM; | ||
2114 | printk(KERN_ERR | ||
2115 | "ipmi_msghandler: allocate link from BMC: %d\n", | ||
2116 | rv); | ||
2117 | goto out_err; | ||
2118 | } | ||
2119 | snprintf(intf->my_dev_name, size+1, "ipmi%d", intf->intf_num); | ||
2120 | |||
2121 | rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj, | ||
2122 | intf->my_dev_name); | ||
2123 | if (rv) { | ||
2124 | kfree(intf->my_dev_name); | ||
2125 | intf->my_dev_name = NULL; | ||
2126 | printk(KERN_ERR | ||
2127 | "ipmi_msghandler:" | ||
2128 | " Unable to create symlink to bmc: %d\n", | ||
2129 | rv); | ||
2130 | goto out_err; | ||
2131 | } | ||
2132 | |||
2133 | return 0; | ||
2134 | |||
2135 | out_err: | ||
2136 | ipmi_bmc_unregister(intf); | ||
2137 | return rv; | ||
2138 | } | ||
2139 | |||
2140 | static int | ||
2141 | send_guid_cmd(ipmi_smi_t intf, int chan) | ||
2142 | { | ||
2143 | struct kernel_ipmi_msg msg; | ||
2144 | struct ipmi_system_interface_addr si; | ||
2145 | |||
2146 | si.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | ||
2147 | si.channel = IPMI_BMC_CHANNEL; | ||
2148 | si.lun = 0; | ||
2149 | |||
2150 | msg.netfn = IPMI_NETFN_APP_REQUEST; | ||
2151 | msg.cmd = IPMI_GET_DEVICE_GUID_CMD; | ||
2152 | msg.data = NULL; | ||
2153 | msg.data_len = 0; | ||
2154 | return i_ipmi_request(NULL, | ||
2155 | intf, | ||
2156 | (struct ipmi_addr *) &si, | ||
2157 | 0, | ||
2158 | &msg, | ||
2159 | intf, | ||
2160 | NULL, | ||
2161 | NULL, | ||
2162 | 0, | ||
2163 | intf->channels[0].address, | ||
2164 | intf->channels[0].lun, | ||
2165 | -1, 0); | ||
2166 | } | ||
2167 | |||
2168 | static void | ||
2169 | guid_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | ||
2170 | { | ||
2171 | if ((msg->addr.addr_type != IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | ||
2172 | || (msg->msg.netfn != IPMI_NETFN_APP_RESPONSE) | ||
2173 | || (msg->msg.cmd != IPMI_GET_DEVICE_GUID_CMD)) | ||
2174 | /* Not for me */ | ||
2175 | return; | ||
2176 | |||
2177 | if (msg->msg.data[0] != 0) { | ||
2178 | /* Error from getting the GUID, the BMC doesn't have one. */ | ||
2179 | intf->bmc->guid_set = 0; | ||
2180 | goto out; | ||
2181 | } | ||
2182 | |||
2183 | if (msg->msg.data_len < 17) { | ||
2184 | intf->bmc->guid_set = 0; | ||
2185 | printk(KERN_WARNING PFX | ||
2186 | "guid_handler: The GUID response from the BMC was too" | ||
2187 | " short, it was %d but should have been 17. Assuming" | ||
2188 | " GUID is not available.\n", | ||
2189 | msg->msg.data_len); | ||
2190 | goto out; | ||
2191 | } | ||
2192 | |||
2193 | memcpy(intf->bmc->guid, msg->msg.data + 1, 16); | ||
2194 | intf->bmc->guid_set = 1; | ||
2195 | out: | ||
2196 | wake_up(&intf->waitq); | ||
2197 | } | ||
2198 | |||
2199 | static void | ||
2200 | get_guid(ipmi_smi_t intf) | ||
2201 | { | ||
2202 | int rv; | ||
2203 | |||
2204 | intf->bmc->guid_set = 0x2; | ||
2205 | intf->null_user_handler = guid_handler; | ||
2206 | rv = send_guid_cmd(intf, 0); | ||
2207 | if (rv) | ||
2208 | /* Send failed, no GUID available. */ | ||
2209 | intf->bmc->guid_set = 0; | ||
2210 | wait_event(intf->waitq, intf->bmc->guid_set != 2); | ||
2211 | intf->null_user_handler = NULL; | ||
2212 | } | ||
2213 | |||
1715 | static int | 2214 | static int |
1716 | send_channel_info_cmd(ipmi_smi_t intf, int chan) | 2215 | send_channel_info_cmd(ipmi_smi_t intf, int chan) |
1717 | { | 2216 | { |
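The attribute plumbing above is repetitive but mechanical; a further read-only BMC attribute would follow the same pattern. A sketch, under the assumption that struct bmc_device grows a matching example_attr field (the attribute name and the value shown are purely illustrative):

	static ssize_t example_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
	{
		struct bmc_device *bmc = dev_get_drvdata(dev);

		/* expose whichever field the attribute is meant to report */
		return snprintf(buf, 10, "%u\n", bmc->id.device_id);
	}

		/* in ipmi_bmc_register(), alongside the other attributes: */
		bmc->example_attr.attr.name  = "example";
		bmc->example_attr.attr.owner = THIS_MODULE;
		bmc->example_attr.attr.mode  = S_IRUGO;
		bmc->example_attr.show       = example_show;
		device_create_file(&bmc->dev->dev, &bmc->example_attr);
		/* ...with a matching device_remove_file() in cleanup_bmc_device() */

The resulting files live under the ipmi_bmc.%d platform device and are reachable from the interface's own device through the "bmc" symlink created above.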
@@ -1804,8 +2303,8 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
1804 | 2303 | ||
1805 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 2304 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, |
1806 | void *send_info, | 2305 | void *send_info, |
1807 | unsigned char version_major, | 2306 | struct ipmi_device_id *device_id, |
1808 | unsigned char version_minor, | 2307 | struct device *si_dev, |
1809 | unsigned char slave_addr, | 2308 | unsigned char slave_addr, |
1810 | ipmi_smi_t *new_intf) | 2309 | ipmi_smi_t *new_intf) |
1811 | { | 2310 | { |
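With the two version bytes replaced by a full struct ipmi_device_id and a struct device, a low-level driver's registration call changes shape accordingly. A sketch of a caller against the new prototype (the smi_info field names are borrowed from ipmi_si_intf.c and the exact call there may differ):

	rv = ipmi_register_smi(&handlers,
			       new_smi,			/* send_info */
			       &new_smi->device_id,	/* parsed Get Device ID */
			       new_smi->dev,		/* driver-model device */
			       new_smi->slave_addr,
			       &new_smi->intf);
	if (rv) {
		printk(KERN_ERR
		       "ipmi_si: Unable to register device: error %d\n", rv);
		return rv;
	}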
@@ -1813,7 +2312,11 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1813 | int rv; | 2312 | int rv; |
1814 | ipmi_smi_t intf; | 2313 | ipmi_smi_t intf; |
1815 | unsigned long flags; | 2314 | unsigned long flags; |
2315 | int version_major; | ||
2316 | int version_minor; | ||
1816 | 2317 | ||
2318 | version_major = ipmi_version_major(device_id); | ||
2319 | version_minor = ipmi_version_minor(device_id); | ||
1817 | 2320 | ||
1818 | /* Make sure the driver is actually initialized, this handles | 2321 | /* Make sure the driver is actually initialized, this handles |
1819 | problems with initialization order. */ | 2322 | problems with initialization order. */ |
@@ -1831,10 +2334,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1831 | if (!intf) | 2334 | if (!intf) |
1832 | return -ENOMEM; | 2335 | return -ENOMEM; |
1833 | memset(intf, 0, sizeof(*intf)); | 2336 | memset(intf, 0, sizeof(*intf)); |
2337 | intf->bmc = kzalloc(sizeof(*intf->bmc), GFP_KERNEL); | ||
2338 | if (!intf->bmc) { | ||
2339 | kfree(intf); | ||
2340 | return -ENOMEM; | ||
2341 | } | ||
1834 | intf->intf_num = -1; | 2342 | intf->intf_num = -1; |
1835 | kref_init(&intf->refcount); | 2343 | kref_init(&intf->refcount); |
1836 | intf->version_major = version_major; | 2344 | intf->bmc->id = *device_id; |
1837 | intf->version_minor = version_minor; | 2345 | intf->si_dev = si_dev; |
1838 | for (j = 0; j < IPMI_MAX_CHANNELS; j++) { | 2346 | for (j = 0; j < IPMI_MAX_CHANNELS; j++) { |
1839 | intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; | 2347 | intf->channels[j].address = IPMI_BMC_SLAVE_ADDR; |
1840 | intf->channels[j].lun = 2; | 2348 | intf->channels[j].lun = 2; |
@@ -1884,6 +2392,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1884 | caller before sending any messages with it. */ | 2392 | caller before sending any messages with it. */ |
1885 | *new_intf = intf; | 2393 | *new_intf = intf; |
1886 | 2394 | ||
2395 | get_guid(intf); | ||
2396 | |||
1887 | if ((version_major > 1) | 2397 | if ((version_major > 1) |
1888 | || ((version_major == 1) && (version_minor >= 5))) | 2398 | || ((version_major == 1) && (version_minor >= 5))) |
1889 | { | 2399 | { |
@@ -1898,6 +2408,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1898 | /* Wait for the channel info to be read. */ | 2408 | /* Wait for the channel info to be read. */ |
1899 | wait_event(intf->waitq, | 2409 | wait_event(intf->waitq, |
1900 | intf->curr_channel >= IPMI_MAX_CHANNELS); | 2410 | intf->curr_channel >= IPMI_MAX_CHANNELS); |
2411 | intf->null_user_handler = NULL; | ||
1901 | } else { | 2412 | } else { |
1902 | /* Assume a single IPMB channel at zero. */ | 2413 | /* Assume a single IPMB channel at zero. */ |
1903 | intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; | 2414 | intf->channels[0].medium = IPMI_CHANNEL_MEDIUM_IPMB; |
@@ -1907,6 +2418,8 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1907 | if (rv == 0) | 2418 | if (rv == 0) |
1908 | rv = add_proc_entries(intf, i); | 2419 | rv = add_proc_entries(intf, i); |
1909 | 2420 | ||
2421 | rv = ipmi_bmc_register(intf); | ||
2422 | |||
1910 | out: | 2423 | out: |
1911 | if (rv) { | 2424 | if (rv) { |
1912 | if (intf->proc_dir) | 2425 | if (intf->proc_dir) |
@@ -1921,7 +2434,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
1921 | spin_lock_irqsave(&interfaces_lock, flags); | 2434 | spin_lock_irqsave(&interfaces_lock, flags); |
1922 | ipmi_interfaces[i] = intf; | 2435 | ipmi_interfaces[i] = intf; |
1923 | spin_unlock_irqrestore(&interfaces_lock, flags); | 2436 | spin_unlock_irqrestore(&interfaces_lock, flags); |
1924 | call_smi_watchers(i); | 2437 | call_smi_watchers(i, intf->si_dev); |
1925 | } | 2438 | } |
1926 | 2439 | ||
1927 | return rv; | 2440 | return rv; |
@@ -1933,6 +2446,8 @@ int ipmi_unregister_smi(ipmi_smi_t intf) | |||
1933 | struct ipmi_smi_watcher *w; | 2446 | struct ipmi_smi_watcher *w; |
1934 | unsigned long flags; | 2447 | unsigned long flags; |
1935 | 2448 | ||
2449 | ipmi_bmc_unregister(intf); | ||
2450 | |||
1936 | spin_lock_irqsave(&interfaces_lock, flags); | 2451 | spin_lock_irqsave(&interfaces_lock, flags); |
1937 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { | 2452 | for (i = 0; i < MAX_IPMI_INTERFACES; i++) { |
1938 | if (ipmi_interfaces[i] == intf) { | 2453 | if (ipmi_interfaces[i] == intf) { |
@@ -3196,10 +3711,17 @@ static struct notifier_block panic_block = { | |||
3196 | static int ipmi_init_msghandler(void) | 3711 | static int ipmi_init_msghandler(void) |
3197 | { | 3712 | { |
3198 | int i; | 3713 | int i; |
3714 | int rv; | ||
3199 | 3715 | ||
3200 | if (initialized) | 3716 | if (initialized) |
3201 | return 0; | 3717 | return 0; |
3202 | 3718 | ||
3719 | rv = driver_register(&ipmidriver); | ||
3720 | if (rv) { | ||
3721 | printk(KERN_ERR PFX "Could not register IPMI driver\n"); | ||
3722 | return rv; | ||
3723 | } | ||
3724 | |||
3203 | printk(KERN_INFO "ipmi message handler version " | 3725 | printk(KERN_INFO "ipmi message handler version " |
3204 | IPMI_DRIVER_VERSION "\n"); | 3726 | IPMI_DRIVER_VERSION "\n"); |
3205 | 3727 | ||
@@ -3256,6 +3778,8 @@ static __exit void cleanup_ipmi(void) | |||
3256 | remove_proc_entry(proc_ipmi_root->name, &proc_root); | 3778 | remove_proc_entry(proc_ipmi_root->name, &proc_root); |
3257 | #endif /* CONFIG_PROC_FS */ | 3779 | #endif /* CONFIG_PROC_FS */ |
3258 | 3780 | ||
3781 | driver_unregister(&ipmidriver); | ||
3782 | |||
3259 | initialized = 0; | 3783 | initialized = 0; |
3260 | 3784 | ||
3261 | /* Check for buffer leaks. */ | 3785 | /* Check for buffer leaks. */ |
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index e8ed26b77d4c..786a2802ca34 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c | |||
@@ -464,7 +464,7 @@ static void ipmi_poweroff_function (void) | |||
464 | 464 | ||
465 | /* Wait for an IPMI interface to be installed, the first one installed | 465 | /* Wait for an IPMI interface to be installed, the first one installed |
466 | will be grabbed by this code and used to perform the powerdown. */ | 466 | will be grabbed by this code and used to perform the powerdown. */ |
467 | static void ipmi_po_new_smi(int if_num) | 467 | static void ipmi_po_new_smi(int if_num, struct device *device) |
468 | { | 468 | { |
469 | struct ipmi_system_interface_addr smi_addr; | 469 | struct ipmi_system_interface_addr smi_addr; |
470 | struct kernel_ipmi_msg send_msg; | 470 | struct kernel_ipmi_msg send_msg; |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index e59b638766ef..12f858dc9994 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include <linux/pci.h> | 52 | #include <linux/pci.h> |
53 | #include <linux/ioport.h> | 53 | #include <linux/ioport.h> |
54 | #include <linux/notifier.h> | 54 | #include <linux/notifier.h> |
55 | #include <linux/mutex.h> | ||
55 | #include <linux/kthread.h> | 56 | #include <linux/kthread.h> |
56 | #include <asm/irq.h> | 57 | #include <asm/irq.h> |
57 | #ifdef CONFIG_HIGH_RES_TIMERS | 58 | #ifdef CONFIG_HIGH_RES_TIMERS |
@@ -109,21 +110,15 @@ enum si_intf_state { | |||
109 | enum si_type { | 110 | enum si_type { |
110 | SI_KCS, SI_SMIC, SI_BT | 111 | SI_KCS, SI_SMIC, SI_BT |
111 | }; | 112 | }; |
113 | static char *si_to_str[] = { "KCS", "SMIC", "BT" }; | ||
112 | 114 | ||
113 | struct ipmi_device_id { | 115 | #define DEVICE_NAME "ipmi_si" |
114 | unsigned char device_id; | 116 | |
115 | unsigned char device_revision; | 117 | static struct device_driver ipmi_driver = |
116 | unsigned char firmware_revision_1; | 118 | { |
117 | unsigned char firmware_revision_2; | 119 | .name = DEVICE_NAME, |
118 | unsigned char ipmi_version; | 120 | .bus = &platform_bus_type |
119 | unsigned char additional_device_support; | 121 | }; |
120 | unsigned char manufacturer_id[3]; | ||
121 | unsigned char product_id[2]; | ||
122 | unsigned char aux_firmware_revision[4]; | ||
123 | } __attribute__((packed)); | ||
124 | |||
125 | #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) | ||
126 | #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) | ||
127 | 122 | ||
128 | struct smi_info | 123 | struct smi_info |
129 | { | 124 | { |
@@ -147,6 +142,9 @@ struct smi_info | |||
147 | int (*irq_setup)(struct smi_info *info); | 142 | int (*irq_setup)(struct smi_info *info); |
148 | void (*irq_cleanup)(struct smi_info *info); | 143 | void (*irq_cleanup)(struct smi_info *info); |
149 | unsigned int io_size; | 144 | unsigned int io_size; |
145 | char *addr_source; /* ACPI, PCI, SMBIOS, hardcode, default. */ | ||
146 | void (*addr_source_cleanup)(struct smi_info *info); | ||
147 | void *addr_source_data; | ||
150 | 148 | ||
151 | /* Per-OEM handler, called from handle_flags(). | 149 | /* Per-OEM handler, called from handle_flags(). |
152 | Returns 1 when handle_flags() needs to be re-run | 150 | Returns 1 when handle_flags() needs to be re-run |
@@ -203,8 +201,17 @@ struct smi_info | |||
203 | interrupts. */ | 201 | interrupts. */ |
204 | int interrupt_disabled; | 202 | int interrupt_disabled; |
205 | 203 | ||
204 | /* From the get device id response... */ | ||
206 | struct ipmi_device_id device_id; | 205 | struct ipmi_device_id device_id; |
207 | 206 | ||
207 | /* Driver model stuff. */ | ||
208 | struct device *dev; | ||
209 | struct platform_device *pdev; | ||
210 | |||
211 | /* True if we allocated the device, false if it came from | ||
212 | * someplace else (like PCI). */ | ||
213 | int dev_registered; | ||
214 | |||
208 | /* Slave address, could be reported from DMI. */ | 215 | /* Slave address, could be reported from DMI. */ |
209 | unsigned char slave_addr; | 216 | unsigned char slave_addr; |
210 | 217 | ||
@@ -224,8 +231,12 @@ struct smi_info | |||
224 | unsigned long incoming_messages; | 231 | unsigned long incoming_messages; |
225 | 232 | ||
226 | struct task_struct *thread; | 233 | struct task_struct *thread; |
234 | |||
235 | struct list_head link; | ||
227 | }; | 236 | }; |
228 | 237 | ||
238 | static int try_smi_init(struct smi_info *smi); | ||
239 | |||
229 | static struct notifier_block *xaction_notifier_list; | 240 | static struct notifier_block *xaction_notifier_list; |
230 | static int register_xaction_notifier(struct notifier_block * nb) | 241 | static int register_xaction_notifier(struct notifier_block * nb) |
231 | { | 242 | { |
@@ -271,13 +282,13 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
271 | spin_lock(&(smi_info->msg_lock)); | 282 | spin_lock(&(smi_info->msg_lock)); |
272 | 283 | ||
273 | /* Pick the high priority queue first. */ | 284 | /* Pick the high priority queue first. */ |
274 | if (! list_empty(&(smi_info->hp_xmit_msgs))) { | 285 | if (!list_empty(&(smi_info->hp_xmit_msgs))) { |
275 | entry = smi_info->hp_xmit_msgs.next; | 286 | entry = smi_info->hp_xmit_msgs.next; |
276 | } else if (! list_empty(&(smi_info->xmit_msgs))) { | 287 | } else if (!list_empty(&(smi_info->xmit_msgs))) { |
277 | entry = smi_info->xmit_msgs.next; | 288 | entry = smi_info->xmit_msgs.next; |
278 | } | 289 | } |
279 | 290 | ||
280 | if (! entry) { | 291 | if (!entry) { |
281 | smi_info->curr_msg = NULL; | 292 | smi_info->curr_msg = NULL; |
282 | rv = SI_SM_IDLE; | 293 | rv = SI_SM_IDLE; |
283 | } else { | 294 | } else { |
@@ -344,7 +355,7 @@ static void start_clear_flags(struct smi_info *smi_info) | |||
344 | memory, we will re-enable the interrupt. */ | 355 | memory, we will re-enable the interrupt. */ |
345 | static inline void disable_si_irq(struct smi_info *smi_info) | 356 | static inline void disable_si_irq(struct smi_info *smi_info) |
346 | { | 357 | { |
347 | if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { | 358 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
348 | disable_irq_nosync(smi_info->irq); | 359 | disable_irq_nosync(smi_info->irq); |
349 | smi_info->interrupt_disabled = 1; | 360 | smi_info->interrupt_disabled = 1; |
350 | } | 361 | } |
@@ -375,7 +386,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
375 | } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { | 386 | } else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) { |
376 | /* Messages available. */ | 387 | /* Messages available. */ |
377 | smi_info->curr_msg = ipmi_alloc_smi_msg(); | 388 | smi_info->curr_msg = ipmi_alloc_smi_msg(); |
378 | if (! smi_info->curr_msg) { | 389 | if (!smi_info->curr_msg) { |
379 | disable_si_irq(smi_info); | 390 | disable_si_irq(smi_info); |
380 | smi_info->si_state = SI_NORMAL; | 391 | smi_info->si_state = SI_NORMAL; |
381 | return; | 392 | return; |
@@ -394,7 +405,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
394 | } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { | 405 | } else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) { |
395 | /* Events available. */ | 406 | /* Events available. */ |
396 | smi_info->curr_msg = ipmi_alloc_smi_msg(); | 407 | smi_info->curr_msg = ipmi_alloc_smi_msg(); |
397 | if (! smi_info->curr_msg) { | 408 | if (!smi_info->curr_msg) { |
398 | disable_si_irq(smi_info); | 409 | disable_si_irq(smi_info); |
399 | smi_info->si_state = SI_NORMAL; | 410 | smi_info->si_state = SI_NORMAL; |
400 | return; | 411 | return; |
@@ -430,7 +441,7 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
430 | #endif | 441 | #endif |
431 | switch (smi_info->si_state) { | 442 | switch (smi_info->si_state) { |
432 | case SI_NORMAL: | 443 | case SI_NORMAL: |
433 | if (! smi_info->curr_msg) | 444 | if (!smi_info->curr_msg) |
434 | break; | 445 | break; |
435 | 446 | ||
436 | smi_info->curr_msg->rsp_size | 447 | smi_info->curr_msg->rsp_size |
@@ -880,7 +891,7 @@ static void smi_timeout(unsigned long data) | |||
880 | 891 | ||
881 | smi_info->last_timeout_jiffies = jiffies_now; | 892 | smi_info->last_timeout_jiffies = jiffies_now; |
882 | 893 | ||
883 | if ((smi_info->irq) && (! smi_info->interrupt_disabled)) { | 894 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
884 | /* Running with interrupts, only do long timeouts. */ | 895 | /* Running with interrupts, only do long timeouts. */ |
885 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 896 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
886 | spin_lock_irqsave(&smi_info->count_lock, flags); | 897 | spin_lock_irqsave(&smi_info->count_lock, flags); |
@@ -974,15 +985,10 @@ static struct ipmi_smi_handlers handlers = | |||
974 | a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ | 985 | a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ |
975 | 986 | ||
976 | #define SI_MAX_PARMS 4 | 987 | #define SI_MAX_PARMS 4 |
977 | #define SI_MAX_DRIVERS ((SI_MAX_PARMS * 2) + 2) | 988 | static LIST_HEAD(smi_infos); |
978 | static struct smi_info *smi_infos[SI_MAX_DRIVERS] = | 989 | static DECLARE_MUTEX(smi_infos_lock); |
979 | { NULL, NULL, NULL, NULL }; | 990 | static int smi_num; /* Used to sequence the SMIs */ |
980 | 991 | ||
981 | #define DEVICE_NAME "ipmi_si" | ||
982 | |||
983 | #define DEFAULT_KCS_IO_PORT 0xca2 | ||
984 | #define DEFAULT_SMIC_IO_PORT 0xca9 | ||
985 | #define DEFAULT_BT_IO_PORT 0xe4 | ||
986 | #define DEFAULT_REGSPACING 1 | 992 | #define DEFAULT_REGSPACING 1 |
987 | 993 | ||
988 | static int si_trydefaults = 1; | 994 | static int si_trydefaults = 1; |
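Replacing the fixed smi_infos[] array with a list means every enumeration becomes a walk of smi_infos under smi_infos_lock (a semaphore here, courtesy of DECLARE_MUTEX). A sketch of the idiom, with cleanup_one_si() standing in for whatever per-interface teardown is done:

	struct smi_info *e, *tmp;

	down(&smi_infos_lock);
	list_for_each_entry_safe(e, tmp, &smi_infos, link) {
		/* e->link is the list_head added to struct smi_info */
		cleanup_one_si(e);
	}
	up(&smi_infos_lock);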
@@ -1053,38 +1059,23 @@ MODULE_PARM_DESC(slave_addrs, "Set the default IPMB slave address for" | |||
1053 | " by interface number."); | 1059 | " by interface number."); |
1054 | 1060 | ||
1055 | 1061 | ||
1062 | #define IPMI_IO_ADDR_SPACE 0 | ||
1056 | #define IPMI_MEM_ADDR_SPACE 1 | 1063 | #define IPMI_MEM_ADDR_SPACE 1 |
1057 | #define IPMI_IO_ADDR_SPACE 2 | 1064 | static char *addr_space_to_str[] = { "I/O", "memory" }; |
1058 | 1065 | ||
1059 | #if defined(CONFIG_ACPI) || defined(CONFIG_DMI) || defined(CONFIG_PCI) | 1066 | static void std_irq_cleanup(struct smi_info *info) |
1060 | static int is_new_interface(int intf, u8 addr_space, unsigned long base_addr) | ||
1061 | { | 1067 | { |
1062 | int i; | 1068 | if (info->si_type == SI_BT) |
1063 | 1069 | /* Disable the interrupt in the BT interface. */ | |
1064 | for (i = 0; i < SI_MAX_PARMS; ++i) { | 1070 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); |
1065 | /* Don't check our address. */ | 1071 | free_irq(info->irq, info); |
1066 | if (i == intf) | ||
1067 | continue; | ||
1068 | if (si_type[i] != NULL) { | ||
1069 | if ((addr_space == IPMI_MEM_ADDR_SPACE && | ||
1070 | base_addr == addrs[i]) || | ||
1071 | (addr_space == IPMI_IO_ADDR_SPACE && | ||
1072 | base_addr == ports[i])) | ||
1073 | return 0; | ||
1074 | } | ||
1075 | else | ||
1076 | break; | ||
1077 | } | ||
1078 | |||
1079 | return 1; | ||
1080 | } | 1072 | } |
1081 | #endif | ||
1082 | 1073 | ||
1083 | static int std_irq_setup(struct smi_info *info) | 1074 | static int std_irq_setup(struct smi_info *info) |
1084 | { | 1075 | { |
1085 | int rv; | 1076 | int rv; |
1086 | 1077 | ||
1087 | if (! info->irq) | 1078 | if (!info->irq) |
1088 | return 0; | 1079 | return 0; |
1089 | 1080 | ||
1090 | if (info->si_type == SI_BT) { | 1081 | if (info->si_type == SI_BT) { |
@@ -1093,7 +1084,7 @@ static int std_irq_setup(struct smi_info *info) | |||
1093 | SA_INTERRUPT, | 1084 | SA_INTERRUPT, |
1094 | DEVICE_NAME, | 1085 | DEVICE_NAME, |
1095 | info); | 1086 | info); |
1096 | if (! rv) | 1087 | if (!rv) |
1097 | /* Enable the interrupt in the BT interface. */ | 1088 | /* Enable the interrupt in the BT interface. */ |
1098 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, | 1089 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, |
1099 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); | 1090 | IPMI_BT_INTMASK_ENABLE_IRQ_BIT); |
@@ -1110,88 +1101,77 @@ static int std_irq_setup(struct smi_info *info) | |||
1110 | DEVICE_NAME, info->irq); | 1101 | DEVICE_NAME, info->irq); |
1111 | info->irq = 0; | 1102 | info->irq = 0; |
1112 | } else { | 1103 | } else { |
1104 | info->irq_cleanup = std_irq_cleanup; | ||
1113 | printk(" Using irq %d\n", info->irq); | 1105 | printk(" Using irq %d\n", info->irq); |
1114 | } | 1106 | } |
1115 | 1107 | ||
1116 | return rv; | 1108 | return rv; |
1117 | } | 1109 | } |
1118 | 1110 | ||
1119 | static void std_irq_cleanup(struct smi_info *info) | ||
1120 | { | ||
1121 | if (! info->irq) | ||
1122 | return; | ||
1123 | |||
1124 | if (info->si_type == SI_BT) | ||
1125 | /* Disable the interrupt in the BT interface. */ | ||
1126 | info->io.outputb(&info->io, IPMI_BT_INTMASK_REG, 0); | ||
1127 | free_irq(info->irq, info); | ||
1128 | } | ||
1129 | |||
1130 | static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) | 1111 | static unsigned char port_inb(struct si_sm_io *io, unsigned int offset) |
1131 | { | 1112 | { |
1132 | unsigned int *addr = io->info; | 1113 | unsigned int addr = io->addr_data; |
1133 | 1114 | ||
1134 | return inb((*addr)+(offset*io->regspacing)); | 1115 | return inb(addr + (offset * io->regspacing)); |
1135 | } | 1116 | } |
1136 | 1117 | ||
1137 | static void port_outb(struct si_sm_io *io, unsigned int offset, | 1118 | static void port_outb(struct si_sm_io *io, unsigned int offset, |
1138 | unsigned char b) | 1119 | unsigned char b) |
1139 | { | 1120 | { |
1140 | unsigned int *addr = io->info; | 1121 | unsigned int addr = io->addr_data; |
1141 | 1122 | ||
1142 | outb(b, (*addr)+(offset * io->regspacing)); | 1123 | outb(b, addr + (offset * io->regspacing)); |
1143 | } | 1124 | } |
1144 | 1125 | ||
1145 | static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) | 1126 | static unsigned char port_inw(struct si_sm_io *io, unsigned int offset) |
1146 | { | 1127 | { |
1147 | unsigned int *addr = io->info; | 1128 | unsigned int addr = io->addr_data; |
1148 | 1129 | ||
1149 | return (inw((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; | 1130 | return (inw(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; |
1150 | } | 1131 | } |
1151 | 1132 | ||
1152 | static void port_outw(struct si_sm_io *io, unsigned int offset, | 1133 | static void port_outw(struct si_sm_io *io, unsigned int offset, |
1153 | unsigned char b) | 1134 | unsigned char b) |
1154 | { | 1135 | { |
1155 | unsigned int *addr = io->info; | 1136 | unsigned int addr = io->addr_data; |
1156 | 1137 | ||
1157 | outw(b << io->regshift, (*addr)+(offset * io->regspacing)); | 1138 | outw(b << io->regshift, addr + (offset * io->regspacing)); |
1158 | } | 1139 | } |
1159 | 1140 | ||
1160 | static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) | 1141 | static unsigned char port_inl(struct si_sm_io *io, unsigned int offset) |
1161 | { | 1142 | { |
1162 | unsigned int *addr = io->info; | 1143 | unsigned int addr = io->addr_data; |
1163 | 1144 | ||
1164 | return (inl((*addr)+(offset * io->regspacing)) >> io->regshift) & 0xff; | 1145 | return (inl(addr + (offset * io->regspacing)) >> io->regshift) & 0xff; |
1165 | } | 1146 | } |
1166 | 1147 | ||
1167 | static void port_outl(struct si_sm_io *io, unsigned int offset, | 1148 | static void port_outl(struct si_sm_io *io, unsigned int offset, |
1168 | unsigned char b) | 1149 | unsigned char b) |
1169 | { | 1150 | { |
1170 | unsigned int *addr = io->info; | 1151 | unsigned int addr = io->addr_data; |
1171 | 1152 | ||
1172 | outl(b << io->regshift, (*addr)+(offset * io->regspacing)); | 1153 | outl(b << io->regshift, addr+(offset * io->regspacing)); |
1173 | } | 1154 | } |
1174 | 1155 | ||
1175 | static void port_cleanup(struct smi_info *info) | 1156 | static void port_cleanup(struct smi_info *info) |
1176 | { | 1157 | { |
1177 | unsigned int *addr = info->io.info; | 1158 | unsigned int addr = info->io.addr_data; |
1178 | int mapsize; | 1159 | int mapsize; |
1179 | 1160 | ||
1180 | if (addr && (*addr)) { | 1161 | if (addr) { |
1181 | mapsize = ((info->io_size * info->io.regspacing) | 1162 | mapsize = ((info->io_size * info->io.regspacing) |
1182 | - (info->io.regspacing - info->io.regsize)); | 1163 | - (info->io.regspacing - info->io.regsize)); |
1183 | 1164 | ||
1184 | release_region (*addr, mapsize); | 1165 | release_region (addr, mapsize); |
1185 | } | 1166 | } |
1186 | kfree(info); | ||
1187 | } | 1167 | } |
1188 | 1168 | ||
1189 | static int port_setup(struct smi_info *info) | 1169 | static int port_setup(struct smi_info *info) |
1190 | { | 1170 | { |
1191 | unsigned int *addr = info->io.info; | 1171 | unsigned int addr = info->io.addr_data; |
1192 | int mapsize; | 1172 | int mapsize; |
1193 | 1173 | ||
1194 | if (! addr || (! *addr)) | 1174 | if (!addr) |
1195 | return -ENODEV; | 1175 | return -ENODEV; |
1196 | 1176 | ||
1197 | info->io_cleanup = port_cleanup; | 1177 | info->io_cleanup = port_cleanup; |
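Since the accessors now take the base address straight from io->addr_data, the register addressing is easy to follow with concrete (hypothetical) numbers: with addr_data = 0xca2, regspacing = 4 and regshift = 0, register offset 1 lives at I/O port 0xca2 + 1 * 4 = 0xca6.

	unsigned char b;

	/* io->addr_data = 0xca2, io->regspacing = 4, io->regshift = 0 */
	b = port_inb(&info->io, 1);	/* inb(0xca2 + 1 * 4) = inb(0xca6) */

	/* for 16-bit wide registers with io->regshift = 8, port_inw() would
	   instead return (inw(0xca6) >> 8) & 0xff */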
@@ -1225,51 +1205,11 @@ static int port_setup(struct smi_info *info) | |||
1225 | mapsize = ((info->io_size * info->io.regspacing) | 1205 | mapsize = ((info->io_size * info->io.regspacing) |
1226 | - (info->io.regspacing - info->io.regsize)); | 1206 | - (info->io.regspacing - info->io.regsize)); |
1227 | 1207 | ||
1228 | if (request_region(*addr, mapsize, DEVICE_NAME) == NULL) | 1208 | if (request_region(addr, mapsize, DEVICE_NAME) == NULL) |
1229 | return -EIO; | 1209 | return -EIO; |
1230 | return 0; | 1210 | return 0; |
1231 | } | 1211 | } |
1232 | 1212 | ||
1233 | static int try_init_port(int intf_num, struct smi_info **new_info) | ||
1234 | { | ||
1235 | struct smi_info *info; | ||
1236 | |||
1237 | if (! ports[intf_num]) | ||
1238 | return -ENODEV; | ||
1239 | |||
1240 | if (! is_new_interface(intf_num, IPMI_IO_ADDR_SPACE, | ||
1241 | ports[intf_num])) | ||
1242 | return -ENODEV; | ||
1243 | |||
1244 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
1245 | if (! info) { | ||
1246 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (1)\n"); | ||
1247 | return -ENOMEM; | ||
1248 | } | ||
1249 | memset(info, 0, sizeof(*info)); | ||
1250 | |||
1251 | info->io_setup = port_setup; | ||
1252 | info->io.info = &(ports[intf_num]); | ||
1253 | info->io.addr = NULL; | ||
1254 | info->io.regspacing = regspacings[intf_num]; | ||
1255 | if (! info->io.regspacing) | ||
1256 | info->io.regspacing = DEFAULT_REGSPACING; | ||
1257 | info->io.regsize = regsizes[intf_num]; | ||
1258 | if (! info->io.regsize) | ||
1259 | info->io.regsize = DEFAULT_REGSPACING; | ||
1260 | info->io.regshift = regshifts[intf_num]; | ||
1261 | info->irq = 0; | ||
1262 | info->irq_setup = NULL; | ||
1263 | *new_info = info; | ||
1264 | |||
1265 | if (si_type[intf_num] == NULL) | ||
1266 | si_type[intf_num] = "kcs"; | ||
1267 | |||
1268 | printk("ipmi_si: Trying \"%s\" at I/O port 0x%x\n", | ||
1269 | si_type[intf_num], ports[intf_num]); | ||
1270 | return 0; | ||
1271 | } | ||
1272 | |||
1273 | static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) | 1213 | static unsigned char intf_mem_inb(struct si_sm_io *io, unsigned int offset) |
1274 | { | 1214 | { |
1275 | return readb((io->addr)+(offset * io->regspacing)); | 1215 | return readb((io->addr)+(offset * io->regspacing)); |
@@ -1321,7 +1261,7 @@ static void mem_outq(struct si_sm_io *io, unsigned int offset, | |||
1321 | 1261 | ||
1322 | static void mem_cleanup(struct smi_info *info) | 1262 | static void mem_cleanup(struct smi_info *info) |
1323 | { | 1263 | { |
1324 | unsigned long *addr = info->io.info; | 1264 | unsigned long addr = info->io.addr_data; |
1325 | int mapsize; | 1265 | int mapsize; |
1326 | 1266 | ||
1327 | if (info->io.addr) { | 1267 | if (info->io.addr) { |
@@ -1330,17 +1270,16 @@ static void mem_cleanup(struct smi_info *info) | |||
1330 | mapsize = ((info->io_size * info->io.regspacing) | 1270 | mapsize = ((info->io_size * info->io.regspacing) |
1331 | - (info->io.regspacing - info->io.regsize)); | 1271 | - (info->io.regspacing - info->io.regsize)); |
1332 | 1272 | ||
1333 | release_mem_region(*addr, mapsize); | 1273 | release_mem_region(addr, mapsize); |
1334 | } | 1274 | } |
1335 | kfree(info); | ||
1336 | } | 1275 | } |
1337 | 1276 | ||
1338 | static int mem_setup(struct smi_info *info) | 1277 | static int mem_setup(struct smi_info *info) |
1339 | { | 1278 | { |
1340 | unsigned long *addr = info->io.info; | 1279 | unsigned long addr = info->io.addr_data; |
1341 | int mapsize; | 1280 | int mapsize; |
1342 | 1281 | ||
1343 | if (! addr || (! *addr)) | 1282 | if (!addr) |
1344 | return -ENODEV; | 1283 | return -ENODEV; |
1345 | 1284 | ||
1346 | info->io_cleanup = mem_cleanup; | 1285 | info->io_cleanup = mem_cleanup; |
@@ -1380,57 +1319,83 @@ static int mem_setup(struct smi_info *info) | |||
1380 | mapsize = ((info->io_size * info->io.regspacing) | 1319 | mapsize = ((info->io_size * info->io.regspacing) |
1381 | - (info->io.regspacing - info->io.regsize)); | 1320 | - (info->io.regspacing - info->io.regsize)); |
1382 | 1321 | ||
1383 | if (request_mem_region(*addr, mapsize, DEVICE_NAME) == NULL) | 1322 | if (request_mem_region(addr, mapsize, DEVICE_NAME) == NULL) |
1384 | return -EIO; | 1323 | return -EIO; |
1385 | 1324 | ||
1386 | info->io.addr = ioremap(*addr, mapsize); | 1325 | info->io.addr = ioremap(addr, mapsize); |
1387 | if (info->io.addr == NULL) { | 1326 | if (info->io.addr == NULL) { |
1388 | release_mem_region(*addr, mapsize); | 1327 | release_mem_region(addr, mapsize); |
1389 | return -EIO; | 1328 | return -EIO; |
1390 | } | 1329 | } |
1391 | return 0; | 1330 | return 0; |
1392 | } | 1331 | } |
1393 | 1332 | ||
1394 | static int try_init_mem(int intf_num, struct smi_info **new_info) | 1333 | |
1334 | static __devinit void hardcode_find_bmc(void) | ||
1395 | { | 1335 | { |
1336 | int i; | ||
1396 | struct smi_info *info; | 1337 | struct smi_info *info; |
1397 | 1338 | ||
1398 | if (! addrs[intf_num]) | 1339 | for (i = 0; i < SI_MAX_PARMS; i++) { |
1399 | return -ENODEV; | 1340 | if (!ports[i] && !addrs[i]) |
1341 | continue; | ||
1400 | 1342 | ||
1401 | if (! is_new_interface(intf_num, IPMI_MEM_ADDR_SPACE, | 1343 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
1402 | addrs[intf_num])) | 1344 | if (!info) |
1403 | return -ENODEV; | 1345 | return; |
1404 | 1346 | ||
1405 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 1347 | info->addr_source = "hardcoded"; |
1406 | if (! info) { | ||
1407 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (2)\n"); | ||
1408 | return -ENOMEM; | ||
1409 | } | ||
1410 | memset(info, 0, sizeof(*info)); | ||
1411 | 1348 | ||
1412 | info->io_setup = mem_setup; | 1349 | if (!si_type[i] || strcmp(si_type[i], "kcs") == 0) { |
1413 | info->io.info = &addrs[intf_num]; | 1350 | info->si_type = SI_KCS; |
1414 | info->io.addr = NULL; | 1351 | } else if (strcmp(si_type[i], "smic") == 0) { |
1415 | info->io.regspacing = regspacings[intf_num]; | 1352 | info->si_type = SI_SMIC; |
1416 | if (! info->io.regspacing) | 1353 | } else if (strcmp(si_type[i], "bt") == 0) { |
1417 | info->io.regspacing = DEFAULT_REGSPACING; | 1354 | info->si_type = SI_BT; |
1418 | info->io.regsize = regsizes[intf_num]; | 1355 | } else { |
1419 | if (! info->io.regsize) | 1356 | printk(KERN_WARNING |
1420 | info->io.regsize = DEFAULT_REGSPACING; | 1357 | "ipmi_si: Interface type specified " |
1421 | info->io.regshift = regshifts[intf_num]; | 1358 | "for interface %d, was invalid: %s\n", |
1422 | info->irq = 0; | 1359 | i, si_type[i]); |
1423 | info->irq_setup = NULL; | 1360 | kfree(info); |
1424 | *new_info = info; | 1361 | continue; |
1362 | } | ||
1425 | 1363 | ||
1426 | if (si_type[intf_num] == NULL) | 1364 | if (ports[i]) { |
1427 | si_type[intf_num] = "kcs"; | 1365 | /* An I/O port */ |
1366 | info->io_setup = port_setup; | ||
1367 | info->io.addr_data = ports[i]; | ||
1368 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | ||
1369 | } else if (addrs[i]) { | ||
1370 | /* A memory port */ | ||
1371 | info->io_setup = mem_setup; | ||
1372 | info->io.addr_data = addrs[i]; | ||
1373 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | ||
1374 | } else { | ||
1375 | printk(KERN_WARNING | ||
1376 | "ipmi_si: Interface type specified " | ||
1377 | "for interface %d, " | ||
1378 | "but port and address were not set or " | ||
1379 | "set to zero.\n", i); | ||
1380 | kfree(info); | ||
1381 | continue; | ||
1382 | } | ||
1428 | 1383 | ||
1429 | printk("ipmi_si: Trying \"%s\" at memory address 0x%lx\n", | 1384 | info->io.addr = NULL; |
1430 | si_type[intf_num], addrs[intf_num]); | 1385 | info->io.regspacing = regspacings[i]; |
1431 | return 0; | 1386 | if (!info->io.regspacing) |
1432 | } | 1387 | info->io.regspacing = DEFAULT_REGSPACING; |
1388 | info->io.regsize = regsizes[i]; | ||
1389 | if (!info->io.regsize) | ||
1390 | info->io.regsize = DEFAULT_REGSPACING; | ||
1391 | info->io.regshift = regshifts[i]; | ||
1392 | info->irq = irqs[i]; | ||
1393 | if (info->irq) | ||
1394 | info->irq_setup = std_irq_setup; | ||
1433 | 1395 | ||
1396 | try_smi_init(info); | ||
1397 | } | ||
1398 | } | ||
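For orientation, this is roughly what hardcode_find_bmc() builds for one populated slot, assuming the module was loaded with the usual ipmi_si parameters (for example type=kcs ports=0xca2 regspacings=4; the module_param_array() names are an assumption, as they are not shown in this hunk):

	/* slot 0 of the parameter arrays would yield, roughly: */
	info->addr_source   = "hardcoded";
	info->si_type       = SI_KCS;
	info->io_setup      = port_setup;
	info->io.addr_data  = 0xca2;
	info->io.addr_type  = IPMI_IO_ADDR_SPACE;
	info->io.regspacing = 4;
	/* ...before the info is handed to try_smi_init() */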
1434 | 1399 | ||
1435 | #ifdef CONFIG_ACPI | 1400 | #ifdef CONFIG_ACPI |
1436 | 1401 | ||
@@ -1470,11 +1435,19 @@ static u32 ipmi_acpi_gpe(void *context) | |||
1470 | return ACPI_INTERRUPT_HANDLED; | 1435 | return ACPI_INTERRUPT_HANDLED; |
1471 | } | 1436 | } |
1472 | 1437 | ||
1438 | static void acpi_gpe_irq_cleanup(struct smi_info *info) | ||
1439 | { | ||
1440 | if (!info->irq) | ||
1441 | return; | ||
1442 | |||
1443 | acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); | ||
1444 | } | ||
1445 | |||
1473 | static int acpi_gpe_irq_setup(struct smi_info *info) | 1446 | static int acpi_gpe_irq_setup(struct smi_info *info) |
1474 | { | 1447 | { |
1475 | acpi_status status; | 1448 | acpi_status status; |
1476 | 1449 | ||
1477 | if (! info->irq) | 1450 | if (!info->irq) |
1478 | return 0; | 1451 | return 0; |
1479 | 1452 | ||
1480 | /* FIXME - is level triggered right? */ | 1453 | /* FIXME - is level triggered right? */ |
@@ -1491,19 +1464,12 @@ static int acpi_gpe_irq_setup(struct smi_info *info) | |||
1491 | info->irq = 0; | 1464 | info->irq = 0; |
1492 | return -EINVAL; | 1465 | return -EINVAL; |
1493 | } else { | 1466 | } else { |
1467 | info->irq_cleanup = acpi_gpe_irq_cleanup; | ||
1494 | printk(" Using ACPI GPE %d\n", info->irq); | 1468 | printk(" Using ACPI GPE %d\n", info->irq); |
1495 | return 0; | 1469 | return 0; |
1496 | } | 1470 | } |
1497 | } | 1471 | } |
1498 | 1472 | ||
1499 | static void acpi_gpe_irq_cleanup(struct smi_info *info) | ||
1500 | { | ||
1501 | if (! info->irq) | ||
1502 | return; | ||
1503 | |||
1504 | acpi_remove_gpe_handler(NULL, info->irq, &ipmi_acpi_gpe); | ||
1505 | } | ||
1506 | |||
1507 | /* | 1473 | /* |
1508 | * Defined at | 1474 | * Defined at |
1509 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf | 1475 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf |
@@ -1546,28 +1512,12 @@ struct SPMITable { | |||
1546 | s8 spmi_id[1]; /* A '\0' terminated array starts here. */ | 1512 | s8 spmi_id[1]; /* A '\0' terminated array starts here. */ |
1547 | }; | 1513 | }; |
1548 | 1514 | ||
1549 | static int try_init_acpi(int intf_num, struct smi_info **new_info) | 1515 | static __devinit int try_init_acpi(struct SPMITable *spmi) |
1550 | { | 1516 | { |
1551 | struct smi_info *info; | 1517 | struct smi_info *info; |
1552 | acpi_status status; | ||
1553 | struct SPMITable *spmi; | ||
1554 | char *io_type; | 1518 | char *io_type; |
1555 | u8 addr_space; | 1519 | u8 addr_space; |
1556 | 1520 | ||
1557 | if (acpi_disabled) | ||
1558 | return -ENODEV; | ||
1559 | |||
1560 | if (acpi_failure) | ||
1561 | return -ENODEV; | ||
1562 | |||
1563 | status = acpi_get_firmware_table("SPMI", intf_num+1, | ||
1564 | ACPI_LOGICAL_ADDRESSING, | ||
1565 | (struct acpi_table_header **) &spmi); | ||
1566 | if (status != AE_OK) { | ||
1567 | acpi_failure = 1; | ||
1568 | return -ENODEV; | ||
1569 | } | ||
1570 | |||
1571 | if (spmi->IPMIlegacy != 1) { | 1521 | if (spmi->IPMIlegacy != 1) { |
1572 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); | 1522 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); |
1573 | return -ENODEV; | 1523 | return -ENODEV; |
@@ -1577,47 +1527,42 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info) | |||
1577 | addr_space = IPMI_MEM_ADDR_SPACE; | 1527 | addr_space = IPMI_MEM_ADDR_SPACE; |
1578 | else | 1528 | else |
1579 | addr_space = IPMI_IO_ADDR_SPACE; | 1529 | addr_space = IPMI_IO_ADDR_SPACE; |
1580 | if (! is_new_interface(-1, addr_space, spmi->addr.address)) | 1530 | |
1581 | return -ENODEV; | 1531 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
1532 | if (!info) { | ||
1533 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); | ||
1534 | return -ENOMEM; | ||
1535 | } | ||
1536 | |||
1537 | info->addr_source = "ACPI"; | ||
1582 | 1538 | ||
1583 | /* Figure out the interface type. */ | 1539 | /* Figure out the interface type. */ |
1584 | switch (spmi->InterfaceType) | 1540 | switch (spmi->InterfaceType) |
1585 | { | 1541 | { |
1586 | case 1: /* KCS */ | 1542 | case 1: /* KCS */ |
1587 | si_type[intf_num] = "kcs"; | 1543 | info->si_type = SI_KCS; |
1588 | break; | 1544 | break; |
1589 | |||
1590 | case 2: /* SMIC */ | 1545 | case 2: /* SMIC */ |
1591 | si_type[intf_num] = "smic"; | 1546 | info->si_type = SI_SMIC; |
1592 | break; | 1547 | break; |
1593 | |||
1594 | case 3: /* BT */ | 1548 | case 3: /* BT */ |
1595 | si_type[intf_num] = "bt"; | 1549 | info->si_type = SI_BT; |
1596 | break; | 1550 | break; |
1597 | |||
1598 | default: | 1551 | default: |
1599 | printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", | 1552 | printk(KERN_INFO "ipmi_si: Unknown ACPI/SPMI SI type %d\n", |
1600 | spmi->InterfaceType); | 1553 | spmi->InterfaceType); |
1554 | kfree(info); | ||
1601 | return -EIO; | 1555 | return -EIO; |
1602 | } | 1556 | } |
1603 | 1557 | ||
1604 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
1605 | if (! info) { | ||
1606 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (3)\n"); | ||
1607 | return -ENOMEM; | ||
1608 | } | ||
1609 | memset(info, 0, sizeof(*info)); | ||
1610 | |||
1611 | if (spmi->InterruptType & 1) { | 1558 | if (spmi->InterruptType & 1) { |
1612 | /* We've got a GPE interrupt. */ | 1559 | /* We've got a GPE interrupt. */ |
1613 | info->irq = spmi->GPE; | 1560 | info->irq = spmi->GPE; |
1614 | info->irq_setup = acpi_gpe_irq_setup; | 1561 | info->irq_setup = acpi_gpe_irq_setup; |
1615 | info->irq_cleanup = acpi_gpe_irq_cleanup; | ||
1616 | } else if (spmi->InterruptType & 2) { | 1562 | } else if (spmi->InterruptType & 2) { |
1617 | /* We've got an APIC/SAPIC interrupt. */ | 1563 | /* We've got an APIC/SAPIC interrupt. */ |
1618 | info->irq = spmi->GlobalSystemInterrupt; | 1564 | info->irq = spmi->GlobalSystemInterrupt; |
1619 | info->irq_setup = std_irq_setup; | 1565 | info->irq_setup = std_irq_setup; |
1620 | info->irq_cleanup = std_irq_cleanup; | ||
1621 | } else { | 1566 | } else { |
1622 | /* Use the default interrupt setting. */ | 1567 | /* Use the default interrupt setting. */ |
1623 | info->irq = 0; | 1568 | info->irq = 0; |
@@ -1626,43 +1571,60 @@ static int try_init_acpi(int intf_num, struct smi_info **new_info) | |||
1626 | 1571 | ||
1627 | if (spmi->addr.register_bit_width) { | 1572 | if (spmi->addr.register_bit_width) { |
1628 | /* A (hopefully) properly formed register bit width. */ | 1573 | /* A (hopefully) properly formed register bit width. */ |
1629 | regspacings[intf_num] = spmi->addr.register_bit_width / 8; | ||
1630 | info->io.regspacing = spmi->addr.register_bit_width / 8; | 1574 | info->io.regspacing = spmi->addr.register_bit_width / 8; |
1631 | } else { | 1575 | } else { |
1632 | regspacings[intf_num] = DEFAULT_REGSPACING; | ||
1633 | info->io.regspacing = DEFAULT_REGSPACING; | 1576 | info->io.regspacing = DEFAULT_REGSPACING; |
1634 | } | 1577 | } |
1635 | regsizes[intf_num] = regspacings[intf_num]; | 1578 | info->io.regsize = info->io.regspacing; |
1636 | info->io.regsize = regsizes[intf_num]; | 1579 | info->io.regshift = spmi->addr.register_bit_offset; |
1637 | regshifts[intf_num] = spmi->addr.register_bit_offset; | ||
1638 | info->io.regshift = regshifts[intf_num]; | ||
1639 | 1580 | ||
1640 | if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { | 1581 | if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { |
1641 | io_type = "memory"; | 1582 | io_type = "memory"; |
1642 | info->io_setup = mem_setup; | 1583 | info->io_setup = mem_setup; |
1643 | addrs[intf_num] = spmi->addr.address; | 1584 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
1644 | info->io.info = &(addrs[intf_num]); | ||
1645 | } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { | 1585 | } else if (spmi->addr.address_space_id == ACPI_ADR_SPACE_SYSTEM_IO) { |
1646 | io_type = "I/O"; | 1586 | io_type = "I/O"; |
1647 | info->io_setup = port_setup; | 1587 | info->io_setup = port_setup; |
1648 | ports[intf_num] = spmi->addr.address; | 1588 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1649 | info->io.info = &(ports[intf_num]); | ||
1650 | } else { | 1589 | } else { |
1651 | kfree(info); | 1590 | kfree(info); |
1652 | printk("ipmi_si: Unknown ACPI I/O Address type\n"); | 1591 | printk("ipmi_si: Unknown ACPI I/O Address type\n"); |
1653 | return -EIO; | 1592 | return -EIO; |
1654 | } | 1593 | } |
1594 | info->io.addr_data = spmi->addr.address; | ||
1655 | 1595 | ||
1656 | *new_info = info; | 1596 | try_smi_init(info); |
1657 | 1597 | ||
1658 | printk("ipmi_si: ACPI/SPMI specifies \"%s\" %s SI @ 0x%lx\n", | ||
1659 | si_type[intf_num], io_type, (unsigned long) spmi->addr.address); | ||
1660 | return 0; | 1598 | return 0; |
1661 | } | 1599 | } |
1600 | |||
1601 | static __devinit void acpi_find_bmc(void) | ||
1602 | { | ||
1603 | acpi_status status; | ||
1604 | struct SPMITable *spmi; | ||
1605 | int i; | ||
1606 | |||
1607 | if (acpi_disabled) | ||
1608 | return; | ||
1609 | |||
1610 | if (acpi_failure) | ||
1611 | return; | ||
1612 | |||
1613 | for (i = 0; ; i++) { | ||
1614 | status = acpi_get_firmware_table("SPMI", i+1, | ||
1615 | ACPI_LOGICAL_ADDRESSING, | ||
1616 | (struct acpi_table_header **) | ||
1617 | &spmi); | ||
1618 | if (status != AE_OK) | ||
1619 | return; | ||
1620 | |||
1621 | try_init_acpi(spmi); | ||
1622 | } | ||
1623 | } | ||
1662 | #endif | 1624 | #endif |
1663 | 1625 | ||
1664 | #ifdef CONFIG_DMI | 1626 | #ifdef CONFIG_DMI |
1665 | typedef struct dmi_ipmi_data | 1627 | struct dmi_ipmi_data |
1666 | { | 1628 | { |
1667 | u8 type; | 1629 | u8 type; |
1668 | u8 addr_space; | 1630 | u8 addr_space; |
@@ -1670,49 +1632,46 @@ typedef struct dmi_ipmi_data | |||
1670 | u8 irq; | 1632 | u8 irq; |
1671 | u8 offset; | 1633 | u8 offset; |
1672 | u8 slave_addr; | 1634 | u8 slave_addr; |
1673 | } dmi_ipmi_data_t; | 1635 | }; |
1674 | |||
1675 | static dmi_ipmi_data_t dmi_data[SI_MAX_DRIVERS]; | ||
1676 | static int dmi_data_entries; | ||
1677 | 1636 | ||
1678 | static int __init decode_dmi(struct dmi_header *dm, int intf_num) | 1637 | static int __devinit decode_dmi(struct dmi_header *dm, |
1638 | struct dmi_ipmi_data *dmi) | ||
1679 | { | 1639 | { |
1680 | u8 *data = (u8 *)dm; | 1640 | u8 *data = (u8 *)dm; |
1681 | unsigned long base_addr; | 1641 | unsigned long base_addr; |
1682 | u8 reg_spacing; | 1642 | u8 reg_spacing; |
1683 | u8 len = dm->length; | 1643 | u8 len = dm->length; |
1684 | dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; | ||
1685 | 1644 | ||
1686 | ipmi_data->type = data[4]; | 1645 | dmi->type = data[4]; |
1687 | 1646 | ||
1688 | memcpy(&base_addr, data+8, sizeof(unsigned long)); | 1647 | memcpy(&base_addr, data+8, sizeof(unsigned long)); |
1689 | if (len >= 0x11) { | 1648 | if (len >= 0x11) { |
1690 | if (base_addr & 1) { | 1649 | if (base_addr & 1) { |
1691 | /* I/O */ | 1650 | /* I/O */ |
1692 | base_addr &= 0xFFFE; | 1651 | base_addr &= 0xFFFE; |
1693 | ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; | 1652 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
1694 | } | 1653 | } |
1695 | else { | 1654 | else { |
1696 | /* Memory */ | 1655 | /* Memory */ |
1697 | ipmi_data->addr_space = IPMI_MEM_ADDR_SPACE; | 1656 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; |
1698 | } | 1657 | } |
1699 | /* If bit 4 of byte 0x10 is set, then the lsb for the address | 1658 | /* If bit 4 of byte 0x10 is set, then the lsb for the address |
1700 | is odd. */ | 1659 | is odd. */ |
1701 | ipmi_data->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); | 1660 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); |
1702 | 1661 | ||
1703 | ipmi_data->irq = data[0x11]; | 1662 | dmi->irq = data[0x11]; |
1704 | 1663 | ||
1705 | /* The top two bits of byte 0x10 hold the register spacing. */ | 1664 | /* The top two bits of byte 0x10 hold the register spacing. */ |
1706 | reg_spacing = (data[0x10] & 0xC0) >> 6; | 1665 | reg_spacing = (data[0x10] & 0xC0) >> 6; |
1707 | switch(reg_spacing){ | 1666 | switch(reg_spacing){ |
1708 | case 0x00: /* Byte boundaries */ | 1667 | case 0x00: /* Byte boundaries */ |
1709 | ipmi_data->offset = 1; | 1668 | dmi->offset = 1; |
1710 | break; | 1669 | break; |
1711 | case 0x01: /* 32-bit boundaries */ | 1670 | case 0x01: /* 32-bit boundaries */ |
1712 | ipmi_data->offset = 4; | 1671 | dmi->offset = 4; |
1713 | break; | 1672 | break; |
1714 | case 0x02: /* 16-byte boundaries */ | 1673 | case 0x02: /* 16-byte boundaries */ |
1715 | ipmi_data->offset = 16; | 1674 | dmi->offset = 16; |
1716 | break; | 1675 | break; |
1717 | default: | 1676 | default: |
1718 | /* Some other interface, just ignore it. */ | 1677 | /* Some other interface, just ignore it. */ |
@@ -1726,217 +1685,227 @@ static int __init decode_dmi(struct dmi_header *dm, int intf_num) | |||
1726 | * wrong (and all that I have seen are I/O) so we just | 1685 | * wrong (and all that I have seen are I/O) so we just |
1727 | * ignore that bit and assume I/O. Systems that use | 1686 | * ignore that bit and assume I/O. Systems that use |
1728 | * memory should use the newer spec, anyway. */ | 1687 | * memory should use the newer spec, anyway. */ |
1729 | ipmi_data->base_addr = base_addr & 0xfffe; | 1688 | dmi->base_addr = base_addr & 0xfffe; |
1730 | ipmi_data->addr_space = IPMI_IO_ADDR_SPACE; | 1689 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
1731 | ipmi_data->offset = 1; | 1690 | dmi->offset = 1; |
1732 | } | ||
1733 | |||
1734 | ipmi_data->slave_addr = data[6]; | ||
1735 | |||
1736 | if (is_new_interface(-1, ipmi_data->addr_space,ipmi_data->base_addr)) { | ||
1737 | dmi_data_entries++; | ||
1738 | return 0; | ||
1739 | } | 1691 | } |
1740 | 1692 | ||
1741 | memset(ipmi_data, 0, sizeof(dmi_ipmi_data_t)); | 1693 | dmi->slave_addr = data[6]; |
1742 | 1694 | ||
1743 | return -1; | 1695 | return 0; |
1744 | } | 1696 | } |
1745 | 1697 | ||
1746 | static void __init dmi_find_bmc(void) | 1698 | static __devinit void try_init_dmi(struct dmi_ipmi_data *ipmi_data) |
1747 | { | 1699 | { |
1748 | struct dmi_device *dev = NULL; | 1700 | struct smi_info *info; |
1749 | int intf_num = 0; | ||
1750 | |||
1751 | while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { | ||
1752 | if (intf_num >= SI_MAX_DRIVERS) | ||
1753 | break; | ||
1754 | 1701 | ||
1755 | decode_dmi((struct dmi_header *) dev->device_data, intf_num++); | 1702 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
1703 | if (!info) { | ||
1704 | printk(KERN_ERR | ||
1705 | "ipmi_si: Could not allocate SI data\n"); | ||
1706 | return; | ||
1756 | } | 1707 | } |
1757 | } | ||
1758 | |||
1759 | static int try_init_smbios(int intf_num, struct smi_info **new_info) | ||
1760 | { | ||
1761 | struct smi_info *info; | ||
1762 | dmi_ipmi_data_t *ipmi_data = dmi_data+intf_num; | ||
1763 | char *io_type; | ||
1764 | 1708 | ||
1765 | if (intf_num >= dmi_data_entries) | 1709 | info->addr_source = "SMBIOS"; |
1766 | return -ENODEV; | ||
1767 | 1710 | ||
1768 | switch (ipmi_data->type) { | 1711 | switch (ipmi_data->type) { |
1769 | case 0x01: /* KCS */ | 1712 | case 0x01: /* KCS */ |
1770 | si_type[intf_num] = "kcs"; | 1713 | info->si_type = SI_KCS; |
1771 | break; | 1714 | break; |
1772 | case 0x02: /* SMIC */ | 1715 | case 0x02: /* SMIC */ |
1773 | si_type[intf_num] = "smic"; | 1716 | info->si_type = SI_SMIC; |
1774 | break; | 1717 | break; |
1775 | case 0x03: /* BT */ | 1718 | case 0x03: /* BT */ |
1776 | si_type[intf_num] = "bt"; | 1719 | info->si_type = SI_BT; |
1777 | break; | 1720 | break; |
1778 | default: | 1721 | default: |
1779 | return -EIO; | 1722 | return; |
1780 | } | ||
1781 | |||
1782 | info = kmalloc(sizeof(*info), GFP_KERNEL); | ||
1783 | if (! info) { | ||
1784 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (4)\n"); | ||
1785 | return -ENOMEM; | ||
1786 | } | 1723 | } |
1787 | memset(info, 0, sizeof(*info)); | ||
1788 | 1724 | ||
1789 | if (ipmi_data->addr_space == 1) { | 1725 | switch (ipmi_data->addr_space) { |
1790 | io_type = "memory"; | 1726 | case IPMI_MEM_ADDR_SPACE: |
1791 | info->io_setup = mem_setup; | 1727 | info->io_setup = mem_setup; |
1792 | addrs[intf_num] = ipmi_data->base_addr; | 1728 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; |
1793 | info->io.info = &(addrs[intf_num]); | 1729 | break; |
1794 | } else if (ipmi_data->addr_space == 2) { | 1730 | |
1795 | io_type = "I/O"; | 1731 | case IPMI_IO_ADDR_SPACE: |
1796 | info->io_setup = port_setup; | 1732 | info->io_setup = port_setup; |
1797 | ports[intf_num] = ipmi_data->base_addr; | 1733 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1798 | info->io.info = &(ports[intf_num]); | 1734 | break; |
1799 | } else { | 1735 | |
1736 | default: | ||
1800 | kfree(info); | 1737 | kfree(info); |
1801 | printk("ipmi_si: Unknown SMBIOS I/O Address type.\n"); | 1738 | printk(KERN_WARNING |
1802 | return -EIO; | 1739 | "ipmi_si: Unknown SMBIOS I/O Address type: %d.\n", |
1740 | ipmi_data->addr_space); | ||
1741 | return; | ||
1803 | } | 1742 | } |
1743 | info->io.addr_data = ipmi_data->base_addr; | ||
1804 | 1744 | ||
1805 | regspacings[intf_num] = ipmi_data->offset; | 1745 | info->io.regspacing = ipmi_data->offset; |
1806 | info->io.regspacing = regspacings[intf_num]; | 1746 | if (!info->io.regspacing) |
1807 | if (! info->io.regspacing) | ||
1808 | info->io.regspacing = DEFAULT_REGSPACING; | 1747 | info->io.regspacing = DEFAULT_REGSPACING; |
1809 | info->io.regsize = DEFAULT_REGSPACING; | 1748 | info->io.regsize = DEFAULT_REGSPACING; |
1810 | info->io.regshift = regshifts[intf_num]; | 1749 | info->io.regshift = 0; |
1811 | 1750 | ||
1812 | info->slave_addr = ipmi_data->slave_addr; | 1751 | info->slave_addr = ipmi_data->slave_addr; |
1813 | 1752 | ||
1814 | irqs[intf_num] = ipmi_data->irq; | 1753 | info->irq = ipmi_data->irq; |
1754 | if (info->irq) | ||
1755 | info->irq_setup = std_irq_setup; | ||
1815 | 1756 | ||
1816 | *new_info = info; | 1757 | try_smi_init(info); |
1758 | } | ||
1817 | 1759 | ||
1818 | printk("ipmi_si: Found SMBIOS-specified state machine at %s" | 1760 | static void __devinit dmi_find_bmc(void) |
1819 | " address 0x%lx, slave address 0x%x\n", | 1761 | { |
1820 | io_type, (unsigned long)ipmi_data->base_addr, | 1762 | struct dmi_device *dev = NULL; |
1821 | ipmi_data->slave_addr); | 1763 | struct dmi_ipmi_data data; |
1822 | return 0; | 1764 | int rv; |
1765 | |||
1766 | while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { | ||
1767 | rv = decode_dmi((struct dmi_header *) dev->device_data, &data); | ||
1768 | if (!rv) | ||
1769 | try_init_dmi(&data); | ||
1770 | } | ||
1823 | } | 1771 | } |
1824 | #endif /* CONFIG_DMI */ | 1772 | #endif /* CONFIG_DMI */ |
1825 | 1773 | ||
1826 | #ifdef CONFIG_PCI | 1774 | #ifdef CONFIG_PCI |
1827 | 1775 | ||
1828 | #define PCI_ERMC_CLASSCODE 0x0C0700 | 1776 | #define PCI_ERMC_CLASSCODE 0x0C0700 |
1777 | #define PCI_ERMC_CLASSCODE_MASK 0xffffff00 | ||
1778 | #define PCI_ERMC_CLASSCODE_TYPE_MASK 0xff | ||
1779 | #define PCI_ERMC_CLASSCODE_TYPE_SMIC 0x00 | ||
1780 | #define PCI_ERMC_CLASSCODE_TYPE_KCS 0x01 | ||
1781 | #define PCI_ERMC_CLASSCODE_TYPE_BT 0x02 | ||
1782 | |||
1829 | #define PCI_HP_VENDOR_ID 0x103C | 1783 | #define PCI_HP_VENDOR_ID 0x103C |
1830 | #define PCI_MMC_DEVICE_ID 0x121A | 1784 | #define PCI_MMC_DEVICE_ID 0x121A |
1831 | #define PCI_MMC_ADDR_CW 0x10 | 1785 | #define PCI_MMC_ADDR_CW 0x10 |
1832 | 1786 | ||
1833 | /* Avoid more than one attempt to probe pci smic. */ | 1787 | static void ipmi_pci_cleanup(struct smi_info *info) |
1834 | static int pci_smic_checked = 0; | 1788 | { |
1789 | struct pci_dev *pdev = info->addr_source_data; | ||
1790 | |||
1791 | pci_disable_device(pdev); | ||
1792 | } | ||
1835 | 1793 | ||
1836 | static int find_pci_smic(int intf_num, struct smi_info **new_info) | 1794 | static int __devinit ipmi_pci_probe(struct pci_dev *pdev, |
1795 | const struct pci_device_id *ent) | ||
1837 | { | 1796 | { |
1838 | struct smi_info *info; | 1797 | int rv; |
1839 | int error; | 1798 | int class_type = pdev->class & PCI_ERMC_CLASSCODE_TYPE_MASK; |
1840 | struct pci_dev *pci_dev = NULL; | 1799 | struct smi_info *info; |
1841 | u16 base_addr; | 1800 | int first_reg_offset = 0; |
1842 | int fe_rmc = 0; | ||
1843 | 1801 | ||
1844 | if (pci_smic_checked) | 1802 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
1845 | return -ENODEV; | 1803 | if (!info) |
1804 | return -ENOMEM; | ||
1846 | 1805 | ||
1847 | pci_smic_checked = 1; | 1806 | info->addr_source = "PCI"; |
1848 | 1807 | ||
1849 | pci_dev = pci_get_device(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID, NULL); | 1808 | switch (class_type) { |
1850 | if (! pci_dev) { | 1809 | case PCI_ERMC_CLASSCODE_TYPE_SMIC: |
1851 | pci_dev = pci_get_class(PCI_ERMC_CLASSCODE, NULL); | 1810 | info->si_type = SI_SMIC; |
1852 | if (pci_dev && (pci_dev->subsystem_vendor == PCI_HP_VENDOR_ID)) | 1811 | break; |
1853 | fe_rmc = 1; | ||
1854 | else | ||
1855 | return -ENODEV; | ||
1856 | } | ||
1857 | 1812 | ||
1858 | error = pci_read_config_word(pci_dev, PCI_MMC_ADDR_CW, &base_addr); | 1813 | case PCI_ERMC_CLASSCODE_TYPE_KCS: |
1859 | if (error) | 1814 | info->si_type = SI_KCS; |
1860 | { | 1815 | break; |
1861 | pci_dev_put(pci_dev); | 1816 | |
1862 | printk(KERN_ERR | 1817 | case PCI_ERMC_CLASSCODE_TYPE_BT: |
1863 | "ipmi_si: pci_read_config_word() failed (%d).\n", | 1818 | info->si_type = SI_BT; |
1864 | error); | 1819 | break; |
1865 | return -ENODEV; | 1820 | |
1821 | default: | ||
1822 | kfree(info); | ||
1823 | printk(KERN_INFO "ipmi_si: %s: Unknown IPMI type: %d\n", | ||
1824 | pci_name(pdev), class_type); | ||
1825 | return -EINVAL; | ||
1866 | } | 1826 | } |
1867 | 1827 | ||
1868 | /* Bit 0: 1 specifies programmed I/O, 0 specifies memory mapped I/O */ | 1828 | rv = pci_enable_device(pdev); |
1869 | if (! (base_addr & 0x0001)) | 1829 | if (rv) { |
1870 | { | 1830 | printk(KERN_ERR "ipmi_si: %s: couldn't enable PCI device\n", |
1871 | pci_dev_put(pci_dev); | 1831 | pci_name(pdev)); |
1872 | printk(KERN_ERR | 1832 | kfree(info); |
1873 | "ipmi_si: memory mapped I/O not supported for PCI" | 1833 | return rv; |
1874 | " smic.\n"); | ||
1875 | return -ENODEV; | ||
1876 | } | 1834 | } |
1877 | 1835 | ||
1878 | base_addr &= 0xFFFE; | 1836 | info->addr_source_cleanup = ipmi_pci_cleanup; |
1879 | if (! fe_rmc) | 1837 | info->addr_source_data = pdev; |
1880 | /* Data register starts at base address + 1 in eRMC */ | ||
1881 | ++base_addr; | ||
1882 | 1838 | ||
1883 | if (! is_new_interface(-1, IPMI_IO_ADDR_SPACE, base_addr)) { | 1839 | if (pdev->subsystem_vendor == PCI_HP_VENDOR_ID) |
1884 | pci_dev_put(pci_dev); | 1840 | first_reg_offset = 1; |
1885 | return -ENODEV; | ||
1886 | } | ||
1887 | 1841 | ||
1888 | info = kmalloc(sizeof(*info), GFP_KERNEL); | 1842 | if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) { |
1889 | if (! info) { | 1843 | info->io_setup = port_setup; |
1890 | pci_dev_put(pci_dev); | 1844 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1891 | printk(KERN_ERR "ipmi_si: Could not allocate SI data (5)\n"); | 1845 | } else { |
1892 | return -ENOMEM; | 1846 | info->io_setup = mem_setup; |
1847 | info->io.addr_type = IPMI_MEM_ADDR_SPACE; | ||
1893 | } | 1848 | } |
1894 | memset(info, 0, sizeof(*info)); | 1849 | info->io.addr_data = pci_resource_start(pdev, 0); |
1895 | 1850 | ||
1896 | info->io_setup = port_setup; | 1851 | info->io.regspacing = DEFAULT_REGSPACING; |
1897 | ports[intf_num] = base_addr; | ||
1898 | info->io.info = &(ports[intf_num]); | ||
1899 | info->io.regspacing = regspacings[intf_num]; | ||
1900 | if (! info->io.regspacing) | ||
1901 | info->io.regspacing = DEFAULT_REGSPACING; | ||
1902 | info->io.regsize = DEFAULT_REGSPACING; | 1852 | info->io.regsize = DEFAULT_REGSPACING; |
1903 | info->io.regshift = regshifts[intf_num]; | 1853 | info->io.regshift = 0; |
1904 | 1854 | ||
1905 | *new_info = info; | 1855 | info->irq = pdev->irq; |
1856 | if (info->irq) | ||
1857 | info->irq_setup = std_irq_setup; | ||
1906 | 1858 | ||
1907 | irqs[intf_num] = pci_dev->irq; | 1859 | info->dev = &pdev->dev; |
1908 | si_type[intf_num] = "smic"; | ||
1909 | 1860 | ||
1910 | printk("ipmi_si: Found PCI SMIC at I/O address 0x%lx\n", | 1861 | return try_smi_init(info); |
1911 | (long unsigned int) base_addr); | 1862 | } |
1912 | 1863 | ||
1913 | pci_dev_put(pci_dev); | 1864 | static void __devexit ipmi_pci_remove(struct pci_dev *pdev) |
1865 | { | ||
1866 | } | ||
1867 | |||
1868 | #ifdef CONFIG_PM | ||
1869 | static int ipmi_pci_suspend(struct pci_dev *pdev, pm_message_t state) | ||
1870 | { | ||
1914 | return 0; | 1871 | return 0; |
1915 | } | 1872 | } |
1916 | #endif /* CONFIG_PCI */ | ||
1917 | 1873 | ||
1918 | static int try_init_plug_and_play(int intf_num, struct smi_info **new_info) | 1874 | static int ipmi_pci_resume(struct pci_dev *pdev) |
1919 | { | 1875 | { |
1920 | #ifdef CONFIG_PCI | 1876 | return 0; |
1921 | if (find_pci_smic(intf_num, new_info) == 0) | 1877 | } |
1922 | return 0; | ||
1923 | #endif | 1878 | #endif |
1924 | /* Include other methods here. */ | ||
1925 | 1879 | ||
1926 | return -ENODEV; | 1880 | static struct pci_device_id ipmi_pci_devices[] = { |
1927 | } | 1881 | { PCI_DEVICE(PCI_HP_VENDOR_ID, PCI_MMC_DEVICE_ID) }, |
1882 | { PCI_DEVICE_CLASS(PCI_ERMC_CLASSCODE, PCI_ERMC_CLASSCODE_MASK) } | ||
1883 | }; | ||
1884 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); | ||
1885 | |||
1886 | static struct pci_driver ipmi_pci_driver = { | ||
1887 | .name = DEVICE_NAME, | ||
1888 | .id_table = ipmi_pci_devices, | ||
1889 | .probe = ipmi_pci_probe, | ||
1890 | .remove = __devexit_p(ipmi_pci_remove), | ||
1891 | #ifdef CONFIG_PM | ||
1892 | .suspend = ipmi_pci_suspend, | ||
1893 | .resume = ipmi_pci_resume, | ||
1894 | #endif | ||
1895 | }; | ||
1896 | #endif /* CONFIG_PCI */ | ||
1928 | 1897 | ||
1929 | 1898 | ||
1930 | static int try_get_dev_id(struct smi_info *smi_info) | 1899 | static int try_get_dev_id(struct smi_info *smi_info) |
1931 | { | 1900 | { |
1932 | unsigned char msg[2]; | 1901 | unsigned char msg[2]; |
1933 | unsigned char *resp; | 1902 | unsigned char *resp; |
1934 | unsigned long resp_len; | 1903 | unsigned long resp_len; |
1935 | enum si_sm_result smi_result; | 1904 | enum si_sm_result smi_result; |
1936 | int rv = 0; | 1905 | int rv = 0; |
1937 | 1906 | ||
1938 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); | 1907 | resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); |
1939 | if (! resp) | 1908 | if (!resp) |
1940 | return -ENOMEM; | 1909 | return -ENOMEM; |
1941 | 1910 | ||
1942 | /* Do a Get Device ID command, since it comes back with some | 1911 | /* Do a Get Device ID command, since it comes back with some |
@@ -1972,7 +1941,7 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
1972 | /* Otherwise, we got some data. */ | 1941 | /* Otherwise, we got some data. */ |
1973 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, | 1942 | resp_len = smi_info->handlers->get_result(smi_info->si_sm, |
1974 | resp, IPMI_MAX_MSG_LENGTH); | 1943 | resp, IPMI_MAX_MSG_LENGTH); |
1975 | if (resp_len < 6) { | 1944 | if (resp_len < 14) { |
1976 | /* That's odd, it should be longer. */ | 1945 | /* That's odd, it should be longer. */ |
1977 | rv = -EINVAL; | 1946 | rv = -EINVAL; |
1978 | goto out; | 1947 | goto out; |
@@ -1985,8 +1954,7 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
1985 | } | 1954 | } |
1986 | 1955 | ||
1987 | /* Record info from the get device id, in case we need it. */ | 1956 | /* Record info from the get device id, in case we need it. */ |
1988 | memcpy(&smi_info->device_id, &resp[3], | 1957 | ipmi_demangle_device_id(resp+3, resp_len-3, &smi_info->device_id); |
1989 | min_t(unsigned long, resp_len-3, sizeof(smi_info->device_id))); | ||
1990 | 1958 | ||
1991 | out: | 1959 | out: |
1992 | kfree(resp); | 1960 | kfree(resp); |
@@ -2018,7 +1986,7 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
2018 | struct smi_info *smi = data; | 1986 | struct smi_info *smi = data; |
2019 | 1987 | ||
2020 | out += sprintf(out, "interrupts_enabled: %d\n", | 1988 | out += sprintf(out, "interrupts_enabled: %d\n", |
2021 | smi->irq && ! smi->interrupt_disabled); | 1989 | smi->irq && !smi->interrupt_disabled); |
2022 | out += sprintf(out, "short_timeouts: %ld\n", | 1990 | out += sprintf(out, "short_timeouts: %ld\n", |
2023 | smi->short_timeouts); | 1991 | smi->short_timeouts); |
2024 | out += sprintf(out, "long_timeouts: %ld\n", | 1992 | out += sprintf(out, "long_timeouts: %ld\n", |
@@ -2089,15 +2057,14 @@ static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) | |||
2089 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 | 2057 | #define DELL_POWEREDGE_8G_BMC_DEVICE_ID 0x20 |
2090 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 | 2058 | #define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80 |
2091 | #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 | 2059 | #define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51 |
2092 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} | 2060 | #define DELL_IANA_MFR_ID 0x0002a2 |
2093 | static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) | 2061 | static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) |
2094 | { | 2062 | { |
2095 | struct ipmi_device_id *id = &smi_info->device_id; | 2063 | struct ipmi_device_id *id = &smi_info->device_id; |
2096 | const char mfr[3]=DELL_IANA_MFR_ID; | 2064 | if (id->manufacturer_id == DELL_IANA_MFR_ID) { |
2097 | if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr))) { | ||
2098 | if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && | 2065 | if (id->device_id == DELL_POWEREDGE_8G_BMC_DEVICE_ID && |
2099 | id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && | 2066 | id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV && |
2100 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { | 2067 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { |
2101 | smi_info->oem_data_avail_handler = | 2068 | smi_info->oem_data_avail_handler = |
2102 | oem_data_avail_to_receive_msg_avail; | 2069 | oem_data_avail_to_receive_msg_avail; |
2103 | } | 2070 | } |
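
The Dell OEM hunk above replaces a three-byte manufacturer-ID array compare with a single integer compare against DELL_IANA_MFR_ID (0x0002a2). The IANA enterprise number is carried LSB-first, so the old {0xA2, 0x02, 0x00} initializer and the new constant name the same vendor. A minimal userspace sketch of that packing (illustrative only, not driver code):

#include <stdio.h>
#include <stdint.h>

/* Pack a three-byte, LSB-first IANA manufacturer ID into one integer. */
static uint32_t pack_mfr_id(const uint8_t id[3])
{
	return id[0] | (id[1] << 8) | ((uint32_t)id[2] << 16);
}

int main(void)
{
	const uint8_t dell[3] = { 0xA2, 0x02, 0x00 };

	printf("0x%06x\n", pack_mfr_id(dell));	/* prints 0x0002a2 */
	return 0;
}
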
@@ -2169,8 +2136,7 @@ static void | |||
2169 | setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) | 2136 | setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info) |
2170 | { | 2137 | { |
2171 | struct ipmi_device_id *id = &smi_info->device_id; | 2138 | struct ipmi_device_id *id = &smi_info->device_id; |
2172 | const char mfr[3]=DELL_IANA_MFR_ID; | 2139 | if (id->manufacturer_id == DELL_IANA_MFR_ID && |
2173 | if (! memcmp(mfr, id->manufacturer_id, sizeof(mfr)) && | ||
2174 | smi_info->si_type == SI_BT) | 2140 | smi_info->si_type == SI_BT) |
2175 | register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); | 2141 | register_xaction_notifier(&dell_poweredge_bt_xaction_notifier); |
2176 | } | 2142 | } |
@@ -2200,62 +2166,110 @@ static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | |||
2200 | del_timer_sync(&smi_info->si_timer); | 2166 | del_timer_sync(&smi_info->si_timer); |
2201 | } | 2167 | } |
2202 | 2168 | ||
2203 | /* Returns 0 if initialized, or negative on an error. */ | 2169 | static struct ipmi_default_vals |
2204 | static int init_one_smi(int intf_num, struct smi_info **smi) | ||
2205 | { | 2170 | { |
2206 | int rv; | 2171 | int type; |
2207 | struct smi_info *new_smi; | 2172 | int port; |
2173 | } __devinitdata ipmi_defaults[] = | ||
2174 | { | ||
2175 | { .type = SI_KCS, .port = 0xca2 }, | ||
2176 | { .type = SI_SMIC, .port = 0xca9 }, | ||
2177 | { .type = SI_BT, .port = 0xe4 }, | ||
2178 | { .port = 0 } | ||
2179 | }; | ||
2208 | 2180 | ||
2181 | static __devinit void default_find_bmc(void) | ||
2182 | { | ||
2183 | struct smi_info *info; | ||
2184 | int i; | ||
2209 | 2185 | ||
2210 | rv = try_init_mem(intf_num, &new_smi); | 2186 | for (i = 0; ; i++) { |
2211 | if (rv) | 2187 | if (!ipmi_defaults[i].port) |
2212 | rv = try_init_port(intf_num, &new_smi); | 2188 | break; |
2213 | #ifdef CONFIG_ACPI | ||
2214 | if (rv && si_trydefaults) | ||
2215 | rv = try_init_acpi(intf_num, &new_smi); | ||
2216 | #endif | ||
2217 | #ifdef CONFIG_DMI | ||
2218 | if (rv && si_trydefaults) | ||
2219 | rv = try_init_smbios(intf_num, &new_smi); | ||
2220 | #endif | ||
2221 | if (rv && si_trydefaults) | ||
2222 | rv = try_init_plug_and_play(intf_num, &new_smi); | ||
2223 | 2189 | ||
2224 | if (rv) | 2190 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
2225 | return rv; | 2191 | if (!info) |
2192 | return; | ||
2226 | 2193 | ||
2227 | /* So we know not to free it unless we have allocated one. */ | 2194 | info->addr_source = NULL; |
2228 | new_smi->intf = NULL; | ||
2229 | new_smi->si_sm = NULL; | ||
2230 | new_smi->handlers = NULL; | ||
2231 | 2195 | ||
2232 | if (! new_smi->irq_setup) { | 2196 | info->si_type = ipmi_defaults[i].type; |
2233 | new_smi->irq = irqs[intf_num]; | 2197 | info->io_setup = port_setup; |
2234 | new_smi->irq_setup = std_irq_setup; | 2198 | info->io.addr_data = ipmi_defaults[i].port; |
2235 | new_smi->irq_cleanup = std_irq_cleanup; | 2199 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
2236 | } | ||
2237 | 2200 | ||
2238 | /* Default to KCS if no type is specified. */ | 2201 | info->io.addr = NULL; |
2239 | if (si_type[intf_num] == NULL) { | 2202 | info->io.regspacing = DEFAULT_REGSPACING; |
2240 | if (si_trydefaults) | 2203 | info->io.regsize = DEFAULT_REGSPACING; |
2241 | si_type[intf_num] = "kcs"; | 2204 | info->io.regshift = 0; |
2242 | else { | 2205 | |
2243 | rv = -EINVAL; | 2206 | if (try_smi_init(info) == 0) { |
2244 | goto out_err; | 2207 | /* Found one... */ |
2208 | printk(KERN_INFO "ipmi_si: Found default %s state" | ||
2209 | " machine at %s address 0x%lx\n", | ||
2210 | si_to_str[info->si_type], | ||
2211 | addr_space_to_str[info->io.addr_type], | ||
2212 | info->io.addr_data); | ||
2213 | return; | ||
2245 | } | 2214 | } |
2246 | } | 2215 | } |
2216 | } | ||
2217 | |||
2218 | static int is_new_interface(struct smi_info *info) | ||
2219 | { | ||
2220 | struct smi_info *e; | ||
2221 | |||
2222 | list_for_each_entry(e, &smi_infos, link) { | ||
2223 | if (e->io.addr_type != info->io.addr_type) | ||
2224 | continue; | ||
2225 | if (e->io.addr_data == info->io.addr_data) | ||
2226 | return 0; | ||
2227 | } | ||
2228 | |||
2229 | return 1; | ||
2230 | } | ||
2231 | |||
2232 | static int try_smi_init(struct smi_info *new_smi) | ||
2233 | { | ||
2234 | int rv; | ||
2235 | |||
2236 | if (new_smi->addr_source) { | ||
2237 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" | ||
2238 | " machine at %s address 0x%lx, slave address 0x%x," | ||
2239 | " irq %d\n", | ||
2240 | new_smi->addr_source, | ||
2241 | si_to_str[new_smi->si_type], | ||
2242 | addr_space_to_str[new_smi->io.addr_type], | ||
2243 | new_smi->io.addr_data, | ||
2244 | new_smi->slave_addr, new_smi->irq); | ||
2245 | } | ||
2246 | |||
2247 | down(&smi_infos_lock); | ||
2248 | if (!is_new_interface(new_smi)) { | ||
2249 | printk(KERN_WARNING "ipmi_si: duplicate interface\n"); | ||
2250 | rv = -EBUSY; | ||
2251 | goto out_err; | ||
2252 | } | ||
2247 | 2253 | ||
2248 | /* Set up the state machine to use. */ | 2254 | /* So we know not to free it unless we have allocated one. */ |
2249 | if (strcmp(si_type[intf_num], "kcs") == 0) { | 2255 | new_smi->intf = NULL; |
2256 | new_smi->si_sm = NULL; | ||
2257 | new_smi->handlers = NULL; | ||
2258 | |||
2259 | switch (new_smi->si_type) { | ||
2260 | case SI_KCS: | ||
2250 | new_smi->handlers = &kcs_smi_handlers; | 2261 | new_smi->handlers = &kcs_smi_handlers; |
2251 | new_smi->si_type = SI_KCS; | 2262 | break; |
2252 | } else if (strcmp(si_type[intf_num], "smic") == 0) { | 2263 | |
2264 | case SI_SMIC: | ||
2253 | new_smi->handlers = &smic_smi_handlers; | 2265 | new_smi->handlers = &smic_smi_handlers; |
2254 | new_smi->si_type = SI_SMIC; | 2266 | break; |
2255 | } else if (strcmp(si_type[intf_num], "bt") == 0) { | 2267 | |
2268 | case SI_BT: | ||
2256 | new_smi->handlers = &bt_smi_handlers; | 2269 | new_smi->handlers = &bt_smi_handlers; |
2257 | new_smi->si_type = SI_BT; | 2270 | break; |
2258 | } else { | 2271 | |
2272 | default: | ||
2259 | /* No support for anything else yet. */ | 2273 | /* No support for anything else yet. */ |
2260 | rv = -EIO; | 2274 | rv = -EIO; |
2261 | goto out_err; | 2275 | goto out_err; |
@@ -2263,7 +2277,7 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2263 | 2277 | ||
2264 | /* Allocate the state machine's data and initialize it. */ | 2278 | /* Allocate the state machine's data and initialize it. */ |
2265 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); | 2279 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); |
2266 | if (! new_smi->si_sm) { | 2280 | if (!new_smi->si_sm) { |
2267 | printk(" Could not allocate state machine memory\n"); | 2281 | printk(" Could not allocate state machine memory\n"); |
2268 | rv = -ENOMEM; | 2282 | rv = -ENOMEM; |
2269 | goto out_err; | 2283 | goto out_err; |
@@ -2284,21 +2298,29 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2284 | 2298 | ||
2285 | /* Do low-level detection first. */ | 2299 | /* Do low-level detection first. */ |
2286 | if (new_smi->handlers->detect(new_smi->si_sm)) { | 2300 | if (new_smi->handlers->detect(new_smi->si_sm)) { |
2301 | if (new_smi->addr_source) | ||
2302 | printk(KERN_INFO "ipmi_si: Interface detection" | ||
2303 | " failed\n"); | ||
2287 | rv = -ENODEV; | 2304 | rv = -ENODEV; |
2288 | goto out_err; | 2305 | goto out_err; |
2289 | } | 2306 | } |
2290 | 2307 | ||
2291 | /* Attempt a get device id command. If it fails, we probably | 2308 | /* Attempt a get device id command. If it fails, we probably |
2292 | don't have a SMI here. */ | 2309 | don't have a BMC here. */ |
2293 | rv = try_get_dev_id(new_smi); | 2310 | rv = try_get_dev_id(new_smi); |
2294 | if (rv) | 2311 | if (rv) { |
2312 | if (new_smi->addr_source) | ||
2313 | printk(KERN_INFO "ipmi_si: There appears to be no BMC" | ||
2314 | " at this location\n"); | ||
2295 | goto out_err; | 2315 | goto out_err; |
2316 | } | ||
2296 | 2317 | ||
2297 | setup_oem_data_handler(new_smi); | 2318 | setup_oem_data_handler(new_smi); |
2298 | setup_xaction_handlers(new_smi); | 2319 | setup_xaction_handlers(new_smi); |
2299 | 2320 | ||
2300 | /* Try to claim any interrupts. */ | 2321 | /* Try to claim any interrupts. */ |
2301 | new_smi->irq_setup(new_smi); | 2322 | if (new_smi->irq_setup) |
2323 | new_smi->irq_setup(new_smi); | ||
2302 | 2324 | ||
2303 | INIT_LIST_HEAD(&(new_smi->xmit_msgs)); | 2325 | INIT_LIST_HEAD(&(new_smi->xmit_msgs)); |
2304 | INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); | 2326 | INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs)); |
@@ -2308,7 +2330,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2308 | 2330 | ||
2309 | new_smi->interrupt_disabled = 0; | 2331 | new_smi->interrupt_disabled = 0; |
2310 | atomic_set(&new_smi->stop_operation, 0); | 2332 | atomic_set(&new_smi->stop_operation, 0); |
2311 | new_smi->intf_num = intf_num; | 2333 | new_smi->intf_num = smi_num; |
2334 | smi_num++; | ||
2312 | 2335 | ||
2313 | /* Start clearing the flags before we enable interrupts or the | 2336 | /* Start clearing the flags before we enable interrupts or the |
2314 | timer to avoid racing with the timer. */ | 2337 | timer to avoid racing with the timer. */ |
@@ -2332,10 +2355,36 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2332 | new_smi->thread = kthread_run(ipmi_thread, new_smi, | 2355 | new_smi->thread = kthread_run(ipmi_thread, new_smi, |
2333 | "kipmi%d", new_smi->intf_num); | 2356 | "kipmi%d", new_smi->intf_num); |
2334 | 2357 | ||
2358 | if (!new_smi->dev) { | ||
2359 | /* If we don't already have a device from something | ||
2360 | * else (like PCI), then register a new one. */ | ||
2361 | new_smi->pdev = platform_device_alloc("ipmi_si", | ||
2362 | new_smi->intf_num); | ||
2363 | if (!new_smi->pdev) { | ||
2364 | printk(KERN_ERR | ||
2365 | "ipmi_si_intf:" | ||
2366 | " Unable to allocate platform device\n"); | ||
2367 | goto out_err_stop_timer; | ||
2368 | } | ||
2369 | new_smi->dev = &new_smi->pdev->dev; | ||
2370 | new_smi->dev->driver = &ipmi_driver; | ||
2371 | |||
2372 | rv = platform_device_register(new_smi->pdev); | ||
2373 | if (rv) { | ||
2374 | printk(KERN_ERR | ||
2375 | "ipmi_si_intf:" | ||
2376 | " Unable to register system interface device:" | ||
2377 | " %d\n", | ||
2378 | rv); | ||
2379 | goto out_err_stop_timer; | ||
2380 | } | ||
2381 | new_smi->dev_registered = 1; | ||
2382 | } | ||
2383 | |||
2335 | rv = ipmi_register_smi(&handlers, | 2384 | rv = ipmi_register_smi(&handlers, |
2336 | new_smi, | 2385 | new_smi, |
2337 | ipmi_version_major(&new_smi->device_id), | 2386 | &new_smi->device_id, |
2338 | ipmi_version_minor(&new_smi->device_id), | 2387 | new_smi->dev, |
2339 | new_smi->slave_addr, | 2388 | new_smi->slave_addr, |
2340 | &(new_smi->intf)); | 2389 | &(new_smi->intf)); |
2341 | if (rv) { | 2390 | if (rv) { |
@@ -2365,9 +2414,11 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2365 | goto out_err_stop_timer; | 2414 | goto out_err_stop_timer; |
2366 | } | 2415 | } |
2367 | 2416 | ||
2368 | *smi = new_smi; | 2417 | list_add_tail(&new_smi->link, &smi_infos); |
2418 | |||
2419 | up(&smi_infos_lock); | ||
2369 | 2420 | ||
2370 | printk(" IPMI %s interface initialized\n", si_type[intf_num]); | 2421 | printk(" IPMI %s interface initialized\n", si_to_str[new_smi->si_type]); |
2371 | 2422 | ||
2372 | return 0; | 2423 | return 0; |
2373 | 2424 | ||
@@ -2379,7 +2430,8 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2379 | if (new_smi->intf) | 2430 | if (new_smi->intf) |
2380 | ipmi_unregister_smi(new_smi->intf); | 2431 | ipmi_unregister_smi(new_smi->intf); |
2381 | 2432 | ||
2382 | new_smi->irq_cleanup(new_smi); | 2433 | if (new_smi->irq_cleanup) |
2434 | new_smi->irq_cleanup(new_smi); | ||
2383 | 2435 | ||
2384 | /* Wait until we know that we are out of any interrupt | 2436 | /* Wait until we know that we are out of any interrupt |
2385 | handlers might have been running before we freed the | 2437 | handlers might have been running before we freed the |
@@ -2391,23 +2443,41 @@ static int init_one_smi(int intf_num, struct smi_info **smi) | |||
2391 | new_smi->handlers->cleanup(new_smi->si_sm); | 2443 | new_smi->handlers->cleanup(new_smi->si_sm); |
2392 | kfree(new_smi->si_sm); | 2444 | kfree(new_smi->si_sm); |
2393 | } | 2445 | } |
2446 | if (new_smi->addr_source_cleanup) | ||
2447 | new_smi->addr_source_cleanup(new_smi); | ||
2394 | if (new_smi->io_cleanup) | 2448 | if (new_smi->io_cleanup) |
2395 | new_smi->io_cleanup(new_smi); | 2449 | new_smi->io_cleanup(new_smi); |
2396 | 2450 | ||
2451 | if (new_smi->dev_registered) | ||
2452 | platform_device_unregister(new_smi->pdev); | ||
2453 | |||
2454 | kfree(new_smi); | ||
2455 | |||
2456 | up(&smi_infos_lock); | ||
2457 | |||
2397 | return rv; | 2458 | return rv; |
2398 | } | 2459 | } |
2399 | 2460 | ||
2400 | static __init int init_ipmi_si(void) | 2461 | static __devinit int init_ipmi_si(void) |
2401 | { | 2462 | { |
2402 | int rv = 0; | ||
2403 | int pos = 0; | ||
2404 | int i; | 2463 | int i; |
2405 | char *str; | 2464 | char *str; |
2465 | int rv; | ||
2406 | 2466 | ||
2407 | if (initialized) | 2467 | if (initialized) |
2408 | return 0; | 2468 | return 0; |
2409 | initialized = 1; | 2469 | initialized = 1; |
2410 | 2470 | ||
2471 | /* Register the device drivers. */ | ||
2472 | rv = driver_register(&ipmi_driver); | ||
2473 | if (rv) { | ||
2474 | printk(KERN_ERR | ||
2475 | "init_ipmi_si: Unable to register driver: %d\n", | ||
2476 | rv); | ||
2477 | return rv; | ||
2478 | } | ||
2479 | |||
2480 | |||
2411 | /* Parse out the si_type string into its components. */ | 2481 | /* Parse out the si_type string into its components. */ |
2412 | str = si_type_str; | 2482 | str = si_type_str; |
2413 | if (*str != '\0') { | 2483 | if (*str != '\0') { |
@@ -2425,63 +2495,66 @@ static __init int init_ipmi_si(void) | |||
2425 | 2495 | ||
2426 | printk(KERN_INFO "IPMI System Interface driver.\n"); | 2496 | printk(KERN_INFO "IPMI System Interface driver.\n"); |
2427 | 2497 | ||
2498 | hardcode_find_bmc(); | ||
2499 | |||
2428 | #ifdef CONFIG_DMI | 2500 | #ifdef CONFIG_DMI |
2429 | dmi_find_bmc(); | 2501 | dmi_find_bmc(); |
2430 | #endif | 2502 | #endif |
2431 | 2503 | ||
2432 | rv = init_one_smi(0, &(smi_infos[pos])); | 2504 | #ifdef CONFIG_ACPI |
2433 | if (rv && ! ports[0] && si_trydefaults) { | 2505 | if (si_trydefaults) |
2434 | /* If we are trying defaults and the initial port is | 2506 | acpi_find_bmc(); |
2435 | not set, then set it. */ | 2507 | #endif |
2436 | si_type[0] = "kcs"; | ||
2437 | ports[0] = DEFAULT_KCS_IO_PORT; | ||
2438 | rv = init_one_smi(0, &(smi_infos[pos])); | ||
2439 | if (rv) { | ||
2440 | /* No KCS - try SMIC */ | ||
2441 | si_type[0] = "smic"; | ||
2442 | ports[0] = DEFAULT_SMIC_IO_PORT; | ||
2443 | rv = init_one_smi(0, &(smi_infos[pos])); | ||
2444 | } | ||
2445 | if (rv) { | ||
2446 | /* No SMIC - try BT */ | ||
2447 | si_type[0] = "bt"; | ||
2448 | ports[0] = DEFAULT_BT_IO_PORT; | ||
2449 | rv = init_one_smi(0, &(smi_infos[pos])); | ||
2450 | } | ||
2451 | } | ||
2452 | if (rv == 0) | ||
2453 | pos++; | ||
2454 | 2508 | ||
2455 | for (i = 1; i < SI_MAX_PARMS; i++) { | 2509 | #ifdef CONFIG_PCI |
2456 | rv = init_one_smi(i, &(smi_infos[pos])); | 2510 | pci_module_init(&ipmi_pci_driver); |
2457 | if (rv == 0) | 2511 | #endif |
2458 | pos++; | 2512 | |
2513 | if (si_trydefaults) { | ||
2514 | down(&smi_infos_lock); | ||
2515 | if (list_empty(&smi_infos)) { | ||
2516 | /* No BMC was found, try defaults. */ | ||
2517 | up(&smi_infos_lock); | ||
2518 | default_find_bmc(); | ||
2519 | } else { | ||
2520 | up(&smi_infos_lock); | ||
2521 | } | ||
2459 | } | 2522 | } |
2460 | 2523 | ||
2461 | if (smi_infos[0] == NULL) { | 2524 | down(&smi_infos_lock); |
2525 | if (list_empty(&smi_infos)) { | ||
2526 | up(&smi_infos_lock); | ||
2527 | #ifdef CONFIG_PCI | ||
2528 | pci_unregister_driver(&ipmi_pci_driver); | ||
2529 | #endif | ||
2462 | printk("ipmi_si: Unable to find any System Interface(s)\n"); | 2530 | printk("ipmi_si: Unable to find any System Interface(s)\n"); |
2463 | return -ENODEV; | 2531 | return -ENODEV; |
2532 | } else { | ||
2533 | up(&smi_infos_lock); | ||
2534 | return 0; | ||
2464 | } | 2535 | } |
2465 | |||
2466 | return 0; | ||
2467 | } | 2536 | } |
2468 | module_init(init_ipmi_si); | 2537 | module_init(init_ipmi_si); |
2469 | 2538 | ||
2470 | static void __exit cleanup_one_si(struct smi_info *to_clean) | 2539 | static void __devexit cleanup_one_si(struct smi_info *to_clean) |
2471 | { | 2540 | { |
2472 | int rv; | 2541 | int rv; |
2473 | unsigned long flags; | 2542 | unsigned long flags; |
2474 | 2543 | ||
2475 | if (! to_clean) | 2544 | if (!to_clean) |
2476 | return; | 2545 | return; |
2477 | 2546 | ||
2547 | list_del(&to_clean->link); | ||
2548 | |||
2478 | /* Tell the timer and interrupt handlers that we are shutting | 2549 | /* Tell the timer and interrupt handlers that we are shutting |
2479 | down. */ | 2550 | down. */ |
2480 | spin_lock_irqsave(&(to_clean->si_lock), flags); | 2551 | spin_lock_irqsave(&(to_clean->si_lock), flags); |
2481 | spin_lock(&(to_clean->msg_lock)); | 2552 | spin_lock(&(to_clean->msg_lock)); |
2482 | 2553 | ||
2483 | atomic_inc(&to_clean->stop_operation); | 2554 | atomic_inc(&to_clean->stop_operation); |
2484 | to_clean->irq_cleanup(to_clean); | 2555 | |
2556 | if (to_clean->irq_cleanup) | ||
2557 | to_clean->irq_cleanup(to_clean); | ||
2485 | 2558 | ||
2486 | spin_unlock(&(to_clean->msg_lock)); | 2559 | spin_unlock(&(to_clean->msg_lock)); |
2487 | spin_unlock_irqrestore(&(to_clean->si_lock), flags); | 2560 | spin_unlock_irqrestore(&(to_clean->si_lock), flags); |
@@ -2511,20 +2584,34 @@ static void __exit cleanup_one_si(struct smi_info *to_clean) | |||
2511 | 2584 | ||
2512 | kfree(to_clean->si_sm); | 2585 | kfree(to_clean->si_sm); |
2513 | 2586 | ||
2587 | if (to_clean->addr_source_cleanup) | ||
2588 | to_clean->addr_source_cleanup(to_clean); | ||
2514 | if (to_clean->io_cleanup) | 2589 | if (to_clean->io_cleanup) |
2515 | to_clean->io_cleanup(to_clean); | 2590 | to_clean->io_cleanup(to_clean); |
2591 | |||
2592 | if (to_clean->dev_registered) | ||
2593 | platform_device_unregister(to_clean->pdev); | ||
2594 | |||
2595 | kfree(to_clean); | ||
2516 | } | 2596 | } |
2517 | 2597 | ||
2518 | static __exit void cleanup_ipmi_si(void) | 2598 | static __exit void cleanup_ipmi_si(void) |
2519 | { | 2599 | { |
2520 | int i; | 2600 | struct smi_info *e, *tmp_e; |
2521 | 2601 | ||
2522 | if (! initialized) | 2602 | if (!initialized) |
2523 | return; | 2603 | return; |
2524 | 2604 | ||
2525 | for (i = 0; i < SI_MAX_DRIVERS; i++) { | 2605 | #ifdef CONFIG_PCI |
2526 | cleanup_one_si(smi_infos[i]); | 2606 | pci_unregister_driver(&ipmi_pci_driver); |
2527 | } | 2607 | #endif |
2608 | |||
2609 | down(&smi_infos_lock); | ||
2610 | list_for_each_entry_safe(e, tmp_e, &smi_infos, link) | ||
2611 | cleanup_one_si(e); | ||
2612 | up(&smi_infos_lock); | ||
2613 | |||
2614 | driver_unregister(&ipmi_driver); | ||
2528 | } | 2615 | } |
2529 | module_exit(cleanup_ipmi_si); | 2616 | module_exit(cleanup_ipmi_si); |
2530 | 2617 | ||
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index bf3d4962d6a5..4b731b24dc16 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h | |||
@@ -50,11 +50,12 @@ struct si_sm_io | |||
50 | 50 | ||
51 | /* Generic info used by the actual handling routines, the | 51 | /* Generic info used by the actual handling routines, the |
52 | state machine shouldn't touch these. */ | 52 | state machine shouldn't touch these. */ |
53 | void *info; | ||
54 | void __iomem *addr; | 53 | void __iomem *addr; |
55 | int regspacing; | 54 | int regspacing; |
56 | int regsize; | 55 | int regsize; |
57 | int regshift; | 56 | int regshift; |
57 | int addr_type; | ||
58 | long addr_data; | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | /* Results of SMI events. */ | 61 | /* Results of SMI events. */ |
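
The ipmi_si_sm.h hunk removes the opaque void *info pointer: an interface's registers are now described generically by addr_type plus addr_data, which is what allows every discovery path in the patch to fill in one structure the same way. A hedged sketch of consuming such a descriptor (the constants' values here are illustrative, not the driver's):

#include <stdio.h>

#define IPMI_MEM_ADDR_SPACE	1	/* illustrative values */
#define IPMI_IO_ADDR_SPACE	2

struct si_sm_io {
	int addr_type;		/* which address space */
	long addr_data;		/* base address in that space */
	int regspacing, regsize, regshift;
};

/* Pick an access method from the generic descriptor alone. */
static const char *access_method(const struct si_sm_io *io)
{
	switch (io->addr_type) {
	case IPMI_IO_ADDR_SPACE:
		return "port I/O";
	case IPMI_MEM_ADDR_SPACE:
		return "memory-mapped I/O";
	default:
		return "unknown";
	}
}

int main(void)
{
	struct si_sm_io io = { IPMI_IO_ADDR_SPACE, 0xca2, 1, 1, 0 };

	printf("0x%lx via %s\n", io.addr_data, access_method(&io));
	return 0;
}
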
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 1f3159eb1ede..616539310d9a 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -996,7 +996,7 @@ static struct notifier_block wdog_panic_notifier = { | |||
996 | }; | 996 | }; |
997 | 997 | ||
998 | 998 | ||
999 | static void ipmi_new_smi(int if_num) | 999 | static void ipmi_new_smi(int if_num, struct device *device) |
1000 | { | 1000 | { |
1001 | ipmi_register_watchdog(if_num); | 1001 | ipmi_register_watchdog(if_num); |
1002 | } | 1002 | } |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index 26d0116b48d4..5245ba1649ed 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -88,21 +88,15 @@ static inline int uncached_access(struct file *file, unsigned long addr) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE | 90 | #ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE |
91 | static inline int valid_phys_addr_range(unsigned long addr, size_t *count) | 91 | static inline int valid_phys_addr_range(unsigned long addr, size_t count) |
92 | { | 92 | { |
93 | unsigned long end_mem; | 93 | if (addr + count > __pa(high_memory)) |
94 | |||
95 | end_mem = __pa(high_memory); | ||
96 | if (addr >= end_mem) | ||
97 | return 0; | 94 | return 0; |
98 | 95 | ||
99 | if (*count > end_mem - addr) | ||
100 | *count = end_mem - addr; | ||
101 | |||
102 | return 1; | 96 | return 1; |
103 | } | 97 | } |
104 | 98 | ||
105 | static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t *size) | 99 | static inline int valid_mmap_phys_addr_range(unsigned long addr, size_t size) |
106 | { | 100 | { |
107 | return 1; | 101 | return 1; |
108 | } | 102 | } |
@@ -119,7 +113,7 @@ static ssize_t read_mem(struct file * file, char __user * buf, | |||
119 | ssize_t read, sz; | 113 | ssize_t read, sz; |
120 | char *ptr; | 114 | char *ptr; |
121 | 115 | ||
122 | if (!valid_phys_addr_range(p, &count)) | 116 | if (!valid_phys_addr_range(p, count)) |
123 | return -EFAULT; | 117 | return -EFAULT; |
124 | read = 0; | 118 | read = 0; |
125 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED | 119 | #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED |
@@ -177,7 +171,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf, | |||
177 | unsigned long copied; | 171 | unsigned long copied; |
178 | void *ptr; | 172 | void *ptr; |
179 | 173 | ||
180 | if (!valid_phys_addr_range(p, &count)) | 174 | if (!valid_phys_addr_range(p, count)) |
181 | return -EFAULT; | 175 | return -EFAULT; |
182 | 176 | ||
183 | written = 0; | 177 | written = 0; |
@@ -249,7 +243,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma) | |||
249 | { | 243 | { |
250 | size_t size = vma->vm_end - vma->vm_start; | 244 | size_t size = vma->vm_end - vma->vm_start; |
251 | 245 | ||
252 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, &size)) | 246 | if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size)) |
253 | return -EINVAL; | 247 | return -EINVAL; |
254 | 248 | ||
255 | vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, | 249 | vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, |
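
The mem.c hunks change valid_phys_addr_range() from silently trimming the byte count to rejecting any access that would run past high_memory, and the count/size is now passed by value. A small standalone sketch contrasting the two behaviours (PHYS_LIMIT is a made-up stand-in for __pa(high_memory); the strict form assumes addr + count does not wrap):

#include <stdio.h>
#include <stddef.h>

#define PHYS_LIMIT 0x10000000UL	/* illustrative physical memory limit */

/* Old behaviour: trim the request so it fits below the limit. */
static int valid_clamped(unsigned long addr, size_t *count)
{
	if (addr >= PHYS_LIMIT)
		return 0;
	if (*count > PHYS_LIMIT - addr)
		*count = PHYS_LIMIT - addr;
	return 1;
}

/* New behaviour: refuse anything that crosses the limit. */
static int valid_strict(unsigned long addr, size_t count)
{
	if (addr + count > PHYS_LIMIT)
		return 0;
	return 1;
}

int main(void)
{
	size_t count = 0x2000;

	printf("clamped: %d, count now 0x%zx\n",
	       valid_clamped(PHYS_LIMIT - 0x1000, &count), count);
	printf("strict:  %d\n", valid_strict(PHYS_LIMIT - 0x1000, 0x2000));
	return 0;
}
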
diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 4c272189cd42..2546637a55c0 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c | |||
@@ -767,6 +767,7 @@ static int __init tlclk_init(void) | |||
767 | printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); | 767 | printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major); |
768 | return ret; | 768 | return ret; |
769 | } | 769 | } |
770 | tlclk_major = ret; | ||
770 | alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); | 771 | alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL); |
771 | if (!alarm_events) | 772 | if (!alarm_events) |
772 | goto out1; | 773 | goto out1; |
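
The one-line tlclk change matters when tlclk_major starts out as 0: register_chrdev() then allocates a free major number and returns it, so the driver has to record the return value before creating device nodes or unregistering. A hedged mock-up of that pattern (mock_register_chrdev() and the extra zero check are illustrative, not the kernel API):

#include <stdio.h>

/* Mocked register_chrdev(): with major == 0 the kernel picks a free
 * major and returns it; with a fixed major it returns 0 on success. */
static int mock_register_chrdev(unsigned int major)
{
	return major ? 0 : 254;	/* pretend major 254 was handed out */
}

int main(void)
{
	int tlclk_major = 0;	/* 0 requests dynamic allocation */
	int ret = mock_register_chrdev(tlclk_major);

	if (ret < 0)
		return 1;
	if (tlclk_major == 0)
		tlclk_major = ret;	/* remember the dynamic major */

	printf("using major %d\n", tlclk_major);
	return 0;
}
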
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 52f3eb45d2b9..b582d0cdc24f 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -64,35 +64,35 @@ config EDAC_AMD76X | |||
64 | 64 | ||
65 | config EDAC_E7XXX | 65 | config EDAC_E7XXX |
66 | tristate "Intel e7xxx (e7205, e7500, e7501, e7505)" | 66 | tristate "Intel e7xxx (e7205, e7500, e7501, e7505)" |
67 | depends on EDAC_MM_EDAC && PCI | 67 | depends on EDAC_MM_EDAC && PCI && X86_32 |
68 | help | 68 | help |
69 | Support for error detection and correction on the Intel | 69 | Support for error detection and correction on the Intel |
70 | E7205, E7500, E7501 and E7505 server chipsets. | 70 | E7205, E7500, E7501 and E7505 server chipsets. |
71 | 71 | ||
72 | config EDAC_E752X | 72 | config EDAC_E752X |
73 | tristate "Intel e752x (e7520, e7525, e7320)" | 73 | tristate "Intel e752x (e7520, e7525, e7320)" |
74 | depends on EDAC_MM_EDAC && PCI | 74 | depends on EDAC_MM_EDAC && PCI && X86 |
75 | help | 75 | help |
76 | Support for error detection and correction on the Intel | 76 | Support for error detection and correction on the Intel |
77 | E7520, E7525, E7320 server chipsets. | 77 | E7520, E7525, E7320 server chipsets. |
78 | 78 | ||
79 | config EDAC_I82875P | 79 | config EDAC_I82875P |
80 | tristate "Intel 82875p (D82875P, E7210)" | 80 | tristate "Intel 82875p (D82875P, E7210)" |
81 | depends on EDAC_MM_EDAC && PCI | 81 | depends on EDAC_MM_EDAC && PCI && X86_32 |
82 | help | 82 | help |
83 | Support for error detection and correction on the Intel | 83 | Support for error detection and correction on the Intel |
84 | D82875P and E7210 server chipsets. | 84 | D82875P and E7210 server chipsets. |
85 | 85 | ||
86 | config EDAC_I82860 | 86 | config EDAC_I82860 |
87 | tristate "Intel 82860" | 87 | tristate "Intel 82860" |
88 | depends on EDAC_MM_EDAC && PCI | 88 | depends on EDAC_MM_EDAC && PCI && X86_32 |
89 | help | 89 | help |
90 | Support for error detection and correction on the Intel | 90 | Support for error detection and correction on the Intel |
91 | 82860 chipset. | 91 | 82860 chipset. |
92 | 92 | ||
93 | config EDAC_R82600 | 93 | config EDAC_R82600 |
94 | tristate "Radisys 82600 embedded chipset" | 94 | tristate "Radisys 82600 embedded chipset" |
95 | depends on EDAC_MM_EDAC | 95 | depends on EDAC_MM_EDAC && PCI && X86_32 |
96 | help | 96 | help |
97 | Support for error detection and correction on the Radisys | 97 | Support for error detection and correction on the Radisys |
98 | 82600 embedded chipset. | 98 | 82600 embedded chipset. |
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index 2fcc8120b53c..53423ad6d4a3 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -12,25 +12,26 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | |||
16 | #include <linux/config.h> | 15 | #include <linux/config.h> |
17 | #include <linux/module.h> | 16 | #include <linux/module.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
19 | |||
20 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
21 | #include <linux/pci_ids.h> | 19 | #include <linux/pci_ids.h> |
22 | |||
23 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
24 | |||
25 | #include "edac_mc.h" | 21 | #include "edac_mc.h" |
26 | 22 | ||
23 | #define amd76x_printk(level, fmt, arg...) \ | ||
24 | edac_printk(level, "amd76x", fmt, ##arg) | ||
25 | |||
26 | #define amd76x_mc_printk(mci, level, fmt, arg...) \ | ||
27 | edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) | ||
27 | 28 | ||
28 | #define AMD76X_NR_CSROWS 8 | 29 | #define AMD76X_NR_CSROWS 8 |
29 | #define AMD76X_NR_CHANS 1 | 30 | #define AMD76X_NR_CHANS 1 |
30 | #define AMD76X_NR_DIMMS 4 | 31 | #define AMD76X_NR_DIMMS 4 |
31 | 32 | ||
32 | |||
33 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ | 33 | /* AMD 76x register addresses - device 0 function 0 - PCI bridge */ |
34 | |||
34 | #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) | 35 | #define AMD76X_ECC_MODE_STATUS 0x48 /* Mode and status of ECC (32b) |
35 | * | 36 | * |
36 | * 31:16 reserved | 37 | * 31:16 reserved |
@@ -42,6 +43,7 @@ | |||
42 | * 7:4 UE cs row | 43 | * 7:4 UE cs row |
43 | * 3:0 CE cs row | 44 | * 3:0 CE cs row |
44 | */ | 45 | */ |
46 | |||
45 | #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) | 47 | #define AMD76X_DRAM_MODE_STATUS 0x58 /* DRAM Mode and status (32b) |
46 | * | 48 | * |
47 | * 31:26 clock disable 5 - 0 | 49 | * 31:26 clock disable 5 - 0 |
@@ -56,6 +58,7 @@ | |||
56 | * 15:8 reserved | 58 | * 15:8 reserved |
57 | * 7:0 x4 mode enable 7 - 0 | 59 | * 7:0 x4 mode enable 7 - 0 |
58 | */ | 60 | */ |
61 | |||
59 | #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) | 62 | #define AMD76X_MEM_BASE_ADDR 0xC0 /* Memory base address (8 x 32b) |
60 | * | 63 | * |
61 | * 31:23 chip-select base | 64 | * 31:23 chip-select base |
@@ -66,29 +69,28 @@ | |||
66 | * 0 chip-select enable | 69 | * 0 chip-select enable |
67 | */ | 70 | */ |
68 | 71 | ||
69 | |||
70 | struct amd76x_error_info { | 72 | struct amd76x_error_info { |
71 | u32 ecc_mode_status; | 73 | u32 ecc_mode_status; |
72 | }; | 74 | }; |
73 | 75 | ||
74 | |||
75 | enum amd76x_chips { | 76 | enum amd76x_chips { |
76 | AMD761 = 0, | 77 | AMD761 = 0, |
77 | AMD762 | 78 | AMD762 |
78 | }; | 79 | }; |
79 | 80 | ||
80 | |||
81 | struct amd76x_dev_info { | 81 | struct amd76x_dev_info { |
82 | const char *ctl_name; | 82 | const char *ctl_name; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | |||
86 | static const struct amd76x_dev_info amd76x_devs[] = { | 85 | static const struct amd76x_dev_info amd76x_devs[] = { |
87 | [AMD761] = {.ctl_name = "AMD761"}, | 86 | [AMD761] = { |
88 | [AMD762] = {.ctl_name = "AMD762"}, | 87 | .ctl_name = "AMD761" |
88 | }, | ||
89 | [AMD762] = { | ||
90 | .ctl_name = "AMD762" | ||
91 | }, | ||
89 | }; | 92 | }; |
90 | 93 | ||
91 | |||
92 | /** | 94 | /** |
93 | * amd76x_get_error_info - fetch error information | 95 | * amd76x_get_error_info - fetch error information |
94 | * @mci: Memory controller | 96 | * @mci: Memory controller |
@@ -97,23 +99,21 @@ static const struct amd76x_dev_info amd76x_devs[] = { | |||
97 | * Fetch and store the AMD76x ECC status. Clear pending status | 99 | * Fetch and store the AMD76x ECC status. Clear pending status |
98 | * on the chip so that further errors will be reported | 100 | * on the chip so that further errors will be reported |
99 | */ | 101 | */ |
100 | 102 | static void amd76x_get_error_info(struct mem_ctl_info *mci, | |
101 | static void amd76x_get_error_info (struct mem_ctl_info *mci, | 103 | struct amd76x_error_info *info) |
102 | struct amd76x_error_info *info) | ||
103 | { | 104 | { |
104 | pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, | 105 | pci_read_config_dword(mci->pdev, AMD76X_ECC_MODE_STATUS, |
105 | &info->ecc_mode_status); | 106 | &info->ecc_mode_status); |
106 | 107 | ||
107 | if (info->ecc_mode_status & BIT(8)) | 108 | if (info->ecc_mode_status & BIT(8)) |
108 | pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, | 109 | pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, |
109 | (u32) BIT(8), (u32) BIT(8)); | 110 | (u32) BIT(8), (u32) BIT(8)); |
110 | 111 | ||
111 | if (info->ecc_mode_status & BIT(9)) | 112 | if (info->ecc_mode_status & BIT(9)) |
112 | pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, | 113 | pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, |
113 | (u32) BIT(9), (u32) BIT(9)); | 114 | (u32) BIT(9), (u32) BIT(9)); |
114 | } | 115 | } |
115 | 116 | ||
116 | |||
117 | /** | 117 | /** |
118 | * amd76x_process_error_info - Error check | 118 | * amd76x_process_error_info - Error check |
119 | * @mci: Memory controller | 119 | * @mci: Memory controller |
@@ -124,8 +124,7 @@ static void amd76x_get_error_info (struct mem_ctl_info *mci, | |||
124 | * A return of 1 indicates an error. Also if handle_errors is true | 124 | * A return of 1 indicates an error. Also if handle_errors is true |
125 | * then attempt to handle and clean up after the error | 125 | * then attempt to handle and clean up after the error |
126 | */ | 126 | */ |
127 | 127 | static int amd76x_process_error_info(struct mem_ctl_info *mci, | |
128 | static int amd76x_process_error_info (struct mem_ctl_info *mci, | ||
129 | struct amd76x_error_info *info, int handle_errors) | 128 | struct amd76x_error_info *info, int handle_errors) |
130 | { | 129 | { |
131 | int error_found; | 130 | int error_found; |
@@ -141,9 +140,8 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, | |||
141 | 140 | ||
142 | if (handle_errors) { | 141 | if (handle_errors) { |
143 | row = (info->ecc_mode_status >> 4) & 0xf; | 142 | row = (info->ecc_mode_status >> 4) & 0xf; |
144 | edac_mc_handle_ue(mci, | 143 | edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, |
145 | mci->csrows[row].first_page, 0, row, | 144 | row, mci->ctl_name); |
146 | mci->ctl_name); | ||
147 | } | 145 | } |
148 | } | 146 | } |
149 | 147 | ||
@@ -155,11 +153,11 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, | |||
155 | 153 | ||
156 | if (handle_errors) { | 154 | if (handle_errors) { |
157 | row = info->ecc_mode_status & 0xf; | 155 | row = info->ecc_mode_status & 0xf; |
158 | edac_mc_handle_ce(mci, | 156 | edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, |
159 | mci->csrows[row].first_page, 0, 0, row, 0, | 157 | 0, row, 0, mci->ctl_name); |
160 | mci->ctl_name); | ||
161 | } | 158 | } |
162 | } | 159 | } |
160 | |||
163 | return error_found; | 161 | return error_found; |
164 | } | 162 | } |
165 | 163 | ||
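
amd76x_get_error_info(), reworked a couple of hunks above, snapshots AMD76X_ECC_MODE_STATUS and then writes bits 8 and 9 back to themselves because the hardware treats them as write-1-to-clear latches; amd76x_process_error_info() decodes the chip-select row from that snapshot. A tiny standalone model of the read-then-acknowledge pattern (register behaviour simulated in plain C, bit meanings abbreviated):

#include <stdio.h>
#include <stdint.h>

#define BIT(n)		(1u << (n))
#define ECC_LATCH0	BIT(8)	/* latched error status */
#define ECC_LATCH1	BIT(9)	/* latched error status */

/* Simulated status register: one error latched, row number in bits 3:0. */
static uint32_t ecc_mode_status = BIT(9) | 0x5;

/* Write-1-to-clear: writing a 1 to a latch bit clears that bit. */
static void reg_write(uint32_t val)
{
	ecc_mode_status &= ~(val & (ECC_LATCH0 | ECC_LATCH1));
}

int main(void)
{
	uint32_t snap = ecc_mode_status;	/* 1. capture the status */

	if (snap & ECC_LATCH0)			/* 2. acknowledge what was seen */
		reg_write(ECC_LATCH0);
	if (snap & ECC_LATCH1)
		reg_write(ECC_LATCH1);

	printf("row %u, before 0x%x, after 0x%x\n",
	       snap & 0xfu, snap, ecc_mode_status);
	return 0;
}
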
@@ -170,16 +168,14 @@ static int amd76x_process_error_info (struct mem_ctl_info *mci, | |||
170 | * Called by the poll handlers this function reads the status | 168 | * Called by the poll handlers this function reads the status |
171 | * from the controller and checks for errors. | 169 | * from the controller and checks for errors. |
172 | */ | 170 | */ |
173 | |||
174 | static void amd76x_check(struct mem_ctl_info *mci) | 171 | static void amd76x_check(struct mem_ctl_info *mci) |
175 | { | 172 | { |
176 | struct amd76x_error_info info; | 173 | struct amd76x_error_info info; |
177 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 174 | debugf3("%s()\n", __func__); |
178 | amd76x_get_error_info(mci, &info); | 175 | amd76x_get_error_info(mci, &info); |
179 | amd76x_process_error_info(mci, &info, 1); | 176 | amd76x_process_error_info(mci, &info, 1); |
180 | } | 177 | } |
181 | 178 | ||
182 | |||
183 | /** | 179 | /** |
184 | * amd76x_probe1 - Perform set up for detected device | 180 | * amd76x_probe1 - Perform set up for detected device |
185 | * @pdev; PCI device detected | 181 | * @pdev; PCI device detected |
@@ -189,7 +185,6 @@ static void amd76x_check(struct mem_ctl_info *mci) | |||
189 | * controller status reporting. We configure and set up the | 185 | * controller status reporting. We configure and set up the |
190 | * memory controller reporting and claim the device. | 186 | * memory controller reporting and claim the device. |
191 | */ | 187 | */ |
192 | |||
193 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | 188 | static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) |
194 | { | 189 | { |
195 | int rc = -ENODEV; | 190 | int rc = -ENODEV; |
@@ -203,12 +198,11 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
203 | }; | 198 | }; |
204 | u32 ems; | 199 | u32 ems; |
205 | u32 ems_mode; | 200 | u32 ems_mode; |
201 | struct amd76x_error_info discard; | ||
206 | 202 | ||
207 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 203 | debugf0("%s()\n", __func__); |
208 | |||
209 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); | 204 | pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); |
210 | ems_mode = (ems >> 10) & 0x3; | 205 | ems_mode = (ems >> 10) & 0x3; |
211 | |||
212 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); | 206 | mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS); |
213 | 207 | ||
214 | if (mci == NULL) { | 208 | if (mci == NULL) { |
@@ -216,16 +210,13 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
216 | goto fail; | 210 | goto fail; |
217 | } | 211 | } |
218 | 212 | ||
219 | debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); | 213 | debugf0("%s(): mci = %p\n", __func__, mci); |
220 | 214 | mci->pdev = pdev; | |
221 | mci->pdev = pci_dev_get(pdev); | ||
222 | mci->mtype_cap = MEM_FLAG_RDDR; | 215 | mci->mtype_cap = MEM_FLAG_RDDR; |
223 | |||
224 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 216 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
225 | mci->edac_cap = ems_mode ? | 217 | mci->edac_cap = ems_mode ? |
226 | (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; | 218 | (EDAC_FLAG_EC | EDAC_FLAG_SECDED) : EDAC_FLAG_NONE; |
227 | 219 | mci->mod_name = EDAC_MOD_STR; | |
228 | mci->mod_name = BS_MOD_STR; | ||
229 | mci->mod_ver = "$Revision: 1.4.2.5 $"; | 220 | mci->mod_ver = "$Revision: 1.4.2.5 $"; |
230 | mci->ctl_name = amd76x_devs[dev_idx].ctl_name; | 221 | mci->ctl_name = amd76x_devs[dev_idx].ctl_name; |
231 | mci->edac_check = amd76x_check; | 222 | mci->edac_check = amd76x_check; |
@@ -240,18 +231,15 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
240 | 231 | ||
241 | /* find the DRAM Chip Select Base address and mask */ | 232 | /* find the DRAM Chip Select Base address and mask */ |
242 | pci_read_config_dword(mci->pdev, | 233 | pci_read_config_dword(mci->pdev, |
243 | AMD76X_MEM_BASE_ADDR + (index * 4), | 234 | AMD76X_MEM_BASE_ADDR + (index * 4), &mba); |
244 | &mba); | ||
245 | 235 | ||
246 | if (!(mba & BIT(0))) | 236 | if (!(mba & BIT(0))) |
247 | continue; | 237 | continue; |
248 | 238 | ||
249 | mba_base = mba & 0xff800000UL; | 239 | mba_base = mba & 0xff800000UL; |
250 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; | 240 | mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; |
251 | |||
252 | pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS, | 241 | pci_read_config_dword(mci->pdev, AMD76X_DRAM_MODE_STATUS, |
253 | &dms); | 242 | &dms); |
254 | |||
255 | csrow->first_page = mba_base >> PAGE_SHIFT; | 243 | csrow->first_page = mba_base >> PAGE_SHIFT; |
256 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; | 244 | csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; |
257 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; | 245 | csrow->last_page = csrow->first_page + csrow->nr_pages - 1; |
@@ -262,40 +250,33 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx) | |||
262 | csrow->edac_mode = ems_modes[ems_mode]; | 250 | csrow->edac_mode = ems_modes[ems_mode]; |
263 | } | 251 | } |
264 | 252 | ||
265 | /* clear counters */ | 253 | amd76x_get_error_info(mci, &discard); /* clear counters */ |
266 | pci_write_bits32(mci->pdev, AMD76X_ECC_MODE_STATUS, (u32) (0x3 << 8), | ||
267 | (u32) (0x3 << 8)); | ||
268 | 254 | ||
269 | if (edac_mc_add_mc(mci)) { | 255 | if (edac_mc_add_mc(mci)) { |
270 | debugf3("MC: " __FILE__ | 256 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
271 | ": %s(): failed edac_mc_add_mc()\n", __func__); | ||
272 | goto fail; | 257 | goto fail; |
273 | } | 258 | } |
274 | 259 | ||
275 | /* get this far and it's successful */ | 260 | /* get this far and it's successful */ |
276 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | 261 | debugf3("%s(): success\n", __func__); |
277 | return 0; | 262 | return 0; |
278 | 263 | ||
279 | fail: | 264 | fail: |
280 | if (mci) { | 265 | if (mci != NULL) |
281 | if(mci->pdev) | ||
282 | pci_dev_put(mci->pdev); | ||
283 | edac_mc_free(mci); | 266 | edac_mc_free(mci); |
284 | } | ||
285 | return rc; | 267 | return rc; |
286 | } | 268 | } |
287 | 269 | ||
288 | /* returns count (>= 0), or negative on error */ | 270 | /* returns count (>= 0), or negative on error */ |
289 | static int __devinit amd76x_init_one(struct pci_dev *pdev, | 271 | static int __devinit amd76x_init_one(struct pci_dev *pdev, |
290 | const struct pci_device_id *ent) | 272 | const struct pci_device_id *ent) |
291 | { | 273 | { |
292 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 274 | debugf0("%s()\n", __func__); |
293 | 275 | ||
294 | /* don't need to call pci_device_enable() */ | 276 | /* don't need to call pci_device_enable() */ |
295 | return amd76x_probe1(pdev, ent->driver_data); | 277 | return amd76x_probe1(pdev, ent->driver_data); |
296 | } | 278 | } |
297 | 279 | ||
298 | |||
299 | /** | 280 | /** |
300 | * amd76x_remove_one - driver shutdown | 281 | * amd76x_remove_one - driver shutdown |
301 | * @pdev: PCI device being handed back | 282 | * @pdev: PCI device being handed back |
@@ -304,35 +285,36 @@ static int __devinit amd76x_init_one(struct pci_dev *pdev, | |||
304 | * structure for the device then delete the mci and free the | 285 | * structure for the device then delete the mci and free the |
305 | * resources. | 286 | * resources. |
306 | */ | 287 | */ |
307 | |||
308 | static void __devexit amd76x_remove_one(struct pci_dev *pdev) | 288 | static void __devexit amd76x_remove_one(struct pci_dev *pdev) |
309 | { | 289 | { |
310 | struct mem_ctl_info *mci; | 290 | struct mem_ctl_info *mci; |
311 | 291 | ||
312 | debugf0(__FILE__ ": %s()\n", __func__); | 292 | debugf0("%s()\n", __func__); |
313 | 293 | ||
314 | if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) | 294 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
315 | return; | 295 | return; |
316 | if (edac_mc_del_mc(mci)) | 296 | |
317 | return; | ||
318 | pci_dev_put(mci->pdev); | ||
319 | edac_mc_free(mci); | 297 | edac_mc_free(mci); |
320 | } | 298 | } |
321 | 299 | ||
322 | |||
323 | static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { | 300 | static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = { |
324 | {PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 301 | { |
325 | AMD762}, | 302 | PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
326 | {PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 303 | AMD762 |
327 | AMD761}, | 304 | }, |
328 | {0,} /* 0 terminated list. */ | 305 | { |
306 | PCI_VEND_DEV(AMD, FE_GATE_700E), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
307 | AMD761 | ||
308 | }, | ||
309 | { | ||
310 | 0, | ||
311 | } /* 0 terminated list. */ | ||
329 | }; | 312 | }; |
330 | 313 | ||
331 | MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); | 314 | MODULE_DEVICE_TABLE(pci, amd76x_pci_tbl); |
332 | 315 | ||
333 | |||
334 | static struct pci_driver amd76x_driver = { | 316 | static struct pci_driver amd76x_driver = { |
335 | .name = BS_MOD_STR, | 317 | .name = EDAC_MOD_STR, |
336 | .probe = amd76x_init_one, | 318 | .probe = amd76x_init_one, |
337 | .remove = __devexit_p(amd76x_remove_one), | 319 | .remove = __devexit_p(amd76x_remove_one), |
338 | .id_table = amd76x_pci_tbl, | 320 | .id_table = amd76x_pci_tbl, |
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index c454ded2b060..66572c5323ad 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -17,18 +17,19 @@ | |||
17 | * | 17 | * |
18 | */ | 18 | */ |
19 | 19 | ||
20 | |||
21 | #include <linux/config.h> | 20 | #include <linux/config.h> |
22 | #include <linux/module.h> | 21 | #include <linux/module.h> |
23 | #include <linux/init.h> | 22 | #include <linux/init.h> |
24 | |||
25 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
26 | #include <linux/pci_ids.h> | 24 | #include <linux/pci_ids.h> |
27 | |||
28 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
29 | |||
30 | #include "edac_mc.h" | 26 | #include "edac_mc.h" |
31 | 27 | ||
28 | #define e752x_printk(level, fmt, arg...) \ | ||
29 | edac_printk(level, "e752x", fmt, ##arg) | ||
30 | |||
31 | #define e752x_mc_printk(mci, level, fmt, arg...) \ | ||
32 | edac_mc_chipset_printk(mci, level, "e752x", fmt, ##arg) | ||
32 | 33 | ||
33 | #ifndef PCI_DEVICE_ID_INTEL_7520_0 | 34 | #ifndef PCI_DEVICE_ID_INTEL_7520_0 |
34 | #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 | 35 | #define PCI_DEVICE_ID_INTEL_7520_0 0x3590 |
@@ -56,7 +57,6 @@ | |||
56 | 57 | ||
57 | #define E752X_NR_CSROWS 8 /* number of csrows */ | 58 | #define E752X_NR_CSROWS 8 /* number of csrows */ |
58 | 59 | ||
59 | |||
60 | /* E752X register addresses - device 0 function 0 */ | 60 | /* E752X register addresses - device 0 function 0 */ |
61 | #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ | 61 | #define E752X_DRB 0x60 /* DRAM row boundary register (8b) */ |
62 | #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ | 62 | #define E752X_DRA 0x70 /* DRAM row attribute register (8b) */ |
@@ -156,7 +156,6 @@ enum e752x_chips { | |||
156 | E7320 = 2 | 156 | E7320 = 2 |
157 | }; | 157 | }; |
158 | 158 | ||
159 | |||
160 | struct e752x_pvt { | 159 | struct e752x_pvt { |
161 | struct pci_dev *bridge_ck; | 160 | struct pci_dev *bridge_ck; |
162 | struct pci_dev *dev_d0f0; | 161 | struct pci_dev *dev_d0f0; |
@@ -170,9 +169,9 @@ struct e752x_pvt { | |||
170 | const struct e752x_dev_info *dev_info; | 169 | const struct e752x_dev_info *dev_info; |
171 | }; | 170 | }; |
172 | 171 | ||
173 | |||
174 | struct e752x_dev_info { | 172 | struct e752x_dev_info { |
175 | u16 err_dev; | 173 | u16 err_dev; |
174 | u16 ctl_dev; | ||
176 | const char *ctl_name; | 175 | const char *ctl_name; |
177 | }; | 176 | }; |
178 | 177 | ||
@@ -198,38 +197,47 @@ struct e752x_error_info { | |||
198 | 197 | ||
199 | static const struct e752x_dev_info e752x_devs[] = { | 198 | static const struct e752x_dev_info e752x_devs[] = { |
200 | [E7520] = { | 199 | [E7520] = { |
201 | .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, | 200 | .err_dev = PCI_DEVICE_ID_INTEL_7520_1_ERR, |
202 | .ctl_name = "E7520"}, | 201 | .ctl_dev = PCI_DEVICE_ID_INTEL_7520_0, |
202 | .ctl_name = "E7520" | ||
203 | }, | ||
203 | [E7525] = { | 204 | [E7525] = { |
204 | .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, | 205 | .err_dev = PCI_DEVICE_ID_INTEL_7525_1_ERR, |
205 | .ctl_name = "E7525"}, | 206 | .ctl_dev = PCI_DEVICE_ID_INTEL_7525_0, |
207 | .ctl_name = "E7525" | ||
208 | }, | ||
206 | [E7320] = { | 209 | [E7320] = { |
207 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, | 210 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, |
208 | .ctl_name = "E7320"}, | 211 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, |
212 | .ctl_name = "E7320" | ||
213 | }, | ||
209 | }; | 214 | }; |
210 | 215 | ||
211 | |||
212 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | 216 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
213 | unsigned long page) | 217 | unsigned long page) |
214 | { | 218 | { |
215 | u32 remap; | 219 | u32 remap; |
216 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 220 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
217 | 221 | ||
218 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 222 | debugf3("%s()\n", __func__); |
219 | 223 | ||
220 | if (page < pvt->tolm) | 224 | if (page < pvt->tolm) |
221 | return page; | 225 | return page; |
226 | |||
222 | if ((page >= 0x100000) && (page < pvt->remapbase)) | 227 | if ((page >= 0x100000) && (page < pvt->remapbase)) |
223 | return page; | 228 | return page; |
229 | |||
224 | remap = (page - pvt->tolm) + pvt->remapbase; | 230 | remap = (page - pvt->tolm) + pvt->remapbase; |
231 | |||
225 | if (remap < pvt->remaplimit) | 232 | if (remap < pvt->remaplimit) |
226 | return remap; | 233 | return remap; |
227 | printk(KERN_ERR "Invalid page %lx - out of range\n", page); | 234 | |
235 | e752x_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); | ||
228 | return pvt->tolm - 1; | 236 | return pvt->tolm - 1; |
229 | } | 237 | } |
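
To make the remap arithmetic in ctl_page_to_phys() concrete: pages below the top of low memory (tolm) and pages in the hole between 4 GiB and remapbase map straight through, everything else is shifted up into the remap window. A stand-alone sketch, with tolm/remapbase/remaplimit values invented purely for illustration:

/* Stand-alone illustration of the TOLM/remap window logic above.
 * All three window values are invented for the example. */
#include <stdio.h>

int main(void)
{
	unsigned long tolm = 0xc0000;		/* 3 GiB, in 4 KiB pages */
	unsigned long remapbase = 0x100000;	/* 4 GiB, in 4 KiB pages */
	unsigned long remaplimit = 0x140000;	/* 5 GiB, in 4 KiB pages */
	unsigned long page = 0xd0000;		/* above TOLM, below 4 GiB */
	unsigned long remap = (page - tolm) + remapbase;

	if (page < tolm || (page >= 0x100000 && page < remapbase))
		printf("page 0x%lx needs no remapping\n", page);
	else if (remap < remaplimit)
		printf("page 0x%lx remaps to 0x%lx\n", page, remap);
	else
		printf("page 0x%lx is out of range\n", page);

	return 0;
}
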
230 | 238 | ||
231 | static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | 239 | static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, |
232 | u32 sec1_add, u16 sec1_syndrome) | 240 | u32 sec1_add, u16 sec1_syndrome) |
233 | { | 241 | { |
234 | u32 page; | 242 | u32 page; |
235 | int row; | 243 | int row; |
@@ -237,7 +245,7 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | |||
237 | int i; | 245 | int i; |
238 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 246 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
239 | 247 | ||
240 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 248 | debugf3("%s()\n", __func__); |
241 | 249 | ||
242 | /* convert the addr to 4k page */ | 250 | /* convert the addr to 4k page */ |
243 | page = sec1_add >> (PAGE_SHIFT - 4); | 251 | page = sec1_add >> (PAGE_SHIFT - 4); |
@@ -246,36 +254,37 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one, | |||
246 | if (pvt->mc_symmetric) { | 254 | if (pvt->mc_symmetric) { |
247 | /* chip select are bits 14 & 13 */ | 255 | /* chip select are bits 14 & 13 */ |
248 | row = ((page >> 1) & 3); | 256 | row = ((page >> 1) & 3); |
249 | printk(KERN_WARNING | 257 | e752x_printk(KERN_WARNING, |
250 | "Test row %d Table %d %d %d %d %d %d %d %d\n", | 258 | "Test row %d Table %d %d %d %d %d %d %d %d\n", row, |
251 | row, pvt->map[0], pvt->map[1], pvt->map[2], | 259 | pvt->map[0], pvt->map[1], pvt->map[2], pvt->map[3], |
252 | pvt->map[3], pvt->map[4], pvt->map[5], | 260 | pvt->map[4], pvt->map[5], pvt->map[6], pvt->map[7]); |
253 | pvt->map[6], pvt->map[7]); | ||
254 | 261 | ||
255 | /* test for channel remapping */ | 262 | /* test for channel remapping */ |
256 | for (i = 0; i < 8; i++) { | 263 | for (i = 0; i < 8; i++) { |
257 | if (pvt->map[i] == row) | 264 | if (pvt->map[i] == row) |
258 | break; | 265 | break; |
259 | } | 266 | } |
260 | printk(KERN_WARNING "Test computed row %d\n", i); | 267 | |
268 | e752x_printk(KERN_WARNING, "Test computed row %d\n", i); | ||
269 | |||
261 | if (i < 8) | 270 | if (i < 8) |
262 | row = i; | 271 | row = i; |
263 | else | 272 | else |
264 | printk(KERN_WARNING | 273 | e752x_mc_printk(mci, KERN_WARNING, |
265 | "MC%d: row %d not found in remap table\n", | 274 | "row %d not found in remap table\n", row); |
266 | mci->mc_idx, row); | ||
267 | } else | 275 | } else |
268 | row = edac_mc_find_csrow_by_page(mci, page); | 276 | row = edac_mc_find_csrow_by_page(mci, page); |
277 | |||
269 | /* 0 = channel A, 1 = channel B */ | 278 | /* 0 = channel A, 1 = channel B */ |
270 | channel = !(error_one & 1); | 279 | channel = !(error_one & 1); |
271 | 280 | ||
272 | if (!pvt->map_type) | 281 | if (!pvt->map_type) |
273 | row = 7 - row; | 282 | row = 7 - row; |
283 | |||
274 | edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, | 284 | edac_mc_handle_ce(mci, page, 0, sec1_syndrome, row, channel, |
275 | "e752x CE"); | 285 | "e752x CE"); |
276 | } | 286 | } |
277 | 287 | ||
278 | |||
279 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, | 288 | static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, |
280 | u32 sec1_add, u16 sec1_syndrome, int *error_found, | 289 | u32 sec1_add, u16 sec1_syndrome, int *error_found, |
281 | int handle_error) | 290 | int handle_error) |
@@ -286,36 +295,42 @@ static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, | |||
286 | do_process_ce(mci, error_one, sec1_add, sec1_syndrome); | 295 | do_process_ce(mci, error_one, sec1_add, sec1_syndrome); |
287 | } | 296 | } |
288 | 297 | ||
289 | static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, u32 ded_add, | 298 | static void do_process_ue(struct mem_ctl_info *mci, u16 error_one, |
290 | u32 scrb_add) | 299 | u32 ded_add, u32 scrb_add) |
291 | { | 300 | { |
292 | u32 error_2b, block_page; | 301 | u32 error_2b, block_page; |
293 | int row; | 302 | int row; |
294 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 303 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
295 | 304 | ||
296 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 305 | debugf3("%s()\n", __func__); |
297 | 306 | ||
298 | if (error_one & 0x0202) { | 307 | if (error_one & 0x0202) { |
299 | error_2b = ded_add; | 308 | error_2b = ded_add; |
309 | |||
300 | /* convert to 4k address */ | 310 | /* convert to 4k address */ |
301 | block_page = error_2b >> (PAGE_SHIFT - 4); | 311 | block_page = error_2b >> (PAGE_SHIFT - 4); |
312 | |||
302 | row = pvt->mc_symmetric ? | 313 | row = pvt->mc_symmetric ? |
303 | /* chip select are bits 14 & 13 */ | 314 | /* chip select are bits 14 & 13 */ |
304 | ((block_page >> 1) & 3) : | 315 | ((block_page >> 1) & 3) : |
305 | edac_mc_find_csrow_by_page(mci, block_page); | 316 | edac_mc_find_csrow_by_page(mci, block_page); |
317 | |||
306 | edac_mc_handle_ue(mci, block_page, 0, row, | 318 | edac_mc_handle_ue(mci, block_page, 0, row, |
307 | "e752x UE from Read"); | 319 | "e752x UE from Read"); |
308 | } | 320 | } |
309 | if (error_one & 0x0404) { | 321 | if (error_one & 0x0404) { |
310 | error_2b = scrb_add; | 322 | error_2b = scrb_add; |
323 | |||
311 | /* convert to 4k address */ | 324 | /* convert to 4k address */ |
312 | block_page = error_2b >> (PAGE_SHIFT - 4); | 325 | block_page = error_2b >> (PAGE_SHIFT - 4); |
326 | |||
313 | row = pvt->mc_symmetric ? | 327 | row = pvt->mc_symmetric ? |
314 | /* chip select are bits 14 & 13 */ | 328 | /* chip select are bits 14 & 13 */ |
315 | ((block_page >> 1) & 3) : | 329 | ((block_page >> 1) & 3) : |
316 | edac_mc_find_csrow_by_page(mci, block_page); | 330 | edac_mc_find_csrow_by_page(mci, block_page); |
331 | |||
317 | edac_mc_handle_ue(mci, block_page, 0, row, | 332 | edac_mc_handle_ue(mci, block_page, 0, row, |
318 | "e752x UE from Scruber"); | 333 | "e752x UE from Scruber"); |
319 | } | 334 | } |
320 | } | 335 | } |
321 | 336 | ||
@@ -336,7 +351,7 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci, | |||
336 | if (!handle_error) | 351 | if (!handle_error) |
337 | return; | 352 | return; |
338 | 353 | ||
339 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 354 | debugf3("%s()\n", __func__); |
340 | edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); | 355 | edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); |
341 | } | 356 | } |
342 | 357 | ||
@@ -348,13 +363,13 @@ static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, | |||
348 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; | 363 | struct e752x_pvt *pvt = (struct e752x_pvt *) mci->pvt_info; |
349 | 364 | ||
350 | error_1b = retry_add; | 365 | error_1b = retry_add; |
351 | page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ | 366 | page = error_1b >> (PAGE_SHIFT - 4); /* convert the addr to 4k page */ |
352 | row = pvt->mc_symmetric ? | 367 | row = pvt->mc_symmetric ? |
353 | ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ | 368 | ((page >> 1) & 3) : /* chip select are bits 14 & 13 */ |
354 | edac_mc_find_csrow_by_page(mci, page); | 369 | edac_mc_find_csrow_by_page(mci, page); |
355 | printk(KERN_WARNING | 370 | e752x_mc_printk(mci, KERN_WARNING, |
356 | "MC%d: CE page 0x%lx, row %d : Memory read retry\n", | 371 | "CE page 0x%lx, row %d : Memory read retry\n", |
357 | mci->mc_idx, (long unsigned int) page, row); | 372 | (long unsigned int) page, row); |
358 | } | 373 | } |
359 | 374 | ||
360 | static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, | 375 | static inline void process_ded_retry(struct mem_ctl_info *mci, u16 error, |
@@ -372,8 +387,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, | |||
372 | *error_found = 1; | 387 | *error_found = 1; |
373 | 388 | ||
374 | if (handle_error) | 389 | if (handle_error) |
375 | printk(KERN_WARNING "MC%d: Memory threshold CE\n", | 390 | e752x_mc_printk(mci, KERN_WARNING, "Memory threshold CE\n"); |
376 | mci->mc_idx); | ||
377 | } | 391 | } |
378 | 392 | ||
379 | static char *global_message[11] = { | 393 | static char *global_message[11] = { |
@@ -391,8 +405,8 @@ static void do_global_error(int fatal, u32 errors) | |||
391 | 405 | ||
392 | for (i = 0; i < 11; i++) { | 406 | for (i = 0; i < 11; i++) { |
393 | if (errors & (1 << i)) | 407 | if (errors & (1 << i)) |
394 | printk(KERN_WARNING "%sError %s\n", | 408 | e752x_printk(KERN_WARNING, "%sError %s\n", |
395 | fatal_message[fatal], global_message[i]); | 409 | fatal_message[fatal], global_message[i]); |
396 | } | 410 | } |
397 | } | 411 | } |
398 | 412 | ||
@@ -418,8 +432,8 @@ static void do_hub_error(int fatal, u8 errors) | |||
418 | 432 | ||
419 | for (i = 0; i < 7; i++) { | 433 | for (i = 0; i < 7; i++) { |
420 | if (errors & (1 << i)) | 434 | if (errors & (1 << i)) |
421 | printk(KERN_WARNING "%sError %s\n", | 435 | e752x_printk(KERN_WARNING, "%sError %s\n", |
422 | fatal_message[fatal], hub_message[i]); | 436 | fatal_message[fatal], hub_message[i]); |
423 | } | 437 | } |
424 | } | 438 | } |
425 | 439 | ||
@@ -445,8 +459,8 @@ static void do_membuf_error(u8 errors) | |||
445 | 459 | ||
446 | for (i = 0; i < 4; i++) { | 460 | for (i = 0; i < 4; i++) { |
447 | if (errors & (1 << i)) | 461 | if (errors & (1 << i)) |
448 | printk(KERN_WARNING "Non-Fatal Error %s\n", | 462 | e752x_printk(KERN_WARNING, "Non-Fatal Error %s\n", |
449 | membuf_message[i]); | 463 | membuf_message[i]); |
450 | } | 464 | } |
451 | } | 465 | } |
452 | 466 | ||
@@ -458,8 +472,7 @@ static inline void membuf_error(u8 errors, int *error_found, int handle_error) | |||
458 | do_membuf_error(errors); | 472 | do_membuf_error(errors); |
459 | } | 473 | } |
460 | 474 | ||
461 | #if 0 | 475 | static char *sysbus_message[10] = { |
462 | char *sysbus_message[10] = { | ||
463 | "Addr or Request Parity", | 476 | "Addr or Request Parity", |
464 | "Data Strobe Glitch", | 477 | "Data Strobe Glitch", |
465 | "Addr Strobe Glitch", | 478 | "Addr Strobe Glitch", |
@@ -470,7 +483,6 @@ char *sysbus_message[10] = { | |||
470 | "Memory Parity", | 483 | "Memory Parity", |
471 | "IO Subsystem Parity" | 484 | "IO Subsystem Parity" |
472 | }; | 485 | }; |
473 | #endif /* 0 */ | ||
474 | 486 | ||
475 | static void do_sysbus_error(int fatal, u32 errors) | 487 | static void do_sysbus_error(int fatal, u32 errors) |
476 | { | 488 | { |
@@ -478,8 +490,8 @@ static void do_sysbus_error(int fatal, u32 errors) | |||
478 | 490 | ||
479 | for (i = 0; i < 10; i++) { | 491 | for (i = 0; i < 10; i++) { |
480 | if (errors & (1 << i)) | 492 | if (errors & (1 << i)) |
481 | printk(KERN_WARNING "%sError System Bus %s\n", | 493 | e752x_printk(KERN_WARNING, "%sError System Bus %s\n", |
482 | fatal_message[fatal], global_message[i]); | 494 | fatal_message[fatal], sysbus_message[i]); |
483 | } | 495 | } |
484 | } | 496 | } |
485 | 497 | ||
@@ -492,33 +504,42 @@ static inline void sysbus_error(int fatal, u32 errors, int *error_found, | |||
492 | do_sysbus_error(fatal, errors); | 504 | do_sysbus_error(fatal, errors); |
493 | } | 505 | } |
494 | 506 | ||
495 | static void e752x_check_hub_interface (struct e752x_error_info *info, | 507 | static void e752x_check_hub_interface(struct e752x_error_info *info, |
496 | int *error_found, int handle_error) | 508 | int *error_found, int handle_error) |
497 | { | 509 | { |
498 | u8 stat8; | 510 | u8 stat8; |
499 | 511 | ||
500 | //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); | 512 | //pci_read_config_byte(dev,E752X_HI_FERR,&stat8); |
513 | |||
501 | stat8 = info->hi_ferr; | 514 | stat8 = info->hi_ferr; |
515 | |||
502 | if(stat8 & 0x7f) { /* Error, so process */ | 516 | if(stat8 & 0x7f) { /* Error, so process */ |
503 | stat8 &= 0x7f; | 517 | stat8 &= 0x7f; |
518 | |||
504 | if(stat8 & 0x2b) | 519 | if(stat8 & 0x2b) |
505 | hub_error(1, stat8 & 0x2b, error_found, handle_error); | 520 | hub_error(1, stat8 & 0x2b, error_found, handle_error); |
521 | |||
506 | if(stat8 & 0x54) | 522 | if(stat8 & 0x54) |
507 | hub_error(0, stat8 & 0x54, error_found, handle_error); | 523 | hub_error(0, stat8 & 0x54, error_found, handle_error); |
508 | } | 524 | } |
525 | |||
509 | //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); | 526 | //pci_read_config_byte(dev,E752X_HI_NERR,&stat8); |
527 | |||
510 | stat8 = info->hi_nerr; | 528 | stat8 = info->hi_nerr; |
529 | |||
511 | if(stat8 & 0x7f) { /* Error, so process */ | 530 | if(stat8 & 0x7f) { /* Error, so process */ |
512 | stat8 &= 0x7f; | 531 | stat8 &= 0x7f; |
532 | |||
513 | if (stat8 & 0x2b) | 533 | if (stat8 & 0x2b) |
514 | hub_error(1, stat8 & 0x2b, error_found, handle_error); | 534 | hub_error(1, stat8 & 0x2b, error_found, handle_error); |
535 | |||
515 | if(stat8 & 0x54) | 536 | if(stat8 & 0x54) |
516 | hub_error(0, stat8 & 0x54, error_found, handle_error); | 537 | hub_error(0, stat8 & 0x54, error_found, handle_error); |
517 | } | 538 | } |
518 | } | 539 | } |
519 | 540 | ||
520 | static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, | 541 | static void e752x_check_sysbus(struct e752x_error_info *info, |
521 | int handle_error) | 542 | int *error_found, int handle_error) |
522 | { | 543 | { |
523 | u32 stat32, error32; | 544 | u32 stat32, error32; |
524 | 545 | ||
@@ -530,27 +551,34 @@ static void e752x_check_sysbus (struct e752x_error_info *info, int *error_found, | |||
530 | 551 | ||
531 | error32 = (stat32 >> 16) & 0x3ff; | 552 | error32 = (stat32 >> 16) & 0x3ff; |
532 | stat32 = stat32 & 0x3ff; | 553 | stat32 = stat32 & 0x3ff; |
554 | |||
533 | if(stat32 & 0x083) | 555 | if(stat32 & 0x083) |
534 | sysbus_error(1, stat32 & 0x083, error_found, handle_error); | 556 | sysbus_error(1, stat32 & 0x083, error_found, handle_error); |
557 | |||
535 | if(stat32 & 0x37c) | 558 | if(stat32 & 0x37c) |
536 | sysbus_error(0, stat32 & 0x37c, error_found, handle_error); | 559 | sysbus_error(0, stat32 & 0x37c, error_found, handle_error); |
560 | |||
537 | if(error32 & 0x083) | 561 | if(error32 & 0x083) |
538 | sysbus_error(1, error32 & 0x083, error_found, handle_error); | 562 | sysbus_error(1, error32 & 0x083, error_found, handle_error); |
563 | |||
539 | if(error32 & 0x37c) | 564 | if(error32 & 0x37c) |
540 | sysbus_error(0, error32 & 0x37c, error_found, handle_error); | 565 | sysbus_error(0, error32 & 0x37c, error_found, handle_error); |
541 | } | 566 | } |
542 | 567 | ||
543 | static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, | 568 | static void e752x_check_membuf (struct e752x_error_info *info, |
544 | int handle_error) | 569 | int *error_found, int handle_error) |
545 | { | 570 | { |
546 | u8 stat8; | 571 | u8 stat8; |
547 | 572 | ||
548 | stat8 = info->buf_ferr; | 573 | stat8 = info->buf_ferr; |
574 | |||
549 | if (stat8 & 0x0f) { /* Error, so process */ | 575 | if (stat8 & 0x0f) { /* Error, so process */ |
550 | stat8 &= 0x0f; | 576 | stat8 &= 0x0f; |
551 | membuf_error(stat8, error_found, handle_error); | 577 | membuf_error(stat8, error_found, handle_error); |
552 | } | 578 | } |
579 | |||
553 | stat8 = info->buf_nerr; | 580 | stat8 = info->buf_nerr; |
581 | |||
554 | if (stat8 & 0x0f) { /* Error, so process */ | 582 | if (stat8 & 0x0f) { /* Error, so process */ |
555 | stat8 &= 0x0f; | 583 | stat8 &= 0x0f; |
556 | membuf_error(stat8, error_found, handle_error); | 584 | membuf_error(stat8, error_found, handle_error); |
@@ -558,7 +586,8 @@ static void e752x_check_membuf (struct e752x_error_info *info, int *error_found, | |||
558 | } | 586 | } |
559 | 587 | ||
560 | static void e752x_check_dram (struct mem_ctl_info *mci, | 588 | static void e752x_check_dram (struct mem_ctl_info *mci, |
561 | struct e752x_error_info *info, int *error_found, int handle_error) | 589 | struct e752x_error_info *info, int *error_found, |
590 | int handle_error) | ||
562 | { | 591 | { |
563 | u16 error_one, error_next; | 592 | u16 error_one, error_next; |
564 | 593 | ||
@@ -608,7 +637,7 @@ static void e752x_check_dram (struct mem_ctl_info *mci, | |||
608 | } | 637 | } |
609 | 638 | ||
610 | static void e752x_get_error_info (struct mem_ctl_info *mci, | 639 | static void e752x_get_error_info (struct mem_ctl_info *mci, |
611 | struct e752x_error_info *info) | 640 | struct e752x_error_info *info) |
612 | { | 641 | { |
613 | struct pci_dev *dev; | 642 | struct pci_dev *dev; |
614 | struct e752x_pvt *pvt; | 643 | struct e752x_pvt *pvt; |
@@ -616,7 +645,6 @@ static void e752x_get_error_info (struct mem_ctl_info *mci, | |||
616 | memset(info, 0, sizeof(*info)); | 645 | memset(info, 0, sizeof(*info)); |
617 | pvt = (struct e752x_pvt *) mci->pvt_info; | 646 | pvt = (struct e752x_pvt *) mci->pvt_info; |
618 | dev = pvt->dev_d0f1; | 647 | dev = pvt->dev_d0f1; |
619 | |||
620 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); | 648 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); |
621 | 649 | ||
622 | if (info->ferr_global) { | 650 | if (info->ferr_global) { |
@@ -727,7 +755,8 @@ static int e752x_process_error_info (struct mem_ctl_info *mci, | |||
727 | static void e752x_check(struct mem_ctl_info *mci) | 755 | static void e752x_check(struct mem_ctl_info *mci) |
728 | { | 756 | { |
729 | struct e752x_error_info info; | 757 | struct e752x_error_info info; |
730 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 758 | |
759 | debugf3("%s()\n", __func__); | ||
731 | e752x_get_error_info(mci, &info); | 760 | e752x_get_error_info(mci, &info); |
732 | e752x_process_error_info(mci, &info, 1); | 761 | e752x_process_error_info(mci, &info, 1); |
733 | } | 762 | } |
@@ -736,23 +765,21 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
736 | { | 765 | { |
737 | int rc = -ENODEV; | 766 | int rc = -ENODEV; |
738 | int index; | 767 | int index; |
739 | u16 pci_data, stat; | 768 | u16 pci_data; |
740 | u32 stat32; | ||
741 | u16 stat16; | ||
742 | u8 stat8; | 769 | u8 stat8; |
743 | struct mem_ctl_info *mci = NULL; | 770 | struct mem_ctl_info *mci = NULL; |
744 | struct e752x_pvt *pvt = NULL; | 771 | struct e752x_pvt *pvt = NULL; |
745 | u16 ddrcsr; | 772 | u16 ddrcsr; |
746 | u32 drc; | 773 | u32 drc; |
747 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ | 774 | int drc_chan; /* Number of channels 0=1chan,1=2chan */ |
748 | int drc_drbg; /* DRB granularity 0=64mb,1=128mb */ | 775 | int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ |
749 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 776 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ |
750 | u32 dra; | 777 | u32 dra; |
751 | unsigned long last_cumul_size; | 778 | unsigned long last_cumul_size; |
752 | struct pci_dev *pres_dev; | ||
753 | struct pci_dev *dev = NULL; | 779 | struct pci_dev *dev = NULL; |
780 | struct e752x_error_info discard; | ||
754 | 781 | ||
755 | debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); | 782 | debugf0("%s(): mci\n", __func__); |
756 | debugf0("Starting Probe1\n"); | 783 | debugf0("Starting Probe1\n"); |
757 | 784 | ||
758 | /* enable device 0 function 1 */ | 785 | /* enable device 0 function 1 */ |
@@ -776,34 +803,35 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
776 | goto fail; | 803 | goto fail; |
777 | } | 804 | } |
778 | 805 | ||
779 | debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); | 806 | debugf3("%s(): init mci\n", __func__); |
780 | |||
781 | mci->mtype_cap = MEM_FLAG_RDDR; | 807 | mci->mtype_cap = MEM_FLAG_RDDR; |
782 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | 808 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | |
783 | EDAC_FLAG_S4ECD4ED; | 809 | EDAC_FLAG_S4ECD4ED; |
784 | /* FIXME - what if different memory types are in different csrows? */ | 810 | /* FIXME - what if different memory types are in different csrows? */ |
785 | mci->mod_name = BS_MOD_STR; | 811 | mci->mod_name = EDAC_MOD_STR; |
786 | mci->mod_ver = "$Revision: 1.5.2.11 $"; | 812 | mci->mod_ver = "$Revision: 1.5.2.11 $"; |
787 | mci->pdev = pdev; | 813 | mci->pdev = pdev; |
788 | 814 | ||
789 | debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); | 815 | debugf3("%s(): init pvt\n", __func__); |
790 | pvt = (struct e752x_pvt *) mci->pvt_info; | 816 | pvt = (struct e752x_pvt *) mci->pvt_info; |
791 | pvt->dev_info = &e752x_devs[dev_idx]; | 817 | pvt->dev_info = &e752x_devs[dev_idx]; |
792 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | 818 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, |
793 | pvt->dev_info->err_dev, | 819 | pvt->dev_info->err_dev, |
794 | pvt->bridge_ck); | 820 | pvt->bridge_ck); |
821 | |||
795 | if (pvt->bridge_ck == NULL) | 822 | if (pvt->bridge_ck == NULL) |
796 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, | 823 | pvt->bridge_ck = pci_scan_single_device(pdev->bus, |
797 | PCI_DEVFN(0, 1)); | 824 | PCI_DEVFN(0, 1)); |
825 | |||
798 | if (pvt->bridge_ck == NULL) { | 826 | if (pvt->bridge_ck == NULL) { |
799 | printk(KERN_ERR "MC: error reporting device not found:" | 827 | e752x_printk(KERN_ERR, "error reporting device not found:" |
800 | "vendor %x device 0x%x (broken BIOS?)\n", | 828 | "vendor %x device 0x%x (broken BIOS?)\n", |
801 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); | 829 | PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].err_dev); |
802 | goto fail; | 830 | goto fail; |
803 | } | 831 | } |
804 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); | ||
805 | 832 | ||
806 | debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); | 833 | pvt->mc_symmetric = ((ddrcsr & 0x10) != 0); |
834 | debugf3("%s(): more mci init\n", __func__); | ||
807 | mci->ctl_name = pvt->dev_info->ctl_name; | 835 | mci->ctl_name = pvt->dev_info->ctl_name; |
808 | mci->edac_check = e752x_check; | 836 | mci->edac_check = e752x_check; |
809 | mci->ctl_page_to_phys = ctl_page_to_phys; | 837 | mci->ctl_page_to_phys = ctl_page_to_phys; |
@@ -820,6 +848,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
820 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { | 848 | for (last_cumul_size = index = 0; index < mci->nr_csrows; index++) { |
821 | u8 value; | 849 | u8 value; |
822 | u32 cumul_size; | 850 | u32 cumul_size; |
851 | |||
823 | /* mem_dev 0=x8, 1=x4 */ | 852 | /* mem_dev 0=x8, 1=x4 */ |
824 | int mem_dev = (dra >> (index * 4 + 2)) & 0x3; | 853 | int mem_dev = (dra >> (index * 4 + 2)) & 0x3; |
825 | struct csrow_info *csrow = &mci->csrows[index]; | 854 | struct csrow_info *csrow = &mci->csrows[index]; |
@@ -828,17 +857,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
828 | pci_read_config_byte(mci->pdev, E752X_DRB + index, &value); | 857 | pci_read_config_byte(mci->pdev, E752X_DRB + index, &value); |
829 | /* convert a 128 or 64 MiB DRB to a page size. */ | 858 | /* convert a 128 or 64 MiB DRB to a page size. */ |
830 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 859 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
831 | debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", | 860 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
832 | __func__, index, cumul_size); | 861 | cumul_size); |
862 | |||
833 | if (cumul_size == last_cumul_size) | 863 | if (cumul_size == last_cumul_size) |
834 | continue; /* not populated */ | 864 | continue; /* not populated */ |
835 | 865 | ||
836 | csrow->first_page = last_cumul_size; | 866 | csrow->first_page = last_cumul_size; |
837 | csrow->last_page = cumul_size - 1; | 867 | csrow->last_page = cumul_size - 1; |
838 | csrow->nr_pages = cumul_size - last_cumul_size; | 868 | csrow->nr_pages = cumul_size - last_cumul_size; |
839 | last_cumul_size = cumul_size; | 869 | last_cumul_size = cumul_size; |
840 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | 870 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ |
841 | csrow->mtype = MEM_RDDR; /* only one type supported */ | 871 | csrow->mtype = MEM_RDDR; /* only one type supported */ |
842 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | 872 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; |
843 | 873 | ||
844 | /* | 874 | /* |
@@ -862,29 +892,32 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
862 | u8 value; | 892 | u8 value; |
863 | u8 last = 0; | 893 | u8 last = 0; |
864 | u8 row = 0; | 894 | u8 row = 0; |
865 | for (index = 0; index < 8; index += 2) { | ||
866 | 895 | ||
896 | for (index = 0; index < 8; index += 2) { | ||
867 | pci_read_config_byte(mci->pdev, E752X_DRB + index, | 897 | pci_read_config_byte(mci->pdev, E752X_DRB + index, |
868 | &value); | 898 | &value); |
899 | |||
869 | /* test if there is a dimm in this slot */ | 900 | /* test if there is a dimm in this slot */ |
870 | if (value == last) { | 901 | if (value == last) { |
871 | /* no dimm in the slot, so flag it as empty */ | 902 | /* no dimm in the slot, so flag it as empty */ |
872 | pvt->map[index] = 0xff; | 903 | pvt->map[index] = 0xff; |
873 | pvt->map[index + 1] = 0xff; | 904 | pvt->map[index + 1] = 0xff; |
874 | } else { /* there is a dimm in the slot */ | 905 | } else { /* there is a dimm in the slot */ |
875 | pvt->map[index] = row; | 906 | pvt->map[index] = row; |
876 | row++; | 907 | row++; |
877 | last = value; | 908 | last = value; |
878 | /* test the next value to see if the dimm is | 909 | /* test the next value to see if the dimm is |
879 | double sided */ | 910 | double sided */ |
880 | pci_read_config_byte(mci->pdev, | 911 | pci_read_config_byte(mci->pdev, |
881 | E752X_DRB + index + 1, | 912 | E752X_DRB + index + 1, |
882 | &value); | 913 | &value); |
883 | pvt->map[index + 1] = (value == last) ? | 914 | pvt->map[index + 1] = (value == last) ? |
884 | 0xff : /* the dimm is single sided, | 915 | 0xff : /* the dimm is single sided, |
885 | so flag as empty */ | 916 | * so flag as empty |
886 | row; /* this is a double sided dimm | 917 | */ |
887 | to save the next row # */ | 918 | row; /* this is a double sided dimm |
919 | * to save the next row # | ||
920 | */ | ||
888 | row++; | 921 | row++; |
889 | last = value; | 922 | last = value; |
890 | } | 923 | } |
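
The loop above builds pvt->map[] from the cumulative DRB boundaries; because the comments are spread across several wrapped lines, a stand-alone restatement can help. The DRB values below are invented for illustration only:

/* Toy restatement of the DRB -> row map construction above.  Each DRB
 * entry is a cumulative boundary, so a value equal to the previous one
 * marks an empty slot (or the back of a single-sided DIMM). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t drb[8] = { 0x10, 0x10, 0x20, 0x30, 0x30, 0x30, 0x30, 0x30 };
	uint8_t map[8];
	uint8_t last = 0, row = 0;
	int index;

	for (index = 0; index < 8; index += 2) {
		if (drb[index] == last) {
			/* no dimm in the slot, flag both sides as empty */
			map[index] = 0xff;
			map[index + 1] = 0xff;
		} else {
			/* populated slot: assign a row, then see whether the
			 * second side adds another boundary (double sided) */
			map[index] = row;
			row++;
			last = drb[index];
			map[index + 1] = (drb[index + 1] == last) ? 0xff : row;
			row++;
			last = drb[index + 1];
		}
	}

	for (index = 0; index < 8; index++)
		printf("map[%d] = 0x%02x\n", index, map[index]);

	return 0;
}
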
@@ -896,9 +929,8 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
896 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); | 929 | pvt->map_type = ((stat8 & 0x0f) > ((stat8 >> 4) & 0x0f)); |
897 | 930 | ||
898 | mci->edac_cap |= EDAC_FLAG_NONE; | 931 | mci->edac_cap |= EDAC_FLAG_NONE; |
932 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | ||
899 | 933 | ||
900 | debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", | ||
901 | __func__); | ||
902 | /* load the top of low memory, remap base, and remap limit vars */ | 934 | /* load the top of low memory, remap base, and remap limit vars */ |
903 | pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); | 935 | pci_read_config_word(mci->pdev, E752X_TOLM, &pci_data); |
904 | pvt->tolm = ((u32) pci_data) << 4; | 936 | pvt->tolm = ((u32) pci_data) << 4; |
@@ -906,43 +938,18 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
906 | pvt->remapbase = ((u32) pci_data) << 14; | 938 | pvt->remapbase = ((u32) pci_data) << 14; |
907 | pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); | 939 | pci_read_config_word(mci->pdev, E752X_REMAPLIMIT, &pci_data); |
908 | pvt->remaplimit = ((u32) pci_data) << 14; | 940 | pvt->remaplimit = ((u32) pci_data) << 14; |
909 | printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, | 941 | e752x_printk(KERN_INFO, |
910 | pvt->remapbase, pvt->remaplimit); | 942 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, |
943 | pvt->remapbase, pvt->remaplimit); | ||
911 | 944 | ||
912 | if (edac_mc_add_mc(mci)) { | 945 | if (edac_mc_add_mc(mci)) { |
913 | debugf3("MC: " __FILE__ | 946 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
914 | ": %s(): failed edac_mc_add_mc()\n", | ||
915 | __func__); | ||
916 | goto fail; | 947 | goto fail; |
917 | } | 948 | } |
918 | 949 | ||
919 | /* Walk through the PCI table and clear errors */ | 950 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, e752x_devs[dev_idx].ctl_dev, |
920 | switch (dev_idx) { | 951 | NULL); |
921 | case E7520: | ||
922 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
923 | PCI_DEVICE_ID_INTEL_7520_0, NULL); | ||
924 | break; | ||
925 | case E7525: | ||
926 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
927 | PCI_DEVICE_ID_INTEL_7525_0, NULL); | ||
928 | break; | ||
929 | case E7320: | ||
930 | dev = pci_get_device(PCI_VENDOR_ID_INTEL, | ||
931 | PCI_DEVICE_ID_INTEL_7320_0, NULL); | ||
932 | break; | ||
933 | } | ||
934 | |||
935 | |||
936 | pvt->dev_d0f0 = dev; | 952 | pvt->dev_d0f0 = dev; |
937 | for (pres_dev = dev; | ||
938 | ((struct pci_dev *) pres_dev->global_list.next != dev); | ||
939 | pres_dev = (struct pci_dev *) pres_dev->global_list.next) { | ||
940 | pci_read_config_dword(pres_dev, PCI_COMMAND, &stat32); | ||
941 | stat = (u16) (stat32 >> 16); | ||
942 | /* clear any error bits */ | ||
943 | if (stat32 & ((1 << 6) + (1 << 8))) | ||
944 | pci_write_config_word(pres_dev, PCI_STATUS, stat); | ||
945 | } | ||
946 | /* find the error reporting device and clear errors */ | 953 | /* find the error reporting device and clear errors */ |
947 | dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); | 954 | dev = pvt->dev_d0f1 = pci_dev_get(pvt->bridge_ck); |
948 | /* Turn off error disable & SMI in case the BIOS turned it on */ | 955 | /* Turn off error disable & SMI in case the BIOS turned it on */ |
@@ -954,67 +961,51 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
954 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | 961 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); |
955 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); | 962 | pci_write_config_byte(dev, E752X_DRAM_ERRMASK, 0x00); |
956 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); | 963 | pci_write_config_byte(dev, E752X_DRAM_SMICMD, 0x00); |
957 | /* clear other MCH errors */ | 964 | |
958 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &stat32); | 965 | e752x_get_error_info(mci, &discard); /* clear other MCH errors */ |
959 | pci_write_config_dword(dev, E752X_FERR_GLOBAL, stat32); | ||
960 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &stat32); | ||
961 | pci_write_config_dword(dev, E752X_NERR_GLOBAL, stat32); | ||
962 | pci_read_config_byte(dev, E752X_HI_FERR, &stat8); | ||
963 | pci_write_config_byte(dev, E752X_HI_FERR, stat8); | ||
964 | pci_read_config_byte(dev, E752X_HI_NERR, &stat8); | ||
965 | pci_write_config_byte(dev, E752X_HI_NERR, stat8); | ||
966 | pci_read_config_dword(dev, E752X_SYSBUS_FERR, &stat32); | ||
967 | pci_write_config_dword(dev, E752X_SYSBUS_FERR, stat32); | ||
968 | pci_read_config_byte(dev, E752X_BUF_FERR, &stat8); | ||
969 | pci_write_config_byte(dev, E752X_BUF_FERR, stat8); | ||
970 | pci_read_config_byte(dev, E752X_BUF_NERR, &stat8); | ||
971 | pci_write_config_byte(dev, E752X_BUF_NERR, stat8); | ||
972 | pci_read_config_word(dev, E752X_DRAM_FERR, &stat16); | ||
973 | pci_write_config_word(dev, E752X_DRAM_FERR, stat16); | ||
974 | pci_read_config_word(dev, E752X_DRAM_NERR, &stat16); | ||
975 | pci_write_config_word(dev, E752X_DRAM_NERR, stat16); | ||
976 | 966 | ||
977 | /* get this far and it's successful */ | 967 | /* get this far and it's successful */ |
978 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | 968 | debugf3("%s(): success\n", __func__); |
979 | return 0; | 969 | return 0; |
980 | 970 | ||
981 | fail: | 971 | fail: |
982 | if (mci) { | 972 | if (mci) { |
983 | if (pvt->dev_d0f0) | 973 | if (pvt->dev_d0f0) |
984 | pci_dev_put(pvt->dev_d0f0); | 974 | pci_dev_put(pvt->dev_d0f0); |
975 | |||
985 | if (pvt->dev_d0f1) | 976 | if (pvt->dev_d0f1) |
986 | pci_dev_put(pvt->dev_d0f1); | 977 | pci_dev_put(pvt->dev_d0f1); |
978 | |||
987 | if (pvt->bridge_ck) | 979 | if (pvt->bridge_ck) |
988 | pci_dev_put(pvt->bridge_ck); | 980 | pci_dev_put(pvt->bridge_ck); |
981 | |||
989 | edac_mc_free(mci); | 982 | edac_mc_free(mci); |
990 | } | 983 | } |
984 | |||
991 | return rc; | 985 | return rc; |
992 | } | 986 | } |
993 | 987 | ||
994 | /* returns count (>= 0), or negative on error */ | 988 | /* returns count (>= 0), or negative on error */ |
995 | static int __devinit e752x_init_one(struct pci_dev *pdev, | 989 | static int __devinit e752x_init_one(struct pci_dev *pdev, |
996 | const struct pci_device_id *ent) | 990 | const struct pci_device_id *ent) |
997 | { | 991 | { |
998 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 992 | debugf0("%s()\n", __func__); |
999 | 993 | ||
1000 | /* wake up and enable device */ | 994 | /* wake up and enable device */ |
1001 | if(pci_enable_device(pdev) < 0) | 995 | if(pci_enable_device(pdev) < 0) |
1002 | return -EIO; | 996 | return -EIO; |
997 | |||
1003 | return e752x_probe1(pdev, ent->driver_data); | 998 | return e752x_probe1(pdev, ent->driver_data); |
1004 | } | 999 | } |
1005 | 1000 | ||
1006 | |||
1007 | static void __devexit e752x_remove_one(struct pci_dev *pdev) | 1001 | static void __devexit e752x_remove_one(struct pci_dev *pdev) |
1008 | { | 1002 | { |
1009 | struct mem_ctl_info *mci; | 1003 | struct mem_ctl_info *mci; |
1010 | struct e752x_pvt *pvt; | 1004 | struct e752x_pvt *pvt; |
1011 | 1005 | ||
1012 | debugf0(__FILE__ ": %s()\n", __func__); | 1006 | debugf0("%s()\n", __func__); |
1013 | |||
1014 | if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) | ||
1015 | return; | ||
1016 | 1007 | ||
1017 | if (edac_mc_del_mc(mci)) | 1008 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
1018 | return; | 1009 | return; |
1019 | 1010 | ||
1020 | pvt = (struct e752x_pvt *) mci->pvt_info; | 1011 | pvt = (struct e752x_pvt *) mci->pvt_info; |
@@ -1024,45 +1015,48 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev) | |||
1024 | edac_mc_free(mci); | 1015 | edac_mc_free(mci); |
1025 | } | 1016 | } |
1026 | 1017 | ||
1027 | |||
1028 | static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { | 1018 | static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { |
1029 | {PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1019 | { |
1030 | E7520}, | 1020 | PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
1031 | {PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1021 | E7520 |
1032 | E7525}, | 1022 | }, |
1033 | {PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1023 | { |
1034 | E7320}, | 1024 | PCI_VEND_DEV(INTEL, 7525_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
1035 | {0,} /* 0 terminated list. */ | 1025 | E7525 |
1026 | }, | ||
1027 | { | ||
1028 | PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
1029 | E7320 | ||
1030 | }, | ||
1031 | { | ||
1032 | 0, | ||
1033 | } /* 0 terminated list. */ | ||
1036 | }; | 1034 | }; |
1037 | 1035 | ||
1038 | MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); | 1036 | MODULE_DEVICE_TABLE(pci, e752x_pci_tbl); |
1039 | 1037 | ||
1040 | |||
1041 | static struct pci_driver e752x_driver = { | 1038 | static struct pci_driver e752x_driver = { |
1042 | .name = BS_MOD_STR, | 1039 | .name = EDAC_MOD_STR, |
1043 | .probe = e752x_init_one, | 1040 | .probe = e752x_init_one, |
1044 | .remove = __devexit_p(e752x_remove_one), | 1041 | .remove = __devexit_p(e752x_remove_one), |
1045 | .id_table = e752x_pci_tbl, | 1042 | .id_table = e752x_pci_tbl, |
1046 | }; | 1043 | }; |
1047 | 1044 | ||
1048 | |||
1049 | static int __init e752x_init(void) | 1045 | static int __init e752x_init(void) |
1050 | { | 1046 | { |
1051 | int pci_rc; | 1047 | int pci_rc; |
1052 | 1048 | ||
1053 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1049 | debugf3("%s()\n", __func__); |
1054 | pci_rc = pci_register_driver(&e752x_driver); | 1050 | pci_rc = pci_register_driver(&e752x_driver); |
1055 | return (pci_rc < 0) ? pci_rc : 0; | 1051 | return (pci_rc < 0) ? pci_rc : 0; |
1056 | } | 1052 | } |
1057 | 1053 | ||
1058 | |||
1059 | static void __exit e752x_exit(void) | 1054 | static void __exit e752x_exit(void) |
1060 | { | 1055 | { |
1061 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1056 | debugf3("%s()\n", __func__); |
1062 | pci_unregister_driver(&e752x_driver); | 1057 | pci_unregister_driver(&e752x_driver); |
1063 | } | 1058 | } |
1064 | 1059 | ||
1065 | |||
1066 | module_init(e752x_init); | 1060 | module_init(e752x_init); |
1067 | module_exit(e752x_exit); | 1061 | module_exit(e752x_exit); |
1068 | 1062 | ||
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index d5e320dfc66f..a9518d3e4be4 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -11,9 +11,9 @@ | |||
11 | * http://www.anime.net/~goemon/linux-ecc/ | 11 | * http://www.anime.net/~goemon/linux-ecc/ |
12 | * | 12 | * |
13 | * Contributors: | 13 | * Contributors: |
14 | * Eric Biederman (Linux Networx) | 14 | * Eric Biederman (Linux Networx) |
15 | * Tom Zimmerman (Linux Networx) | 15 | * Tom Zimmerman (Linux Networx) |
16 | * Jim Garlick (Lawrence Livermore National Labs) | 16 | * Jim Garlick (Lawrence Livermore National Labs) |
17 | * Dave Peterson (Lawrence Livermore National Labs) | 17 | * Dave Peterson (Lawrence Livermore National Labs) |
18 | * That One Guy (Some other place) | 18 | * That One Guy (Some other place) |
19 | * Wang Zhenyu (intel.com) | 19 | * Wang Zhenyu (intel.com) |
@@ -22,7 +22,6 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | |||
26 | #include <linux/config.h> | 25 | #include <linux/config.h> |
27 | #include <linux/module.h> | 26 | #include <linux/module.h> |
28 | #include <linux/init.h> | 27 | #include <linux/init.h> |
@@ -31,6 +30,11 @@ | |||
31 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
32 | #include "edac_mc.h" | 31 | #include "edac_mc.h" |
33 | 32 | ||
33 | #define e7xxx_printk(level, fmt, arg...) \ | ||
34 | edac_printk(level, "e7xxx", fmt, ##arg) | ||
35 | |||
36 | #define e7xxx_mc_printk(mci, level, fmt, arg...) \ | ||
37 | edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) | ||
34 | 38 | ||
35 | #ifndef PCI_DEVICE_ID_INTEL_7205_0 | 39 | #ifndef PCI_DEVICE_ID_INTEL_7205_0 |
36 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d | 40 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d |
@@ -64,11 +68,9 @@ | |||
64 | #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 | 68 | #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 |
65 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ | 69 | #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ |
66 | 70 | ||
67 | |||
68 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ | 71 | #define E7XXX_NR_CSROWS 8 /* number of csrows */ |
69 | #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ | 72 | #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ |
70 | 73 | ||
71 | |||
72 | /* E7XXX register addresses - device 0 function 0 */ | 74 | /* E7XXX register addresses - device 0 function 0 */ |
73 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ | 75 | #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ |
74 | #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ | 76 | #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ |
@@ -118,7 +120,6 @@ enum e7xxx_chips { | |||
118 | E7205, | 120 | E7205, |
119 | }; | 121 | }; |
120 | 122 | ||
121 | |||
122 | struct e7xxx_pvt { | 123 | struct e7xxx_pvt { |
123 | struct pci_dev *bridge_ck; | 124 | struct pci_dev *bridge_ck; |
124 | u32 tolm; | 125 | u32 tolm; |
@@ -127,13 +128,11 @@ struct e7xxx_pvt { | |||
127 | const struct e7xxx_dev_info *dev_info; | 128 | const struct e7xxx_dev_info *dev_info; |
128 | }; | 129 | }; |
129 | 130 | ||
130 | |||
131 | struct e7xxx_dev_info { | 131 | struct e7xxx_dev_info { |
132 | u16 err_dev; | 132 | u16 err_dev; |
133 | const char *ctl_name; | 133 | const char *ctl_name; |
134 | }; | 134 | }; |
135 | 135 | ||
136 | |||
137 | struct e7xxx_error_info { | 136 | struct e7xxx_error_info { |
138 | u8 dram_ferr; | 137 | u8 dram_ferr; |
139 | u8 dram_nerr; | 138 | u8 dram_nerr; |
@@ -144,108 +143,110 @@ struct e7xxx_error_info { | |||
144 | 143 | ||
145 | static const struct e7xxx_dev_info e7xxx_devs[] = { | 144 | static const struct e7xxx_dev_info e7xxx_devs[] = { |
146 | [E7500] = { | 145 | [E7500] = { |
147 | .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, | 146 | .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, |
148 | .ctl_name = "E7500"}, | 147 | .ctl_name = "E7500" |
148 | }, | ||
149 | [E7501] = { | 149 | [E7501] = { |
150 | .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, | 150 | .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, |
151 | .ctl_name = "E7501"}, | 151 | .ctl_name = "E7501" |
152 | }, | ||
152 | [E7505] = { | 153 | [E7505] = { |
153 | .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, | 154 | .err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, |
154 | .ctl_name = "E7505"}, | 155 | .ctl_name = "E7505" |
156 | }, | ||
155 | [E7205] = { | 157 | [E7205] = { |
156 | .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, | 158 | .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, |
157 | .ctl_name = "E7205"}, | 159 | .ctl_name = "E7205" |
160 | }, | ||
158 | }; | 161 | }; |
159 | 162 | ||
160 | |||
161 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ | 163 | /* FIXME - is this valid for both SECDED and S4ECD4ED? */ |
162 | static inline int e7xxx_find_channel(u16 syndrome) | 164 | static inline int e7xxx_find_channel(u16 syndrome) |
163 | { | 165 | { |
164 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 166 | debugf3("%s()\n", __func__); |
165 | 167 | ||
166 | if ((syndrome & 0xff00) == 0) | 168 | if ((syndrome & 0xff00) == 0) |
167 | return 0; | 169 | return 0; |
170 | |||
168 | if ((syndrome & 0x00ff) == 0) | 171 | if ((syndrome & 0x00ff) == 0) |
169 | return 1; | 172 | return 1; |
173 | |||
170 | if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) | 174 | if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) |
171 | return 0; | 175 | return 0; |
176 | |||
172 | return 1; | 177 | return 1; |
173 | } | 178 | } |
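
The nibble tests above are easier to see with a few sample syndromes; the following stand-alone sketch copies the decision logic and feeds it invented values (it is an illustration, not part of the driver):

/* Stand-alone check of the syndrome-to-channel mapping above,
 * using invented syndrome values. */
#include <stdio.h>
#include <stdint.h>

static int find_channel(uint16_t syndrome)
{
	if ((syndrome & 0xff00) == 0)
		return 0;
	if ((syndrome & 0x00ff) == 0)
		return 1;
	if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0)
		return 0;
	return 1;
}

int main(void)
{
	printf("0x0042 -> channel %d\n", find_channel(0x0042)); /* high byte clear: 0 */
	printf("0x4200 -> channel %d\n", find_channel(0x4200)); /* low byte clear: 1 */
	printf("0x0420 -> channel %d\n", find_channel(0x0420)); /* top nibble clear: 0 */
	printf("0x8421 -> channel %d\n", find_channel(0x8421)); /* neither rule: 1 */
	return 0;
}
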
174 | 179 | ||
175 | 180 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | |
176 | static unsigned long | 181 | unsigned long page) |
177 | ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page) | ||
178 | { | 182 | { |
179 | u32 remap; | 183 | u32 remap; |
180 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; | 184 | struct e7xxx_pvt *pvt = (struct e7xxx_pvt *) mci->pvt_info; |
181 | 185 | ||
182 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 186 | debugf3("%s()\n", __func__); |
183 | 187 | ||
184 | if ((page < pvt->tolm) || | 188 | if ((page < pvt->tolm) || |
185 | ((page >= 0x100000) && (page < pvt->remapbase))) | 189 | ((page >= 0x100000) && (page < pvt->remapbase))) |
186 | return page; | 190 | return page; |
191 | |||
187 | remap = (page - pvt->tolm) + pvt->remapbase; | 192 | remap = (page - pvt->tolm) + pvt->remapbase; |
193 | |||
188 | if (remap < pvt->remaplimit) | 194 | if (remap < pvt->remaplimit) |
189 | return remap; | 195 | return remap; |
190 | printk(KERN_ERR "Invalid page %lx - out of range\n", page); | 196 | |
197 | e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); | ||
191 | return pvt->tolm - 1; | 198 | return pvt->tolm - 1; |
192 | } | 199 | } |
193 | 200 | ||
194 | 201 | static void process_ce(struct mem_ctl_info *mci, | |
195 | static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | 202 | struct e7xxx_error_info *info) |
196 | { | 203 | { |
197 | u32 error_1b, page; | 204 | u32 error_1b, page; |
198 | u16 syndrome; | 205 | u16 syndrome; |
199 | int row; | 206 | int row; |
200 | int channel; | 207 | int channel; |
201 | 208 | ||
202 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 209 | debugf3("%s()\n", __func__); |
203 | |||
204 | /* read the error address */ | 210 | /* read the error address */ |
205 | error_1b = info->dram_celog_add; | 211 | error_1b = info->dram_celog_add; |
206 | /* FIXME - should use PAGE_SHIFT */ | 212 | /* FIXME - should use PAGE_SHIFT */ |
207 | page = error_1b >> 6; /* convert the address to 4k page */ | 213 | page = error_1b >> 6; /* convert the address to 4k page */ |
208 | /* read the syndrome */ | 214 | /* read the syndrome */ |
209 | syndrome = info->dram_celog_syndrome; | 215 | syndrome = info->dram_celog_syndrome; |
210 | /* FIXME - check for -1 */ | 216 | /* FIXME - check for -1 */ |
211 | row = edac_mc_find_csrow_by_page(mci, page); | 217 | row = edac_mc_find_csrow_by_page(mci, page); |
212 | /* convert syndrome to channel */ | 218 | /* convert syndrome to channel */ |
213 | channel = e7xxx_find_channel(syndrome); | 219 | channel = e7xxx_find_channel(syndrome); |
214 | edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, | 220 | edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); |
215 | "e7xxx CE"); | ||
216 | } | 221 | } |
217 | 222 | ||
218 | |||
219 | static void process_ce_no_info(struct mem_ctl_info *mci) | 223 | static void process_ce_no_info(struct mem_ctl_info *mci) |
220 | { | 224 | { |
221 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 225 | debugf3("%s()\n", __func__); |
222 | edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); | 226 | edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); |
223 | } | 227 | } |
224 | 228 | ||
225 | 229 | static void process_ue(struct mem_ctl_info *mci, | |
226 | static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) | 230 | struct e7xxx_error_info *info) |
227 | { | 231 | { |
228 | u32 error_2b, block_page; | 232 | u32 error_2b, block_page; |
229 | int row; | 233 | int row; |
230 | 234 | ||
231 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 235 | debugf3("%s()\n", __func__); |
232 | |||
233 | /* read the error address */ | 236 | /* read the error address */ |
234 | error_2b = info->dram_uelog_add; | 237 | error_2b = info->dram_uelog_add; |
235 | /* FIXME - should use PAGE_SHIFT */ | 238 | /* FIXME - should use PAGE_SHIFT */ |
236 | block_page = error_2b >> 6; /* convert to 4k address */ | 239 | block_page = error_2b >> 6; /* convert to 4k address */ |
237 | row = edac_mc_find_csrow_by_page(mci, block_page); | 240 | row = edac_mc_find_csrow_by_page(mci, block_page); |
238 | edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); | 241 | edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); |
239 | } | 242 | } |
240 | 243 | ||
241 | |||
242 | static void process_ue_no_info(struct mem_ctl_info *mci) | 244 | static void process_ue_no_info(struct mem_ctl_info *mci) |
243 | { | 245 | { |
244 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 246 | debugf3("%s()\n", __func__); |
245 | edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); | 247 | edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); |
246 | } | 248 | } |
247 | 249 | ||
248 | |||
249 | static void e7xxx_get_error_info (struct mem_ctl_info *mci, | 250 | static void e7xxx_get_error_info (struct mem_ctl_info *mci, |
250 | struct e7xxx_error_info *info) | 251 | struct e7xxx_error_info *info) |
251 | { | 252 | { |
@@ -253,31 +254,29 @@ static void e7xxx_get_error_info (struct mem_ctl_info *mci, | |||
253 | 254 | ||
254 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | 255 | pvt = (struct e7xxx_pvt *) mci->pvt_info; |
255 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, | 256 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, |
256 | &info->dram_ferr); | 257 | &info->dram_ferr); |
257 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, | 258 | pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, |
258 | &info->dram_nerr); | 259 | &info->dram_nerr); |
259 | 260 | ||
260 | if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { | 261 | if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { |
261 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, | 262 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, |
262 | &info->dram_celog_add); | 263 | &info->dram_celog_add); |
263 | pci_read_config_word(pvt->bridge_ck, | 264 | pci_read_config_word(pvt->bridge_ck, |
264 | E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); | 265 | E7XXX_DRAM_CELOG_SYNDROME, |
266 | &info->dram_celog_syndrome); | ||
265 | } | 267 | } |
266 | 268 | ||
267 | if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) | 269 | if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) |
268 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, | 270 | pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, |
269 | &info->dram_uelog_add); | 271 | &info->dram_uelog_add); |
270 | 272 | ||
271 | if (info->dram_ferr & 3) | 273 | if (info->dram_ferr & 3) |
272 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, | 274 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); |
273 | 0x03); | ||
274 | 275 | ||
275 | if (info->dram_nerr & 3) | 276 | if (info->dram_nerr & 3) |
276 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, | 277 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); |
277 | 0x03); | ||
278 | } | 278 | } |
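Review note: from the reads above, bit 0 of DRAM_FERR/DRAM_NERR appears to flag a logged correctable error (the CELOG address and syndrome are then harvested) and bit 1 an uncorrectable one (the UELOG address is harvested); writing the low two bits back via pci_write_bits8() clears the logs, which is also why the probe path below now calls this routine with a throwaway struct instead of poking FERR/NERR directly. A condensed restatement, with mask names invented for readability (not part of the driver):

    #define EXAMPLE_ERR_CE  0x01    /* bit 0: correctable error logged   */
    #define EXAMPLE_ERR_UE  0x02    /* bit 1: uncorrectable error logged */

    static int example_ce_logged(unsigned char ferr, unsigned char nerr)
    {
            return ((ferr | nerr) & EXAMPLE_ERR_CE) != 0;
    }

    static int example_ue_logged(unsigned char ferr, unsigned char nerr)
    {
            return ((ferr | nerr) & EXAMPLE_ERR_UE) != 0;
    }
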
279 | 279 | ||
280 | |||
281 | static int e7xxx_process_error_info (struct mem_ctl_info *mci, | 280 | static int e7xxx_process_error_info (struct mem_ctl_info *mci, |
282 | struct e7xxx_error_info *info, int handle_errors) | 281 | struct e7xxx_error_info *info, int handle_errors) |
283 | { | 282 | { |
@@ -325,17 +324,15 @@ static int e7xxx_process_error_info (struct mem_ctl_info *mci, | |||
325 | return error_found; | 324 | return error_found; |
326 | } | 325 | } |
327 | 326 | ||
328 | |||
329 | static void e7xxx_check(struct mem_ctl_info *mci) | 327 | static void e7xxx_check(struct mem_ctl_info *mci) |
330 | { | 328 | { |
331 | struct e7xxx_error_info info; | 329 | struct e7xxx_error_info info; |
332 | 330 | ||
333 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 331 | debugf3("%s()\n", __func__); |
334 | e7xxx_get_error_info(mci, &info); | 332 | e7xxx_get_error_info(mci, &info); |
335 | e7xxx_process_error_info(mci, &info, 1); | 333 | e7xxx_process_error_info(mci, &info, 1); |
336 | } | 334 | } |
337 | 335 | ||
338 | |||
339 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | 336 | static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) |
340 | { | 337 | { |
341 | int rc = -ENODEV; | 338 | int rc = -ENODEV; |
@@ -349,19 +346,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
349 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 346 | int drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ |
350 | u32 dra; | 347 | u32 dra; |
351 | unsigned long last_cumul_size; | 348 | unsigned long last_cumul_size; |
349 | struct e7xxx_error_info discard; | ||
352 | 350 | ||
353 | 351 | debugf0("%s(): mci\n", __func__); | |
354 | debugf0("MC: " __FILE__ ": %s(): mci\n", __func__); | ||
355 | 352 | ||
356 | /* need to find out the number of channels */ | 353 | /* need to find out the number of channels */ |
357 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | 354 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); |
355 | |||
358 | /* only e7501 can be single channel */ | 356 | /* only e7501 can be single channel */ |
359 | if (dev_idx == E7501) { | 357 | if (dev_idx == E7501) { |
360 | drc_chan = ((drc >> 22) & 0x1); | 358 | drc_chan = ((drc >> 22) & 0x1); |
361 | drc_drbg = (drc >> 18) & 0x3; | 359 | drc_drbg = (drc >> 18) & 0x3; |
362 | } | 360 | } |
363 | drc_ddim = (drc >> 20) & 0x3; | ||
364 | 361 | ||
362 | drc_ddim = (drc >> 20) & 0x3; | ||
365 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); | 363 | mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1); |
366 | 364 | ||
367 | if (mci == NULL) { | 365 | if (mci == NULL) { |
@@ -369,33 +367,31 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
369 | goto fail; | 367 | goto fail; |
370 | } | 368 | } |
371 | 369 | ||
372 | debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); | 370 | debugf3("%s(): init mci\n", __func__); |
373 | |||
374 | mci->mtype_cap = MEM_FLAG_RDDR; | 371 | mci->mtype_cap = MEM_FLAG_RDDR; |
375 | mci->edac_ctl_cap = | 372 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | |
376 | EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; | 373 | EDAC_FLAG_S4ECD4ED; |
377 | /* FIXME - what if different memory types are in different csrows? */ | 374 | /* FIXME - what if different memory types are in different csrows? */ |
378 | mci->mod_name = BS_MOD_STR; | 375 | mci->mod_name = EDAC_MOD_STR; |
379 | mci->mod_ver = "$Revision: 1.5.2.9 $"; | 376 | mci->mod_ver = "$Revision: 1.5.2.9 $"; |
380 | mci->pdev = pdev; | 377 | mci->pdev = pdev; |
381 | 378 | ||
382 | debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); | 379 | debugf3("%s(): init pvt\n", __func__); |
383 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | 380 | pvt = (struct e7xxx_pvt *) mci->pvt_info; |
384 | pvt->dev_info = &e7xxx_devs[dev_idx]; | 381 | pvt->dev_info = &e7xxx_devs[dev_idx]; |
385 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, | 382 | pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, |
386 | pvt->dev_info->err_dev, | 383 | pvt->dev_info->err_dev, |
387 | pvt->bridge_ck); | 384 | pvt->bridge_ck); |
385 | |||
388 | if (!pvt->bridge_ck) { | 386 | if (!pvt->bridge_ck) { |
389 | printk(KERN_ERR | 387 | e7xxx_printk(KERN_ERR, "error reporting device not found:" |
390 | "MC: error reporting device not found:" | 388 | "vendor %x device 0x%x (broken BIOS?)\n", |
391 | "vendor %x device 0x%x (broken BIOS?)\n", | 389 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); |
392 | PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); | ||
393 | goto fail; | 390 | goto fail; |
394 | } | 391 | } |
395 | 392 | ||
396 | debugf3("MC: " __FILE__ ": %s(): more mci init\n", __func__); | 393 | debugf3("%s(): more mci init\n", __func__); |
397 | mci->ctl_name = pvt->dev_info->ctl_name; | 394 | mci->ctl_name = pvt->dev_info->ctl_name; |
398 | |||
399 | mci->edac_check = e7xxx_check; | 395 | mci->edac_check = e7xxx_check; |
400 | mci->ctl_page_to_phys = ctl_page_to_phys; | 396 | mci->ctl_page_to_phys = ctl_page_to_phys; |
401 | 397 | ||
@@ -418,17 +414,18 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
418 | pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); | 414 | pci_read_config_byte(mci->pdev, E7XXX_DRB + index, &value); |
419 | /* convert a 64 or 32 MiB DRB to a page size. */ | 415 | /* convert a 64 or 32 MiB DRB to a page size. */ |
420 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); | 416 | cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); |
421 | debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", | 417 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
422 | __func__, index, cumul_size); | 418 | cumul_size); |
419 | |||
423 | if (cumul_size == last_cumul_size) | 420 | if (cumul_size == last_cumul_size) |
424 | continue; /* not populated */ | 421 | continue; /* not populated */ |
425 | 422 | ||
426 | csrow->first_page = last_cumul_size; | 423 | csrow->first_page = last_cumul_size; |
427 | csrow->last_page = cumul_size - 1; | 424 | csrow->last_page = cumul_size - 1; |
428 | csrow->nr_pages = cumul_size - last_cumul_size; | 425 | csrow->nr_pages = cumul_size - last_cumul_size; |
429 | last_cumul_size = cumul_size; | 426 | last_cumul_size = cumul_size; |
430 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ | 427 | csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ |
431 | csrow->mtype = MEM_RDDR; /* only one type supported */ | 428 | csrow->mtype = MEM_RDDR; /* only one type supported */ |
432 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; | 429 | csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; |
433 | 430 | ||
434 | /* | 431 | /* |
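Review note: per the comment above, each DRB byte is a cumulative row boundary in 32 MiB or 64 MiB units (drc_drbg selects which), and the shift turns it directly into a cumulative page count. One worked instance of the arithmetic, with an invented DRB value:

    /* Invented example: value = 4, drc_drbg = 1, PAGE_SHIFT = 12
     *
     *   cumul_size = 4 << (25 + 1 - 12) = 4 << 14 = 0x10000 pages
     *              = 65536 pages * 4 KiB = 256 MiB   (4 rows of 64 MiB)
     *
     * With drc_drbg = 0 the granularity is 32 MiB, so the same DRB value
     * would give 4 << 13 = 0x8000 pages = 128 MiB.
     */
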
@@ -449,8 +446,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
449 | 446 | ||
450 | mci->edac_cap |= EDAC_FLAG_NONE; | 447 | mci->edac_cap |= EDAC_FLAG_NONE; |
451 | 448 | ||
452 | debugf3("MC: " __FILE__ ": %s(): tolm, remapbase, remaplimit\n", | 449 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
453 | __func__); | ||
454 | /* load the top of low memory, remap base, and remap limit vars */ | 450 | /* load the top of low memory, remap base, and remap limit vars */ |
455 | pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); | 451 | pci_read_config_word(mci->pdev, E7XXX_TOLM, &pci_data); |
456 | pvt->tolm = ((u32) pci_data) << 4; | 452 | pvt->tolm = ((u32) pci_data) << 4; |
@@ -458,22 +454,20 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
458 | pvt->remapbase = ((u32) pci_data) << 14; | 454 | pvt->remapbase = ((u32) pci_data) << 14; |
459 | pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); | 455 | pci_read_config_word(mci->pdev, E7XXX_REMAPLIMIT, &pci_data); |
460 | pvt->remaplimit = ((u32) pci_data) << 14; | 456 | pvt->remaplimit = ((u32) pci_data) << 14; |
461 | printk("tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, | 457 | e7xxx_printk(KERN_INFO, |
462 | pvt->remapbase, pvt->remaplimit); | 458 | "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, |
459 | pvt->remapbase, pvt->remaplimit); | ||
463 | 460 | ||
464 | /* clear any pending errors, or initial state bits */ | 461 | /* clear any pending errors, or initial state bits */ |
465 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); | 462 | e7xxx_get_error_info(mci, &discard); |
466 | pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); | ||
467 | 463 | ||
468 | if (edac_mc_add_mc(mci) != 0) { | 464 | if (edac_mc_add_mc(mci) != 0) { |
469 | debugf3("MC: " __FILE__ | 465 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
470 | ": %s(): failed edac_mc_add_mc()\n", | ||
471 | __func__); | ||
472 | goto fail; | 466 | goto fail; |
473 | } | 467 | } |
474 | 468 | ||
475 | /* get this far and it's successful */ | 469 | /* get this far and it's successful */ |
476 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | 470 | debugf3("%s(): success\n", __func__); |
477 | return 0; | 471 | return 0; |
478 | 472 | ||
479 | fail: | 473 | fail: |
@@ -487,62 +481,67 @@ fail: | |||
487 | } | 481 | } |
488 | 482 | ||
489 | /* returns count (>= 0), or negative on error */ | 483 | /* returns count (>= 0), or negative on error */ |
490 | static int __devinit | 484 | static int __devinit e7xxx_init_one(struct pci_dev *pdev, |
491 | e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | 485 | const struct pci_device_id *ent) |
492 | { | 486 | { |
493 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 487 | debugf0("%s()\n", __func__); |
494 | 488 | ||
495 | /* wake up and enable device */ | 489 | /* wake up and enable device */ |
496 | return pci_enable_device(pdev) ? | 490 | return pci_enable_device(pdev) ? |
497 | -EIO : e7xxx_probe1(pdev, ent->driver_data); | 491 | -EIO : e7xxx_probe1(pdev, ent->driver_data); |
498 | } | 492 | } |
499 | 493 | ||
500 | |||
501 | static void __devexit e7xxx_remove_one(struct pci_dev *pdev) | 494 | static void __devexit e7xxx_remove_one(struct pci_dev *pdev) |
502 | { | 495 | { |
503 | struct mem_ctl_info *mci; | 496 | struct mem_ctl_info *mci; |
504 | struct e7xxx_pvt *pvt; | 497 | struct e7xxx_pvt *pvt; |
505 | 498 | ||
506 | debugf0(__FILE__ ": %s()\n", __func__); | 499 | debugf0("%s()\n", __func__); |
507 | 500 | ||
508 | if (((mci = edac_mc_find_mci_by_pdev(pdev)) != 0) && | 501 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
509 | edac_mc_del_mc(mci)) { | 502 | return; |
510 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
511 | pci_dev_put(pvt->bridge_ck); | ||
512 | edac_mc_free(mci); | ||
513 | } | ||
514 | } | ||
515 | 503 | ||
504 | pvt = (struct e7xxx_pvt *) mci->pvt_info; | ||
505 | pci_dev_put(pvt->bridge_ck); | ||
506 | edac_mc_free(mci); | ||
507 | } | ||
516 | 508 | ||
517 | static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { | 509 | static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = { |
518 | {PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 510 | { |
519 | E7205}, | 511 | PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
520 | {PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 512 | E7205 |
521 | E7500}, | 513 | }, |
522 | {PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 514 | { |
523 | E7501}, | 515 | PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
524 | {PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 516 | E7500 |
525 | E7505}, | 517 | }, |
526 | {0,} /* 0 terminated list. */ | 518 | { |
519 | PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
520 | E7501 | ||
521 | }, | ||
522 | { | ||
523 | PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
524 | E7505 | ||
525 | }, | ||
526 | { | ||
527 | 0, | ||
528 | } /* 0 terminated list. */ | ||
527 | }; | 529 | }; |
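Review note: the trailing field of each table entry is driver_data (E7205, E7500, E7501, E7505). The PCI core hands the matched entry to e7xxx_init_one(), which forwards ent->driver_data to e7xxx_probe1() as dev_idx, where it selects the e7xxx_devs[] entry and gates the E7501-only single-channel path seen earlier. A schematic of that flow (sketch only; the enum and the e7xxx_devs[] table live outside this hunk):

    /* Inside e7xxx_probe1(pdev, dev_idx), as the hunks above suggest: */
    pvt->dev_info = &e7xxx_devs[dev_idx];   /* per-chipset ctl_name, err_dev */

    if (dev_idx == E7501) {
            /* only the E7501 variant can be configured single-channel */
    }
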
528 | 530 | ||
529 | MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); | 531 | MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); |
530 | 532 | ||
531 | |||
532 | static struct pci_driver e7xxx_driver = { | 533 | static struct pci_driver e7xxx_driver = { |
533 | .name = BS_MOD_STR, | 534 | .name = EDAC_MOD_STR, |
534 | .probe = e7xxx_init_one, | 535 | .probe = e7xxx_init_one, |
535 | .remove = __devexit_p(e7xxx_remove_one), | 536 | .remove = __devexit_p(e7xxx_remove_one), |
536 | .id_table = e7xxx_pci_tbl, | 537 | .id_table = e7xxx_pci_tbl, |
537 | }; | 538 | }; |
538 | 539 | ||
539 | |||
540 | static int __init e7xxx_init(void) | 540 | static int __init e7xxx_init(void) |
541 | { | 541 | { |
542 | return pci_register_driver(&e7xxx_driver); | 542 | return pci_register_driver(&e7xxx_driver); |
543 | } | 543 | } |
544 | 544 | ||
545 | |||
546 | static void __exit e7xxx_exit(void) | 545 | static void __exit e7xxx_exit(void) |
547 | { | 546 | { |
548 | pci_unregister_driver(&e7xxx_driver); | 547 | pci_unregister_driver(&e7xxx_driver); |
@@ -551,8 +550,7 @@ static void __exit e7xxx_exit(void) | |||
551 | module_init(e7xxx_init); | 550 | module_init(e7xxx_init); |
552 | module_exit(e7xxx_exit); | 551 | module_exit(e7xxx_exit); |
553 | 552 | ||
554 | |||
555 | MODULE_LICENSE("GPL"); | 553 | MODULE_LICENSE("GPL"); |
556 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" | 554 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" |
557 | "Based on.work by Dan Hollis et al"); | 555 | "Based on.work by Dan Hollis et al"); |
558 | MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); | 556 | MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 9c205274c1cb..905f58ba8e16 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -12,7 +12,6 @@ | |||
12 | * | 12 | * |
13 | */ | 13 | */ |
14 | 14 | ||
15 | |||
16 | #include <linux/config.h> | 15 | #include <linux/config.h> |
17 | #include <linux/module.h> | 16 | #include <linux/module.h> |
18 | #include <linux/proc_fs.h> | 17 | #include <linux/proc_fs.h> |
@@ -29,14 +28,13 @@ | |||
29 | #include <linux/list.h> | 28 | #include <linux/list.h> |
30 | #include <linux/sysdev.h> | 29 | #include <linux/sysdev.h> |
31 | #include <linux/ctype.h> | 30 | #include <linux/ctype.h> |
32 | 31 | #include <linux/kthread.h> | |
33 | #include <asm/uaccess.h> | 32 | #include <asm/uaccess.h> |
34 | #include <asm/page.h> | 33 | #include <asm/page.h> |
35 | #include <asm/edac.h> | 34 | #include <asm/edac.h> |
36 | |||
37 | #include "edac_mc.h" | 35 | #include "edac_mc.h" |
38 | 36 | ||
39 | #define EDAC_MC_VERSION "edac_mc Ver: 2.0.0 " __DATE__ | 37 | #define EDAC_MC_VERSION "Ver: 2.0.0 " __DATE__ |
40 | 38 | ||
41 | /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC | 39 | /* For now, disable the EDAC sysfs code. The sysfs interface that EDAC |
42 | * presents to user space needs more thought, and is likely to change | 40 | * presents to user space needs more thought, and is likely to change |
@@ -47,7 +45,7 @@ | |||
47 | #ifdef CONFIG_EDAC_DEBUG | 45 | #ifdef CONFIG_EDAC_DEBUG |
48 | /* Values of 0 to 4 will generate output */ | 46 | /* Values of 0 to 4 will generate output */ |
49 | int edac_debug_level = 1; | 47 | int edac_debug_level = 1; |
50 | EXPORT_SYMBOL(edac_debug_level); | 48 | EXPORT_SYMBOL_GPL(edac_debug_level); |
51 | #endif | 49 | #endif |
52 | 50 | ||
53 | /* EDAC Controls, settable by module parameter, and sysfs */ | 51 | /* EDAC Controls, settable by module parameter, and sysfs */ |
@@ -64,13 +62,14 @@ static atomic_t pci_parity_count = ATOMIC_INIT(0); | |||
64 | static DECLARE_MUTEX(mem_ctls_mutex); | 62 | static DECLARE_MUTEX(mem_ctls_mutex); |
65 | static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); | 63 | static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); |
66 | 64 | ||
65 | static struct task_struct *edac_thread; | ||
66 | |||
67 | /* Structure of the whitelist and blacklist arrays */ | 67 | /* Structure of the whitelist and blacklist arrays */ |
68 | struct edac_pci_device_list { | 68 | struct edac_pci_device_list { |
69 | unsigned int vendor; /* Vendor ID */ | 69 | unsigned int vendor; /* Vendor ID */ |
70 | unsigned int device; /* Device ID */ | 70 | unsigned int device; /* Device ID */ |
71 | }; | 71 | }; |
72 | 72 | ||
73 | |||
74 | #define MAX_LISTED_PCI_DEVICES 32 | 73 | #define MAX_LISTED_PCI_DEVICES 32 |
75 | 74 | ||
76 | /* List of PCI devices (vendor-id:device-id) that should be skipped */ | 75 | /* List of PCI devices (vendor-id:device-id) that should be skipped */ |
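Review note: the new <linux/kthread.h> include and the edac_thread pointer added above suggest the periodic checking is being moved onto a dedicated kernel thread. The thread function itself is outside this hunk, so the following is only a plausible shape for it, not the patch's actual code (names and the poll period are assumptions; it would also need <linux/delay.h>):

    /* Hypothetical poller sketch only. */
    static int example_edac_poll_thread(void *arg)
    {
            while (!kthread_should_stop()) {
                    /* walk the registered controllers, run each mci's
                     * edac_check() method, then sleep for the poll period
                     */
                    msleep_interruptible(1000);
            }

            return 0;
    }

    /* e.g. at init:  edac_thread = kthread_run(example_edac_poll_thread,
     *                                          NULL, "kedac");
     * and at exit:   kthread_stop(edac_thread);
     */
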
@@ -123,7 +122,6 @@ static const char *edac_caps[] = { | |||
123 | [EDAC_S16ECD16ED] = "S16ECD16ED" | 122 | [EDAC_S16ECD16ED] = "S16ECD16ED" |
124 | }; | 123 | }; |
125 | 124 | ||
126 | |||
127 | /* sysfs object: /sys/devices/system/edac */ | 125 | /* sysfs object: /sys/devices/system/edac */ |
128 | static struct sysdev_class edac_class = { | 126 | static struct sysdev_class edac_class = { |
129 | set_kset_name("edac"), | 127 | set_kset_name("edac"), |
@@ -136,9 +134,15 @@ static struct sysdev_class edac_class = { | |||
136 | static struct kobject edac_memctrl_kobj; | 134 | static struct kobject edac_memctrl_kobj; |
137 | static struct kobject edac_pci_kobj; | 135 | static struct kobject edac_pci_kobj; |
138 | 136 | ||
137 | /* We use these to wait for the reference counts on edac_memctrl_kobj and | ||
138 | * edac_pci_kobj to reach 0. | ||
139 | */ | ||
140 | static struct completion edac_memctrl_kobj_complete; | ||
141 | static struct completion edac_pci_kobj_complete; | ||
142 | |||
139 | /* | 143 | /* |
140 | * /sys/devices/system/edac/mc; | 144 | * /sys/devices/system/edac/mc; |
141 | * data structures and methods | 145 | * data structures and methods |
142 | */ | 146 | */ |
143 | #if 0 | 147 | #if 0 |
144 | static ssize_t memctrl_string_show(void *ptr, char *buffer) | 148 | static ssize_t memctrl_string_show(void *ptr, char *buffer) |
@@ -165,33 +169,34 @@ static ssize_t memctrl_int_store(void *ptr, const char *buffer, size_t count) | |||
165 | } | 169 | } |
166 | 170 | ||
167 | struct memctrl_dev_attribute { | 171 | struct memctrl_dev_attribute { |
168 | struct attribute attr; | 172 | struct attribute attr; |
169 | void *value; | 173 | void *value; |
170 | ssize_t (*show)(void *,char *); | 174 | ssize_t (*show)(void *,char *); |
171 | ssize_t (*store)(void *, const char *, size_t); | 175 | ssize_t (*store)(void *, const char *, size_t); |
172 | }; | 176 | }; |
173 | 177 | ||
174 | /* Set of show/store abstract level functions for memory control object */ | 178 | /* Set of show/store abstract level functions for memory control object */ |
175 | static ssize_t | 179 | static ssize_t memctrl_dev_show(struct kobject *kobj, |
176 | memctrl_dev_show(struct kobject *kobj, struct attribute *attr, char *buffer) | 180 | struct attribute *attr, char *buffer) |
177 | { | 181 | { |
178 | struct memctrl_dev_attribute *memctrl_dev; | 182 | struct memctrl_dev_attribute *memctrl_dev; |
179 | memctrl_dev = (struct memctrl_dev_attribute*)attr; | 183 | memctrl_dev = (struct memctrl_dev_attribute*)attr; |
180 | 184 | ||
181 | if (memctrl_dev->show) | 185 | if (memctrl_dev->show) |
182 | return memctrl_dev->show(memctrl_dev->value, buffer); | 186 | return memctrl_dev->show(memctrl_dev->value, buffer); |
187 | |||
183 | return -EIO; | 188 | return -EIO; |
184 | } | 189 | } |
185 | 190 | ||
186 | static ssize_t | 191 | static ssize_t memctrl_dev_store(struct kobject *kobj, struct attribute *attr, |
187 | memctrl_dev_store(struct kobject *kobj, struct attribute *attr, | 192 | const char *buffer, size_t count) |
188 | const char *buffer, size_t count) | ||
189 | { | 193 | { |
190 | struct memctrl_dev_attribute *memctrl_dev; | 194 | struct memctrl_dev_attribute *memctrl_dev; |
191 | memctrl_dev = (struct memctrl_dev_attribute*)attr; | 195 | memctrl_dev = (struct memctrl_dev_attribute*)attr; |
192 | 196 | ||
193 | if (memctrl_dev->store) | 197 | if (memctrl_dev->store) |
194 | return memctrl_dev->store(memctrl_dev->value, buffer, count); | 198 | return memctrl_dev->store(memctrl_dev->value, buffer, count); |
199 | |||
195 | return -EIO; | 200 | return -EIO; |
196 | } | 201 | } |
197 | 202 | ||
@@ -227,7 +232,6 @@ MEMCTRL_ATTR(log_ue,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); | |||
227 | MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); | 232 | MEMCTRL_ATTR(log_ce,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); |
228 | MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); | 233 | MEMCTRL_ATTR(poll_msec,S_IRUGO|S_IWUSR,memctrl_int_show,memctrl_int_store); |
229 | 234 | ||
230 | |||
231 | /* Base Attributes of the memory ECC object */ | 235 | /* Base Attributes of the memory ECC object */ |
232 | static struct memctrl_dev_attribute *memctrl_attr[] = { | 236 | static struct memctrl_dev_attribute *memctrl_attr[] = { |
233 | &attr_panic_on_ue, | 237 | &attr_panic_on_ue, |
@@ -240,13 +244,14 @@ static struct memctrl_dev_attribute *memctrl_attr[] = { | |||
240 | /* Main MC kobject release() function */ | 244 | /* Main MC kobject release() function */ |
241 | static void edac_memctrl_master_release(struct kobject *kobj) | 245 | static void edac_memctrl_master_release(struct kobject *kobj) |
242 | { | 246 | { |
243 | debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); | 247 | debugf1("%s()\n", __func__); |
248 | complete(&edac_memctrl_kobj_complete); | ||
244 | } | 249 | } |
245 | 250 | ||
246 | static struct kobj_type ktype_memctrl = { | 251 | static struct kobj_type ktype_memctrl = { |
247 | .release = edac_memctrl_master_release, | 252 | .release = edac_memctrl_master_release, |
248 | .sysfs_ops = &memctrlfs_ops, | 253 | .sysfs_ops = &memctrlfs_ops, |
249 | .default_attrs = (struct attribute **) memctrl_attr, | 254 | .default_attrs = (struct attribute **) memctrl_attr, |
250 | }; | 255 | }; |
251 | 256 | ||
252 | #endif /* DISABLE_EDAC_SYSFS */ | 257 | #endif /* DISABLE_EDAC_SYSFS */ |
@@ -268,32 +273,31 @@ static int edac_sysfs_memctrl_setup(void) | |||
268 | { | 273 | { |
269 | int err=0; | 274 | int err=0; |
270 | 275 | ||
271 | debugf1("MC: " __FILE__ ": %s()\n", __func__); | 276 | debugf1("%s()\n", __func__); |
272 | 277 | ||
273 | /* create the /sys/devices/system/edac directory */ | 278 | /* create the /sys/devices/system/edac directory */ |
274 | err = sysdev_class_register(&edac_class); | 279 | err = sysdev_class_register(&edac_class); |
280 | |||
275 | if (!err) { | 281 | if (!err) { |
276 | /* Init the MC's kobject */ | 282 | /* Init the MC's kobject */ |
277 | memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); | 283 | memset(&edac_memctrl_kobj, 0, sizeof (edac_memctrl_kobj)); |
278 | kobject_init(&edac_memctrl_kobj); | ||
279 | |||
280 | edac_memctrl_kobj.parent = &edac_class.kset.kobj; | 284 | edac_memctrl_kobj.parent = &edac_class.kset.kobj; |
281 | edac_memctrl_kobj.ktype = &ktype_memctrl; | 285 | edac_memctrl_kobj.ktype = &ktype_memctrl; |
282 | 286 | ||
283 | /* generate sysfs "..../edac/mc" */ | 287 | /* generate sysfs "..../edac/mc" */ |
284 | err = kobject_set_name(&edac_memctrl_kobj,"mc"); | 288 | err = kobject_set_name(&edac_memctrl_kobj,"mc"); |
289 | |||
285 | if (!err) { | 290 | if (!err) { |
286 | /* FIXME: maybe new sysdev_create_subdir() */ | 291 | /* FIXME: maybe new sysdev_create_subdir() */ |
287 | err = kobject_register(&edac_memctrl_kobj); | 292 | err = kobject_register(&edac_memctrl_kobj); |
288 | if (err) { | 293 | |
294 | if (err) | ||
289 | debugf1("Failed to register '.../edac/mc'\n"); | 295 | debugf1("Failed to register '.../edac/mc'\n"); |
290 | } else { | 296 | else |
291 | debugf1("Registered '.../edac/mc' kobject\n"); | 297 | debugf1("Registered '.../edac/mc' kobject\n"); |
292 | } | ||
293 | } | 298 | } |
294 | } else { | 299 | } else |
295 | debugf1(KERN_WARNING "__FILE__ %s() error=%d\n", __func__,err); | 300 | debugf1("%s() error=%d\n", __func__, err); |
296 | } | ||
297 | 301 | ||
298 | return err; | 302 | return err; |
299 | } | 303 | } |
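Review note: taken together with the "mc" kobject registered above, the "pci" kobject registered further down, and the per-controller and per-csrow kobjects created later, the setup produces a small sysfs tree rooted at the edac sysdev class, roughly as below (paths inferred from the code and its comments, not verified on a running system):

    /*
     * /sys/devices/system/edac/                    <- sysdev_class "edac"
     * /sys/devices/system/edac/mc/                 <- edac_memctrl_kobj ("mc")
     * /sys/devices/system/edac/mc/mc<N>/           <- one kobject per mem_ctl_info
     * /sys/devices/system/edac/mc/mc<N>/csrow<M>/  <- one kobject per populated csrow
     * /sys/devices/system/edac/pci/                <- edac_pci_kobj ("pci")
     */
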
@@ -308,11 +312,12 @@ static void edac_sysfs_memctrl_teardown(void) | |||
308 | #ifndef DISABLE_EDAC_SYSFS | 312 | #ifndef DISABLE_EDAC_SYSFS |
309 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 313 | debugf0("MC: " __FILE__ ": %s()\n", __func__); |
310 | 314 | ||
311 | /* Unregister the MC's kobject */ | 315 | /* Unregister the MC's kobject and wait for reference count to reach |
316 | * 0. | ||
317 | */ | ||
318 | init_completion(&edac_memctrl_kobj_complete); | ||
312 | kobject_unregister(&edac_memctrl_kobj); | 319 | kobject_unregister(&edac_memctrl_kobj); |
313 | 320 | wait_for_completion(&edac_memctrl_kobj_complete); | |
314 | /* release the master edac mc kobject */ | ||
315 | kobject_put(&edac_memctrl_kobj); | ||
316 | 321 | ||
317 | /* Unregister the 'edac' object */ | 322 | /* Unregister the 'edac' object */ |
318 | sysdev_class_unregister(&edac_class); | 323 | sysdev_class_unregister(&edac_class); |
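Review note: the recurring change in this file is the kobject lifetime fix visible above. Instead of kobject_unregister() followed by an extra kobject_put(), each release() handler now calls complete() on a completion, and the teardown path does init_completion(), kobject_unregister(), then wait_for_completion(), so teardown no longer proceeds (or frees memory) until the kobject's reference count has really dropped to zero. The skeleton of the pattern, reduced to its parts (sketch only; the same shape is used for edac_memctrl_kobj, edac_pci_kobj, each csrow kobject and each mci kobject):

    static struct kobject example_kobj;              /* registered elsewhere */
    static struct completion example_kobj_complete;

    /* wired in through the kobject's ktype ->release */
    static void example_release(struct kobject *kobj)
    {
            complete(&example_kobj_complete);        /* last reference is gone */
    }

    static void example_teardown(void)
    {
            init_completion(&example_kobj_complete);
            kobject_unregister(&example_kobj);           /* drop the initial ref    */
            wait_for_completion(&example_kobj_complete); /* block until release ran */
    }
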
@@ -331,7 +336,6 @@ struct list_control { | |||
331 | int *count; | 336 | int *count; |
332 | }; | 337 | }; |
333 | 338 | ||
334 | |||
335 | #if 0 | 339 | #if 0 |
336 | /* Output the list as: vendor_id:device_id<,vendor_id:device_id> */ | 340 | /* Output the list as: vendor_id:device_id<,vendor_id:device_id> */ |
337 | static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) | 341 | static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) |
@@ -356,7 +360,6 @@ static ssize_t edac_pci_list_string_show(void *ptr, char *buffer) | |||
356 | } | 360 | } |
357 | 361 | ||
358 | len += snprintf(p + len,(PAGE_SIZE-len), "\n"); | 362 | len += snprintf(p + len,(PAGE_SIZE-len), "\n"); |
359 | |||
360 | return (ssize_t) len; | 363 | return (ssize_t) len; |
361 | } | 364 | } |
362 | 365 | ||
@@ -378,7 +381,7 @@ static int parse_one_device(const char **s,const char **e, | |||
378 | 381 | ||
379 | /* if null byte, we are done */ | 382 | /* if null byte, we are done */ |
380 | if (!**s) { | 383 | if (!**s) { |
381 | (*s)++; /* keep *s moving */ | 384 | (*s)++; /* keep *s moving */ |
382 | return 0; | 385 | return 0; |
383 | } | 386 | } |
384 | 387 | ||
@@ -395,6 +398,7 @@ static int parse_one_device(const char **s,const char **e, | |||
395 | 398 | ||
396 | /* parse vendor_id */ | 399 | /* parse vendor_id */ |
397 | runner = *s; | 400 | runner = *s; |
401 | |||
398 | while (runner < *e) { | 402 | while (runner < *e) { |
399 | /* scan for vendor:device delimiter */ | 403 | /* scan for vendor:device delimiter */ |
400 | if (*runner == ':') { | 404 | if (*runner == ':') { |
@@ -402,6 +406,7 @@ static int parse_one_device(const char **s,const char **e, | |||
402 | runner = p + 1; | 406 | runner = p + 1; |
403 | break; | 407 | break; |
404 | } | 408 | } |
409 | |||
405 | runner++; | 410 | runner++; |
406 | } | 411 | } |
407 | 412 | ||
@@ -417,12 +422,11 @@ static int parse_one_device(const char **s,const char **e, | |||
417 | } | 422 | } |
418 | 423 | ||
419 | *s = runner; | 424 | *s = runner; |
420 | |||
421 | return 1; | 425 | return 1; |
422 | } | 426 | } |
423 | 427 | ||
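Review note: parse_one_device() above consumes one vendor:device pair per call and advances *s, and the matching show routine (see the comment near the top of this block) prints the list as comma-separated vendor_id:device_id pairs, so a stored whitelist/blacklist presumably looks like the string below. The IDs are invented examples:

    /* e.g. written to the PCI parity whitelist/blacklist attribute:
     *
     *     8086:2541,8086:1a30
     *
     * (two vendor:device pairs; both IDs are made up for illustration)
     */
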
424 | static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, | 428 | static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, |
425 | size_t count) | 429 | size_t count) |
426 | { | 430 | { |
427 | struct list_control *listctl; | 431 | struct list_control *listctl; |
428 | struct edac_pci_device_list *list; | 432 | struct edac_pci_device_list *list; |
@@ -432,14 +436,12 @@ static ssize_t edac_pci_list_string_store(void *ptr, const char *buffer, | |||
432 | 436 | ||
433 | s = (char*)buffer; | 437 | s = (char*)buffer; |
434 | e = s + count; | 438 | e = s + count; |
435 | |||
436 | listctl = ptr; | 439 | listctl = ptr; |
437 | list = listctl->list; | 440 | list = listctl->list; |
438 | index = listctl->count; | 441 | index = listctl->count; |
439 | |||
440 | *index = 0; | 442 | *index = 0; |
441 | while (*index < MAX_LISTED_PCI_DEVICES) { | ||
442 | 443 | ||
444 | while (*index < MAX_LISTED_PCI_DEVICES) { | ||
443 | if (parse_one_device(&s,&e,&vendor_id,&device_id)) { | 445 | if (parse_one_device(&s,&e,&vendor_id,&device_id)) { |
444 | list[ *index ].vendor = vendor_id; | 446 | list[ *index ].vendor = vendor_id; |
445 | list[ *index ].device = device_id; | 447 | list[ *index ].device = device_id; |
@@ -472,15 +474,15 @@ static ssize_t edac_pci_int_store(void *ptr, const char *buffer, size_t count) | |||
472 | } | 474 | } |
473 | 475 | ||
474 | struct edac_pci_dev_attribute { | 476 | struct edac_pci_dev_attribute { |
475 | struct attribute attr; | 477 | struct attribute attr; |
476 | void *value; | 478 | void *value; |
477 | ssize_t (*show)(void *,char *); | 479 | ssize_t (*show)(void *,char *); |
478 | ssize_t (*store)(void *, const char *,size_t); | 480 | ssize_t (*store)(void *, const char *,size_t); |
479 | }; | 481 | }; |
480 | 482 | ||
481 | /* Set of show/store abstract level functions for PCI Parity object */ | 483 | /* Set of show/store abstract level functions for PCI Parity object */ |
482 | static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, | 484 | static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, |
483 | char *buffer) | 485 | char *buffer) |
484 | { | 486 | { |
485 | struct edac_pci_dev_attribute *edac_pci_dev; | 487 | struct edac_pci_dev_attribute *edac_pci_dev; |
486 | edac_pci_dev= (struct edac_pci_dev_attribute*)attr; | 488 | edac_pci_dev= (struct edac_pci_dev_attribute*)attr; |
@@ -490,8 +492,8 @@ static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr, | |||
490 | return -EIO; | 492 | return -EIO; |
491 | } | 493 | } |
492 | 494 | ||
493 | static ssize_t edac_pci_dev_store(struct kobject *kobj, struct attribute *attr, | 495 | static ssize_t edac_pci_dev_store(struct kobject *kobj, |
494 | const char *buffer, size_t count) | 496 | struct attribute *attr, const char *buffer, size_t count) |
495 | { | 497 | { |
496 | struct edac_pci_dev_attribute *edac_pci_dev; | 498 | struct edac_pci_dev_attribute *edac_pci_dev; |
497 | edac_pci_dev= (struct edac_pci_dev_attribute*)attr; | 499 | edac_pci_dev= (struct edac_pci_dev_attribute*)attr; |
@@ -506,7 +508,6 @@ static struct sysfs_ops edac_pci_sysfs_ops = { | |||
506 | .store = edac_pci_dev_store | 508 | .store = edac_pci_dev_store |
507 | }; | 509 | }; |
508 | 510 | ||
509 | |||
510 | #define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ | 511 | #define EDAC_PCI_ATTR(_name,_mode,_show,_store) \ |
511 | struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ | 512 | struct edac_pci_dev_attribute edac_pci_attr_##_name = { \ |
512 | .attr = {.name = __stringify(_name), .mode = _mode }, \ | 513 | .attr = {.name = __stringify(_name), .mode = _mode }, \ |
@@ -549,9 +550,11 @@ EDAC_PCI_STRING_ATTR(pci_parity_blacklist, | |||
549 | #endif | 550 | #endif |
550 | 551 | ||
551 | /* PCI Parity control files */ | 552 | /* PCI Parity control files */ |
552 | EDAC_PCI_ATTR(check_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); | 553 | EDAC_PCI_ATTR(check_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, |
553 | EDAC_PCI_ATTR(panic_on_pci_parity,S_IRUGO|S_IWUSR,edac_pci_int_show,edac_pci_int_store); | 554 | edac_pci_int_store); |
554 | EDAC_PCI_ATTR(pci_parity_count,S_IRUGO,edac_pci_int_show,NULL); | 555 | EDAC_PCI_ATTR(panic_on_pci_parity, S_IRUGO|S_IWUSR, edac_pci_int_show, |
556 | edac_pci_int_store); | ||
557 | EDAC_PCI_ATTR(pci_parity_count, S_IRUGO, edac_pci_int_show, NULL); | ||
555 | 558 | ||
556 | /* Base Attributes of the memory ECC object */ | 559 | /* Base Attributes of the memory ECC object */ |
557 | static struct edac_pci_dev_attribute *edac_pci_attr[] = { | 560 | static struct edac_pci_dev_attribute *edac_pci_attr[] = { |
@@ -564,13 +567,14 @@ static struct edac_pci_dev_attribute *edac_pci_attr[] = { | |||
564 | /* No memory to release */ | 567 | /* No memory to release */ |
565 | static void edac_pci_release(struct kobject *kobj) | 568 | static void edac_pci_release(struct kobject *kobj) |
566 | { | 569 | { |
567 | debugf1("EDAC PCI: " __FILE__ ": %s()\n", __func__); | 570 | debugf1("%s()\n", __func__); |
571 | complete(&edac_pci_kobj_complete); | ||
568 | } | 572 | } |
569 | 573 | ||
570 | static struct kobj_type ktype_edac_pci = { | 574 | static struct kobj_type ktype_edac_pci = { |
571 | .release = edac_pci_release, | 575 | .release = edac_pci_release, |
572 | .sysfs_ops = &edac_pci_sysfs_ops, | 576 | .sysfs_ops = &edac_pci_sysfs_ops, |
573 | .default_attrs = (struct attribute **) edac_pci_attr, | 577 | .default_attrs = (struct attribute **) edac_pci_attr, |
574 | }; | 578 | }; |
575 | 579 | ||
576 | #endif /* DISABLE_EDAC_SYSFS */ | 580 | #endif /* DISABLE_EDAC_SYSFS */ |
@@ -588,24 +592,24 @@ static int edac_sysfs_pci_setup(void) | |||
588 | { | 592 | { |
589 | int err; | 593 | int err; |
590 | 594 | ||
591 | debugf1("MC: " __FILE__ ": %s()\n", __func__); | 595 | debugf1("%s()\n", __func__); |
592 | 596 | ||
593 | memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); | 597 | memset(&edac_pci_kobj, 0, sizeof(edac_pci_kobj)); |
594 | |||
595 | kobject_init(&edac_pci_kobj); | ||
596 | edac_pci_kobj.parent = &edac_class.kset.kobj; | 598 | edac_pci_kobj.parent = &edac_class.kset.kobj; |
597 | edac_pci_kobj.ktype = &ktype_edac_pci; | 599 | edac_pci_kobj.ktype = &ktype_edac_pci; |
598 | |||
599 | err = kobject_set_name(&edac_pci_kobj, "pci"); | 600 | err = kobject_set_name(&edac_pci_kobj, "pci"); |
601 | |||
600 | if (!err) { | 602 | if (!err) { |
601 | /* Instantiate the csrow object */ | 603 | /* Instantiate the csrow object */ |
602 | /* FIXME: maybe new sysdev_create_subdir() */ | 604 | /* FIXME: maybe new sysdev_create_subdir() */ |
603 | err = kobject_register(&edac_pci_kobj); | 605 | err = kobject_register(&edac_pci_kobj); |
606 | |||
604 | if (err) | 607 | if (err) |
605 | debugf1("Failed to register '.../edac/pci'\n"); | 608 | debugf1("Failed to register '.../edac/pci'\n"); |
606 | else | 609 | else |
607 | debugf1("Registered '.../edac/pci' kobject\n"); | 610 | debugf1("Registered '.../edac/pci' kobject\n"); |
608 | } | 611 | } |
612 | |||
609 | return err; | 613 | return err; |
610 | } | 614 | } |
611 | #endif /* DISABLE_EDAC_SYSFS */ | 615 | #endif /* DISABLE_EDAC_SYSFS */ |
@@ -613,10 +617,10 @@ static int edac_sysfs_pci_setup(void) | |||
613 | static void edac_sysfs_pci_teardown(void) | 617 | static void edac_sysfs_pci_teardown(void) |
614 | { | 618 | { |
615 | #ifndef DISABLE_EDAC_SYSFS | 619 | #ifndef DISABLE_EDAC_SYSFS |
616 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 620 | debugf0("%s()\n", __func__); |
617 | 621 | init_completion(&edac_pci_kobj_complete); | |
618 | kobject_unregister(&edac_pci_kobj); | 622 | kobject_unregister(&edac_pci_kobj); |
619 | kobject_put(&edac_pci_kobj); | 623 | wait_for_completion(&edac_pci_kobj_complete); |
620 | #endif | 624 | #endif |
621 | } | 625 | } |
622 | 626 | ||
@@ -633,6 +637,7 @@ static ssize_t csrow_ch0_dimm_label_show(struct csrow_info *csrow, char *data) | |||
633 | size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", | 637 | size = snprintf(data, EDAC_MC_LABEL_LEN,"%s\n", |
634 | csrow->channels[0].label); | 638 | csrow->channels[0].label); |
635 | } | 639 | } |
640 | |||
636 | return size; | 641 | return size; |
637 | } | 642 | } |
638 | 643 | ||
@@ -644,11 +649,12 @@ static ssize_t csrow_ch1_dimm_label_show(struct csrow_info *csrow, char *data) | |||
644 | size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", | 649 | size = snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", |
645 | csrow->channels[1].label); | 650 | csrow->channels[1].label); |
646 | } | 651 | } |
652 | |||
647 | return size; | 653 | return size; |
648 | } | 654 | } |
649 | 655 | ||
650 | static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, | 656 | static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, |
651 | const char *data, size_t size) | 657 | const char *data, size_t size) |
652 | { | 658 | { |
653 | ssize_t max_size = 0; | 659 | ssize_t max_size = 0; |
654 | 660 | ||
@@ -657,11 +663,12 @@ static ssize_t csrow_ch0_dimm_label_store(struct csrow_info *csrow, | |||
657 | strncpy(csrow->channels[0].label, data, max_size); | 663 | strncpy(csrow->channels[0].label, data, max_size); |
658 | csrow->channels[0].label[max_size] = '\0'; | 664 | csrow->channels[0].label[max_size] = '\0'; |
659 | } | 665 | } |
666 | |||
660 | return size; | 667 | return size; |
661 | } | 668 | } |
662 | 669 | ||
663 | static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, | 670 | static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, |
664 | const char *data, size_t size) | 671 | const char *data, size_t size) |
665 | { | 672 | { |
666 | ssize_t max_size = 0; | 673 | ssize_t max_size = 0; |
667 | 674 | ||
@@ -670,6 +677,7 @@ static ssize_t csrow_ch1_dimm_label_store(struct csrow_info *csrow, | |||
670 | strncpy(csrow->channels[1].label, data, max_size); | 677 | strncpy(csrow->channels[1].label, data, max_size); |
671 | csrow->channels[1].label[max_size] = '\0'; | 678 | csrow->channels[1].label[max_size] = '\0'; |
672 | } | 679 | } |
680 | |||
673 | return max_size; | 681 | return max_size; |
674 | } | 682 | } |
675 | 683 | ||
@@ -690,6 +698,7 @@ static ssize_t csrow_ch0_ce_count_show(struct csrow_info *csrow, char *data) | |||
690 | if (csrow->nr_channels > 0) { | 698 | if (csrow->nr_channels > 0) { |
691 | size = sprintf(data,"%u\n", csrow->channels[0].ce_count); | 699 | size = sprintf(data,"%u\n", csrow->channels[0].ce_count); |
692 | } | 700 | } |
701 | |||
693 | return size; | 702 | return size; |
694 | } | 703 | } |
695 | 704 | ||
@@ -700,6 +709,7 @@ static ssize_t csrow_ch1_ce_count_show(struct csrow_info *csrow, char *data) | |||
700 | if (csrow->nr_channels > 1) { | 709 | if (csrow->nr_channels > 1) { |
701 | size = sprintf(data,"%u\n", csrow->channels[1].ce_count); | 710 | size = sprintf(data,"%u\n", csrow->channels[1].ce_count); |
702 | } | 711 | } |
712 | |||
703 | return size; | 713 | return size; |
704 | } | 714 | } |
705 | 715 | ||
@@ -724,7 +734,7 @@ static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data) | |||
724 | } | 734 | } |
725 | 735 | ||
726 | struct csrowdev_attribute { | 736 | struct csrowdev_attribute { |
727 | struct attribute attr; | 737 | struct attribute attr; |
728 | ssize_t (*show)(struct csrow_info *,char *); | 738 | ssize_t (*show)(struct csrow_info *,char *); |
729 | ssize_t (*store)(struct csrow_info *, const char *,size_t); | 739 | ssize_t (*store)(struct csrow_info *, const char *,size_t); |
730 | }; | 740 | }; |
@@ -734,24 +744,26 @@ struct csrowdev_attribute { | |||
734 | 744 | ||
735 | /* Set of show/store higher level functions for csrow objects */ | 745 | /* Set of show/store higher level functions for csrow objects */ |
736 | static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, | 746 | static ssize_t csrowdev_show(struct kobject *kobj, struct attribute *attr, |
737 | char *buffer) | 747 | char *buffer) |
738 | { | 748 | { |
739 | struct csrow_info *csrow = to_csrow(kobj); | 749 | struct csrow_info *csrow = to_csrow(kobj); |
740 | struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); | 750 | struct csrowdev_attribute *csrowdev_attr = to_csrowdev_attr(attr); |
741 | 751 | ||
742 | if (csrowdev_attr->show) | 752 | if (csrowdev_attr->show) |
743 | return csrowdev_attr->show(csrow, buffer); | 753 | return csrowdev_attr->show(csrow, buffer); |
754 | |||
744 | return -EIO; | 755 | return -EIO; |
745 | } | 756 | } |
746 | 757 | ||
747 | static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, | 758 | static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr, |
748 | const char *buffer, size_t count) | 759 | const char *buffer, size_t count) |
749 | { | 760 | { |
750 | struct csrow_info *csrow = to_csrow(kobj); | 761 | struct csrow_info *csrow = to_csrow(kobj); |
751 | struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); | 762 | struct csrowdev_attribute * csrowdev_attr = to_csrowdev_attr(attr); |
752 | 763 | ||
753 | if (csrowdev_attr->store) | 764 | if (csrowdev_attr->store) |
754 | return csrowdev_attr->store(csrow, buffer, count); | 765 | return csrowdev_attr->store(csrow, buffer, count); |
766 | |||
755 | return -EIO; | 767 | return -EIO; |
756 | } | 768 | } |
757 | 769 | ||
@@ -785,7 +797,6 @@ CSROWDEV_ATTR(ch1_dimm_label,S_IRUGO|S_IWUSR, | |||
785 | csrow_ch1_dimm_label_show, | 797 | csrow_ch1_dimm_label_show, |
786 | csrow_ch1_dimm_label_store); | 798 | csrow_ch1_dimm_label_store); |
787 | 799 | ||
788 | |||
789 | /* Attributes of the CSROW<id> object */ | 800 | /* Attributes of the CSROW<id> object */ |
790 | static struct csrowdev_attribute *csrow_attr[] = { | 801 | static struct csrowdev_attribute *csrow_attr[] = { |
791 | &attr_dev_type, | 802 | &attr_dev_type, |
@@ -801,40 +812,43 @@ static struct csrowdev_attribute *csrow_attr[] = { | |||
801 | NULL, | 812 | NULL, |
802 | }; | 813 | }; |
803 | 814 | ||
804 | |||
805 | /* No memory to release */ | 815 | /* No memory to release */ |
806 | static void edac_csrow_instance_release(struct kobject *kobj) | 816 | static void edac_csrow_instance_release(struct kobject *kobj) |
807 | { | 817 | { |
808 | debugf1("EDAC MC: " __FILE__ ": %s()\n", __func__); | 818 | struct csrow_info *cs; |
819 | |||
820 | debugf1("%s()\n", __func__); | ||
821 | cs = container_of(kobj, struct csrow_info, kobj); | ||
822 | complete(&cs->kobj_complete); | ||
809 | } | 823 | } |
810 | 824 | ||
811 | static struct kobj_type ktype_csrow = { | 825 | static struct kobj_type ktype_csrow = { |
812 | .release = edac_csrow_instance_release, | 826 | .release = edac_csrow_instance_release, |
813 | .sysfs_ops = &csrowfs_ops, | 827 | .sysfs_ops = &csrowfs_ops, |
814 | .default_attrs = (struct attribute **) csrow_attr, | 828 | .default_attrs = (struct attribute **) csrow_attr, |
815 | }; | 829 | }; |
816 | 830 | ||
817 | /* Create a CSROW object under specified edac_mc_device */ | 831 | /* Create a CSROW object under specified edac_mc_device */ |
818 | static int edac_create_csrow_object(struct kobject *edac_mci_kobj, | 832 | static int edac_create_csrow_object(struct kobject *edac_mci_kobj, |
819 | struct csrow_info *csrow, int index ) | 833 | struct csrow_info *csrow, int index) |
820 | { | 834 | { |
821 | int err = 0; | 835 | int err = 0; |
822 | 836 | ||
823 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 837 | debugf0("%s()\n", __func__); |
824 | |||
825 | memset(&csrow->kobj, 0, sizeof(csrow->kobj)); | 838 | memset(&csrow->kobj, 0, sizeof(csrow->kobj)); |
826 | 839 | ||
827 | /* generate ..../edac/mc/mc<id>/csrow<index> */ | 840 | /* generate ..../edac/mc/mc<id>/csrow<index> */ |
828 | 841 | ||
829 | kobject_init(&csrow->kobj); | ||
830 | csrow->kobj.parent = edac_mci_kobj; | 842 | csrow->kobj.parent = edac_mci_kobj; |
831 | csrow->kobj.ktype = &ktype_csrow; | 843 | csrow->kobj.ktype = &ktype_csrow; |
832 | 844 | ||
833 | /* name this instance of csrow<id> */ | 845 | /* name this instance of csrow<id> */ |
834 | err = kobject_set_name(&csrow->kobj,"csrow%d",index); | 846 | err = kobject_set_name(&csrow->kobj,"csrow%d",index); |
847 | |||
835 | if (!err) { | 848 | if (!err) { |
836 | /* Instantiate the csrow object */ | 849 | /* Instantiate the csrow object */ |
837 | err = kobject_register(&csrow->kobj); | 850 | err = kobject_register(&csrow->kobj); |
851 | |||
838 | if (err) | 852 | if (err) |
839 | debugf0("Failed to register CSROW%d\n",index); | 853 | debugf0("Failed to register CSROW%d\n",index); |
840 | else | 854 | else |
@@ -846,8 +860,8 @@ static int edac_create_csrow_object(struct kobject *edac_mci_kobj, | |||
846 | 860 | ||
847 | /* sysfs data structures and methods for the MCI kobjects */ | 861 | /* sysfs data structures and methods for the MCI kobjects */ |
848 | 862 | ||
849 | static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, | 863 | static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, |
850 | const char *data, size_t count ) | 864 | const char *data, size_t count) |
851 | { | 865 | { |
852 | int row, chan; | 866 | int row, chan; |
853 | 867 | ||
@@ -855,16 +869,18 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci, | |||
855 | mci->ce_noinfo_count = 0; | 869 | mci->ce_noinfo_count = 0; |
856 | mci->ue_count = 0; | 870 | mci->ue_count = 0; |
857 | mci->ce_count = 0; | 871 | mci->ce_count = 0; |
872 | |||
858 | for (row = 0; row < mci->nr_csrows; row++) { | 873 | for (row = 0; row < mci->nr_csrows; row++) { |
859 | struct csrow_info *ri = &mci->csrows[row]; | 874 | struct csrow_info *ri = &mci->csrows[row]; |
860 | 875 | ||
861 | ri->ue_count = 0; | 876 | ri->ue_count = 0; |
862 | ri->ce_count = 0; | 877 | ri->ce_count = 0; |
878 | |||
863 | for (chan = 0; chan < ri->nr_channels; chan++) | 879 | for (chan = 0; chan < ri->nr_channels; chan++) |
864 | ri->channels[chan].ce_count = 0; | 880 | ri->channels[chan].ce_count = 0; |
865 | } | 881 | } |
866 | mci->start_time = jiffies; | ||
867 | 882 | ||
883 | mci->start_time = jiffies; | ||
868 | return count; | 884 | return count; |
869 | } | 885 | } |
870 | 886 | ||
@@ -922,18 +938,16 @@ static ssize_t mci_edac_capability_show(struct mem_ctl_info *mci, char *data) | |||
922 | 938 | ||
923 | p += mci_output_edac_cap(p,mci->edac_ctl_cap); | 939 | p += mci_output_edac_cap(p,mci->edac_ctl_cap); |
924 | p += sprintf(p, "\n"); | 940 | p += sprintf(p, "\n"); |
925 | |||
926 | return p - data; | 941 | return p - data; |
927 | } | 942 | } |
928 | 943 | ||
929 | static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, | 944 | static ssize_t mci_edac_current_capability_show(struct mem_ctl_info *mci, |
930 | char *data) | 945 | char *data) |
931 | { | 946 | { |
932 | char *p = data; | 947 | char *p = data; |
933 | 948 | ||
934 | p += mci_output_edac_cap(p,mci->edac_cap); | 949 | p += mci_output_edac_cap(p,mci->edac_cap); |
935 | p += sprintf(p, "\n"); | 950 | p += sprintf(p, "\n"); |
936 | |||
937 | return p - data; | 951 | return p - data; |
938 | } | 952 | } |
939 | 953 | ||
@@ -950,13 +964,13 @@ static int mci_output_mtype_cap(char *buf, unsigned long mtype_cap) | |||
950 | return p - buf; | 964 | return p - buf; |
951 | } | 965 | } |
952 | 966 | ||
953 | static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, char *data) | 967 | static ssize_t mci_supported_mem_type_show(struct mem_ctl_info *mci, |
968 | char *data) | ||
954 | { | 969 | { |
955 | char *p = data; | 970 | char *p = data; |
956 | 971 | ||
957 | p += mci_output_mtype_cap(p,mci->mtype_cap); | 972 | p += mci_output_mtype_cap(p,mci->mtype_cap); |
958 | p += sprintf(p, "\n"); | 973 | p += sprintf(p, "\n"); |
959 | |||
960 | return p - data; | 974 | return p - data; |
961 | } | 975 | } |
962 | 976 | ||
@@ -970,6 +984,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) | |||
970 | 984 | ||
971 | if (!csrow->nr_pages) | 985 | if (!csrow->nr_pages) |
972 | continue; | 986 | continue; |
987 | |||
973 | total_pages += csrow->nr_pages; | 988 | total_pages += csrow->nr_pages; |
974 | } | 989 | } |
975 | 990 | ||
@@ -977,7 +992,7 @@ static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) | |||
977 | } | 992 | } |
978 | 993 | ||
979 | struct mcidev_attribute { | 994 | struct mcidev_attribute { |
980 | struct attribute attr; | 995 | struct attribute attr; |
981 | ssize_t (*show)(struct mem_ctl_info *,char *); | 996 | ssize_t (*show)(struct mem_ctl_info *,char *); |
982 | ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); | 997 | ssize_t (*store)(struct mem_ctl_info *, const char *,size_t); |
983 | }; | 998 | }; |
@@ -986,30 +1001,32 @@ struct mcidev_attribute { | |||
986 | #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) | 1001 | #define to_mcidev_attr(a) container_of(a, struct mcidev_attribute, attr) |
987 | 1002 | ||
988 | static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, | 1003 | static ssize_t mcidev_show(struct kobject *kobj, struct attribute *attr, |
989 | char *buffer) | 1004 | char *buffer) |
990 | { | 1005 | { |
991 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); | 1006 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); |
992 | struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); | 1007 | struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); |
993 | 1008 | ||
994 | if (mcidev_attr->show) | 1009 | if (mcidev_attr->show) |
995 | return mcidev_attr->show(mem_ctl_info, buffer); | 1010 | return mcidev_attr->show(mem_ctl_info, buffer); |
1011 | |||
996 | return -EIO; | 1012 | return -EIO; |
997 | } | 1013 | } |
998 | 1014 | ||
999 | static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, | 1015 | static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr, |
1000 | const char *buffer, size_t count) | 1016 | const char *buffer, size_t count) |
1001 | { | 1017 | { |
1002 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); | 1018 | struct mem_ctl_info *mem_ctl_info = to_mci(kobj); |
1003 | struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); | 1019 | struct mcidev_attribute * mcidev_attr = to_mcidev_attr(attr); |
1004 | 1020 | ||
1005 | if (mcidev_attr->store) | 1021 | if (mcidev_attr->store) |
1006 | return mcidev_attr->store(mem_ctl_info, buffer, count); | 1022 | return mcidev_attr->store(mem_ctl_info, buffer, count); |
1023 | |||
1007 | return -EIO; | 1024 | return -EIO; |
1008 | } | 1025 | } |
1009 | 1026 | ||
1010 | static struct sysfs_ops mci_ops = { | 1027 | static struct sysfs_ops mci_ops = { |
1011 | .show = mcidev_show, | 1028 | .show = mcidev_show, |
1012 | .store = mcidev_store | 1029 | .store = mcidev_store |
1013 | }; | 1030 | }; |
1014 | 1031 | ||
1015 | #define MCIDEV_ATTR(_name,_mode,_show,_store) \ | 1032 | #define MCIDEV_ATTR(_name,_mode,_show,_store) \ |
@@ -1037,7 +1054,6 @@ MCIDEV_ATTR(edac_current_capability,S_IRUGO, | |||
1037 | MCIDEV_ATTR(supported_mem_type,S_IRUGO, | 1054 | MCIDEV_ATTR(supported_mem_type,S_IRUGO, |
1038 | mci_supported_mem_type_show,NULL); | 1055 | mci_supported_mem_type_show,NULL); |
1039 | 1056 | ||
1040 | |||
1041 | static struct mcidev_attribute *mci_attr[] = { | 1057 | static struct mcidev_attribute *mci_attr[] = { |
1042 | &mci_attr_reset_counters, | 1058 | &mci_attr_reset_counters, |
1043 | &mci_attr_module_name, | 1059 | &mci_attr_module_name, |
@@ -1054,25 +1070,22 @@ static struct mcidev_attribute *mci_attr[] = { | |||
1054 | NULL | 1070 | NULL |
1055 | }; | 1071 | }; |
1056 | 1072 | ||
1057 | |||
1058 | /* | 1073 | /* |
1059 | * Release of a MC controlling instance | 1074 | * Release of a MC controlling instance |
1060 | */ | 1075 | */ |
1061 | static void edac_mci_instance_release(struct kobject *kobj) | 1076 | static void edac_mci_instance_release(struct kobject *kobj) |
1062 | { | 1077 | { |
1063 | struct mem_ctl_info *mci; | 1078 | struct mem_ctl_info *mci; |
1064 | mci = container_of(kobj,struct mem_ctl_info,edac_mci_kobj); | ||
1065 | 1079 | ||
1066 | debugf0("MC: " __FILE__ ": %s() idx=%d calling kfree\n", | 1080 | mci = to_mci(kobj); |
1067 | __func__, mci->mc_idx); | 1081 | debugf0("%s() idx=%d\n", __func__, mci->mc_idx); |
1068 | 1082 | complete(&mci->kobj_complete); | |
1069 | kfree(mci); | ||
1070 | } | 1083 | } |
1071 | 1084 | ||
1072 | static struct kobj_type ktype_mci = { | 1085 | static struct kobj_type ktype_mci = { |
1073 | .release = edac_mci_instance_release, | 1086 | .release = edac_mci_instance_release, |
1074 | .sysfs_ops = &mci_ops, | 1087 | .sysfs_ops = &mci_ops, |
1075 | .default_attrs = (struct attribute **) mci_attr, | 1088 | .default_attrs = (struct attribute **) mci_attr, |
1076 | }; | 1089 | }; |
1077 | 1090 | ||
1078 | #endif /* DISABLE_EDAC_SYSFS */ | 1091 | #endif /* DISABLE_EDAC_SYSFS */ |
@@ -1099,13 +1112,12 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1099 | struct csrow_info *csrow; | 1112 | struct csrow_info *csrow; |
1100 | struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; | 1113 | struct kobject *edac_mci_kobj=&mci->edac_mci_kobj; |
1101 | 1114 | ||
1102 | debugf0("MC: " __FILE__ ": %s() idx=%d\n", __func__, mci->mc_idx); | 1115 | debugf0("%s() idx=%d\n", __func__, mci->mc_idx); |
1103 | |||
1104 | memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); | 1116 | memset(edac_mci_kobj, 0, sizeof(*edac_mci_kobj)); |
1105 | kobject_init(edac_mci_kobj); | ||
1106 | 1117 | ||
1107 | /* set the name of the mc<id> object */ | 1118 | /* set the name of the mc<id> object */ |
1108 | err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); | 1119 | err = kobject_set_name(edac_mci_kobj,"mc%d",mci->mc_idx); |
1120 | |||
1109 | if (err) | 1121 | if (err) |
1110 | return err; | 1122 | return err; |
1111 | 1123 | ||
@@ -1115,50 +1127,48 @@ static int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1115 | 1127 | ||
1116 | /* register the mc<id> kobject */ | 1128 | /* register the mc<id> kobject */ |
1117 | err = kobject_register(edac_mci_kobj); | 1129 | err = kobject_register(edac_mci_kobj); |
1130 | |||
1118 | if (err) | 1131 | if (err) |
1119 | return err; | 1132 | return err; |
1120 | 1133 | ||
1121 | /* create a symlink for the device */ | 1134 | /* create a symlink for the device */ |
1122 | err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, | 1135 | err = sysfs_create_link(edac_mci_kobj, &mci->pdev->dev.kobj, |
1123 | EDAC_DEVICE_SYMLINK); | 1136 | EDAC_DEVICE_SYMLINK); |
1124 | if (err) { | 1137 | |
1125 | kobject_unregister(edac_mci_kobj); | 1138 | if (err) |
1126 | return err; | 1139 | goto fail0; |
1127 | } | ||
1128 | 1140 | ||
1129 | /* Make directories for each CSROW object | 1141 | /* Make directories for each CSROW object |
1130 | * under the mc<id> kobject | 1142 | * under the mc<id> kobject |
1131 | */ | 1143 | */ |
1132 | for (i = 0; i < mci->nr_csrows; i++) { | 1144 | for (i = 0; i < mci->nr_csrows; i++) { |
1133 | |||
1134 | csrow = &mci->csrows[i]; | 1145 | csrow = &mci->csrows[i]; |
1135 | 1146 | ||
1136 | /* Only expose populated CSROWs */ | 1147 | /* Only expose populated CSROWs */ |
1137 | if (csrow->nr_pages > 0) { | 1148 | if (csrow->nr_pages > 0) { |
1138 | err = edac_create_csrow_object(edac_mci_kobj,csrow,i); | 1149 | err = edac_create_csrow_object(edac_mci_kobj,csrow,i); |
1150 | |||
1139 | if (err) | 1151 | if (err) |
1140 | goto fail; | 1152 | goto fail1; |
1141 | } | 1153 | } |
1142 | } | 1154 | } |
1143 | 1155 | ||
1144 | /* Mark this MCI instance as having sysfs entries */ | ||
1145 | mci->sysfs_active = MCI_SYSFS_ACTIVE; | ||
1146 | |||
1147 | return 0; | 1156 | return 0; |
1148 | 1157 | ||
1149 | |||
1150 | /* CSROW error: backout what has already been registered, */ | 1158 | /* CSROW error: backout what has already been registered, */ |
1151 | fail: | 1159 | fail1: |
1152 | for ( i--; i >= 0; i--) { | 1160 | for ( i--; i >= 0; i--) { |
1153 | if (csrow->nr_pages > 0) { | 1161 | if (csrow->nr_pages > 0) { |
1162 | init_completion(&csrow->kobj_complete); | ||
1154 | kobject_unregister(&mci->csrows[i].kobj); | 1163 | kobject_unregister(&mci->csrows[i].kobj); |
1155 | kobject_put(&mci->csrows[i].kobj); | 1164 | wait_for_completion(&csrow->kobj_complete); |
1156 | } | 1165 | } |
1157 | } | 1166 | } |
1158 | 1167 | ||
1168 | fail0: | ||
1169 | init_completion(&mci->kobj_complete); | ||
1159 | kobject_unregister(edac_mci_kobj); | 1170 | kobject_unregister(edac_mci_kobj); |
1160 | kobject_put(edac_mci_kobj); | 1171 | wait_for_completion(&mci->kobj_complete); |
1161 | |||
1162 | return err; | 1172 | return err; |
1163 | } | 1173 | } |
1164 | #endif /* DISABLE_EDAC_SYSFS */ | 1174 | #endif /* DISABLE_EDAC_SYSFS */ |
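Editor's note: the fail0/fail1 labels introduced above follow the usual kernel unwind idiom, where each failure jumps to the label that undoes only the steps already completed and the labels fall through from the newest step back to the first. A compressed sketch of that control flow with placeholder stubs (setup_*/undo_* are illustrative, not functions from this driver):

    /* Hypothetical stand-ins for kobject_register(), sysfs_create_link()
     * and the per-csrow registration performed above. */
    static int setup_kobj(void)   { return 0; }
    static int setup_link(void)   { return 0; }
    static int setup_csrows(void) { return 0; }
    static void undo_csrows(void) { }
    static void undo_kobj(void)   { }

    static int create_all(void)
    {
            int err;

            err = setup_kobj();
            if (err)
                    return err;     /* nothing registered yet */

            err = setup_link();
            if (err)
                    goto fail0;     /* undo the kobject only */

            err = setup_csrows();
            if (err)
                    goto fail1;     /* undo csrows, then the kobject */

            return 0;

    fail1:
            undo_csrows();
    fail0:
            undo_kobj();
            return err;
    }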
@@ -1171,20 +1181,21 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1171 | #ifndef DISABLE_EDAC_SYSFS | 1181 | #ifndef DISABLE_EDAC_SYSFS |
1172 | int i; | 1182 | int i; |
1173 | 1183 | ||
1174 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 1184 | debugf0("%s()\n", __func__); |
1175 | 1185 | ||
1176 | /* remove all csrow kobjects */ | 1186 | /* remove all csrow kobjects */ |
1177 | for (i = 0; i < mci->nr_csrows; i++) { | 1187 | for (i = 0; i < mci->nr_csrows; i++) { |
1178 | if (mci->csrows[i].nr_pages > 0) { | 1188 | if (mci->csrows[i].nr_pages > 0) { |
1189 | init_completion(&mci->csrows[i].kobj_complete); | ||
1179 | kobject_unregister(&mci->csrows[i].kobj); | 1190 | kobject_unregister(&mci->csrows[i].kobj); |
1180 | kobject_put(&mci->csrows[i].kobj); | 1191 | wait_for_completion(&mci->csrows[i].kobj_complete); |
1181 | } | 1192 | } |
1182 | } | 1193 | } |
1183 | 1194 | ||
1184 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); | 1195 | sysfs_remove_link(&mci->edac_mci_kobj, EDAC_DEVICE_SYMLINK); |
1185 | 1196 | init_completion(&mci->kobj_complete); | |
1186 | kobject_unregister(&mci->edac_mci_kobj); | 1197 | kobject_unregister(&mci->edac_mci_kobj); |
1187 | kobject_put(&mci->edac_mci_kobj); | 1198 | wait_for_completion(&mci->kobj_complete); |
1188 | #endif /* DISABLE_EDAC_SYSFS */ | 1199 | #endif /* DISABLE_EDAC_SYSFS */ |
1189 | } | 1200 | } |
1190 | 1201 | ||
@@ -1192,8 +1203,6 @@ static void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) | |||
1192 | 1203 | ||
1193 | #ifdef CONFIG_EDAC_DEBUG | 1204 | #ifdef CONFIG_EDAC_DEBUG |
1194 | 1205 | ||
1195 | EXPORT_SYMBOL(edac_mc_dump_channel); | ||
1196 | |||
1197 | void edac_mc_dump_channel(struct channel_info *chan) | 1206 | void edac_mc_dump_channel(struct channel_info *chan) |
1198 | { | 1207 | { |
1199 | debugf4("\tchannel = %p\n", chan); | 1208 | debugf4("\tchannel = %p\n", chan); |
@@ -1202,9 +1211,7 @@ void edac_mc_dump_channel(struct channel_info *chan) | |||
1202 | debugf4("\tchannel->label = '%s'\n", chan->label); | 1211 | debugf4("\tchannel->label = '%s'\n", chan->label); |
1203 | debugf4("\tchannel->csrow = %p\n\n", chan->csrow); | 1212 | debugf4("\tchannel->csrow = %p\n\n", chan->csrow); |
1204 | } | 1213 | } |
1205 | 1214 | EXPORT_SYMBOL_GPL(edac_mc_dump_channel); | |
1206 | |||
1207 | EXPORT_SYMBOL(edac_mc_dump_csrow); | ||
1208 | 1215 | ||
1209 | void edac_mc_dump_csrow(struct csrow_info *csrow) | 1216 | void edac_mc_dump_csrow(struct csrow_info *csrow) |
1210 | { | 1217 | { |
@@ -1220,9 +1227,7 @@ void edac_mc_dump_csrow(struct csrow_info *csrow) | |||
1220 | debugf4("\tcsrow->channels = %p\n", csrow->channels); | 1227 | debugf4("\tcsrow->channels = %p\n", csrow->channels); |
1221 | debugf4("\tcsrow->mci = %p\n\n", csrow->mci); | 1228 | debugf4("\tcsrow->mci = %p\n\n", csrow->mci); |
1222 | } | 1229 | } |
1223 | 1230 | EXPORT_SYMBOL_GPL(edac_mc_dump_csrow); | |
1224 | |||
1225 | EXPORT_SYMBOL(edac_mc_dump_mci); | ||
1226 | 1231 | ||
1227 | void edac_mc_dump_mci(struct mem_ctl_info *mci) | 1232 | void edac_mc_dump_mci(struct mem_ctl_info *mci) |
1228 | { | 1233 | { |
@@ -1238,9 +1243,9 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci) | |||
1238 | mci->mod_name, mci->ctl_name); | 1243 | mci->mod_name, mci->ctl_name); |
1239 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); | 1244 | debugf3("\tpvt_info = %p\n\n", mci->pvt_info); |
1240 | } | 1245 | } |
1246 | EXPORT_SYMBOL_GPL(edac_mc_dump_mci); | ||
1241 | 1247 | ||
1242 | 1248 | #endif /* CONFIG_EDAC_DEBUG */ | |
1243 | #endif /* CONFIG_EDAC_DEBUG */ | ||
1244 | 1249 | ||
1245 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. | 1250 | /* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. |
1246 | * Adjust 'ptr' so that its alignment is at least as stringent as what the | 1251 | * Adjust 'ptr' so that its alignment is at least as stringent as what the |
@@ -1249,7 +1254,7 @@ void edac_mc_dump_mci(struct mem_ctl_info *mci) | |||
1249 | * If 'size' is a constant, the compiler will optimize this whole function | 1254 | * If 'size' is a constant, the compiler will optimize this whole function |
1250 | * down to either a no-op or the addition of a constant to the value of 'ptr'. | 1255 | * down to either a no-op or the addition of a constant to the value of 'ptr'. |
1251 | */ | 1256 | */ |
1252 | static inline char * align_ptr (void *ptr, unsigned size) | 1257 | static inline char * align_ptr(void *ptr, unsigned size) |
1253 | { | 1258 | { |
1254 | unsigned align, r; | 1259 | unsigned align, r; |
1255 | 1260 | ||
@@ -1276,9 +1281,6 @@ static inline char * align_ptr (void *ptr, unsigned size) | |||
1276 | return (char *) (((unsigned long) ptr) + align - r); | 1281 | return (char *) (((unsigned long) ptr) + align - r); |
1277 | } | 1282 | } |
1278 | 1283 | ||
1279 | |||
1280 | EXPORT_SYMBOL(edac_mc_alloc); | ||
1281 | |||
1282 | /** | 1284 | /** |
1283 | * edac_mc_alloc: Allocate a struct mem_ctl_info structure | 1285 | * edac_mc_alloc: Allocate a struct mem_ctl_info structure |
1284 | * @size_pvt: size of private storage needed | 1286 | * @size_pvt: size of private storage needed |
@@ -1296,7 +1298,7 @@ EXPORT_SYMBOL(edac_mc_alloc); | |||
1296 | * struct mem_ctl_info pointer | 1298 | * struct mem_ctl_info pointer |
1297 | */ | 1299 | */ |
1298 | struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, | 1300 | struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, |
1299 | unsigned nr_chans) | 1301 | unsigned nr_chans) |
1300 | { | 1302 | { |
1301 | struct mem_ctl_info *mci; | 1303 | struct mem_ctl_info *mci; |
1302 | struct csrow_info *csi, *csrow; | 1304 | struct csrow_info *csi, *csrow; |
@@ -1327,8 +1329,7 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, | |||
1327 | chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); | 1329 | chi = (struct channel_info *) (((char *) mci) + ((unsigned long) chi)); |
1328 | pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; | 1330 | pvt = sz_pvt ? (((char *) mci) + ((unsigned long) pvt)) : NULL; |
1329 | 1331 | ||
1330 | memset(mci, 0, size); /* clear all fields */ | 1332 | memset(mci, 0, size); /* clear all fields */ |
1331 | |||
1332 | mci->csrows = csi; | 1333 | mci->csrows = csi; |
1333 | mci->pvt_info = pvt; | 1334 | mci->pvt_info = pvt; |
1334 | mci->nr_csrows = nr_csrows; | 1335 | mci->nr_csrows = nr_csrows; |
@@ -1350,50 +1351,24 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, | |||
1350 | 1351 | ||
1351 | return mci; | 1352 | return mci; |
1352 | } | 1353 | } |
1353 | 1354 | EXPORT_SYMBOL_GPL(edac_mc_alloc); | |
1354 | |||
1355 | EXPORT_SYMBOL(edac_mc_free); | ||
1356 | 1355 | ||
1357 | /** | 1356 | /** |
1358 | * edac_mc_free: Free a previously allocated 'mci' structure | 1357 | * edac_mc_free: Free a previously allocated 'mci' structure |
1359 | * @mci: pointer to a struct mem_ctl_info structure | 1358 | * @mci: pointer to a struct mem_ctl_info structure |
1360 | * | ||
1361 | * Free up a previously allocated mci structure | ||
1362 | * A MCI structure can be in 2 states after being allocated | ||
1363 | * by edac_mc_alloc(). | ||
1364 | * 1) Allocated in a MC driver's probe, but not yet committed | ||
1365 | * 2) Allocated and committed, by a call to edac_mc_add_mc() | ||
1366 | * edac_mc_add_mc() is the function that adds the sysfs entries | ||
1367 | * thus, this free function must determine which state the 'mci' | ||
1368 | * structure is in, then either free it directly or | ||
1369 | * perform kobject cleanup by calling edac_remove_sysfs_mci_device(). | ||
1370 | * | ||
1371 | * VOID Return | ||
1372 | */ | 1359 | */ |
1373 | void edac_mc_free(struct mem_ctl_info *mci) | 1360 | void edac_mc_free(struct mem_ctl_info *mci) |
1374 | { | 1361 | { |
1375 | /* only if sysfs entries for this mci instance exist | 1362 | kfree(mci); |
1376 | * do we remove them and defer the actual kfree via | ||
1377 | * the kobject 'release()' callback. | ||
1378 | * | ||
1379 | * Otherwise, do a straight kfree now. | ||
1380 | */ | ||
1381 | if (mci->sysfs_active == MCI_SYSFS_ACTIVE) | ||
1382 | edac_remove_sysfs_mci_device(mci); | ||
1383 | else | ||
1384 | kfree(mci); | ||
1385 | } | 1363 | } |
1364 | EXPORT_SYMBOL_GPL(edac_mc_free); | ||
1386 | 1365 | ||
1387 | 1366 | static struct mem_ctl_info *find_mci_by_pdev(struct pci_dev *pdev) | |
1388 | |||
1389 | EXPORT_SYMBOL(edac_mc_find_mci_by_pdev); | ||
1390 | |||
1391 | struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev) | ||
1392 | { | 1367 | { |
1393 | struct mem_ctl_info *mci; | 1368 | struct mem_ctl_info *mci; |
1394 | struct list_head *item; | 1369 | struct list_head *item; |
1395 | 1370 | ||
1396 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1371 | debugf3("%s()\n", __func__); |
1397 | 1372 | ||
1398 | list_for_each(item, &mc_devices) { | 1373 | list_for_each(item, &mc_devices) { |
1399 | mci = list_entry(item, struct mem_ctl_info, link); | 1374 | mci = list_entry(item, struct mem_ctl_info, link); |
@@ -1405,7 +1380,7 @@ struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev *pdev) | |||
1405 | return NULL; | 1380 | return NULL; |
1406 | } | 1381 | } |
1407 | 1382 | ||
1408 | static int add_mc_to_global_list (struct mem_ctl_info *mci) | 1383 | static int add_mc_to_global_list(struct mem_ctl_info *mci) |
1409 | { | 1384 | { |
1410 | struct list_head *item, *insert_before; | 1385 | struct list_head *item, *insert_before; |
1411 | struct mem_ctl_info *p; | 1386 | struct mem_ctl_info *p; |
@@ -1415,11 +1390,12 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci) | |||
1415 | mci->mc_idx = 0; | 1390 | mci->mc_idx = 0; |
1416 | insert_before = &mc_devices; | 1391 | insert_before = &mc_devices; |
1417 | } else { | 1392 | } else { |
1418 | if (edac_mc_find_mci_by_pdev(mci->pdev)) { | 1393 | if (find_mci_by_pdev(mci->pdev)) { |
1419 | printk(KERN_WARNING | 1394 | edac_printk(KERN_WARNING, EDAC_MC, |
1420 | "EDAC MC: %s (%s) %s %s already assigned %d\n", | 1395 | "%s (%s) %s %s already assigned %d\n", |
1421 | mci->pdev->dev.bus_id, pci_name(mci->pdev), | 1396 | mci->pdev->dev.bus_id, |
1422 | mci->mod_name, mci->ctl_name, mci->mc_idx); | 1397 | pci_name(mci->pdev), mci->mod_name, |
1398 | mci->ctl_name, mci->mc_idx); | ||
1423 | return 1; | 1399 | return 1; |
1424 | } | 1400 | } |
1425 | 1401 | ||
@@ -1447,12 +1423,26 @@ static int add_mc_to_global_list (struct mem_ctl_info *mci) | |||
1447 | return 0; | 1423 | return 0; |
1448 | } | 1424 | } |
1449 | 1425 | ||
1426 | static void complete_mc_list_del(struct rcu_head *head) | ||
1427 | { | ||
1428 | struct mem_ctl_info *mci; | ||
1450 | 1429 | ||
1430 | mci = container_of(head, struct mem_ctl_info, rcu); | ||
1431 | INIT_LIST_HEAD(&mci->link); | ||
1432 | complete(&mci->complete); | ||
1433 | } | ||
1451 | 1434 | ||
1452 | EXPORT_SYMBOL(edac_mc_add_mc); | 1435 | static void del_mc_from_global_list(struct mem_ctl_info *mci) |
1436 | { | ||
1437 | list_del_rcu(&mci->link); | ||
1438 | init_completion(&mci->complete); | ||
1439 | call_rcu(&mci->rcu, complete_mc_list_del); | ||
1440 | wait_for_completion(&mci->complete); | ||
1441 | } | ||
1453 | 1442 | ||
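Editor's note: del_mc_from_global_list() above pairs list_del_rcu() with a completion signalled from an RCU callback, so the remover can sleep until every reader that might still see the entry has finished. The same idiom in isolation, on a hypothetical item type (struct item and the function names are illustrative only):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/completion.h>
    #include <linux/kernel.h>

    struct item {
            struct list_head link;
            struct rcu_head rcu;
            struct completion complete;
    };

    /* Runs after a grace period: no RCU reader can still hold a
     * reference to the unlinked entry, so wake the remover. */
    static void item_del_done(struct rcu_head *head)
    {
            struct item *p = container_of(head, struct item, rcu);

            INIT_LIST_HEAD(&p->link);
            complete(&p->complete);
    }

    static void item_del(struct item *p)
    {
            list_del_rcu(&p->link);
            init_completion(&p->complete);
            call_rcu(&p->rcu, item_del_done);
            wait_for_completion(&p->complete);
            /* p can now be freed or reused safely */
    }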
1454 | /** | 1443 | /** |
1455 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list | 1444 | * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and |
1445 | * create sysfs entries associated with mci structure | ||
1456 | * @mci: pointer to the mci structure to be added to the list | 1446 | * @mci: pointer to the mci structure to be added to the list |
1457 | * | 1447 | * |
1458 | * Return: | 1448 | * Return: |
@@ -1463,111 +1453,90 @@ EXPORT_SYMBOL(edac_mc_add_mc); | |||
1463 | /* FIXME - should a warning be printed if no error detection? correction? */ | 1453 | /* FIXME - should a warning be printed if no error detection? correction? */ |
1464 | int edac_mc_add_mc(struct mem_ctl_info *mci) | 1454 | int edac_mc_add_mc(struct mem_ctl_info *mci) |
1465 | { | 1455 | { |
1466 | int rc = 1; | 1456 | debugf0("%s()\n", __func__); |
1467 | |||
1468 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | ||
1469 | #ifdef CONFIG_EDAC_DEBUG | 1457 | #ifdef CONFIG_EDAC_DEBUG |
1470 | if (edac_debug_level >= 3) | 1458 | if (edac_debug_level >= 3) |
1471 | edac_mc_dump_mci(mci); | 1459 | edac_mc_dump_mci(mci); |
1460 | |||
1472 | if (edac_debug_level >= 4) { | 1461 | if (edac_debug_level >= 4) { |
1473 | int i; | 1462 | int i; |
1474 | 1463 | ||
1475 | for (i = 0; i < mci->nr_csrows; i++) { | 1464 | for (i = 0; i < mci->nr_csrows; i++) { |
1476 | int j; | 1465 | int j; |
1466 | |||
1477 | edac_mc_dump_csrow(&mci->csrows[i]); | 1467 | edac_mc_dump_csrow(&mci->csrows[i]); |
1478 | for (j = 0; j < mci->csrows[i].nr_channels; j++) | 1468 | for (j = 0; j < mci->csrows[i].nr_channels; j++) |
1479 | edac_mc_dump_channel(&mci->csrows[i]. | 1469 | edac_mc_dump_channel( |
1480 | channels[j]); | 1470 | &mci->csrows[i].channels[j]); |
1481 | } | 1471 | } |
1482 | } | 1472 | } |
1483 | #endif | 1473 | #endif |
1484 | down(&mem_ctls_mutex); | 1474 | down(&mem_ctls_mutex); |
1485 | 1475 | ||
1486 | if (add_mc_to_global_list(mci)) | 1476 | if (add_mc_to_global_list(mci)) |
1487 | goto finish; | 1477 | goto fail0; |
1488 | 1478 | ||
1489 | /* set load time so that error rate can be tracked */ | 1479 | /* set load time so that error rate can be tracked */ |
1490 | mci->start_time = jiffies; | 1480 | mci->start_time = jiffies; |
1491 | 1481 | ||
1492 | if (edac_create_sysfs_mci_device(mci)) { | 1482 | if (edac_create_sysfs_mci_device(mci)) { |
1493 | printk(KERN_WARNING | 1483 | edac_mc_printk(mci, KERN_WARNING, |
1494 | "EDAC MC%d: failed to create sysfs device\n", | 1484 | "failed to create sysfs device\n"); |
1495 | mci->mc_idx); | 1485 | goto fail1; |
1496 | /* FIXME - should there be an error code and unwind? */ | ||
1497 | goto finish; | ||
1498 | } | 1486 | } |
1499 | 1487 | ||
1500 | /* Report action taken */ | 1488 | /* Report action taken */ |
1501 | printk(KERN_INFO | 1489 | edac_mc_printk(mci, KERN_INFO, "Giving out device to %s %s: PCI %s\n", |
1502 | "EDAC MC%d: Giving out device to %s %s: PCI %s\n", | 1490 | mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); |
1503 | mci->mc_idx, mci->mod_name, mci->ctl_name, | ||
1504 | pci_name(mci->pdev)); | ||
1505 | 1491 | ||
1506 | |||
1507 | rc = 0; | ||
1508 | |||
1509 | finish: | ||
1510 | up(&mem_ctls_mutex); | 1492 | up(&mem_ctls_mutex); |
1511 | return rc; | 1493 | return 0; |
1512 | } | ||
1513 | |||
1514 | |||
1515 | |||
1516 | static void complete_mc_list_del (struct rcu_head *head) | ||
1517 | { | ||
1518 | struct mem_ctl_info *mci; | ||
1519 | 1494 | ||
1520 | mci = container_of(head, struct mem_ctl_info, rcu); | 1495 | fail1: |
1521 | INIT_LIST_HEAD(&mci->link); | 1496 | del_mc_from_global_list(mci); |
1522 | complete(&mci->complete); | ||
1523 | } | ||
1524 | 1497 | ||
1525 | static void del_mc_from_global_list (struct mem_ctl_info *mci) | 1498 | fail0: |
1526 | { | 1499 | up(&mem_ctls_mutex); |
1527 | list_del_rcu(&mci->link); | 1500 | return 1; |
1528 | init_completion(&mci->complete); | ||
1529 | call_rcu(&mci->rcu, complete_mc_list_del); | ||
1530 | wait_for_completion(&mci->complete); | ||
1531 | } | 1501 | } |
1532 | 1502 | EXPORT_SYMBOL_GPL(edac_mc_add_mc); | |
1533 | EXPORT_SYMBOL(edac_mc_del_mc); | ||
1534 | 1503 | ||
1535 | /** | 1504 | /** |
1536 | * edac_mc_del_mc: Remove the specified mci structure from global list | 1505 | * edac_mc_del_mc: Remove sysfs entries for specified mci structure and |
1537 | * @mci: Pointer to struct mem_ctl_info structure | 1506 | * remove mci structure from global list |
1507 | * @pdev: Pointer to 'struct pci_dev' representing mci structure to remove. | ||
1538 | * | 1508 | * |
1539 | * Returns: | 1509 | * Return pointer to removed mci structure, or NULL if device not found. |
1540 | * 0 Success | ||
1541 | * 1 Failure | ||
1542 | */ | 1510 | */ |
1543 | int edac_mc_del_mc(struct mem_ctl_info *mci) | 1511 | struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev) |
1544 | { | 1512 | { |
1545 | int rc = 1; | 1513 | struct mem_ctl_info *mci; |
1546 | 1514 | ||
1547 | debugf0("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 1515 | debugf0("MC: %s()\n", __func__); |
1548 | down(&mem_ctls_mutex); | 1516 | down(&mem_ctls_mutex); |
1517 | |||
1518 | if ((mci = find_mci_by_pdev(pdev)) == NULL) { | ||
1519 | up(&mem_ctls_mutex); | ||
1520 | return NULL; | ||
1521 | } | ||
1522 | |||
1523 | edac_remove_sysfs_mci_device(mci); | ||
1549 | del_mc_from_global_list(mci); | 1524 | del_mc_from_global_list(mci); |
1550 | printk(KERN_INFO | ||
1551 | "EDAC MC%d: Removed device %d for %s %s: PCI %s\n", | ||
1552 | mci->mc_idx, mci->mc_idx, mci->mod_name, mci->ctl_name, | ||
1553 | pci_name(mci->pdev)); | ||
1554 | rc = 0; | ||
1555 | up(&mem_ctls_mutex); | 1525 | up(&mem_ctls_mutex); |
1556 | 1526 | edac_printk(KERN_INFO, EDAC_MC, | |
1557 | return rc; | 1527 | "Removed device %d for %s %s: PCI %s\n", mci->mc_idx, |
1528 | mci->mod_name, mci->ctl_name, pci_name(mci->pdev)); | ||
1529 | return mci; | ||
1558 | } | 1530 | } |
1531 | EXPORT_SYMBOL_GPL(edac_mc_del_mc); | ||
1559 | 1532 | ||
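Editor's note: with the reworked interface above, edac_mc_del_mc() takes the pci_dev and hands the detached mem_ctl_info back, leaving the final free to the caller via edac_mc_free() (which is now a plain kfree(), per the earlier hunk). A hypothetical chipset-driver remove path under that assumption (my_remove_one is not a function from this patch):

    #include <linux/pci.h>
    #include <linux/init.h>
    #include "edac_mc.h"

    static void __devexit my_remove_one(struct pci_dev *pdev)
    {
            struct mem_ctl_info *mci;

            /* Tears down the sysfs entries and drops the controller from
             * the global list; NULL means this pdev was never added. */
            mci = edac_mc_del_mc(pdev);
            if (mci == NULL)
                    return;

            /* No sysfs or list references remain, so free it directly. */
            edac_mc_free(mci);
    }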
1560 | 1533 | void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) | |
1561 | EXPORT_SYMBOL(edac_mc_scrub_block); | ||
1562 | |||
1563 | void edac_mc_scrub_block(unsigned long page, unsigned long offset, | ||
1564 | u32 size) | ||
1565 | { | 1534 | { |
1566 | struct page *pg; | 1535 | struct page *pg; |
1567 | void *virt_addr; | 1536 | void *virt_addr; |
1568 | unsigned long flags = 0; | 1537 | unsigned long flags = 0; |
1569 | 1538 | ||
1570 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1539 | debugf3("%s()\n", __func__); |
1571 | 1540 | ||
1572 | /* ECC error page was not in our memory. Ignore it. */ | 1541 | /* ECC error page was not in our memory. Ignore it. */ |
1573 | if(!pfn_valid(page)) | 1542 | if(!pfn_valid(page)) |
@@ -1590,19 +1559,15 @@ void edac_mc_scrub_block(unsigned long page, unsigned long offset, | |||
1590 | if (PageHighMem(pg)) | 1559 | if (PageHighMem(pg)) |
1591 | local_irq_restore(flags); | 1560 | local_irq_restore(flags); |
1592 | } | 1561 | } |
1593 | 1562 | EXPORT_SYMBOL_GPL(edac_mc_scrub_block); | |
1594 | 1563 | ||
1595 | /* FIXME - should return -1 */ | 1564 | /* FIXME - should return -1 */ |
1596 | EXPORT_SYMBOL(edac_mc_find_csrow_by_page); | 1565 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) |
1597 | |||
1598 | int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | ||
1599 | unsigned long page) | ||
1600 | { | 1566 | { |
1601 | struct csrow_info *csrows = mci->csrows; | 1567 | struct csrow_info *csrows = mci->csrows; |
1602 | int row, i; | 1568 | int row, i; |
1603 | 1569 | ||
1604 | debugf1("MC%d: " __FILE__ ": %s(): 0x%lx\n", mci->mc_idx, __func__, | 1570 | debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); |
1605 | page); | ||
1606 | row = -1; | 1571 | row = -1; |
1607 | 1572 | ||
1608 | for (i = 0; i < mci->nr_csrows; i++) { | 1573 | for (i = 0; i < mci->nr_csrows; i++) { |
@@ -1611,11 +1576,10 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | |||
1611 | if (csrow->nr_pages == 0) | 1576 | if (csrow->nr_pages == 0) |
1612 | continue; | 1577 | continue; |
1613 | 1578 | ||
1614 | debugf3("MC%d: " __FILE__ | 1579 | debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " |
1615 | ": %s(): first(0x%lx) page(0x%lx)" | 1580 | "mask(0x%lx)\n", mci->mc_idx, __func__, |
1616 | " last(0x%lx) mask(0x%lx)\n", mci->mc_idx, | 1581 | csrow->first_page, page, csrow->last_page, |
1617 | __func__, csrow->first_page, page, | 1582 | csrow->page_mask); |
1618 | csrow->last_page, csrow->page_mask); | ||
1619 | 1583 | ||
1620 | if ((page >= csrow->first_page) && | 1584 | if ((page >= csrow->first_page) && |
1621 | (page <= csrow->last_page) && | 1585 | (page <= csrow->last_page) && |
@@ -1627,56 +1591,52 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | |||
1627 | } | 1591 | } |
1628 | 1592 | ||
1629 | if (row == -1) | 1593 | if (row == -1) |
1630 | printk(KERN_ERR | 1594 | edac_mc_printk(mci, KERN_ERR, |
1631 | "EDAC MC%d: could not look up page error address %lx\n", | 1595 | "could not look up page error address %lx\n", |
1632 | mci->mc_idx, (unsigned long) page); | 1596 | (unsigned long) page); |
1633 | 1597 | ||
1634 | return row; | 1598 | return row; |
1635 | } | 1599 | } |
1636 | 1600 | EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); | |
1637 | |||
1638 | EXPORT_SYMBOL(edac_mc_handle_ce); | ||
1639 | 1601 | ||
1640 | /* FIXME - setable log (warning/emerg) levels */ | 1602 | /* FIXME - setable log (warning/emerg) levels */ |
1641 | /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ | 1603 | /* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ |
1642 | void edac_mc_handle_ce(struct mem_ctl_info *mci, | 1604 | void edac_mc_handle_ce(struct mem_ctl_info *mci, |
1643 | unsigned long page_frame_number, | 1605 | unsigned long page_frame_number, unsigned long offset_in_page, |
1644 | unsigned long offset_in_page, | 1606 | unsigned long syndrome, int row, int channel, const char *msg) |
1645 | unsigned long syndrome, int row, int channel, | ||
1646 | const char *msg) | ||
1647 | { | 1607 | { |
1648 | unsigned long remapped_page; | 1608 | unsigned long remapped_page; |
1649 | 1609 | ||
1650 | debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 1610 | debugf3("MC%d: %s()\n", mci->mc_idx, __func__); |
1651 | 1611 | ||
1652 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ | 1612 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ |
1653 | if (row >= mci->nr_csrows || row < 0) { | 1613 | if (row >= mci->nr_csrows || row < 0) { |
1654 | /* something is wrong */ | 1614 | /* something is wrong */ |
1655 | printk(KERN_ERR | 1615 | edac_mc_printk(mci, KERN_ERR, |
1656 | "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", | 1616 | "INTERNAL ERROR: row out of range " |
1657 | mci->mc_idx, row, mci->nr_csrows); | 1617 | "(%d >= %d)\n", row, mci->nr_csrows); |
1658 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); | 1618 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); |
1659 | return; | 1619 | return; |
1660 | } | 1620 | } |
1621 | |||
1661 | if (channel >= mci->csrows[row].nr_channels || channel < 0) { | 1622 | if (channel >= mci->csrows[row].nr_channels || channel < 0) { |
1662 | /* something is wrong */ | 1623 | /* something is wrong */ |
1663 | printk(KERN_ERR | 1624 | edac_mc_printk(mci, KERN_ERR, |
1664 | "EDAC MC%d: INTERNAL ERROR: channel out of range " | 1625 | "INTERNAL ERROR: channel out of range " |
1665 | "(%d >= %d)\n", | 1626 | "(%d >= %d)\n", channel, |
1666 | mci->mc_idx, channel, mci->csrows[row].nr_channels); | 1627 | mci->csrows[row].nr_channels); |
1667 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); | 1628 | edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR"); |
1668 | return; | 1629 | return; |
1669 | } | 1630 | } |
1670 | 1631 | ||
1671 | if (log_ce) | 1632 | if (log_ce) |
1672 | /* FIXME - put in DIMM location */ | 1633 | /* FIXME - put in DIMM location */ |
1673 | printk(KERN_WARNING | 1634 | edac_mc_printk(mci, KERN_WARNING, |
1674 | "EDAC MC%d: CE page 0x%lx, offset 0x%lx," | 1635 | "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " |
1675 | " grain %d, syndrome 0x%lx, row %d, channel %d," | 1636 | "0x%lx, row %d, channel %d, label \"%s\": %s\n", |
1676 | " label \"%s\": %s\n", mci->mc_idx, | 1637 | page_frame_number, offset_in_page, |
1677 | page_frame_number, offset_in_page, | 1638 | mci->csrows[row].grain, syndrome, row, channel, |
1678 | mci->csrows[row].grain, syndrome, row, channel, | 1639 | mci->csrows[row].channels[channel].label, msg); |
1679 | mci->csrows[row].channels[channel].label, msg); | ||
1680 | 1640 | ||
1681 | mci->ce_count++; | 1641 | mci->ce_count++; |
1682 | mci->csrows[row].ce_count++; | 1642 | mci->csrows[row].ce_count++; |
@@ -1697,31 +1657,25 @@ void edac_mc_handle_ce(struct mem_ctl_info *mci, | |||
1697 | page_frame_number; | 1657 | page_frame_number; |
1698 | 1658 | ||
1699 | edac_mc_scrub_block(remapped_page, offset_in_page, | 1659 | edac_mc_scrub_block(remapped_page, offset_in_page, |
1700 | mci->csrows[row].grain); | 1660 | mci->csrows[row].grain); |
1701 | } | 1661 | } |
1702 | } | 1662 | } |
1663 | EXPORT_SYMBOL_GPL(edac_mc_handle_ce); | ||
1703 | 1664 | ||
1704 | 1665 | void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) | |
1705 | EXPORT_SYMBOL(edac_mc_handle_ce_no_info); | ||
1706 | |||
1707 | void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, | ||
1708 | const char *msg) | ||
1709 | { | 1666 | { |
1710 | if (log_ce) | 1667 | if (log_ce) |
1711 | printk(KERN_WARNING | 1668 | edac_mc_printk(mci, KERN_WARNING, |
1712 | "EDAC MC%d: CE - no information available: %s\n", | 1669 | "CE - no information available: %s\n", msg); |
1713 | mci->mc_idx, msg); | 1670 | |
1714 | mci->ce_noinfo_count++; | 1671 | mci->ce_noinfo_count++; |
1715 | mci->ce_count++; | 1672 | mci->ce_count++; |
1716 | } | 1673 | } |
1717 | 1674 | EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info); | |
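Editor's note: a driver's polling routine reports correctable errors through the two entry points shown above, falling back to the *_no_info variant when the hardware cannot resolve an address. A hedged usage sketch (my_check and the page/syndrome/row/channel values are made up for illustration):

    #include "edac_mc.h"

    static void my_check(struct mem_ctl_info *mci)
    {
            unsigned long page = 0x1234;    /* decoded page frame      */
            unsigned long offset = 0x40;    /* offset within the page  */
            unsigned long syndrome = 0x55;  /* ECC syndrome, if known  */
            int row = 0, channel = 0;       /* from csrow decode       */

            /* Full information available: log it against the csrow. */
            edac_mc_handle_ce(mci, page, offset, syndrome, row, channel,
                    "my_check");

            /* Hardware flagged a CE but could not say where. */
            edac_mc_handle_ce_no_info(mci, "CE with no address decode");
    }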
1718 | |||
1719 | EXPORT_SYMBOL(edac_mc_handle_ue); | ||
1720 | 1675 | ||
1721 | void edac_mc_handle_ue(struct mem_ctl_info *mci, | 1676 | void edac_mc_handle_ue(struct mem_ctl_info *mci, |
1722 | unsigned long page_frame_number, | 1677 | unsigned long page_frame_number, unsigned long offset_in_page, |
1723 | unsigned long offset_in_page, int row, | 1678 | int row, const char *msg) |
1724 | const char *msg) | ||
1725 | { | 1679 | { |
1726 | int len = EDAC_MC_LABEL_LEN * 4; | 1680 | int len = EDAC_MC_LABEL_LEN * 4; |
1727 | char labels[len + 1]; | 1681 | char labels[len + 1]; |
@@ -1729,65 +1683,61 @@ void edac_mc_handle_ue(struct mem_ctl_info *mci, | |||
1729 | int chan; | 1683 | int chan; |
1730 | int chars; | 1684 | int chars; |
1731 | 1685 | ||
1732 | debugf3("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 1686 | debugf3("MC%d: %s()\n", mci->mc_idx, __func__); |
1733 | 1687 | ||
1734 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ | 1688 | /* FIXME - maybe make panic on INTERNAL ERROR an option */ |
1735 | if (row >= mci->nr_csrows || row < 0) { | 1689 | if (row >= mci->nr_csrows || row < 0) { |
1736 | /* something is wrong */ | 1690 | /* something is wrong */ |
1737 | printk(KERN_ERR | 1691 | edac_mc_printk(mci, KERN_ERR, |
1738 | "EDAC MC%d: INTERNAL ERROR: row out of range (%d >= %d)\n", | 1692 | "INTERNAL ERROR: row out of range " |
1739 | mci->mc_idx, row, mci->nr_csrows); | 1693 | "(%d >= %d)\n", row, mci->nr_csrows); |
1740 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); | 1694 | edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); |
1741 | return; | 1695 | return; |
1742 | } | 1696 | } |
1743 | 1697 | ||
1744 | chars = snprintf(pos, len + 1, "%s", | 1698 | chars = snprintf(pos, len + 1, "%s", |
1745 | mci->csrows[row].channels[0].label); | 1699 | mci->csrows[row].channels[0].label); |
1746 | len -= chars; | 1700 | len -= chars; |
1747 | pos += chars; | 1701 | pos += chars; |
1702 | |||
1748 | for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); | 1703 | for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); |
1749 | chan++) { | 1704 | chan++) { |
1750 | chars = snprintf(pos, len + 1, ":%s", | 1705 | chars = snprintf(pos, len + 1, ":%s", |
1751 | mci->csrows[row].channels[chan].label); | 1706 | mci->csrows[row].channels[chan].label); |
1752 | len -= chars; | 1707 | len -= chars; |
1753 | pos += chars; | 1708 | pos += chars; |
1754 | } | 1709 | } |
1755 | 1710 | ||
1756 | if (log_ue) | 1711 | if (log_ue) |
1757 | printk(KERN_EMERG | 1712 | edac_mc_printk(mci, KERN_EMERG, |
1758 | "EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," | 1713 | "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " |
1759 | " labels \"%s\": %s\n", mci->mc_idx, | 1714 | "labels \"%s\": %s\n", page_frame_number, |
1760 | page_frame_number, offset_in_page, | 1715 | offset_in_page, mci->csrows[row].grain, row, labels, |
1761 | mci->csrows[row].grain, row, labels, msg); | 1716 | msg); |
1762 | 1717 | ||
1763 | if (panic_on_ue) | 1718 | if (panic_on_ue) |
1764 | panic | 1719 | panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " |
1765 | ("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, row %d," | 1720 | "row %d, labels \"%s\": %s\n", mci->mc_idx, |
1766 | " labels \"%s\": %s\n", mci->mc_idx, | 1721 | page_frame_number, offset_in_page, |
1767 | page_frame_number, offset_in_page, | 1722 | mci->csrows[row].grain, row, labels, msg); |
1768 | mci->csrows[row].grain, row, labels, msg); | ||
1769 | 1723 | ||
1770 | mci->ue_count++; | 1724 | mci->ue_count++; |
1771 | mci->csrows[row].ue_count++; | 1725 | mci->csrows[row].ue_count++; |
1772 | } | 1726 | } |
1727 | EXPORT_SYMBOL_GPL(edac_mc_handle_ue); | ||
1773 | 1728 | ||
1774 | 1729 | void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) | |
1775 | EXPORT_SYMBOL(edac_mc_handle_ue_no_info); | ||
1776 | |||
1777 | void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, | ||
1778 | const char *msg) | ||
1779 | { | 1730 | { |
1780 | if (panic_on_ue) | 1731 | if (panic_on_ue) |
1781 | panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); | 1732 | panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); |
1782 | 1733 | ||
1783 | if (log_ue) | 1734 | if (log_ue) |
1784 | printk(KERN_WARNING | 1735 | edac_mc_printk(mci, KERN_WARNING, |
1785 | "EDAC MC%d: UE - no information available: %s\n", | 1736 | "UE - no information available: %s\n", msg); |
1786 | mci->mc_idx, msg); | ||
1787 | mci->ue_noinfo_count++; | 1737 | mci->ue_noinfo_count++; |
1788 | mci->ue_count++; | 1738 | mci->ue_count++; |
1789 | } | 1739 | } |
1790 | 1740 | EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info); | |
1791 | 1741 | ||
1792 | #ifdef CONFIG_PCI | 1742 | #ifdef CONFIG_PCI |
1793 | 1743 | ||
@@ -1799,18 +1749,22 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) | |||
1799 | where = secondary ? PCI_SEC_STATUS : PCI_STATUS; | 1749 | where = secondary ? PCI_SEC_STATUS : PCI_STATUS; |
1800 | pci_read_config_word(dev, where, &status); | 1750 | pci_read_config_word(dev, where, &status); |
1801 | 1751 | ||
1802 | /* If we get back 0xFFFF then we must suspect that the card has been pulled but | 1752 | /* If we get back 0xFFFF then we must suspect that the card has been |
1803 | the Linux PCI layer has not yet finished cleaning up. We don't want to report | 1753 | * pulled but the Linux PCI layer has not yet finished cleaning up. |
1804 | on such devices */ | 1754 | * We don't want to report on such devices |
1755 | */ | ||
1805 | 1756 | ||
1806 | if (status == 0xFFFF) { | 1757 | if (status == 0xFFFF) { |
1807 | u32 sanity; | 1758 | u32 sanity; |
1759 | |||
1808 | pci_read_config_dword(dev, 0, &sanity); | 1760 | pci_read_config_dword(dev, 0, &sanity); |
1761 | |||
1809 | if (sanity == 0xFFFFFFFF) | 1762 | if (sanity == 0xFFFFFFFF) |
1810 | return 0; | 1763 | return 0; |
1811 | } | 1764 | } |
1765 | |||
1812 | status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | | 1766 | status &= PCI_STATUS_DETECTED_PARITY | PCI_STATUS_SIG_SYSTEM_ERROR | |
1813 | PCI_STATUS_PARITY; | 1767 | PCI_STATUS_PARITY; |
1814 | 1768 | ||
1815 | if (status) | 1769 | if (status) |
1816 | /* reset only the bits we are interested in */ | 1770 | /* reset only the bits we are interested in */ |
@@ -1822,7 +1776,7 @@ static u16 get_pci_parity_status(struct pci_dev *dev, int secondary) | |||
1822 | typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); | 1776 | typedef void (*pci_parity_check_fn_t) (struct pci_dev *dev); |
1823 | 1777 | ||
1824 | /* Clear any PCI parity errors logged by this device. */ | 1778 | /* Clear any PCI parity errors logged by this device. */ |
1825 | static void edac_pci_dev_parity_clear( struct pci_dev *dev ) | 1779 | static void edac_pci_dev_parity_clear(struct pci_dev *dev) |
1826 | { | 1780 | { |
1827 | u8 header_type; | 1781 | u8 header_type; |
1828 | 1782 | ||
@@ -1853,25 +1807,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) | |||
1853 | /* check the status reg for errors */ | 1807 | /* check the status reg for errors */ |
1854 | if (status) { | 1808 | if (status) { |
1855 | if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) | 1809 | if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) |
1856 | printk(KERN_CRIT | 1810 | edac_printk(KERN_CRIT, EDAC_PCI, |
1857 | "EDAC PCI- " | ||
1858 | "Signaled System Error on %s\n", | 1811 | "Signaled System Error on %s\n", |
1859 | pci_name (dev)); | 1812 | pci_name(dev)); |
1860 | 1813 | ||
1861 | if (status & (PCI_STATUS_PARITY)) { | 1814 | if (status & (PCI_STATUS_PARITY)) { |
1862 | printk(KERN_CRIT | 1815 | edac_printk(KERN_CRIT, EDAC_PCI, |
1863 | "EDAC PCI- " | ||
1864 | "Master Data Parity Error on %s\n", | 1816 | "Master Data Parity Error on %s\n", |
1865 | pci_name (dev)); | 1817 | pci_name(dev)); |
1866 | 1818 | ||
1867 | atomic_inc(&pci_parity_count); | 1819 | atomic_inc(&pci_parity_count); |
1868 | } | 1820 | } |
1869 | 1821 | ||
1870 | if (status & (PCI_STATUS_DETECTED_PARITY)) { | 1822 | if (status & (PCI_STATUS_DETECTED_PARITY)) { |
1871 | printk(KERN_CRIT | 1823 | edac_printk(KERN_CRIT, EDAC_PCI, |
1872 | "EDAC PCI- " | ||
1873 | "Detected Parity Error on %s\n", | 1824 | "Detected Parity Error on %s\n", |
1874 | pci_name (dev)); | 1825 | pci_name(dev)); |
1875 | 1826 | ||
1876 | atomic_inc(&pci_parity_count); | 1827 | atomic_inc(&pci_parity_count); |
1877 | } | 1828 | } |
@@ -1892,25 +1843,22 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) | |||
1892 | /* check the secondary status reg for errors */ | 1843 | /* check the secondary status reg for errors */ |
1893 | if (status) { | 1844 | if (status) { |
1894 | if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) | 1845 | if (status & (PCI_STATUS_SIG_SYSTEM_ERROR)) |
1895 | printk(KERN_CRIT | 1846 | edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " |
1896 | "EDAC PCI-Bridge- " | ||
1897 | "Signaled System Error on %s\n", | 1847 | "Signaled System Error on %s\n", |
1898 | pci_name (dev)); | 1848 | pci_name(dev)); |
1899 | 1849 | ||
1900 | if (status & (PCI_STATUS_PARITY)) { | 1850 | if (status & (PCI_STATUS_PARITY)) { |
1901 | printk(KERN_CRIT | 1851 | edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " |
1902 | "EDAC PCI-Bridge- " | 1852 | "Master Data Parity Error on " |
1903 | "Master Data Parity Error on %s\n", | 1853 | "%s\n", pci_name(dev)); |
1904 | pci_name (dev)); | ||
1905 | 1854 | ||
1906 | atomic_inc(&pci_parity_count); | 1855 | atomic_inc(&pci_parity_count); |
1907 | } | 1856 | } |
1908 | 1857 | ||
1909 | if (status & (PCI_STATUS_DETECTED_PARITY)) { | 1858 | if (status & (PCI_STATUS_DETECTED_PARITY)) { |
1910 | printk(KERN_CRIT | 1859 | edac_printk(KERN_CRIT, EDAC_PCI, "Bridge " |
1911 | "EDAC PCI-Bridge- " | ||
1912 | "Detected Parity Error on %s\n", | 1860 | "Detected Parity Error on %s\n", |
1913 | pci_name (dev)); | 1861 | pci_name(dev)); |
1914 | 1862 | ||
1915 | atomic_inc(&pci_parity_count); | 1863 | atomic_inc(&pci_parity_count); |
1916 | } | 1864 | } |
@@ -1929,58 +1877,55 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev) | |||
1929 | * Returns: 0 not found | 1877 | * Returns: 0 not found |
1930 | * 1 found on list | 1878 | * 1 found on list |
1931 | */ | 1879 | */ |
1932 | static int check_dev_on_list(struct edac_pci_device_list *list, int free_index, | 1880 | static int check_dev_on_list(struct edac_pci_device_list *list, |
1933 | struct pci_dev *dev) | 1881 | int free_index, struct pci_dev *dev) |
1934 | { | 1882 | { |
1935 | int i; | 1883 | int i; |
1936 | int rc = 0; /* Assume not found */ | 1884 | int rc = 0; /* Assume not found */ |
1937 | unsigned short vendor=dev->vendor; | 1885 | unsigned short vendor=dev->vendor; |
1938 | unsigned short device=dev->device; | 1886 | unsigned short device=dev->device; |
1939 | 1887 | ||
1940 | /* Scan the list, looking for a vendor/device match | 1888 | /* Scan the list, looking for a vendor/device match */ |
1941 | */ | 1889 | for (i = 0; i < free_index; i++, list++ ) { |
1942 | for (i = 0; i < free_index; i++, list++ ) { | 1890 | if ((list->vendor == vendor ) && (list->device == device )) { |
1943 | if ( (list->vendor == vendor ) && | 1891 | rc = 1; |
1944 | (list->device == device )) { | 1892 | break; |
1945 | rc = 1; | 1893 | } |
1946 | break; | 1894 | } |
1947 | } | ||
1948 | } | ||
1949 | 1895 | ||
1950 | return rc; | 1896 | return rc; |
1951 | } | 1897 | } |
1952 | 1898 | ||
1953 | /* | 1899 | /* |
1954 | * pci_dev parity list iterator | 1900 | * pci_dev parity list iterator |
1955 | * Scan the PCI device list for one iteration, looking for SERRORs | 1901 | * Scan the PCI device list for one iteration, looking for SERRORs |
1956 | * Master Parity ERRORS or Parity ERRORs on primary or secondary devices | 1902 | * Master Parity ERRORS or Parity ERRORs on primary or secondary devices |
1957 | */ | 1903 | */ |
1958 | static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) | 1904 | static inline void edac_pci_dev_parity_iterator(pci_parity_check_fn_t fn) |
1959 | { | 1905 | { |
1960 | struct pci_dev *dev=NULL; | 1906 | struct pci_dev *dev = NULL; |
1961 | 1907 | ||
1962 | /* request for kernel access to the next PCI device, if any, | 1908 | /* request for kernel access to the next PCI device, if any, |
1963 | * and while we are looking at it have its reference count | 1909 | * and while we are looking at it have its reference count |
1964 | * bumped until we are done with it | 1910 | * bumped until we are done with it |
1965 | */ | 1911 | */ |
1966 | while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 1912 | while((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
1967 | 1913 | /* if whitelist exists then it has priority, so only scan | |
1968 | /* if whitelist exists then it has priority, so only scan those | 1914 | * those devices on the whitelist |
1969 | * devices on the whitelist | 1915 | */ |
1970 | */ | 1916 | if (pci_whitelist_count > 0 ) { |
1971 | if (pci_whitelist_count > 0 ) { | 1917 | if (check_dev_on_list(pci_whitelist, |
1972 | if (check_dev_on_list(pci_whitelist, | ||
1973 | pci_whitelist_count, dev)) | 1918 | pci_whitelist_count, dev)) |
1974 | fn(dev); | 1919 | fn(dev); |
1975 | } else { | 1920 | } else { |
1976 | /* | 1921 | /* |
1977 | * if no whitelist, then check if this devices is | 1922 | * if no whitelist, then check if this devices is |
1978 | * blacklisted | 1923 | * blacklisted |
1979 | */ | 1924 | */ |
1980 | if (!check_dev_on_list(pci_blacklist, | 1925 | if (!check_dev_on_list(pci_blacklist, |
1981 | pci_blacklist_count, dev)) | 1926 | pci_blacklist_count, dev)) |
1982 | fn(dev); | 1927 | fn(dev); |
1983 | } | 1928 | } |
1984 | } | 1929 | } |
1985 | } | 1930 | } |
1986 | 1931 | ||
@@ -1989,7 +1934,7 @@ static void do_pci_parity_check(void) | |||
1989 | unsigned long flags; | 1934 | unsigned long flags; |
1990 | int before_count; | 1935 | int before_count; |
1991 | 1936 | ||
1992 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1937 | debugf3("%s()\n", __func__); |
1993 | 1938 | ||
1994 | if (!check_pci_parity) | 1939 | if (!check_pci_parity) |
1995 | return; | 1940 | return; |
@@ -2011,7 +1956,6 @@ static void do_pci_parity_check(void) | |||
2011 | } | 1956 | } |
2012 | } | 1957 | } |
2013 | 1958 | ||
2014 | |||
2015 | static inline void clear_pci_parity_errors(void) | 1959 | static inline void clear_pci_parity_errors(void) |
2016 | { | 1960 | { |
2017 | /* Clear any PCI bus parity errors that devices initially have logged | 1961 | /* Clear any PCI bus parity errors that devices initially have logged |
@@ -2020,37 +1964,30 @@ static inline void clear_pci_parity_errors(void) | |||
2020 | edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); | 1964 | edac_pci_dev_parity_iterator(edac_pci_dev_parity_clear); |
2021 | } | 1965 | } |
2022 | 1966 | ||
2023 | |||
2024 | #else /* CONFIG_PCI */ | 1967 | #else /* CONFIG_PCI */ |
2025 | 1968 | ||
2026 | |||
2027 | static inline void do_pci_parity_check(void) | 1969 | static inline void do_pci_parity_check(void) |
2028 | { | 1970 | { |
2029 | /* no-op */ | 1971 | /* no-op */ |
2030 | } | 1972 | } |
2031 | 1973 | ||
2032 | |||
2033 | static inline void clear_pci_parity_errors(void) | 1974 | static inline void clear_pci_parity_errors(void) |
2034 | { | 1975 | { |
2035 | /* no-op */ | 1976 | /* no-op */ |
2036 | } | 1977 | } |
2037 | 1978 | ||
2038 | |||
2039 | #endif /* CONFIG_PCI */ | 1979 | #endif /* CONFIG_PCI */ |
2040 | 1980 | ||
2041 | /* | 1981 | /* |
2042 | * Iterate over all MC instances and check for ECC, et al, errors | 1982 | * Iterate over all MC instances and check for ECC, et al, errors |
2043 | */ | 1983 | */ |
2044 | static inline void check_mc_devices (void) | 1984 | static inline void check_mc_devices(void) |
2045 | { | 1985 | { |
2046 | unsigned long flags; | ||
2047 | struct list_head *item; | 1986 | struct list_head *item; |
2048 | struct mem_ctl_info *mci; | 1987 | struct mem_ctl_info *mci; |
2049 | 1988 | ||
2050 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 1989 | debugf3("%s()\n", __func__); |
2051 | 1990 | down(&mem_ctls_mutex); | |
2052 | /* during poll, have interrupts off */ | ||
2053 | local_irq_save(flags); | ||
2054 | 1991 | ||
2055 | list_for_each(item, &mc_devices) { | 1992 | list_for_each(item, &mc_devices) { |
2056 | mci = list_entry(item, struct mem_ctl_info, link); | 1993 | mci = list_entry(item, struct mem_ctl_info, link); |
@@ -2059,10 +1996,9 @@ static inline void check_mc_devices (void) | |||
2059 | mci->edac_check(mci); | 1996 | mci->edac_check(mci); |
2060 | } | 1997 | } |
2061 | 1998 | ||
2062 | local_irq_restore(flags); | 1999 | up(&mem_ctls_mutex); |
2063 | } | 2000 | } |
2064 | 2001 | ||
2065 | |||
2066 | /* | 2002 | /* |
2067 | * Check MC status every poll_msec. | 2003 | * Check MC status every poll_msec. |
2068 | * Check PCI status every poll_msec as well. | 2004 | * Check PCI status every poll_msec as well. |
@@ -2073,70 +2009,21 @@ static inline void check_mc_devices (void) | |||
2073 | */ | 2009 | */ |
2074 | static void do_edac_check(void) | 2010 | static void do_edac_check(void) |
2075 | { | 2011 | { |
2076 | 2012 | debugf3("%s()\n", __func__); | |
2077 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | ||
2078 | |||
2079 | check_mc_devices(); | 2013 | check_mc_devices(); |
2080 | |||
2081 | do_pci_parity_check(); | 2014 | do_pci_parity_check(); |
2082 | } | 2015 | } |
2083 | 2016 | ||
2084 | |||
2085 | /* | ||
2086 | * EDAC thread state information | ||
2087 | */ | ||
2088 | struct bs_thread_info | ||
2089 | { | ||
2090 | struct task_struct *task; | ||
2091 | struct completion *event; | ||
2092 | char *name; | ||
2093 | void (*run)(void); | ||
2094 | }; | ||
2095 | |||
2096 | static struct bs_thread_info bs_thread; | ||
2097 | |||
2098 | /* | ||
2099 | * edac_kernel_thread | ||
2100 | * This the kernel thread that processes edac operations | ||
2101 | * in a normal thread environment | ||
2102 | */ | ||
2103 | static int edac_kernel_thread(void *arg) | 2017 | static int edac_kernel_thread(void *arg) |
2104 | { | 2018 | { |
2105 | struct bs_thread_info *thread = (struct bs_thread_info *) arg; | 2019 | while (!kthread_should_stop()) { |
2106 | 2020 | do_edac_check(); | |
2107 | /* detach thread */ | ||
2108 | daemonize(thread->name); | ||
2109 | |||
2110 | current->exit_signal = SIGCHLD; | ||
2111 | allow_signal(SIGKILL); | ||
2112 | thread->task = current; | ||
2113 | |||
2114 | /* indicate to starting task we have started */ | ||
2115 | complete(thread->event); | ||
2116 | |||
2117 | /* loop forever, until we are told to stop */ | ||
2118 | while(thread->run != NULL) { | ||
2119 | void (*run)(void); | ||
2120 | |||
2121 | /* call the function to check the memory controllers */ | ||
2122 | run = thread->run; | ||
2123 | if (run) | ||
2124 | run(); | ||
2125 | |||
2126 | if (signal_pending(current)) | ||
2127 | flush_signals(current); | ||
2128 | |||
2129 | /* ensure we are interruptable */ | ||
2130 | set_current_state(TASK_INTERRUPTIBLE); | ||
2131 | 2021 | ||
2132 | /* goto sleep for the interval */ | 2022 | /* goto sleep for the interval */ |
2133 | schedule_timeout((HZ * poll_msec) / 1000); | 2023 | schedule_timeout_interruptible((HZ * poll_msec) / 1000); |
2134 | try_to_freeze(); | 2024 | try_to_freeze(); |
2135 | } | 2025 | } |
2136 | 2026 | ||
2137 | /* notify waiter that we are exiting */ | ||
2138 | complete(thread->event); | ||
2139 | |||
2140 | return 0; | 2027 | return 0; |
2141 | } | 2028 | } |
2142 | 2029 | ||
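Editor's note: the polling thread above now uses the kthread API instead of kernel_thread()/daemonize() and the signal plumbing: kthread_run() starts the worker, the loop tests kthread_should_stop(), and kthread_stop() from the exit path wakes the thread and waits for it to return. A minimal standalone sketch of that lifecycle (my_poll_fn, my_poll_msec and "kmypoll" are illustrative names):

    #include <linux/module.h>
    #include <linux/init.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    static struct task_struct *my_thread;
    static int my_poll_msec = 1000;

    static int my_poll_fn(void *arg)
    {
            /* kthread_stop() sets the stop flag and wakes us up. */
            while (!kthread_should_stop()) {
                    /* ... do one round of checking here ... */
                    schedule_timeout_interruptible((HZ * my_poll_msec) / 1000);
            }

            return 0;
    }

    static int __init my_init(void)
    {
            my_thread = kthread_run(my_poll_fn, NULL, "kmypoll");

            if (IS_ERR(my_thread))
                    return PTR_ERR(my_thread);

            return 0;
    }

    static void __exit my_exit(void)
    {
            kthread_stop(my_thread);   /* blocks until my_poll_fn returns */
    }

    module_init(my_init);
    module_exit(my_exit);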
@@ -2146,10 +2033,7 @@ static int edac_kernel_thread(void *arg) | |||
2146 | */ | 2033 | */ |
2147 | static int __init edac_mc_init(void) | 2034 | static int __init edac_mc_init(void) |
2148 | { | 2035 | { |
2149 | int ret; | 2036 | edac_printk(KERN_INFO, EDAC_MC, EDAC_MC_VERSION "\n"); |
2150 | struct completion event; | ||
2151 | |||
2152 | printk(KERN_INFO "MC: " __FILE__ " version " EDAC_MC_VERSION "\n"); | ||
2153 | 2037 | ||
2154 | /* | 2038 | /* |
2155 | * Harvest and clear any boot/initialization PCI parity errors | 2039 | * Harvest and clear any boot/initialization PCI parity errors |
@@ -2160,80 +2044,54 @@ static int __init edac_mc_init(void) | |||
2160 | */ | 2044 | */ |
2161 | clear_pci_parity_errors(); | 2045 | clear_pci_parity_errors(); |
2162 | 2046 | ||
2163 | /* perform check for first time to harvest boot leftovers */ | ||
2164 | do_edac_check(); | ||
2165 | |||
2166 | /* Create the MC sysfs entries */ | 2047 | /* Create the MC sysfs entries */ |
2167 | if (edac_sysfs_memctrl_setup()) { | 2048 | if (edac_sysfs_memctrl_setup()) { |
2168 | printk(KERN_ERR "EDAC MC: Error initializing sysfs code\n"); | 2049 | edac_printk(KERN_ERR, EDAC_MC, |
2050 | "Error initializing sysfs code\n"); | ||
2169 | return -ENODEV; | 2051 | return -ENODEV; |
2170 | } | 2052 | } |
2171 | 2053 | ||
2172 | /* Create the PCI parity sysfs entries */ | 2054 | /* Create the PCI parity sysfs entries */ |
2173 | if (edac_sysfs_pci_setup()) { | 2055 | if (edac_sysfs_pci_setup()) { |
2174 | edac_sysfs_memctrl_teardown(); | 2056 | edac_sysfs_memctrl_teardown(); |
2175 | printk(KERN_ERR "EDAC PCI: Error initializing sysfs code\n"); | 2057 | edac_printk(KERN_ERR, EDAC_MC, |
2058 | "EDAC PCI: Error initializing sysfs code\n"); | ||
2176 | return -ENODEV; | 2059 | return -ENODEV; |
2177 | } | 2060 | } |
2178 | 2061 | ||
2179 | /* Create our kernel thread */ | ||
2180 | init_completion(&event); | ||
2181 | bs_thread.event = &event; | ||
2182 | bs_thread.name = "kedac"; | ||
2183 | bs_thread.run = do_edac_check; | ||
2184 | |||
2185 | /* create our kernel thread */ | 2062 | /* create our kernel thread */ |
2186 | ret = kernel_thread(edac_kernel_thread, &bs_thread, CLONE_KERNEL); | 2063 | edac_thread = kthread_run(edac_kernel_thread, NULL, "kedac"); |
2187 | if (ret < 0) { | 2064 | |
2065 | if (IS_ERR(edac_thread)) { | ||
2188 | /* remove the sysfs entries */ | 2066 | /* remove the sysfs entries */ |
2189 | edac_sysfs_memctrl_teardown(); | 2067 | edac_sysfs_memctrl_teardown(); |
2190 | edac_sysfs_pci_teardown(); | 2068 | edac_sysfs_pci_teardown(); |
2191 | return -ENOMEM; | 2069 | return PTR_ERR(edac_thread); |
2192 | } | 2070 | } |
2193 | 2071 | ||
2194 | /* wait for our kernel theard ack that it is up and running */ | ||
2195 | wait_for_completion(&event); | ||
2196 | |||
2197 | return 0; | 2072 | return 0; |
2198 | } | 2073 | } |
2199 | 2074 | ||
2200 | |||
2201 | /* | 2075 | /* |
2202 | * edac_mc_exit() | 2076 | * edac_mc_exit() |
2203 | * module exit/termination function | 2077 | * module exit/termination function |
2204 | */ | 2078 | */ |
2205 | static void __exit edac_mc_exit(void) | 2079 | static void __exit edac_mc_exit(void) |
2206 | { | 2080 | { |
2207 | struct completion event; | 2081 | debugf0("%s()\n", __func__); |
2208 | 2082 | kthread_stop(edac_thread); | |
2209 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | ||
2210 | |||
2211 | init_completion(&event); | ||
2212 | bs_thread.event = &event; | ||
2213 | |||
2214 | /* As soon as ->run is set to NULL, the task could disappear, | ||
2215 | * so we need to hold tasklist_lock until we have sent the signal | ||
2216 | */ | ||
2217 | read_lock(&tasklist_lock); | ||
2218 | bs_thread.run = NULL; | ||
2219 | send_sig(SIGKILL, bs_thread.task, 1); | ||
2220 | read_unlock(&tasklist_lock); | ||
2221 | wait_for_completion(&event); | ||
2222 | 2083 | ||
2223 | /* tear down the sysfs device */ | 2084 | /* tear down the sysfs device */ |
2224 | edac_sysfs_memctrl_teardown(); | 2085 | edac_sysfs_memctrl_teardown(); |
2225 | edac_sysfs_pci_teardown(); | 2086 | edac_sysfs_pci_teardown(); |
2226 | } | 2087 | } |
2227 | 2088 | ||
2228 | |||
2229 | |||
2230 | |||
2231 | module_init(edac_mc_init); | 2089 | module_init(edac_mc_init); |
2232 | module_exit(edac_mc_exit); | 2090 | module_exit(edac_mc_exit); |
2233 | 2091 | ||
2234 | MODULE_LICENSE("GPL"); | 2092 | MODULE_LICENSE("GPL"); |
2235 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" | 2093 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" |
2236 | "Based on.work by Dan Hollis et al"); | 2094 | "Based on work by Dan Hollis et al"); |
2237 | MODULE_DESCRIPTION("Core library routines for MC reporting"); | 2095 | MODULE_DESCRIPTION("Core library routines for MC reporting"); |
2238 | 2096 | ||
2239 | module_param(panic_on_ue, int, 0644); | 2097 | module_param(panic_on_ue, int, 0644); |
diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index 75ecf484a43a..8d9e83909b9c 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h | |||
@@ -15,11 +15,9 @@ | |||
15 | * | 15 | * |
16 | */ | 16 | */ |
17 | 17 | ||
18 | |||
19 | #ifndef _EDAC_MC_H_ | 18 | #ifndef _EDAC_MC_H_ |
20 | #define _EDAC_MC_H_ | 19 | #define _EDAC_MC_H_ |
21 | 20 | ||
22 | |||
23 | #include <linux/config.h> | 21 | #include <linux/config.h> |
24 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
25 | #include <linux/types.h> | 23 | #include <linux/types.h> |
@@ -33,7 +31,6 @@ | |||
33 | #include <linux/completion.h> | 31 | #include <linux/completion.h> |
34 | #include <linux/kobject.h> | 32 | #include <linux/kobject.h> |
35 | 33 | ||
36 | |||
37 | #define EDAC_MC_LABEL_LEN 31 | 34 | #define EDAC_MC_LABEL_LEN 31 |
38 | #define MC_PROC_NAME_MAX_LEN 7 | 35 | #define MC_PROC_NAME_MAX_LEN 7 |
39 | 36 | ||
@@ -43,31 +40,53 @@ | |||
43 | #define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) | 40 | #define PAGES_TO_MiB( pages ) ( ( pages ) << ( PAGE_SHIFT - 20 ) ) |
44 | #endif | 41 | #endif |
45 | 42 | ||
43 | #define edac_printk(level, prefix, fmt, arg...) \ | ||
44 | printk(level "EDAC " prefix ": " fmt, ##arg) | ||
45 | |||
46 | #define edac_mc_printk(mci, level, fmt, arg...) \ | ||
47 | printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg) | ||
48 | |||
49 | #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \ | ||
50 | printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg) | ||
51 | |||
52 | /* prefixes for edac_printk() and edac_mc_printk() */ | ||
53 | #define EDAC_MC "MC" | ||
54 | #define EDAC_PCI "PCI" | ||
55 | #define EDAC_DEBUG "DEBUG" | ||
56 | |||
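Editor's note: the new printk wrappers above centralise the "EDAC <prefix>" and "EDAC <prefix> MC<n>" message formats. A short usage sketch, assuming a struct mem_ctl_info *mci from the surrounding driver (the message text and the "amd76x" chipset prefix are illustrative):

    static void my_report(struct mem_ctl_info *mci)
    {
            /* "EDAC MC: ..." - subsystem message, no instance context */
            edac_printk(KERN_INFO, EDAC_MC, "polling every %d msec\n", 1000);

            /* "EDAC MC0: ..." - tied to one controller instance */
            edac_mc_printk(mci, KERN_WARNING, "failed to create sysfs device\n");

            /* "EDAC amd76x MC0: ..." - chipset-prefixed variant */
            edac_mc_chipset_printk(mci, KERN_ERR, "amd76x", "probe failed\n");
    }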
46 | #ifdef CONFIG_EDAC_DEBUG | 57 | #ifdef CONFIG_EDAC_DEBUG |
47 | extern int edac_debug_level; | 58 | extern int edac_debug_level; |
48 | #define edac_debug_printk(level, fmt, args...) \ | 59 | |
49 | do { if (level <= edac_debug_level) printk(KERN_DEBUG fmt, ##args); } while(0) | 60 | #define edac_debug_printk(level, fmt, arg...) \ |
61 | do { \ | ||
62 | if (level <= edac_debug_level) \ | ||
63 | edac_printk(KERN_DEBUG, EDAC_DEBUG, fmt, ##arg); \ | ||
64 | } while(0) | ||
65 | |||
50 | #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) | 66 | #define debugf0( ... ) edac_debug_printk(0, __VA_ARGS__ ) |
51 | #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) | 67 | #define debugf1( ... ) edac_debug_printk(1, __VA_ARGS__ ) |
52 | #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) | 68 | #define debugf2( ... ) edac_debug_printk(2, __VA_ARGS__ ) |
53 | #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) | 69 | #define debugf3( ... ) edac_debug_printk(3, __VA_ARGS__ ) |
54 | #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) | 70 | #define debugf4( ... ) edac_debug_printk(4, __VA_ARGS__ ) |
55 | #else /* !CONFIG_EDAC_DEBUG */ | 71 | |
72 | #else /* !CONFIG_EDAC_DEBUG */ | ||
73 | |||
56 | #define debugf0( ... ) | 74 | #define debugf0( ... ) |
57 | #define debugf1( ... ) | 75 | #define debugf1( ... ) |
58 | #define debugf2( ... ) | 76 | #define debugf2( ... ) |
59 | #define debugf3( ... ) | 77 | #define debugf3( ... ) |
60 | #define debugf4( ... ) | 78 | #define debugf4( ... ) |
61 | #endif /* !CONFIG_EDAC_DEBUG */ | ||
62 | 79 | ||
80 | #endif /* !CONFIG_EDAC_DEBUG */ | ||
63 | 81 | ||
64 | #define bs_xstr(s) bs_str(s) | 82 | #define edac_xstr(s) edac_str(s) |
65 | #define bs_str(s) #s | 83 | #define edac_str(s) #s |
66 | #define BS_MOD_STR bs_xstr(KBUILD_BASENAME) | 84 | #define EDAC_MOD_STR edac_xstr(KBUILD_BASENAME) |
67 | 85 | ||
68 | #define BIT(x) (1 << (x)) | 86 | #define BIT(x) (1 << (x)) |
69 | 87 | ||
70 | #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, PCI_DEVICE_ID_ ## vend ## _ ## dev | 88 | #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ |
89 | PCI_DEVICE_ID_ ## vend ## _ ## dev | ||
71 | 90 | ||
72 | /* memory devices */ | 91 | /* memory devices */ |
73 | enum dev_type { | 92 | enum dev_type { |
@@ -117,7 +136,6 @@ enum mem_type { | |||
117 | #define MEM_FLAG_RDDR BIT(MEM_RDDR) | 136 | #define MEM_FLAG_RDDR BIT(MEM_RDDR) |
118 | #define MEM_FLAG_RMBS BIT(MEM_RMBS) | 137 | #define MEM_FLAG_RMBS BIT(MEM_RMBS) |
119 | 138 | ||
120 | |||
121 | /* chipset Error Detection and Correction capabilities and mode */ | 139 | /* chipset Error Detection and Correction capabilities and mode */ |
122 | enum edac_type { | 140 | enum edac_type { |
123 | EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ | 141 | EDAC_UNKNOWN = 0, /* Unknown if ECC is available */ |
@@ -142,7 +160,6 @@ enum edac_type { | |||
142 | #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) | 160 | #define EDAC_FLAG_S8ECD8ED BIT(EDAC_S8ECD8ED) |
143 | #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) | 161 | #define EDAC_FLAG_S16ECD16ED BIT(EDAC_S16ECD16ED) |
144 | 162 | ||
145 | |||
146 | /* scrubbing capabilities */ | 163 | /* scrubbing capabilities */ |
147 | enum scrub_type { | 164 | enum scrub_type { |
148 | SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ | 165 | SCRUB_UNKNOWN = 0, /* Unknown if scrubber is available */ |
@@ -166,11 +183,6 @@ enum scrub_type { | |||
166 | #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) | 183 | #define SCRUB_FLAG_HW_PROG_SRC BIT(SCRUB_HW_PROG_SRC_CORR) |
167 | #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) | 184 | #define SCRUB_FLAG_HW_TUN BIT(SCRUB_HW_TUNABLE) |
168 | 185 | ||
169 | enum mci_sysfs_status { | ||
170 | MCI_SYSFS_INACTIVE = 0, /* sysfs entries NOT registered */ | ||
171 | MCI_SYSFS_ACTIVE /* sysfs entries ARE registered */ | ||
172 | }; | ||
173 | |||
174 | /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ | 186 | /* FIXME - should have notify capabilities: NMI, LOG, PROC, etc */ |
175 | 187 | ||
176 | /* | 188 | /* |
@@ -255,20 +267,19 @@ enum mci_sysfs_status { | |||
255 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. | 267 | * PS - I enjoyed writing all that about as much as you enjoyed reading it. |
256 | */ | 268 | */ |
257 | 269 | ||
258 | |||
259 | struct channel_info { | 270 | struct channel_info { |
260 | int chan_idx; /* channel index */ | 271 | int chan_idx; /* channel index */ |
261 | u32 ce_count; /* Correctable Errors for this CHANNEL */ | 272 | u32 ce_count; /* Correctable Errors for this CHANNEL */ |
262 | char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ | 273 | char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */ |
263 | struct csrow_info *csrow; /* the parent */ | 274 | struct csrow_info *csrow; /* the parent */ |
264 | }; | 275 | }; |
265 | 276 | ||
266 | |||
267 | struct csrow_info { | 277 | struct csrow_info { |
268 | unsigned long first_page; /* first page number in dimm */ | 278 | unsigned long first_page; /* first page number in dimm */ |
269 | unsigned long last_page; /* last page number in dimm */ | 279 | unsigned long last_page; /* last page number in dimm */ |
270 | unsigned long page_mask; /* used for interleaving - | 280 | unsigned long page_mask; /* used for interleaving - |
271 | 0UL for non intlv */ | 281 | * 0UL for non intlv |
282 | */ | ||
272 | u32 nr_pages; /* number of pages in csrow */ | 283 | u32 nr_pages; /* number of pages in csrow */ |
273 | u32 grain; /* granularity of reported error in bytes */ | 284 | u32 grain; /* granularity of reported error in bytes */ |
274 | int csrow_idx; /* the chip-select row */ | 285 | int csrow_idx; /* the chip-select row */ |
@@ -280,29 +291,28 @@ struct csrow_info { | |||
280 | struct mem_ctl_info *mci; /* the parent */ | 291 | struct mem_ctl_info *mci; /* the parent */ |
281 | 292 | ||
282 | struct kobject kobj; /* sysfs kobject for this csrow */ | 293 | struct kobject kobj; /* sysfs kobject for this csrow */ |
294 | struct completion kobj_complete; | ||
283 | 295 | ||
284 | /* FIXME the number of CHANNELs might need to become dynamic */ | 296 | /* FIXME the number of CHANNELs might need to become dynamic */ |
285 | u32 nr_channels; | 297 | u32 nr_channels; |
286 | struct channel_info *channels; | 298 | struct channel_info *channels; |
287 | }; | 299 | }; |
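The drivers later in this patch (i82860, i82875p) fill the per-row fields above by walking cumulative boundary registers: each register gives the total memory up to and including that row, so first_page/last_page/nr_pages fall out of consecutive differences, and an unchanged value marks an unpopulated row. A stand-alone model of that bookkeeping, with invented boundary values:

    #include <stdio.h>

    struct csrow_model {
        unsigned long first_page;
        unsigned long last_page;
        unsigned long nr_pages;
    };

    int main(void)
    {
        /* cumulative row boundaries in pages, e.g. decoded from DRB/GBA regs */
        unsigned long cumul[] = { 0x8000, 0x8000, 0x18000, 0x20000 };
        unsigned long last_cumul = 0;
        struct csrow_model rows[4];
        int i;

        for (i = 0; i < 4; i++) {
            if (cumul[i] == last_cumul) {
                rows[i].nr_pages = 0;    /* same boundary: row not populated */
                continue;
            }
            rows[i].first_page = last_cumul;
            rows[i].last_page = cumul[i] - 1;
            rows[i].nr_pages = cumul[i] - last_cumul;
            last_cumul = cumul[i];
            printf("row %d: pages 0x%lx-0x%lx (%lu pages)\n", i,
                   rows[i].first_page, rows[i].last_page, rows[i].nr_pages);
        }
        return 0;
    }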
288 | 300 | ||
289 | |||
290 | struct mem_ctl_info { | 301 | struct mem_ctl_info { |
291 | struct list_head link; /* for global list of mem_ctl_info structs */ | 302 | struct list_head link; /* for global list of mem_ctl_info structs */ |
292 | unsigned long mtype_cap; /* memory types supported by mc */ | 303 | unsigned long mtype_cap; /* memory types supported by mc */ |
293 | unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ | 304 | unsigned long edac_ctl_cap; /* Mem controller EDAC capabilities */ |
294 | unsigned long edac_cap; /* configuration capabilities - this is | 305 | unsigned long edac_cap; /* configuration capabilities - this is |
295 | closely related to edac_ctl_cap. The | 306 | * closely related to edac_ctl_cap. The |
296 | difference is that the controller | 307 | * difference is that the controller may be |
297 | may be capable of s4ecd4ed which would | 308 | * capable of s4ecd4ed which would be listed |
298 | be listed in edac_ctl_cap, but if | 309 | * in edac_ctl_cap, but if channels aren't |
299 | channels aren't capable of s4ecd4ed then the | 310 | * capable of s4ecd4ed then the edac_cap would |
300 | edac_cap would not have that capability. */ | 311 | * not have that capability. |
312 | */ | ||
301 | unsigned long scrub_cap; /* chipset scrub capabilities */ | 313 | unsigned long scrub_cap; /* chipset scrub capabilities */ |
302 | enum scrub_type scrub_mode; /* current scrub mode */ | 314 | enum scrub_type scrub_mode; /* current scrub mode */ |
303 | 315 | ||
304 | enum mci_sysfs_status sysfs_active; /* status of sysfs */ | ||
305 | |||
306 | /* pointer to edac checking routine */ | 316 | /* pointer to edac checking routine */ |
307 | void (*edac_check) (struct mem_ctl_info * mci); | 317 | void (*edac_check) (struct mem_ctl_info * mci); |
308 | /* | 318 | /* |
@@ -311,7 +321,7 @@ struct mem_ctl_info { | |||
311 | */ | 321 | */ |
312 | /* FIXME - why not send the phys page to begin with? */ | 322 | /* FIXME - why not send the phys page to begin with? */ |
313 | unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, | 323 | unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, |
314 | unsigned long page); | 324 | unsigned long page); |
315 | int mc_idx; | 325 | int mc_idx; |
316 | int nr_csrows; | 326 | int nr_csrows; |
317 | struct csrow_info *csrows; | 327 | struct csrow_info *csrows; |
@@ -340,72 +350,69 @@ struct mem_ctl_info { | |||
340 | 350 | ||
341 | /* edac sysfs device control */ | 351 | /* edac sysfs device control */ |
342 | struct kobject edac_mci_kobj; | 352 | struct kobject edac_mci_kobj; |
353 | struct completion kobj_complete; | ||
343 | }; | 354 | }; |
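The edac_cap comment above boils down to an intersection: the effective capability is the controller's advertised modes restricted to what the populated channels can actually do. A toy model of that relationship (the flag values here are illustrative placeholders, not the kernel's EDAC_FLAG_* encodings):

    #include <stdio.h>

    #define BIT(x)             (1 << (x))
    #define EDAC_FLAG_SECDED   BIT(0)    /* placeholder flag values */
    #define EDAC_FLAG_S4ECD4ED BIT(1)

    int main(void)
    {
        /* what the controller silicon can do */
        unsigned long edac_ctl_cap = EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED;
        /* what the DIMM/channel population actually supports */
        unsigned long chan_cap = EDAC_FLAG_SECDED;
        /* effective capability of this configuration */
        unsigned long edac_cap = edac_ctl_cap & chan_cap;

        printf("ctl_cap=0x%lx chan_cap=0x%lx -> edac_cap=0x%lx\n",
               edac_ctl_cap, chan_cap, edac_cap);
        return 0;
    }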
344 | 355 | ||
345 | |||
346 | |||
347 | /* write all or some bits in a byte-register*/ | 356 | /* write all or some bits in a byte-register*/ |
348 | static inline void pci_write_bits8(struct pci_dev *pdev, int offset, | 357 | static inline void pci_write_bits8(struct pci_dev *pdev, int offset, u8 value, |
349 | u8 value, u8 mask) | 358 | u8 mask) |
350 | { | 359 | { |
351 | if (mask != 0xff) { | 360 | if (mask != 0xff) { |
352 | u8 buf; | 361 | u8 buf; |
362 | |||
353 | pci_read_config_byte(pdev, offset, &buf); | 363 | pci_read_config_byte(pdev, offset, &buf); |
354 | value &= mask; | 364 | value &= mask; |
355 | buf &= ~mask; | 365 | buf &= ~mask; |
356 | value |= buf; | 366 | value |= buf; |
357 | } | 367 | } |
368 | |||
358 | pci_write_config_byte(pdev, offset, value); | 369 | pci_write_config_byte(pdev, offset, value); |
359 | } | 370 | } |
360 | 371 | ||
361 | |||
362 | /* write all or some bits in a word-register*/ | 372 | /* write all or some bits in a word-register*/ |
363 | static inline void pci_write_bits16(struct pci_dev *pdev, int offset, | 373 | static inline void pci_write_bits16(struct pci_dev *pdev, int offset, |
364 | u16 value, u16 mask) | 374 | u16 value, u16 mask) |
365 | { | 375 | { |
366 | if (mask != 0xffff) { | 376 | if (mask != 0xffff) { |
367 | u16 buf; | 377 | u16 buf; |
378 | |||
368 | pci_read_config_word(pdev, offset, &buf); | 379 | pci_read_config_word(pdev, offset, &buf); |
369 | value &= mask; | 380 | value &= mask; |
370 | buf &= ~mask; | 381 | buf &= ~mask; |
371 | value |= buf; | 382 | value |= buf; |
372 | } | 383 | } |
384 | |||
373 | pci_write_config_word(pdev, offset, value); | 385 | pci_write_config_word(pdev, offset, value); |
374 | } | 386 | } |
375 | 387 | ||
376 | |||
377 | /* write all or some bits in a dword-register*/ | 388 | /* write all or some bits in a dword-register*/ |
378 | static inline void pci_write_bits32(struct pci_dev *pdev, int offset, | 389 | static inline void pci_write_bits32(struct pci_dev *pdev, int offset, |
379 | u32 value, u32 mask) | 390 | u32 value, u32 mask) |
380 | { | 391 | { |
381 | if (mask != 0xffff) { | 392 | if (mask != 0xffff) { |
382 | u32 buf; | 393 | u32 buf; |
394 | |||
383 | pci_read_config_dword(pdev, offset, &buf); | 395 | pci_read_config_dword(pdev, offset, &buf); |
384 | value &= mask; | 396 | value &= mask; |
385 | buf &= ~mask; | 397 | buf &= ~mask; |
386 | value |= buf; | 398 | value |= buf; |
387 | } | 399 | } |
400 | |||
388 | pci_write_config_dword(pdev, offset, value); | 401 | pci_write_config_dword(pdev, offset, value); |
389 | } | 402 | } |
390 | 403 | ||
391 | |||
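All three pci_write_bits helpers implement the same masked read-modify-write: bits inside mask come from value, bits outside mask are preserved from the device, and a full mask skips the read entirely. A user-space model of the 32-bit case, assuming the full-width fast-path test for a dword is mask == 0xffffffff and using a plain variable in place of PCI config space:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_reg = 0xdeadbeef;    /* stands in for a config dword */

    static void write_bits32(uint32_t *reg, uint32_t value, uint32_t mask)
    {
        if (mask != 0xffffffffu) {    /* partial update: read-modify-write */
            uint32_t buf = *reg;      /* read current contents */

            value &= mask;            /* keep only the bits we own */
            buf &= ~mask;             /* keep only the bits we don't */
            value |= buf;             /* merge */
        }
        *reg = value;                 /* single write back */
    }

    int main(void)
    {
        write_bits32(&fake_reg, 0x00000081, 0x000000ff);    /* low byte only */
        printf("after masked write: 0x%08x\n", (unsigned)fake_reg); /* 0xdeadbe81 */

        write_bits32(&fake_reg, 0x12345678, 0xffffffffu);   /* full overwrite */
        printf("after full write:   0x%08x\n", (unsigned)fake_reg); /* 0x12345678 */
        return 0;
    }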
392 | #ifdef CONFIG_EDAC_DEBUG | 404 | #ifdef CONFIG_EDAC_DEBUG |
393 | void edac_mc_dump_channel(struct channel_info *chan); | 405 | void edac_mc_dump_channel(struct channel_info *chan); |
394 | void edac_mc_dump_mci(struct mem_ctl_info *mci); | 406 | void edac_mc_dump_mci(struct mem_ctl_info *mci); |
395 | void edac_mc_dump_csrow(struct csrow_info *csrow); | 407 | void edac_mc_dump_csrow(struct csrow_info *csrow); |
396 | #endif /* CONFIG_EDAC_DEBUG */ | 408 | #endif /* CONFIG_EDAC_DEBUG */ |
397 | 409 | ||
398 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); | 410 | extern int edac_mc_add_mc(struct mem_ctl_info *mci); |
399 | extern int edac_mc_del_mc(struct mem_ctl_info *mci); | 411 | extern struct mem_ctl_info * edac_mc_del_mc(struct pci_dev *pdev); |
400 | |||
401 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, | 412 | extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, |
402 | unsigned long page); | 413 | unsigned long page); |
403 | 414 | extern void edac_mc_scrub_block(unsigned long page, unsigned long offset, | |
404 | extern struct mem_ctl_info *edac_mc_find_mci_by_pdev(struct pci_dev | 415 | u32 size); |
405 | *pdev); | ||
406 | |||
407 | extern void edac_mc_scrub_block(unsigned long page, | ||
408 | unsigned long offset, u32 size); | ||
409 | 416 | ||
410 | /* | 417 | /* |
411 | * The no info errors are used when error overflows are reported. | 418 | * The no info errors are used when error overflows are reported. |
@@ -418,31 +425,25 @@ extern void edac_mc_scrub_block(unsigned long page, | |||
418 | * statement clutter and extra function arguments. | 425 | * statement clutter and extra function arguments. |
419 | */ | 426 | */ |
420 | extern void edac_mc_handle_ce(struct mem_ctl_info *mci, | 427 | extern void edac_mc_handle_ce(struct mem_ctl_info *mci, |
421 | unsigned long page_frame_number, | 428 | unsigned long page_frame_number, unsigned long offset_in_page, |
422 | unsigned long offset_in_page, | 429 | unsigned long syndrome, int row, int channel, |
423 | unsigned long syndrome, | 430 | const char *msg); |
424 | int row, int channel, const char *msg); | ||
425 | |||
426 | extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, | 431 | extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, |
427 | const char *msg); | 432 | const char *msg); |
428 | |||
429 | extern void edac_mc_handle_ue(struct mem_ctl_info *mci, | 433 | extern void edac_mc_handle_ue(struct mem_ctl_info *mci, |
430 | unsigned long page_frame_number, | 434 | unsigned long page_frame_number, unsigned long offset_in_page, |
431 | unsigned long offset_in_page, | 435 | int row, const char *msg); |
432 | int row, const char *msg); | ||
433 | |||
434 | extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, | 436 | extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, |
435 | const char *msg); | 437 | const char *msg); |
436 | 438 | ||
437 | /* | 439 | /* |
438 | * This kmalloc's and initializes all the structures. | 440 | * This kmalloc's and initializes all the structures. |
439 | * Can't be used if all structures don't have the same lifetime. | 441 | * Can't be used if all structures don't have the same lifetime. |
440 | */ | 442 | */ |
441 | extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, | 443 | extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, |
442 | unsigned nr_csrows, unsigned nr_chans); | 444 | unsigned nr_chans); |
443 | 445 | ||
444 | /* Free an mc previously allocated by edac_mc_alloc() */ | 446 | /* Free an mc previously allocated by edac_mc_alloc() */ |
445 | extern void edac_mc_free(struct mem_ctl_info *mci); | 447 | extern void edac_mc_free(struct mem_ctl_info *mci); |
446 | 448 | ||
447 | |||
448 | #endif /* _EDAC_MC_H_ */ | 449 | #endif /* _EDAC_MC_H_ */ |
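The error handlers declared above take a chip-select row index, which the drivers in this patch derive from the faulting address via edac_mc_find_csrow_by_page(). Conceptually that is a range lookup over each row's first_page/last_page; a user-space sketch of the non-interleaved case, with made-up row boundaries (the helper name and the 4 KiB page size are assumptions of the model, not the kernel routine):

    #include <stdio.h>

    struct row_range {
        unsigned long first_page;
        unsigned long last_page;
    };

    /* mirrors the idea behind edac_mc_find_csrow_by_page(): -1 if nothing matches */
    static int find_csrow_by_page(const struct row_range *rows, int nr_rows,
                                  unsigned long page)
    {
        int i;

        for (i = 0; i < nr_rows; i++)
            if (page >= rows[i].first_page && page <= rows[i].last_page)
                return i;
        return -1;
    }

    int main(void)
    {
        const struct row_range rows[] = {
            { 0x00000, 0x07fff },
            { 0x08000, 0x17fff },
        };
        unsigned long error_address = 0x0a000000;    /* e.g. from an EAP register */
        unsigned long page = error_address >> 12;    /* 4 KiB pages */

        printf("page 0x%lx -> csrow %d\n", page,
               find_csrow_by_page(rows, 2, page));
        return 0;
    }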
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index 52596e75f9c2..fd342163cf97 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -9,7 +9,6 @@ | |||
9 | * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) | 9 | * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | |||
13 | #include <linux/config.h> | 12 | #include <linux/config.h> |
14 | #include <linux/module.h> | 13 | #include <linux/module.h> |
15 | #include <linux/init.h> | 14 | #include <linux/init.h> |
@@ -18,6 +17,11 @@ | |||
18 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
19 | #include "edac_mc.h" | 18 | #include "edac_mc.h" |
20 | 19 | ||
20 | #define i82860_printk(level, fmt, arg...) \ | ||
21 | edac_printk(level, "i82860", fmt, ##arg) | ||
22 | |||
23 | #define i82860_mc_printk(mci, level, fmt, arg...) \ | ||
24 | edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) | ||
21 | 25 | ||
22 | #ifndef PCI_DEVICE_ID_INTEL_82860_0 | 26 | #ifndef PCI_DEVICE_ID_INTEL_82860_0 |
23 | #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 | 27 | #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 |
@@ -48,15 +52,15 @@ struct i82860_error_info { | |||
48 | 52 | ||
49 | static const struct i82860_dev_info i82860_devs[] = { | 53 | static const struct i82860_dev_info i82860_devs[] = { |
50 | [I82860] = { | 54 | [I82860] = { |
51 | .ctl_name = "i82860"}, | 55 | .ctl_name = "i82860" |
56 | }, | ||
52 | }; | 57 | }; |
53 | 58 | ||
54 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code | 59 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code |
55 | has already registered driver */ | 60 | * has already registered driver |
61 | */ | ||
56 | 62 | ||
57 | static int i82860_registered = 1; | 63 | static void i82860_get_error_info(struct mem_ctl_info *mci, |
58 | |||
59 | static void i82860_get_error_info (struct mem_ctl_info *mci, | ||
60 | struct i82860_error_info *info) | 64 | struct i82860_error_info *info) |
61 | { | 65 | { |
62 | /* | 66 | /* |
@@ -78,14 +82,15 @@ static void i82860_get_error_info (struct mem_ctl_info *mci, | |||
78 | */ | 82 | */ |
79 | if (!(info->errsts2 & 0x0003)) | 83 | if (!(info->errsts2 & 0x0003)) |
80 | return; | 84 | return; |
85 | |||
81 | if ((info->errsts ^ info->errsts2) & 0x0003) { | 86 | if ((info->errsts ^ info->errsts2) & 0x0003) { |
82 | pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); | 87 | pci_read_config_dword(mci->pdev, I82860_EAP, &info->eap); |
83 | pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, | 88 | pci_read_config_word(mci->pdev, I82860_DERRCTL_STS, |
84 | &info->derrsyn); | 89 | &info->derrsyn); |
85 | } | 90 | } |
86 | } | 91 | } |
87 | 92 | ||
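Both the i82860 and i82875p error readers take two snapshots of the status register, one before and one after latching the address/syndrome: if the snapshots differ in the error bits, another error landed mid-sequence and the address is re-read so it matches the status being reported. A simulated-register model of that consistency check (the register layout and values are invented):

    #include <stdio.h>

    struct dev_regs {                /* simulated device registers */
        unsigned int errsts;         /* error status bits */
        unsigned int eap;            /* error address pointer */
    };

    struct err_info {
        unsigned int errsts, errsts2, eap;
    };

    static void get_error_info(struct dev_regs *dev, struct err_info *info)
    {
        info->errsts = dev->errsts;       /* first status snapshot */
        info->eap = dev->eap;             /* latch the address */

        /* pretend a new error arrives exactly here */
        dev->errsts |= 0x1;
        dev->eap = 0x2000;

        info->errsts2 = dev->errsts;      /* second status snapshot */
        if ((info->errsts ^ info->errsts2) & 0x3)
            info->eap = dev->eap;         /* status changed: re-latch address */
    }

    int main(void)
    {
        struct dev_regs dev = { .errsts = 0x2, .eap = 0x1000 };
        struct err_info info;

        get_error_info(&dev, &info);
        printf("report status 0x%x at address 0x%x\n", info->errsts2, info->eap);
        return 0;
    }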
88 | static int i82860_process_error_info (struct mem_ctl_info *mci, | 93 | static int i82860_process_error_info(struct mem_ctl_info *mci, |
89 | struct i82860_error_info *info, int handle_errors) | 94 | struct i82860_error_info *info, int handle_errors) |
90 | { | 95 | { |
91 | int row; | 96 | int row; |
@@ -107,8 +112,8 @@ static int i82860_process_error_info (struct mem_ctl_info *mci, | |||
107 | if (info->errsts & 0x0002) | 112 | if (info->errsts & 0x0002) |
108 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); | 113 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); |
109 | else | 114 | else |
110 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, | 115 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, |
111 | 0, "i82860 UE"); | 116 | "i82860 UE"); |
112 | 117 | ||
113 | return 1; | 118 | return 1; |
114 | } | 119 | } |
@@ -117,7 +122,7 @@ static void i82860_check(struct mem_ctl_info *mci) | |||
117 | { | 122 | { |
118 | struct i82860_error_info info; | 123 | struct i82860_error_info info; |
119 | 124 | ||
120 | debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 125 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
121 | i82860_get_error_info(mci, &info); | 126 | i82860_get_error_info(mci, &info); |
122 | i82860_process_error_info(mci, &info, 1); | 127 | i82860_process_error_info(mci, &info, 1); |
123 | } | 128 | } |
@@ -128,6 +133,7 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
128 | int index; | 133 | int index; |
129 | struct mem_ctl_info *mci = NULL; | 134 | struct mem_ctl_info *mci = NULL; |
130 | unsigned long last_cumul_size; | 135 | unsigned long last_cumul_size; |
136 | struct i82860_error_info discard; | ||
131 | 137 | ||
132 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 138 | u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ |
133 | 139 | ||
@@ -140,21 +146,20 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
140 | going to make 1 channel for group. | 146 | going to make 1 channel for group. |
141 | */ | 147 | */ |
142 | mci = edac_mc_alloc(0, 16, 1); | 148 | mci = edac_mc_alloc(0, 16, 1); |
149 | |||
143 | if (!mci) | 150 | if (!mci) |
144 | return -ENOMEM; | 151 | return -ENOMEM; |
145 | 152 | ||
146 | debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); | 153 | debugf3("%s(): init mci\n", __func__); |
147 | |||
148 | mci->pdev = pdev; | 154 | mci->pdev = pdev; |
149 | mci->mtype_cap = MEM_FLAG_DDR; | 155 | mci->mtype_cap = MEM_FLAG_DDR; |
150 | 156 | ||
151 | |||
152 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 157 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
153 | /* I'm not sure about this but I think that all RDRAM is SECDED */ | 158 | /* I'm not sure about this but I think that all RDRAM is SECDED */ |
154 | mci->edac_cap = EDAC_FLAG_SECDED; | 159 | mci->edac_cap = EDAC_FLAG_SECDED; |
155 | /* adjust FLAGS */ | 160 | /* adjust FLAGS */ |
156 | 161 | ||
157 | mci->mod_name = BS_MOD_STR; | 162 | mci->mod_name = EDAC_MOD_STR; |
158 | mci->mod_ver = "$Revision: 1.1.2.6 $"; | 163 | mci->mod_ver = "$Revision: 1.1.2.6 $"; |
159 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; | 164 | mci->ctl_name = i82860_devs[dev_idx].ctl_name; |
160 | mci->edac_check = i82860_check; | 165 | mci->edac_check = i82860_check; |
@@ -175,12 +180,13 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
175 | struct csrow_info *csrow = &mci->csrows[index]; | 180 | struct csrow_info *csrow = &mci->csrows[index]; |
176 | 181 | ||
177 | pci_read_config_word(mci->pdev, I82860_GBA + index * 2, | 182 | pci_read_config_word(mci->pdev, I82860_GBA + index * 2, |
178 | &value); | 183 | &value); |
179 | 184 | ||
180 | cumul_size = (value & I82860_GBA_MASK) << | 185 | cumul_size = (value & I82860_GBA_MASK) << |
181 | (I82860_GBA_SHIFT - PAGE_SHIFT); | 186 | (I82860_GBA_SHIFT - PAGE_SHIFT); |
182 | debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", | 187 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
183 | __func__, index, cumul_size); | 188 | cumul_size); |
189 | |||
184 | if (cumul_size == last_cumul_size) | 190 | if (cumul_size == last_cumul_size) |
185 | continue; /* not populated */ | 191 | continue; /* not populated */ |
186 | 192 | ||
@@ -188,42 +194,43 @@ static int i82860_probe1(struct pci_dev *pdev, int dev_idx) | |||
188 | csrow->last_page = cumul_size - 1; | 194 | csrow->last_page = cumul_size - 1; |
189 | csrow->nr_pages = cumul_size - last_cumul_size; | 195 | csrow->nr_pages = cumul_size - last_cumul_size; |
190 | last_cumul_size = cumul_size; | 196 | last_cumul_size = cumul_size; |
191 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ | 197 | csrow->grain = 1 << 12; /* I82860_EAP has 4KiB resolution */ |
192 | csrow->mtype = MEM_RMBS; | 198 | csrow->mtype = MEM_RMBS; |
193 | csrow->dtype = DEV_UNKNOWN; | 199 | csrow->dtype = DEV_UNKNOWN; |
194 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; | 200 | csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; |
195 | } | 201 | } |
196 | 202 | ||
197 | /* clear counters */ | 203 | i82860_get_error_info(mci, &discard); /* clear counters */ |
198 | pci_write_bits16(mci->pdev, I82860_ERRSTS, 0x0003, 0x0003); | ||
199 | 204 | ||
200 | if (edac_mc_add_mc(mci)) { | 205 | if (edac_mc_add_mc(mci)) { |
201 | debugf3("MC: " __FILE__ | 206 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
202 | ": %s(): failed edac_mc_add_mc()\n", | ||
203 | __func__); | ||
204 | edac_mc_free(mci); | 207 | edac_mc_free(mci); |
205 | } else { | 208 | } else { |
206 | /* get this far and it's successful */ | 209 | /* get this far and it's successful */ |
207 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | 210 | debugf3("%s(): success\n", __func__); |
208 | rc = 0; | 211 | rc = 0; |
209 | } | 212 | } |
213 | |||
210 | return rc; | 214 | return rc; |
211 | } | 215 | } |
212 | 216 | ||
213 | /* returns count (>= 0), or negative on error */ | 217 | /* returns count (>= 0), or negative on error */ |
214 | static int __devinit i82860_init_one(struct pci_dev *pdev, | 218 | static int __devinit i82860_init_one(struct pci_dev *pdev, |
215 | const struct pci_device_id *ent) | 219 | const struct pci_device_id *ent) |
216 | { | 220 | { |
217 | int rc; | 221 | int rc; |
218 | 222 | ||
219 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 223 | debugf0("%s()\n", __func__); |
224 | i82860_printk(KERN_INFO, "i82860 init one\n"); | ||
220 | 225 | ||
221 | printk(KERN_INFO "i82860 init one\n"); | 226 | if (pci_enable_device(pdev) < 0) |
222 | if(pci_enable_device(pdev) < 0) | ||
223 | return -EIO; | 227 | return -EIO; |
228 | |||
224 | rc = i82860_probe1(pdev, ent->driver_data); | 229 | rc = i82860_probe1(pdev, ent->driver_data); |
225 | if(rc == 0) | 230 | |
231 | if (rc == 0) | ||
226 | mci_pdev = pci_dev_get(pdev); | 232 | mci_pdev = pci_dev_get(pdev); |
233 | |||
227 | return rc; | 234 | return rc; |
228 | } | 235 | } |
229 | 236 | ||
@@ -231,23 +238,28 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev) | |||
231 | { | 238 | { |
232 | struct mem_ctl_info *mci; | 239 | struct mem_ctl_info *mci; |
233 | 240 | ||
234 | debugf0(__FILE__ ": %s()\n", __func__); | 241 | debugf0("%s()\n", __func__); |
235 | 242 | ||
236 | mci = edac_mc_find_mci_by_pdev(pdev); | 243 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
237 | if ((mci != NULL) && (edac_mc_del_mc(mci) == 0)) | 244 | return; |
238 | edac_mc_free(mci); | 245 | |
246 | edac_mc_free(mci); | ||
239 | } | 247 | } |
240 | 248 | ||
241 | static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { | 249 | static const struct pci_device_id i82860_pci_tbl[] __devinitdata = { |
242 | {PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 250 | { |
243 | I82860}, | 251 | PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
244 | {0,} /* 0 terminated list. */ | 252 | I82860 |
253 | }, | ||
254 | { | ||
255 | 0, | ||
256 | } /* 0 terminated list. */ | ||
245 | }; | 257 | }; |
246 | 258 | ||
247 | MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); | 259 | MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); |
248 | 260 | ||
249 | static struct pci_driver i82860_driver = { | 261 | static struct pci_driver i82860_driver = { |
250 | .name = BS_MOD_STR, | 262 | .name = EDAC_MOD_STR, |
251 | .probe = i82860_init_one, | 263 | .probe = i82860_init_one, |
252 | .remove = __devexit_p(i82860_remove_one), | 264 | .remove = __devexit_p(i82860_remove_one), |
253 | .id_table = i82860_pci_tbl, | 265 | .id_table = i82860_pci_tbl, |
@@ -257,43 +269,56 @@ static int __init i82860_init(void) | |||
257 | { | 269 | { |
258 | int pci_rc; | 270 | int pci_rc; |
259 | 271 | ||
260 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 272 | debugf3("%s()\n", __func__); |
273 | |||
261 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) | 274 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) |
262 | return pci_rc; | 275 | goto fail0; |
263 | 276 | ||
264 | if (!mci_pdev) { | 277 | if (!mci_pdev) { |
265 | i82860_registered = 0; | ||
266 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, | 278 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
267 | PCI_DEVICE_ID_INTEL_82860_0, NULL); | 279 | PCI_DEVICE_ID_INTEL_82860_0, NULL); |
280 | |||
268 | if (mci_pdev == NULL) { | 281 | if (mci_pdev == NULL) { |
269 | debugf0("860 pci_get_device fail\n"); | 282 | debugf0("860 pci_get_device fail\n"); |
270 | return -ENODEV; | 283 | pci_rc = -ENODEV; |
284 | goto fail1; | ||
271 | } | 285 | } |
286 | |||
272 | pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); | 287 | pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); |
288 | |||
273 | if (pci_rc < 0) { | 289 | if (pci_rc < 0) { |
274 | debugf0("860 init fail\n"); | 290 | debugf0("860 init fail\n"); |
275 | pci_dev_put(mci_pdev); | 291 | pci_rc = -ENODEV; |
276 | return -ENODEV; | 292 | goto fail1; |
277 | } | 293 | } |
278 | } | 294 | } |
295 | |||
279 | return 0; | 296 | return 0; |
297 | |||
298 | fail1: | ||
299 | pci_unregister_driver(&i82860_driver); | ||
300 | |||
301 | fail0: | ||
302 | if (mci_pdev != NULL) | ||
303 | pci_dev_put(mci_pdev); | ||
304 | |||
305 | return pci_rc; | ||
280 | } | 306 | } |
281 | 307 | ||
282 | static void __exit i82860_exit(void) | 308 | static void __exit i82860_exit(void) |
283 | { | 309 | { |
284 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 310 | debugf3("%s()\n", __func__); |
285 | 311 | ||
286 | pci_unregister_driver(&i82860_driver); | 312 | pci_unregister_driver(&i82860_driver); |
287 | if (!i82860_registered) { | 313 | |
288 | i82860_remove_one(mci_pdev); | 314 | if (mci_pdev != NULL) |
289 | pci_dev_put(mci_pdev); | 315 | pci_dev_put(mci_pdev); |
290 | } | ||
291 | } | 316 | } |
292 | 317 | ||
293 | module_init(i82860_init); | 318 | module_init(i82860_init); |
294 | module_exit(i82860_exit); | 319 | module_exit(i82860_exit); |
295 | 320 | ||
296 | MODULE_LICENSE("GPL"); | 321 | MODULE_LICENSE("GPL"); |
297 | MODULE_AUTHOR | 322 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " |
298 | ("Red Hat Inc. (http://www.redhat.com.com) Ben Woodard <woodard@redhat.com>"); | 323 | "Ben Woodard <woodard@redhat.com>"); |
299 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); | 324 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); |
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 1991f94af753..0aec92698f17 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -13,18 +13,19 @@ | |||
13 | * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com | 13 | * Note: E7210 appears same as D82875P - zhenyu.z.wang at intel.com |
14 | */ | 14 | */ |
15 | 15 | ||
16 | |||
17 | #include <linux/config.h> | 16 | #include <linux/config.h> |
18 | #include <linux/module.h> | 17 | #include <linux/module.h> |
19 | #include <linux/init.h> | 18 | #include <linux/init.h> |
20 | |||
21 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
22 | #include <linux/pci_ids.h> | 20 | #include <linux/pci_ids.h> |
23 | |||
24 | #include <linux/slab.h> | 21 | #include <linux/slab.h> |
25 | |||
26 | #include "edac_mc.h" | 22 | #include "edac_mc.h" |
27 | 23 | ||
24 | #define i82875p_printk(level, fmt, arg...) \ | ||
25 | edac_printk(level, "i82875p", fmt, ##arg) | ||
26 | |||
27 | #define i82875p_mc_printk(mci, level, fmt, arg...) \ | ||
28 | edac_mc_chipset_printk(mci, level, "i82875p", fmt, ##arg) | ||
28 | 29 | ||
29 | #ifndef PCI_DEVICE_ID_INTEL_82875_0 | 30 | #ifndef PCI_DEVICE_ID_INTEL_82875_0 |
30 | #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 | 31 | #define PCI_DEVICE_ID_INTEL_82875_0 0x2578 |
@@ -34,11 +35,9 @@ | |||
34 | #define PCI_DEVICE_ID_INTEL_82875_6 0x257e | 35 | #define PCI_DEVICE_ID_INTEL_82875_6 0x257e |
35 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ | 36 | #endif /* PCI_DEVICE_ID_INTEL_82875_6 */ |
36 | 37 | ||
37 | |||
38 | /* four csrows in dual channel, eight in single channel */ | 38 | /* four csrows in dual channel, eight in single channel */ |
39 | #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) | 39 | #define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) |
40 | 40 | ||
41 | |||
42 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ | 41 | /* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ |
43 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) | 42 | #define I82875P_EAP 0x58 /* Error Address Pointer (32b) |
44 | * | 43 | * |
@@ -87,7 +86,6 @@ | |||
87 | * 0 reserved | 86 | * 0 reserved |
88 | */ | 87 | */ |
89 | 88 | ||
90 | |||
91 | /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ | 89 | /* Intel 82875p register addresses - device 6 function 0 - DRAM Controller */ |
92 | #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) | 90 | #define I82875P_PCICMD6 0x04 /* PCI Command Register (16b) |
93 | * | 91 | * |
@@ -151,23 +149,19 @@ | |||
151 | * 1:0 DRAM type 01=DDR | 149 | * 1:0 DRAM type 01=DDR |
152 | */ | 150 | */ |
153 | 151 | ||
154 | |||
155 | enum i82875p_chips { | 152 | enum i82875p_chips { |
156 | I82875P = 0, | 153 | I82875P = 0, |
157 | }; | 154 | }; |
158 | 155 | ||
159 | |||
160 | struct i82875p_pvt { | 156 | struct i82875p_pvt { |
161 | struct pci_dev *ovrfl_pdev; | 157 | struct pci_dev *ovrfl_pdev; |
162 | void __iomem *ovrfl_window; | 158 | void __iomem *ovrfl_window; |
163 | }; | 159 | }; |
164 | 160 | ||
165 | |||
166 | struct i82875p_dev_info { | 161 | struct i82875p_dev_info { |
167 | const char *ctl_name; | 162 | const char *ctl_name; |
168 | }; | 163 | }; |
169 | 164 | ||
170 | |||
171 | struct i82875p_error_info { | 165 | struct i82875p_error_info { |
172 | u16 errsts; | 166 | u16 errsts; |
173 | u32 eap; | 167 | u32 eap; |
@@ -176,17 +170,19 @@ struct i82875p_error_info { | |||
176 | u16 errsts2; | 170 | u16 errsts2; |
177 | }; | 171 | }; |
178 | 172 | ||
179 | |||
180 | static const struct i82875p_dev_info i82875p_devs[] = { | 173 | static const struct i82875p_dev_info i82875p_devs[] = { |
181 | [I82875P] = { | 174 | [I82875P] = { |
182 | .ctl_name = "i82875p"}, | 175 | .ctl_name = "i82875p" |
176 | }, | ||
183 | }; | 177 | }; |
184 | 178 | ||
185 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code | 179 | static struct pci_dev *mci_pdev = NULL; /* init dev: in case that AGP code has |
186 | has already registered driver */ | 180 | * already registered driver |
181 | */ | ||
182 | |||
187 | static int i82875p_registered = 1; | 183 | static int i82875p_registered = 1; |
188 | 184 | ||
189 | static void i82875p_get_error_info (struct mem_ctl_info *mci, | 185 | static void i82875p_get_error_info(struct mem_ctl_info *mci, |
190 | struct i82875p_error_info *info) | 186 | struct i82875p_error_info *info) |
191 | { | 187 | { |
192 | /* | 188 | /* |
@@ -210,15 +206,16 @@ static void i82875p_get_error_info (struct mem_ctl_info *mci, | |||
210 | */ | 206 | */ |
211 | if (!(info->errsts2 & 0x0081)) | 207 | if (!(info->errsts2 & 0x0081)) |
212 | return; | 208 | return; |
209 | |||
213 | if ((info->errsts ^ info->errsts2) & 0x0081) { | 210 | if ((info->errsts ^ info->errsts2) & 0x0081) { |
214 | pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); | 211 | pci_read_config_dword(mci->pdev, I82875P_EAP, &info->eap); |
215 | pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); | 212 | pci_read_config_byte(mci->pdev, I82875P_DES, &info->des); |
216 | pci_read_config_byte(mci->pdev, I82875P_DERRSYN, | 213 | pci_read_config_byte(mci->pdev, I82875P_DERRSYN, |
217 | &info->derrsyn); | 214 | &info->derrsyn); |
218 | } | 215 | } |
219 | } | 216 | } |
220 | 217 | ||
221 | static int i82875p_process_error_info (struct mem_ctl_info *mci, | 218 | static int i82875p_process_error_info(struct mem_ctl_info *mci, |
222 | struct i82875p_error_info *info, int handle_errors) | 219 | struct i82875p_error_info *info, int handle_errors) |
223 | { | 220 | { |
224 | int row, multi_chan; | 221 | int row, multi_chan; |
@@ -243,23 +240,21 @@ static int i82875p_process_error_info (struct mem_ctl_info *mci, | |||
243 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); | 240 | edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); |
244 | else | 241 | else |
245 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, | 242 | edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, |
246 | multi_chan ? (info->des & 0x1) : 0, | 243 | multi_chan ? (info->des & 0x1) : 0, |
247 | "i82875p CE"); | 244 | "i82875p CE"); |
248 | 245 | ||
249 | return 1; | 246 | return 1; |
250 | } | 247 | } |
251 | 248 | ||
252 | |||
253 | static void i82875p_check(struct mem_ctl_info *mci) | 249 | static void i82875p_check(struct mem_ctl_info *mci) |
254 | { | 250 | { |
255 | struct i82875p_error_info info; | 251 | struct i82875p_error_info info; |
256 | 252 | ||
257 | debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 253 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
258 | i82875p_get_error_info(mci, &info); | 254 | i82875p_get_error_info(mci, &info); |
259 | i82875p_process_error_info(mci, &info, 1); | 255 | i82875p_process_error_info(mci, &info, 1); |
260 | } | 256 | } |
261 | 257 | ||
262 | |||
263 | #ifdef CONFIG_PROC_FS | 258 | #ifdef CONFIG_PROC_FS |
264 | extern int pci_proc_attach_device(struct pci_dev *); | 259 | extern int pci_proc_attach_device(struct pci_dev *); |
265 | #endif | 260 | #endif |
@@ -273,15 +268,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
273 | unsigned long last_cumul_size; | 268 | unsigned long last_cumul_size; |
274 | struct pci_dev *ovrfl_pdev; | 269 | struct pci_dev *ovrfl_pdev; |
275 | void __iomem *ovrfl_window = NULL; | 270 | void __iomem *ovrfl_window = NULL; |
276 | |||
277 | u32 drc; | 271 | u32 drc; |
278 | u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ | 272 | u32 drc_chan; /* Number of channels 0=1chan,1=2chan */ |
279 | u32 nr_chans; | 273 | u32 nr_chans; |
280 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ | 274 | u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ |
275 | struct i82875p_error_info discard; | ||
281 | 276 | ||
282 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 277 | debugf0("%s()\n", __func__); |
283 | 278 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | |
284 | ovrfl_pdev = pci_find_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | ||
285 | 279 | ||
286 | if (!ovrfl_pdev) { | 280 | if (!ovrfl_pdev) { |
287 | /* | 281 | /* |
@@ -292,71 +286,69 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
292 | */ | 286 | */ |
293 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); | 287 | pci_write_bits8(pdev, 0xf4, 0x2, 0x2); |
294 | ovrfl_pdev = | 288 | ovrfl_pdev = |
295 | pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); | 289 | pci_scan_single_device(pdev->bus, PCI_DEVFN(6, 0)); |
290 | |||
296 | if (!ovrfl_pdev) | 291 | if (!ovrfl_pdev) |
297 | goto fail; | 292 | return -ENODEV; |
298 | } | 293 | } |
294 | |||
299 | #ifdef CONFIG_PROC_FS | 295 | #ifdef CONFIG_PROC_FS |
300 | if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { | 296 | if (!ovrfl_pdev->procent && pci_proc_attach_device(ovrfl_pdev)) { |
301 | printk(KERN_ERR "MC: " __FILE__ | 297 | i82875p_printk(KERN_ERR, |
302 | ": %s(): Failed to attach overflow device\n", | 298 | "%s(): Failed to attach overflow device\n", __func__); |
303 | __func__); | 299 | return -ENODEV; |
304 | goto fail; | ||
305 | } | 300 | } |
306 | #endif /* CONFIG_PROC_FS */ | 301 | #endif |
302 | /* CONFIG_PROC_FS */ | ||
307 | if (pci_enable_device(ovrfl_pdev)) { | 303 | if (pci_enable_device(ovrfl_pdev)) { |
308 | printk(KERN_ERR "MC: " __FILE__ | 304 | i82875p_printk(KERN_ERR, |
309 | ": %s(): Failed to enable overflow device\n", | 305 | "%s(): Failed to enable overflow device\n", __func__); |
310 | __func__); | 306 | return -ENODEV; |
311 | goto fail; | ||
312 | } | 307 | } |
313 | 308 | ||
314 | if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { | 309 | if (pci_request_regions(ovrfl_pdev, pci_name(ovrfl_pdev))) { |
315 | #ifdef CORRECT_BIOS | 310 | #ifdef CORRECT_BIOS |
316 | goto fail; | 311 | goto fail0; |
317 | #endif | 312 | #endif |
318 | } | 313 | } |
314 | |||
319 | /* cache is irrelevant for PCI bus reads/writes */ | 315 | /* cache is irrelevant for PCI bus reads/writes */ |
320 | ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), | 316 | ovrfl_window = ioremap_nocache(pci_resource_start(ovrfl_pdev, 0), |
321 | pci_resource_len(ovrfl_pdev, 0)); | 317 | pci_resource_len(ovrfl_pdev, 0)); |
322 | 318 | ||
323 | if (!ovrfl_window) { | 319 | if (!ovrfl_window) { |
324 | printk(KERN_ERR "MC: " __FILE__ | 320 | i82875p_printk(KERN_ERR, "%s(): Failed to ioremap bar6\n", |
325 | ": %s(): Failed to ioremap bar6\n", __func__); | 321 | __func__); |
326 | goto fail; | 322 | goto fail1; |
327 | } | 323 | } |
328 | 324 | ||
329 | /* need to find out the number of channels */ | 325 | /* need to find out the number of channels */ |
330 | drc = readl(ovrfl_window + I82875P_DRC); | 326 | drc = readl(ovrfl_window + I82875P_DRC); |
331 | drc_chan = ((drc >> 21) & 0x1); | 327 | drc_chan = ((drc >> 21) & 0x1); |
332 | nr_chans = drc_chan + 1; | 328 | nr_chans = drc_chan + 1; |
333 | drc_ddim = (drc >> 18) & 0x1; | ||
334 | 329 | ||
330 | drc_ddim = (drc >> 18) & 0x1; | ||
335 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), | 331 | mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans), |
336 | nr_chans); | 332 | nr_chans); |
337 | 333 | ||
338 | if (!mci) { | 334 | if (!mci) { |
339 | rc = -ENOMEM; | 335 | rc = -ENOMEM; |
340 | goto fail; | 336 | goto fail2; |
341 | } | 337 | } |
342 | 338 | ||
343 | debugf3("MC: " __FILE__ ": %s(): init mci\n", __func__); | 339 | debugf3("%s(): init mci\n", __func__); |
344 | |||
345 | mci->pdev = pdev; | 340 | mci->pdev = pdev; |
346 | mci->mtype_cap = MEM_FLAG_DDR; | 341 | mci->mtype_cap = MEM_FLAG_DDR; |
347 | |||
348 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; | 342 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; |
349 | mci->edac_cap = EDAC_FLAG_UNKNOWN; | 343 | mci->edac_cap = EDAC_FLAG_UNKNOWN; |
350 | /* adjust FLAGS */ | 344 | /* adjust FLAGS */ |
351 | 345 | ||
352 | mci->mod_name = BS_MOD_STR; | 346 | mci->mod_name = EDAC_MOD_STR; |
353 | mci->mod_ver = "$Revision: 1.5.2.11 $"; | 347 | mci->mod_ver = "$Revision: 1.5.2.11 $"; |
354 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; | 348 | mci->ctl_name = i82875p_devs[dev_idx].ctl_name; |
355 | mci->edac_check = i82875p_check; | 349 | mci->edac_check = i82875p_check; |
356 | mci->ctl_page_to_phys = NULL; | 350 | mci->ctl_page_to_phys = NULL; |
357 | 351 | debugf3("%s(): init pvt\n", __func__); | |
358 | debugf3("MC: " __FILE__ ": %s(): init pvt\n", __func__); | ||
359 | |||
360 | pvt = (struct i82875p_pvt *) mci->pvt_info; | 352 | pvt = (struct i82875p_pvt *) mci->pvt_info; |
361 | pvt->ovrfl_pdev = ovrfl_pdev; | 353 | pvt->ovrfl_pdev = ovrfl_pdev; |
362 | pvt->ovrfl_window = ovrfl_window; | 354 | pvt->ovrfl_window = ovrfl_window; |
@@ -374,8 +366,9 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
374 | 366 | ||
375 | value = readb(ovrfl_window + I82875P_DRB + index); | 367 | value = readb(ovrfl_window + I82875P_DRB + index); |
376 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); | 368 | cumul_size = value << (I82875P_DRB_SHIFT - PAGE_SHIFT); |
377 | debugf3("MC: " __FILE__ ": %s(): (%d) cumul_size 0x%x\n", | 369 | debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, |
378 | __func__, index, cumul_size); | 370 | cumul_size); |
371 | |||
379 | if (cumul_size == last_cumul_size) | 372 | if (cumul_size == last_cumul_size) |
380 | continue; /* not populated */ | 373 | continue; /* not populated */ |
381 | 374 | ||
@@ -383,71 +376,72 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
383 | csrow->last_page = cumul_size - 1; | 376 | csrow->last_page = cumul_size - 1; |
384 | csrow->nr_pages = cumul_size - last_cumul_size; | 377 | csrow->nr_pages = cumul_size - last_cumul_size; |
385 | last_cumul_size = cumul_size; | 378 | last_cumul_size = cumul_size; |
386 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ | 379 | csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB resolution */ |
387 | csrow->mtype = MEM_DDR; | 380 | csrow->mtype = MEM_DDR; |
388 | csrow->dtype = DEV_UNKNOWN; | 381 | csrow->dtype = DEV_UNKNOWN; |
389 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; | 382 | csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; |
390 | } | 383 | } |
391 | 384 | ||
392 | /* clear counters */ | 385 | i82875p_get_error_info(mci, &discard); /* clear counters */ |
393 | pci_write_bits16(mci->pdev, I82875P_ERRSTS, 0x0081, 0x0081); | ||
394 | 386 | ||
395 | if (edac_mc_add_mc(mci)) { | 387 | if (edac_mc_add_mc(mci)) { |
396 | debugf3("MC: " __FILE__ | 388 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
397 | ": %s(): failed edac_mc_add_mc()\n", __func__); | 389 | goto fail3; |
398 | goto fail; | ||
399 | } | 390 | } |
400 | 391 | ||
401 | /* get this far and it's successful */ | 392 | /* get this far and it's successful */ |
402 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | 393 | debugf3("%s(): success\n", __func__); |
403 | return 0; | 394 | return 0; |
404 | 395 | ||
405 | fail: | 396 | fail3: |
406 | if (mci) | 397 | edac_mc_free(mci); |
407 | edac_mc_free(mci); | ||
408 | 398 | ||
409 | if (ovrfl_window) | 399 | fail2: |
410 | iounmap(ovrfl_window); | 400 | iounmap(ovrfl_window); |
411 | 401 | ||
412 | if (ovrfl_pdev) { | 402 | fail1: |
413 | pci_release_regions(ovrfl_pdev); | 403 | pci_release_regions(ovrfl_pdev); |
414 | pci_disable_device(ovrfl_pdev); | ||
415 | } | ||
416 | 404 | ||
405 | #ifdef CORRECT_BIOS | ||
406 | fail0: | ||
407 | #endif | ||
408 | pci_disable_device(ovrfl_pdev); | ||
417 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ | 409 | /* NOTE: the ovrfl proc entry and pci_dev are intentionally left */ |
418 | return rc; | 410 | return rc; |
419 | } | 411 | } |
420 | 412 | ||
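The restructured probe above replaces one catch-all fail label with a ladder (fail1/fail2/fail3), each label releasing exactly the resources acquired before the failing step, in reverse order. A stand-alone sketch of the same control flow with placeholder acquire/release steps:

    #include <stdio.h>

    static int acquire(const char *what, int ok)
    {
        printf("acquire %s: %s\n", what, ok ? "ok" : "FAILED");
        return ok ? 0 : -1;
    }

    static void release(const char *what)
    {
        printf("release %s\n", what);
    }

    static int probe(int fail_at)
    {
        int rc = -1;

        if (acquire("regions", fail_at != 1))
            goto fail0;
        if (acquire("mapping", fail_at != 2))
            goto fail1;
        if (acquire("mci", fail_at != 3))
            goto fail2;

        return 0;                 /* success: keep everything acquired */

    fail2:
        release("mapping");       /* undo step 2 */
    fail1:
        release("regions");       /* undo step 1 */
    fail0:
        return rc;
    }

    int main(void)
    {
        probe(3);                 /* fails at the last step, unwinds 2 then 1 */
        return 0;
    }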
421 | |||
422 | /* returns count (>= 0), or negative on error */ | 413 | /* returns count (>= 0), or negative on error */ |
423 | static int __devinit i82875p_init_one(struct pci_dev *pdev, | 414 | static int __devinit i82875p_init_one(struct pci_dev *pdev, |
424 | const struct pci_device_id *ent) | 415 | const struct pci_device_id *ent) |
425 | { | 416 | { |
426 | int rc; | 417 | int rc; |
427 | 418 | ||
428 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 419 | debugf0("%s()\n", __func__); |
420 | i82875p_printk(KERN_INFO, "i82875p init one\n"); | ||
429 | 421 | ||
430 | printk(KERN_INFO "i82875p init one\n"); | 422 | if (pci_enable_device(pdev) < 0) |
431 | if(pci_enable_device(pdev) < 0) | ||
432 | return -EIO; | 423 | return -EIO; |
424 | |||
433 | rc = i82875p_probe1(pdev, ent->driver_data); | 425 | rc = i82875p_probe1(pdev, ent->driver_data); |
426 | |||
434 | if (mci_pdev == NULL) | 427 | if (mci_pdev == NULL) |
435 | mci_pdev = pci_dev_get(pdev); | 428 | mci_pdev = pci_dev_get(pdev); |
429 | |||
436 | return rc; | 430 | return rc; |
437 | } | 431 | } |
438 | 432 | ||
439 | |||
440 | static void __devexit i82875p_remove_one(struct pci_dev *pdev) | 433 | static void __devexit i82875p_remove_one(struct pci_dev *pdev) |
441 | { | 434 | { |
442 | struct mem_ctl_info *mci; | 435 | struct mem_ctl_info *mci; |
443 | struct i82875p_pvt *pvt = NULL; | 436 | struct i82875p_pvt *pvt = NULL; |
444 | 437 | ||
445 | debugf0(__FILE__ ": %s()\n", __func__); | 438 | debugf0("%s()\n", __func__); |
446 | 439 | ||
447 | if ((mci = edac_mc_find_mci_by_pdev(pdev)) == NULL) | 440 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
448 | return; | 441 | return; |
449 | 442 | ||
450 | pvt = (struct i82875p_pvt *) mci->pvt_info; | 443 | pvt = (struct i82875p_pvt *) mci->pvt_info; |
444 | |||
451 | if (pvt->ovrfl_window) | 445 | if (pvt->ovrfl_window) |
452 | iounmap(pvt->ovrfl_window); | 446 | iounmap(pvt->ovrfl_window); |
453 | 447 | ||
@@ -459,74 +453,84 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev) | |||
459 | pci_dev_put(pvt->ovrfl_pdev); | 453 | pci_dev_put(pvt->ovrfl_pdev); |
460 | } | 454 | } |
461 | 455 | ||
462 | if (edac_mc_del_mc(mci)) | ||
463 | return; | ||
464 | |||
465 | edac_mc_free(mci); | 456 | edac_mc_free(mci); |
466 | } | 457 | } |
467 | 458 | ||
468 | |||
469 | static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { | 459 | static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = { |
470 | {PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 460 | { |
471 | I82875P}, | 461 | PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
472 | {0,} /* 0 terminated list. */ | 462 | I82875P |
463 | }, | ||
464 | { | ||
465 | 0, | ||
466 | } /* 0 terminated list. */ | ||
473 | }; | 467 | }; |
474 | 468 | ||
475 | MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); | 469 | MODULE_DEVICE_TABLE(pci, i82875p_pci_tbl); |
476 | 470 | ||
477 | |||
478 | static struct pci_driver i82875p_driver = { | 471 | static struct pci_driver i82875p_driver = { |
479 | .name = BS_MOD_STR, | 472 | .name = EDAC_MOD_STR, |
480 | .probe = i82875p_init_one, | 473 | .probe = i82875p_init_one, |
481 | .remove = __devexit_p(i82875p_remove_one), | 474 | .remove = __devexit_p(i82875p_remove_one), |
482 | .id_table = i82875p_pci_tbl, | 475 | .id_table = i82875p_pci_tbl, |
483 | }; | 476 | }; |
484 | 477 | ||
485 | |||
486 | static int __init i82875p_init(void) | 478 | static int __init i82875p_init(void) |
487 | { | 479 | { |
488 | int pci_rc; | 480 | int pci_rc; |
489 | 481 | ||
490 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 482 | debugf3("%s()\n", __func__); |
491 | pci_rc = pci_register_driver(&i82875p_driver); | 483 | pci_rc = pci_register_driver(&i82875p_driver); |
484 | |||
492 | if (pci_rc < 0) | 485 | if (pci_rc < 0) |
493 | return pci_rc; | 486 | goto fail0; |
487 | |||
494 | if (mci_pdev == NULL) { | 488 | if (mci_pdev == NULL) { |
495 | i82875p_registered = 0; | 489 | mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, |
496 | mci_pdev = | 490 | PCI_DEVICE_ID_INTEL_82875_0, NULL); |
497 | pci_get_device(PCI_VENDOR_ID_INTEL, | 491 | |
498 | PCI_DEVICE_ID_INTEL_82875_0, NULL); | ||
499 | if (!mci_pdev) { | 492 | if (!mci_pdev) { |
500 | debugf0("875p pci_get_device fail\n"); | 493 | debugf0("875p pci_get_device fail\n"); |
501 | return -ENODEV; | 494 | pci_rc = -ENODEV; |
495 | goto fail1; | ||
502 | } | 496 | } |
497 | |||
503 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); | 498 | pci_rc = i82875p_init_one(mci_pdev, i82875p_pci_tbl); |
499 | |||
504 | if (pci_rc < 0) { | 500 | if (pci_rc < 0) { |
505 | debugf0("875p init fail\n"); | 501 | debugf0("875p init fail\n"); |
506 | pci_dev_put(mci_pdev); | 502 | pci_rc = -ENODEV; |
507 | return -ENODEV; | 503 | goto fail1; |
508 | } | 504 | } |
509 | } | 505 | } |
506 | |||
510 | return 0; | 507 | return 0; |
511 | } | ||
512 | 508 | ||
509 | fail1: | ||
510 | pci_unregister_driver(&i82875p_driver); | ||
511 | |||
512 | fail0: | ||
513 | if (mci_pdev != NULL) | ||
514 | pci_dev_put(mci_pdev); | ||
515 | |||
516 | return pci_rc; | ||
517 | } | ||
513 | 518 | ||
514 | static void __exit i82875p_exit(void) | 519 | static void __exit i82875p_exit(void) |
515 | { | 520 | { |
516 | debugf3("MC: " __FILE__ ": %s()\n", __func__); | 521 | debugf3("%s()\n", __func__); |
517 | 522 | ||
518 | pci_unregister_driver(&i82875p_driver); | 523 | pci_unregister_driver(&i82875p_driver); |
524 | |||
519 | if (!i82875p_registered) { | 525 | if (!i82875p_registered) { |
520 | i82875p_remove_one(mci_pdev); | 526 | i82875p_remove_one(mci_pdev); |
521 | pci_dev_put(mci_pdev); | 527 | pci_dev_put(mci_pdev); |
522 | } | 528 | } |
523 | } | 529 | } |
524 | 530 | ||
525 | |||
526 | module_init(i82875p_init); | 531 | module_init(i82875p_init); |
527 | module_exit(i82875p_exit); | 532 | module_exit(i82875p_exit); |
528 | 533 | ||
529 | |||
530 | MODULE_LICENSE("GPL"); | 534 | MODULE_LICENSE("GPL"); |
531 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); | 535 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); |
532 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); | 536 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); |
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index e90892831b90..2c29fafe67c7 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -18,14 +18,17 @@ | |||
18 | #include <linux/config.h> | 18 | #include <linux/config.h> |
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/init.h> | 20 | #include <linux/init.h> |
21 | |||
22 | #include <linux/pci.h> | 21 | #include <linux/pci.h> |
23 | #include <linux/pci_ids.h> | 22 | #include <linux/pci_ids.h> |
24 | |||
25 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
26 | |||
27 | #include "edac_mc.h" | 24 | #include "edac_mc.h" |
28 | 25 | ||
26 | #define r82600_printk(level, fmt, arg...) \ | ||
27 | edac_printk(level, "r82600", fmt, ##arg) | ||
28 | |||
29 | #define r82600_mc_printk(mci, level, fmt, arg...) \ | ||
30 | edac_mc_chipset_printk(mci, level, "r82600", fmt, ##arg) | ||
31 | |||
29 | /* Radisys say "The 82600 integrates a main memory SDRAM controller that | 32 | /* Radisys say "The 82600 integrates a main memory SDRAM controller that |
30 | * supports up to four banks of memory. The four banks can support a mix of | 33 | * supports up to four banks of memory. The four banks can support a mix of |
31 | * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, | 34 | * sizes of 64 bit wide (72 bits with ECC) Synchronous DRAM (SDRAM) DIMMs, |
@@ -126,10 +129,8 @@ struct r82600_error_info { | |||
126 | u32 eapr; | 129 | u32 eapr; |
127 | }; | 130 | }; |
128 | 131 | ||
129 | |||
130 | static unsigned int disable_hardware_scrub = 0; | 132 | static unsigned int disable_hardware_scrub = 0; |
131 | 133 | ||
132 | |||
133 | static void r82600_get_error_info (struct mem_ctl_info *mci, | 134 | static void r82600_get_error_info (struct mem_ctl_info *mci, |
134 | struct r82600_error_info *info) | 135 | struct r82600_error_info *info) |
135 | { | 136 | { |
@@ -138,17 +139,16 @@ static void r82600_get_error_info (struct mem_ctl_info *mci, | |||
138 | if (info->eapr & BIT(0)) | 139 | if (info->eapr & BIT(0)) |
139 | /* Clear error to allow next error to be reported [p.62] */ | 140 | /* Clear error to allow next error to be reported [p.62] */ |
140 | pci_write_bits32(mci->pdev, R82600_EAP, | 141 | pci_write_bits32(mci->pdev, R82600_EAP, |
141 | ((u32) BIT(0) & (u32) BIT(1)), | 142 | ((u32) BIT(0) & (u32) BIT(1)), |
142 | ((u32) BIT(0) & (u32) BIT(1))); | 143 | ((u32) BIT(0) & (u32) BIT(1))); |
143 | 144 | ||
144 | if (info->eapr & BIT(1)) | 145 | if (info->eapr & BIT(1)) |
145 | /* Clear error to allow next error to be reported [p.62] */ | 146 | /* Clear error to allow next error to be reported [p.62] */ |
146 | pci_write_bits32(mci->pdev, R82600_EAP, | 147 | pci_write_bits32(mci->pdev, R82600_EAP, |
147 | ((u32) BIT(0) & (u32) BIT(1)), | 148 | ((u32) BIT(0) & (u32) BIT(1)), |
148 | ((u32) BIT(0) & (u32) BIT(1))); | 149 | ((u32) BIT(0) & (u32) BIT(1))); |
149 | } | 150 | } |
150 | 151 | ||
151 | |||
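The EAP writes above compose their bit arguments from the BIT() macro defined in edac_mc.h; the arithmetic is worth spelling out, since OR-ing two distinct single-bit values gives a two-bit mask (0x3) while AND-ing them always gives 0. A one-file check:

    #include <stdio.h>

    #define BIT(x) (1 << (x))

    int main(void)
    {
        printf("BIT(0) | BIT(1) = 0x%x\n", BIT(0) | BIT(1));    /* 0x3 */
        printf("BIT(0) & BIT(1) = 0x%x\n", BIT(0) & BIT(1));    /* 0x0 */
        return 0;
    }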
152 | static int r82600_process_error_info (struct mem_ctl_info *mci, | 152 | static int r82600_process_error_info (struct mem_ctl_info *mci, |
153 | struct r82600_error_info *info, int handle_errors) | 153 | struct r82600_error_info *info, int handle_errors) |
154 | { | 154 | { |
@@ -167,26 +167,25 @@ static int r82600_process_error_info (struct mem_ctl_info *mci, | |||
167 | * granularity (upper 19 bits only) */ | 167 | * granularity (upper 19 bits only) */ |
168 | page = eapaddr >> PAGE_SHIFT; | 168 | page = eapaddr >> PAGE_SHIFT; |
169 | 169 | ||
170 | if (info->eapr & BIT(0)) { /* CE? */ | 170 | if (info->eapr & BIT(0)) { /* CE? */ |
171 | error_found = 1; | 171 | error_found = 1; |
172 | 172 | ||
173 | if (handle_errors) | 173 | if (handle_errors) |
174 | edac_mc_handle_ce( | 174 | edac_mc_handle_ce(mci, page, 0, /* not avail */ |
175 | mci, page, 0, /* not avail */ | 175 | syndrome, |
176 | syndrome, | 176 | edac_mc_find_csrow_by_page(mci, page), |
177 | edac_mc_find_csrow_by_page(mci, page), | 177 | 0, /* channel */ |
178 | 0, /* channel */ | 178 | mci->ctl_name); |
179 | mci->ctl_name); | ||
180 | } | 179 | } |
181 | 180 | ||
182 | if (info->eapr & BIT(1)) { /* UE? */ | 181 | if (info->eapr & BIT(1)) { /* UE? */ |
183 | error_found = 1; | 182 | error_found = 1; |
184 | 183 | ||
185 | if (handle_errors) | 184 | if (handle_errors) |
186 | /* 82600 doesn't give enough info */ | 185 | /* 82600 doesn't give enough info */ |
187 | edac_mc_handle_ue(mci, page, 0, | 186 | edac_mc_handle_ue(mci, page, 0, |
188 | edac_mc_find_csrow_by_page(mci, page), | 187 | edac_mc_find_csrow_by_page(mci, page), |
189 | mci->ctl_name); | 188 | mci->ctl_name); |
190 | } | 189 | } |
191 | 190 | ||
192 | return error_found; | 191 | return error_found; |
@@ -196,7 +195,7 @@ static void r82600_check(struct mem_ctl_info *mci) | |||
196 | { | 195 | { |
197 | struct r82600_error_info info; | 196 | struct r82600_error_info info; |
198 | 197 | ||
199 | debugf1("MC%d: " __FILE__ ": %s()\n", mci->mc_idx, __func__); | 198 | debugf1("MC%d: %s()\n", mci->mc_idx, __func__); |
200 | r82600_get_error_info(mci, &info); | 199 | r82600_get_error_info(mci, &info); |
201 | r82600_process_error_info(mci, &info, 1); | 200 | r82600_process_error_info(mci, &info, 1); |
202 | } | 201 | } |
@@ -213,25 +212,18 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
213 | u32 scrub_disabled; | 212 | u32 scrub_disabled; |
214 | u32 sdram_refresh_rate; | 213 | u32 sdram_refresh_rate; |
215 | u32 row_high_limit_last = 0; | 214 | u32 row_high_limit_last = 0; |
216 | u32 eap_init_bits; | 215 | struct r82600_error_info discard; |
217 | |||
218 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | ||
219 | |||
220 | 216 | ||
217 | debugf0("%s()\n", __func__); | ||
221 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); | 218 | pci_read_config_byte(pdev, R82600_DRAMC, &dramcr); |
222 | pci_read_config_dword(pdev, R82600_EAP, &eapr); | 219 | pci_read_config_dword(pdev, R82600_EAP, &eapr); |
223 | |||
224 | ecc_on = dramcr & BIT(5); | 220 | ecc_on = dramcr & BIT(5); |
225 | reg_sdram = dramcr & BIT(4); | 221 | reg_sdram = dramcr & BIT(4); |
226 | scrub_disabled = eapr & BIT(31); | 222 | scrub_disabled = eapr & BIT(31); |
227 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); | 223 | sdram_refresh_rate = dramcr & (BIT(0) | BIT(1)); |
228 | 224 | debugf2("%s(): sdram refresh rate = %#0x\n", __func__, | |
229 | debugf2("MC: " __FILE__ ": %s(): sdram refresh rate = %#0x\n", | 225 | sdram_refresh_rate); |
230 | __func__, sdram_refresh_rate); | 226 | debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); |
231 | |||
232 | debugf2("MC: " __FILE__ ": %s(): DRAMC register = %#0x\n", __func__, | ||
233 | dramcr); | ||
234 | |||
235 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); | 227 | mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS); |
236 | 228 | ||
237 | if (mci == NULL) { | 229 | if (mci == NULL) { |
@@ -239,29 +231,28 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
239 | goto fail; | 231 | goto fail; |
240 | } | 232 | } |
241 | 233 | ||
242 | debugf0("MC: " __FILE__ ": %s(): mci = %p\n", __func__, mci); | 234 | debugf0("%s(): mci = %p\n", __func__, mci); |
243 | |||
244 | mci->pdev = pdev; | 235 | mci->pdev = pdev; |
245 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; | 236 | mci->mtype_cap = MEM_FLAG_RDDR | MEM_FLAG_DDR; |
246 | |||
247 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 237 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
248 | /* FIXME try to work out if the chip leads have been * | 238 | /* FIXME try to work out if the chip leads have been used for COM2 |
249 | * used for COM2 instead on this board? [MA6?] MAYBE: */ | 239 | * instead on this board? [MA6?] MAYBE: |
240 | */ | ||
250 | 241 | ||
251 | /* On the R82600, the pins for memory bits 72:65 - i.e. the * | 242 | /* On the R82600, the pins for memory bits 72:65 - i.e. the * |
252 | * EC bits are shared with the pins for COM2 (!), so if COM2 * | 243 | * EC bits are shared with the pins for COM2 (!), so if COM2 * |
253 | * is enabled, we assume COM2 is wired up, and thus no EDAC * | 244 | * is enabled, we assume COM2 is wired up, and thus no EDAC * |
254 | * is possible. */ | 245 | * is possible. */ |
255 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; | 246 | mci->edac_cap = EDAC_FLAG_NONE | EDAC_FLAG_EC | EDAC_FLAG_SECDED; |
247 | |||
256 | if (ecc_on) { | 248 | if (ecc_on) { |
257 | if (scrub_disabled) | 249 | if (scrub_disabled) |
258 | debugf3("MC: " __FILE__ ": %s(): mci = %p - " | 250 | debugf3("%s(): mci = %p - Scrubbing disabled! EAP: " |
259 | "Scrubbing disabled! EAP: %#0x\n", __func__, | 251 | "%#0x\n", __func__, mci, eapr); |
260 | mci, eapr); | ||
261 | } else | 252 | } else |
262 | mci->edac_cap = EDAC_FLAG_NONE; | 253 | mci->edac_cap = EDAC_FLAG_NONE; |
263 | 254 | ||
264 | mci->mod_name = BS_MOD_STR; | 255 | mci->mod_name = EDAC_MOD_STR; |
265 | mci->mod_ver = "$Revision: 1.1.2.6 $"; | 256 | mci->mod_ver = "$Revision: 1.1.2.6 $"; |
266 | mci->ctl_name = "R82600"; | 257 | mci->ctl_name = "R82600"; |
267 | mci->edac_check = r82600_check; | 258 | mci->edac_check = r82600_check; |
@@ -276,23 +267,21 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
276 | /* find the DRAM Chip Select Base address and mask */ | 267 | /* find the DRAM Chip Select Base address and mask */ |
277 | pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar); | 268 | pci_read_config_byte(mci->pdev, R82600_DRBA + index, &drbar); |
278 | 269 | ||
279 | debugf1("MC%d: " __FILE__ ": %s() Row=%d DRBA = %#0x\n", | 270 | debugf1("MC%d: %s() Row=%d DRBA = %#0x\n", mci->mc_idx, |
280 | mci->mc_idx, __func__, index, drbar); | 271 | __func__, index, drbar); |
281 | 272 | ||
282 | row_high_limit = ((u32) drbar << 24); | 273 | row_high_limit = ((u32) drbar << 24); |
283 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ | 274 | /* row_high_limit = ((u32)drbar << 24) | 0xffffffUL; */ |
284 | 275 | ||
285 | debugf1("MC%d: " __FILE__ ": %s() Row=%d, " | 276 | debugf1("MC%d: %s() Row=%d, Boundary Address=%#0x, Last = " |
286 | "Boundary Address=%#0x, Last = %#0x \n", | 277 | "%#0x \n", mci->mc_idx, __func__, index, |
287 | mci->mc_idx, __func__, index, row_high_limit, | 278 | row_high_limit, row_high_limit_last); |
288 | row_high_limit_last); | ||
289 | 279 | ||
290 | /* Empty row [p.57] */ | 280 | /* Empty row [p.57] */ |
291 | if (row_high_limit == row_high_limit_last) | 281 | if (row_high_limit == row_high_limit_last) |
292 | continue; | 282 | continue; |
293 | 283 | ||
294 | row_base = row_high_limit_last; | 284 | row_base = row_high_limit_last; |
295 | |||
296 | csrow->first_page = row_base >> PAGE_SHIFT; | 285 | csrow->first_page = row_base >> PAGE_SHIFT; |
297 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; | 286 | csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; |
298 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; | 287 | csrow->nr_pages = csrow->last_page - csrow->first_page + 1; |
@@ -308,31 +297,22 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx) | |||
308 | row_high_limit_last = row_high_limit; | 297 | row_high_limit_last = row_high_limit; |
309 | } | 298 | } |
310 | 299 | ||
311 | /* clear counters */ | 300 | r82600_get_error_info(mci, &discard); /* clear counters */ |
312 | /* FIXME should we? */ | ||
313 | 301 | ||
314 | if (edac_mc_add_mc(mci)) { | 302 | if (edac_mc_add_mc(mci)) { |
315 | debugf3("MC: " __FILE__ | 303 | debugf3("%s(): failed edac_mc_add_mc()\n", __func__); |
316 | ": %s(): failed edac_mc_add_mc()\n", __func__); | ||
317 | goto fail; | 304 | goto fail; |
318 | } | 305 | } |
319 | 306 | ||
320 | /* get this far and it's successful */ | 307 | /* get this far and it's successful */ |
321 | 308 | ||
322 | /* Clear error flags to allow next error to be reported [p.62] */ | ||
323 | /* Test systems seem to always have the UE flag raised on boot */ | ||
324 | |||
325 | eap_init_bits = BIT(0) & BIT(1); | ||
326 | if (disable_hardware_scrub) { | 309 | if (disable_hardware_scrub) { |
327 | eap_init_bits |= BIT(31); | 310 | debugf3("%s(): Disabling Hardware Scrub (scrub on error)\n", |
328 | debugf3("MC: " __FILE__ ": %s(): Disabling Hardware Scrub " | 311 | __func__); |
329 | "(scrub on error)\n", __func__); | 312 | pci_write_bits32(mci->pdev, R82600_EAP, BIT(31), BIT(31)); |
330 | } | 313 | } |
331 | 314 | ||
332 | pci_write_bits32(mci->pdev, R82600_EAP, eap_init_bits, | 315 | debugf3("%s(): success\n", __func__); |
333 | eap_init_bits); | ||
334 | |||
335 | debugf3("MC: " __FILE__ ": %s(): success\n", __func__); | ||
336 | return 0; | 316 | return 0; |
337 | 317 | ||
338 | fail: | 318 | fail: |
@@ -344,62 +324,60 @@ fail: | |||
344 | 324 | ||
345 | /* returns count (>= 0), or negative on error */ | 325 | /* returns count (>= 0), or negative on error */ |
346 | static int __devinit r82600_init_one(struct pci_dev *pdev, | 326 | static int __devinit r82600_init_one(struct pci_dev *pdev, |
347 | const struct pci_device_id *ent) | 327 | const struct pci_device_id *ent) |
348 | { | 328 | { |
349 | debugf0("MC: " __FILE__ ": %s()\n", __func__); | 329 | debugf0("%s()\n", __func__); |
350 | 330 | ||
351 | /* don't need to call pci_device_enable() */ | 331 | /* don't need to call pci_device_enable() */ |
352 | return r82600_probe1(pdev, ent->driver_data); | 332 | return r82600_probe1(pdev, ent->driver_data); |
353 | } | 333 | } |
354 | 334 | ||
355 | |||
356 | static void __devexit r82600_remove_one(struct pci_dev *pdev) | 335 | static void __devexit r82600_remove_one(struct pci_dev *pdev) |
357 | { | 336 | { |
358 | struct mem_ctl_info *mci; | 337 | struct mem_ctl_info *mci; |
359 | 338 | ||
360 | debugf0(__FILE__ ": %s()\n", __func__); | 339 | debugf0("%s()\n", __func__); |
361 | 340 | ||
362 | if (((mci = edac_mc_find_mci_by_pdev(pdev)) != NULL) && | 341 | if ((mci = edac_mc_del_mc(pdev)) == NULL) |
363 | !edac_mc_del_mc(mci)) | 342 | return; |
364 | edac_mc_free(mci); | ||
365 | } | ||
366 | 343 | ||
344 | edac_mc_free(mci); | ||
345 | } | ||
367 | 346 | ||
368 | static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { | 347 | static const struct pci_device_id r82600_pci_tbl[] __devinitdata = { |
369 | {PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)}, | 348 | { |
370 | {0,} /* 0 terminated list. */ | 349 | PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID) |
350 | }, | ||
351 | { | ||
352 | 0, | ||
353 | } /* 0 terminated list. */ | ||
371 | }; | 354 | }; |
372 | 355 | ||
373 | MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); | 356 | MODULE_DEVICE_TABLE(pci, r82600_pci_tbl); |
374 | 357 | ||
375 | |||
376 | static struct pci_driver r82600_driver = { | 358 | static struct pci_driver r82600_driver = { |
377 | .name = BS_MOD_STR, | 359 | .name = EDAC_MOD_STR, |
378 | .probe = r82600_init_one, | 360 | .probe = r82600_init_one, |
379 | .remove = __devexit_p(r82600_remove_one), | 361 | .remove = __devexit_p(r82600_remove_one), |
380 | .id_table = r82600_pci_tbl, | 362 | .id_table = r82600_pci_tbl, |
381 | }; | 363 | }; |
382 | 364 | ||
383 | |||
384 | static int __init r82600_init(void) | 365 | static int __init r82600_init(void) |
385 | { | 366 | { |
386 | return pci_register_driver(&r82600_driver); | 367 | return pci_register_driver(&r82600_driver); |
387 | } | 368 | } |
388 | 369 | ||
389 | |||
390 | static void __exit r82600_exit(void) | 370 | static void __exit r82600_exit(void) |
391 | { | 371 | { |
392 | pci_unregister_driver(&r82600_driver); | 372 | pci_unregister_driver(&r82600_driver); |
393 | } | 373 | } |
394 | 374 | ||
395 | |||
396 | module_init(r82600_init); | 375 | module_init(r82600_init); |
397 | module_exit(r82600_exit); | 376 | module_exit(r82600_exit); |
398 | 377 | ||
399 | |||
400 | MODULE_LICENSE("GPL"); | 378 | MODULE_LICENSE("GPL"); |
401 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " | 379 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD Ltd. " |
402 | "on behalf of EADS Astrium"); | 380 | "on behalf of EADS Astrium"); |
403 | MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); | 381 | MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); |
404 | 382 | ||
405 | module_param(disable_hardware_scrub, bool, 0644); | 383 | module_param(disable_hardware_scrub, bool, 0644); |
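The probe loop above derives each chip-select row's page range from the DRAM Row Boundary Address register: the 8-bit DRBA value gives the row's upper limit in 16 MB units (value << 24), and two consecutive limits delimit one row. A minimal sketch of that arithmetic, with an illustrative helper name, assuming PAGE_SHIFT and the kernel integer types as used in the driver:

/* Illustrative only: derive a csrow page range from one DRBA boundary
 * value and the previous row's limit, mirroring r82600_probe1(). */
static void r82600_row_to_pages(u8 drbar, u32 prev_limit,
                                unsigned long *first_page,
                                unsigned long *last_page,
                                unsigned long *nr_pages)
{
        u32 row_high_limit = (u32)drbar << 24;  /* boundary in bytes */

        *first_page = prev_limit >> PAGE_SHIFT;
        *last_page  = (row_high_limit >> PAGE_SHIFT) - 1;
        *nr_pages   = *last_page - *first_page + 1;
}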
diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c index 343379f23a53..9b7e4d52ffd4 100644 --- a/drivers/firmware/efivars.c +++ b/drivers/firmware/efivars.c | |||
@@ -568,20 +568,20 @@ systab_read(struct subsystem *entry, char *buf) | |||
568 | if (!entry || !buf) | 568 | if (!entry || !buf) |
569 | return -EINVAL; | 569 | return -EINVAL; |
570 | 570 | ||
571 | if (efi.mps) | 571 | if (efi.mps != EFI_INVALID_TABLE_ADDR) |
572 | str += sprintf(str, "MPS=0x%lx\n", __pa(efi.mps)); | 572 | str += sprintf(str, "MPS=0x%lx\n", efi.mps); |
573 | if (efi.acpi20) | 573 | if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) |
574 | str += sprintf(str, "ACPI20=0x%lx\n", __pa(efi.acpi20)); | 574 | str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20); |
575 | if (efi.acpi) | 575 | if (efi.acpi != EFI_INVALID_TABLE_ADDR) |
576 | str += sprintf(str, "ACPI=0x%lx\n", __pa(efi.acpi)); | 576 | str += sprintf(str, "ACPI=0x%lx\n", efi.acpi); |
577 | if (efi.smbios) | 577 | if (efi.smbios != EFI_INVALID_TABLE_ADDR) |
578 | str += sprintf(str, "SMBIOS=0x%lx\n", __pa(efi.smbios)); | 578 | str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios); |
579 | if (efi.hcdp) | 579 | if (efi.hcdp != EFI_INVALID_TABLE_ADDR) |
580 | str += sprintf(str, "HCDP=0x%lx\n", __pa(efi.hcdp)); | 580 | str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp); |
581 | if (efi.boot_info) | 581 | if (efi.boot_info != EFI_INVALID_TABLE_ADDR) |
582 | str += sprintf(str, "BOOTINFO=0x%lx\n", __pa(efi.boot_info)); | 582 | str += sprintf(str, "BOOTINFO=0x%lx\n", efi.boot_info); |
583 | if (efi.uga) | 583 | if (efi.uga != EFI_INVALID_TABLE_ADDR) |
584 | str += sprintf(str, "UGA=0x%lx\n", __pa(efi.uga)); | 584 | str += sprintf(str, "UGA=0x%lx\n", efi.uga); |
585 | 585 | ||
586 | return str - buf; | 586 | return str - buf; |
587 | } | 587 | } |
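The efivars hunk reflects a change in how the EFI configuration tables are recorded: the efi.* members now hold physical addresses directly, with EFI_INVALID_TABLE_ADDR marking an absent table, so the earlier NULL checks and __pa() conversions are gone. A small sketch of the resulting idiom; the helper name is hypothetical:

/* Sketch: append "NAME=0x<addr>\n" only when the firmware provided the
 * table (address fields are physical, EFI_INVALID_TABLE_ADDR if absent). */
static char *systab_show_one(char *str, const char *name, unsigned long addr)
{
        if (addr != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "%s=0x%lx\n", name, addr);
        return str;
}

/* e.g.: str = systab_show_one(str, "ACPI20", efi.acpi20); */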
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c index ae1fb45dbb40..c37baf9448bc 100644 --- a/drivers/firmware/pcdp.c +++ b/drivers/firmware/pcdp.c | |||
@@ -89,19 +89,20 @@ efi_setup_pcdp_console(char *cmdline) | |||
89 | struct pcdp_uart *uart; | 89 | struct pcdp_uart *uart; |
90 | struct pcdp_device *dev, *end; | 90 | struct pcdp_device *dev, *end; |
91 | int i, serial = 0; | 91 | int i, serial = 0; |
92 | int rc = -ENODEV; | ||
92 | 93 | ||
93 | pcdp = efi.hcdp; | 94 | if (efi.hcdp == EFI_INVALID_TABLE_ADDR) |
94 | if (!pcdp) | ||
95 | return -ENODEV; | 95 | return -ENODEV; |
96 | 96 | ||
97 | printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, __pa(pcdp)); | 97 | pcdp = ioremap(efi.hcdp, 4096); |
98 | printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp); | ||
98 | 99 | ||
99 | if (strstr(cmdline, "console=hcdp")) { | 100 | if (strstr(cmdline, "console=hcdp")) { |
100 | if (pcdp->rev < 3) | 101 | if (pcdp->rev < 3) |
101 | serial = 1; | 102 | serial = 1; |
102 | } else if (strstr(cmdline, "console=")) { | 103 | } else if (strstr(cmdline, "console=")) { |
103 | printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); | 104 | printk(KERN_INFO "Explicit \"console=\"; ignoring PCDP\n"); |
104 | return -ENODEV; | 105 | goto out; |
105 | } | 106 | } |
106 | 107 | ||
107 | if (pcdp->rev < 3 && efi_uart_console_only()) | 108 | if (pcdp->rev < 3 && efi_uart_console_only()) |
@@ -110,7 +111,8 @@ efi_setup_pcdp_console(char *cmdline) | |||
110 | for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { | 111 | for (i = 0, uart = pcdp->uart; i < pcdp->num_uarts; i++, uart++) { |
111 | if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { | 112 | if (uart->flags & PCDP_UART_PRIMARY_CONSOLE || serial) { |
112 | if (uart->type == PCDP_CONSOLE_UART) { | 113 | if (uart->type == PCDP_CONSOLE_UART) { |
113 | return setup_serial_console(uart); | 114 | rc = setup_serial_console(uart); |
115 | goto out; | ||
114 | } | 116 | } |
115 | } | 117 | } |
116 | } | 118 | } |
@@ -121,10 +123,13 @@ efi_setup_pcdp_console(char *cmdline) | |||
121 | dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { | 123 | dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) { |
122 | if (dev->flags & PCDP_PRIMARY_CONSOLE) { | 124 | if (dev->flags & PCDP_PRIMARY_CONSOLE) { |
123 | if (dev->type == PCDP_CONSOLE_VGA) { | 125 | if (dev->type == PCDP_CONSOLE_VGA) { |
124 | return setup_vga_console(dev); | 126 | rc = setup_vga_console(dev); |
127 | goto out; | ||
125 | } | 128 | } |
126 | } | 129 | } |
127 | } | 130 | } |
128 | 131 | ||
129 | return -ENODEV; | 132 | out: |
133 | iounmap(pcdp); | ||
134 | return rc; | ||
130 | } | 135 | } |
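The pcdp.c hunk stops dereferencing efi.hcdp as a virtual pointer; the HCDP table is now mapped with ioremap() and every return path funnels through the out: label so the mapping is always released. A reduced sketch of that pattern, with illustrative names and an extra ioremap() failure check added for completeness:

/* Sketch of the map/use/unmap pattern introduced above */
static int use_hcdp_table(unsigned long phys)
{
        struct pcdp *pcdp;
        int rc = -ENODEV;

        if (phys == EFI_INVALID_TABLE_ADDR)
                return -ENODEV;

        pcdp = ioremap(phys, 4096);     /* map the firmware table */
        if (!pcdp)
                return -ENOMEM;

        /* ... inspect pcdp, set rc ... */

        iounmap(pcdp);                  /* single exit point undoes the mapping */
        return rc;
}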
diff --git a/drivers/ieee1394/highlevel.c b/drivers/ieee1394/highlevel.c index 734b121a0554..491e6032bdec 100644 --- a/drivers/ieee1394/highlevel.c +++ b/drivers/ieee1394/highlevel.c | |||
@@ -306,8 +306,7 @@ u64 hpsb_allocate_and_register_addrspace(struct hpsb_highlevel *hl, | |||
306 | u64 align_mask = ~(alignment - 1); | 306 | u64 align_mask = ~(alignment - 1); |
307 | 307 | ||
308 | if ((alignment & 3) || (alignment > 0x800000000000ULL) || | 308 | if ((alignment & 3) || (alignment > 0x800000000000ULL) || |
309 | ((hweight32(alignment >> 32) + | 309 | (hweight64(alignment) != 1)) { |
310 | hweight32(alignment & 0xffffffff) != 1))) { | ||
311 | HPSB_ERR("%s called with invalid alignment: 0x%048llx", | 310 | HPSB_ERR("%s called with invalid alignment: 0x%048llx", |
312 | __FUNCTION__, (unsigned long long)alignment); | 311 | __FUNCTION__, (unsigned long long)alignment); |
313 | return retval; | 312 | return retval; |
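The replaced hweight32() pair summed the set bits of both 32-bit halves to confirm that exactly one bit is set in the alignment; hweight64() expresses the same power-of-two test in a single call:

/* alignment is a power of two iff it has exactly one set bit */
if (hweight64(alignment) != 1)
        return -EINVAL; /* e.g. hweight64(0x1000) == 1, hweight64(0x1800) == 2 */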
diff --git a/drivers/input/touchscreen/ads7846.c b/drivers/input/touchscreen/ads7846.c index a81f987978c8..46d1fec2cfd8 100644 --- a/drivers/input/touchscreen/ads7846.c +++ b/drivers/input/touchscreen/ads7846.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/ads7846.h> | 25 | #include <linux/spi/ads7846.h> |
26 | #include <linux/interrupt.h> | 26 | #include <asm/irq.h> |
27 | 27 | ||
28 | #ifdef CONFIG_ARM | 28 | #ifdef CONFIG_ARM |
29 | #include <asm/mach-types.h> | 29 | #include <asm/mach-types.h> |
diff --git a/drivers/isdn/Makefile b/drivers/isdn/Makefile index 03d8ccd51955..988142c30a6d 100644 --- a/drivers/isdn/Makefile +++ b/drivers/isdn/Makefile | |||
@@ -13,3 +13,4 @@ obj-$(CONFIG_ISDN_DRV_SC) += sc/ | |||
13 | obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ | 13 | obj-$(CONFIG_ISDN_DRV_LOOP) += isdnloop/ |
14 | obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ | 14 | obj-$(CONFIG_ISDN_DRV_ACT2000) += act2000/ |
15 | obj-$(CONFIG_HYSDN) += hysdn/ | 15 | obj-$(CONFIG_HYSDN) += hysdn/ |
16 | obj-$(CONFIG_ISDN_DRV_GIGASET) += gigaset/ | ||
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig new file mode 100644 index 000000000000..53c4fb62ed85 --- /dev/null +++ b/drivers/isdn/gigaset/Kconfig | |||
@@ -0,0 +1,42 @@ | |||
1 | menu "Siemens Gigaset" | ||
2 | depends on ISDN_I4L | ||
3 | |||
4 | config ISDN_DRV_GIGASET | ||
5 | tristate "Siemens Gigaset support (isdn)" | ||
6 | depends on ISDN_I4L && m | ||
7 | # depends on ISDN_I4L && MODULES | ||
8 | help | ||
9 | Say m here if you have a Gigaset or Sinus isdn device. | ||
10 | |||
11 | if ISDN_DRV_GIGASET!=n | ||
12 | |||
13 | config GIGASET_BASE | ||
14 | tristate "Gigaset base station support" | ||
15 | depends on ISDN_DRV_GIGASET && USB | ||
16 | help | ||
17 | Say m here if you need to communicate with the base | ||
18 | directly via USB. | ||
19 | |||
20 | config GIGASET_M105 | ||
21 | tristate "Gigaset M105 support" | ||
22 | depends on ISDN_DRV_GIGASET && USB | ||
23 | help | ||
24 | Say m here if you need the driver for the Gigaset M105 device. | ||
25 | |||
26 | config GIGASET_DEBUG | ||
27 | bool "Gigaset debugging" | ||
28 | help | ||
29 | This enables debugging code in the Gigaset drivers. | ||
30 | If in doubt, say yes. | ||
31 | |||
32 | config GIGASET_UNDOCREQ | ||
33 | bool "Support for undocumented USB requests" | ||
34 | help | ||
35 | This enables support for USB requests we only know from | ||
36 | reverse engineering (currently M105 only). If you need | ||
37 | features like configuration mode of M105, say yes. If you | ||
38 | care about your device, say no. | ||
39 | |||
40 | endif | ||
41 | |||
42 | endmenu | ||
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile new file mode 100644 index 000000000000..9b9acf1a21ad --- /dev/null +++ b/drivers/isdn/gigaset/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o | ||
2 | usb_gigaset-y := usb-gigaset.o asyncdata.o | ||
3 | bas_gigaset-y := bas-gigaset.o isocdata.o | ||
4 | |||
5 | obj-$(CONFIG_GIGASET_M105) += usb_gigaset.o gigaset.o | ||
6 | obj-$(CONFIG_GIGASET_BASE) += bas_gigaset.o gigaset.o | ||
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c new file mode 100644 index 000000000000..171f8b703d61 --- /dev/null +++ b/drivers/isdn/gigaset/asyncdata.c | |||
@@ -0,0 +1,597 @@ | |||
1 | /* | ||
2 | * Common data handling layer for ser_gigaset and usb_gigaset | ||
3 | * | ||
4 | * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>, | ||
5 | * Hansjoerg Lipp <hjlipp@web.de>, | ||
6 | * Stefan Eilers <Eilers.Stefan@epost.de>. | ||
7 | * | ||
8 | * ===================================================================== | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of | ||
12 | * the License, or (at your option) any later version. | ||
13 | * ===================================================================== | ||
14 | * ToDo: ... | ||
15 | * ===================================================================== | ||
16 | * Version: $Id: asyncdata.c,v 1.2.2.7 2005/11/13 23:05:18 hjlipp Exp $ | ||
17 | * ===================================================================== | ||
18 | */ | ||
19 | |||
20 | #include "gigaset.h" | ||
21 | #include <linux/crc-ccitt.h> | ||
22 | |||
23 | //#define GIG_M10x_STUFF_VOICE_DATA | ||
24 | |||
25 | /* check if byte must be stuffed/escaped | ||
26 | * I'm not sure which data should be escaped. | ||
27 | * Therefore I will go the hard way and escape every value | ||
28 | * less than 0x20, the flag sequence and the control escape char. | ||
29 | */ | ||
30 | static inline int muststuff(unsigned char c) | ||
31 | { | ||
32 | if (c < PPP_TRANS) return 1; | ||
33 | if (c == PPP_FLAG) return 1; | ||
34 | if (c == PPP_ESCAPE) return 1; | ||
35 | /* other possible candidates: */ | ||
36 | /* 0x91: XON with parity set */ | ||
37 | /* 0x93: XOFF with parity set */ | ||
38 | return 0; | ||
39 | } | ||
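muststuff() marks the bytes that have to be escaped before transmission: the flag (PPP_FLAG, 0x7e), the escape character itself (PPP_ESCAPE, 0x7d) and all control characters below PPP_TRANS (0x20). A hedged sketch of the matching escape and unescape steps used by the HDLC paths below; the helper is illustrative only:

/* Sketch: transmit-side escaping of one byte; constants from <linux/ppp_defs.h>
 * as used by this file (PPP_FLAG 0x7e, PPP_ESCAPE 0x7d, PPP_TRANS 0x20). */
static void stuff_byte(unsigned char c, unsigned char *out, int *len)
{
        if (muststuff(c)) {
                out[(*len)++] = PPP_ESCAPE;
                out[(*len)++] = c ^ PPP_TRANS;  /* e.g. 0x7e -> 0x7d 0x5e */
        } else {
                out[(*len)++] = c;
        }
}
/* receiver side: a byte following PPP_ESCAPE is restored with c ^= PPP_TRANS */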
40 | |||
41 | /* == data input =========================================================== */ | ||
42 | |||
43 | /* process a block of received bytes in command mode (modem response) | ||
44 | * Return value: | ||
45 | * number of processed bytes | ||
46 | */ | ||
47 | static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes, | ||
48 | struct inbuf_t *inbuf) | ||
49 | { | ||
50 | struct cardstate *cs = inbuf->cs; | ||
51 | unsigned cbytes = cs->cbytes; | ||
52 | int inputstate = inbuf->inputstate; | ||
53 | int startbytes = numbytes; | ||
54 | |||
55 | for (;;) { | ||
56 | cs->respdata[cbytes] = c; | ||
57 | if (c == 10 || c == 13) { | ||
58 | dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", | ||
59 | __func__, cbytes); | ||
60 | cs->cbytes = cbytes; | ||
61 | gigaset_handle_modem_response(cs); /* can change cs->dle */ | ||
62 | cbytes = 0; | ||
63 | |||
64 | if (cs->dle && | ||
65 | !(inputstate & INS_DLE_command)) { | ||
66 | inputstate &= ~INS_command; | ||
67 | break; | ||
68 | } | ||
69 | } else { | ||
70 | /* advance in line buffer, checking for overflow */ | ||
71 | if (cbytes < MAX_RESP_SIZE - 1) | ||
72 | cbytes++; | ||
73 | else | ||
74 | warn("response too large"); | ||
75 | } | ||
76 | |||
77 | if (!numbytes) | ||
78 | break; | ||
79 | c = *src++; | ||
80 | --numbytes; | ||
81 | if (c == DLE_FLAG && | ||
82 | (cs->dle || inputstate & INS_DLE_command)) { | ||
83 | inputstate |= INS_DLE_char; | ||
84 | break; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | cs->cbytes = cbytes; | ||
89 | inbuf->inputstate = inputstate; | ||
90 | |||
91 | return startbytes - numbytes; | ||
92 | } | ||
93 | |||
94 | /* process a block of received bytes in lock mode (tty i/f) | ||
95 | * Return value: | ||
96 | * number of processed bytes | ||
97 | */ | ||
98 | static inline int lock_loop(unsigned char *src, int numbytes, | ||
99 | struct inbuf_t *inbuf) | ||
100 | { | ||
101 | struct cardstate *cs = inbuf->cs; | ||
102 | |||
103 | gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src, 0); | ||
104 | gigaset_if_receive(cs, src, numbytes); | ||
105 | |||
106 | return numbytes; | ||
107 | } | ||
108 | |||
109 | /* process a block of received bytes in HDLC data mode | ||
110 | * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. | ||
111 | * When a frame is complete, check the FCS and pass valid frames to the LL. | ||
112 | * If DLE is encountered, return immediately to let the caller handle it. | ||
113 | * Return value: | ||
114 | * number of processed bytes | ||
115 | * numbytes (all bytes processed) on error --FIXME | ||
116 | */ | ||
117 | static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes, | ||
118 | struct inbuf_t *inbuf) | ||
119 | { | ||
120 | struct cardstate *cs = inbuf->cs; | ||
121 | struct bc_state *bcs = inbuf->bcs; | ||
122 | int inputstate; | ||
123 | __u16 fcs; | ||
124 | struct sk_buff *skb; | ||
125 | unsigned char error; | ||
126 | struct sk_buff *compskb; | ||
127 | int startbytes = numbytes; | ||
128 | int l; | ||
129 | |||
130 | IFNULLRETVAL(bcs, numbytes); | ||
131 | inputstate = bcs->inputstate; | ||
132 | fcs = bcs->fcs; | ||
133 | skb = bcs->skb; | ||
134 | IFNULLRETVAL(skb, numbytes); | ||
135 | |||
136 | if (unlikely(inputstate & INS_byte_stuff)) { | ||
137 | inputstate &= ~INS_byte_stuff; | ||
138 | goto byte_stuff; | ||
139 | } | ||
140 | for (;;) { | ||
141 | if (unlikely(c == PPP_ESCAPE)) { | ||
142 | if (unlikely(!numbytes)) { | ||
143 | inputstate |= INS_byte_stuff; | ||
144 | break; | ||
145 | } | ||
146 | c = *src++; | ||
147 | --numbytes; | ||
148 | if (unlikely(c == DLE_FLAG && | ||
149 | (cs->dle || | ||
150 | inbuf->inputstate & INS_DLE_command))) { | ||
151 | inbuf->inputstate |= INS_DLE_char; | ||
152 | inputstate |= INS_byte_stuff; | ||
153 | break; | ||
154 | } | ||
155 | byte_stuff: | ||
156 | c ^= PPP_TRANS; | ||
157 | #ifdef CONFIG_GIGASET_DEBUG | ||
158 | if (unlikely(!muststuff(c))) | ||
159 | dbg(DEBUG_HDLC, | ||
160 | "byte stuffed: 0x%02x", c); | ||
161 | #endif | ||
162 | } else if (unlikely(c == PPP_FLAG)) { | ||
163 | if (unlikely(inputstate & INS_skip_frame)) { | ||
164 | if (!(inputstate & INS_have_data)) { /* 7E 7E */ | ||
165 | //dbg(DEBUG_HDLC, "(7e)7e------------------------"); | ||
166 | #ifdef CONFIG_GIGASET_DEBUG | ||
167 | ++bcs->emptycount; | ||
168 | #endif | ||
169 | } else | ||
170 | dbg(DEBUG_HDLC, | ||
171 | "7e----------------------------"); | ||
172 | |||
173 | /* end of frame */ | ||
174 | error = 1; | ||
175 | gigaset_rcv_error(NULL, cs, bcs); | ||
176 | } else if (!(inputstate & INS_have_data)) { /* 7E 7E */ | ||
177 | //dbg(DEBUG_HDLC, "(7e)7e------------------------"); | ||
178 | #ifdef CONFIG_GIGASET_DEBUG | ||
179 | ++bcs->emptycount; | ||
180 | #endif | ||
181 | break; | ||
182 | } else { | ||
183 | dbg(DEBUG_HDLC, | ||
184 | "7e----------------------------"); | ||
185 | |||
186 | /* end of frame */ | ||
187 | error = 0; | ||
188 | |||
189 | if (unlikely(fcs != PPP_GOODFCS)) { | ||
190 | err("Packet checksum at %lu failed, " | ||
191 | "packet is corrupted (%u bytes)!", | ||
192 | bcs->rcvbytes, skb->len); | ||
193 | compskb = NULL; | ||
194 | gigaset_rcv_error(compskb, cs, bcs); | ||
195 | error = 1; | ||
196 | } else { | ||
197 | if (likely((l = skb->len) > 2)) { | ||
198 | skb->tail -= 2; | ||
199 | skb->len -= 2; | ||
200 | } else { | ||
201 | dev_kfree_skb(skb); | ||
202 | skb = NULL; | ||
203 | inputstate |= INS_skip_frame; | ||
204 | if (l == 1) { | ||
205 | err("invalid packet size (1)!"); | ||
206 | error = 1; | ||
207 | gigaset_rcv_error(NULL, cs, bcs); | ||
208 | } | ||
209 | } | ||
210 | if (likely(!(error || | ||
211 | (inputstate & | ||
212 | INS_skip_frame)))) { | ||
213 | gigaset_rcv_skb(skb, cs, bcs); | ||
214 | } | ||
215 | } | ||
216 | } | ||
217 | |||
218 | if (unlikely(error)) | ||
219 | if (skb) | ||
220 | dev_kfree_skb(skb); | ||
221 | |||
222 | fcs = PPP_INITFCS; | ||
223 | inputstate &= ~(INS_have_data | INS_skip_frame); | ||
224 | if (unlikely(bcs->ignore)) { | ||
225 | inputstate |= INS_skip_frame; | ||
226 | skb = NULL; | ||
227 | } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) { | ||
228 | skb_reserve(skb, HW_HDR_LEN); | ||
229 | } else { | ||
230 | warn("could not allocate new skb"); | ||
231 | inputstate |= INS_skip_frame; | ||
232 | } | ||
233 | |||
234 | break; | ||
235 | #ifdef CONFIG_GIGASET_DEBUG | ||
236 | } else if (unlikely(muststuff(c))) { | ||
237 | /* Should not happen. Possible after ZDLE=1<CR><LF>. */ | ||
238 | dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); | ||
239 | #endif | ||
240 | } | ||
241 | |||
242 | /* add character */ | ||
243 | |||
244 | #ifdef CONFIG_GIGASET_DEBUG | ||
245 | if (unlikely(!(inputstate & INS_have_data))) { | ||
246 | dbg(DEBUG_HDLC, | ||
247 | "7e (%d x) ================", bcs->emptycount); | ||
248 | bcs->emptycount = 0; | ||
249 | } | ||
250 | #endif | ||
251 | |||
252 | inputstate |= INS_have_data; | ||
253 | |||
254 | if (likely(!(inputstate & INS_skip_frame))) { | ||
255 | if (unlikely(skb->len == SBUFSIZE)) { | ||
256 | warn("received packet too long"); | ||
257 | dev_kfree_skb_any(skb); | ||
258 | skb = NULL; | ||
259 | inputstate |= INS_skip_frame; | ||
260 | break; | ||
261 | } | ||
262 | *gigaset_skb_put_quick(skb, 1) = c; | ||
263 | /* *__skb_put (skb, 1) = c; */ | ||
264 | fcs = crc_ccitt_byte(fcs, c); | ||
265 | } | ||
266 | |||
267 | if (unlikely(!numbytes)) | ||
268 | break; | ||
269 | c = *src++; | ||
270 | --numbytes; | ||
271 | if (unlikely(c == DLE_FLAG && | ||
272 | (cs->dle || | ||
273 | inbuf->inputstate & INS_DLE_command))) { | ||
274 | inbuf->inputstate |= INS_DLE_char; | ||
275 | break; | ||
276 | } | ||
277 | } | ||
278 | bcs->inputstate = inputstate; | ||
279 | bcs->fcs = fcs; | ||
280 | bcs->skb = skb; | ||
281 | return startbytes - numbytes; | ||
282 | } | ||
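hdlc_loop() runs the CRC over the payload together with the two received FCS octets, so an undamaged frame leaves the accumulator at PPP_GOODFCS before the FCS bytes are trimmed from the skb. The same check, written as a minimal sketch with crc_ccitt() from <linux/crc-ccitt.h>:

/* Sketch: verify a received HDLC frame's FCS.
 * buf/len cover the payload plus the two trailing FCS octets. */
static int fcs_ok(const unsigned char *buf, size_t len)
{
        return crc_ccitt(PPP_INITFCS, buf, len) == PPP_GOODFCS;
}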
283 | |||
284 | /* process a block of received bytes in transparent data mode | ||
285 | * Invert bytes, undoing byte stuffing and watching for DLE escapes. | ||
286 | * If DLE is encountered, return immediately to let the caller handle it. | ||
287 | * Return value: | ||
288 | * number of processed bytes | ||
289 | * numbytes (all bytes processed) on error --FIXME | ||
290 | */ | ||
291 | static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, | ||
292 | struct inbuf_t *inbuf) | ||
293 | { | ||
294 | struct cardstate *cs = inbuf->cs; | ||
295 | struct bc_state *bcs = inbuf->bcs; | ||
296 | int inputstate; | ||
297 | struct sk_buff *skb; | ||
298 | int startbytes = numbytes; | ||
299 | |||
300 | IFNULLRETVAL(bcs, numbytes); | ||
301 | inputstate = bcs->inputstate; | ||
302 | skb = bcs->skb; | ||
303 | IFNULLRETVAL(skb, numbytes); | ||
304 | |||
305 | for (;;) { | ||
306 | /* add character */ | ||
307 | inputstate |= INS_have_data; | ||
308 | |||
309 | if (likely(!(inputstate & INS_skip_frame))) { | ||
310 | if (unlikely(skb->len == SBUFSIZE)) { | ||
311 | //FIXME just pass skb up and allocate a new one | ||
312 | warn("received packet too long"); | ||
313 | dev_kfree_skb_any(skb); | ||
314 | skb = NULL; | ||
315 | inputstate |= INS_skip_frame; | ||
316 | break; | ||
317 | } | ||
318 | *gigaset_skb_put_quick(skb, 1) = gigaset_invtab[c]; | ||
319 | } | ||
320 | |||
321 | if (unlikely(!numbytes)) | ||
322 | break; | ||
323 | c = *src++; | ||
324 | --numbytes; | ||
325 | if (unlikely(c == DLE_FLAG && | ||
326 | (cs->dle || | ||
327 | inbuf->inputstate & INS_DLE_command))) { | ||
328 | inbuf->inputstate |= INS_DLE_char; | ||
329 | break; | ||
330 | } | ||
331 | } | ||
332 | |||
333 | /* pass data up */ | ||
334 | if (likely(inputstate & INS_have_data)) { | ||
335 | if (likely(!(inputstate & INS_skip_frame))) { | ||
336 | gigaset_rcv_skb(skb, cs, bcs); | ||
337 | } | ||
338 | inputstate &= ~(INS_have_data | INS_skip_frame); | ||
339 | if (unlikely(bcs->ignore)) { | ||
340 | inputstate |= INS_skip_frame; | ||
341 | skb = NULL; | ||
342 | } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) | ||
343 | != NULL)) { | ||
344 | skb_reserve(skb, HW_HDR_LEN); | ||
345 | } else { | ||
346 | warn("could not allocate new skb"); | ||
347 | inputstate |= INS_skip_frame; | ||
348 | } | ||
349 | } | ||
350 | |||
351 | bcs->inputstate = inputstate; | ||
352 | bcs->skb = skb; | ||
353 | return startbytes - numbytes; | ||
354 | } | ||
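Transparent (voice) mode passes every octet through gigaset_invtab[], which is defined elsewhere in the driver. Assuming that table reverses the bit order of each byte, it could be generated with a loop like the following sketch (table and helper names here are illustrative, not the driver's):

/* Sketch: build a 256-entry bit-order reversal table, on the assumption
 * that this is what gigaset_invtab[] provides. */
static unsigned char invtab[256];

static void build_invtab(void)
{
        int i, bit;

        for (i = 0; i < 256; i++) {
                unsigned char r = 0;

                for (bit = 0; bit < 8; bit++)
                        if (i & (1 << bit))
                                r |= 0x80 >> bit;
                invtab[i] = r;
        }
}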
355 | |||
356 | /* process a block of data received from the device | ||
357 | */ | ||
358 | void gigaset_m10x_input(struct inbuf_t *inbuf) | ||
359 | { | ||
360 | struct cardstate *cs; | ||
361 | unsigned tail, head, numbytes; | ||
362 | unsigned char *src, c; | ||
363 | int procbytes; | ||
364 | |||
365 | head = atomic_read(&inbuf->head); | ||
366 | tail = atomic_read(&inbuf->tail); | ||
367 | dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); | ||
368 | |||
369 | if (head != tail) { | ||
370 | cs = inbuf->cs; | ||
371 | src = inbuf->data + head; | ||
372 | numbytes = (head > tail ? RBUFSIZE : tail) - head; | ||
373 | dbg(DEBUG_INTR, "processing %u bytes", numbytes); | ||
374 | |||
375 | while (numbytes) { | ||
376 | if (atomic_read(&cs->mstate) == MS_LOCKED) { | ||
377 | procbytes = lock_loop(src, numbytes, inbuf); | ||
378 | src += procbytes; | ||
379 | numbytes -= procbytes; | ||
380 | } else { | ||
381 | c = *src++; | ||
382 | --numbytes; | ||
383 | if (c == DLE_FLAG && (cs->dle || | ||
384 | inbuf->inputstate & INS_DLE_command)) { | ||
385 | if (!(inbuf->inputstate & INS_DLE_char)) { | ||
386 | inbuf->inputstate |= INS_DLE_char; | ||
387 | goto nextbyte; | ||
388 | } | ||
389 | /* <DLE> <DLE> => <DLE> in data stream */ | ||
390 | inbuf->inputstate &= ~INS_DLE_char; | ||
391 | } | ||
392 | |||
393 | if (!(inbuf->inputstate & INS_DLE_char)) { | ||
394 | |||
395 | /* FIXME simply set a function pointer in cs depending on the mode [here+hdlc_loop]? */ | ||
396 | /* FIXME would save the following "if" and would allow other protocols */ | ||
397 | if (inbuf->inputstate & INS_command) | ||
398 | procbytes = cmd_loop(c, src, numbytes, inbuf); | ||
399 | else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC) | ||
400 | procbytes = hdlc_loop(c, src, numbytes, inbuf); | ||
401 | else | ||
402 | procbytes = iraw_loop(c, src, numbytes, inbuf); | ||
403 | |||
404 | src += procbytes; | ||
405 | numbytes -= procbytes; | ||
406 | } else { /* DLE-char */ | ||
407 | inbuf->inputstate &= ~INS_DLE_char; | ||
408 | switch (c) { | ||
409 | case 'X': /*begin of command*/ | ||
410 | #ifdef CONFIG_GIGASET_DEBUG | ||
411 | if (inbuf->inputstate & INS_command) | ||
412 | err("received <DLE> 'X' in command mode"); | ||
413 | #endif | ||
414 | inbuf->inputstate |= | ||
415 | INS_command | INS_DLE_command; | ||
416 | break; | ||
417 | case '.': /*end of command*/ | ||
418 | #ifdef CONFIG_GIGASET_DEBUG | ||
419 | if (!(inbuf->inputstate & INS_command)) | ||
420 | err("received <DLE> '.' in hdlc mode"); | ||
421 | #endif | ||
422 | inbuf->inputstate &= cs->dle ? | ||
423 | ~(INS_DLE_command|INS_command) | ||
424 | : ~INS_DLE_command; | ||
425 | break; | ||
426 | //case DLE_FLAG: /*DLE_FLAG in data stream*/ /* already handled above! */ | ||
427 | default: | ||
428 | err("received 0x10 0x%02x!", (int) c); | ||
429 | /* FIXME: reset driver?? */ | ||
430 | } | ||
431 | } | ||
432 | } | ||
433 | nextbyte: | ||
434 | if (!numbytes) { | ||
435 | /* end of buffer, check for wrap */ | ||
436 | if (head > tail) { | ||
437 | head = 0; | ||
438 | src = inbuf->data; | ||
439 | numbytes = tail; | ||
440 | } else { | ||
441 | head = tail; | ||
442 | break; | ||
443 | } | ||
444 | } | ||
445 | } | ||
446 | |||
447 | dbg(DEBUG_INTR, "setting head to %u", head); | ||
448 | atomic_set(&inbuf->head, head); | ||
449 | } | ||
450 | } | ||
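gigaset_m10x_input() consumes inbuf->data as a ring buffer of RBUFSIZE bytes: it first processes the contiguous region starting at head, then wraps to the beginning when the producer index tail lies behind it. The chunk-size expression is the subtle part; a small stand-alone sketch:

/* Sketch: size of the contiguous readable region of an RBUFSIZE-byte ring
 * buffer, given producer index 'tail' and consumer index 'head'. */
static unsigned contiguous_bytes(unsigned head, unsigned tail)
{
        if (head == tail)
                return 0;                               /* empty */
        return (head > tail ? RBUFSIZE : tail) - head;
        /* head < tail: data is [head, tail); head > tail: read [head, RBUFSIZE)
         * now and [0, tail) after wrapping head to 0 */
}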
451 | |||
452 | |||
453 | /* == data output ========================================================== */ | ||
454 | |||
455 | /* Encoding of a PPP packet into an octet stuffed HDLC frame | ||
456 | * with FCS, opening and closing flags. | ||
457 | * parameters: | ||
458 | * skb skb containing original packet (freed upon return) | ||
459 | * head number of headroom bytes to allocate in result skb | ||
460 | * tail number of tailroom bytes to allocate in result skb | ||
461 | * Return value: | ||
462 | * pointer to newly allocated skb containing the result frame | ||
463 | */ | ||
464 | static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail) | ||
465 | { | ||
466 | struct sk_buff *hdlc_skb; | ||
467 | __u16 fcs; | ||
468 | unsigned char c; | ||
469 | unsigned char *cp; | ||
470 | int len; | ||
471 | unsigned int stuf_cnt; | ||
472 | |||
473 | stuf_cnt = 0; | ||
474 | fcs = PPP_INITFCS; | ||
475 | cp = skb->data; | ||
476 | len = skb->len; | ||
477 | while (len--) { | ||
478 | if (muststuff(*cp)) | ||
479 | stuf_cnt++; | ||
480 | fcs = crc_ccitt_byte(fcs, *cp++); | ||
481 | } | ||
482 | fcs ^= 0xffff; /* complement */ | ||
483 | |||
484 | /* size of new buffer: original size + number of stuffing bytes | ||
485 | * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes | ||
486 | */ | ||
487 | hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head); | ||
488 | if (!hdlc_skb) { | ||
489 | err("unable to allocate memory for HDLC encoding!"); | ||
490 | dev_kfree_skb(skb); | ||
491 | return NULL; | ||
492 | } | ||
493 | skb_reserve(hdlc_skb, head); | ||
494 | |||
495 | /* Copy acknowledge request into new skb */ | ||
496 | memcpy(hdlc_skb->head, skb->head, 2); | ||
497 | |||
498 | /* Add flag sequence in front of everything.. */ | ||
499 | *(skb_put(hdlc_skb, 1)) = PPP_FLAG; | ||
500 | |||
501 | /* Perform byte stuffing while copying data. */ | ||
502 | while (skb->len--) { | ||
503 | if (muststuff(*skb->data)) { | ||
504 | *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; | ||
505 | *(skb_put(hdlc_skb, 1)) = (*skb->data++) ^ PPP_TRANS; | ||
506 | } else | ||
507 | *(skb_put(hdlc_skb, 1)) = *skb->data++; | ||
508 | } | ||
509 | |||
510 | /* Finally add FCS (byte stuffed) and flag sequence */ | ||
511 | c = (fcs & 0x00ff); /* least significant byte first */ | ||
512 | if (muststuff(c)) { | ||
513 | *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; | ||
514 | c ^= PPP_TRANS; | ||
515 | } | ||
516 | *(skb_put(hdlc_skb, 1)) = c; | ||
517 | |||
518 | c = ((fcs >> 8) & 0x00ff); | ||
519 | if (muststuff(c)) { | ||
520 | *(skb_put(hdlc_skb, 1)) = PPP_ESCAPE; | ||
521 | c ^= PPP_TRANS; | ||
522 | } | ||
523 | *(skb_put(hdlc_skb, 1)) = c; | ||
524 | |||
525 | *(skb_put(hdlc_skb, 1)) = PPP_FLAG; | ||
526 | |||
527 | dev_kfree_skb(skb); | ||
528 | return hdlc_skb; | ||
529 | } | ||
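HDLC_Encode() emits an opening flag, the byte-stuffed payload, the byte-stuffed complemented FCS (least significant byte first) and a closing flag, which is why the new skb reserves len + stuf_cnt + 6 bytes besides head and tail room. The bound, spelled out as a sketch:

/* Sketch: upper bound on the encoded frame size computed by HDLC_Encode().
 * stuf_cnt counts payload bytes for which muststuff() is true. */
static unsigned hdlc_encoded_max(unsigned len, unsigned stuf_cnt)
{
        return len + stuf_cnt   /* payload, each stuffed byte doubled */
               + 2              /* FCS low byte + high byte */
               + 2              /* worst case: both FCS bytes need stuffing */
               + 2;             /* opening and closing PPP_FLAG */
}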
530 | |||
531 | /* Encoding of a raw packet into an octet stuffed bit inverted frame | ||
532 | * parameters: | ||
533 | * skb skb containing original packet (freed upon return) | ||
534 | * head number of headroom bytes to allocate in result skb | ||
535 | * tail number of tailroom bytes to allocate in result skb | ||
536 | * Return value: | ||
537 | * pointer to newly allocated skb containing the result frame | ||
538 | */ | ||
539 | static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) | ||
540 | { | ||
541 | struct sk_buff *iraw_skb; | ||
542 | unsigned char c; | ||
543 | unsigned char *cp; | ||
544 | int len; | ||
545 | |||
546 | /* worst case: every byte must be stuffed */ | ||
547 | iraw_skb = dev_alloc_skb(2*skb->len + tail + head); | ||
548 | if (!iraw_skb) { | ||
549 | err("unable to allocate memory for raw mode encoding!"); | ||
550 | dev_kfree_skb(skb); | ||
551 | return NULL; | ||
552 | } | ||
553 | skb_reserve(iraw_skb, head); | ||
554 | |||
555 | cp = skb->data; | ||
556 | len = skb->len; | ||
557 | while (len--) { | ||
558 | c = gigaset_invtab[*cp++]; | ||
559 | if (c == DLE_FLAG) | ||
560 | *(skb_put(iraw_skb, 1)) = c; | ||
561 | *(skb_put(iraw_skb, 1)) = c; | ||
562 | } | ||
563 | dev_kfree_skb(skb); | ||
564 | return iraw_skb; | ||
565 | } | ||
566 | |||
567 | /* gigaset_send_skb | ||
568 | * called by common.c to queue an skb for sending | ||
569 | * and start transmission if necessary | ||
570 | * parameters: | ||
571 | * B Channel control structure | ||
572 | * skb | ||
573 | * Return value: | ||
574 | * number of bytes accepted for sending | ||
575 | * (skb->len if ok, 0 if out of buffer space) | ||
576 | * or error code (< 0, e.g. -EINVAL) | ||
577 | */ | ||
578 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) | ||
579 | { | ||
580 | unsigned len; | ||
581 | |||
582 | IFNULLRETVAL(bcs, -EFAULT); | ||
583 | IFNULLRETVAL(skb, -EFAULT); | ||
584 | len = skb->len; | ||
585 | |||
586 | if (bcs->proto2 == ISDN_PROTO_L2_HDLC) | ||
587 | skb = HDLC_Encode(skb, HW_HDR_LEN, 0); | ||
588 | else | ||
589 | skb = iraw_encode(skb, HW_HDR_LEN, 0); | ||
590 | if (!skb) | ||
591 | return -ENOMEM; | ||
592 | |||
593 | skb_queue_tail(&bcs->squeue, skb); | ||
594 | tasklet_schedule(&bcs->cs->write_tasklet); | ||
595 | |||
596 | return len; /* ok so far */ | ||
597 | } | ||
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c new file mode 100644 index 000000000000..31f0f07832bc --- /dev/null +++ b/drivers/isdn/gigaset/bas-gigaset.c | |||
@@ -0,0 +1,2365 @@ | |||
1 | /* | ||
2 | * USB driver for Gigaset 307x base via direct USB connection. | ||
3 | * | ||
4 | * Copyright (c) 2001 by Hansjoerg Lipp <hjlipp@web.de>, | ||
5 | * Tilman Schmidt <tilman@imap.cc>, | ||
6 | * Stefan Eilers <Eilers.Stefan@epost.de>. | ||
7 | * | ||
8 | * Based on usb-gigaset.c. | ||
9 | * | ||
10 | * ===================================================================== | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License as | ||
13 | * published by the Free Software Foundation; either version 2 of | ||
14 | * the License, or (at your option) any later version. | ||
15 | * ===================================================================== | ||
16 | * ToDo: ... | ||
17 | * ===================================================================== | ||
18 | * Version: $Id: bas-gigaset.c,v 1.52.4.19 2006/02/04 18:28:16 hjlipp Exp $ | ||
19 | * ===================================================================== | ||
20 | */ | ||
21 | |||
22 | #include "gigaset.h" | ||
23 | |||
24 | #include <linux/errno.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/timer.h> | ||
28 | #include <linux/usb.h> | ||
29 | #include <linux/module.h> | ||
30 | #include <linux/moduleparam.h> | ||
31 | |||
32 | /* Version Information */ | ||
33 | #define DRIVER_AUTHOR "Tilman Schmidt <tilman@imap.cc>, Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>" | ||
34 | #define DRIVER_DESC "USB Driver for Gigaset 307x" | ||
35 | |||
36 | |||
37 | /* Module parameters */ | ||
38 | |||
39 | static int startmode = SM_ISDN; | ||
40 | static int cidmode = 1; | ||
41 | |||
42 | module_param(startmode, int, S_IRUGO); | ||
43 | module_param(cidmode, int, S_IRUGO); | ||
44 | MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); | ||
45 | MODULE_PARM_DESC(cidmode, "Call-ID mode"); | ||
46 | |||
47 | #define GIGASET_MINORS 1 | ||
48 | #define GIGASET_MINOR 16 | ||
49 | #define GIGASET_MODULENAME "bas_gigaset" | ||
50 | #define GIGASET_DEVFSNAME "gig/bas/" | ||
51 | #define GIGASET_DEVNAME "ttyGB" | ||
52 | |||
53 | #define IF_WRITEBUF 256 //FIXME | ||
54 | |||
55 | /* Values for the Gigaset 307x */ | ||
56 | #define USB_GIGA_VENDOR_ID 0x0681 | ||
57 | #define USB_GIGA_PRODUCT_ID 0x0001 | ||
58 | #define USB_4175_PRODUCT_ID 0x0002 | ||
59 | #define USB_SX303_PRODUCT_ID 0x0021 | ||
60 | #define USB_SX353_PRODUCT_ID 0x0022 | ||
61 | |||
62 | /* table of devices that work with this driver */ | ||
63 | static struct usb_device_id gigaset_table [] = { | ||
64 | { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_GIGA_PRODUCT_ID) }, | ||
65 | { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_4175_PRODUCT_ID) }, | ||
66 | { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) }, | ||
67 | { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX353_PRODUCT_ID) }, | ||
68 | { } /* Terminating entry */ | ||
69 | }; | ||
70 | |||
71 | MODULE_DEVICE_TABLE(usb, gigaset_table); | ||
72 | |||
73 | /* Get a minor range for your devices from the usb maintainer */ | ||
74 | #define USB_SKEL_MINOR_BASE 200 | ||
75 | |||
76 | /*======================= local function prototypes =============================*/ | ||
77 | |||
78 | /* This function is called if a new device is connected to the USB port. It | ||
79 | * checks whether this new device belongs to this driver. | ||
80 | */ | ||
81 | static int gigaset_probe(struct usb_interface *interface, | ||
82 | const struct usb_device_id *id); | ||
83 | |||
84 | /* Function will be called if the device is unplugged */ | ||
85 | static void gigaset_disconnect(struct usb_interface *interface); | ||
86 | |||
87 | |||
88 | /*==============================================================================*/ | ||
89 | |||
90 | struct bas_cardstate { | ||
91 | struct usb_device *udev; /* USB device pointer */ | ||
92 | struct usb_interface *interface; /* interface for this device */ | ||
93 | unsigned char minor; /* starting minor number */ | ||
94 | |||
95 | struct urb *urb_ctrl; /* control pipe default URB */ | ||
96 | struct usb_ctrlrequest dr_ctrl; | ||
97 | struct timer_list timer_ctrl; /* control request timeout */ | ||
98 | |||
99 | struct timer_list timer_atrdy; /* AT command ready timeout */ | ||
100 | struct urb *urb_cmd_out; /* for sending AT commands */ | ||
101 | struct usb_ctrlrequest dr_cmd_out; | ||
102 | int retry_cmd_out; | ||
103 | |||
104 | struct urb *urb_cmd_in; /* for receiving AT replies */ | ||
105 | struct usb_ctrlrequest dr_cmd_in; | ||
106 | struct timer_list timer_cmd_in; /* receive request timeout */ | ||
107 | unsigned char *rcvbuf; /* AT reply receive buffer */ | ||
108 | |||
109 | struct urb *urb_int_in; /* URB for interrupt pipe */ | ||
110 | unsigned char int_in_buf[3]; | ||
111 | |||
112 | spinlock_t lock; /* locks all following */ | ||
113 | atomic_t basstate; /* bitmap (BS_*) */ | ||
114 | int pending; /* uncompleted base request */ | ||
115 | int rcvbuf_size; /* size of AT receive buffer */ | ||
116 | /* 0: no receive in progress */ | ||
117 | int retry_cmd_in; /* receive req retry count */ | ||
118 | }; | ||
119 | |||
120 | /* status of direct USB connection to 307x base (bits in basstate) */ | ||
121 | #define BS_ATOPEN 0x001 | ||
122 | #define BS_B1OPEN 0x002 | ||
123 | #define BS_B2OPEN 0x004 | ||
124 | #define BS_ATREADY 0x008 | ||
125 | #define BS_INIT 0x010 | ||
126 | #define BS_ATTIMER 0x020 | ||
127 | |||
128 | |||
129 | static struct gigaset_driver *driver = NULL; | ||
130 | static struct cardstate *cardstate = NULL; | ||
131 | |||
132 | /* usb specific object needed to register this driver with the usb subsystem */ | ||
133 | static struct usb_driver gigaset_usb_driver = { | ||
134 | .name = GIGASET_MODULENAME, | ||
135 | .probe = gigaset_probe, | ||
136 | .disconnect = gigaset_disconnect, | ||
137 | .id_table = gigaset_table, | ||
138 | }; | ||
139 | |||
140 | /* get message text for USB status code | ||
141 | */ | ||
142 | static char *get_usb_statmsg(int status) | ||
143 | { | ||
144 | static char unkmsg[28]; | ||
145 | |||
146 | switch (status) { | ||
147 | case 0: | ||
148 | return "success"; | ||
149 | case -ENOENT: | ||
150 | return "canceled"; | ||
151 | case -ECONNRESET: | ||
152 | return "canceled (async)"; | ||
153 | case -EINPROGRESS: | ||
154 | return "pending"; | ||
155 | case -EPROTO: | ||
156 | return "bit stuffing or unknown USB error"; | ||
157 | case -EILSEQ: | ||
158 | return "Illegal byte sequence (CRC mismatch)"; | ||
159 | case -EPIPE: | ||
160 | return "babble detect or endpoint stalled"; | ||
161 | case -ENOSR: | ||
162 | return "buffer error"; | ||
163 | case -ETIMEDOUT: | ||
164 | return "timed out"; | ||
165 | case -ENODEV: | ||
166 | return "device not present"; | ||
167 | case -EREMOTEIO: | ||
168 | return "short packet detected"; | ||
169 | case -EXDEV: | ||
170 | return "partial isochronous transfer"; | ||
171 | case -EINVAL: | ||
172 | return "invalid argument"; | ||
173 | case -ENXIO: | ||
174 | return "URB already queued"; | ||
175 | case -EAGAIN: | ||
176 | return "isochronous start frame too early or too much scheduled"; | ||
177 | case -EFBIG: | ||
178 | return "too many isochronous frames requested"; | ||
179 | case -EMSGSIZE: | ||
180 | return "endpoint message size zero"; | ||
181 | case -ESHUTDOWN: | ||
182 | return "endpoint shutdown"; | ||
183 | case -EBUSY: | ||
184 | return "another request pending"; | ||
185 | default: | ||
186 | snprintf(unkmsg, sizeof(unkmsg), "unknown error %d", status); | ||
187 | return unkmsg; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | /* usb_pipetype_str | ||
192 | * retrieve string representation of USB pipe type | ||
193 | */ | ||
194 | static inline char *usb_pipetype_str(int pipe) | ||
195 | { | ||
196 | if (usb_pipeisoc(pipe)) | ||
197 | return "Isoc"; | ||
198 | if (usb_pipeint(pipe)) | ||
199 | return "Int"; | ||
200 | if (usb_pipecontrol(pipe)) | ||
201 | return "Ctrl"; | ||
202 | if (usb_pipebulk(pipe)) | ||
203 | return "Bulk"; | ||
204 | return "?"; | ||
205 | } | ||
206 | |||
207 | /* dump_urb | ||
208 | * write content of URB to syslog for debugging | ||
209 | */ | ||
210 | static inline void dump_urb(enum debuglevel level, const char *tag, | ||
211 | struct urb *urb) | ||
212 | { | ||
213 | #ifdef CONFIG_GIGASET_DEBUG | ||
214 | int i; | ||
215 | IFNULLRET(tag); | ||
216 | dbg(level, "%s urb(0x%08lx)->{", tag, (unsigned long) urb); | ||
217 | if (urb) { | ||
218 | dbg(level, | ||
219 | " dev=0x%08lx, pipe=%s:EP%d/DV%d:%s, " | ||
220 | "status=%d, hcpriv=0x%08lx, transfer_flags=0x%x,", | ||
221 | (unsigned long) urb->dev, | ||
222 | usb_pipetype_str(urb->pipe), | ||
223 | usb_pipeendpoint(urb->pipe), usb_pipedevice(urb->pipe), | ||
224 | usb_pipein(urb->pipe) ? "in" : "out", | ||
225 | urb->status, (unsigned long) urb->hcpriv, | ||
226 | urb->transfer_flags); | ||
227 | dbg(level, | ||
228 | " transfer_buffer=0x%08lx[%d], actual_length=%d, " | ||
229 | "bandwidth=%d, setup_packet=0x%08lx,", | ||
230 | (unsigned long) urb->transfer_buffer, | ||
231 | urb->transfer_buffer_length, urb->actual_length, | ||
232 | urb->bandwidth, (unsigned long) urb->setup_packet); | ||
233 | dbg(level, | ||
234 | " start_frame=%d, number_of_packets=%d, interval=%d, " | ||
235 | "error_count=%d,", | ||
236 | urb->start_frame, urb->number_of_packets, urb->interval, | ||
237 | urb->error_count); | ||
238 | dbg(level, | ||
239 | " context=0x%08lx, complete=0x%08lx, iso_frame_desc[]={", | ||
240 | (unsigned long) urb->context, | ||
241 | (unsigned long) urb->complete); | ||
242 | for (i = 0; i < urb->number_of_packets; i++) { | ||
243 | struct usb_iso_packet_descriptor *pifd = &urb->iso_frame_desc[i]; | ||
244 | dbg(level, | ||
245 | " {offset=%u, length=%u, actual_length=%u, " | ||
246 | "status=%u}", | ||
247 | pifd->offset, pifd->length, pifd->actual_length, | ||
248 | pifd->status); | ||
249 | } | ||
250 | } | ||
251 | dbg(level, "}}"); | ||
252 | #endif | ||
253 | } | ||
254 | |||
255 | /* read/set modem control bits etc. (m10x only) */ | ||
256 | static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, | ||
257 | unsigned new_state) | ||
258 | { | ||
259 | return -EINVAL; | ||
260 | } | ||
261 | |||
262 | static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) | ||
263 | { | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | |||
267 | static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) | ||
268 | { | ||
269 | return -EINVAL; | ||
270 | } | ||
271 | |||
272 | /* error_hangup | ||
273 | * hang up any existing connection because of an unrecoverable error | ||
274 | * This function may be called from any context and takes care of scheduling | ||
275 | * the necessary actions for execution outside of interrupt context. | ||
276 | * argument: | ||
277 | * B channel control structure | ||
278 | */ | ||
279 | static inline void error_hangup(struct bc_state *bcs) | ||
280 | { | ||
281 | struct cardstate *cs = bcs->cs; | ||
282 | |||
283 | dbg(DEBUG_ANY, | ||
284 | "%s: scheduling HUP for channel %d", __func__, bcs->channel); | ||
285 | |||
286 | if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) { | ||
287 | //FIXME what should we do? | ||
288 | return; | ||
289 | } | ||
290 | |||
291 | gigaset_schedule_event(cs); | ||
292 | } | ||
293 | |||
294 | /* error_reset | ||
295 | * reset Gigaset device because of an unrecoverable error | ||
296 | * This function may be called from any context and takes care of scheduling | ||
297 | * the necessary actions for execution outside of interrupt context. | ||
298 | * argument: | ||
299 | * controller state structure | ||
300 | */ | ||
301 | static inline void error_reset(struct cardstate *cs) | ||
302 | { | ||
303 | //FIXME try to recover without bothering the user | ||
304 | err("unrecoverable error - please disconnect the Gigaset base to reset"); | ||
305 | } | ||
306 | |||
307 | /* check_pending | ||
308 | * check for completion of pending control request | ||
309 | * parameter: | ||
310 | * urb USB request block of completed request | ||
311 | * urb->context = hardware specific controller state structure | ||
312 | */ | ||
313 | static void check_pending(struct bas_cardstate *ucs) | ||
314 | { | ||
315 | unsigned long flags; | ||
316 | |||
317 | IFNULLRET(ucs); | ||
318 | IFNULLRET(cardstate); | ||
319 | |||
320 | spin_lock_irqsave(&ucs->lock, flags); | ||
321 | switch (ucs->pending) { | ||
322 | case 0: | ||
323 | break; | ||
324 | case HD_OPEN_ATCHANNEL: | ||
325 | if (atomic_read(&ucs->basstate) & BS_ATOPEN) | ||
326 | ucs->pending = 0; | ||
327 | break; | ||
328 | case HD_OPEN_B1CHANNEL: | ||
329 | if (atomic_read(&ucs->basstate) & BS_B1OPEN) | ||
330 | ucs->pending = 0; | ||
331 | break; | ||
332 | case HD_OPEN_B2CHANNEL: | ||
333 | if (atomic_read(&ucs->basstate) & BS_B2OPEN) | ||
334 | ucs->pending = 0; | ||
335 | break; | ||
336 | case HD_CLOSE_ATCHANNEL: | ||
337 | if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) | ||
338 | ucs->pending = 0; | ||
339 | //wake_up_interruptible(cs->initwait); | ||
340 | //FIXME need own wait queue? | ||
341 | break; | ||
342 | case HD_CLOSE_B1CHANNEL: | ||
343 | if (!(atomic_read(&ucs->basstate) & BS_B1OPEN)) | ||
344 | ucs->pending = 0; | ||
345 | break; | ||
346 | case HD_CLOSE_B2CHANNEL: | ||
347 | if (!(atomic_read(&ucs->basstate) & BS_B2OPEN)) | ||
348 | ucs->pending = 0; | ||
349 | break; | ||
350 | case HD_DEVICE_INIT_ACK: /* no reply expected */ | ||
351 | ucs->pending = 0; | ||
352 | break; | ||
353 | /* HD_READ_ATMESSAGE, HD_WRITE_ATMESSAGE, HD_RESET_INTERRUPTPIPE | ||
354 | * are handled separately and should never end up here | ||
355 | */ | ||
356 | default: | ||
357 | warn("unknown pending request 0x%02x cleared", ucs->pending); | ||
358 | ucs->pending = 0; | ||
359 | } | ||
360 | |||
361 | if (!ucs->pending) | ||
362 | del_timer(&ucs->timer_ctrl); | ||
363 | |||
364 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
365 | } | ||
366 | |||
367 | /* cmd_in_timeout | ||
368 | * timeout routine for command input request | ||
369 | * argument: | ||
370 | * controller state structure | ||
371 | */ | ||
372 | static void cmd_in_timeout(unsigned long data) | ||
373 | { | ||
374 | struct cardstate *cs = (struct cardstate *) data; | ||
375 | struct bas_cardstate *ucs; | ||
376 | unsigned long flags; | ||
377 | |||
378 | IFNULLRET(cs); | ||
379 | ucs = cs->hw.bas; | ||
380 | IFNULLRET(ucs); | ||
381 | |||
382 | spin_lock_irqsave(&cs->lock, flags); | ||
383 | if (!atomic_read(&cs->connected)) { | ||
384 | dbg(DEBUG_USBREQ, "%s: disconnected", __func__); | ||
385 | spin_unlock_irqrestore(&cs->lock, flags); | ||
386 | return; | ||
387 | } | ||
388 | if (!ucs->rcvbuf_size) { | ||
389 | dbg(DEBUG_USBREQ, "%s: no receive in progress", __func__); | ||
390 | spin_unlock_irqrestore(&cs->lock, flags); | ||
391 | return; | ||
392 | } | ||
393 | spin_unlock_irqrestore(&cs->lock, flags); | ||
394 | |||
395 | err("timeout reading AT response"); | ||
396 | error_reset(cs); //FIXME retry? | ||
397 | } | ||
398 | |||
399 | |||
400 | static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs); | ||
401 | |||
402 | /* atread_submit | ||
403 | * submit an HD_READ_ATMESSAGE command URB | ||
404 | * parameters: | ||
405 | * cs controller state structure | ||
406 | * timeout timeout in 1/10 sec., 0: none | ||
407 | * return value: | ||
408 | * 0 on success | ||
409 | * -EINVAL if a NULL pointer is encountered somewhere | ||
410 | * -EBUSY if another request is pending | ||
411 | * any URB submission error code | ||
412 | */ | ||
413 | static int atread_submit(struct cardstate *cs, int timeout) | ||
414 | { | ||
415 | struct bas_cardstate *ucs; | ||
416 | int ret; | ||
417 | |||
418 | IFNULLRETVAL(cs, -EINVAL); | ||
419 | ucs = cs->hw.bas; | ||
420 | IFNULLRETVAL(ucs, -EINVAL); | ||
421 | IFNULLRETVAL(ucs->urb_cmd_in, -EINVAL); | ||
422 | |||
423 | dbg(DEBUG_USBREQ, "-------> HD_READ_ATMESSAGE (%d)", ucs->rcvbuf_size); | ||
424 | |||
425 | if (ucs->urb_cmd_in->status == -EINPROGRESS) { | ||
426 | err("could not submit HD_READ_ATMESSAGE: URB busy"); | ||
427 | return -EBUSY; | ||
428 | } | ||
429 | |||
430 | ucs->dr_cmd_in.bRequestType = IN_VENDOR_REQ; | ||
431 | ucs->dr_cmd_in.bRequest = HD_READ_ATMESSAGE; | ||
432 | ucs->dr_cmd_in.wValue = 0; | ||
433 | ucs->dr_cmd_in.wIndex = 0; | ||
434 | ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size); | ||
435 | usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, | ||
436 | usb_rcvctrlpipe(ucs->udev, 0), | ||
437 | (unsigned char*) & ucs->dr_cmd_in, | ||
438 | ucs->rcvbuf, ucs->rcvbuf_size, | ||
439 | read_ctrl_callback, cs->inbuf); | ||
440 | |||
441 | if ((ret = usb_submit_urb(ucs->urb_cmd_in, SLAB_ATOMIC)) != 0) { | ||
442 | err("could not submit HD_READ_ATMESSAGE: %s", | ||
443 | get_usb_statmsg(ret)); | ||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | if (timeout > 0) { | ||
448 | dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); | ||
449 | ucs->timer_cmd_in.expires = jiffies + timeout * HZ / 10; | ||
450 | ucs->timer_cmd_in.data = (unsigned long) cs; | ||
451 | ucs->timer_cmd_in.function = cmd_in_timeout; | ||
452 | add_timer(&ucs->timer_cmd_in); | ||
453 | } | ||
454 | return 0; | ||
455 | } | ||
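atread_submit() shows the driver's pattern for vendor-specific control transfers: fill a struct usb_ctrlrequest, wrap it with usb_fill_control_urb() on the default control pipe, and submit with SLAB_ATOMIC because this runs in interrupt context. A reduced sketch of the same pattern, with an illustrative helper name and the busy check and timeout left out; IN_VENDOR_REQ and HD_READ_ATMESSAGE are the driver's own definitions from gigaset.h:

/* Sketch: issue a vendor-specific control-IN request on endpoint 0.
 * 'dr' and 'buf' must stay valid until the completion handler runs. */
static int submit_vendor_read(struct usb_device *udev, struct urb *urb,
                              struct usb_ctrlrequest *dr,
                              unsigned char *buf, int len,
                              usb_complete_t complete, void *context)
{
        dr->bRequestType = IN_VENDOR_REQ;
        dr->bRequest = HD_READ_ATMESSAGE;
        dr->wValue = 0;
        dr->wIndex = 0;
        dr->wLength = cpu_to_le16(len);

        usb_fill_control_urb(urb, udev, usb_rcvctrlpipe(udev, 0),
                             (unsigned char *)dr, buf, len, complete, context);
        return usb_submit_urb(urb, SLAB_ATOMIC);
}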
456 | |||
457 | static void stopurbs(struct bas_bc_state *); | ||
458 | static int start_cbsend(struct cardstate *); | ||
459 | |||
460 | /* set/clear bits in base connection state | ||
461 | */ | ||
462 | inline static void update_basstate(struct bas_cardstate *ucs, | ||
463 | int set, int clear) | ||
464 | { | ||
465 | unsigned long flags; | ||
466 | int state; | ||
467 | |||
468 | spin_lock_irqsave(&ucs->lock, flags); | ||
469 | state = atomic_read(&ucs->basstate); | ||
470 | state &= ~clear; | ||
471 | state |= set; | ||
472 | atomic_set(&ucs->basstate, state); | ||
473 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
474 | } | ||
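update_basstate() performs the read-modify-write of the basstate bitmap under ucs->lock, so a caller can set and clear bits in one step that stays consistent with concurrent updates. Typical uses, mirroring the interrupt handler further down:

/* AT channel ready, its timer no longer armed: set one bit, clear the other */
update_basstate(ucs, BS_ATREADY, BS_ATTIMER);

/* B channel 'channel' (0 or 1) reported open by the base */
update_basstate(ucs, BS_B1OPEN << channel, 0);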
475 | |||
476 | |||
477 | /* read_int_callback | ||
478 | * USB completion handler for interrupt pipe input | ||
479 | * called by the USB subsystem in interrupt context | ||
480 | * parameter: | ||
481 | * urb USB request block | ||
482 | * urb->context = controller state structure | ||
483 | */ | ||
484 | static void read_int_callback(struct urb *urb, struct pt_regs *regs) | ||
485 | { | ||
486 | struct cardstate *cs; | ||
487 | struct bas_cardstate *ucs; | ||
488 | struct bc_state *bcs; | ||
489 | unsigned long flags; | ||
490 | int status; | ||
491 | unsigned l; | ||
492 | int channel; | ||
493 | |||
494 | IFNULLRET(urb); | ||
495 | cs = (struct cardstate *) urb->context; | ||
496 | IFNULLRET(cs); | ||
497 | ucs = cs->hw.bas; | ||
498 | IFNULLRET(ucs); | ||
499 | |||
500 | if (unlikely(!atomic_read(&cs->connected))) { | ||
501 | warn("%s: disconnected", __func__); | ||
502 | return; | ||
503 | } | ||
504 | |||
505 | switch (urb->status) { | ||
506 | case 0: /* success */ | ||
507 | break; | ||
508 | case -ENOENT: /* canceled */ | ||
509 | case -ECONNRESET: /* canceled (async) */ | ||
510 | case -EINPROGRESS: /* pending */ | ||
511 | /* ignore silently */ | ||
512 | dbg(DEBUG_USBREQ, | ||
513 | "%s: %s", __func__, get_usb_statmsg(urb->status)); | ||
514 | return; | ||
515 | default: /* severe trouble */ | ||
516 | warn("interrupt read: %s", get_usb_statmsg(urb->status)); | ||
517 | //FIXME corrective action? resubmission always ok? | ||
518 | goto resubmit; | ||
519 | } | ||
520 | |||
521 | l = (unsigned) ucs->int_in_buf[1] + | ||
522 | (((unsigned) ucs->int_in_buf[2]) << 8); | ||
523 | |||
524 | dbg(DEBUG_USBREQ, | ||
525 | "<-------%d: 0x%02x (%u [0x%02x 0x%02x])", urb->actual_length, | ||
526 | (int)ucs->int_in_buf[0], l, | ||
527 | (int)ucs->int_in_buf[1], (int)ucs->int_in_buf[2]); | ||
528 | |||
529 | channel = 0; | ||
530 | |||
531 | switch (ucs->int_in_buf[0]) { | ||
532 | case HD_DEVICE_INIT_OK: | ||
533 | update_basstate(ucs, BS_INIT, 0); | ||
534 | break; | ||
535 | |||
536 | case HD_READY_SEND_ATDATA: | ||
537 | del_timer(&ucs->timer_atrdy); | ||
538 | update_basstate(ucs, BS_ATREADY, BS_ATTIMER); | ||
539 | start_cbsend(cs); | ||
540 | break; | ||
541 | |||
542 | case HD_OPEN_B2CHANNEL_ACK: | ||
543 | ++channel; | ||
544 | case HD_OPEN_B1CHANNEL_ACK: | ||
545 | bcs = cs->bcs + channel; | ||
546 | update_basstate(ucs, BS_B1OPEN << channel, 0); | ||
547 | gigaset_bchannel_up(bcs); | ||
548 | break; | ||
549 | |||
550 | case HD_OPEN_ATCHANNEL_ACK: | ||
551 | update_basstate(ucs, BS_ATOPEN, 0); | ||
552 | start_cbsend(cs); | ||
553 | break; | ||
554 | |||
555 | case HD_CLOSE_B2CHANNEL_ACK: | ||
556 | ++channel; | ||
557 | case HD_CLOSE_B1CHANNEL_ACK: | ||
558 | bcs = cs->bcs + channel; | ||
559 | update_basstate(ucs, 0, BS_B1OPEN << channel); | ||
560 | stopurbs(bcs->hw.bas); | ||
561 | gigaset_bchannel_down(bcs); | ||
562 | break; | ||
563 | |||
564 | case HD_CLOSE_ATCHANNEL_ACK: | ||
565 | update_basstate(ucs, 0, BS_ATOPEN); | ||
566 | break; | ||
567 | |||
568 | case HD_B2_FLOW_CONTROL: | ||
569 | ++channel; | ||
570 | case HD_B1_FLOW_CONTROL: | ||
571 | bcs = cs->bcs + channel; | ||
572 | atomic_add((l - BAS_NORMFRAME) * BAS_CORRFRAMES, | ||
573 | &bcs->hw.bas->corrbytes); | ||
574 | dbg(DEBUG_ISO, | ||
575 | "Flow control (channel %d, sub %d): 0x%02x => %d", | ||
576 | channel, bcs->hw.bas->numsub, l, | ||
577 | atomic_read(&bcs->hw.bas->corrbytes)); | ||
578 | break; | ||
579 | |||
580 | case HD_RECEIVEATDATA_ACK: /* AT response ready to be received */ | ||
581 | if (!l) { | ||
582 | warn("HD_RECEIVEATDATA_ACK with length 0 ignored"); | ||
583 | break; | ||
584 | } | ||
585 | spin_lock_irqsave(&cs->lock, flags); | ||
586 | if (ucs->rcvbuf_size) { | ||
587 | spin_unlock_irqrestore(&cs->lock, flags); | ||
588 | err("receive AT data overrun, %d bytes lost", l); | ||
589 | error_reset(cs); //FIXME reschedule | ||
590 | break; | ||
591 | } | ||
592 | if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) { | ||
593 | spin_unlock_irqrestore(&cs->lock, flags); | ||
594 | err("%s: out of memory, %d bytes lost", __func__, l); | ||
595 | error_reset(cs); //FIXME reschedule | ||
596 | break; | ||
597 | } | ||
598 | ucs->rcvbuf_size = l; | ||
599 | ucs->retry_cmd_in = 0; | ||
600 | if ((status = atread_submit(cs, BAS_TIMEOUT)) < 0) { | ||
601 | kfree(ucs->rcvbuf); | ||
602 | ucs->rcvbuf = NULL; | ||
603 | ucs->rcvbuf_size = 0; | ||
604 | error_reset(cs); //FIXME reschedule | ||
605 | } | ||
606 | spin_unlock_irqrestore(&cs->lock, flags); | ||
607 | break; | ||
608 | |||
609 | case HD_RESET_INTERRUPT_PIPE_ACK: | ||
610 | dbg(DEBUG_USBREQ, "HD_RESET_INTERRUPT_PIPE_ACK"); | ||
611 | break; | ||
612 | |||
613 | case HD_SUSPEND_END: | ||
614 | dbg(DEBUG_USBREQ, "HD_SUSPEND_END"); | ||
615 | break; | ||
616 | |||
617 | default: | ||
618 | warn("unknown Gigaset signal 0x%02x (%u) ignored", | ||
619 | (int) ucs->int_in_buf[0], l); | ||
620 | } | ||
621 | |||
622 | check_pending(ucs); | ||
623 | |||
624 | resubmit: | ||
625 | status = usb_submit_urb(urb, SLAB_ATOMIC); | ||
626 | if (unlikely(status)) { | ||
627 | err("could not resubmit interrupt URB: %s", | ||
628 | get_usb_statmsg(status)); | ||
629 | error_reset(cs); | ||
630 | } | ||
631 | } | ||
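read_int_callback() above interprets each interrupt-pipe report as a message code in int_in_buf[0] plus a 16-bit little-endian parameter assembled from bytes 1 and 2 (a byte count or flow-control value, depending on the message). The stand-alone sketch below only restates that decoding; the message code 0x42 is made up, not a real HD_* constant.

#include <stdio.h>

struct bas_int_msg {
        unsigned char code;     /* message code, byte 0 */
        unsigned int param;     /* little-endian parameter, bytes 1-2 */
};

static struct bas_int_msg decode_int_msg(const unsigned char buf[3])
{
        struct bas_int_msg msg;

        msg.code = buf[0];
        msg.param = (unsigned int) buf[1] | ((unsigned int) buf[2] << 8);
        return msg;
}

int main(void)
{
        unsigned char buf[3] = { 0x42, 0x23, 0x01 };    /* hypothetical report */
        struct bas_int_msg msg = decode_int_msg(buf);

        printf("message 0x%02x, parameter %u\n", msg.code, msg.param);
        return 0;
}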
632 | |||
633 | /* read_ctrl_callback | ||
634 | * USB completion handler for control pipe input | ||
635 | * called by the USB subsystem in interrupt context | ||
636 | * parameter: | ||
637 | * urb USB request block | ||
638 | * urb->context = inbuf structure for controller state | ||
639 | */ | ||
640 | static void read_ctrl_callback(struct urb *urb, struct pt_regs *regs) | ||
641 | { | ||
642 | struct cardstate *cs; | ||
643 | struct bas_cardstate *ucs; | ||
644 | unsigned numbytes; | ||
645 | unsigned long flags; | ||
646 | struct inbuf_t *inbuf; | ||
647 | int have_data = 0; | ||
648 | |||
649 | IFNULLRET(urb); | ||
650 | inbuf = (struct inbuf_t *) urb->context; | ||
651 | IFNULLRET(inbuf); | ||
652 | cs = inbuf->cs; | ||
653 | IFNULLRET(cs); | ||
654 | ucs = cs->hw.bas; | ||
655 | IFNULLRET(ucs); | ||
656 | |||
657 | spin_lock_irqsave(&cs->lock, flags); | ||
658 | if (!atomic_read(&cs->connected)) { | ||
659 | warn("%s: disconnected", __func__); | ||
660 | spin_unlock_irqrestore(&cs->lock, flags); | ||
661 | return; | ||
662 | } | ||
663 | |||
664 | if (!ucs->rcvbuf_size) { | ||
665 | warn("%s: no receive in progress", __func__); | ||
666 | spin_unlock_irqrestore(&cs->lock, flags); | ||
667 | return; | ||
668 | } | ||
669 | |||
670 | del_timer(&ucs->timer_cmd_in); | ||
671 | |||
672 | switch (urb->status) { | ||
673 | case 0: /* normal completion */ | ||
674 | numbytes = urb->actual_length; | ||
675 | if (unlikely(numbytes == 0)) { | ||
676 | warn("control read: empty block received"); | ||
677 | goto retry; | ||
678 | } | ||
679 | if (unlikely(numbytes != ucs->rcvbuf_size)) { | ||
680 | warn("control read: received %d chars, expected %d", | ||
681 | numbytes, ucs->rcvbuf_size); | ||
682 | if (numbytes > ucs->rcvbuf_size) | ||
683 | numbytes = ucs->rcvbuf_size; | ||
684 | } | ||
685 | |||
686 | /* copy received bytes to inbuf */ | ||
687 | have_data = gigaset_fill_inbuf(inbuf, ucs->rcvbuf, numbytes); | ||
688 | |||
689 | if (unlikely(numbytes < ucs->rcvbuf_size)) { | ||
690 | /* incomplete - resubmit for remaining bytes */ | ||
691 | ucs->rcvbuf_size -= numbytes; | ||
692 | ucs->retry_cmd_in = 0; | ||
693 | goto retry; | ||
694 | } | ||
695 | break; | ||
696 | |||
697 | case -ENOENT: /* canceled */ | ||
698 | case -ECONNRESET: /* canceled (async) */ | ||
699 | case -EINPROGRESS: /* pending */ | ||
700 | /* no action necessary */ | ||
701 | dbg(DEBUG_USBREQ, | ||
702 | "%s: %s", __func__, get_usb_statmsg(urb->status)); | ||
703 | break; | ||
704 | |||
705 | default: /* severe trouble */ | ||
706 | warn("control read: %s", get_usb_statmsg(urb->status)); | ||
707 | retry: | ||
708 | if (ucs->retry_cmd_in++ < BAS_RETRY) { | ||
709 | notice("control read: retry %d", ucs->retry_cmd_in); | ||
710 | if (atread_submit(cs, BAS_TIMEOUT) >= 0) { | ||
711 | /* resubmitted - bypass regular exit block */ | ||
712 | spin_unlock_irqrestore(&cs->lock, flags); | ||
713 | return; | ||
714 | } | ||
715 | } else { | ||
716 | err("control read: giving up after %d tries", | ||
717 | ucs->retry_cmd_in); | ||
718 | } | ||
719 | error_reset(cs); | ||
720 | } | ||
721 | |||
722 | kfree(ucs->rcvbuf); | ||
723 | ucs->rcvbuf = NULL; | ||
724 | ucs->rcvbuf_size = 0; | ||
725 | spin_unlock_irqrestore(&cs->lock, flags); | ||
726 | if (have_data) { | ||
727 | dbg(DEBUG_INTR, "%s-->BH", __func__); | ||
728 | gigaset_schedule_event(cs); | ||
729 | } | ||
730 | } | ||
731 | |||
732 | /* read_iso_callback | ||
733 | * USB completion handler for B channel isochronous input | ||
734 | * called by the USB subsystem in interrupt context | ||
735 | * parameter: | ||
736 | * urb USB request block of completed request | ||
737 | * urb->context = bc_state structure | ||
738 | */ | ||
739 | static void read_iso_callback(struct urb *urb, struct pt_regs *regs) | ||
740 | { | ||
741 | struct bc_state *bcs; | ||
742 | struct bas_bc_state *ubc; | ||
743 | unsigned long flags; | ||
744 | int i, rc; | ||
745 | |||
746 | IFNULLRET(urb); | ||
747 | IFNULLRET(urb->context); | ||
748 | IFNULLRET(cardstate); | ||
749 | |||
750 | /* status codes not worth bothering the tasklet with */ | ||
751 | if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET || | ||
752 | urb->status == -EINPROGRESS)) { | ||
753 | dbg(DEBUG_ISO, | ||
754 | "%s: %s", __func__, get_usb_statmsg(urb->status)); | ||
755 | return; | ||
756 | } | ||
757 | |||
758 | bcs = (struct bc_state *) urb->context; | ||
759 | ubc = bcs->hw.bas; | ||
760 | IFNULLRET(ubc); | ||
761 | |||
762 | spin_lock_irqsave(&ubc->isoinlock, flags); | ||
763 | if (likely(ubc->isoindone == NULL)) { | ||
764 | /* pass URB to tasklet */ | ||
765 | ubc->isoindone = urb; | ||
766 | tasklet_schedule(&ubc->rcvd_tasklet); | ||
767 | } else { | ||
768 | /* tasklet still busy, drop data and resubmit URB */ | ||
769 | ubc->loststatus = urb->status; | ||
770 | for (i = 0; i < BAS_NUMFRAMES; i++) { | ||
771 | ubc->isoinlost += urb->iso_frame_desc[i].actual_length; | ||
772 | if (unlikely(urb->iso_frame_desc[i].status != 0 && | ||
773 | urb->iso_frame_desc[i].status != -EINPROGRESS)) { | ||
774 | ubc->loststatus = urb->iso_frame_desc[i].status; | ||
775 | } | ||
776 | urb->iso_frame_desc[i].status = 0; | ||
777 | urb->iso_frame_desc[i].actual_length = 0; | ||
778 | } | ||
779 | if (likely(atomic_read(&ubc->running))) { | ||
780 | urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ | ||
781 | urb->transfer_flags = URB_ISO_ASAP; | ||
782 | urb->number_of_packets = BAS_NUMFRAMES; | ||
783 | dbg(DEBUG_ISO, "%s: isoc read overrun/resubmit", __func__); | ||
784 | rc = usb_submit_urb(urb, SLAB_ATOMIC); | ||
785 | if (unlikely(rc != 0)) { | ||
786 | err("could not resubmit isochronous read URB: %s", | ||
787 | get_usb_statmsg(rc)); | ||
788 | dump_urb(DEBUG_ISO, "isoc read", urb); | ||
789 | error_hangup(bcs); | ||
790 | } | ||
791 | } | ||
792 | } | ||
793 | spin_unlock_irqrestore(&ubc->isoinlock, flags); | ||
794 | } | ||
795 | |||
796 | /* write_iso_callback | ||
797 | * USB completion handler for B channel isochronous output | ||
798 | * called by the USB subsystem in interrupt context | ||
799 | * parameter: | ||
800 | * urb USB request block of completed request | ||
801 | * urb->context = isow_urbctx_t structure | ||
802 | */ | ||
803 | static void write_iso_callback(struct urb *urb, struct pt_regs *regs) | ||
804 | { | ||
805 | struct isow_urbctx_t *ucx; | ||
806 | struct bas_bc_state *ubc; | ||
807 | unsigned long flags; | ||
808 | |||
809 | IFNULLRET(urb); | ||
810 | IFNULLRET(urb->context); | ||
811 | IFNULLRET(cardstate); | ||
812 | |||
813 | /* status codes not worth bothering the tasklet with */ | ||
814 | if (unlikely(urb->status == -ENOENT || urb->status == -ECONNRESET || | ||
815 | urb->status == -EINPROGRESS)) { | ||
816 | dbg(DEBUG_ISO, | ||
817 | "%s: %s", __func__, get_usb_statmsg(urb->status)); | ||
818 | return; | ||
819 | } | ||
820 | |||
821 | /* pass URB context to tasklet */ | ||
822 | ucx = (struct isow_urbctx_t *) urb->context; | ||
823 | IFNULLRET(ucx->bcs); | ||
824 | ubc = ucx->bcs->hw.bas; | ||
825 | IFNULLRET(ubc); | ||
826 | |||
827 | spin_lock_irqsave(&ubc->isooutlock, flags); | ||
828 | ubc->isooutovfl = ubc->isooutdone; | ||
829 | ubc->isooutdone = ucx; | ||
830 | spin_unlock_irqrestore(&ubc->isooutlock, flags); | ||
831 | tasklet_schedule(&ubc->sent_tasklet); | ||
832 | } | ||
833 | |||
834 | /* starturbs | ||
835 | * prepare and submit USB request blocks for isochronous input and output | ||
836 | * argument: | ||
837 | * B channel control structure | ||
838 | * return value: | ||
839 | * 0 on success | ||
840 | * < 0 on error (no URBs submitted) | ||
841 | */ | ||
842 | static int starturbs(struct bc_state *bcs) | ||
843 | { | ||
844 | struct urb *urb; | ||
845 | struct bas_bc_state *ubc; | ||
846 | int j, k; | ||
847 | int rc; | ||
848 | |||
849 | IFNULLRETVAL(bcs, -EFAULT); | ||
850 | ubc = bcs->hw.bas; | ||
851 | IFNULLRETVAL(ubc, -EFAULT); | ||
852 | |||
853 | /* initialize L2 reception */ | ||
854 | if (bcs->proto2 == ISDN_PROTO_L2_HDLC) | ||
855 | bcs->inputstate |= INS_flag_hunt; | ||
856 | |||
857 | /* submit all isochronous input URBs */ | ||
858 | atomic_set(&ubc->running, 1); | ||
859 | for (k = 0; k < BAS_INURBS; k++) { | ||
860 | urb = ubc->isoinurbs[k]; | ||
861 | if (!urb) { | ||
862 | err("isoinurbs[%d]==NULL", k); | ||
863 | rc = -EFAULT; | ||
864 | goto error; | ||
865 | } | ||
866 | |||
867 | urb->dev = bcs->cs->hw.bas->udev; | ||
868 | urb->pipe = usb_rcvisocpipe(urb->dev, 3 + 2 * bcs->channel); | ||
869 | urb->transfer_flags = URB_ISO_ASAP; | ||
870 | urb->transfer_buffer = ubc->isoinbuf + k * BAS_INBUFSIZE; | ||
871 | urb->transfer_buffer_length = BAS_INBUFSIZE; | ||
872 | urb->number_of_packets = BAS_NUMFRAMES; | ||
873 | urb->interval = BAS_FRAMETIME; | ||
874 | urb->complete = read_iso_callback; | ||
875 | urb->context = bcs; | ||
876 | for (j = 0; j < BAS_NUMFRAMES; j++) { | ||
877 | urb->iso_frame_desc[j].offset = j * BAS_MAXFRAME; | ||
878 | urb->iso_frame_desc[j].length = BAS_MAXFRAME; | ||
879 | urb->iso_frame_desc[j].status = 0; | ||
880 | urb->iso_frame_desc[j].actual_length = 0; | ||
881 | } | ||
882 | |||
883 | dump_urb(DEBUG_ISO, "Initial isoc read", urb); | ||
884 | if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { | ||
885 | err("could not submit isochronous read URB %d: %s", | ||
886 | k, get_usb_statmsg(rc)); | ||
887 | goto error; | ||
888 | } | ||
889 | } | ||
890 | |||
891 | /* initialize L2 transmission */ | ||
892 | gigaset_isowbuf_init(ubc->isooutbuf, PPP_FLAG); | ||
893 | |||
894 | /* set up isochronous output URBs for flag idling */ | ||
895 | for (k = 0; k < BAS_OUTURBS; ++k) { | ||
896 | urb = ubc->isoouturbs[k].urb; | ||
897 | if (!urb) { | ||
898 | err("isoouturbs[%d].urb==NULL", k); | ||
899 | rc = -EFAULT; | ||
900 | goto error; | ||
901 | } | ||
902 | urb->dev = bcs->cs->hw.bas->udev; | ||
903 | urb->pipe = usb_sndisocpipe(urb->dev, 4 + 2 * bcs->channel); | ||
904 | urb->transfer_flags = URB_ISO_ASAP; | ||
905 | urb->transfer_buffer = ubc->isooutbuf->data; | ||
906 | urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); | ||
907 | urb->number_of_packets = BAS_NUMFRAMES; | ||
908 | urb->interval = BAS_FRAMETIME; | ||
909 | urb->complete = write_iso_callback; | ||
910 | urb->context = &ubc->isoouturbs[k]; | ||
911 | for (j = 0; j < BAS_NUMFRAMES; ++j) { | ||
912 | urb->iso_frame_desc[j].offset = BAS_OUTBUFSIZE; | ||
913 | urb->iso_frame_desc[j].length = BAS_NORMFRAME; | ||
914 | urb->iso_frame_desc[j].status = 0; | ||
915 | urb->iso_frame_desc[j].actual_length = 0; | ||
916 | } | ||
917 | ubc->isoouturbs[k].limit = -1; | ||
918 | } | ||
919 | |||
920 | /* submit two URBs, keep third one */ | ||
921 | for (k = 0; k < 2; ++k) { | ||
922 |  dump_urb(DEBUG_ISO, "Initial isoc write", ubc->isoouturbs[k].urb); | ||
923 | rc = usb_submit_urb(ubc->isoouturbs[k].urb, SLAB_ATOMIC); | ||
924 | if (rc != 0) { | ||
925 | err("could not submit isochronous write URB %d: %s", | ||
926 | k, get_usb_statmsg(rc)); | ||
927 | goto error; | ||
928 | } | ||
929 | } | ||
930 | dump_urb(DEBUG_ISO, "Initial isoc write (free)", urb); | ||
931 | ubc->isooutfree = &ubc->isoouturbs[2]; | ||
932 | ubc->isooutdone = ubc->isooutovfl = NULL; | ||
933 | return 0; | ||
934 | error: | ||
935 | stopurbs(ubc); | ||
936 | return rc; | ||
937 | } | ||
938 | |||
939 | /* stopurbs | ||
940 | * cancel the USB request blocks for isochronous input and output | ||
941 | * errors are silently ignored | ||
942 | * argument: | ||
943 | * B channel control structure | ||
944 | */ | ||
945 | static void stopurbs(struct bas_bc_state *ubc) | ||
946 | { | ||
947 | int k, rc; | ||
948 | |||
949 | IFNULLRET(ubc); | ||
950 | |||
951 | atomic_set(&ubc->running, 0); | ||
952 | |||
953 | for (k = 0; k < BAS_INURBS; ++k) { | ||
954 | rc = usb_unlink_urb(ubc->isoinurbs[k]); | ||
955 | dbg(DEBUG_ISO, "%s: isoc input URB %d unlinked, result = %d", | ||
956 | __func__, k, rc); | ||
957 | } | ||
958 | |||
959 | for (k = 0; k < BAS_OUTURBS; ++k) { | ||
960 | rc = usb_unlink_urb(ubc->isoouturbs[k].urb); | ||
961 | dbg(DEBUG_ISO, "%s: isoc output URB %d unlinked, result = %d", | ||
962 | __func__, k, rc); | ||
963 | } | ||
964 | } | ||
965 | |||
966 | /* Isochronous Write - Bottom Half */ | ||
967 | /* =============================== */ | ||
968 | |||
969 | /* submit_iso_write_urb | ||
970 | * fill and submit the next isochronous write URB | ||
971 | * parameters: | ||
972 | * ucx		isochronous write URB context structure | ||
973 | * return value: | ||
974 | * number of frames submitted in URB | ||
975 | * 0 if URB not submitted because no data available (isooutbuf busy) | ||
976 | * error code < 0 on error | ||
977 | */ | ||
978 | static int submit_iso_write_urb(struct isow_urbctx_t *ucx) | ||
979 | { | ||
980 | struct urb *urb; | ||
981 | struct bas_bc_state *ubc; | ||
982 | struct usb_iso_packet_descriptor *ifd; | ||
983 | int corrbytes, nframe, rc; | ||
984 | |||
985 | IFNULLRETVAL(ucx, -EFAULT); | ||
986 | urb = ucx->urb; | ||
987 | IFNULLRETVAL(urb, -EFAULT); | ||
988 | IFNULLRETVAL(ucx->bcs, -EFAULT); | ||
989 | ubc = ucx->bcs->hw.bas; | ||
990 | IFNULLRETVAL(ubc, -EFAULT); | ||
991 | |||
992 | urb->dev = ucx->bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ | ||
993 | urb->transfer_flags = URB_ISO_ASAP; | ||
994 | urb->transfer_buffer = ubc->isooutbuf->data; | ||
995 | urb->transfer_buffer_length = sizeof(ubc->isooutbuf->data); | ||
996 | |||
997 | for (nframe = 0; nframe < BAS_NUMFRAMES; nframe++) { | ||
998 | ifd = &urb->iso_frame_desc[nframe]; | ||
999 | |||
1000 | /* compute frame length according to flow control */ | ||
1001 | ifd->length = BAS_NORMFRAME; | ||
1002 | if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) { | ||
1003 | dbg(DEBUG_ISO, "%s: corrbytes=%d", __func__, corrbytes); | ||
1004 | if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME) | ||
1005 | corrbytes = BAS_HIGHFRAME - BAS_NORMFRAME; | ||
1006 | else if (corrbytes < BAS_LOWFRAME - BAS_NORMFRAME) | ||
1007 | corrbytes = BAS_LOWFRAME - BAS_NORMFRAME; | ||
1008 | ifd->length += corrbytes; | ||
1009 | atomic_add(-corrbytes, &ubc->corrbytes); | ||
1010 | } | ||
1011 | //dbg(DEBUG_ISO, "%s: frame %d length=%d", __func__, nframe, ifd->length); | ||
1012 | |||
1013 | /* retrieve block of data to send */ | ||
1014 | ifd->offset = gigaset_isowbuf_getbytes(ubc->isooutbuf, ifd->length); | ||
1015 | if (ifd->offset < 0) { | ||
1016 | if (ifd->offset == -EBUSY) { | ||
1017 | dbg(DEBUG_ISO, "%s: buffer busy at frame %d", | ||
1018 | __func__, nframe); | ||
1019 | /* tasklet will be restarted from gigaset_send_skb() */ | ||
1020 | } else { | ||
1021 | err("%s: buffer error %d at frame %d", | ||
1022 | __func__, ifd->offset, nframe); | ||
1023 | return ifd->offset; | ||
1024 | } | ||
1025 | break; | ||
1026 | } | ||
1027 | ucx->limit = atomic_read(&ubc->isooutbuf->nextread); | ||
1028 | ifd->status = 0; | ||
1029 | ifd->actual_length = 0; | ||
1030 | } | ||
1031 | if ((urb->number_of_packets = nframe) > 0) { | ||
1032 | if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { | ||
1033 | err("could not submit isochronous write URB: %s", | ||
1034 | get_usb_statmsg(rc)); | ||
1035 | dump_urb(DEBUG_ISO, "isoc write", urb); | ||
1036 | return rc; | ||
1037 | } | ||
1038 | ++ubc->numsub; | ||
1039 | } | ||
1040 | return nframe; | ||
1041 | } | ||
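The per-frame length computation above spreads the accumulated flow-control correction over successive frames, clamping each frame between BAS_LOWFRAME and BAS_HIGHFRAME bytes around the nominal BAS_NORMFRAME. A stand-alone sketch of that clamping follows; the three constants are placeholders, not the driver's actual values.

#include <stdio.h>

#define NORMFRAME 8             /* placeholder for BAS_NORMFRAME */
#define HIGHFRAME 10            /* placeholder for BAS_HIGHFRAME */
#define LOWFRAME  5             /* placeholder for BAS_LOWFRAME */

static int frame_length(int *corrbytes)
{
        int corr = *corrbytes;
        int len = NORMFRAME;

        if (corr != 0) {
                if (corr > HIGHFRAME - NORMFRAME)
                        corr = HIGHFRAME - NORMFRAME;
                else if (corr < LOWFRAME - NORMFRAME)
                        corr = LOWFRAME - NORMFRAME;
                len += corr;
                *corrbytes -= corr;     /* consume part of the correction */
        }
        return len;
}

int main(void)
{
        int corrbytes = 7;      /* pretend the base asked us to send faster */

        while (corrbytes != 0)
                printf("frame length %d, remaining correction %d\n",
                       frame_length(&corrbytes), corrbytes);
        return 0;
}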
1042 | |||
1043 | /* write_iso_tasklet | ||
1044 | * tasklet scheduled when an isochronous output URB to the Gigaset device | ||
1045 | * has completed | ||
1046 | * parameter: | ||
1047 | * data B channel state structure | ||
1048 | */ | ||
1049 | static void write_iso_tasklet(unsigned long data) | ||
1050 | { | ||
1051 | struct bc_state *bcs; | ||
1052 | struct bas_bc_state *ubc; | ||
1053 | struct cardstate *cs; | ||
1054 | struct isow_urbctx_t *done, *next, *ovfl; | ||
1055 | struct urb *urb; | ||
1056 | struct usb_iso_packet_descriptor *ifd; | ||
1057 | int offset; | ||
1058 | unsigned long flags; | ||
1059 | int i; | ||
1060 | struct sk_buff *skb; | ||
1061 | int len; | ||
1062 | |||
1063 | bcs = (struct bc_state *) data; | ||
1064 | IFNULLRET(bcs); | ||
1065 | ubc = bcs->hw.bas; | ||
1066 | IFNULLRET(ubc); | ||
1067 | cs = bcs->cs; | ||
1068 | IFNULLRET(cs); | ||
1069 | |||
1070 | /* loop while completed URBs arrive in time */ | ||
1071 | for (;;) { | ||
1072 | if (unlikely(!atomic_read(&cs->connected))) { | ||
1073 | warn("%s: disconnected", __func__); | ||
1074 | return; | ||
1075 | } | ||
1076 | |||
1077 | if (unlikely(!(atomic_read(&ubc->running)))) { | ||
1078 | dbg(DEBUG_ISO, "%s: not running", __func__); | ||
1079 | return; | ||
1080 | } | ||
1081 | |||
1082 | /* retrieve completed URBs */ | ||
1083 | spin_lock_irqsave(&ubc->isooutlock, flags); | ||
1084 | done = ubc->isooutdone; | ||
1085 | ubc->isooutdone = NULL; | ||
1086 | ovfl = ubc->isooutovfl; | ||
1087 | ubc->isooutovfl = NULL; | ||
1088 | spin_unlock_irqrestore(&ubc->isooutlock, flags); | ||
1089 | if (ovfl) { | ||
1090 | err("isochronous write buffer underrun - buy a faster machine :-)"); | ||
1091 | error_hangup(bcs); | ||
1092 | break; | ||
1093 | } | ||
1094 | if (!done) | ||
1095 | break; | ||
1096 | |||
1097 | /* submit free URB if available */ | ||
1098 | spin_lock_irqsave(&ubc->isooutlock, flags); | ||
1099 | next = ubc->isooutfree; | ||
1100 | ubc->isooutfree = NULL; | ||
1101 | spin_unlock_irqrestore(&ubc->isooutlock, flags); | ||
1102 | if (next) { | ||
1103 | if (submit_iso_write_urb(next) <= 0) { | ||
1104 | /* could not submit URB, put it back */ | ||
1105 | spin_lock_irqsave(&ubc->isooutlock, flags); | ||
1106 | if (ubc->isooutfree == NULL) { | ||
1107 | ubc->isooutfree = next; | ||
1108 | next = NULL; | ||
1109 | } | ||
1110 | spin_unlock_irqrestore(&ubc->isooutlock, flags); | ||
1111 | if (next) { | ||
1112 | /* couldn't put it back */ | ||
1113 | err("losing isochronous write URB"); | ||
1114 | error_hangup(bcs); | ||
1115 | } | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | /* process completed URB */ | ||
1120 | urb = done->urb; | ||
1121 | switch (urb->status) { | ||
1122 | case 0: /* normal completion */ | ||
1123 | break; | ||
1124 | case -EXDEV: /* inspect individual frames */ | ||
1125 | /* assumptions (for lack of documentation): | ||
1126 | * - actual_length bytes of the frame in error are successfully sent | ||
1127 | * - all following frames are not sent at all | ||
1128 | */ | ||
1129 | dbg(DEBUG_ISO, "%s: URB partially completed", __func__); | ||
1130 | offset = done->limit; /* just in case */ | ||
1131 | for (i = 0; i < BAS_NUMFRAMES; i++) { | ||
1132 | ifd = &urb->iso_frame_desc[i]; | ||
1133 | if (ifd->status || | ||
1134 | ifd->actual_length != ifd->length) { | ||
1135 | warn("isochronous write: frame %d: %s, " | ||
1136 | "only %d of %d bytes sent", | ||
1137 | i, get_usb_statmsg(ifd->status), | ||
1138 | ifd->actual_length, ifd->length); | ||
1139 | offset = (ifd->offset + | ||
1140 | ifd->actual_length) | ||
1141 | % BAS_OUTBUFSIZE; | ||
1142 | break; | ||
1143 | } | ||
1144 | } | ||
1145 | #ifdef CONFIG_GIGASET_DEBUG | ||
1146 | /* check assumption on remaining frames */ | ||
1147 | for (; i < BAS_NUMFRAMES; i++) { | ||
1148 | ifd = &urb->iso_frame_desc[i]; | ||
1149 | if (ifd->status != -EINPROGRESS | ||
1150 | || ifd->actual_length != 0) { | ||
1151 | warn("isochronous write: frame %d: %s, " | ||
1152 | "%d of %d bytes sent", | ||
1153 | i, get_usb_statmsg(ifd->status), | ||
1154 | ifd->actual_length, ifd->length); | ||
1155 | offset = (ifd->offset + | ||
1156 | ifd->actual_length) | ||
1157 | % BAS_OUTBUFSIZE; | ||
1158 | break; | ||
1159 | } | ||
1160 | } | ||
1161 | #endif | ||
1162 | break; | ||
1163 | case -EPIPE: //FIXME is this the code for "underrun"? | ||
1164 | err("isochronous write stalled"); | ||
1165 | error_hangup(bcs); | ||
1166 | break; | ||
1167 | default: /* severe trouble */ | ||
1168 | warn("isochronous write: %s", | ||
1169 | get_usb_statmsg(urb->status)); | ||
1170 | } | ||
1171 | |||
1172 | /* mark the write buffer area covered by this URB as free */ | ||
1173 | if (done->limit >= 0) | ||
1174 | atomic_set(&ubc->isooutbuf->read, done->limit); | ||
1175 | |||
1176 | /* mark URB as free */ | ||
1177 | spin_lock_irqsave(&ubc->isooutlock, flags); | ||
1178 | next = ubc->isooutfree; | ||
1179 | ubc->isooutfree = done; | ||
1180 | spin_unlock_irqrestore(&ubc->isooutlock, flags); | ||
1181 | if (next) { | ||
1182 | /* only one URB still active - resubmit one */ | ||
1183 | if (submit_iso_write_urb(next) <= 0) { | ||
1184 | /* couldn't submit */ | ||
1185 | error_hangup(bcs); | ||
1186 | } | ||
1187 | } | ||
1188 | } | ||
1189 | |||
1190 | /* process queued SKBs */ | ||
1191 | while ((skb = skb_dequeue(&bcs->squeue))) { | ||
1192 | /* copy to output buffer, doing L2 encapsulation */ | ||
1193 | len = skb->len; | ||
1194 | if (gigaset_isoc_buildframe(bcs, skb->data, len) == -EAGAIN) { | ||
1195 | /* insufficient buffer space, push back onto queue */ | ||
1196 | skb_queue_head(&bcs->squeue, skb); | ||
1197 | dbg(DEBUG_ISO, "%s: skb requeued, qlen=%d", | ||
1198 | __func__, skb_queue_len(&bcs->squeue)); | ||
1199 | break; | ||
1200 | } | ||
1201 | skb_pull(skb, len); | ||
1202 | gigaset_skb_sent(bcs, skb); | ||
1203 | dev_kfree_skb_any(skb); | ||
1204 | } | ||
1205 | } | ||
1206 | |||
1207 | /* Isochronous Read - Bottom Half */ | ||
1208 | /* ============================== */ | ||
1209 | |||
1210 | /* read_iso_tasklet | ||
1211 | * tasklet scheduled when an isochronous input URB from the Gigaset device | ||
1212 | * has completed | ||
1213 | * parameter: | ||
1214 | * data B channel state structure | ||
1215 | */ | ||
1216 | static void read_iso_tasklet(unsigned long data) | ||
1217 | { | ||
1218 | struct bc_state *bcs; | ||
1219 | struct bas_bc_state *ubc; | ||
1220 | struct cardstate *cs; | ||
1221 | struct urb *urb; | ||
1222 | char *rcvbuf; | ||
1223 | unsigned long flags; | ||
1224 | int totleft, numbytes, offset, frame, rc; | ||
1225 | |||
1226 | bcs = (struct bc_state *) data; | ||
1227 | IFNULLRET(bcs); | ||
1228 | ubc = bcs->hw.bas; | ||
1229 | IFNULLRET(ubc); | ||
1230 | cs = bcs->cs; | ||
1231 | IFNULLRET(cs); | ||
1232 | |||
1233 | /* loop while more completed URBs arrive in the meantime */ | ||
1234 | for (;;) { | ||
1235 | if (!atomic_read(&cs->connected)) { | ||
1236 | warn("%s: disconnected", __func__); | ||
1237 | return; | ||
1238 | } | ||
1239 | |||
1240 | /* retrieve URB */ | ||
1241 | spin_lock_irqsave(&ubc->isoinlock, flags); | ||
1242 | if (!(urb = ubc->isoindone)) { | ||
1243 | spin_unlock_irqrestore(&ubc->isoinlock, flags); | ||
1244 | return; | ||
1245 | } | ||
1246 | ubc->isoindone = NULL; | ||
1247 | if (unlikely(ubc->loststatus != -EINPROGRESS)) { | ||
1248 | warn("isochronous read overrun, dropped URB with status: %s, %d bytes lost", | ||
1249 | get_usb_statmsg(ubc->loststatus), ubc->isoinlost); | ||
1250 | ubc->loststatus = -EINPROGRESS; | ||
1251 | } | ||
1252 | spin_unlock_irqrestore(&ubc->isoinlock, flags); | ||
1253 | |||
1254 | if (unlikely(!(atomic_read(&ubc->running)))) { | ||
1255 | dbg(DEBUG_ISO, "%s: channel not running, dropped URB with status: %s", | ||
1256 | __func__, get_usb_statmsg(urb->status)); | ||
1257 | return; | ||
1258 | } | ||
1259 | |||
1260 | switch (urb->status) { | ||
1261 | case 0: /* normal completion */ | ||
1262 | break; | ||
1263 | case -EXDEV: /* inspect individual frames (we do that anyway) */ | ||
1264 | dbg(DEBUG_ISO, "%s: URB partially completed", __func__); | ||
1265 | break; | ||
1266 | case -ENOENT: | ||
1267 | case -ECONNRESET: | ||
1268 | dbg(DEBUG_ISO, "%s: URB canceled", __func__); | ||
1269 | continue; /* -> skip */ | ||
1270 | case -EINPROGRESS: /* huh? */ | ||
1271 | dbg(DEBUG_ISO, "%s: URB still pending", __func__); | ||
1272 | continue; /* -> skip */ | ||
1273 | case -EPIPE: | ||
1274 | err("isochronous read stalled"); | ||
1275 | error_hangup(bcs); | ||
1276 | continue; /* -> skip */ | ||
1277 | default: /* severe trouble */ | ||
1278 | warn("isochronous read: %s", | ||
1279 | get_usb_statmsg(urb->status)); | ||
1280 | goto error; | ||
1281 | } | ||
1282 | |||
1283 | rcvbuf = urb->transfer_buffer; | ||
1284 | totleft = urb->actual_length; | ||
1285 | for (frame = 0; totleft > 0 && frame < BAS_NUMFRAMES; frame++) { | ||
1286 | if (unlikely(urb->iso_frame_desc[frame].status)) { | ||
1287 | warn("isochronous read: frame %d: %s", | ||
1288 | frame, get_usb_statmsg(urb->iso_frame_desc[frame].status)); | ||
1289 | break; | ||
1290 | } | ||
1291 | numbytes = urb->iso_frame_desc[frame].actual_length; | ||
1292 | if (unlikely(numbytes > BAS_MAXFRAME)) { | ||
1293 | warn("isochronous read: frame %d: numbytes (%d) > BAS_MAXFRAME", | ||
1294 | frame, numbytes); | ||
1295 | break; | ||
1296 | } | ||
1297 | if (unlikely(numbytes > totleft)) { | ||
1298 | warn("isochronous read: frame %d: numbytes (%d) > totleft (%d)", | ||
1299 | frame, numbytes, totleft); | ||
1300 | break; | ||
1301 | } | ||
1302 | offset = urb->iso_frame_desc[frame].offset; | ||
1303 | if (unlikely(offset + numbytes > BAS_INBUFSIZE)) { | ||
1304 | warn("isochronous read: frame %d: offset (%d) + numbytes (%d) > BAS_INBUFSIZE", | ||
1305 | frame, offset, numbytes); | ||
1306 | break; | ||
1307 | } | ||
1308 | gigaset_isoc_receive(rcvbuf + offset, numbytes, bcs); | ||
1309 | totleft -= numbytes; | ||
1310 | } | ||
1311 | if (unlikely(totleft > 0)) | ||
1312 | warn("isochronous read: %d data bytes missing", | ||
1313 | totleft); | ||
1314 | |||
1315 | error: | ||
1316 | /* URB processed, resubmit */ | ||
1317 | for (frame = 0; frame < BAS_NUMFRAMES; frame++) { | ||
1318 | urb->iso_frame_desc[frame].status = 0; | ||
1319 | urb->iso_frame_desc[frame].actual_length = 0; | ||
1320 | } | ||
1321 | urb->dev = bcs->cs->hw.bas->udev; /* clobbered by USB subsystem */ | ||
1322 | urb->transfer_flags = URB_ISO_ASAP; | ||
1323 | urb->number_of_packets = BAS_NUMFRAMES; | ||
1324 | if ((rc = usb_submit_urb(urb, SLAB_ATOMIC)) != 0) { | ||
1325 | err("could not resubmit isochronous read URB: %s", | ||
1326 | get_usb_statmsg(rc)); | ||
1327 | dump_urb(DEBUG_ISO, "resubmit iso read", urb); | ||
1328 | error_hangup(bcs); | ||
1329 | } | ||
1330 | } | ||
1331 | } | ||
1332 | |||
1333 | /* Channel Operations */ | ||
1334 | /* ================== */ | ||
1335 | |||
1336 | /* req_timeout | ||
1337 | * timeout routine for control output request | ||
1338 | * argument: | ||
1339 | * B channel control structure | ||
1340 | */ | ||
1341 | static void req_timeout(unsigned long data) | ||
1342 | { | ||
1343 | struct bc_state *bcs = (struct bc_state *) data; | ||
1344 | struct bas_cardstate *ucs; | ||
1345 | int pending; | ||
1346 | unsigned long flags; | ||
1347 | |||
1348 | IFNULLRET(bcs); | ||
1349 | IFNULLRET(bcs->cs); | ||
1350 | ucs = bcs->cs->hw.bas; | ||
1351 | IFNULLRET(ucs); | ||
1352 | |||
1353 | check_pending(ucs); | ||
1354 | |||
1355 | spin_lock_irqsave(&ucs->lock, flags); | ||
1356 | pending = ucs->pending; | ||
1357 | ucs->pending = 0; | ||
1358 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1359 | |||
1360 | switch (pending) { | ||
1361 | case 0: /* no pending request */ | ||
1362 | dbg(DEBUG_USBREQ, "%s: no request pending", __func__); | ||
1363 | break; | ||
1364 | |||
1365 | case HD_OPEN_ATCHANNEL: | ||
1366 | err("timeout opening AT channel"); | ||
1367 | error_reset(bcs->cs); | ||
1368 | break; | ||
1369 | |||
1370 | case HD_OPEN_B2CHANNEL: | ||
1371 | case HD_OPEN_B1CHANNEL: | ||
1372 | err("timeout opening channel %d", bcs->channel + 1); | ||
1373 | error_hangup(bcs); | ||
1374 | break; | ||
1375 | |||
1376 | case HD_CLOSE_ATCHANNEL: | ||
1377 | err("timeout closing AT channel"); | ||
1378 | //wake_up_interruptible(cs->initwait); | ||
1379 | //FIXME need own wait queue? | ||
1380 | break; | ||
1381 | |||
1382 | case HD_CLOSE_B2CHANNEL: | ||
1383 | case HD_CLOSE_B1CHANNEL: | ||
1384 | err("timeout closing channel %d", bcs->channel + 1); | ||
1385 | break; | ||
1386 | |||
1387 | default: | ||
1388 | warn("request 0x%02x timed out, clearing", pending); | ||
1389 | } | ||
1390 | } | ||
1391 | |||
1392 | /* write_ctrl_callback | ||
1393 | * USB completion handler for control pipe output | ||
1394 | * called by the USB subsystem in interrupt context | ||
1395 | * parameter: | ||
1396 | * urb USB request block of completed request | ||
1397 | * urb->context = hardware specific controller state structure | ||
1398 | */ | ||
1399 | static void write_ctrl_callback(struct urb *urb, struct pt_regs *regs) | ||
1400 | { | ||
1401 | struct bas_cardstate *ucs; | ||
1402 | unsigned long flags; | ||
1403 | |||
1404 | IFNULLRET(urb); | ||
1405 | IFNULLRET(urb->context); | ||
1406 | IFNULLRET(cardstate); | ||
1407 | |||
1408 | ucs = (struct bas_cardstate *) urb->context; | ||
1409 | spin_lock_irqsave(&ucs->lock, flags); | ||
1410 | if (urb->status && ucs->pending) { | ||
1411 | err("control request 0x%02x failed: %s", | ||
1412 | ucs->pending, get_usb_statmsg(urb->status)); | ||
1413 | del_timer(&ucs->timer_ctrl); | ||
1414 | ucs->pending = 0; | ||
1415 | } | ||
1416 | /* individual handling of specific request types */ | ||
1417 | switch (ucs->pending) { | ||
1418 | case HD_DEVICE_INIT_ACK: /* no reply expected */ | ||
1419 | ucs->pending = 0; | ||
1420 | break; | ||
1421 | } | ||
1422 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1423 | } | ||
1424 | |||
1425 | /* req_submit | ||
1426 | * submit a control output request without message buffer to the Gigaset base | ||
1427 | * and optionally start a timeout | ||
1428 | * parameters: | ||
1429 | * bcs B channel control structure | ||
1430 | * req control request code (HD_*) | ||
1431 | * val control request parameter value (set to 0 if unused) | ||
1432 | * timeout	timeout in tenths of a second (0: no timeout) | ||
1433 | * return value: | ||
1434 | * 0 on success | ||
1435 | * -EINVAL if a NULL pointer is encountered somewhere | ||
1436 | * -EBUSY if another request is pending | ||
1437 | * any URB submission error code | ||
1438 | */ | ||
1439 | static int req_submit(struct bc_state *bcs, int req, int val, int timeout) | ||
1440 | { | ||
1441 | struct bas_cardstate *ucs; | ||
1442 | int ret; | ||
1443 | unsigned long flags; | ||
1444 | |||
1445 | IFNULLRETVAL(bcs, -EINVAL); | ||
1446 | IFNULLRETVAL(bcs->cs, -EINVAL); | ||
1447 | ucs = bcs->cs->hw.bas; | ||
1448 | IFNULLRETVAL(ucs, -EINVAL); | ||
1449 | IFNULLRETVAL(ucs->urb_ctrl, -EINVAL); | ||
1450 | |||
1451 | dbg(DEBUG_USBREQ, "-------> 0x%02x (%d)", req, val); | ||
1452 | |||
1453 | spin_lock_irqsave(&ucs->lock, flags); | ||
1454 | if (ucs->pending) { | ||
1455 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1456 | err("submission of request 0x%02x failed: request 0x%02x still pending", | ||
1457 | req, ucs->pending); | ||
1458 | return -EBUSY; | ||
1459 | } | ||
1460 | if (ucs->urb_ctrl->status == -EINPROGRESS) { | ||
1461 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1462 | err("could not submit request 0x%02x: URB busy", req); | ||
1463 | return -EBUSY; | ||
1464 | } | ||
1465 | |||
1466 | ucs->dr_ctrl.bRequestType = OUT_VENDOR_REQ; | ||
1467 | ucs->dr_ctrl.bRequest = req; | ||
1468 | ucs->dr_ctrl.wValue = cpu_to_le16(val); | ||
1469 | ucs->dr_ctrl.wIndex = 0; | ||
1470 | ucs->dr_ctrl.wLength = 0; | ||
1471 | usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, | ||
1472 | usb_sndctrlpipe(ucs->udev, 0), | ||
1473 | (unsigned char*) &ucs->dr_ctrl, NULL, 0, | ||
1474 | write_ctrl_callback, ucs); | ||
1475 | if ((ret = usb_submit_urb(ucs->urb_ctrl, SLAB_ATOMIC)) != 0) { | ||
1476 | err("could not submit request 0x%02x: %s", | ||
1477 | req, get_usb_statmsg(ret)); | ||
1478 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1479 | return ret; | ||
1480 | } | ||
1481 | ucs->pending = req; | ||
1482 | |||
1483 | if (timeout > 0) { | ||
1484 | dbg(DEBUG_USBREQ, "setting timeout of %d/10 secs", timeout); | ||
1485 | ucs->timer_ctrl.expires = jiffies + timeout * HZ / 10; | ||
1486 | ucs->timer_ctrl.data = (unsigned long) bcs; | ||
1487 | ucs->timer_ctrl.function = req_timeout; | ||
1488 | add_timer(&ucs->timer_ctrl); | ||
1489 | } | ||
1490 | |||
1491 | spin_unlock_irqrestore(&ucs->lock, flags); | ||
1492 | return 0; | ||
1493 | } | ||
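Note that the timeout arguments used here and in atread_submit()/atwrite_submit() count tenths of a second, hence the jiffies + timeout * HZ / 10 arming of the timers. A trivial sketch of that conversion follows; the HZ value is a placeholder tick rate.

#include <stdio.h>

#define HZ 250                  /* placeholder tick rate */

static unsigned long tenths_to_jiffies(int tenths)
{
        return (unsigned long) tenths * HZ / 10;
}

int main(void)
{
        /* a timeout argument of 20 arms a 2 second timer */
        printf("20/10 s = %lu ticks at HZ=%d\n", tenths_to_jiffies(20), HZ);
        return 0;
}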
1494 | |||
1495 | /* gigaset_init_bchannel | ||
1496 | * called by common.c to connect a B channel | ||
1497 | * initialize isochronous I/O and tell the Gigaset base to open the channel | ||
1498 | * argument: | ||
1499 | * B channel control structure | ||
1500 | * return value: | ||
1501 | * 0 on success, error code < 0 on error | ||
1502 | */ | ||
1503 | static int gigaset_init_bchannel(struct bc_state *bcs) | ||
1504 | { | ||
1505 | int req, ret; | ||
1506 | |||
1507 | IFNULLRETVAL(bcs, -EINVAL); | ||
1508 | |||
1509 | if ((ret = starturbs(bcs)) < 0) { | ||
1510 | err("could not start isochronous I/O for channel %d", | ||
1511 | bcs->channel + 1); | ||
1512 | error_hangup(bcs); | ||
1513 | return ret; | ||
1514 | } | ||
1515 | |||
1516 | req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL; | ||
1517 | if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) { | ||
1518 | err("could not open channel %d: %s", | ||
1519 | bcs->channel + 1, get_usb_statmsg(ret)); | ||
1520 | stopurbs(bcs->hw.bas); | ||
1521 | error_hangup(bcs); | ||
1522 | } | ||
1523 | return ret; | ||
1524 | } | ||
1525 | |||
1526 | /* gigaset_close_bchannel | ||
1527 | * called by common.c to disconnect a B channel | ||
1528 | * tell the Gigaset base to close the channel | ||
1529 | * stopping isochronous I/O and LL notification will be done when the | ||
1530 | * acknowledgement for the close arrives | ||
1531 | * argument: | ||
1532 | * B channel control structure | ||
1533 | * return value: | ||
1534 | * 0 on success, error code < 0 on error | ||
1535 | */ | ||
1536 | static int gigaset_close_bchannel(struct bc_state *bcs) | ||
1537 | { | ||
1538 | int req, ret; | ||
1539 | |||
1540 | IFNULLRETVAL(bcs, -EINVAL); | ||
1541 | |||
1542 | if (!(atomic_read(&bcs->cs->hw.bas->basstate) & | ||
1543 | (bcs->channel ? BS_B2OPEN : BS_B1OPEN))) { | ||
1544 | /* channel not running: just signal common.c */ | ||
1545 | gigaset_bchannel_down(bcs); | ||
1546 | return 0; | ||
1547 | } | ||
1548 | |||
1549 | req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL; | ||
1550 | if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) | ||
1551 | err("could not submit HD_CLOSE_BxCHANNEL request: %s", | ||
1552 | get_usb_statmsg(ret)); | ||
1553 | return ret; | ||
1554 | } | ||
1555 | |||
1556 | /* Device Operations */ | ||
1557 | /* ================= */ | ||
1558 | |||
1559 | /* complete_cb | ||
1560 | * unqueue first command buffer from queue, waking any sleepers | ||
1561 | * must be called with cs->cmdlock held | ||
1562 | * parameter: | ||
1563 | * cs controller state structure | ||
1564 | */ | ||
1565 | static void complete_cb(struct cardstate *cs) | ||
1566 | { | ||
1567 | struct cmdbuf_t *cb; | ||
1568 | |||
1569 | IFNULLRET(cs); | ||
1570 | cb = cs->cmdbuf; | ||
1571 | IFNULLRET(cb); | ||
1572 | |||
1573 | /* unqueue completed buffer */ | ||
1574 | cs->cmdbytes -= cs->curlen; | ||
1575 | dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, | ||
1576 | "write_command: sent %u bytes, %u left", | ||
1577 | cs->curlen, cs->cmdbytes); | ||
1578 | if ((cs->cmdbuf = cb->next) != NULL) { | ||
1579 | cs->cmdbuf->prev = NULL; | ||
1580 | cs->curlen = cs->cmdbuf->len; | ||
1581 | } else { | ||
1582 | cs->lastcmdbuf = NULL; | ||
1583 | cs->curlen = 0; | ||
1584 | } | ||
1585 | |||
1586 | if (cb->wake_tasklet) | ||
1587 | tasklet_schedule(cb->wake_tasklet); | ||
1588 | |||
1589 | kfree(cb); | ||
1590 | } | ||
1591 | |||
1592 | static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len); | ||
1593 | |||
1594 | /* write_command_callback | ||
1595 | * USB completion handler for AT command transmission | ||
1596 | * called by the USB subsystem in interrupt context | ||
1597 | * parameter: | ||
1598 | * urb USB request block of completed request | ||
1599 | * urb->context = controller state structure | ||
1600 | */ | ||
1601 | static void write_command_callback(struct urb *urb, struct pt_regs *regs) | ||
1602 | { | ||
1603 | struct cardstate *cs; | ||
1604 | unsigned long flags; | ||
1605 | struct bas_cardstate *ucs; | ||
1606 | |||
1607 | IFNULLRET(urb); | ||
1608 | cs = (struct cardstate *) urb->context; | ||
1609 | IFNULLRET(cs); | ||
1610 | ucs = cs->hw.bas; | ||
1611 | IFNULLRET(ucs); | ||
1612 | |||
1613 | /* check status */ | ||
1614 | switch (urb->status) { | ||
1615 | case 0: /* normal completion */ | ||
1616 | break; | ||
1617 | case -ENOENT: /* canceled */ | ||
1618 | case -ECONNRESET: /* canceled (async) */ | ||
1619 | case -EINPROGRESS: /* pending */ | ||
1620 | /* ignore silently */ | ||
1621 | dbg(DEBUG_USBREQ, | ||
1622 | "%s: %s", __func__, get_usb_statmsg(urb->status)); | ||
1623 | return; | ||
1624 | default: /* any failure */ | ||
1625 | if (++ucs->retry_cmd_out > BAS_RETRY) { | ||
1626 | warn("command write: %s, giving up after %d retries", | ||
1627 | get_usb_statmsg(urb->status), ucs->retry_cmd_out); | ||
1628 | break; | ||
1629 | } | ||
1630 | if (cs->cmdbuf == NULL) { | ||
1631 | warn("command write: %s, cannot retry - cmdbuf gone", | ||
1632 | get_usb_statmsg(urb->status)); | ||
1633 | break; | ||
1634 | } | ||
1635 | notice("command write: %s, retry %d", | ||
1636 | get_usb_statmsg(urb->status), ucs->retry_cmd_out); | ||
1637 | if (atwrite_submit(cs, cs->cmdbuf->buf, cs->cmdbuf->len) >= 0) | ||
1638 | /* resubmitted - bypass regular exit block */ | ||
1639 | return; | ||
1640 | /* command send failed, assume base still waiting */ | ||
1641 | update_basstate(ucs, BS_ATREADY, 0); | ||
1642 | } | ||
1643 | |||
1644 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
1645 | if (cs->cmdbuf != NULL) | ||
1646 | complete_cb(cs); | ||
1647 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
1648 | } | ||
1649 | |||
1650 | /* atrdy_timeout | ||
1651 | * timeout routine for AT command transmission | ||
1652 | * argument: | ||
1653 | * controller state structure | ||
1654 | */ | ||
1655 | static void atrdy_timeout(unsigned long data) | ||
1656 | { | ||
1657 | struct cardstate *cs = (struct cardstate *) data; | ||
1658 | struct bas_cardstate *ucs; | ||
1659 | |||
1660 | IFNULLRET(cs); | ||
1661 | ucs = cs->hw.bas; | ||
1662 | IFNULLRET(ucs); | ||
1663 | |||
1664 | warn("timeout waiting for HD_READY_SEND_ATDATA"); | ||
1665 | |||
1666 | /* fake the missing signal - what else can I do? */ | ||
1667 | update_basstate(ucs, BS_ATREADY, BS_ATTIMER); | ||
1668 | start_cbsend(cs); | ||
1669 | } | ||
1670 | |||
1671 | /* atwrite_submit | ||
1672 | * submit an HD_WRITE_ATMESSAGE command URB | ||
1673 | * parameters: | ||
1674 | * cs controller state structure | ||
1675 | * buf buffer containing command to send | ||
1676 | * len length of command to send | ||
1677 | * return value: | ||
1678 | * 0 on success | ||
1679 | * -EFAULT if a NULL pointer is encountered somewhere | ||
1680 | * -EBUSY if another request is pending | ||
1681 | * any URB submission error code | ||
1682 | */ | ||
1683 | static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len) | ||
1684 | { | ||
1685 | struct bas_cardstate *ucs; | ||
1686 | int ret; | ||
1687 | |||
1688 | IFNULLRETVAL(cs, -EFAULT); | ||
1689 | ucs = cs->hw.bas; | ||
1690 | IFNULLRETVAL(ucs, -EFAULT); | ||
1691 | IFNULLRETVAL(ucs->urb_cmd_out, -EFAULT); | ||
1692 | |||
1693 | dbg(DEBUG_USBREQ, "-------> HD_WRITE_ATMESSAGE (%d)", len); | ||
1694 | |||
1695 | if (ucs->urb_cmd_out->status == -EINPROGRESS) { | ||
1696 | err("could not submit HD_WRITE_ATMESSAGE: URB busy"); | ||
1697 | return -EBUSY; | ||
1698 | } | ||
1699 | |||
1700 | ucs->dr_cmd_out.bRequestType = OUT_VENDOR_REQ; | ||
1701 | ucs->dr_cmd_out.bRequest = HD_WRITE_ATMESSAGE; | ||
1702 | ucs->dr_cmd_out.wValue = 0; | ||
1703 | ucs->dr_cmd_out.wIndex = 0; | ||
1704 | ucs->dr_cmd_out.wLength = cpu_to_le16(len); | ||
1705 | usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, | ||
1706 | usb_sndctrlpipe(ucs->udev, 0), | ||
1707 | (unsigned char*) &ucs->dr_cmd_out, buf, len, | ||
1708 | write_command_callback, cs); | ||
1709 | |||
1710 | if ((ret = usb_submit_urb(ucs->urb_cmd_out, SLAB_ATOMIC)) != 0) { | ||
1711 | err("could not submit HD_WRITE_ATMESSAGE: %s", | ||
1712 | get_usb_statmsg(ret)); | ||
1713 | return ret; | ||
1714 | } | ||
1715 | |||
1716 | /* submitted successfully */ | ||
1717 | update_basstate(ucs, 0, BS_ATREADY); | ||
1718 | |||
1719 | /* start timeout if necessary */ | ||
1720 | if (!(atomic_read(&ucs->basstate) & BS_ATTIMER)) { | ||
1721 | dbg(DEBUG_OUTPUT, | ||
1722 | "setting ATREADY timeout of %d/10 secs", ATRDY_TIMEOUT); | ||
1723 | ucs->timer_atrdy.expires = jiffies + ATRDY_TIMEOUT * HZ / 10; | ||
1724 | ucs->timer_atrdy.data = (unsigned long) cs; | ||
1725 | ucs->timer_atrdy.function = atrdy_timeout; | ||
1726 | add_timer(&ucs->timer_atrdy); | ||
1727 | update_basstate(ucs, BS_ATTIMER, 0); | ||
1728 | } | ||
1729 | return 0; | ||
1730 | } | ||
1731 | |||
1732 | /* start_cbsend | ||
1733 | * start transmission of AT command queue if necessary | ||
1734 | * parameter: | ||
1735 | * cs controller state structure | ||
1736 | * return value: | ||
1737 | * 0 on success | ||
1738 | * error code < 0 on error | ||
1739 | */ | ||
1740 | static int start_cbsend(struct cardstate *cs) | ||
1741 | { | ||
1742 | struct cmdbuf_t *cb; | ||
1743 | struct bas_cardstate *ucs; | ||
1744 | unsigned long flags; | ||
1745 | int rc; | ||
1746 | int retval = 0; | ||
1747 | |||
1748 | IFNULLRETVAL(cs, -EFAULT); | ||
1749 | ucs = cs->hw.bas; | ||
1750 | IFNULLRETVAL(ucs, -EFAULT); | ||
1751 | |||
1752 | /* check if AT channel is open */ | ||
1753 | if (!(atomic_read(&ucs->basstate) & BS_ATOPEN)) { | ||
1754 | dbg(DEBUG_TRANSCMD | DEBUG_LOCKCMD, "AT channel not open"); | ||
1755 | rc = req_submit(cs->bcs, HD_OPEN_ATCHANNEL, 0, BAS_TIMEOUT); | ||
1756 | if (rc < 0) { | ||
1757 | err("could not open AT channel"); | ||
1758 | /* flush command queue */ | ||
1759 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
1760 | while (cs->cmdbuf != NULL) | ||
1761 | complete_cb(cs); | ||
1762 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
1763 | } | ||
1764 | return rc; | ||
1765 | } | ||
1766 | |||
1767 | /* try to send first command in queue */ | ||
1768 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
1769 | |||
1770 | while ((cb = cs->cmdbuf) != NULL && | ||
1771 | atomic_read(&ucs->basstate) & BS_ATREADY) { | ||
1772 | ucs->retry_cmd_out = 0; | ||
1773 | rc = atwrite_submit(cs, cb->buf, cb->len); | ||
1774 | if (unlikely(rc)) { | ||
1775 | retval = rc; | ||
1776 | complete_cb(cs); | ||
1777 | } | ||
1778 | } | ||
1779 | |||
1780 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
1781 | return retval; | ||
1782 | } | ||
1783 | |||
1784 | /* gigaset_write_cmd | ||
1785 | * This function is called by the device independent part of the driver | ||
1786 | * to transmit an AT command string to the Gigaset device. | ||
1787 | * It encapsulates the device specific method for transmission over the | ||
1788 | * direct USB connection to the base. | ||
1789 | * The command string is added to the queue of commands to send, and | ||
1790 | * USB transmission is started if necessary. | ||
1791 | * parameters: | ||
1792 | * cs controller state structure | ||
1793 | * buf command string to send | ||
1794 | * len number of bytes to send (max. IF_WRITEBUF) | ||
1795 | * wake_tasklet tasklet to run when transmission is completed (NULL if none) | ||
1796 | * return value: | ||
1797 | * number of bytes queued on success | ||
1798 | * error code < 0 on error | ||
1799 | */ | ||
1800 | static int gigaset_write_cmd(struct cardstate *cs, | ||
1801 | const unsigned char *buf, int len, | ||
1802 | struct tasklet_struct *wake_tasklet) | ||
1803 | { | ||
1804 | struct cmdbuf_t *cb; | ||
1805 | unsigned long flags; | ||
1806 | int status; | ||
1807 | |||
1808 | gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? | ||
1809 | DEBUG_TRANSCMD : DEBUG_LOCKCMD, | ||
1810 | "CMD Transmit", len, buf, 0); | ||
1811 | |||
1812 | if (!atomic_read(&cs->connected)) { | ||
1813 | err("%s: not connected", __func__); | ||
1814 | return -ENODEV; | ||
1815 | } | ||
1816 | |||
1817 | if (len <= 0) | ||
1818 | return 0; /* nothing to do */ | ||
1819 | |||
1820 | if (len > IF_WRITEBUF) | ||
1821 | len = IF_WRITEBUF; | ||
1822 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { | ||
1823 | err("%s: out of memory", __func__); | ||
1824 | return -ENOMEM; | ||
1825 | } | ||
1826 | |||
1827 | memcpy(cb->buf, buf, len); | ||
1828 | cb->len = len; | ||
1829 | cb->offset = 0; | ||
1830 | cb->next = NULL; | ||
1831 | cb->wake_tasklet = wake_tasklet; | ||
1832 | |||
1833 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
1834 | cb->prev = cs->lastcmdbuf; | ||
1835 | if (cs->lastcmdbuf) | ||
1836 | cs->lastcmdbuf->next = cb; | ||
1837 | else { | ||
1838 | cs->cmdbuf = cb; | ||
1839 | cs->curlen = len; | ||
1840 | } | ||
1841 | cs->cmdbytes += len; | ||
1842 | cs->lastcmdbuf = cb; | ||
1843 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
1844 | |||
1845 | status = start_cbsend(cs); | ||
1846 | |||
1847 | return status < 0 ? status : len; | ||
1848 | } | ||
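gigaset_write_cmd() appends new command buffers at cs->lastcmdbuf while complete_cb() always unlinks the head at cs->cmdbuf, so queued AT commands leave in FIFO order. A simplified user-space sketch of that queue discipline follows; the struct and the fixed-size buffer are stand-ins, not the driver's struct cmdbuf_t.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd {
        struct cmd *next, *prev;
        int len;
        char buf[32];           /* demo strings fit this buffer */
};

static struct cmd *head, *tail; /* play the role of cs->cmdbuf / cs->lastcmdbuf */

static void enqueue(const char *s)
{
        struct cmd *cb = calloc(1, sizeof(*cb));

        if (!cb)
                return;
        cb->len = (int) strlen(s);
        if (cb->len > (int) sizeof(cb->buf))
                cb->len = (int) sizeof(cb->buf);
        memcpy(cb->buf, s, cb->len);
        cb->prev = tail;
        if (tail)
                tail->next = cb;
        else
                head = cb;
        tail = cb;
}

static void complete_head(void) /* mirrors complete_cb() */
{
        struct cmd *cb = head;

        if (!cb)
                return;
        head = cb->next;
        if (head)
                head->prev = NULL;
        else
                tail = NULL;
        free(cb);
}

int main(void)
{
        enqueue("AT");
        enqueue("ATD123");
        while (head) {
                printf("sending %.*s\n", head->len, head->buf);
                complete_head();
        }
        return 0;
}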
1849 | |||
1850 | /* gigaset_write_room | ||
1851 | * tty_driver.write_room interface routine | ||
1852 | * return number of characters the driver will accept to be written via gigaset_write_cmd | ||
1853 | * parameter: | ||
1854 | * controller state structure | ||
1855 | * return value: | ||
1856 | * number of characters | ||
1857 | */ | ||
1858 | static int gigaset_write_room(struct cardstate *cs) | ||
1859 | { | ||
1860 | return IF_WRITEBUF; | ||
1861 | } | ||
1862 | |||
1863 | /* gigaset_chars_in_buffer | ||
1864 | * tty_driver.chars_in_buffer interface routine | ||
1865 | * return number of characters waiting to be sent | ||
1866 | * parameter: | ||
1867 | * controller state structure | ||
1868 | * return value: | ||
1869 | * number of characters | ||
1870 | */ | ||
1871 | static int gigaset_chars_in_buffer(struct cardstate *cs) | ||
1872 | { | ||
1873 | unsigned long flags; | ||
1874 | unsigned bytes; | ||
1875 | |||
1876 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
1877 | bytes = cs->cmdbytes; | ||
1878 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
1879 | |||
1880 | return bytes; | ||
1881 | } | ||
1882 | |||
1883 | /* gigaset_brkchars | ||
1884 | * implementation of ioctl(GIGASET_BRKCHARS) | ||
1885 | * parameter: | ||
1886 | * controller state structure | ||
1887 | * return value: | ||
1888 | * -EINVAL (unimplemented function) | ||
1889 | */ | ||
1890 | static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) | ||
1891 | { | ||
1892 | return -EINVAL; | ||
1893 | } | ||
1894 | |||
1895 | |||
1896 | /* Device Initialization/Shutdown */ | ||
1897 | /* ============================== */ | ||
1898 | |||
1899 | /* Free hardware dependent part of the B channel structure | ||
1900 | * parameter: | ||
1901 | * bcs B channel structure | ||
1902 | * return value: | ||
1903 | * !=0 on success | ||
1904 | */ | ||
1905 | static int gigaset_freebcshw(struct bc_state *bcs) | ||
1906 | { | ||
1907 | if (!bcs->hw.bas) | ||
1908 | return 0; | ||
1909 | |||
1910 | if (bcs->hw.bas->isooutbuf) | ||
1911 | kfree(bcs->hw.bas->isooutbuf); | ||
1912 | kfree(bcs->hw.bas); | ||
1913 | bcs->hw.bas = NULL; | ||
1914 | return 1; | ||
1915 | } | ||
1916 | |||
1917 | /* Initialize hardware dependent part of the B channel structure | ||
1918 | * parameter: | ||
1919 | * bcs B channel structure | ||
1920 | * return value: | ||
1921 | * !=0 on success | ||
1922 | */ | ||
1923 | static int gigaset_initbcshw(struct bc_state *bcs) | ||
1924 | { | ||
1925 | int i; | ||
1926 | struct bas_bc_state *ubc; | ||
1927 | |||
1928 | bcs->hw.bas = ubc = kmalloc(sizeof(struct bas_bc_state), GFP_KERNEL); | ||
1929 | if (!ubc) { | ||
1930 | err("could not allocate bas_bc_state"); | ||
1931 | return 0; | ||
1932 | } | ||
1933 | |||
1934 | atomic_set(&ubc->running, 0); | ||
1935 | atomic_set(&ubc->corrbytes, 0); | ||
1936 | spin_lock_init(&ubc->isooutlock); | ||
1937 | for (i = 0; i < BAS_OUTURBS; ++i) { | ||
1938 | ubc->isoouturbs[i].urb = NULL; | ||
1939 | ubc->isoouturbs[i].bcs = bcs; | ||
1940 | } | ||
1941 | ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL; | ||
1942 | ubc->numsub = 0; | ||
1943 | if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) { | ||
1944 | err("could not allocate isochronous output buffer"); | ||
1945 | kfree(ubc); | ||
1946 | bcs->hw.bas = NULL; | ||
1947 | return 0; | ||
1948 | } | ||
1949 | tasklet_init(&ubc->sent_tasklet, | ||
1950 | &write_iso_tasklet, (unsigned long) bcs); | ||
1951 | |||
1952 | spin_lock_init(&ubc->isoinlock); | ||
1953 | for (i = 0; i < BAS_INURBS; ++i) | ||
1954 | ubc->isoinurbs[i] = NULL; | ||
1955 | ubc->isoindone = NULL; | ||
1956 | ubc->loststatus = -EINPROGRESS; | ||
1957 | ubc->isoinlost = 0; | ||
1958 | ubc->seqlen = 0; | ||
1959 | ubc->inbyte = 0; | ||
1960 | ubc->inbits = 0; | ||
1961 | ubc->goodbytes = 0; | ||
1962 | ubc->alignerrs = 0; | ||
1963 | ubc->fcserrs = 0; | ||
1964 | ubc->frameerrs = 0; | ||
1965 | ubc->giants = 0; | ||
1966 | ubc->runts = 0; | ||
1967 | ubc->aborts = 0; | ||
1968 | ubc->shared0s = 0; | ||
1969 | ubc->stolen0s = 0; | ||
1970 | tasklet_init(&ubc->rcvd_tasklet, | ||
1971 | &read_iso_tasklet, (unsigned long) bcs); | ||
1972 | return 1; | ||
1973 | } | ||
1974 | |||
1975 | static void gigaset_reinitbcshw(struct bc_state *bcs) | ||
1976 | { | ||
1977 | struct bas_bc_state *ubc = bcs->hw.bas; | ||
1978 | |||
1979 | atomic_set(&bcs->hw.bas->running, 0); | ||
1980 | atomic_set(&bcs->hw.bas->corrbytes, 0); | ||
1981 | bcs->hw.bas->numsub = 0; | ||
1982 | spin_lock_init(&ubc->isooutlock); | ||
1983 | spin_lock_init(&ubc->isoinlock); | ||
1984 | ubc->loststatus = -EINPROGRESS; | ||
1985 | } | ||
1986 | |||
1987 | static void gigaset_freecshw(struct cardstate *cs) | ||
1988 | { | ||
1989 | struct bas_cardstate *ucs = cs->hw.bas; | ||
1990 | |||
1991 | del_timer(&ucs->timer_ctrl); | ||
1992 | del_timer(&ucs->timer_atrdy); | ||
1993 | del_timer(&ucs->timer_cmd_in); | ||
1994 | |||
1995 | kfree(cs->hw.bas); | ||
1996 | } | ||
1997 | |||
1998 | static int gigaset_initcshw(struct cardstate *cs) | ||
1999 | { | ||
2000 | struct bas_cardstate *ucs; | ||
2001 | |||
2002 | cs->hw.bas = ucs = kmalloc(sizeof *ucs, GFP_KERNEL); | ||
2003 | if (!ucs) | ||
2004 | return 0; | ||
2005 | |||
2006 | ucs->urb_cmd_in = NULL; | ||
2007 | ucs->urb_cmd_out = NULL; | ||
2008 | ucs->rcvbuf = NULL; | ||
2009 | ucs->rcvbuf_size = 0; | ||
2010 | |||
2011 | spin_lock_init(&ucs->lock); | ||
2012 | ucs->pending = 0; | ||
2013 | |||
2014 | atomic_set(&ucs->basstate, 0); | ||
2015 | init_timer(&ucs->timer_ctrl); | ||
2016 | init_timer(&ucs->timer_atrdy); | ||
2017 | init_timer(&ucs->timer_cmd_in); | ||
2018 | |||
2019 | return 1; | ||
2020 | } | ||
2021 | |||
2022 | /* freeurbs | ||
2023 | * unlink and deallocate all URBs unconditionally | ||
2024 | * caller must make sure that no commands are still in progress | ||
2025 | * parameter: | ||
2026 | * cs controller state structure | ||
2027 | */ | ||
2028 | static void freeurbs(struct cardstate *cs) | ||
2029 | { | ||
2030 | struct bas_cardstate *ucs; | ||
2031 | struct bas_bc_state *ubc; | ||
2032 | int i, j; | ||
2033 | |||
2034 | IFNULLRET(cs); | ||
2035 | ucs = cs->hw.bas; | ||
2036 | IFNULLRET(ucs); | ||
2037 | |||
2038 | for (j = 0; j < 2; ++j) { | ||
2039 | ubc = cs->bcs[j].hw.bas; | ||
2040 | IFNULLCONT(ubc); | ||
2041 | for (i = 0; i < BAS_OUTURBS; ++i) | ||
2042 | if (ubc->isoouturbs[i].urb) { | ||
2043 | usb_kill_urb(ubc->isoouturbs[i].urb); | ||
2044 | dbg(DEBUG_INIT, | ||
2045 | "%s: isoc output URB %d/%d unlinked", | ||
2046 | __func__, j, i); | ||
2047 | usb_free_urb(ubc->isoouturbs[i].urb); | ||
2048 | ubc->isoouturbs[i].urb = NULL; | ||
2049 | } | ||
2050 | for (i = 0; i < BAS_INURBS; ++i) | ||
2051 | if (ubc->isoinurbs[i]) { | ||
2052 | usb_kill_urb(ubc->isoinurbs[i]); | ||
2053 | dbg(DEBUG_INIT, | ||
2054 | "%s: isoc input URB %d/%d unlinked", | ||
2055 | __func__, j, i); | ||
2056 | usb_free_urb(ubc->isoinurbs[i]); | ||
2057 | ubc->isoinurbs[i] = NULL; | ||
2058 | } | ||
2059 | } | ||
2060 | if (ucs->urb_int_in) { | ||
2061 | usb_kill_urb(ucs->urb_int_in); | ||
2062 | dbg(DEBUG_INIT, "%s: interrupt input URB unlinked", __func__); | ||
2063 | usb_free_urb(ucs->urb_int_in); | ||
2064 | ucs->urb_int_in = NULL; | ||
2065 | } | ||
2066 | if (ucs->urb_cmd_out) { | ||
2067 | usb_kill_urb(ucs->urb_cmd_out); | ||
2068 | dbg(DEBUG_INIT, "%s: command output URB unlinked", __func__); | ||
2069 | usb_free_urb(ucs->urb_cmd_out); | ||
2070 | ucs->urb_cmd_out = NULL; | ||
2071 | } | ||
2072 | if (ucs->urb_cmd_in) { | ||
2073 | usb_kill_urb(ucs->urb_cmd_in); | ||
2074 | dbg(DEBUG_INIT, "%s: command input URB unlinked", __func__); | ||
2075 | usb_free_urb(ucs->urb_cmd_in); | ||
2076 | ucs->urb_cmd_in = NULL; | ||
2077 | } | ||
2078 | if (ucs->urb_ctrl) { | ||
2079 | usb_kill_urb(ucs->urb_ctrl); | ||
2080 | dbg(DEBUG_INIT, "%s: control output URB unlinked", __func__); | ||
2081 | usb_free_urb(ucs->urb_ctrl); | ||
2082 | ucs->urb_ctrl = NULL; | ||
2083 | } | ||
2084 | } | ||
2085 | |||
2086 | /* gigaset_probe | ||
2087 | * This function is called when a new USB device is connected. | ||
2088 | * It checks whether the new device is handled by this driver. | ||
2089 | */ | ||
2090 | static int gigaset_probe(struct usb_interface *interface, | ||
2091 | const struct usb_device_id *id) | ||
2092 | { | ||
2093 | struct usb_host_interface *hostif; | ||
2094 | struct usb_device *udev = interface_to_usbdev(interface); | ||
2095 | struct cardstate *cs = NULL; | ||
2096 | struct bas_cardstate *ucs = NULL; | ||
2097 | struct bas_bc_state *ubc; | ||
2098 | struct usb_endpoint_descriptor *endpoint; | ||
2099 | int i, j; | ||
2100 | int ret; | ||
2101 | |||
2102 | IFNULLRETVAL(udev, -ENODEV); | ||
2103 | |||
2104 | dbg(DEBUG_ANY, | ||
2105 | "%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)", | ||
2106 | __func__, le16_to_cpu(udev->descriptor.idVendor), | ||
2107 | le16_to_cpu(udev->descriptor.idProduct)); | ||
2108 | |||
2109 | /* See if the device offered us matches what we can accept */ | ||
2110 | if ((le16_to_cpu(udev->descriptor.idVendor) != USB_GIGA_VENDOR_ID) || | ||
2111 | (le16_to_cpu(udev->descriptor.idProduct) != USB_GIGA_PRODUCT_ID && | ||
2112 | le16_to_cpu(udev->descriptor.idProduct) != USB_4175_PRODUCT_ID && | ||
2113 | le16_to_cpu(udev->descriptor.idProduct) != USB_SX303_PRODUCT_ID && | ||
2114 | le16_to_cpu(udev->descriptor.idProduct) != USB_SX353_PRODUCT_ID)) { | ||
2115 | dbg(DEBUG_ANY, "%s: unmatched ID - exiting", __func__); | ||
2116 | return -ENODEV; | ||
2117 | } | ||
2118 | |||
2119 | /* set required alternate setting */ | ||
2120 | hostif = interface->cur_altsetting; | ||
2121 | if (hostif->desc.bAlternateSetting != 3) { | ||
2122 | dbg(DEBUG_ANY, | ||
2123 | "%s: wrong alternate setting %d - trying to switch", | ||
2124 | __func__, hostif->desc.bAlternateSetting); | ||
2125 | if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) { | ||
2126 | warn("usb_set_interface failed, device %d interface %d altsetting %d", | ||
2127 | udev->devnum, hostif->desc.bInterfaceNumber, | ||
2128 | hostif->desc.bAlternateSetting); | ||
2129 | return -ENODEV; | ||
2130 | } | ||
2131 | hostif = interface->cur_altsetting; | ||
2132 | } | ||
2133 | |||
2134 | /* Reject application-specific interfaces | ||
2135 | */ | ||
2136 | if (hostif->desc.bInterfaceClass != 255) { | ||
2137 | warn("%s: bInterfaceClass == %d", | ||
2138 | __func__, hostif->desc.bInterfaceClass); | ||
2139 | return -ENODEV; | ||
2140 | } | ||
2141 | |||
2142 | info("%s: Device matched (Vendor: 0x%x, Product: 0x%x)", | ||
2143 | __func__, le16_to_cpu(udev->descriptor.idVendor), | ||
2144 | le16_to_cpu(udev->descriptor.idProduct)); | ||
2145 | |||
2146 | cs = gigaset_getunassignedcs(driver); | ||
2147 | if (!cs) { | ||
2148 | err("%s: no free cardstate", __func__); | ||
2149 | return -ENODEV; | ||
2150 | } | ||
2151 | ucs = cs->hw.bas; | ||
2152 | ucs->udev = udev; | ||
2153 | ucs->interface = interface; | ||
2154 | |||
2155 | /* allocate URBs: | ||
2156 | * - one for the interrupt pipe | ||
2157 | * - three for the different uses of the default control pipe | ||
2158 | * - three for each isochronous pipe | ||
2159 | */ | ||
2160 | ucs->urb_int_in = usb_alloc_urb(0, SLAB_KERNEL); | ||
2161 | if (!ucs->urb_int_in) { | ||
2162 | err("No free urbs available"); | ||
2163 | goto error; | ||
2164 | } | ||
2165 | ucs->urb_cmd_in = usb_alloc_urb(0, SLAB_KERNEL); | ||
2166 | if (!ucs->urb_cmd_in) { | ||
2167 | err("No free urbs available"); | ||
2168 | goto error; | ||
2169 | } | ||
2170 | ucs->urb_cmd_out = usb_alloc_urb(0, SLAB_KERNEL); | ||
2171 | if (!ucs->urb_cmd_out) { | ||
2172 | err("No free urbs available"); | ||
2173 | goto error; | ||
2174 | } | ||
2175 | ucs->urb_ctrl = usb_alloc_urb(0, SLAB_KERNEL); | ||
2176 | if (!ucs->urb_ctrl) { | ||
2177 | err("No free urbs available"); | ||
2178 | goto error; | ||
2179 | } | ||
2180 | |||
2181 | for (j = 0; j < 2; ++j) { | ||
2182 | ubc = cs->bcs[j].hw.bas; | ||
2183 | for (i = 0; i < BAS_OUTURBS; ++i) { | ||
2184 | ubc->isoouturbs[i].urb = | ||
2185 | usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL); | ||
2186 | if (!ubc->isoouturbs[i].urb) { | ||
2187 | err("No free urbs available"); | ||
2188 | goto error; | ||
2189 | } | ||
2190 | } | ||
2191 | for (i = 0; i < BAS_INURBS; ++i) { | ||
2192 | ubc->isoinurbs[i] = | ||
2193 | usb_alloc_urb(BAS_NUMFRAMES, SLAB_KERNEL); | ||
2194 | if (!ubc->isoinurbs[i]) { | ||
2195 | err("No free urbs available"); | ||
2196 | goto error; | ||
2197 | } | ||
2198 | } | ||
2199 | } | ||
2200 | |||
2201 | ucs->rcvbuf = NULL; | ||
2202 | ucs->rcvbuf_size = 0; | ||
2203 | |||
2204 | /* Fill the interrupt urb and send it to the core */ | ||
2205 | endpoint = &hostif->endpoint[0].desc; | ||
2206 | usb_fill_int_urb(ucs->urb_int_in, udev, | ||
2207 | usb_rcvintpipe(udev, | ||
2208 | (endpoint->bEndpointAddress) & 0x0f), | ||
2209 | ucs->int_in_buf, 3, read_int_callback, cs, | ||
2210 | endpoint->bInterval); | ||
2211 | ret = usb_submit_urb(ucs->urb_int_in, SLAB_KERNEL); | ||
2212 | if (ret) { | ||
2213 | err("could not submit interrupt URB: %s", get_usb_statmsg(ret)); | ||
2214 | goto error; | ||
2215 | } | ||
2216 | |||
2217 | /* tell the device that the driver is ready */ | ||
2218 | if ((ret = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0) | ||
2219 | goto error; | ||
2220 | |||
2221 | /* tell common part that the device is ready */ | ||
2222 | if (startmode == SM_LOCKED) | ||
2223 | atomic_set(&cs->mstate, MS_LOCKED); | ||
2224 | if (!gigaset_start(cs)) | ||
2225 | goto error; | ||
2226 | |||
2227 | /* save address of controller structure */ | ||
2228 | usb_set_intfdata(interface, cs); | ||
2229 | |||
2230 | /* set up device sysfs */ | ||
2231 | gigaset_init_dev_sysfs(interface); | ||
2232 | return 0; | ||
2233 | |||
2234 | error: | ||
2235 | freeurbs(cs); | ||
2236 | gigaset_unassign(cs); | ||
2237 | return -ENODEV; | ||
2238 | } | ||
2239 | |||
2240 | /* gigaset_disconnect | ||
2241 | * This function is called when the Gigaset base is unplugged. | ||
2242 | */ | ||
2243 | static void gigaset_disconnect(struct usb_interface *interface) | ||
2244 | { | ||
2245 | struct cardstate *cs; | ||
2246 | struct bas_cardstate *ucs; | ||
2247 | |||
2248 | /* clear device sysfs */ | ||
2249 | gigaset_free_dev_sysfs(interface); | ||
2250 | |||
2251 | cs = usb_get_intfdata(interface); | ||
2252 | usb_set_intfdata(interface, NULL); | ||
2253 | |||
2254 | IFNULLRET(cs); | ||
2255 | ucs = cs->hw.bas; | ||
2256 | IFNULLRET(ucs); | ||
2257 | |||
2258 | info("disconnecting GigaSet base"); | ||
2259 | gigaset_stop(cs); | ||
2260 | freeurbs(cs); | ||
2261 | kfree(ucs->rcvbuf); | ||
2262 | ucs->rcvbuf = NULL; | ||
2263 | ucs->rcvbuf_size = 0; | ||
2264 | atomic_set(&ucs->basstate, 0); | ||
2265 | gigaset_unassign(cs); | ||
2266 | } | ||
2267 | |||
2268 | static struct gigaset_ops gigops = { | ||
2269 | gigaset_write_cmd, | ||
2270 | gigaset_write_room, | ||
2271 | gigaset_chars_in_buffer, | ||
2272 | gigaset_brkchars, | ||
2273 | gigaset_init_bchannel, | ||
2274 | gigaset_close_bchannel, | ||
2275 | gigaset_initbcshw, | ||
2276 | gigaset_freebcshw, | ||
2277 | gigaset_reinitbcshw, | ||
2278 | gigaset_initcshw, | ||
2279 | gigaset_freecshw, | ||
2280 | gigaset_set_modem_ctrl, | ||
2281 | gigaset_baud_rate, | ||
2282 | gigaset_set_line_ctrl, | ||
2283 | gigaset_isoc_send_skb, | ||
2284 | gigaset_isoc_input, | ||
2285 | }; | ||
2286 | |||
2287 | /* bas_gigaset_init | ||
2288 | * This function is called after the kernel module is loaded. | ||
2289 | */ | ||
2290 | static int __init bas_gigaset_init(void) | ||
2291 | { | ||
2292 | int result; | ||
2293 | |||
2294 | /* allocate memory for our driver state and initialize it */ | ||
2295 | if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, | ||
2296 | GIGASET_MODULENAME, GIGASET_DEVNAME, | ||
2297 | GIGASET_DEVFSNAME, &gigops, | ||
2298 | THIS_MODULE)) == NULL) | ||
2299 | goto error; | ||
2300 | |||
2301 | /* allocate memory for our device state and initialize it */ | ||
2302 | cardstate = gigaset_initcs(driver, 2, 0, 0, cidmode, GIGASET_MODULENAME); | ||
2303 | if (!cardstate) | ||
2304 | goto error; | ||
2305 | |||
2306 | /* register this driver with the USB subsystem */ | ||
2307 | result = usb_register(&gigaset_usb_driver); | ||
2308 | if (result < 0) { | ||
2309 | err("usb_register failed (error %d)", -result); | ||
2310 | goto error; | ||
2311 | } | ||
2312 | |||
2313 | info(DRIVER_AUTHOR); | ||
2314 | info(DRIVER_DESC); | ||
2315 | return 0; | ||
2316 | |||
2317 | error: if (cardstate) | ||
2318 | gigaset_freecs(cardstate); | ||
2319 | cardstate = NULL; | ||
2320 | if (driver) | ||
2321 | gigaset_freedriver(driver); | ||
2322 | driver = NULL; | ||
2323 | return -1; | ||
2324 | } | ||
2325 | |||
2326 | /* bas_gigaset_exit | ||
2327 | * This function is called before the kernel module is unloaded. | ||
2328 | */ | ||
2329 | static void __exit bas_gigaset_exit(void) | ||
2330 | { | ||
2331 | gigaset_blockdriver(driver); /* => probe will fail | ||
2332 | * => no gigaset_start any more | ||
2333 | */ | ||
2334 | |||
2335 | gigaset_shutdown(cardstate); | ||
2336 | /* from now on, no isdn callback should be possible */ | ||
2337 | |||
2338 | if (atomic_read(&cardstate->hw.bas->basstate) & BS_ATOPEN) { | ||
2339 | dbg(DEBUG_ANY, "closing AT channel"); | ||
2340 | if (req_submit(cardstate->bcs, | ||
2341 | HD_CLOSE_ATCHANNEL, 0, BAS_TIMEOUT) >= 0) { | ||
2342 | /* successfully submitted - wait for completion */ | ||
2343 | //wait_event_interruptible(cs->initwait, !cs->hw.bas->pending); | ||
2344 | //FIXME need own wait queue? wakeup? | ||
2345 | } | ||
2346 | } | ||
2347 | |||
2348 | /* deregister this driver with the USB subsystem */ | ||
2349 | usb_deregister(&gigaset_usb_driver); | ||
2350 | /* this will call the disconnect-callback */ | ||
2351 | /* from now on, no disconnect/probe callback should be running */ | ||
2352 | |||
2353 | gigaset_freecs(cardstate); | ||
2354 | cardstate = NULL; | ||
2355 | gigaset_freedriver(driver); | ||
2356 | driver = NULL; | ||
2357 | } | ||
2358 | |||
2359 | |||
2360 | module_init(bas_gigaset_init); | ||
2361 | module_exit(bas_gigaset_exit); | ||
2362 | |||
2363 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
2364 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
2365 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c new file mode 100644 index 000000000000..64371995c1a9 --- /dev/null +++ b/drivers/isdn/gigaset/common.c | |||
@@ -0,0 +1,1203 @@ | |||
1 | /* | ||
2 | * Stuff used by all variants of the driver | ||
3 | * | ||
4 | * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, | ||
5 | * Hansjoerg Lipp <hjlipp@web.de>, | ||
6 | * Tilman Schmidt <tilman@imap.cc>. | ||
7 | * | ||
8 | * ===================================================================== | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of | ||
12 | * the License, or (at your option) any later version. | ||
13 | * ===================================================================== | ||
14 | * ToDo: ... | ||
15 | * ===================================================================== | ||
16 | * Version: $Id: common.c,v 1.104.4.22 2006/02/04 18:28:16 hjlipp Exp $ | ||
17 | * ===================================================================== | ||
18 | */ | ||
19 | |||
20 | #include "gigaset.h" | ||
21 | #include <linux/ctype.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/moduleparam.h> | ||
24 | |||
25 | /* Version Information */ | ||
26 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Tilman Schmidt <tilman@imap.cc>, Stefan Eilers <Eilers.Stefan@epost.de>" | ||
27 | #define DRIVER_DESC "Driver for Gigaset 307x" | ||
28 | |||
29 | /* Module parameters */ | ||
30 | int gigaset_debuglevel = DEBUG_DEFAULT; | ||
31 | EXPORT_SYMBOL_GPL(gigaset_debuglevel); | ||
32 | module_param_named(debug, gigaset_debuglevel, int, S_IRUGO|S_IWUSR); | ||
33 | MODULE_PARM_DESC(debug, "debug level"); | ||
34 | |||
35 | /*====================================================================== | ||
36 | Prototypes of internal functions | ||
37 | */ | ||
38 | |||
39 | //static void gigaset_process_response(int resp_code, int parameter, | ||
40 | // struct at_state_t *at_state, | ||
41 | // unsigned char ** pstring); | ||
42 | static struct cardstate *alloc_cs(struct gigaset_driver *drv); | ||
43 | static void free_cs(struct cardstate *cs); | ||
44 | static void make_valid(struct cardstate *cs, unsigned mask); | ||
45 | static void make_invalid(struct cardstate *cs, unsigned mask); | ||
46 | |||
47 | #define VALID_MINOR 0x01 | ||
48 | #define VALID_ID 0x02 | ||
49 | #define ASSIGNED 0x04 | ||
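/* Informal note on how these flag bits are used below: alloc_cs() sets
 * VALID_MINOR when a cardstate slot is taken and free_cs() clears it;
 * make_valid(cs, VALID_ID) marks the slot as registered with the
 * ISDN4Linux LL (cleared again via make_invalid() on teardown); and
 * gigaset_getunassignedcs()/gigaset_unassign() set and clear ASSIGNED
 * when a hot-plugged device is bound to or released from the slot. */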
50 | |||
51 | /* bitwise byte inversion table */ | ||
52 | __u8 gigaset_invtab[256] = { | ||
53 | 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, | ||
54 | 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, | ||
55 | 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, | ||
56 | 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, | ||
57 | 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, | ||
58 | 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, | ||
59 | 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, | ||
60 | 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, | ||
61 | 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, | ||
62 | 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, | ||
63 | 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, | ||
64 | 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, | ||
65 | 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, | ||
66 | 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, | ||
67 | 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, | ||
68 | 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, | ||
69 | 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, | ||
70 | 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, | ||
71 | 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, | ||
72 | 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, | ||
73 | 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, | ||
74 | 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, | ||
75 | 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, | ||
76 | 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, | ||
77 | 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, | ||
78 | 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, | ||
79 | 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, | ||
80 | 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, | ||
81 | 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, | ||
82 | 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, | ||
83 | 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, | ||
84 | 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff | ||
85 | }; | ||
86 | EXPORT_SYMBOL_GPL(gigaset_invtab); | ||
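/* Illustration (not part of the original source): gigaset_invtab maps a
 * byte to its bit-reversed value, e.g. gigaset_invtab[0x01] == 0x80 and
 * gigaset_invtab[0xe0] == 0x07, so a byte's bit order can be reversed
 * with a single table lookup:
 *
 *	c = gigaset_invtab[c];
 */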
87 | |||
88 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, | ||
89 | size_t len, const unsigned char *buf, int from_user) | ||
90 | { | ||
91 | unsigned char outbuf[80]; | ||
92 | unsigned char inbuf[80 - 1]; | ||
93 | size_t numin; | ||
94 | const unsigned char *in; | ||
95 | size_t space = sizeof outbuf - 1; | ||
96 | unsigned char *out = outbuf; | ||
97 | |||
98 | if (!from_user) { | ||
99 | in = buf; | ||
100 | numin = len; | ||
101 | } else { | ||
102 | numin = len < sizeof inbuf ? len : sizeof inbuf; | ||
103 | in = inbuf; | ||
104 | if (copy_from_user(inbuf, (const unsigned char __user *) buf, numin)) { | ||
105 | strncpy(inbuf, "<FAULT>", sizeof inbuf); | ||
106 | numin = sizeof "<FAULT>" - 1; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | for (; numin && space; --numin, ++in) { | ||
111 | --space; | ||
112 | if (*in >= 32) | ||
113 | *out++ = *in; | ||
114 | else { | ||
115 | *out++ = '^'; | ||
116 | if (space) { | ||
117 | *out++ = '@' + *in; | ||
118 | --space; | ||
119 | } | ||
120 | } | ||
121 | } | ||
122 | *out = 0; | ||
123 | |||
124 | dbg(level, "%s (%u bytes): %s", msg, (unsigned) len, outbuf); | ||
125 | } | ||
126 | EXPORT_SYMBOL_GPL(gigaset_dbg_buffer); | ||
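/* Worked example of the escaping above (illustration only, with a made-up
 * msg string "sent"): the bytes 'A', 'T', 0x0d are logged as
 *	sent (3 bytes): AT^M
 * because each control byte is printed as '^' followed by '@' + byte. */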
127 | |||
128 | static int setflags(struct cardstate *cs, unsigned flags, unsigned delay) | ||
129 | { | ||
130 | int r; | ||
131 | |||
132 | r = cs->ops->set_modem_ctrl(cs, cs->control_state, flags); | ||
133 | cs->control_state = flags; | ||
134 | if (r < 0) | ||
135 | return r; | ||
136 | |||
137 | if (delay) { | ||
138 | set_current_state(TASK_INTERRUPTIBLE); | ||
139 | schedule_timeout(delay * HZ / 1000); | ||
140 | } | ||
141 | |||
142 | return 0; | ||
143 | } | ||
144 | |||
145 | int gigaset_enterconfigmode(struct cardstate *cs) | ||
146 | { | ||
147 | int i, r; | ||
148 | |||
149 | if (!atomic_read(&cs->connected)) { | ||
150 | err("not connected!"); | ||
151 | return -1; | ||
152 | } | ||
153 | |||
154 | cs->control_state = TIOCM_RTS; //FIXME | ||
155 | |||
156 | r = setflags(cs, TIOCM_DTR, 200); | ||
157 | if (r < 0) | ||
158 | goto error; | ||
159 | r = setflags(cs, 0, 200); | ||
160 | if (r < 0) | ||
161 | goto error; | ||
162 | for (i = 0; i < 5; ++i) { | ||
163 | r = setflags(cs, TIOCM_RTS, 100); | ||
164 | if (r < 0) | ||
165 | goto error; | ||
166 | r = setflags(cs, 0, 100); | ||
167 | if (r < 0) | ||
168 | goto error; | ||
169 | } | ||
170 | r = setflags(cs, TIOCM_RTS|TIOCM_DTR, 800); | ||
171 | if (r < 0) | ||
172 | goto error; | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | error: | ||
177 | err("error %d on setuartbits!\n", -r); | ||
178 | cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value? | ||
179 | cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); | ||
180 | |||
181 | return -1; //r | ||
182 | } | ||
183 | |||
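/* Informal overview of the timeout machinery below: at_state->timer_expires
 * is a countdown in timer ticks; timer_tick() runs every GIG_TICK jiffies,
 * lets test_timeout() decrement each active countdown, and when a countdown
 * reaches zero queues an EV_TIMEOUT event and schedules the event tasklet. */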
184 | static int test_timeout(struct at_state_t *at_state) | ||
185 | { | ||
186 | if (!at_state->timer_expires) | ||
187 | return 0; | ||
188 | |||
189 | if (--at_state->timer_expires) { | ||
190 | dbg(DEBUG_MCMD, "decreased timer of %p to %lu", | ||
191 | at_state, at_state->timer_expires); | ||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, | ||
196 | atomic_read(&at_state->timer_index), NULL)) { | ||
197 | //FIXME what should we do? | ||
198 | } | ||
199 | |||
200 | return 1; | ||
201 | } | ||
202 | |||
203 | static void timer_tick(unsigned long data) | ||
204 | { | ||
205 | struct cardstate *cs = (struct cardstate *) data; | ||
206 | unsigned long flags; | ||
207 | unsigned channel; | ||
208 | struct at_state_t *at_state; | ||
209 | int timeout = 0; | ||
210 | |||
211 | spin_lock_irqsave(&cs->lock, flags); | ||
212 | |||
213 | for (channel = 0; channel < cs->channels; ++channel) | ||
214 | if (test_timeout(&cs->bcs[channel].at_state)) | ||
215 | timeout = 1; | ||
216 | |||
217 | if (test_timeout(&cs->at_state)) | ||
218 | timeout = 1; | ||
219 | |||
220 | list_for_each_entry(at_state, &cs->temp_at_states, list) | ||
221 | if (test_timeout(at_state)) | ||
222 | timeout = 1; | ||
223 | |||
224 | if (atomic_read(&cs->running)) { | ||
225 | mod_timer(&cs->timer, jiffies + GIG_TICK); | ||
226 | if (timeout) { | ||
227 | dbg(DEBUG_CMD, "scheduling timeout"); | ||
228 | tasklet_schedule(&cs->event_tasklet); | ||
229 | } | ||
230 | } | ||
231 | |||
232 | spin_unlock_irqrestore(&cs->lock, flags); | ||
233 | } | ||
234 | |||
235 | int gigaset_get_channel(struct bc_state *bcs) | ||
236 | { | ||
237 | unsigned long flags; | ||
238 | |||
239 | spin_lock_irqsave(&bcs->cs->lock, flags); | ||
240 | if (bcs->use_count) { | ||
241 | dbg(DEBUG_ANY, "could not allocate channel %d", bcs->channel); | ||
242 | spin_unlock_irqrestore(&bcs->cs->lock, flags); | ||
243 | return 0; | ||
244 | } | ||
245 | ++bcs->use_count; | ||
246 | bcs->busy = 1; | ||
247 | dbg(DEBUG_ANY, "allocated channel %d", bcs->channel); | ||
248 | spin_unlock_irqrestore(&bcs->cs->lock, flags); | ||
249 | return 1; | ||
250 | } | ||
251 | |||
252 | void gigaset_free_channel(struct bc_state *bcs) | ||
253 | { | ||
254 | unsigned long flags; | ||
255 | |||
256 | spin_lock_irqsave(&bcs->cs->lock, flags); | ||
257 | if (!bcs->busy) { | ||
258 | dbg(DEBUG_ANY, "could not free channel %d", bcs->channel); | ||
259 | spin_unlock_irqrestore(&bcs->cs->lock, flags); | ||
260 | return; | ||
261 | } | ||
262 | --bcs->use_count; | ||
263 | bcs->busy = 0; | ||
264 | dbg(DEBUG_ANY, "freed channel %d", bcs->channel); | ||
265 | spin_unlock_irqrestore(&bcs->cs->lock, flags); | ||
266 | } | ||
267 | |||
268 | int gigaset_get_channels(struct cardstate *cs) | ||
269 | { | ||
270 | unsigned long flags; | ||
271 | int i; | ||
272 | |||
273 | spin_lock_irqsave(&cs->lock, flags); | ||
274 | for (i = 0; i < cs->channels; ++i) | ||
275 | if (cs->bcs[i].use_count) { | ||
276 | spin_unlock_irqrestore(&cs->lock, flags); | ||
277 | dbg(DEBUG_ANY, "could not allocate all channels"); | ||
278 | return 0; | ||
279 | } | ||
280 | for (i = 0; i < cs->channels; ++i) | ||
281 | ++cs->bcs[i].use_count; | ||
282 | spin_unlock_irqrestore(&cs->lock, flags); | ||
283 | |||
284 | dbg(DEBUG_ANY, "allocated all channels"); | ||
285 | |||
286 | return 1; | ||
287 | } | ||
288 | |||
289 | void gigaset_free_channels(struct cardstate *cs) | ||
290 | { | ||
291 | unsigned long flags; | ||
292 | int i; | ||
293 | |||
294 | dbg(DEBUG_ANY, "unblocking all channels"); | ||
295 | spin_lock_irqsave(&cs->lock, flags); | ||
296 | for (i = 0; i < cs->channels; ++i) | ||
297 | --cs->bcs[i].use_count; | ||
298 | spin_unlock_irqrestore(&cs->lock, flags); | ||
299 | } | ||
300 | |||
301 | void gigaset_block_channels(struct cardstate *cs) | ||
302 | { | ||
303 | unsigned long flags; | ||
304 | int i; | ||
305 | |||
306 | dbg(DEBUG_ANY, "blocking all channels"); | ||
307 | spin_lock_irqsave(&cs->lock, flags); | ||
308 | for (i = 0; i < cs->channels; ++i) | ||
309 | ++cs->bcs[i].use_count; | ||
310 | spin_unlock_irqrestore(&cs->lock, flags); | ||
311 | } | ||
312 | |||
313 | static void clear_events(struct cardstate *cs) | ||
314 | { | ||
315 | struct event_t *ev; | ||
316 | unsigned head, tail; | ||
317 | |||
318 | /* no locking needed (no reader/writer allowed) */ | ||
319 | |||
320 | head = atomic_read(&cs->ev_head); | ||
321 | tail = atomic_read(&cs->ev_tail); | ||
322 | |||
323 | while (tail != head) { | ||
324 | ev = cs->events + head; | ||
325 | kfree(ev->ptr); | ||
326 | |||
327 | head = (head + 1) % MAX_EVENTS; | ||
328 | } | ||
329 | |||
330 | atomic_set(&cs->ev_head, tail); | ||
331 | } | ||
332 | |||
333 | struct event_t *gigaset_add_event(struct cardstate *cs, | ||
334 | struct at_state_t *at_state, int type, | ||
335 | void *ptr, int parameter, void *arg) | ||
336 | { | ||
337 | unsigned long flags; | ||
338 | unsigned next, tail; | ||
339 | struct event_t *event = NULL; | ||
340 | |||
341 | spin_lock_irqsave(&cs->ev_lock, flags); | ||
342 | |||
343 | tail = atomic_read(&cs->ev_tail); | ||
344 | next = (tail + 1) % MAX_EVENTS; | ||
345 | if (unlikely(next == atomic_read(&cs->ev_head))) | ||
346 | err("event queue full"); | ||
347 | else { | ||
348 | event = cs->events + tail; | ||
349 | event->type = type; | ||
350 | event->at_state = at_state; | ||
351 | event->cid = -1; | ||
352 | event->ptr = ptr; | ||
353 | event->arg = arg; | ||
354 | event->parameter = parameter; | ||
355 | atomic_set(&cs->ev_tail, next); | ||
356 | } | ||
357 | |||
358 | spin_unlock_irqrestore(&cs->ev_lock, flags); | ||
359 | |||
360 | return event; | ||
361 | } | ||
362 | EXPORT_SYMBOL_GPL(gigaset_add_event); | ||
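/* Usage sketch (taken from gigaset_start() further down): a successful
 * gigaset_add_event() is normally followed by gigaset_schedule_event()
 * so the event layer processes the queue:
 *
 *	if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL))
 *		goto error;	/* queue full, event was dropped */
 *	gigaset_schedule_event(cs);
 *
 * Note that the ring buffer keeps one slot free, so at most
 * MAX_EVENTS - 1 events can be queued at a time. */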
363 | |||
364 | static void free_strings(struct at_state_t *at_state) | ||
365 | { | ||
366 | int i; | ||
367 | |||
368 | for (i = 0; i < STR_NUM; ++i) { | ||
369 | kfree(at_state->str_var[i]); | ||
370 | at_state->str_var[i] = NULL; | ||
371 | } | ||
372 | } | ||
373 | |||
374 | static void clear_at_state(struct at_state_t *at_state) | ||
375 | { | ||
376 | free_strings(at_state); | ||
377 | } | ||
378 | |||
379 | static void dealloc_at_states(struct cardstate *cs) | ||
380 | { | ||
381 | struct at_state_t *cur, *next; | ||
382 | |||
383 | list_for_each_entry_safe(cur, next, &cs->temp_at_states, list) { | ||
384 | list_del(&cur->list); | ||
385 | free_strings(cur); | ||
386 | kfree(cur); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | static void gigaset_freebcs(struct bc_state *bcs) | ||
391 | { | ||
392 | int i; | ||
393 | |||
394 | dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); | ||
395 | if (!bcs->cs->ops->freebcshw(bcs)) { | ||
396 | dbg(DEBUG_INIT, "failed"); | ||
397 | } | ||
398 | |||
399 | dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); | ||
400 | clear_at_state(&bcs->at_state); | ||
401 | dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); | ||
402 | |||
403 | if (bcs->skb) | ||
404 | dev_kfree_skb(bcs->skb); | ||
405 | for (i = 0; i < AT_NUM; ++i) { | ||
406 | kfree(bcs->commands[i]); | ||
407 | bcs->commands[i] = NULL; | ||
408 | } | ||
409 | } | ||
410 | |||
411 | void gigaset_freecs(struct cardstate *cs) | ||
412 | { | ||
413 | int i; | ||
414 | unsigned long flags; | ||
415 | |||
416 | if (!cs) | ||
417 | return; | ||
418 | |||
419 | down(&cs->sem); | ||
420 | |||
421 | if (!cs->bcs) | ||
422 | goto f_cs; | ||
423 | if (!cs->inbuf) | ||
424 | goto f_bcs; | ||
425 | |||
426 | spin_lock_irqsave(&cs->lock, flags); | ||
427 | atomic_set(&cs->running, 0); | ||
428 | spin_unlock_irqrestore(&cs->lock, flags); /* event handler and timer are not rescheduled below */ | ||
429 | |||
430 | tasklet_kill(&cs->event_tasklet); | ||
431 | del_timer_sync(&cs->timer); | ||
432 | |||
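	/* cs->cs_init counts the initialization stages completed in
	 * gigaset_initcs(); the fall-through cases below undo them in
	 * reverse order. */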
433 | switch (cs->cs_init) { | ||
434 | default: | ||
435 | gigaset_if_free(cs); | ||
436 | |||
437 | dbg(DEBUG_INIT, "clearing hw"); | ||
438 | cs->ops->freecshw(cs); | ||
439 | |||
440 | //FIXME cmdbuf | ||
441 | |||
442 | /* fall through */ | ||
443 | case 2: /* error in initcshw */ | ||
444 | /* Deregister from LL */ | ||
445 | make_invalid(cs, VALID_ID); | ||
446 | dbg(DEBUG_INIT, "clearing iif"); | ||
447 | gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD); | ||
448 | |||
449 | /* fall through */ | ||
450 | case 1: /* error when registering to LL */ | ||
451 | dbg(DEBUG_INIT, "clearing at_state"); | ||
452 | clear_at_state(&cs->at_state); | ||
453 | dealloc_at_states(cs); | ||
454 | |||
455 | /* fall through */ | ||
456 | case 0: /* error in one call to initbcs */ | ||
457 | for (i = 0; i < cs->channels; ++i) { | ||
458 | dbg(DEBUG_INIT, "clearing bcs[%d]", i); | ||
459 | gigaset_freebcs(cs->bcs + i); | ||
460 | } | ||
461 | |||
462 | clear_events(cs); | ||
463 | dbg(DEBUG_INIT, "freeing inbuf"); | ||
464 | kfree(cs->inbuf); | ||
465 | } | ||
466 | f_bcs: dbg(DEBUG_INIT, "freeing bcs[]"); | ||
467 | kfree(cs->bcs); | ||
468 | f_cs: dbg(DEBUG_INIT, "freeing cs"); | ||
469 | up(&cs->sem); | ||
470 | free_cs(cs); | ||
471 | } | ||
472 | EXPORT_SYMBOL_GPL(gigaset_freecs); | ||
473 | |||
474 | void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, | ||
475 | struct cardstate *cs, int cid) | ||
476 | { | ||
477 | int i; | ||
478 | |||
479 | INIT_LIST_HEAD(&at_state->list); | ||
480 | at_state->waiting = 0; | ||
481 | at_state->getstring = 0; | ||
482 | at_state->pending_commands = 0; | ||
483 | at_state->timer_expires = 0; | ||
484 | at_state->timer_active = 0; | ||
485 | atomic_set(&at_state->timer_index, 0); | ||
486 | atomic_set(&at_state->seq_index, 0); | ||
487 | at_state->ConState = 0; | ||
488 | for (i = 0; i < STR_NUM; ++i) | ||
489 | at_state->str_var[i] = NULL; | ||
490 | at_state->int_var[VAR_ZDLE] = 0; | ||
491 | at_state->int_var[VAR_ZCTP] = -1; | ||
492 | at_state->int_var[VAR_ZSAU] = ZSAU_NULL; | ||
493 | at_state->cs = cs; | ||
494 | at_state->bcs = bcs; | ||
495 | at_state->cid = cid; | ||
496 | if (!cid) | ||
497 | at_state->replystruct = cs->tabnocid; | ||
498 | else | ||
499 | at_state->replystruct = cs->tabcid; | ||
500 | } | ||
501 | |||
502 | |||
503 | static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, | ||
504 | struct cardstate *cs, int inputstate) | ||
505 | /* inbuf->read must be allocated before! */ | ||
506 | { | ||
507 | atomic_set(&inbuf->head, 0); | ||
508 | atomic_set(&inbuf->tail, 0); | ||
509 | inbuf->cs = cs; | ||
510 | inbuf->bcs = bcs; /*base driver: NULL*/ | ||
511 | inbuf->rcvbuf = NULL; //FIXME | ||
512 | inbuf->inputstate = inputstate; | ||
513 | } | ||
514 | |||
515 | /* Initialize the b-channel structure */ | ||
516 | static struct bc_state *gigaset_initbcs(struct bc_state *bcs, | ||
517 | struct cardstate *cs, int channel) | ||
518 | { | ||
519 | int i; | ||
520 | |||
521 | bcs->tx_skb = NULL; //FIXME -> hw part | ||
522 | |||
523 | skb_queue_head_init(&bcs->squeue); | ||
524 | |||
525 | bcs->corrupted = 0; | ||
526 | bcs->trans_down = 0; | ||
527 | bcs->trans_up = 0; | ||
528 | |||
529 | dbg(DEBUG_INIT, "setting up bcs[%d]->at_state", channel); | ||
530 | gigaset_at_init(&bcs->at_state, bcs, cs, -1); | ||
531 | |||
532 | bcs->rcvbytes = 0; | ||
533 | |||
534 | #ifdef CONFIG_GIGASET_DEBUG | ||
535 | bcs->emptycount = 0; | ||
536 | #endif | ||
537 | |||
538 | dbg(DEBUG_INIT, "allocating bcs[%d]->skb", channel); | ||
539 | bcs->fcs = PPP_INITFCS; | ||
540 | bcs->inputstate = 0; | ||
541 | if (cs->ignoreframes) { | ||
542 | bcs->inputstate |= INS_skip_frame; | ||
543 | bcs->skb = NULL; | ||
544 | } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) | ||
545 | skb_reserve(bcs->skb, HW_HDR_LEN); | ||
546 | else { | ||
547 | warn("could not allocate skb"); | ||
548 | bcs->inputstate |= INS_skip_frame; | ||
549 | } | ||
550 | |||
551 | bcs->channel = channel; | ||
552 | bcs->cs = cs; | ||
553 | |||
554 | bcs->chstate = 0; | ||
555 | bcs->use_count = 1; | ||
556 | bcs->busy = 0; | ||
557 | bcs->ignore = cs->ignoreframes; | ||
558 | |||
559 | for (i = 0; i < AT_NUM; ++i) | ||
560 | bcs->commands[i] = NULL; | ||
561 | |||
562 | dbg(DEBUG_INIT, " setting up bcs[%d]->hw", channel); | ||
563 | if (cs->ops->initbcshw(bcs)) | ||
564 | return bcs; | ||
565 | |||
566 | //error: | ||
567 | dbg(DEBUG_INIT, " failed"); | ||
568 | |||
569 | dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel); | ||
570 | if (bcs->skb) | ||
571 | dev_kfree_skb(bcs->skb); | ||
572 | |||
573 | return NULL; | ||
574 | } | ||
575 | |||
576 | /* gigaset_initcs | ||
577 | * Allocate and initialize cardstate structure for Gigaset driver | ||
578 | * Calls hardware dependent gigaset_initcshw() function | ||
579 | * Calls B channel initialization function gigaset_initbcs() for each B channel | ||
580 | * parameters: | ||
581 | * drv hardware driver the device belongs to | ||
582 | * channels number of B channels supported by device | ||
583 | * onechannel !=0: B channel data and AT commands share one communication channel | ||
584 | * ==0: B channels have separate communication channels | ||
585 | * ignoreframes number of frames to ignore after setting up B channel | ||
586 | * cidmode !=0: start in CallID mode | ||
587 | * modulename name of driver module (used for I4L registration) | ||
588 | * return value: | ||
589 | * pointer to cardstate structure on success, NULL on error | ||
590 | */ | ||
591 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | ||
592 | int onechannel, int ignoreframes, | ||
593 | int cidmode, const char *modulename) | ||
594 | { | ||
595 | struct cardstate *cs = NULL; | ||
596 | int i; | ||
597 | |||
598 | dbg(DEBUG_INIT, "allocating cs"); | ||
599 | cs = alloc_cs(drv); | ||
600 | if (!cs) | ||
601 | goto error; | ||
602 | dbg(DEBUG_INIT, "allocating bcs[0..%d]", channels - 1); | ||
603 | cs->bcs = kmalloc(channels * sizeof(struct bc_state), GFP_KERNEL); | ||
604 | if (!cs->bcs) | ||
605 | goto error; | ||
606 | dbg(DEBUG_INIT, "allocating inbuf"); | ||
607 | cs->inbuf = kmalloc(sizeof(struct inbuf_t), GFP_KERNEL); | ||
608 | if (!cs->inbuf) | ||
609 | goto error; | ||
610 | |||
611 | cs->cs_init = 0; | ||
612 | cs->channels = channels; | ||
613 | cs->onechannel = onechannel; | ||
614 | cs->ignoreframes = ignoreframes; | ||
615 | INIT_LIST_HEAD(&cs->temp_at_states); | ||
616 | atomic_set(&cs->running, 0); | ||
617 | init_timer(&cs->timer); /* clear next & prev */ | ||
618 | spin_lock_init(&cs->ev_lock); | ||
619 | atomic_set(&cs->ev_tail, 0); | ||
620 | atomic_set(&cs->ev_head, 0); | ||
621 | init_MUTEX_LOCKED(&cs->sem); | ||
622 | tasklet_init(&cs->event_tasklet, &gigaset_handle_event, (unsigned long) cs); | ||
623 | atomic_set(&cs->commands_pending, 0); | ||
624 | cs->cur_at_seq = 0; | ||
625 | cs->gotfwver = -1; | ||
626 | cs->open_count = 0; | ||
627 | cs->tty = NULL; | ||
628 | atomic_set(&cs->cidmode, cidmode != 0); | ||
629 | |||
630 | //if(onechannel) { //FIXME | ||
631 | cs->tabnocid = gigaset_tab_nocid_m10x; | ||
632 | cs->tabcid = gigaset_tab_cid_m10x; | ||
633 | //} else { | ||
634 | // cs->tabnocid = gigaset_tab_nocid; | ||
635 | // cs->tabcid = gigaset_tab_cid; | ||
636 | //} | ||
637 | |||
638 | init_waitqueue_head(&cs->waitqueue); | ||
639 | cs->waiting = 0; | ||
640 | |||
641 | atomic_set(&cs->mode, M_UNKNOWN); | ||
642 | atomic_set(&cs->mstate, MS_UNINITIALIZED); | ||
643 | |||
644 | for (i = 0; i < channels; ++i) { | ||
645 | dbg(DEBUG_INIT, "setting up bcs[%d].read", i); | ||
646 | if (!gigaset_initbcs(cs->bcs + i, cs, i)) | ||
647 | goto error; | ||
648 | } | ||
649 | |||
650 | ++cs->cs_init; | ||
651 | |||
652 | dbg(DEBUG_INIT, "setting up at_state"); | ||
653 | spin_lock_init(&cs->lock); | ||
654 | gigaset_at_init(&cs->at_state, NULL, cs, 0); | ||
655 | cs->dle = 0; | ||
656 | cs->cbytes = 0; | ||
657 | |||
658 | dbg(DEBUG_INIT, "setting up inbuf"); | ||
659 | if (onechannel) { //FIXME distinction necessary? | ||
660 | gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command); | ||
661 | } else | ||
662 | gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command); | ||
663 | |||
664 | atomic_set(&cs->connected, 0); | ||
665 | |||
666 | dbg(DEBUG_INIT, "setting up cmdbuf"); | ||
667 | cs->cmdbuf = cs->lastcmdbuf = NULL; | ||
668 | spin_lock_init(&cs->cmdlock); | ||
669 | cs->curlen = 0; | ||
670 | cs->cmdbytes = 0; | ||
671 | |||
672 | /* | ||
673 | * Tell the ISDN4Linux subsystem (the LL) that | ||
674 | * a driver for a USB device is available. | ||
675 | * Once this is done, "isdnctrl" can bind a device to this driver even | ||
676 | * if no physical USB device is currently connected. | ||
677 | * The device only becomes usable, however, once a physical USB device | ||
678 | * has been connected (via "gigaset_probe"). | ||
679 | */ | ||
680 | dbg(DEBUG_INIT, "setting up iif"); | ||
681 | if (!gigaset_register_to_LL(cs, modulename)) { | ||
682 | err("register_isdn=>error"); | ||
683 | goto error; | ||
684 | } | ||
685 | |||
686 | make_valid(cs, VALID_ID); | ||
687 | ++cs->cs_init; | ||
688 | dbg(DEBUG_INIT, "setting up hw"); | ||
689 | if (!cs->ops->initcshw(cs)) | ||
690 | goto error; | ||
691 | |||
692 | ++cs->cs_init; | ||
693 | |||
694 | gigaset_if_init(cs); | ||
695 | |||
696 | atomic_set(&cs->running, 1); | ||
697 | cs->timer.data = (unsigned long) cs; | ||
698 | cs->timer.function = timer_tick; | ||
699 | cs->timer.expires = jiffies + GIG_TICK; | ||
700 | /* FIXME: can jiffies increase too much until the timer is added? | ||
701 | * Same problem(?) with mod_timer() in timer_tick(). */ | ||
702 | add_timer(&cs->timer); | ||
703 | |||
704 | dbg(DEBUG_INIT, "cs initialized!"); | ||
705 | up(&cs->sem); | ||
706 | return cs; | ||
707 | |||
708 | error: if (cs) | ||
709 | up(&cs->sem); | ||
710 | dbg(DEBUG_INIT, "failed"); | ||
711 | gigaset_freecs(cs); | ||
712 | return NULL; | ||
713 | } | ||
714 | EXPORT_SYMBOL_GPL(gigaset_initcs); | ||
715 | |||
716 | /* Reinitialize the b-channel structure (e.g. called on hangup or disconnect) */ | ||
717 | void gigaset_bcs_reinit(struct bc_state *bcs) | ||
718 | { | ||
719 | struct sk_buff *skb; | ||
720 | struct cardstate *cs = bcs->cs; | ||
721 | unsigned long flags; | ||
722 | |||
723 | while ((skb = skb_dequeue(&bcs->squeue)) != NULL) | ||
724 | dev_kfree_skb(skb); | ||
725 | |||
726 | spin_lock_irqsave(&cs->lock, flags); //FIXME | ||
727 | clear_at_state(&bcs->at_state); | ||
728 | bcs->at_state.ConState = 0; | ||
729 | bcs->at_state.timer_active = 0; | ||
730 | bcs->at_state.timer_expires = 0; | ||
731 | bcs->at_state.cid = -1; /* No CID defined */ | ||
732 | spin_unlock_irqrestore(&cs->lock, flags); | ||
733 | |||
734 | bcs->inputstate = 0; | ||
735 | |||
736 | #ifdef CONFIG_GIGASET_DEBUG | ||
737 | bcs->emptycount = 0; | ||
738 | #endif | ||
739 | |||
740 | bcs->fcs = PPP_INITFCS; | ||
741 | bcs->chstate = 0; | ||
742 | |||
743 | bcs->ignore = cs->ignoreframes; | ||
744 | if (bcs->ignore) | ||
745 | bcs->inputstate |= INS_skip_frame; | ||
746 | |||
747 | |||
748 | cs->ops->reinitbcshw(bcs); | ||
749 | } | ||
750 | |||
751 | static void cleanup_cs(struct cardstate *cs) | ||
752 | { | ||
753 | struct cmdbuf_t *cb, *tcb; | ||
754 | int i; | ||
755 | unsigned long flags; | ||
756 | |||
757 | spin_lock_irqsave(&cs->lock, flags); | ||
758 | |||
759 | atomic_set(&cs->mode, M_UNKNOWN); | ||
760 | atomic_set(&cs->mstate, MS_UNINITIALIZED); | ||
761 | |||
762 | clear_at_state(&cs->at_state); | ||
763 | dealloc_at_states(cs); | ||
764 | free_strings(&cs->at_state); | ||
765 | gigaset_at_init(&cs->at_state, NULL, cs, 0); | ||
766 | |||
767 | kfree(cs->inbuf->rcvbuf); | ||
768 | cs->inbuf->rcvbuf = NULL; | ||
769 | cs->inbuf->inputstate = INS_command; | ||
770 | atomic_set(&cs->inbuf->head, 0); | ||
771 | atomic_set(&cs->inbuf->tail, 0); | ||
772 | |||
773 | cb = cs->cmdbuf; | ||
774 | while (cb) { | ||
775 | tcb = cb; | ||
776 | cb = cb->next; | ||
777 | kfree(tcb); | ||
778 | } | ||
779 | cs->cmdbuf = cs->lastcmdbuf = NULL; | ||
780 | cs->curlen = 0; | ||
781 | cs->cmdbytes = 0; | ||
782 | cs->gotfwver = -1; | ||
783 | cs->dle = 0; | ||
784 | cs->cur_at_seq = 0; | ||
785 | atomic_set(&cs->commands_pending, 0); | ||
786 | cs->cbytes = 0; | ||
787 | |||
788 | spin_unlock_irqrestore(&cs->lock, flags); | ||
789 | |||
790 | for (i = 0; i < cs->channels; ++i) { | ||
791 | gigaset_freebcs(cs->bcs + i); | ||
792 | if (!gigaset_initbcs(cs->bcs + i, cs, i)) | ||
793 | break; //FIXME error handling | ||
794 | } | ||
795 | |||
796 | if (cs->waiting) { | ||
797 | cs->cmd_result = -ENODEV; | ||
798 | cs->waiting = 0; | ||
799 | wake_up_interruptible(&cs->waitqueue); | ||
800 | } | ||
801 | } | ||
802 | |||
803 | |||
804 | int gigaset_start(struct cardstate *cs) | ||
805 | { | ||
806 | if (down_interruptible(&cs->sem)) | ||
807 | return 0; | ||
808 | //info("USB device for Gigaset 307x now attached to Dev %d", ucs->minor); | ||
809 | |||
810 | atomic_set(&cs->connected, 1); | ||
811 | |||
812 | if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
813 | cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS); | ||
814 | cs->ops->baud_rate(cs, B115200); | ||
815 | cs->ops->set_line_ctrl(cs, CS8); | ||
816 | cs->control_state = TIOCM_DTR|TIOCM_RTS; | ||
817 | } else { | ||
818 | //FIXME use some saved values? | ||
819 | } | ||
820 | |||
821 | cs->waiting = 1; | ||
822 | |||
823 | if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { | ||
824 | cs->waiting = 0; | ||
825 | //FIXME what should we do? | ||
826 | goto error; | ||
827 | } | ||
828 | |||
829 | dbg(DEBUG_CMD, "scheduling START"); | ||
830 | gigaset_schedule_event(cs); | ||
831 | |||
832 | wait_event(cs->waitqueue, !cs->waiting); | ||
833 | |||
834 | up(&cs->sem); | ||
835 | return 1; | ||
836 | |||
837 | error: | ||
838 | up(&cs->sem); | ||
839 | return 0; | ||
840 | } | ||
841 | EXPORT_SYMBOL_GPL(gigaset_start); | ||
842 | |||
843 | void gigaset_shutdown(struct cardstate *cs) | ||
844 | { | ||
845 | down(&cs->sem); | ||
846 | |||
847 | cs->waiting = 1; | ||
848 | |||
849 | if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) { | ||
850 | //FIXME what should we do? | ||
851 | goto exit; | ||
852 | } | ||
853 | |||
854 | dbg(DEBUG_CMD, "scheduling SHUTDOWN"); | ||
855 | gigaset_schedule_event(cs); | ||
856 | |||
857 | if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) { | ||
858 | warn("aborted"); | ||
859 | //FIXME | ||
860 | } | ||
861 | |||
862 | if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
863 | //FIXME? | ||
864 | //gigaset_baud_rate(cs, B115200); | ||
865 | //gigaset_set_line_ctrl(cs, CS8); | ||
866 | //gigaset_set_modem_ctrl(cs, TIOCM_DTR|TIOCM_RTS, 0); | ||
867 | //cs->control_state = 0; | ||
868 | } else { | ||
869 | //FIXME use some saved values? | ||
870 | } | ||
871 | |||
872 | cleanup_cs(cs); | ||
873 | |||
874 | exit: | ||
875 | up(&cs->sem); | ||
876 | } | ||
877 | EXPORT_SYMBOL_GPL(gigaset_shutdown); | ||
878 | |||
879 | void gigaset_stop(struct cardstate *cs) | ||
880 | { | ||
881 | down(&cs->sem); | ||
882 | |||
883 | atomic_set(&cs->connected, 0); | ||
884 | |||
885 | cs->waiting = 1; | ||
886 | |||
887 | if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) { | ||
888 | //FIXME what should we do? | ||
889 | goto exit; | ||
890 | } | ||
891 | |||
892 | dbg(DEBUG_CMD, "scheduling STOP"); | ||
893 | gigaset_schedule_event(cs); | ||
894 | |||
895 | if (wait_event_interruptible(cs->waitqueue, !cs->waiting)) { | ||
896 | warn("aborted"); | ||
897 | //FIXME | ||
898 | } | ||
899 | |||
900 | /* Tell the LL that the device is not available */ | ||
901 | gigaset_i4l_cmd(cs, ISDN_STAT_STOP); // FIXME move to event layer? | ||
902 | |||
903 | cleanup_cs(cs); | ||
904 | |||
905 | exit: | ||
906 | up(&cs->sem); | ||
907 | } | ||
908 | EXPORT_SYMBOL_GPL(gigaset_stop); | ||
909 | |||
910 | static LIST_HEAD(drivers); | ||
911 | static spinlock_t driver_lock = SPIN_LOCK_UNLOCKED; | ||
912 | |||
913 | struct cardstate *gigaset_get_cs_by_id(int id) | ||
914 | { | ||
915 | unsigned long flags; | ||
916 | struct cardstate *ret = NULL; | ||
917 | struct cardstate *cs; | ||
918 | struct gigaset_driver *drv; | ||
919 | unsigned i; | ||
920 | |||
921 | spin_lock_irqsave(&driver_lock, flags); | ||
922 | list_for_each_entry(drv, &drivers, list) { | ||
923 | spin_lock(&drv->lock); | ||
924 | for (i = 0; i < drv->minors; ++i) { | ||
925 | if (drv->flags[i] & VALID_ID) { | ||
926 | cs = drv->cs + i; | ||
927 | if (cs->myid == id) | ||
928 | ret = cs; | ||
929 | } | ||
930 | if (ret) | ||
931 | break; | ||
932 | } | ||
933 | spin_unlock(&drv->lock); | ||
934 | if (ret) | ||
935 | break; | ||
936 | } | ||
937 | spin_unlock_irqrestore(&driver_lock, flags); | ||
938 | return ret; | ||
939 | } | ||
940 | |||
941 | void gigaset_debugdrivers(void) | ||
942 | { | ||
943 | unsigned long flags; | ||
944 | struct cardstate *cs; | ||
945 | struct gigaset_driver *drv; | ||
946 | unsigned i; | ||
947 | |||
948 | spin_lock_irqsave(&driver_lock, flags); | ||
949 | list_for_each_entry(drv, &drivers, list) { | ||
950 | dbg(DEBUG_DRIVER, "driver %p", drv); | ||
951 | spin_lock(&drv->lock); | ||
952 | for (i = 0; i < drv->minors; ++i) { | ||
953 | dbg(DEBUG_DRIVER, " index %u", i); | ||
954 | dbg(DEBUG_DRIVER, " flags 0x%02x", drv->flags[i]); | ||
955 | cs = drv->cs + i; | ||
956 | dbg(DEBUG_DRIVER, " cardstate %p", cs); | ||
957 | dbg(DEBUG_DRIVER, " minor_index %u", cs->minor_index); | ||
958 | dbg(DEBUG_DRIVER, " driver %p", cs->driver); | ||
959 | dbg(DEBUG_DRIVER, " i4l id %d", cs->myid); | ||
960 | } | ||
961 | spin_unlock(&drv->lock); | ||
962 | } | ||
963 | spin_unlock_irqrestore(&driver_lock, flags); | ||
964 | } | ||
965 | EXPORT_SYMBOL_GPL(gigaset_debugdrivers); | ||
966 | |||
967 | struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty) | ||
968 | { | ||
969 | if (tty->index < 0 || tty->index >= tty->driver->num) | ||
970 | return NULL; | ||
971 | return gigaset_get_cs_by_minor(tty->index + tty->driver->minor_start); | ||
972 | } | ||
973 | |||
974 | struct cardstate *gigaset_get_cs_by_minor(unsigned minor) | ||
975 | { | ||
976 | unsigned long flags; | ||
977 | struct cardstate *ret = NULL; | ||
978 | struct gigaset_driver *drv; | ||
979 | unsigned index; | ||
980 | |||
981 | spin_lock_irqsave(&driver_lock, flags); | ||
982 | list_for_each_entry(drv, &drivers, list) { | ||
983 | if (minor < drv->minor || minor >= drv->minor + drv->minors) | ||
984 | continue; | ||
985 | index = minor - drv->minor; | ||
986 | spin_lock(&drv->lock); | ||
987 | if (drv->flags[index] & VALID_MINOR) | ||
988 | ret = drv->cs + index; | ||
989 | spin_unlock(&drv->lock); | ||
990 | if (ret) | ||
991 | break; | ||
992 | } | ||
993 | spin_unlock_irqrestore(&driver_lock, flags); | ||
994 | return ret; | ||
995 | } | ||
996 | |||
997 | void gigaset_freedriver(struct gigaset_driver *drv) | ||
998 | { | ||
999 | unsigned long flags; | ||
1000 | |||
1001 | spin_lock_irqsave(&driver_lock, flags); | ||
1002 | list_del(&drv->list); | ||
1003 | spin_unlock_irqrestore(&driver_lock, flags); | ||
1004 | |||
1005 | gigaset_if_freedriver(drv); | ||
1006 | module_put(drv->owner); | ||
1007 | |||
1008 | kfree(drv->cs); | ||
1009 | kfree(drv->flags); | ||
1010 | kfree(drv); | ||
1011 | } | ||
1012 | EXPORT_SYMBOL_GPL(gigaset_freedriver); | ||
1013 | |||
1014 | /* gigaset_initdriver | ||
1015 | * Allocate and initialize gigaset_driver structure. Initialize interface. | ||
1016 | * parameters: | ||
1017 | * minor First minor number | ||
1018 | * minors Number of minors this driver can handle | ||
1019 | * procname Name of the driver (e.g. for /proc/tty/drivers, path in /proc/driver) | ||
1020 | * devname Name of the device files (prefix without minor number) | ||
1021 | * devfsname Devfs name of the device files without %d | ||
1022 | * return value: | ||
1023 | * Pointer to the gigaset_driver structure on success, NULL on failure. | ||
1024 | */ | ||
1025 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | ||
1026 | const char *procname, | ||
1027 | const char *devname, | ||
1028 | const char *devfsname, | ||
1029 | const struct gigaset_ops *ops, | ||
1030 | struct module *owner) | ||
1031 | { | ||
1032 | struct gigaset_driver *drv; | ||
1033 | unsigned long flags; | ||
1034 | unsigned i; | ||
1035 | |||
1036 | drv = kmalloc(sizeof *drv, GFP_KERNEL); | ||
1037 | if (!drv) | ||
1038 | return NULL; | ||
1039 | if (!try_module_get(owner)) | ||
1040 | return NULL; | ||
1041 | |||
1042 | drv->cs = NULL; | ||
1043 | drv->have_tty = 0; | ||
1044 | drv->minor = minor; | ||
1045 | drv->minors = minors; | ||
1046 | spin_lock_init(&drv->lock); | ||
1047 | drv->blocked = 0; | ||
1048 | drv->ops = ops; | ||
1049 | drv->owner = owner; | ||
1050 | INIT_LIST_HEAD(&drv->list); | ||
1051 | |||
1052 | drv->cs = kmalloc(minors * sizeof *drv->cs, GFP_KERNEL); | ||
1053 | if (!drv->cs) | ||
1054 | goto out1; | ||
1055 | drv->flags = kmalloc(minors * sizeof *drv->flags, GFP_KERNEL); | ||
1056 | if (!drv->flags) | ||
1057 | goto out2; | ||
1058 | |||
1059 | for (i = 0; i < minors; ++i) { | ||
1060 | drv->flags[i] = 0; | ||
1061 | drv->cs[i].driver = drv; | ||
1062 | drv->cs[i].ops = drv->ops; | ||
1063 | drv->cs[i].minor_index = i; | ||
1064 | } | ||
1065 | |||
1066 | gigaset_if_initdriver(drv, procname, devname, devfsname); | ||
1067 | |||
1068 | spin_lock_irqsave(&driver_lock, flags); | ||
1069 | list_add(&drv->list, &drivers); | ||
1070 | spin_unlock_irqrestore(&driver_lock, flags); | ||
1071 | |||
1072 | return drv; | ||
1073 | |||
1074 | out2: | ||
1075 | kfree(drv->cs); | ||
1076 | out1: | ||
1077 | kfree(drv); | ||
1078 | module_put(owner); | ||
1079 | return NULL; | ||
1080 | } | ||
1081 | EXPORT_SYMBOL_GPL(gigaset_initdriver); | ||
1082 | |||
1083 | static struct cardstate *alloc_cs(struct gigaset_driver *drv) | ||
1084 | { | ||
1085 | unsigned long flags; | ||
1086 | unsigned i; | ||
1087 | struct cardstate *ret = NULL; | ||
1088 | |||
1089 | spin_lock_irqsave(&drv->lock, flags); | ||
1090 | for (i = 0; i < drv->minors; ++i) { | ||
1091 | if (!(drv->flags[i] & VALID_MINOR)) { | ||
1092 | drv->flags[i] = VALID_MINOR; | ||
1093 | ret = drv->cs + i; | ||
1094 | } | ||
1095 | if (ret) | ||
1096 | break; | ||
1097 | } | ||
1098 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1099 | return ret; | ||
1100 | } | ||
1101 | |||
1102 | static void free_cs(struct cardstate *cs) | ||
1103 | { | ||
1104 | unsigned long flags; | ||
1105 | struct gigaset_driver *drv = cs->driver; | ||
1106 | spin_lock_irqsave(&drv->lock, flags); | ||
1107 | drv->flags[cs->minor_index] = 0; | ||
1108 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1109 | } | ||
1110 | |||
1111 | static void make_valid(struct cardstate *cs, unsigned mask) | ||
1112 | { | ||
1113 | unsigned long flags; | ||
1114 | struct gigaset_driver *drv = cs->driver; | ||
1115 | spin_lock_irqsave(&drv->lock, flags); | ||
1116 | drv->flags[cs->minor_index] |= mask; | ||
1117 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1118 | } | ||
1119 | |||
1120 | static void make_invalid(struct cardstate *cs, unsigned mask) | ||
1121 | { | ||
1122 | unsigned long flags; | ||
1123 | struct gigaset_driver *drv = cs->driver; | ||
1124 | spin_lock_irqsave(&drv->lock, flags); | ||
1125 | drv->flags[cs->minor_index] &= ~mask; | ||
1126 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1127 | } | ||
1128 | |||
1129 | /* For drivers without fixed assignment device<->cardstate (usb) */ | ||
1130 | struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv) | ||
1131 | { | ||
1132 | unsigned long flags; | ||
1133 | struct cardstate *cs = NULL; | ||
1134 | unsigned i; | ||
1135 | |||
1136 | spin_lock_irqsave(&drv->lock, flags); | ||
1137 | if (drv->blocked) | ||
1138 | goto exit; | ||
1139 | for (i = 0; i < drv->minors; ++i) { | ||
1140 | if ((drv->flags[i] & VALID_MINOR) && | ||
1141 | !(drv->flags[i] & ASSIGNED)) { | ||
1142 | drv->flags[i] |= ASSIGNED; | ||
1143 | cs = drv->cs + i; | ||
1144 | break; | ||
1145 | } | ||
1146 | } | ||
1147 | exit: | ||
1148 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1149 | return cs; | ||
1150 | } | ||
1151 | EXPORT_SYMBOL_GPL(gigaset_getunassignedcs); | ||
1152 | |||
1153 | void gigaset_unassign(struct cardstate *cs) | ||
1154 | { | ||
1155 | unsigned long flags; | ||
1156 | unsigned *minor_flags; | ||
1157 | struct gigaset_driver *drv; | ||
1158 | |||
1159 | if (!cs) | ||
1160 | return; | ||
1161 | drv = cs->driver; | ||
1162 | spin_lock_irqsave(&drv->lock, flags); | ||
1163 | minor_flags = drv->flags + cs->minor_index; | ||
1164 | if (*minor_flags & VALID_MINOR) | ||
1165 | *minor_flags &= ~ASSIGNED; | ||
1166 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1167 | } | ||
1168 | EXPORT_SYMBOL_GPL(gigaset_unassign); | ||
1169 | |||
1170 | void gigaset_blockdriver(struct gigaset_driver *drv) | ||
1171 | { | ||
1172 | unsigned long flags; | ||
1173 | spin_lock_irqsave(&drv->lock, flags); | ||
1174 | drv->blocked = 1; | ||
1175 | spin_unlock_irqrestore(&drv->lock, flags); | ||
1176 | } | ||
1177 | EXPORT_SYMBOL_GPL(gigaset_blockdriver); | ||
1178 | |||
1179 | static int __init gigaset_init_module(void) | ||
1180 | { | ||
1181 | /* in accordance with the principle of least astonishment, | ||
1182 | * setting the 'debug' parameter to 1 activates a sensible | ||
1183 | * set of default debug levels | ||
1184 | */ | ||
1185 | if (gigaset_debuglevel == 1) | ||
1186 | gigaset_debuglevel = DEBUG_DEFAULT; | ||
1187 | |||
1188 | info(DRIVER_AUTHOR); | ||
1189 | info(DRIVER_DESC); | ||
1190 | return 0; | ||
1191 | } | ||
1192 | |||
1193 | static void __exit gigaset_exit_module(void) | ||
1194 | { | ||
1195 | } | ||
1196 | |||
1197 | module_init(gigaset_init_module); | ||
1198 | module_exit(gigaset_exit_module); | ||
1199 | |||
1200 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
1201 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1202 | |||
1203 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c new file mode 100644 index 000000000000..fdcb80bb21c7 --- /dev/null +++ b/drivers/isdn/gigaset/ev-layer.c | |||
@@ -0,0 +1,1983 @@ | |||
1 | /* | ||
2 | * Stuff used by all variants of the driver | ||
3 | * | ||
4 | * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, | ||
5 | * Hansjoerg Lipp <hjlipp@web.de>, | ||
6 | * Tilman Schmidt <tilman@imap.cc>. | ||
7 | * | ||
8 | * ===================================================================== | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of | ||
12 | * the License, or (at your option) any later version. | ||
13 | * ===================================================================== | ||
14 | * ToDo: ... | ||
15 | * ===================================================================== | ||
16 | * Version: $Id: ev-layer.c,v 1.4.2.18 2006/02/04 18:28:16 hjlipp Exp $ | ||
17 | * ===================================================================== | ||
18 | */ | ||
19 | |||
20 | #include "gigaset.h" | ||
21 | |||
22 | /* ========================================================== */ | ||
23 | /* bit masks for pending commands */ | ||
24 | #define PC_INIT 0x004 | ||
25 | #define PC_DLE0 0x008 | ||
26 | #define PC_DLE1 0x010 | ||
27 | #define PC_CID 0x080 | ||
28 | #define PC_NOCID 0x100 | ||
29 | #define PC_HUP 0x002 | ||
30 | #define PC_DIAL 0x001 | ||
31 | #define PC_ACCEPT 0x040 | ||
32 | #define PC_SHUTDOWN 0x020 | ||
33 | #define PC_CIDMODE 0x200 | ||
34 | #define PC_UMMODE 0x400 | ||
35 | |||
36 | /* types of modem responses */ | ||
37 | #define RT_NOTHING 0 | ||
38 | #define RT_ZSAU 1 | ||
39 | #define RT_RING 2 | ||
40 | #define RT_NUMBER 3 | ||
41 | #define RT_STRING 4 | ||
42 | #define RT_HEX 5 | ||
43 | #define RT_ZCAU 6 | ||
44 | |||
45 | /* Possible ASCII responses */ | ||
46 | #define RSP_OK 0 | ||
47 | //#define RSP_BUSY 1 | ||
48 | //#define RSP_CONNECT 2 | ||
49 | #define RSP_ZGCI 3 | ||
50 | #define RSP_RING 4 | ||
51 | #define RSP_ZAOC 5 | ||
52 | #define RSP_ZCSTR 6 | ||
53 | #define RSP_ZCFGT 7 | ||
54 | #define RSP_ZCFG 8 | ||
55 | #define RSP_ZCCR 9 | ||
56 | #define RSP_EMPTY 10 | ||
57 | #define RSP_ZLOG 11 | ||
58 | #define RSP_ZCAU 12 | ||
59 | #define RSP_ZMWI 13 | ||
60 | #define RSP_ZABINFO 14 | ||
61 | #define RSP_ZSMLSTCHG 15 | ||
62 | #define RSP_VAR 100 | ||
63 | #define RSP_ZSAU (RSP_VAR + VAR_ZSAU) | ||
64 | #define RSP_ZDLE (RSP_VAR + VAR_ZDLE) | ||
65 | #define RSP_ZVLS (RSP_VAR + VAR_ZVLS) | ||
66 | #define RSP_ZCTP (RSP_VAR + VAR_ZCTP) | ||
67 | #define RSP_STR (RSP_VAR + VAR_NUM) | ||
68 | #define RSP_NMBR (RSP_STR + STR_NMBR) | ||
69 | #define RSP_ZCPN (RSP_STR + STR_ZCPN) | ||
70 | #define RSP_ZCON (RSP_STR + STR_ZCON) | ||
71 | #define RSP_ZBC (RSP_STR + STR_ZBC) | ||
72 | #define RSP_ZHLC (RSP_STR + STR_ZHLC) | ||
73 | #define RSP_ERROR -1 /* ERROR */ | ||
74 | #define RSP_WRONG_CID -2 /* unknown cid in cmd */ | ||
75 | //#define RSP_EMPTY -3 | ||
76 | #define RSP_UNKNOWN -4 /* unknown response */ | ||
77 | #define RSP_FAIL -5 /* internal error */ | ||
78 | #define RSP_INVAL -6 /* invalid response */ | ||
79 | |||
80 | #define RSP_NONE -19 | ||
81 | #define RSP_STRING -20 | ||
82 | #define RSP_NULL -21 | ||
83 | //#define RSP_RETRYFAIL -22 | ||
84 | //#define RSP_RETRY -23 | ||
85 | //#define RSP_SKIP -24 | ||
86 | #define RSP_INIT -27 | ||
87 | #define RSP_ANY -26 | ||
88 | #define RSP_LAST -28 | ||
89 | #define RSP_NODEV -9 | ||
90 | |||
91 | /* actions for process_response */ | ||
92 | #define ACT_NOTHING 0 | ||
93 | #define ACT_SETDLE1 1 | ||
94 | #define ACT_SETDLE0 2 | ||
95 | #define ACT_FAILINIT 3 | ||
96 | #define ACT_HUPMODEM 4 | ||
97 | #define ACT_CONFIGMODE 5 | ||
98 | #define ACT_INIT 6 | ||
99 | #define ACT_DLE0 7 | ||
100 | #define ACT_DLE1 8 | ||
101 | #define ACT_FAILDLE0 9 | ||
102 | #define ACT_FAILDLE1 10 | ||
103 | #define ACT_RING 11 | ||
104 | #define ACT_CID 12 | ||
105 | #define ACT_FAILCID 13 | ||
106 | #define ACT_SDOWN 14 | ||
107 | #define ACT_FAILSDOWN 15 | ||
108 | #define ACT_DEBUG 16 | ||
109 | #define ACT_WARN 17 | ||
110 | #define ACT_DIALING 18 | ||
111 | #define ACT_ABORTDIAL 19 | ||
112 | #define ACT_DISCONNECT 20 | ||
113 | #define ACT_CONNECT 21 | ||
114 | #define ACT_REMOTEREJECT 22 | ||
115 | #define ACT_CONNTIMEOUT 23 | ||
116 | #define ACT_REMOTEHUP 24 | ||
117 | #define ACT_ABORTHUP 25 | ||
118 | #define ACT_ICALL 26 | ||
119 | #define ACT_ACCEPTED 27 | ||
120 | #define ACT_ABORTACCEPT 28 | ||
121 | #define ACT_TIMEOUT 29 | ||
122 | #define ACT_GETSTRING 30 | ||
123 | #define ACT_SETVER 31 | ||
124 | #define ACT_FAILVER 32 | ||
125 | #define ACT_GOTVER 33 | ||
126 | #define ACT_TEST 34 | ||
127 | #define ACT_ERROR 35 | ||
128 | #define ACT_ABORTCID 36 | ||
129 | #define ACT_ZCAU 37 | ||
130 | #define ACT_NOTIFY_BC_DOWN 38 | ||
131 | #define ACT_NOTIFY_BC_UP 39 | ||
132 | #define ACT_DIAL 40 | ||
133 | #define ACT_ACCEPT 41 | ||
134 | #define ACT_PROTO_L2 42 | ||
135 | #define ACT_HUP 43 | ||
136 | #define ACT_IF_LOCK 44 | ||
137 | #define ACT_START 45 | ||
138 | #define ACT_STOP 46 | ||
139 | #define ACT_FAKEDLE0 47 | ||
140 | #define ACT_FAKEHUP 48 | ||
141 | #define ACT_FAKESDOWN 49 | ||
142 | #define ACT_SHUTDOWN 50 | ||
143 | #define ACT_PROC_CIDMODE 51 | ||
144 | #define ACT_UMODESET 52 | ||
145 | #define ACT_FAILUMODE 53 | ||
146 | #define ACT_CMODESET 54 | ||
147 | #define ACT_FAILCMODE 55 | ||
148 | #define ACT_IF_VER 56 | ||
149 | #define ACT_CMD 100 | ||
150 | |||
151 | /* at command sequences */ | ||
152 | #define SEQ_NONE 0 | ||
153 | #define SEQ_INIT 100 | ||
154 | #define SEQ_DLE0 200 | ||
155 | #define SEQ_DLE1 250 | ||
156 | #define SEQ_CID 300 | ||
157 | #define SEQ_NOCID 350 | ||
158 | #define SEQ_HUP 400 | ||
159 | #define SEQ_DIAL 600 | ||
160 | #define SEQ_ACCEPT 720 | ||
161 | #define SEQ_SHUTDOWN 500 | ||
162 | #define SEQ_CIDMODE 10 | ||
163 | #define SEQ_UMMODE 11 | ||
164 | |||
165 | |||
166 | // 100: init, 200: dle0, 250: dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring | ||
167 | struct reply_t gigaset_tab_nocid_m10x[]= /* with dle mode */ | ||
168 | { | ||
169 | /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ | ||
170 | |||
171 | /* initialize device, set cid mode if possible */ | ||
172 | //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}}, | ||
173 | //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}}, | ||
174 | //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT, | ||
175 | // {ACT_TIMEOUT}}, | ||
176 | |||
177 | {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT, | ||
178 | {ACT_TIMEOUT}}, /* wait until device is ready */ | ||
179 | |||
180 | {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */ | ||
181 | {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */ | ||
182 | |||
183 | {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */ | ||
184 | {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */ | ||
185 | |||
186 | {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */ | ||
187 | {RSP_OK, 108,108, -1, 104,-1}, | ||
188 | {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"}, | ||
189 | {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}}, | ||
190 | {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}}, | ||
191 | |||
192 | {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0, | ||
193 | ACT_HUPMODEM, | ||
194 | ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */ | ||
195 | {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"}, | ||
196 | |||
197 | {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */ | ||
198 | {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}}, | ||
199 | {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}}, | ||
200 | {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}}, | ||
201 | |||
202 | {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}}, | ||
203 | {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}}, | ||
204 | |||
205 | {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}}, | ||
206 | |||
207 | {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, | ||
208 | {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, | ||
209 | {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}}, | ||
210 | #if 0 | ||
211 | {EV_TIMEOUT, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"}, | ||
212 | {RSP_ERROR, 120,121, -1, 130, 5, {ACT_FAILVER}, "^SGCI=1\r"}, | ||
213 | {RSP_OK, 121,121, -1, 130, 5, {ACT_GOTVER}, "^SGCI=1\r"}, | ||
214 | |||
215 | {RSP_OK, 130,130, -1, 0, 0, {ACT_INIT}}, | ||
216 | {RSP_ERROR, 130,130, -1, 0, 0, {ACT_FAILINIT}}, | ||
217 | {EV_TIMEOUT, 130,130, -1, 0, 0, {ACT_FAILINIT}}, | ||
218 | #endif | ||
219 | |||
220 | /* leave dle mode */ | ||
221 | {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, | ||
222 | {RSP_OK, 201,201, -1, 202,-1}, | ||
223 | //{RSP_ZDLE, 202,202, 0, 202, 0, {ACT_ERROR}},//DELETE | ||
224 | {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, | ||
225 | {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, | ||
226 | {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, | ||
227 | {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, | ||
228 | |||
229 | /* enter dle mode */ | ||
230 | {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, | ||
231 | {RSP_OK, 251,251, -1, 252,-1}, | ||
232 | {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}}, | ||
233 | {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, | ||
234 | {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, | ||
235 | |||
236 | /* incoming call */ | ||
237 | {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}}, | ||
238 | |||
239 | /* get cid */ | ||
240 | //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}}, | ||
241 | //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}}, | ||
242 | //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"}, | ||
243 | |||
244 | {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, | ||
245 | {RSP_OK, 301,301, -1, 302,-1}, | ||
246 | {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}}, | ||
247 | {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}}, | ||
248 | {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}}, | ||
249 | |||
250 | /* enter cid mode */ | ||
251 | {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, | ||
252 | {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}}, | ||
253 | {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, | ||
254 | {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, | ||
255 | |||
256 | /* leave cid mode */ | ||
257 | //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"}, | ||
258 | {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"}, | ||
259 | {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}}, | ||
260 | {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, | ||
261 | {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, | ||
262 | |||
263 | /* abort getting cid */ | ||
264 | {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}}, | ||
265 | |||
266 | /* reset */ | ||
267 | #if 0 | ||
268 | {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 503, 5, {0}, "^SGCI=0\r"}, | ||
269 | {RSP_OK, 503,503, -1, 504, 5, {0}, "Z\r"}, | ||
270 | #endif | ||
271 | {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, | ||
272 | {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}}, | ||
273 | {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, | ||
274 | {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, | ||
275 | {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}}, | ||
276 | |||
277 | {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME | ||
278 | {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME | ||
279 | {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME | ||
280 | {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME | ||
281 | {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME | ||
282 | {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME | ||
283 | |||
284 | /* misc. */ | ||
285 | {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
286 | {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
287 | {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
288 | {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
289 | {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
290 | {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
291 | {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
292 | |||
293 | {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, | ||
294 | {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, | ||
295 | {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, | ||
296 | {RSP_LAST} | ||
297 | }; | ||
298 | |||
299 | // 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall | ||
300 | struct reply_t gigaset_tab_cid_m10x[] = /* for M10x */ | ||
301 | { | ||
302 | /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ | ||
303 | |||
304 | /* dial */ | ||
305 | {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME | ||
306 | {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}}, | ||
307 | {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}}, | ||
308 | {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, | ||
309 | {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, | ||
310 | {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}}, | ||
311 | {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, | ||
312 | {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, | ||
313 | {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, | ||
314 | {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" (terminal mode) */ | ||
315 | {RSP_OK, 607,607, -1, 608,-1}, | ||
316 | //{RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 608, 0, {ACT_ERROR}},//DELETE | ||
317 | {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, | ||
318 | {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, | ||
319 | |||
320 | {RSP_ZVLS, 608,608, 17, -1,-1, {ACT_DEBUG}}, | ||
321 | {RSP_ZCTP, 609,609, -1, -1,-1, {ACT_DEBUG}}, | ||
322 | {RSP_ZCPN, 609,609, -1, -1,-1, {ACT_DEBUG}}, | ||
323 | {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, | ||
324 | {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, | ||
325 | |||
326 | /* dialing */ | ||
327 | {RSP_ZCTP, 650,650, -1, -1,-1, {ACT_DEBUG}}, | ||
328 | {RSP_ZCPN, 650,650, -1, -1,-1, {ACT_DEBUG}}, | ||
329 | {RSP_ZSAU, 650,650,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, /* some devices don't send this */ | ||
330 | |||
331 | /* connection established */ | ||
332 | {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 | ||
333 | {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, //FIXME -> DLE1 | ||
334 | |||
335 | {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, //FIXME new constate + timeout | ||
336 | |||
337 | /* remote hangup */ | ||
338 | {RSP_ZSAU, 650,650,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, | ||
339 | {RSP_ZSAU, 750,750,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, | ||
340 | {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, | ||
341 | |||
342 | /* hangup */ | ||
343 | {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME | ||
344 | {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? | ||
345 | {RSP_OK, 401,401, -1, 402, 5}, | ||
346 | {RSP_ZVLS, 402,402, 0, 403, 5}, | ||
347 | {RSP_ZSAU, 403,403,ZSAU_DISCONNECT_REQ, -1,-1, {ACT_DEBUG}}, /* if not remote hup */ | ||
348 | //{RSP_ZSAU, 403,403,ZSAU_NULL, 401, 0, {ACT_ERROR}}, //DELETE//FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | ||
349 | {RSP_ZSAU, 403,403,ZSAU_NULL, 0, 0, {ACT_DISCONNECT}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | ||
350 | {RSP_NODEV, 401,403, -1, 0, 0, {ACT_FAKEHUP}}, //FIXME -> DLE0 // should we do this _before_ hanging up for base driver? | ||
351 | {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, | ||
352 | {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, | ||
353 | |||
354 | {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout | ||
355 | |||
356 | /* ring */ | ||
357 | {RSP_ZBC, 700,700, -1, -1,-1, {0}}, | ||
358 | {RSP_ZHLC, 700,700, -1, -1,-1, {0}}, | ||
359 | {RSP_NMBR, 700,700, -1, -1,-1, {0}}, | ||
360 | {RSP_ZCPN, 700,700, -1, -1,-1, {0}}, | ||
361 | {RSP_ZCTP, 700,700, -1, -1,-1, {0}}, | ||
362 | {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}}, | ||
363 | {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, | ||
364 | |||
365 | /*accept icall*/ | ||
366 | {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME | ||
367 | {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}}, | ||
368 | {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}}, | ||
369 | {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" (terminal mode) */ | ||
370 | {RSP_OK, 723,723, -1, 724, 5, {0}}, | ||
371 | {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}}, | ||
372 | {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, | ||
373 | {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, | ||
374 | {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}}, | ||
375 | {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, | ||
376 | {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, | ||
377 | |||
378 | {EV_TIMEOUT, 750,750, -1, 0, 0, {ACT_CONNTIMEOUT}}, | ||
379 | |||
380 | /* misc. */ | ||
381 | {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME | ||
382 | |||
383 | {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
384 | {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
385 | {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
386 | {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME | ||
387 | |||
388 | {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, | ||
389 | {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, | ||
390 | {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, | ||
391 | {RSP_LAST} | ||
392 | }; | ||
393 | |||
394 | |||
395 | #if 0 | ||
396 | static struct reply_t tab_nocid[]= /* no dle mode */ //FIXME carry over changes from the tables above | ||
397 | { | ||
398 | /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ | ||
399 | |||
400 | {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL}, | ||
401 | {RSP_LAST,0,0,0,0,0,0} | ||
402 | }; | ||
403 | |||
404 | static struct reply_t tab_cid[] = /* no dle mode */ //FIXME carry over changes from the tables above | ||
405 | { | ||
406 | /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ | ||
407 | |||
408 | {RSP_ANY, -1, -1, -1, -1,-1, ACT_WARN, NULL}, | ||
409 | {RSP_LAST,0,0,0,0,0,0} | ||
410 | }; | ||
411 | #endif | ||
412 | |||
413 | static struct resp_type_t resp_type[]= | ||
414 | { | ||
415 | /*{"", RSP_EMPTY, RT_NOTHING},*/ | ||
416 | {"OK", RSP_OK, RT_NOTHING}, | ||
417 | {"ERROR", RSP_ERROR, RT_NOTHING}, | ||
418 | {"ZSAU", RSP_ZSAU, RT_ZSAU}, | ||
419 | {"ZCAU", RSP_ZCAU, RT_ZCAU}, | ||
420 | {"RING", RSP_RING, RT_RING}, | ||
421 | {"ZGCI", RSP_ZGCI, RT_NUMBER}, | ||
422 | {"ZVLS", RSP_ZVLS, RT_NUMBER}, | ||
423 | {"ZCTP", RSP_ZCTP, RT_NUMBER}, | ||
424 | {"ZDLE", RSP_ZDLE, RT_NUMBER}, | ||
425 | {"ZCFGT", RSP_ZCFGT, RT_NUMBER}, | ||
426 | {"ZCCR", RSP_ZCCR, RT_NUMBER}, | ||
427 | {"ZMWI", RSP_ZMWI, RT_NUMBER}, | ||
428 | {"ZHLC", RSP_ZHLC, RT_STRING}, | ||
429 | {"ZBC", RSP_ZBC, RT_STRING}, | ||
430 | {"NMBR", RSP_NMBR, RT_STRING}, | ||
431 | {"ZCPN", RSP_ZCPN, RT_STRING}, | ||
432 | {"ZCON", RSP_ZCON, RT_STRING}, | ||
433 | {"ZAOC", RSP_ZAOC, RT_STRING}, | ||
434 | {"ZCSTR", RSP_ZCSTR, RT_STRING}, | ||
435 | {"ZCFG", RSP_ZCFG, RT_HEX}, | ||
436 | {"ZLOG", RSP_ZLOG, RT_NOTHING}, | ||
437 | {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, | ||
438 | {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, | ||
439 | {NULL,0,0} | ||
440 | }; | ||
441 | |||
442 | /* | ||
443 |  * Get decimal integer from char-pointer | ||
444 | */ | ||
445 | static int isdn_getnum(char *p) | ||
446 | { | ||
447 | int v = -1; | ||
448 | |||
449 | IFNULLRETVAL(p, -1); | ||
450 | |||
451 | dbg(DEBUG_TRANSCMD, "string: %s", p); | ||
452 | |||
453 | while (*p >= '0' && *p <= '9') | ||
454 | v = ((v < 0) ? 0 : (v * 10)) + (int) ((*p++) - '0'); | ||
455 | if (*p) | ||
456 | v = -1; /* invalid character */ | ||
457 | return v; | ||
458 | } | ||
459 | |||
460 | /* | ||
461 |  * Get hexadecimal integer from char-pointer | ||
462 | */ | ||
463 | static int isdn_gethex(char *p) | ||
464 | { | ||
465 | int v = 0; | ||
466 | int c; | ||
467 | |||
468 | IFNULLRETVAL(p, -1); | ||
469 | |||
470 | dbg(DEBUG_TRANSCMD, "string: %s", p); | ||
471 | |||
472 | if (!*p) | ||
473 | return -1; | ||
474 | |||
475 | do { | ||
476 | if (v > (INT_MAX - 15) / 16) | ||
477 | return -1; | ||
478 | c = *p; | ||
479 | if (c >= '0' && c <= '9') | ||
480 | c -= '0'; | ||
481 | else if (c >= 'a' && c <= 'f') | ||
482 | c -= 'a' - 10; | ||
483 | else if (c >= 'A' && c <= 'F') | ||
484 | c -= 'A' - 10; | ||
485 | else | ||
486 | return -1; | ||
487 | v = v * 16 + c; | ||
488 | } while (*++p); | ||
489 | |||
490 | return v; | ||
491 | } | ||
492 | |||
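/* new_index
 * advance a wrapping index counter (timer or sequence index),
 * resetting it to 0 once it has reached max
 */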
493 | static inline void new_index(atomic_t *index, int max) | ||
494 | { | ||
495 | if (atomic_read(index) == max) //FIXME race? | ||
496 | atomic_set(index, 0); | ||
497 | else | ||
498 | atomic_inc(index); | ||
499 | } | ||
500 | |||
501 | /* retrieve CID from parsed response | ||
502 | * returns 0 if no CID, -1 if invalid CID, or CID value 1..65535 | ||
503 | */ | ||
504 | static int cid_of_response(char *s) | ||
505 | { | ||
506 | int cid; | ||
507 | |||
508 | if (s[-1] != ';') | ||
509 | return 0; /* no CID separator */ | ||
510 | cid = isdn_getnum(s); | ||
511 | if (cid < 0) | ||
512 | return 0; /* CID not numeric */ | ||
513 | if (cid < 1 || cid > 65535) | ||
514 | return -1; /* CID out of range */ | ||
515 | return cid; | ||
516 | //FIXME is ;<digit>+ at end of non-CID response really impossible? | ||
517 | } | ||
518 | |||
519 | /* This function will be called via task queue from the callback handler. | ||
520 |  * We have received a modem response and have to handle it. | ||
521 | */ | ||
522 | void gigaset_handle_modem_response(struct cardstate *cs) | ||
523 | { | ||
524 | unsigned char *argv[MAX_REC_PARAMS + 1]; | ||
525 | int params; | ||
526 | int i, j; | ||
527 | struct resp_type_t *rt; | ||
528 | int curarg; | ||
529 | unsigned long flags; | ||
530 | unsigned next, tail, head; | ||
531 | struct event_t *event; | ||
532 | int resp_code; | ||
533 | int param_type; | ||
534 | int abort; | ||
535 | size_t len; | ||
536 | int cid; | ||
537 | int rawstring; | ||
538 | |||
539 | IFNULLRET(cs); | ||
540 | |||
541 | len = cs->cbytes; | ||
542 | if (!len) { | ||
543 | /* ignore additional LFs/CRs (M10x config mode or cx100) */ | ||
544 | dbg(DEBUG_MCMD, "skipped EOL [%02X]", cs->respdata[len]); | ||
545 | return; | ||
546 | } | ||
547 | cs->respdata[len] = 0; | ||
548 | dbg(DEBUG_TRANSCMD, "raw string: '%s'", cs->respdata); | ||
549 | argv[0] = cs->respdata; | ||
550 | params = 1; | ||
551 | if (cs->at_state.getstring) { | ||
552 | /* getstring only allowed without cid at the moment */ | ||
553 | cs->at_state.getstring = 0; | ||
554 | rawstring = 1; | ||
555 | cid = 0; | ||
556 | } else { | ||
557 | /* parse line */ | ||
558 | for (i = 0; i < len; i++) | ||
559 | switch (cs->respdata[i]) { | ||
560 | case ';': | ||
561 | case ',': | ||
562 | case '=': | ||
563 | if (params > MAX_REC_PARAMS) { | ||
564 | warn("too many parameters in response"); | ||
565 | /* need last parameter (might be CID) */ | ||
566 | params--; | ||
567 | } | ||
568 | argv[params++] = cs->respdata + i + 1; | ||
569 | } | ||
570 | |||
571 | rawstring = 0; | ||
572 | cid = params > 1 ? cid_of_response(argv[params-1]) : 0; | ||
573 | if (cid < 0) { | ||
574 | gigaset_add_event(cs, &cs->at_state, RSP_INVAL, | ||
575 | NULL, 0, NULL); | ||
576 | return; | ||
577 | } | ||
578 | |||
579 | for (j = 1; j < params; ++j) | ||
580 | argv[j][-1] = 0; | ||
581 | |||
582 | dbg(DEBUG_TRANSCMD, "CMD received: %s", argv[0]); | ||
583 | if (cid) { | ||
584 | --params; | ||
585 | dbg(DEBUG_TRANSCMD, "CID: %s", argv[params]); | ||
586 | } | ||
587 | dbg(DEBUG_TRANSCMD, "available params: %d", params - 1); | ||
588 | for (j = 1; j < params; j++) | ||
589 | dbg(DEBUG_TRANSCMD, "param %d: %s", j, argv[j]); | ||
590 | } | ||
591 | |||
592 | spin_lock_irqsave(&cs->ev_lock, flags); | ||
593 | head = atomic_read(&cs->ev_head); | ||
594 | tail = atomic_read(&cs->ev_tail); | ||
595 | |||
596 | abort = 1; | ||
597 | curarg = 0; | ||
598 | while (curarg < params) { | ||
599 | next = (tail + 1) % MAX_EVENTS; | ||
600 | if (unlikely(next == head)) { | ||
601 | err("event queue full"); | ||
602 | break; | ||
603 | } | ||
604 | |||
605 | event = cs->events + tail; | ||
606 | event->at_state = NULL; | ||
607 | event->cid = cid; | ||
608 | event->ptr = NULL; | ||
609 | event->arg = NULL; | ||
610 | tail = next; | ||
611 | |||
612 | if (rawstring) { | ||
613 | resp_code = RSP_STRING; | ||
614 | param_type = RT_STRING; | ||
615 | } else { | ||
616 | for (rt = resp_type; rt->response; ++rt) | ||
617 | if (!strcmp(argv[curarg], rt->response)) | ||
618 | break; | ||
619 | |||
620 | if (!rt->response) { | ||
621 | event->type = RSP_UNKNOWN; | ||
622 | warn("unknown modem response: %s", | ||
623 | argv[curarg]); | ||
624 | break; | ||
625 | } | ||
626 | |||
627 | resp_code = rt->resp_code; | ||
628 | param_type = rt->type; | ||
629 | ++curarg; | ||
630 | } | ||
631 | |||
632 | event->type = resp_code; | ||
633 | |||
634 | switch (param_type) { | ||
635 | case RT_NOTHING: | ||
636 | break; | ||
637 | case RT_RING: | ||
638 | if (!cid) { | ||
639 | err("received RING without CID!"); | ||
640 | event->type = RSP_INVAL; | ||
641 | abort = 1; | ||
642 | } else { | ||
643 | event->cid = 0; | ||
644 | event->parameter = cid; | ||
645 | abort = 0; | ||
646 | } | ||
647 | break; | ||
648 | case RT_ZSAU: | ||
649 | if (curarg >= params) { | ||
650 | event->parameter = ZSAU_NONE; | ||
651 | break; | ||
652 | } | ||
653 | if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING")) | ||
654 | event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING; | ||
655 | else if (!strcmp(argv[curarg], "CALL_DELIVERED")) | ||
656 | event->parameter = ZSAU_CALL_DELIVERED; | ||
657 | else if (!strcmp(argv[curarg], "ACTIVE")) | ||
658 | event->parameter = ZSAU_ACTIVE; | ||
659 | else if (!strcmp(argv[curarg], "DISCONNECT_IND")) | ||
660 | event->parameter = ZSAU_DISCONNECT_IND; | ||
661 | else if (!strcmp(argv[curarg], "NULL")) | ||
662 | event->parameter = ZSAU_NULL; | ||
663 | else if (!strcmp(argv[curarg], "DISCONNECT_REQ")) | ||
664 | event->parameter = ZSAU_DISCONNECT_REQ; | ||
665 | else { | ||
666 | event->parameter = ZSAU_UNKNOWN; | ||
667 | warn("%s: unknown parameter %s after ZSAU", | ||
668 | __func__, argv[curarg]); | ||
669 | } | ||
670 | ++curarg; | ||
671 | break; | ||
672 | case RT_STRING: | ||
673 | if (curarg < params) { | ||
674 | len = strlen(argv[curarg]) + 1; | ||
675 | event->ptr = kmalloc(len, GFP_ATOMIC); | ||
676 | if (event->ptr) | ||
677 | memcpy(event->ptr, argv[curarg], len); | ||
678 | else | ||
679 | err("no memory for string!"); | ||
680 | ++curarg; | ||
681 | } | ||
682 | #ifdef CONFIG_GIGASET_DEBUG | ||
683 | if (!event->ptr) | ||
684 | dbg(DEBUG_CMD, "string==NULL"); | ||
685 | else | ||
686 | dbg(DEBUG_CMD, | ||
687 | "string==%s", (char *) event->ptr); | ||
688 | #endif | ||
689 | break; | ||
690 | case RT_ZCAU: | ||
691 | event->parameter = -1; | ||
692 | if (curarg + 1 < params) { | ||
693 | i = isdn_gethex(argv[curarg]); | ||
694 | j = isdn_gethex(argv[curarg + 1]); | ||
695 | if (i >= 0 && i < 256 && j >= 0 && j < 256) | ||
696 | event->parameter = (unsigned) i << 8 | ||
697 | | j; | ||
698 | curarg += 2; | ||
699 | } else | ||
700 | curarg = params - 1; | ||
701 | break; | ||
702 | case RT_NUMBER: | ||
703 | case RT_HEX: | ||
704 | if (curarg < params) { | ||
705 | if (param_type == RT_HEX) | ||
706 | event->parameter = | ||
707 | isdn_gethex(argv[curarg]); | ||
708 | else | ||
709 | event->parameter = | ||
710 | isdn_getnum(argv[curarg]); | ||
711 | ++curarg; | ||
712 | } else | ||
713 | event->parameter = -1; | ||
714 | #ifdef CONFIG_GIGASET_DEBUG | ||
715 | dbg(DEBUG_CMD, "parameter==%d", event->parameter); | ||
716 | #endif | ||
717 | break; | ||
718 | } | ||
719 | |||
720 | if (resp_code == RSP_ZDLE) | ||
721 | cs->dle = event->parameter; | ||
722 | |||
723 | if (abort) | ||
724 | break; | ||
725 | } | ||
726 | |||
727 | atomic_set(&cs->ev_tail, tail); | ||
728 | spin_unlock_irqrestore(&cs->ev_lock, flags); | ||
729 | |||
730 | if (curarg != params) | ||
731 | dbg(DEBUG_ANY, "invalid number of processed parameters: %d/%d", | ||
732 | curarg, params); | ||
733 | } | ||
734 | EXPORT_SYMBOL_GPL(gigaset_handle_modem_response); | ||
735 | |||
736 | /* disconnect | ||
737 | * process closing of connection associated with given AT state structure | ||
738 | */ | ||
739 | static void disconnect(struct at_state_t **at_state_p) | ||
740 | { | ||
741 | unsigned long flags; | ||
742 | struct bc_state *bcs; | ||
743 | struct cardstate *cs; | ||
744 | |||
745 | IFNULLRET(at_state_p); | ||
746 | IFNULLRET(*at_state_p); | ||
747 | bcs = (*at_state_p)->bcs; | ||
748 | cs = (*at_state_p)->cs; | ||
749 | IFNULLRET(cs); | ||
750 | |||
751 | new_index(&(*at_state_p)->seq_index, MAX_SEQ_INDEX); | ||
752 | |||
753 | /* revert to selected idle mode */ | ||
754 | if (!atomic_read(&cs->cidmode)) { | ||
755 | cs->at_state.pending_commands |= PC_UMMODE; | ||
756 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
757 | dbg(DEBUG_CMD, "Scheduling PC_UMMODE"); | ||
758 | } | ||
759 | |||
760 | if (bcs) { | ||
761 | /* B channel assigned: invoke hardware specific handler */ | ||
762 | cs->ops->close_bchannel(bcs); | ||
763 | } else { | ||
764 | /* no B channel assigned: just deallocate */ | ||
765 | spin_lock_irqsave(&cs->lock, flags); | ||
766 | list_del(&(*at_state_p)->list); | ||
767 | kfree(*at_state_p); | ||
768 | *at_state_p = NULL; | ||
769 | spin_unlock_irqrestore(&cs->lock, flags); | ||
770 | } | ||
771 | } | ||
772 | |||
773 | /* get_free_channel | ||
774 | * get a free AT state structure: either one of those associated with the | ||
775 | * B channels of the Gigaset device, or if none of those is available, | ||
776 | * a newly allocated one with bcs=NULL | ||
777 | * The structure should be freed by calling disconnect() after use. | ||
778 | */ | ||
779 | static inline struct at_state_t *get_free_channel(struct cardstate *cs, | ||
780 | int cid) | ||
781 | /* cids: >0: siemens-cid | ||
782 | 0: without cid | ||
783 | -1: no cid assigned yet | ||
784 | */ | ||
785 | { | ||
786 | unsigned long flags; | ||
787 | int i; | ||
788 | struct at_state_t *ret; | ||
789 | |||
790 | for (i = 0; i < cs->channels; ++i) | ||
791 | if (gigaset_get_channel(cs->bcs + i)) { | ||
792 | ret = &cs->bcs[i].at_state; | ||
793 | ret->cid = cid; | ||
794 | return ret; | ||
795 | } | ||
796 | |||
797 | spin_lock_irqsave(&cs->lock, flags); | ||
798 | ret = kmalloc(sizeof(struct at_state_t), GFP_ATOMIC); | ||
799 | if (ret) { | ||
800 | gigaset_at_init(ret, NULL, cs, cid); | ||
801 | list_add(&ret->list, &cs->temp_at_states); | ||
802 | } | ||
803 | spin_unlock_irqrestore(&cs->lock, flags); | ||
804 | return ret; | ||
805 | } | ||
806 | |||
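/* init_failed
 * device initialization failed: clear PC_INIT, fall back to the given
 * mode, release the channels and turn pending CID requests into PC_NOCID
 */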
807 | static void init_failed(struct cardstate *cs, int mode) | ||
808 | { | ||
809 | int i; | ||
810 | struct at_state_t *at_state; | ||
811 | |||
812 | cs->at_state.pending_commands &= ~PC_INIT; | ||
813 | atomic_set(&cs->mode, mode); | ||
814 | atomic_set(&cs->mstate, MS_UNINITIALIZED); | ||
815 | gigaset_free_channels(cs); | ||
816 | for (i = 0; i < cs->channels; ++i) { | ||
817 | at_state = &cs->bcs[i].at_state; | ||
818 | if (at_state->pending_commands & PC_CID) { | ||
819 | at_state->pending_commands &= ~PC_CID; | ||
820 | at_state->pending_commands |= PC_NOCID; | ||
821 | atomic_set(&cs->commands_pending, 1); | ||
822 | } | ||
823 | } | ||
824 | } | ||
825 | |||
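/* schedule_init
 * schedule a PC_INIT command sequence unless one is already pending,
 * blocking the B channels while the device is (re)initialized
 */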
826 | static void schedule_init(struct cardstate *cs, int state) | ||
827 | { | ||
828 | if (cs->at_state.pending_commands & PC_INIT) { | ||
829 | dbg(DEBUG_CMD, "not scheduling PC_INIT again"); | ||
830 | return; | ||
831 | } | ||
832 | atomic_set(&cs->mstate, state); | ||
833 | atomic_set(&cs->mode, M_UNKNOWN); | ||
834 | gigaset_block_channels(cs); | ||
835 | cs->at_state.pending_commands |= PC_INIT; | ||
836 | atomic_set(&cs->commands_pending, 1); | ||
837 | dbg(DEBUG_CMD, "Scheduling PC_INIT"); | ||
838 | } | ||
839 | |||
840 | /* Add "AT" to a command, add the cid, dle encode it, send the result to the hardware. */ | ||
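/* The buffer is laid out with 9 bytes of headroom in front of the copied
 * command (room for "AT", up to 5 CID digits and the 2-byte DLE prefix)
 * and 2 bytes of tailroom for the DLE suffix, hence buflen = 11 + cmdlen.
 */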
841 | static void send_command(struct cardstate *cs, const char *cmd, int cid, | ||
842 | int dle, gfp_t kmallocflags) | ||
843 | { | ||
844 | size_t cmdlen, buflen; | ||
845 | char *cmdpos, *cmdbuf, *cmdtail; | ||
846 | |||
847 | cmdlen = strlen(cmd); | ||
848 | buflen = 11 + cmdlen; | ||
849 | |||
850 | if (likely(buflen > cmdlen)) { | ||
851 | cmdbuf = kmalloc(buflen, kmallocflags); | ||
852 | if (likely(cmdbuf != NULL)) { | ||
853 | cmdpos = cmdbuf + 9; | ||
854 | cmdtail = cmdpos + cmdlen; | ||
855 | memcpy(cmdpos, cmd, cmdlen); | ||
856 | |||
857 | if (cid > 0 && cid <= 65535) { | ||
858 | do { | ||
859 | *--cmdpos = '0' + cid % 10; | ||
860 | cid /= 10; | ||
861 | ++cmdlen; | ||
862 | } while (cid); | ||
863 | } | ||
864 | |||
865 | cmdlen += 2; | ||
866 | *--cmdpos = 'T'; | ||
867 | *--cmdpos = 'A'; | ||
868 | |||
869 | if (dle) { | ||
870 | cmdlen += 4; | ||
871 | *--cmdpos = '('; | ||
872 | *--cmdpos = 0x10; | ||
873 | *cmdtail++ = 0x10; | ||
874 | *cmdtail++ = ')'; | ||
875 | } | ||
876 | |||
877 | cs->ops->write_cmd(cs, cmdpos, cmdlen, NULL); | ||
878 | kfree(cmdbuf); | ||
879 | } else | ||
880 | err("no memory for command buffer"); | ||
881 | } else | ||
882 | err("overflow in buflen"); | ||
883 | } | ||
884 | |||
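/* at_state_from_cid
 * find the AT state structure for a given CID: the global one for CID 0,
 * otherwise the matching B channel or temporary structure; NULL if none
 */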
885 | static struct at_state_t *at_state_from_cid(struct cardstate *cs, int cid) | ||
886 | { | ||
887 | struct at_state_t *at_state; | ||
888 | int i; | ||
889 | unsigned long flags; | ||
890 | |||
891 | if (cid == 0) | ||
892 | return &cs->at_state; | ||
893 | |||
894 | for (i = 0; i < cs->channels; ++i) | ||
895 | if (cid == cs->bcs[i].at_state.cid) | ||
896 | return &cs->bcs[i].at_state; | ||
897 | |||
898 | spin_lock_irqsave(&cs->lock, flags); | ||
899 | |||
900 | list_for_each_entry(at_state, &cs->temp_at_states, list) | ||
901 | if (cid == at_state->cid) { | ||
902 | spin_unlock_irqrestore(&cs->lock, flags); | ||
903 | return at_state; | ||
904 | } | ||
905 | |||
906 | spin_unlock_irqrestore(&cs->lock, flags); | ||
907 | |||
908 | return NULL; | ||
909 | } | ||
910 | |||
911 | static void bchannel_down(struct bc_state *bcs) | ||
912 | { | ||
913 | IFNULLRET(bcs); | ||
914 | IFNULLRET(bcs->cs); | ||
915 | |||
916 | if (bcs->chstate & CHS_B_UP) { | ||
917 | bcs->chstate &= ~CHS_B_UP; | ||
918 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP); | ||
919 | } | ||
920 | |||
921 | if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { | ||
922 | bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); | ||
923 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); | ||
924 | } | ||
925 | |||
926 | gigaset_free_channel(bcs); | ||
927 | |||
928 | gigaset_bcs_reinit(bcs); | ||
929 | } | ||
930 | |||
931 | static void bchannel_up(struct bc_state *bcs) | ||
932 | { | ||
933 | IFNULLRET(bcs); | ||
934 | |||
935 | if (!(bcs->chstate & CHS_D_UP)) { | ||
936 | notice("%s: D channel not up", __func__); | ||
937 | bcs->chstate |= CHS_D_UP; | ||
938 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); | ||
939 | } | ||
940 | |||
941 | if (bcs->chstate & CHS_B_UP) { | ||
942 | notice("%s: B channel already up", __func__); | ||
943 | return; | ||
944 | } | ||
945 | |||
946 | bcs->chstate |= CHS_B_UP; | ||
947 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN); | ||
948 | } | ||
949 | |||
950 | static void start_dial(struct at_state_t *at_state, void *data, int seq_index) | ||
951 | { | ||
952 | struct bc_state *bcs = at_state->bcs; | ||
953 | struct cardstate *cs = at_state->cs; | ||
954 | int retval; | ||
955 | |||
956 | bcs->chstate |= CHS_NOTIFY_LL; | ||
957 | //atomic_set(&bcs->status, BCS_INIT); | ||
958 | |||
959 | if (atomic_read(&at_state->seq_index) != seq_index) | ||
960 | goto error; | ||
961 | |||
962 | retval = gigaset_isdn_setup_dial(at_state, data); | ||
963 | if (retval != 0) | ||
964 | goto error; | ||
965 | |||
966 | |||
967 | at_state->pending_commands |= PC_CID; | ||
968 | dbg(DEBUG_CMD, "Scheduling PC_CID"); | ||
969 | //#ifdef GIG_MAYINITONDIAL | ||
970 | // if (atomic_read(&cs->MState) == MS_UNKNOWN) { | ||
971 | // cs->at_state.pending_commands |= PC_INIT; | ||
972 | // dbg(DEBUG_CMD, "Scheduling PC_INIT"); | ||
973 | // } | ||
974 | //#endif | ||
975 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
976 | return; | ||
977 | |||
978 | error: | ||
979 | at_state->pending_commands |= PC_NOCID; | ||
980 | dbg(DEBUG_CMD, "Scheduling PC_NOCID"); | ||
981 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
982 | return; | ||
983 | } | ||
984 | |||
985 | static void start_accept(struct at_state_t *at_state) | ||
986 | { | ||
987 | struct cardstate *cs = at_state->cs; | ||
988 | int retval; | ||
989 | |||
990 | retval = gigaset_isdn_setup_accept(at_state); | ||
991 | |||
992 | if (retval == 0) { | ||
993 | at_state->pending_commands |= PC_ACCEPT; | ||
994 | dbg(DEBUG_CMD, "Scheduling PC_ACCEPT"); | ||
995 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
996 | } else { | ||
997 | //FIXME | ||
998 | at_state->pending_commands |= PC_HUP; | ||
999 | dbg(DEBUG_CMD, "Scheduling PC_HUP"); | ||
1000 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
1001 | } | ||
1002 | } | ||
1003 | |||
1004 | static void do_start(struct cardstate *cs) | ||
1005 | { | ||
1006 | gigaset_free_channels(cs); | ||
1007 | |||
1008 | if (atomic_read(&cs->mstate) != MS_LOCKED) | ||
1009 | schedule_init(cs, MS_INIT); | ||
1010 | |||
1011 | gigaset_i4l_cmd(cs, ISDN_STAT_RUN); | ||
1012 | // FIXME: not in locked mode | ||
1013 | // FIXME 2: only after init sequence | ||
1014 | |||
1015 | cs->waiting = 0; | ||
1016 | wake_up(&cs->waitqueue); | ||
1017 | } | ||
1018 | |||
1019 | static void finish_shutdown(struct cardstate *cs) | ||
1020 | { | ||
1021 | if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
1022 | atomic_set(&cs->mstate, MS_UNINITIALIZED); | ||
1023 | atomic_set(&cs->mode, M_UNKNOWN); | ||
1024 | } | ||
1025 | |||
1026 | /* The rest is done by cleanup_cs() in user mode. */ | ||
1027 | |||
1028 | cs->cmd_result = -ENODEV; | ||
1029 | cs->waiting = 0; | ||
1030 | wake_up_interruptible(&cs->waitqueue); | ||
1031 | } | ||
1032 | |||
1033 | static void do_shutdown(struct cardstate *cs) | ||
1034 | { | ||
1035 | gigaset_block_channels(cs); | ||
1036 | |||
1037 | if (atomic_read(&cs->mstate) == MS_READY) { | ||
1038 | atomic_set(&cs->mstate, MS_SHUTDOWN); | ||
1039 | cs->at_state.pending_commands |= PC_SHUTDOWN; | ||
1040 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
1041 | dbg(DEBUG_CMD, "Scheduling PC_SHUTDOWN"); //FIXME | ||
1042 | //gigaset_schedule_event(cs); //FIXME | ||
1043 | } else | ||
1044 | finish_shutdown(cs); | ||
1045 | } | ||
1046 | |||
1047 | static void do_stop(struct cardstate *cs) | ||
1048 | { | ||
1049 | do_shutdown(cs); | ||
1050 | } | ||
1051 | |||
1052 | /* Entering cid mode or getting a cid failed: | ||
1053 | * try to initialize the device and try again. | ||
1054 | * | ||
1055 | * channel >= 0: getting cid for the channel failed | ||
1056 | * channel < 0: entering cid mode failed | ||
1057 | * | ||
1058 | * returns 0 on failure | ||
1059 | */ | ||
1060 | static int reinit_and_retry(struct cardstate *cs, int channel) | ||
1061 | { | ||
1062 | int i; | ||
1063 | |||
1064 | if (--cs->retry_count <= 0) | ||
1065 | return 0; | ||
1066 | |||
1067 | for (i = 0; i < cs->channels; ++i) | ||
1068 | if (cs->bcs[i].at_state.cid > 0) | ||
1069 | return 0; | ||
1070 | |||
1071 | if (channel < 0) | ||
1072 | warn("Could not enter cid mode. Reinit device and try again."); | ||
1073 | else { | ||
1074 | warn("Could not get a call id. Reinit device and try again."); | ||
1075 | cs->bcs[channel].at_state.pending_commands |= PC_CID; | ||
1076 | } | ||
1077 | schedule_init(cs, MS_INIT); | ||
1078 | return 1; | ||
1079 | } | ||
1080 | |||
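/* at_state_invalid
 * check whether test_ptr still refers to a live AT state structure
 * (the global one, a temporary one or a B channel's); returns 1 if not
 */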
1081 | static int at_state_invalid(struct cardstate *cs, | ||
1082 | struct at_state_t *test_ptr) | ||
1083 | { | ||
1084 | unsigned long flags; | ||
1085 | unsigned channel; | ||
1086 | struct at_state_t *at_state; | ||
1087 | int retval = 0; | ||
1088 | |||
1089 | spin_lock_irqsave(&cs->lock, flags); | ||
1090 | |||
1091 | if (test_ptr == &cs->at_state) | ||
1092 | goto exit; | ||
1093 | |||
1094 | list_for_each_entry(at_state, &cs->temp_at_states, list) | ||
1095 | if (at_state == test_ptr) | ||
1096 | goto exit; | ||
1097 | |||
1098 | for (channel = 0; channel < cs->channels; ++channel) | ||
1099 | if (&cs->bcs[channel].at_state == test_ptr) | ||
1100 | goto exit; | ||
1101 | |||
1102 | retval = 1; | ||
1103 | exit: | ||
1104 | spin_unlock_irqrestore(&cs->lock, flags); | ||
1105 | return retval; | ||
1106 | } | ||
1107 | |||
1108 | static void handle_icall(struct cardstate *cs, struct bc_state *bcs, | ||
1109 | struct at_state_t **p_at_state) | ||
1110 | { | ||
1111 | int retval; | ||
1112 | struct at_state_t *at_state = *p_at_state; | ||
1113 | |||
1114 | retval = gigaset_isdn_icall(at_state); | ||
1115 | switch (retval) { | ||
1116 | case ICALL_ACCEPT: | ||
1117 | break; | ||
1118 | default: | ||
1119 | err("internal error: disposition=%d", retval); | ||
1120 | /* --v-- fall through --v-- */ | ||
1121 | case ICALL_IGNORE: | ||
1122 | case ICALL_REJECT: | ||
1123 | /* hang up actively | ||
1124 | * Device doc says that would reject the call. | ||
1125 | * In fact it doesn't. | ||
1126 | */ | ||
1127 | at_state->pending_commands |= PC_HUP; | ||
1128 | atomic_set(&cs->commands_pending, 1); | ||
1129 | break; | ||
1130 | } | ||
1131 | } | ||
1132 | |||
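/* do_lock
 * switch the device to 'locked' mode for direct userspace access;
 * fails with -EBUSY while commands are pending or channels are in use,
 * otherwise returns the previous operating mode
 */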
1133 | static int do_lock(struct cardstate *cs) | ||
1134 | { | ||
1135 | int mode; | ||
1136 | int i; | ||
1137 | |||
1138 | switch (atomic_read(&cs->mstate)) { | ||
1139 | case MS_UNINITIALIZED: | ||
1140 | case MS_READY: | ||
1141 | if (cs->cur_at_seq || !list_empty(&cs->temp_at_states) || | ||
1142 | cs->at_state.pending_commands) | ||
1143 | return -EBUSY; | ||
1144 | |||
1145 | for (i = 0; i < cs->channels; ++i) | ||
1146 | if (cs->bcs[i].at_state.pending_commands) | ||
1147 | return -EBUSY; | ||
1148 | |||
1149 | if (!gigaset_get_channels(cs)) | ||
1150 | return -EBUSY; | ||
1151 | |||
1152 | break; | ||
1153 | case MS_LOCKED: | ||
1154 | //retval = -EACCES; | ||
1155 | break; | ||
1156 | default: | ||
1157 | return -EBUSY; | ||
1158 | } | ||
1159 | |||
1160 | mode = atomic_read(&cs->mode); | ||
1161 | atomic_set(&cs->mstate, MS_LOCKED); | ||
1162 | atomic_set(&cs->mode, M_UNKNOWN); | ||
1163 | //FIXME reset card state / at states / bcs states | ||
1164 | |||
1165 | return mode; | ||
1166 | } | ||
1167 | |||
1168 | static int do_unlock(struct cardstate *cs) | ||
1169 | { | ||
1170 | if (atomic_read(&cs->mstate) != MS_LOCKED) | ||
1171 | return -EINVAL; | ||
1172 | |||
1173 | atomic_set(&cs->mstate, MS_UNINITIALIZED); | ||
1174 | atomic_set(&cs->mode, M_UNKNOWN); | ||
1175 | gigaset_free_channels(cs); | ||
1176 | //FIXME reset card state / at states / bcs states | ||
1177 | if (atomic_read(&cs->connected)) | ||
1178 | schedule_init(cs, MS_INIT); | ||
1179 | |||
1180 | return 0; | ||
1181 | } | ||
1182 | |||
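/* do_action
 * execute a single action code from the matched reply table entry,
 * possibly updating the AT state, the command to be sent and the
 * generated response code
 */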
1183 | static void do_action(int action, struct cardstate *cs, | ||
1184 | struct bc_state *bcs, | ||
1185 | struct at_state_t **p_at_state, char **pp_command, | ||
1186 | int *p_genresp, int *p_resp_code, | ||
1187 | struct event_t *ev) | ||
1188 | { | ||
1189 | struct at_state_t *at_state = *p_at_state; | ||
1190 | struct at_state_t *at_state2; | ||
1191 | unsigned long flags; | ||
1192 | |||
1193 | int channel; | ||
1194 | |||
1195 | unsigned char *s, *e; | ||
1196 | int i; | ||
1197 | unsigned long val; | ||
1198 | |||
1199 | switch (action) { | ||
1200 | case ACT_NOTHING: | ||
1201 | break; | ||
1202 | case ACT_TIMEOUT: | ||
1203 | at_state->waiting = 1; | ||
1204 | break; | ||
1205 | case ACT_INIT: | ||
1206 | //FIXME setup everything | ||
1207 | cs->at_state.pending_commands &= ~PC_INIT; | ||
1208 | cs->cur_at_seq = SEQ_NONE; | ||
1209 | atomic_set(&cs->mode, M_UNIMODEM); | ||
1210 | if (!atomic_read(&cs->cidmode)) { | ||
1211 | gigaset_free_channels(cs); | ||
1212 | atomic_set(&cs->mstate, MS_READY); | ||
1213 | break; | ||
1214 | } | ||
1215 | cs->at_state.pending_commands |= PC_CIDMODE; | ||
1216 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
1217 | dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); | ||
1218 | break; | ||
1219 | case ACT_FAILINIT: | ||
1220 | warn("Could not initialize the device."); | ||
1221 | cs->dle = 0; | ||
1222 | init_failed(cs, M_UNKNOWN); | ||
1223 | cs->cur_at_seq = SEQ_NONE; | ||
1224 | break; | ||
1225 | case ACT_CONFIGMODE: | ||
1226 | init_failed(cs, M_CONFIG); | ||
1227 | cs->cur_at_seq = SEQ_NONE; | ||
1228 | break; | ||
1229 | case ACT_SETDLE1: | ||
1230 | cs->dle = 1; | ||
1231 | /* cs->inbuf[0].inputstate |= INS_command | INS_DLE_command; */ | ||
1232 | cs->inbuf[0].inputstate &= | ||
1233 | ~(INS_command | INS_DLE_command); | ||
1234 | break; | ||
1235 | case ACT_SETDLE0: | ||
1236 | cs->dle = 0; | ||
1237 | cs->inbuf[0].inputstate = | ||
1238 | (cs->inbuf[0].inputstate & ~INS_DLE_command) | ||
1239 | | INS_command; | ||
1240 | break; | ||
1241 | case ACT_CMODESET: | ||
1242 | if (atomic_read(&cs->mstate) == MS_INIT || | ||
1243 | atomic_read(&cs->mstate) == MS_RECOVER) { | ||
1244 | gigaset_free_channels(cs); | ||
1245 | atomic_set(&cs->mstate, MS_READY); | ||
1246 | } | ||
1247 | atomic_set(&cs->mode, M_CID); | ||
1248 | cs->cur_at_seq = SEQ_NONE; | ||
1249 | break; | ||
1250 | case ACT_UMODESET: | ||
1251 | atomic_set(&cs->mode, M_UNIMODEM); | ||
1252 | cs->cur_at_seq = SEQ_NONE; | ||
1253 | break; | ||
1254 | case ACT_FAILCMODE: | ||
1255 | cs->cur_at_seq = SEQ_NONE; | ||
1256 | if (atomic_read(&cs->mstate) == MS_INIT || | ||
1257 | atomic_read(&cs->mstate) == MS_RECOVER) { | ||
1258 | init_failed(cs, M_UNKNOWN); | ||
1259 | break; | ||
1260 | } | ||
1261 | if (!reinit_and_retry(cs, -1)) | ||
1262 | schedule_init(cs, MS_RECOVER); | ||
1263 | break; | ||
1264 | case ACT_FAILUMODE: | ||
1265 | cs->cur_at_seq = SEQ_NONE; | ||
1266 | schedule_init(cs, MS_RECOVER); | ||
1267 | break; | ||
1268 | case ACT_HUPMODEM: | ||
1269 | /* send "+++" (hangup in unimodem mode) */ | ||
1270 | cs->ops->write_cmd(cs, "+++", 3, NULL); | ||
1271 | break; | ||
1272 | case ACT_RING: | ||
1273 | /* get fresh AT state structure for new CID */ | ||
1274 | at_state2 = get_free_channel(cs, ev->parameter); | ||
1275 | if (!at_state2) { | ||
1276 | warn("RING ignored: " | ||
1277 | "could not allocate channel structure"); | ||
1278 | break; | ||
1279 | } | ||
1280 | |||
1281 | /* initialize AT state structure | ||
1282 | * note that bcs may be NULL if no B channel is free | ||
1283 | */ | ||
1284 | at_state2->ConState = 700; | ||
1285 | kfree(at_state2->str_var[STR_NMBR]); | ||
1286 | at_state2->str_var[STR_NMBR] = NULL; | ||
1287 | kfree(at_state2->str_var[STR_ZCPN]); | ||
1288 | at_state2->str_var[STR_ZCPN] = NULL; | ||
1289 | kfree(at_state2->str_var[STR_ZBC]); | ||
1290 | at_state2->str_var[STR_ZBC] = NULL; | ||
1291 | kfree(at_state2->str_var[STR_ZHLC]); | ||
1292 | at_state2->str_var[STR_ZHLC] = NULL; | ||
1293 | at_state2->int_var[VAR_ZCTP] = -1; | ||
1294 | |||
1295 | spin_lock_irqsave(&cs->lock, flags); | ||
1296 | at_state2->timer_expires = RING_TIMEOUT; | ||
1297 | at_state2->timer_active = 1; | ||
1298 | spin_unlock_irqrestore(&cs->lock, flags); | ||
1299 | break; | ||
1300 | case ACT_ICALL: | ||
1301 | handle_icall(cs, bcs, p_at_state); | ||
1302 | at_state = *p_at_state; | ||
1303 | break; | ||
1304 | case ACT_FAILSDOWN: | ||
1305 | warn("Could not shut down the device."); | ||
1306 | /* fall through */ | ||
1307 | case ACT_FAKESDOWN: | ||
1308 | case ACT_SDOWN: | ||
1309 | cs->cur_at_seq = SEQ_NONE; | ||
1310 | finish_shutdown(cs); | ||
1311 | break; | ||
1312 | case ACT_CONNECT: | ||
1313 | if (cs->onechannel) { | ||
1314 | at_state->pending_commands |= PC_DLE1; | ||
1315 | atomic_set(&cs->commands_pending, 1); | ||
1316 | break; | ||
1317 | } | ||
1318 | bcs->chstate |= CHS_D_UP; | ||
1319 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); | ||
1320 | cs->ops->init_bchannel(bcs); | ||
1321 | break; | ||
1322 | case ACT_DLE1: | ||
1323 | cs->cur_at_seq = SEQ_NONE; | ||
1324 | bcs = cs->bcs + cs->curchannel; | ||
1325 | |||
1326 | bcs->chstate |= CHS_D_UP; | ||
1327 | gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); | ||
1328 | cs->ops->init_bchannel(bcs); | ||
1329 | break; | ||
1330 | case ACT_FAKEHUP: | ||
1331 | at_state->int_var[VAR_ZSAU] = ZSAU_NULL; | ||
1332 | /* fall through */ | ||
1333 | case ACT_DISCONNECT: | ||
1334 | cs->cur_at_seq = SEQ_NONE; | ||
1335 | at_state->cid = -1; | ||
1336 | if (bcs && cs->onechannel && cs->dle) { | ||
1337 | /* Check for other open channels not needed: | ||
1338 | * DLE only used for M10x with one B channel. | ||
1339 | */ | ||
1340 | at_state->pending_commands |= PC_DLE0; | ||
1341 | atomic_set(&cs->commands_pending, 1); | ||
1342 | } else { | ||
1343 | disconnect(p_at_state); | ||
1344 | at_state = *p_at_state; | ||
1345 | } | ||
1346 | break; | ||
1347 | case ACT_FAKEDLE0: | ||
1348 | at_state->int_var[VAR_ZDLE] = 0; | ||
1349 | cs->dle = 0; | ||
1350 | /* fall through */ | ||
1351 | case ACT_DLE0: | ||
1352 | cs->cur_at_seq = SEQ_NONE; | ||
1353 | at_state2 = &cs->bcs[cs->curchannel].at_state; | ||
1354 | disconnect(&at_state2); | ||
1355 | break; | ||
1356 | case ACT_ABORTHUP: | ||
1357 | cs->cur_at_seq = SEQ_NONE; | ||
1358 | warn("Could not hang up."); | ||
1359 | at_state->cid = -1; | ||
1360 | if (bcs && cs->onechannel) | ||
1361 | at_state->pending_commands |= PC_DLE0; | ||
1362 | else { | ||
1363 | disconnect(p_at_state); | ||
1364 | at_state = *p_at_state; | ||
1365 | } | ||
1366 | schedule_init(cs, MS_RECOVER); | ||
1367 | break; | ||
1368 | case ACT_FAILDLE0: | ||
1369 | cs->cur_at_seq = SEQ_NONE; | ||
1370 | warn("Could not leave DLE mode."); | ||
1371 | at_state2 = &cs->bcs[cs->curchannel].at_state; | ||
1372 | disconnect(&at_state2); | ||
1373 | schedule_init(cs, MS_RECOVER); | ||
1374 | break; | ||
1375 | case ACT_FAILDLE1: | ||
1376 | cs->cur_at_seq = SEQ_NONE; | ||
1377 | warn("Could not enter DLE mode. Try to hang up."); | ||
1378 | channel = cs->curchannel; | ||
1379 | cs->bcs[channel].at_state.pending_commands |= PC_HUP; | ||
1380 | atomic_set(&cs->commands_pending, 1); | ||
1381 | break; | ||
1382 | |||
1383 | case ACT_CID: /* got cid; start dialing */ | ||
1384 | cs->cur_at_seq = SEQ_NONE; | ||
1385 | channel = cs->curchannel; | ||
1386 | if (ev->parameter > 0 && ev->parameter <= 65535) { | ||
1387 | cs->bcs[channel].at_state.cid = ev->parameter; | ||
1388 | cs->bcs[channel].at_state.pending_commands |= | ||
1389 | PC_DIAL; | ||
1390 | atomic_set(&cs->commands_pending, 1); | ||
1391 | break; | ||
1392 | } | ||
1393 | /* fall through */ | ||
1394 | case ACT_FAILCID: | ||
1395 | cs->cur_at_seq = SEQ_NONE; | ||
1396 | channel = cs->curchannel; | ||
1397 | if (!reinit_and_retry(cs, channel)) { | ||
1398 | warn("Could not get a call id. Dialing not possible"); | ||
1399 | at_state2 = &cs->bcs[channel].at_state; | ||
1400 | disconnect(&at_state2); | ||
1401 | } | ||
1402 | break; | ||
1403 | case ACT_ABORTCID: | ||
1404 | cs->cur_at_seq = SEQ_NONE; | ||
1405 | at_state2 = &cs->bcs[cs->curchannel].at_state; | ||
1406 | disconnect(&at_state2); | ||
1407 | break; | ||
1408 | |||
1409 | case ACT_DIALING: | ||
1410 | case ACT_ACCEPTED: | ||
1411 | cs->cur_at_seq = SEQ_NONE; | ||
1412 | break; | ||
1413 | |||
1414 | case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ | ||
1415 | disconnect(p_at_state); | ||
1416 | at_state = *p_at_state; | ||
1417 | break; | ||
1418 | |||
1419 | case ACT_ABORTDIAL: /* error/timeout during dial preparation */ | ||
1420 | cs->cur_at_seq = SEQ_NONE; | ||
1421 | at_state->pending_commands |= PC_HUP; | ||
1422 | atomic_set(&cs->commands_pending, 1); | ||
1423 | break; | ||
1424 | |||
1425 | case ACT_REMOTEREJECT: /* DISCONNECT_IND after dialling */ | ||
1426 | case ACT_CONNTIMEOUT: /* timeout waiting for ZSAU=ACTIVE */ | ||
1427 | case ACT_REMOTEHUP: /* DISCONNECT_IND with established connection */ | ||
1428 | at_state->pending_commands |= PC_HUP; | ||
1429 | atomic_set(&cs->commands_pending, 1); | ||
1430 | break; | ||
1431 | case ACT_GETSTRING: /* warning: RING, ZDLE, ... are not handled properly any more */ | ||
1432 | at_state->getstring = 1; | ||
1433 | break; | ||
1434 | case ACT_SETVER: | ||
1435 | if (!ev->ptr) { | ||
1436 | *p_genresp = 1; | ||
1437 | *p_resp_code = RSP_ERROR; | ||
1438 | break; | ||
1439 | } | ||
1440 | s = ev->ptr; | ||
1441 | |||
1442 | if (!strcmp(s, "OK")) { | ||
1443 | *p_genresp = 1; | ||
1444 | *p_resp_code = RSP_ERROR; | ||
1445 | break; | ||
1446 | } | ||
1447 | |||
1448 | for (i = 0; i < 4; ++i) { | ||
1449 | val = simple_strtoul(s, (char **) &e, 10); | ||
1450 | if (val > INT_MAX || e == s) | ||
1451 | break; | ||
1452 | if (i == 3) { | ||
1453 | if (*e) | ||
1454 | break; | ||
1455 | } else if (*e != '.') | ||
1456 | break; | ||
1457 | else | ||
1458 | s = e + 1; | ||
1459 | cs->fwver[i] = val; | ||
1460 | } | ||
1461 | if (i != 4) { | ||
1462 | *p_genresp = 1; | ||
1463 | *p_resp_code = RSP_ERROR; | ||
1464 | break; | ||
1465 | } | ||
1466 | /*at_state->getstring = 1;*/ | ||
1467 | cs->gotfwver = 0; | ||
1468 | break; | ||
1469 | case ACT_GOTVER: | ||
1470 | if (cs->gotfwver == 0) { | ||
1471 | cs->gotfwver = 1; | ||
1472 | dbg(DEBUG_ANY, | ||
1473 | "firmware version %02d.%03d.%02d.%02d", | ||
1474 | cs->fwver[0], cs->fwver[1], | ||
1475 | cs->fwver[2], cs->fwver[3]); | ||
1476 | break; | ||
1477 | } | ||
1478 | /* fall through */ | ||
1479 | case ACT_FAILVER: | ||
1480 | cs->gotfwver = -1; | ||
1481 | err("could not read firmware version."); | ||
1482 | break; | ||
1483 | #ifdef CONFIG_GIGASET_DEBUG | ||
1484 | case ACT_ERROR: | ||
1485 | *p_genresp = 1; | ||
1486 | *p_resp_code = RSP_ERROR; | ||
1487 | break; | ||
1488 | case ACT_TEST: | ||
1489 | { | ||
1490 | static int count = 3; //2; //1; | ||
1491 | *p_genresp = 1; | ||
1492 | *p_resp_code = count ? RSP_ERROR : RSP_OK; | ||
1493 | if (count > 0) | ||
1494 | --count; | ||
1495 | } | ||
1496 | break; | ||
1497 | #endif | ||
1498 | case ACT_DEBUG: | ||
1499 | dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d", | ||
1500 | __func__, ev->type, at_state->ConState); | ||
1501 | break; | ||
1502 | case ACT_WARN: | ||
1503 | warn("%s: resp_code %d in ConState %d!", | ||
1504 | __func__, ev->type, at_state->ConState); | ||
1505 | break; | ||
1506 | case ACT_ZCAU: | ||
1507 | warn("cause code %04x in connection state %d.", | ||
1508 | ev->parameter, at_state->ConState); | ||
1509 | break; | ||
1510 | |||
1511 | /* events from the LL */ | ||
1512 | case ACT_DIAL: | ||
1513 | start_dial(at_state, ev->ptr, ev->parameter); | ||
1514 | break; | ||
1515 | case ACT_ACCEPT: | ||
1516 | start_accept(at_state); | ||
1517 | break; | ||
1518 | case ACT_PROTO_L2: | ||
1519 | dbg(DEBUG_CMD, | ||
1520 | "set protocol to %u", (unsigned) ev->parameter); | ||
1521 | at_state->bcs->proto2 = ev->parameter; | ||
1522 | break; | ||
1523 | case ACT_HUP: | ||
1524 | at_state->pending_commands |= PC_HUP; | ||
1525 | atomic_set(&cs->commands_pending, 1); //FIXME | ||
1526 | dbg(DEBUG_CMD, "Scheduling PC_HUP"); | ||
1527 | break; | ||
1528 | |||
1529 | /* hotplug events */ | ||
1530 | case ACT_STOP: | ||
1531 | do_stop(cs); | ||
1532 | break; | ||
1533 | case ACT_START: | ||
1534 | do_start(cs); | ||
1535 | break; | ||
1536 | |||
1537 | /* events from the interface */ // FIXME without ACT_xxxx? | ||
1538 | case ACT_IF_LOCK: | ||
1539 | cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); | ||
1540 | cs->waiting = 0; | ||
1541 | wake_up(&cs->waitqueue); | ||
1542 | break; | ||
1543 | case ACT_IF_VER: | ||
1544 | if (ev->parameter != 0) | ||
1545 | cs->cmd_result = -EINVAL; | ||
1546 | else if (cs->gotfwver != 1) { | ||
1547 | cs->cmd_result = -ENOENT; | ||
1548 | } else { | ||
1549 | memcpy(ev->arg, cs->fwver, sizeof cs->fwver); | ||
1550 | cs->cmd_result = 0; | ||
1551 | } | ||
1552 | cs->waiting = 0; | ||
1553 | wake_up(&cs->waitqueue); | ||
1554 | break; | ||
1555 | |||
1556 | /* events from the proc file system */ // FIXME without ACT_xxxx? | ||
1557 | case ACT_PROC_CIDMODE: | ||
1558 | if (ev->parameter != atomic_read(&cs->cidmode)) { | ||
1559 | atomic_set(&cs->cidmode, ev->parameter); | ||
1560 | if (ev->parameter) { | ||
1561 | cs->at_state.pending_commands |= PC_CIDMODE; | ||
1562 | dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); | ||
1563 | } else { | ||
1564 | cs->at_state.pending_commands |= PC_UMMODE; | ||
1565 | dbg(DEBUG_CMD, "Scheduling PC_UMMODE"); | ||
1566 | } | ||
1567 | atomic_set(&cs->commands_pending, 1); | ||
1568 | } | ||
1569 | cs->waiting = 0; | ||
1570 | wake_up(&cs->waitqueue); | ||
1571 | break; | ||
1572 | |||
1573 | /* events from the hardware drivers */ | ||
1574 | case ACT_NOTIFY_BC_DOWN: | ||
1575 | bchannel_down(bcs); | ||
1576 | break; | ||
1577 | case ACT_NOTIFY_BC_UP: | ||
1578 | bchannel_up(bcs); | ||
1579 | break; | ||
1580 | case ACT_SHUTDOWN: | ||
1581 | do_shutdown(cs); | ||
1582 | break; | ||
1583 | |||
1584 | |||
1585 | default: | ||
1586 | if (action >= ACT_CMD && action < ACT_CMD + AT_NUM) { | ||
1587 | *pp_command = at_state->bcs->commands[action - ACT_CMD]; | ||
1588 | if (!*pp_command) { | ||
1589 | *p_genresp = 1; | ||
1590 | *p_resp_code = RSP_NULL; | ||
1591 | } | ||
1592 | } else | ||
1593 | err("%s: action==%d!", __func__, action); | ||
1594 | } | ||
1595 | } | ||
1596 | |||
1597 | /* State machine to do the calling and hangup procedure */ | ||
1598 | static void process_event(struct cardstate *cs, struct event_t *ev) | ||
1599 | { | ||
1600 | struct bc_state *bcs; | ||
1601 | char *p_command = NULL; | ||
1602 | struct reply_t *rep; | ||
1603 | int rcode; | ||
1604 | int genresp = 0; | ||
1605 | int resp_code = RSP_ERROR; | ||
1606 | int sendcid; | ||
1607 | struct at_state_t *at_state; | ||
1608 | int index; | ||
1609 | int curact; | ||
1610 | unsigned long flags; | ||
1611 | |||
1612 | IFNULLRET(cs); | ||
1613 | IFNULLRET(ev); | ||
1614 | |||
1615 | if (ev->cid >= 0) { | ||
1616 | at_state = at_state_from_cid(cs, ev->cid); | ||
1617 | if (!at_state) { | ||
1618 | gigaset_add_event(cs, &cs->at_state, RSP_WRONG_CID, | ||
1619 | NULL, 0, NULL); | ||
1620 | return; | ||
1621 | } | ||
1622 | } else { | ||
1623 | at_state = ev->at_state; | ||
1624 | if (at_state_invalid(cs, at_state)) { | ||
1625 | dbg(DEBUG_ANY, | ||
1626 | "event for invalid at_state %p", at_state); | ||
1627 | return; | ||
1628 | } | ||
1629 | } | ||
1630 | |||
1631 | dbg(DEBUG_CMD, | ||
1632 | "connection state %d, event %d", at_state->ConState, ev->type); | ||
1633 | |||
1634 | bcs = at_state->bcs; | ||
1635 | sendcid = at_state->cid; | ||
1636 | |||
1637 | /* Set the pointer to the reply table */ | ||
1638 | rep = at_state->replystruct; | ||
1639 | IFNULLRET(rep); | ||
1640 | |||
1641 | if (ev->type == EV_TIMEOUT) { | ||
1642 | if (ev->parameter != atomic_read(&at_state->timer_index) | ||
1643 | || !at_state->timer_active) { | ||
1644 | ev->type = RSP_NONE; /* old timeout */ | ||
1645 | dbg(DEBUG_ANY, "old timeout"); | ||
1646 | } else if (!at_state->waiting) | ||
1647 | dbg(DEBUG_ANY, "timeout occurred"); | ||
1648 | else | ||
1649 | dbg(DEBUG_ANY, "stopped waiting"); | ||
1650 | } | ||
1651 | |||
1652 | /* if the response belongs to a variable in at_state->int_var[VAR_XXXX] or at_state->str_var[STR_XXXX], set it */ | ||
1653 | if (ev->type >= RSP_VAR && ev->type < RSP_VAR + VAR_NUM) { | ||
1654 | index = ev->type - RSP_VAR; | ||
1655 | at_state->int_var[index] = ev->parameter; | ||
1656 | } else if (ev->type >= RSP_STR && ev->type < RSP_STR + STR_NUM) { | ||
1657 | index = ev->type - RSP_STR; | ||
1658 | kfree(at_state->str_var[index]); | ||
1659 | at_state->str_var[index] = ev->ptr; | ||
1660 | ev->ptr = NULL; /* prevent process_events() from deallocating ptr */ | ||
1661 | } | ||
1662 | |||
1663 | if (ev->type == EV_TIMEOUT || ev->type == RSP_STRING) | ||
1664 | at_state->getstring = 0; | ||
1665 | |||
1666 | /* Search the reply table for a row matching the modem response and the current ConState */ | ||
1667 | for (;; rep++) { | ||
1668 | rcode = rep->resp_code; | ||
1669 | /* dbg (DEBUG_ANY, "rcode %d", rcode); */ | ||
1670 | if (rcode == RSP_LAST) { | ||
1671 | /* found nothing...*/ | ||
1672 | warn("%s: rcode=RSP_LAST: resp_code %d in ConState %d!", | ||
1673 | __func__, ev->type, at_state->ConState); | ||
1674 | return; | ||
1675 | } | ||
1676 | if ((rcode == RSP_ANY || rcode == ev->type) | ||
1677 | && ((int) at_state->ConState >= rep->min_ConState) | ||
1678 | && (rep->max_ConState < 0 | ||
1679 | || (int) at_state->ConState <= rep->max_ConState) | ||
1680 | && (rep->parameter < 0 || rep->parameter == ev->parameter)) | ||
1681 | break; | ||
1682 | } | ||
1683 | |||
1684 | p_command = rep->command; | ||
1685 | |||
1686 | at_state->waiting = 0; | ||
1687 | for (curact = 0; curact < MAXACT; ++curact) { | ||
1688 | /* The row tells us what we should do. | ||
1689 | */ | ||
1690 | do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev); | ||
1691 | if (!at_state) | ||
1692 | break; /* may be freed after disconnect */ | ||
1693 | } | ||
1694 | |||
1695 | if (at_state) { | ||
1696 | /* Advance to the new ConState specified by the matched row */ | ||
1697 | if (rep->new_ConState >= 0) | ||
1698 | at_state->ConState = rep->new_ConState; | ||
1699 | |||
1700 | if (genresp) { | ||
1701 | spin_lock_irqsave(&cs->lock, flags); | ||
1702 | at_state->timer_expires = 0; //FIXME | ||
1703 | at_state->timer_active = 0; //FIXME | ||
1704 | spin_unlock_irqrestore(&cs->lock, flags); | ||
1705 | gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL); | ||
1706 | } else { | ||
1707 | /* Send command to modem if not NULL... */ | ||
1708 | if (p_command/*rep->command*/) { | ||
1709 | if (atomic_read(&cs->connected)) | ||
1710 | send_command(cs, p_command, | ||
1711 | sendcid, cs->dle, | ||
1712 | GFP_ATOMIC); | ||
1713 | else | ||
1714 | gigaset_add_event(cs, at_state, | ||
1715 | RSP_NODEV, | ||
1716 | NULL, 0, NULL); | ||
1717 | } | ||
1718 | |||
1719 | spin_lock_irqsave(&cs->lock, flags); | ||
1720 | if (!rep->timeout) { | ||
1721 | at_state->timer_expires = 0; | ||
1722 | at_state->timer_active = 0; | ||
1723 | } else if (rep->timeout > 0) { /* new timeout */ | ||
1724 | at_state->timer_expires = rep->timeout * 10; | ||
1725 | at_state->timer_active = 1; | ||
1726 | new_index(&at_state->timer_index, | ||
1727 | MAX_TIMER_INDEX); | ||
1728 | } | ||
1729 | spin_unlock_irqrestore(&cs->lock, flags); | ||
1730 | } | ||
1731 | } | ||
1732 | } | ||
1733 | |||
1734 | static void schedule_sequence(struct cardstate *cs, | ||
1735 | struct at_state_t *at_state, int sequence) | ||
1736 | { | ||
1737 | cs->cur_at_seq = sequence; | ||
1738 | gigaset_add_event(cs, at_state, RSP_INIT, NULL, sequence, NULL); | ||
1739 | } | ||
1740 | |||
1741 | static void process_command_flags(struct cardstate *cs) | ||
1742 | { | ||
1743 | struct at_state_t *at_state = NULL; | ||
1744 | struct bc_state *bcs; | ||
1745 | int i; | ||
1746 | int sequence; | ||
1747 | |||
1748 | IFNULLRET(cs); | ||
1749 | |||
1750 | atomic_set(&cs->commands_pending, 0); | ||
1751 | |||
1752 | if (cs->cur_at_seq) { | ||
1753 | dbg(DEBUG_CMD, "not searching scheduled commands: busy"); | ||
1754 | return; | ||
1755 | } | ||
1756 | |||
1757 | dbg(DEBUG_CMD, "searching scheduled commands"); | ||
1758 | |||
1759 | sequence = SEQ_NONE; | ||
1760 | |||
1761 | /* clear pending_commands and hangup channels on shutdown */ | ||
1762 | if (cs->at_state.pending_commands & PC_SHUTDOWN) { | ||
1763 | cs->at_state.pending_commands &= ~PC_CIDMODE; | ||
1764 | for (i = 0; i < cs->channels; ++i) { | ||
1765 | bcs = cs->bcs + i; | ||
1766 | at_state = &bcs->at_state; | ||
1767 | at_state->pending_commands &= | ||
1768 | ~(PC_DLE1 | PC_ACCEPT | PC_DIAL); | ||
1769 | if (at_state->cid > 0) | ||
1770 | at_state->pending_commands |= PC_HUP; | ||
1771 | if (at_state->pending_commands & PC_CID) { | ||
1772 | at_state->pending_commands |= PC_NOCID; | ||
1773 | at_state->pending_commands &= ~PC_CID; | ||
1774 | } | ||
1775 | } | ||
1776 | } | ||
1777 | |||
1778 | /* clear pending_commands and hangup channels on reset */ | ||
1779 | if (cs->at_state.pending_commands & PC_INIT) { | ||
1780 | cs->at_state.pending_commands &= ~PC_CIDMODE; | ||
1781 | for (i = 0; i < cs->channels; ++i) { | ||
1782 | bcs = cs->bcs + i; | ||
1783 | at_state = &bcs->at_state; | ||
1784 | at_state->pending_commands &= | ||
1785 | ~(PC_DLE1 | PC_ACCEPT | PC_DIAL); | ||
1786 | if (at_state->cid > 0) | ||
1787 | at_state->pending_commands |= PC_HUP; | ||
1788 | if (atomic_read(&cs->mstate) == MS_RECOVER) { | ||
1789 | if (at_state->pending_commands & PC_CID) { | ||
1790 | at_state->pending_commands |= PC_NOCID; | ||
1791 | at_state->pending_commands &= ~PC_CID; | ||
1792 | } | ||
1793 | } | ||
1794 | } | ||
1795 | } | ||
1796 | |||
1797 | /* only switch back to unimodem mode if no commands are pending and no channels are up */ | ||
1798 | if (cs->at_state.pending_commands == PC_UMMODE | ||
1799 | && !atomic_read(&cs->cidmode) | ||
1800 | && list_empty(&cs->temp_at_states) | ||
1801 | && atomic_read(&cs->mode) == M_CID) { | ||
1802 | sequence = SEQ_UMMODE; | ||
1803 | at_state = &cs->at_state; | ||
1804 | for (i = 0; i < cs->channels; ++i) { | ||
1805 | bcs = cs->bcs + i; | ||
1806 | if (bcs->at_state.pending_commands || | ||
1807 | bcs->at_state.cid > 0) { | ||
1808 | sequence = SEQ_NONE; | ||
1809 | break; | ||
1810 | } | ||
1811 | } | ||
1812 | } | ||
1813 | cs->at_state.pending_commands &= ~PC_UMMODE; | ||
1814 | if (sequence != SEQ_NONE) { | ||
1815 | schedule_sequence(cs, at_state, sequence); | ||
1816 | return; | ||
1817 | } | ||
1818 | |||
1819 | for (i = 0; i < cs->channels; ++i) { | ||
1820 | bcs = cs->bcs + i; | ||
1821 | if (bcs->at_state.pending_commands & PC_HUP) { | ||
1822 | bcs->at_state.pending_commands &= ~PC_HUP; | ||
1823 | if (bcs->at_state.pending_commands & PC_CID) { | ||
1824 | /* not yet dialing: PC_NOCID is sufficient */ | ||
1825 | bcs->at_state.pending_commands |= PC_NOCID; | ||
1826 | bcs->at_state.pending_commands &= ~PC_CID; | ||
1827 | } else { | ||
1828 | schedule_sequence(cs, &bcs->at_state, SEQ_HUP); | ||
1829 | return; | ||
1830 | } | ||
1831 | } | ||
1832 | if (bcs->at_state.pending_commands & PC_NOCID) { | ||
1833 | bcs->at_state.pending_commands &= ~PC_NOCID; | ||
1834 | cs->curchannel = bcs->channel; | ||
1835 | schedule_sequence(cs, &cs->at_state, SEQ_NOCID); | ||
1836 | return; | ||
1837 | } else if (bcs->at_state.pending_commands & PC_DLE0) { | ||
1838 | bcs->at_state.pending_commands &= ~PC_DLE0; | ||
1839 | cs->curchannel = bcs->channel; | ||
1840 | schedule_sequence(cs, &cs->at_state, SEQ_DLE0); | ||
1841 | return; | ||
1842 | } | ||
1843 | } | ||
1844 | |||
1845 | list_for_each_entry(at_state, &cs->temp_at_states, list) | ||
1846 | if (at_state->pending_commands & PC_HUP) { | ||
1847 | at_state->pending_commands &= ~PC_HUP; | ||
1848 | schedule_sequence(cs, at_state, SEQ_HUP); | ||
1849 | return; | ||
1850 | } | ||
1851 | |||
1852 | if (cs->at_state.pending_commands & PC_INIT) { | ||
1853 | cs->at_state.pending_commands &= ~PC_INIT; | ||
1854 | cs->dle = 0; //FIXME | ||
1855 | cs->inbuf->inputstate = INS_command; | ||
1856 | //FIXME reset card state (or -> LOCK0)? | ||
1857 | schedule_sequence(cs, &cs->at_state, SEQ_INIT); | ||
1858 | return; | ||
1859 | } | ||
1860 | if (cs->at_state.pending_commands & PC_SHUTDOWN) { | ||
1861 | cs->at_state.pending_commands &= ~PC_SHUTDOWN; | ||
1862 | schedule_sequence(cs, &cs->at_state, SEQ_SHUTDOWN); | ||
1863 | return; | ||
1864 | } | ||
1865 | if (cs->at_state.pending_commands & PC_CIDMODE) { | ||
1866 | cs->at_state.pending_commands &= ~PC_CIDMODE; | ||
1867 | if (atomic_read(&cs->mode) == M_UNIMODEM) { | ||
1868 | #if 0 | ||
1869 | cs->retry_count = 2; | ||
1870 | #else | ||
1871 | cs->retry_count = 1; | ||
1872 | #endif | ||
1873 | schedule_sequence(cs, &cs->at_state, SEQ_CIDMODE); | ||
1874 | return; | ||
1875 | } | ||
1876 | } | ||
1877 | |||
1878 | for (i = 0; i < cs->channels; ++i) { | ||
1879 | bcs = cs->bcs + i; | ||
1880 | if (bcs->at_state.pending_commands & PC_DLE1) { | ||
1881 | bcs->at_state.pending_commands &= ~PC_DLE1; | ||
1882 | cs->curchannel = bcs->channel; | ||
1883 | schedule_sequence(cs, &cs->at_state, SEQ_DLE1); | ||
1884 | return; | ||
1885 | } | ||
1886 | if (bcs->at_state.pending_commands & PC_ACCEPT) { | ||
1887 | bcs->at_state.pending_commands &= ~PC_ACCEPT; | ||
1888 | schedule_sequence(cs, &bcs->at_state, SEQ_ACCEPT); | ||
1889 | return; | ||
1890 | } | ||
1891 | if (bcs->at_state.pending_commands & PC_DIAL) { | ||
1892 | bcs->at_state.pending_commands &= ~PC_DIAL; | ||
1893 | schedule_sequence(cs, &bcs->at_state, SEQ_DIAL); | ||
1894 | return; | ||
1895 | } | ||
1896 | if (bcs->at_state.pending_commands & PC_CID) { | ||
1897 | switch (atomic_read(&cs->mode)) { | ||
1898 | case M_UNIMODEM: | ||
1899 | cs->at_state.pending_commands |= PC_CIDMODE; | ||
1900 | dbg(DEBUG_CMD, "Scheduling PC_CIDMODE"); | ||
1901 | atomic_set(&cs->commands_pending, 1); | ||
1902 | return; | ||
1903 | #ifdef GIG_MAYINITONDIAL | ||
1904 | case M_UNKNOWN: | ||
1905 | schedule_init(cs, MS_INIT); | ||
1906 | return; | ||
1907 | #endif | ||
1908 | } | ||
1909 | bcs->at_state.pending_commands &= ~PC_CID; | ||
1910 | cs->curchannel = bcs->channel; | ||
1911 | #ifdef GIG_RETRYCID | ||
1912 | cs->retry_count = 2; | ||
1913 | #else | ||
1914 | cs->retry_count = 1; | ||
1915 | #endif | ||
1916 | schedule_sequence(cs, &cs->at_state, SEQ_CID); | ||
1917 | return; | ||
1918 | } | ||
1919 | } | ||
1920 | } | ||
1921 | |||
1922 | static void process_events(struct cardstate *cs) | ||
1923 | { | ||
1924 | struct event_t *ev; | ||
1925 | unsigned head, tail; | ||
1926 | int i; | ||
1927 | int check_flags = 0; | ||
1928 | int was_busy; | ||
1929 | |||
1930 | /* no locking needed (only one reader) */ | ||
1931 | head = atomic_read(&cs->ev_head); | ||
1932 | |||
1933 | for (i = 0; i < 2 * MAX_EVENTS; ++i) { | ||
1934 | tail = atomic_read(&cs->ev_tail); | ||
1935 | if (tail == head) { | ||
1936 | if (!check_flags && !atomic_read(&cs->commands_pending)) | ||
1937 | break; | ||
1938 | check_flags = 0; | ||
1939 | process_command_flags(cs); | ||
1940 | tail = atomic_read(&cs->ev_tail); | ||
1941 | if (tail == head) { | ||
1942 | if (!atomic_read(&cs->commands_pending)) | ||
1943 | break; | ||
1944 | continue; | ||
1945 | } | ||
1946 | } | ||
1947 | |||
1948 | ev = cs->events + head; | ||
1949 | was_busy = cs->cur_at_seq != SEQ_NONE; | ||
1950 | process_event(cs, ev); | ||
1951 | kfree(ev->ptr); | ||
1952 | ev->ptr = NULL; | ||
1953 | if (was_busy && cs->cur_at_seq == SEQ_NONE) | ||
1954 | check_flags = 1; | ||
1955 | |||
1956 | head = (head + 1) % MAX_EVENTS; | ||
1957 | atomic_set(&cs->ev_head, head); | ||
1958 | } | ||
1959 | |||
1960 | if (i == 2 * MAX_EVENTS) { | ||
1961 | err("infinite loop in process_events; aborting."); | ||
1962 | } | ||
1963 | } | ||
1964 | |||
1965 | /* tasklet scheduled on any event received from the Gigaset device | ||
1966 | * parameter: | ||
1967 | * data ISDN controller state structure | ||
1968 | */ | ||
1969 | void gigaset_handle_event(unsigned long data) | ||
1970 | { | ||
1971 | struct cardstate *cs = (struct cardstate *) data; | ||
1972 | |||
1973 | IFNULLRET(cs); | ||
1974 | IFNULLRET(cs->inbuf); | ||
1975 | |||
1976 | /* handle incoming data on control/common channel */ | ||
1977 | if (atomic_read(&cs->inbuf->head) != atomic_read(&cs->inbuf->tail)) { | ||
1978 | dbg(DEBUG_INTR, "processing new data"); | ||
1979 | cs->ops->handle_input(cs->inbuf); | ||
1980 | } | ||
1981 | |||
1982 | process_events(cs); | ||
1983 | } | ||
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h new file mode 100644 index 000000000000..729edcdb6dac --- /dev/null +++ b/drivers/isdn/gigaset/gigaset.h | |||
@@ -0,0 +1,938 @@ | |||
1 | /* Siemens Gigaset 307x driver | ||
2 | * Common header file for all connection variants | ||
3 | * | ||
4 | * Written by Stefan Eilers <Eilers.Stefan@epost.de> | ||
5 | * and Hansjoerg Lipp <hjlipp@web.de> | ||
6 | * | ||
7 | * Version: $Id: gigaset.h,v 1.97.4.26 2006/02/04 18:28:16 hjlipp Exp $ | ||
8 | * =========================================================================== | ||
9 | */ | ||
10 | |||
11 | #ifndef GIGASET_H | ||
12 | #define GIGASET_H | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/compiler.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <asm/atomic.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/isdnif.h> | ||
21 | #include <linux/usb.h> | ||
22 | #include <linux/skbuff.h> | ||
23 | #include <linux/netdevice.h> | ||
24 | #include <linux/ppp_defs.h> | ||
25 | #include <linux/timer.h> | ||
26 | #include <linux/interrupt.h> | ||
27 | #include <linux/tty.h> | ||
28 | #include <linux/tty_driver.h> | ||
29 | #include <linux/list.h> | ||
30 | |||
31 | #define GIG_VERSION {0,5,0,0} | ||
32 | #define GIG_COMPAT {0,4,0,0} | ||
33 | |||
34 | #define MAX_REC_PARAMS 10 /* Max. number of params in response string */ | ||
35 | #define MAX_RESP_SIZE 512 /* Max. size of a response string */ | ||
36 | #define HW_HDR_LEN 2 /* Header size used to store ack info */ | ||
37 | |||
38 | #define MAX_EVENTS 64 /* size of event queue */ | ||
39 | |||
40 | #define RBUFSIZE 8192 | ||
41 | #define SBUFSIZE 4096 /* sk_buff payload size */ | ||
42 | |||
43 | #define MAX_BUF_SIZE (SBUFSIZE - 2) /* Max. size of a data packet from LL */ | ||
44 | #define TRANSBUFSIZE 768 /* bytes per skb for transparent receive */ | ||
45 | |||
46 | /* compile time options */ | ||
47 | #define GIG_MAJOR 0 | ||
48 | |||
49 | #define GIG_MAYINITONDIAL | ||
50 | #define GIG_RETRYCID | ||
51 | #define GIG_X75 | ||
52 | |||
53 | #define MAX_TIMER_INDEX 1000 | ||
54 | #define MAX_SEQ_INDEX 1000 | ||
55 | |||
56 | #define GIG_TICK (HZ / 10) | ||
57 | |||
58 | /* timeout values (unit: 1 sec) */ | ||
59 | #define INIT_TIMEOUT 1 | ||
60 | |||
61 | /* timeout values (unit: 0.1 sec) */ | ||
62 | #define RING_TIMEOUT 3 /* for additional parameters to RING */ | ||
63 | #define BAS_TIMEOUT 20 /* for response to Base USB ops */ | ||
64 | #define ATRDY_TIMEOUT 3 /* for HD_READY_SEND_ATDATA */ | ||
65 | |||
66 | #define BAS_RETRY 3 /* max. retries for base USB ops */ | ||
67 | |||
68 | #define MAXACT 3 | ||
69 | |||
70 | #define IFNULL(a) if (unlikely(!(a))) | ||
71 | #define IFNULLRET(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return; } | ||
72 | #define IFNULLRETVAL(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); return (b); } | ||
73 | #define IFNULLCONT(a) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); continue; } | ||
74 | #define IFNULLGOTO(a,b) if (unlikely(!(a))) {err("%s==NULL at %s:%d!", #a, __FILE__, __LINE__); goto b; } | ||
75 | |||
76 | extern int gigaset_debuglevel; /* "needs" cast to (enum debuglevel) */ | ||
77 | |||
78 | /* any combination of these can be given with the 'debug=' parameter to insmod, e.g. | ||
79 | * 'insmod usb_gigaset.o debug=0x2c' will set DEBUG_OPEN, DEBUG_CMD and DEBUG_INTR. */ | ||
80 | enum debuglevel { /* up to 24 bits (atomic_t) */ | ||
81 | DEBUG_REG = 0x0002, /* serial port I/O register operations */ | ||
82 | DEBUG_OPEN = 0x0004, /* open/close serial port */ | ||
83 | DEBUG_INTR = 0x0008, /* interrupt processing */ | ||
84 | DEBUG_INTR_DUMP = 0x0010, /* Activating hexdump debug output on interrupt | ||
85 | requests, not available as run-time option */ | ||
86 | DEBUG_CMD = 0x00020, /* sent/received LL commands */ | ||
87 | DEBUG_STREAM = 0x00040, /* application data stream I/O events */ | ||
88 | DEBUG_STREAM_DUMP = 0x00080, /* application data stream content */ | ||
89 | DEBUG_LLDATA = 0x00100, /* sent/received LL data */ | ||
90 | DEBUG_INTR_0 = 0x00200, /* serial port output interrupt processing */ | ||
91 | DEBUG_DRIVER = 0x00400, /* driver structure */ | ||
92 | DEBUG_HDLC = 0x00800, /* M10x HDLC processing */ | ||
93 | DEBUG_WRITE = 0x01000, /* M105 data write */ | ||
94 | DEBUG_TRANSCMD = 0x02000, /*AT-COMMANDS+RESPONSES*/ | ||
95 | DEBUG_MCMD = 0x04000, /*COMMANDS THAT ARE SENT VERY OFTEN*/ | ||
96 | DEBUG_INIT = 0x08000, /* (de)allocation+initialization of data structures */ | ||
97 | DEBUG_LOCK = 0x10000, /* semaphore operations */ | ||
98 | DEBUG_OUTPUT = 0x20000, /* output to device */ | ||
99 | DEBUG_ISO = 0x40000, /* isochronous transfers */ | ||
100 | DEBUG_IF = 0x80000, /* character device operations */ | ||
101 | DEBUG_USBREQ = 0x100000, /* USB communication (except payload data) */ | ||
102 | DEBUG_LOCKCMD = 0x200000, /* AT commands and responses when MS_LOCKED */ | ||
103 | |||
104 | DEBUG_ANY = 0x3fffff, /* print message if any of the others is activated */ | ||
105 | }; | ||
106 | |||
107 | #ifdef CONFIG_GIGASET_DEBUG | ||
108 | #define DEBUG_DEFAULT (DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_USBREQ) | ||
109 | //#define DEBUG_DEFAULT (DEBUG_LOCK | DEBUG_INIT | DEBUG_TRANSCMD | DEBUG_CMD | DEBUG_IF | DEBUG_DRIVER | DEBUG_OUTPUT | DEBUG_INTR) | ||
110 | #else | ||
111 | #define DEBUG_DEFAULT 0 | ||
112 | #endif | ||
113 | |||
114 | /* redefine syslog macros to prepend module name instead of entire source path */ | ||
115 | /* The space before the comma in ", ##" is needed by gcc 2.95 */ | ||
116 | #undef info | ||
117 | #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) | ||
118 | |||
119 | #undef notice | ||
120 | #define notice(format, arg...) printk(KERN_NOTICE "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) | ||
121 | |||
122 | #undef warn | ||
123 | #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) | ||
124 | |||
125 | #undef err | ||
126 | #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg) | ||
127 | |||
128 | #undef dbg | ||
129 | #ifdef CONFIG_GIGASET_DEBUG | ||
130 | #define dbg(level, format, arg...) do { if (unlikely(((enum debuglevel)gigaset_debuglevel) & (level))) \ | ||
131 | printk(KERN_DEBUG "%s: " format "\n", THIS_MODULE ? THIS_MODULE->name : "gigaset_hw" , ## arg); } while (0) | ||
132 | #else | ||
133 | #define dbg(level, format, arg...) do {} while (0) | ||
134 | #endif | ||
135 | |||
136 | void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg, | ||
137 | size_t len, const unsigned char *buf, int from_user); | ||
138 | |||
139 | /* connection state */ | ||
140 | #define ZSAU_NONE 0 | ||
141 | #define ZSAU_DISCONNECT_IND 4 | ||
142 | #define ZSAU_OUTGOING_CALL_PROCEEDING 1 | ||
143 | #define ZSAU_PROCEEDING 1 | ||
144 | #define ZSAU_CALL_DELIVERED 2 | ||
145 | #define ZSAU_ACTIVE 3 | ||
146 | #define ZSAU_NULL 5 | ||
147 | #define ZSAU_DISCONNECT_REQ 6 | ||
148 | #define ZSAU_UNKNOWN -1 | ||
149 | |||
150 | /* USB control transfer requests */ | ||
151 | #define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) | ||
152 | #define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) | ||
153 | |||
154 | /* int-in-events 3070 */ | ||
155 | #define HD_B1_FLOW_CONTROL 0x80 | ||
156 | #define HD_B2_FLOW_CONTROL 0x81 | ||
157 | #define HD_RECEIVEATDATA_ACK (0x35) // 3070 // att: HD_RECEIVE>>AT<<DATA_ACK | ||
158 | #define HD_READY_SEND_ATDATA (0x36) // 3070 | ||
159 | #define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070 | ||
160 | #define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070 | ||
161 | #define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070 | ||
162 | #define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070 | ||
163 | #define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070 | ||
164 | #define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070 | ||
165 | #define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070 | ||
166 | // Power management | ||
167 | #define HD_SUSPEND_END (0x61) // ISurf USB | ||
168 | // Configuration | ||
169 | #define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070 | ||
170 | |||
171 | /* control requests 3070 */ | ||
172 | #define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070 | ||
173 | #define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070 | ||
174 | #define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070 | ||
175 | #define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070 | ||
176 | #define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070 | ||
177 | #define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070 | ||
178 | #define HD_WRITE_ATMESSAGE (0x12) // 3070 | ||
179 | #define HD_READ_ATMESSAGE (0x13) // 3070 | ||
180 | #define HD_OPEN_ATCHANNEL (0x28) // 3070 | ||
181 | #define HD_CLOSE_ATCHANNEL (0x29) // 3070 | ||
182 | |||
183 | /* USB frames for isochronous transfer */ | ||
184 | #define BAS_FRAMETIME 1 /* number of milliseconds between frames */ | ||
185 | #define BAS_NUMFRAMES 8 /* number of frames per URB */ | ||
186 | #define BAS_MAXFRAME 16 /* allocated bytes per frame */ | ||
187 | #define BAS_NORMFRAME 8 /* send size without flow control */ | ||
188 | #define BAS_HIGHFRAME 10 /* " " with positive flow control */ | ||
189 | #define BAS_LOWFRAME 5 /* " " with negative flow control */ | ||
190 | #define BAS_CORRFRAMES 4 /* flow control multiplicator */ | ||
191 | |||
192 | #define BAS_INBUFSIZE (BAS_MAXFRAME * BAS_NUMFRAMES) /* size of isochronous input buffer per URB */ | ||
193 | #define BAS_OUTBUFSIZE 4096 /* size of common isochronous output buffer */ | ||
194 | #define BAS_OUTBUFPAD BAS_MAXFRAME /* size of pad area for isochronous output buffer */ | ||
195 | |||
196 | #define BAS_INURBS 3 | ||
197 | #define BAS_OUTURBS 3 | ||
198 | |||
199 | /* variable commands in struct bc_state */ | ||
200 | #define AT_ISO 0 | ||
201 | #define AT_DIAL 1 | ||
202 | #define AT_MSN 2 | ||
203 | #define AT_BC 3 | ||
204 | #define AT_PROTO 4 | ||
205 | #define AT_TYPE 5 | ||
206 | #define AT_HLC 6 | ||
207 | #define AT_NUM 7 | ||
208 | |||
209 | /* variables in struct at_state_t */ | ||
210 | #define VAR_ZSAU 0 | ||
211 | #define VAR_ZDLE 1 | ||
212 | #define VAR_ZVLS 2 | ||
213 | #define VAR_ZCTP 3 | ||
214 | #define VAR_NUM 4 | ||
215 | |||
216 | #define STR_NMBR 0 | ||
217 | #define STR_ZCPN 1 | ||
218 | #define STR_ZCON 2 | ||
219 | #define STR_ZBC 3 | ||
220 | #define STR_ZHLC 4 | ||
221 | #define STR_NUM 5 | ||
222 | |||
223 | #define EV_TIMEOUT -105 | ||
224 | #define EV_IF_VER -106 | ||
225 | #define EV_PROC_CIDMODE -107 | ||
226 | #define EV_SHUTDOWN -108 | ||
227 | #define EV_START -110 | ||
228 | #define EV_STOP -111 | ||
229 | #define EV_IF_LOCK -112 | ||
230 | #define EV_PROTO_L2 -113 | ||
231 | #define EV_ACCEPT -114 | ||
232 | #define EV_DIAL -115 | ||
233 | #define EV_HUP -116 | ||
234 | #define EV_BC_OPEN -117 | ||
235 | #define EV_BC_CLOSED -118 | ||
236 | |||
237 | /* input state */ | ||
238 | #define INS_command 0x0001 | ||
239 | #define INS_DLE_char 0x0002 | ||
240 | #define INS_byte_stuff 0x0004 | ||
241 | #define INS_have_data 0x0008 | ||
242 | #define INS_skip_frame 0x0010 | ||
243 | #define INS_DLE_command 0x0020 | ||
244 | #define INS_flag_hunt 0x0040 | ||
245 | |||
246 | /* channel state */ | ||
247 | #define CHS_D_UP 0x01 | ||
248 | #define CHS_B_UP 0x02 | ||
249 | #define CHS_NOTIFY_LL 0x04 | ||
250 | |||
251 | #define ICALL_REJECT 0 | ||
252 | #define ICALL_ACCEPT 1 | ||
253 | #define ICALL_IGNORE 2 | ||
254 | |||
255 | /* device state */ | ||
256 | #define MS_UNINITIALIZED 0 | ||
257 | #define MS_INIT 1 | ||
258 | #define MS_LOCKED 2 | ||
259 | #define MS_SHUTDOWN 3 | ||
260 | #define MS_RECOVER 4 | ||
261 | #define MS_READY 5 | ||
262 | |||
263 | /* mode */ | ||
264 | #define M_UNKNOWN 0 | ||
265 | #define M_CONFIG 1 | ||
266 | #define M_UNIMODEM 2 | ||
267 | #define M_CID 3 | ||
268 | |||
269 | /* start mode */ | ||
270 | #define SM_LOCKED 0 | ||
271 | #define SM_ISDN 1 /* default */ | ||
272 | |||
273 | struct gigaset_ops; | ||
274 | struct gigaset_driver; | ||
275 | |||
276 | struct usb_cardstate; | ||
277 | struct ser_cardstate; | ||
278 | struct bas_cardstate; | ||
279 | |||
280 | struct bc_state; | ||
281 | struct usb_bc_state; | ||
282 | struct ser_bc_state; | ||
283 | struct bas_bc_state; | ||
284 | |||
285 | struct reply_t { | ||
286 | int resp_code; /* RSP_XXXX */ | ||
287 | int min_ConState; /* <0 => ignore */ | ||
288 | int max_ConState; /* <0 => ignore */ | ||
289 | int parameter; /* e.g. ZSAU_XXXX <0: ignore*/ | ||
290 | int new_ConState; /* <0 => ignore */ | ||
291 | int timeout; /* >0 => *HZ; <=0 => TOUT_XXXX*/ | ||
292 | int action[MAXACT]; /* ACT_XXXX */ | ||
293 | char *command; /* NULL==none */ | ||
294 | }; | ||
295 | |||
296 | extern struct reply_t gigaset_tab_cid_m10x[]; | ||
297 | extern struct reply_t gigaset_tab_nocid_m10x[]; | ||
298 | |||
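The reply_t rows above drive the matching loop in process_event() shown earlier: a row applies when its resp_code equals the event type (or is the wildcard), the current ConState lies within [min_ConState, max_ConState], and parameter is negative or equal to ev->parameter. The fragment below is a purely illustrative sketch of such a row; every value in it is a placeholder and none of it is taken from the real gigaset_tab_*_m10x[] tables in ev-layer.c.

	/* Illustration only -- all values are placeholders, not entries of the
	 * real tables; the RSP_* and ACT_* codes live in ev-layer.c. */
	static struct reply_t example_row_sketch[] = {
		{
			.resp_code    = -1,	/* placeholder for an RSP_* code */
			.min_ConState = 100,	/* match only while ConState == 100 */
			.max_ConState = 100,
			.parameter    = -1,	/* ignore ev->parameter */
			.new_ConState = 101,	/* ConState to advance to on a match */
			.timeout      = 5,	/* rearm the timer (see process_event()) */
			.action       = { 0 },	/* placeholder for up to MAXACT ACT_* codes */
			.command      = "AT\r",	/* hypothetical command for send_command() */
		},
	};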
299 | struct inbuf_t { | ||
300 | unsigned char *rcvbuf; /* usb-gigaset receive buffer */ | ||
301 | struct bc_state *bcs; | ||
302 | struct cardstate *cs; | ||
303 | int inputstate; | ||
304 | |||
305 | atomic_t head, tail; | ||
306 | unsigned char data[RBUFSIZE]; | ||
307 | }; | ||
308 | |||
309 | /* isochronous write buffer structure | ||
310 | * circular buffer with pad area for extraction of complete USB frames | ||
311 | * - data[read..nextread-1] is valid data already submitted to the USB subsystem | ||
312 | * - data[nextread..write-1] is valid data yet to be sent | ||
313 | * - data[write] is the next byte to write to | ||
314 | * - in byte-oriented L2 protocols, it is completely free | ||
315 | * - in bit-oriented L2 protocols, it may contain a partial byte of valid data | ||
316 | * - data[write+1..read-1] is free | ||
317 | * - wbits is the number of valid data bits in data[write], starting at the LSB | ||
318 | * - writesem is the semaphore for writing to the buffer: | ||
319 | * if writesem <= 0, data[write..read-1] is currently being written to | ||
320 | * - idle contains the byte value to repeat when the end of valid data is | ||
321 | * reached; if nextread==write (buffer contains no data to send), either the | ||
322 | * BAS_OUTBUFPAD bytes immediately before data[write] (if write>=BAS_OUTBUFPAD) | ||
323 | * or those of the pad area (if write<BAS_OUTBUFPAD) are also filled with that | ||
324 | * value | ||
325 | * - optionally, the following statistics on the buffer's usage can be collected: | ||
326 | * maxfill: maximum number of bytes occupied | ||
327 | * idlefills: number of times a frame of idle bytes is prepared | ||
328 | * emptygets: number of times the buffer was empty when a data frame was requested | ||
329 | * backtoback: number of times two data packets were entered into the buffer | ||
330 | * without intervening idle flags | ||
331 | * nakedback: set if no idle flags have been inserted since the last data packet | ||
332 | */ | ||
333 | struct isowbuf_t { | ||
334 | atomic_t read; | ||
335 | atomic_t nextread; | ||
336 | atomic_t write; | ||
337 | atomic_t writesem; | ||
338 | int wbits; | ||
339 | unsigned char data[BAS_OUTBUFSIZE + BAS_OUTBUFPAD]; | ||
340 | unsigned char idle; | ||
341 | }; | ||
342 | |||
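The buffer invariants described above boil down to simple circular index arithmetic. The helper below is only a sketch of that arithmetic, assuming the circular region spans BAS_OUTBUFSIZE bytes; it is not the accessor the driver itself uses (the real buffer handling is in isocdata.c).

	/* Sketch only: free space according to the invariant that
	 * data[write+1 .. read-1] (taken circularly) is free. */
	static inline int isowbuf_free_sketch(struct isowbuf_t *iwb)
	{
		int read  = atomic_read(&iwb->read);
		int write = atomic_read(&iwb->write);

		return (read - write - 1 + BAS_OUTBUFSIZE) % BAS_OUTBUFSIZE;
	}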
343 | /* isochronous write URB context structure | ||
344 | * data to be stored along with the URB and retrieved when it is returned | ||
345 | * as completed by the USB subsystem | ||
346 | * - urb: pointer to the URB itself | ||
347 | * - bcs: pointer to the B Channel control structure | ||
348 | * - limit: end of write buffer area covered by this URB | ||
349 | */ | ||
350 | struct isow_urbctx_t { | ||
351 | struct urb *urb; | ||
352 | struct bc_state *bcs; | ||
353 | int limit; | ||
354 | }; | ||
355 | |||
356 | /* AT state structure | ||
357 | * data associated with the state of an ISDN connection, whether or not | ||
358 | * it is currently assigned a B channel | ||
359 | */ | ||
360 | struct at_state_t { | ||
361 | struct list_head list; | ||
362 | int waiting; | ||
363 | int getstring; | ||
364 | atomic_t timer_index; | ||
365 | unsigned long timer_expires; | ||
366 | int timer_active; | ||
367 | unsigned int ConState; /* State of connection */ | ||
368 | struct reply_t *replystruct; | ||
369 | int cid; | ||
370 | int int_var[VAR_NUM]; /* see VAR_XXXX */ | ||
371 | char *str_var[STR_NUM]; /* see STR_XXXX */ | ||
372 | unsigned pending_commands; /* see PC_XXXX */ | ||
373 | atomic_t seq_index; | ||
374 | |||
375 | struct cardstate *cs; | ||
376 | struct bc_state *bcs; | ||
377 | }; | ||
378 | |||
379 | struct resp_type_t { | ||
380 | unsigned char *response; | ||
381 | int resp_code; /* RSP_XXXX */ | ||
382 | int type; /* RT_XXXX */ | ||
383 | }; | ||
384 | |||
385 | struct prot_skb { | ||
386 | atomic_t empty; | ||
387 | struct semaphore *sem; | ||
388 | struct sk_buff *skb; | ||
389 | }; | ||
390 | |||
391 | struct event_t { | ||
392 | int type; | ||
393 | void *ptr, *arg; | ||
394 | int parameter; | ||
395 | int cid; | ||
396 | struct at_state_t *at_state; | ||
397 | }; | ||
398 | |||
399 | /* This structure holds all information about the B channel in use */ | ||
400 | struct bc_state { | ||
401 | struct sk_buff *tx_skb; /* Current transfer buffer to modem */ | ||
402 | struct sk_buff_head squeue; /* B-Channel send Queue */ | ||
403 | |||
404 | /* Variables for debugging .. */ | ||
405 | int corrupted; /* Counter of corrupted packets */ | ||
406 | int trans_down; /* Counter of packets (downstream) */ | ||
407 | int trans_up; /* Counter of packets (upstream) */ | ||
408 | |||
409 | struct at_state_t at_state; | ||
410 | unsigned long rcvbytes; | ||
411 | |||
412 | __u16 fcs; | ||
413 | struct sk_buff *skb; | ||
414 | int inputstate; /* see INS_XXXX */ | ||
415 | |||
416 | int channel; | ||
417 | |||
418 | struct cardstate *cs; | ||
419 | |||
420 | unsigned chstate; /* bitmap (CHS_*) */ | ||
421 | int ignore; | ||
422 | unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */ | ||
423 | char *commands[AT_NUM]; /* see AT_XXXX */ | ||
424 | |||
425 | #ifdef CONFIG_GIGASET_DEBUG | ||
426 | int emptycount; | ||
427 | #endif | ||
428 | int busy; | ||
429 | int use_count; | ||
430 | |||
431 | /* hardware drivers */ | ||
432 | union { | ||
433 | struct ser_bc_state *ser; /* private data of serial hardware driver */ | ||
434 | struct usb_bc_state *usb; /* private data of usb hardware driver */ | ||
435 | struct bas_bc_state *bas; | ||
436 | } hw; | ||
437 | }; | ||
438 | |||
439 | struct cardstate { | ||
440 | struct gigaset_driver *driver; | ||
441 | unsigned minor_index; | ||
442 | |||
443 | const struct gigaset_ops *ops; | ||
444 | |||
445 | /* Stuff to handle communication */ | ||
446 | //wait_queue_head_t initwait; | ||
447 | wait_queue_head_t waitqueue; | ||
448 | int waiting; | ||
449 | atomic_t mode; /* see M_XXXX */ | ||
450 | atomic_t mstate; /* Modem state: see MS_XXXX */ | ||
451 | /* only changed by the event layer */ | ||
452 | int cmd_result; | ||
453 | |||
454 | int channels; | ||
455 | struct bc_state *bcs; /* Array of struct bc_state */ | ||
456 | |||
457 | int onechannel; /* data and commands transmitted in one stream (M10x) */ | ||
458 | |||
459 | spinlock_t lock; | ||
460 | struct at_state_t at_state; /* at_state_t for cid == 0 */ | ||
461 | struct list_head temp_at_states; /* list of temporary "struct at_state_t"s without B channel */ | ||
462 | |||
463 | struct inbuf_t *inbuf; | ||
464 | |||
465 | struct cmdbuf_t *cmdbuf, *lastcmdbuf; | ||
466 | spinlock_t cmdlock; | ||
467 | unsigned curlen, cmdbytes; | ||
468 | |||
469 | unsigned open_count; | ||
470 | struct tty_struct *tty; | ||
471 | struct tasklet_struct if_wake_tasklet; | ||
472 | unsigned control_state; | ||
473 | |||
474 | unsigned fwver[4]; | ||
475 | int gotfwver; | ||
476 | |||
477 | atomic_t running; /* !=0 if events are handled */ | ||
478 | atomic_t connected; /* !=0 if hardware is connected */ | ||
479 | |||
480 | atomic_t cidmode; | ||
481 | |||
482 | int myid; /* id for communication with LL */ | ||
483 | isdn_if iif; | ||
484 | |||
485 | struct reply_t *tabnocid; | ||
486 | struct reply_t *tabcid; | ||
487 | int cs_init; | ||
488 | int ignoreframes; /* frames to ignore after setting up the B channel */ | ||
489 | struct semaphore sem; /* locks this structure: */ | ||
490 | /* connected is not changed, */ | ||
491 | /* hardware_up is not changed, */ | ||
492 | /* MState is not changed to or from MS_LOCKED */ | ||
493 | |||
494 | struct timer_list timer; | ||
495 | int retry_count; | ||
496 | int dle; /* !=0 if modem commands/responses are dle encoded */ | ||
497 | int cur_at_seq; /* sequence of AT commands being processed */ | ||
498 | int curchannel; /* channel these commands are meant for */ | ||
499 | atomic_t commands_pending; /* flag(s) in xxx.commands_pending have been set */ | ||
500 | struct tasklet_struct event_tasklet; /* tasklet for serializing AT commands. Scheduled | ||
502 | * -> for modem responses (and incoming data for M10x) | ||
502 | * -> on timeout | ||
503 | * -> after setting bits in xxx.at_state.pending_command | ||
504 | * (e.g. command from LL) */ | ||
505 | struct tasklet_struct write_tasklet; /* tasklet for serial output | ||
506 | * (not used in base driver) */ | ||
507 | |||
508 | /* event queue */ | ||
509 | struct event_t events[MAX_EVENTS]; | ||
510 | atomic_t ev_tail, ev_head; | ||
511 | spinlock_t ev_lock; | ||
512 | |||
513 | /* current modem response */ | ||
514 | unsigned char respdata[MAX_RESP_SIZE]; | ||
515 | unsigned cbytes; | ||
516 | |||
517 | /* hardware drivers */ | ||
518 | union { | ||
519 | struct usb_cardstate *usb; /* private data of USB hardware driver */ | ||
520 | struct ser_cardstate *ser; /* private data of serial hardware driver */ | ||
521 | struct bas_cardstate *bas; /* private data of base hardware driver */ | ||
522 | } hw; | ||
523 | }; | ||
524 | |||
525 | struct gigaset_driver { | ||
526 | struct list_head list; | ||
527 | spinlock_t lock; /* locks minor tables and blocked */ | ||
528 | //struct semaphore sem; /* locks this structure */ | ||
529 | struct tty_driver *tty; | ||
530 | unsigned have_tty; | ||
531 | unsigned minor; | ||
532 | unsigned minors; | ||
533 | struct cardstate *cs; | ||
534 | unsigned *flags; | ||
535 | int blocked; | ||
536 | |||
537 | const struct gigaset_ops *ops; | ||
538 | struct module *owner; | ||
539 | }; | ||
540 | |||
541 | struct cmdbuf_t { | ||
542 | struct cmdbuf_t *next, *prev; | ||
543 | int len, offset; | ||
544 | struct tasklet_struct *wake_tasklet; | ||
545 | unsigned char buf[0]; | ||
546 | }; | ||
547 | |||
548 | struct bas_bc_state { | ||
549 | /* isochronous output state */ | ||
550 | atomic_t running; | ||
551 | atomic_t corrbytes; | ||
552 | spinlock_t isooutlock; | ||
553 | struct isow_urbctx_t isoouturbs[BAS_OUTURBS]; | ||
554 | struct isow_urbctx_t *isooutdone, *isooutfree, *isooutovfl; | ||
555 | struct isowbuf_t *isooutbuf; | ||
556 | unsigned numsub; /* submitted URB counter (for diagnostic messages only) */ | ||
557 | struct tasklet_struct sent_tasklet; | ||
558 | |||
559 | /* isochronous input state */ | ||
560 | spinlock_t isoinlock; | ||
561 | struct urb *isoinurbs[BAS_INURBS]; | ||
562 | unsigned char isoinbuf[BAS_INBUFSIZE * BAS_INURBS]; | ||
563 | struct urb *isoindone; /* completed isoc read URB */ | ||
564 | int loststatus; /* status of dropped URB */ | ||
565 | unsigned isoinlost; /* number of bytes lost */ | ||
566 | /* state of bit unstuffing algorithm (in addition to BC_state.inputstate) */ | ||
567 | unsigned seqlen; /* number of '1' bits not yet unstuffed */ | ||
568 | unsigned inbyte, inbits; /* collected bits for next byte */ | ||
569 | /* statistics */ | ||
570 | unsigned goodbytes; /* bytes correctly received */ | ||
571 | unsigned alignerrs; /* frames with incomplete byte at end */ | ||
572 | unsigned fcserrs; /* FCS errors */ | ||
573 | unsigned frameerrs; /* framing errors */ | ||
574 | unsigned giants; /* long frames */ | ||
575 | unsigned runts; /* short frames */ | ||
576 | unsigned aborts; /* HDLC aborts */ | ||
577 | unsigned shared0s; /* '0' bits shared between flags */ | ||
578 | unsigned stolen0s; /* '0' stuff bits also serving as leading flag bits */ | ||
579 | struct tasklet_struct rcvd_tasklet; | ||
580 | }; | ||
581 | |||
582 | struct gigaset_ops { | ||
583 | /* Called from ev-layer.c/interface.c for sending AT commands to the device */ | ||
584 | int (*write_cmd)(struct cardstate *cs, | ||
585 | const unsigned char *buf, int len, | ||
586 | struct tasklet_struct *wake_tasklet); | ||
587 | |||
588 | /* Called from interface.c for additional device control */ | ||
589 | int (*write_room)(struct cardstate *cs); | ||
590 | int (*chars_in_buffer)(struct cardstate *cs); | ||
591 | int (*brkchars)(struct cardstate *cs, const unsigned char buf[6]); | ||
592 | |||
593 | /* Called from ev-layer.c after setting up connection | ||
594 | * Should call gigaset_bchannel_up(), when finished. */ | ||
595 | int (*init_bchannel)(struct bc_state *bcs); | ||
596 | |||
597 | /* Called from ev-layer.c after hanging up | ||
598 | * Should call gigaset_bchannel_down(), when finished. */ | ||
599 | int (*close_bchannel)(struct bc_state *bcs); | ||
600 | |||
601 | /* Called by gigaset_initcs() for setting up bcs->hw.xxx */ | ||
602 | int (*initbcshw)(struct bc_state *bcs); | ||
603 | |||
604 | /* Called by gigaset_freecs() for freeing bcs->hw.xxx */ | ||
605 | int (*freebcshw)(struct bc_state *bcs); | ||
606 | |||
607 | /* Called by gigaset_stop() or gigaset_bchannel_down() for resetting bcs->hw.xxx */ | ||
608 | void (*reinitbcshw)(struct bc_state *bcs); | ||
609 | |||
610 | /* Called by gigaset_initcs() for setting up cs->hw.xxx */ | ||
611 | int (*initcshw)(struct cardstate *cs); | ||
612 | |||
613 | /* Called by gigaset_freecs() for freeing cs->hw.xxx */ | ||
614 | void (*freecshw)(struct cardstate *cs); | ||
615 | |||
616 | ///* Called by gigaset_stop() for killing URBs, shutting down the device, ... | ||
617 | // hardwareup: ==0: don't try to shut down the device, hardware is really not accessible | ||
618 | // !=0: hardware still up */ | ||
619 | //void (*stophw)(struct cardstate *cs, int hardwareup); | ||
620 | |||
621 | /* Called from common.c/interface.c for additional serial port control */ | ||
622 | int (*set_modem_ctrl)(struct cardstate *cs, unsigned old_state, unsigned new_state); | ||
623 | int (*baud_rate)(struct cardstate *cs, unsigned cflag); | ||
624 | int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag); | ||
625 | |||
626 | /* Called from i4l.c to put an skb into the send-queue. */ | ||
627 | int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); | ||
628 | |||
629 | /* Called from ev-layer.c to process a block of data | ||
630 | * received through the common/control channel. */ | ||
631 | void (*handle_input)(struct inbuf_t *inbuf); | ||
632 | |||
633 | }; | ||
634 | |||
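To see how these callbacks fit together: each hardware variant fills in one gigaset_ops instance and passes it to gigaset_initdriver()/gigaset_initcs() declared further below. The sketch assumes hypothetical my_* functions supplied by such a variant; only the field names and the two gigaset_m10x_* helpers (from asyncdata.c) are real.

	/* Sketch only -- the my_* callbacks are hypothetical and would be
	 * defined by the hardware driver; gigaset_m10x_* are the real
	 * asyncdata.c entry points declared below. */
	static const struct gigaset_ops my_ops_sketch = {
		.write_cmd	 = my_write_cmd,	/* hypothetical */
		.write_room	 = my_write_room,	/* hypothetical */
		.chars_in_buffer = my_chars_in_buffer,	/* hypothetical */
		.brkchars	 = my_brkchars,		/* hypothetical */
		.init_bchannel	 = my_init_bchannel,	/* hypothetical */
		.close_bchannel	 = my_close_bchannel,	/* hypothetical */
		.initbcshw	 = my_initbcshw,	/* hypothetical */
		.freebcshw	 = my_freebcshw,	/* hypothetical */
		.reinitbcshw	 = my_reinitbcshw,	/* hypothetical */
		.initcshw	 = my_initcshw,		/* hypothetical */
		.freecshw	 = my_freecshw,		/* hypothetical */
		.set_modem_ctrl	 = my_set_modem_ctrl,	/* hypothetical */
		.baud_rate	 = my_baud_rate,	/* hypothetical */
		.set_line_ctrl	 = my_set_line_ctrl,	/* hypothetical */
		.send_skb	 = gigaset_m10x_send_skb,
		.handle_input	 = gigaset_m10x_input,
	};

A serial or USB variant would then hand a pointer to such a table to gigaset_initdriver() together with its minor range and device names.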
635 | /* = Common structures and definitions ======================================= */ | ||
636 | |||
637 | /* Parser states for DLE-Event: | ||
638 | * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "." | ||
639 | * <DLE_FLAG>: 0x10 | ||
640 | * <EVENT>: ((a-z)* | (A-Z)* | (0-10)*)+ | ||
641 | */ | ||
642 | #define DLE_FLAG 0x10 | ||
643 | |||
644 | /* =========================================================================== | ||
645 | * Functions implemented in asyncdata.c | ||
646 | */ | ||
647 | |||
648 | /* Called from i4l.c to put an skb into the send-queue. | ||
649 | * After sending gigaset_skb_sent() should be called. */ | ||
650 | int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb); | ||
651 | |||
652 | /* Called from ev-layer.c to process a block of data | ||
653 | * received through the common/control channel. */ | ||
654 | void gigaset_m10x_input(struct inbuf_t *inbuf); | ||
655 | |||
656 | /* =========================================================================== | ||
657 | * Functions implemented in isocdata.c | ||
658 | */ | ||
659 | |||
660 | /* Called from i4l.c to put an skb into the send-queue. | ||
661 | * After sending gigaset_skb_sent() should be called. */ | ||
662 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb); | ||
663 | |||
664 | /* Called from ev-layer.c to process a block of data | ||
665 | * received through the common/control channel. */ | ||
666 | void gigaset_isoc_input(struct inbuf_t *inbuf); | ||
667 | |||
668 | /* Called from bas-gigaset.c to process a block of data | ||
669 | * received through the isochronous channel */ | ||
670 | void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs); | ||
671 | |||
672 | /* Called from bas-gigaset.c to put a block of data | ||
673 | * into the isochronous output buffer */ | ||
674 | int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len); | ||
675 | |||
676 | /* Called from bas-gigaset.c to initialize the isochronous output buffer */ | ||
677 | void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle); | ||
678 | |||
679 | /* Called from bas-gigaset.c to retrieve a block of bytes for sending */ | ||
680 | int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size); | ||
681 | |||
682 | /* =========================================================================== | ||
683 | * Functions implemented in i4l.c/gigaset.h | ||
684 | */ | ||
685 | |||
686 | /* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */ | ||
687 | int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid); | ||
688 | |||
689 | /* Called from xxx-gigaset.c to indicate completion of sending an skb */ | ||
690 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); | ||
691 | |||
692 | /* Called from common.c/ev-layer.c to indicate events relevant to the LL */ | ||
693 | int gigaset_isdn_icall(struct at_state_t *at_state); | ||
694 | int gigaset_isdn_setup_accept(struct at_state_t *at_state); | ||
695 | int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data); | ||
696 | |||
697 | void gigaset_i4l_cmd(struct cardstate *cs, int cmd); | ||
698 | void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd); | ||
699 | |||
700 | |||
701 | static inline void gigaset_isdn_rcv_err(struct bc_state *bcs) | ||
702 | { | ||
703 | isdn_ctrl response; | ||
704 | |||
705 | /* error -> LL */ | ||
706 | dbg(DEBUG_CMD, "sending L1ERR"); | ||
707 | response.driver = bcs->cs->myid; | ||
708 | response.command = ISDN_STAT_L1ERR; | ||
709 | response.arg = bcs->channel; | ||
710 | response.parm.errcode = ISDN_STAT_L1ERR_RECV; | ||
711 | bcs->cs->iif.statcallb(&response); | ||
712 | } | ||
713 | |||
714 | /* =========================================================================== | ||
715 | * Functions implemented in ev-layer.c | ||
716 | */ | ||
717 | |||
718 | /* tasklet called from common.c to process queued events */ | ||
719 | void gigaset_handle_event(unsigned long data); | ||
720 | |||
721 | /* called from isocdata.c / asyncdata.c | ||
722 | * when a complete modem response line has been received */ | ||
723 | void gigaset_handle_modem_response(struct cardstate *cs); | ||
724 | |||
725 | /* =========================================================================== | ||
726 | * Functions implemented in proc.c | ||
727 | */ | ||
728 | |||
729 | /* initialize sysfs for device */ | ||
730 | void gigaset_init_dev_sysfs(struct usb_interface *interface); | ||
731 | void gigaset_free_dev_sysfs(struct usb_interface *interface); | ||
732 | |||
733 | /* =========================================================================== | ||
734 | * Functions implemented in common.c/gigaset.h | ||
735 | */ | ||
736 | |||
737 | void gigaset_bcs_reinit(struct bc_state *bcs); | ||
738 | void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, | ||
739 | struct cardstate *cs, int cid); | ||
740 | int gigaset_get_channel(struct bc_state *bcs); | ||
741 | void gigaset_free_channel(struct bc_state *bcs); | ||
742 | int gigaset_get_channels(struct cardstate *cs); | ||
743 | void gigaset_free_channels(struct cardstate *cs); | ||
744 | void gigaset_block_channels(struct cardstate *cs); | ||
745 | |||
746 | /* Allocate and initialize driver structure. */ | ||
747 | struct gigaset_driver *gigaset_initdriver(unsigned minor, unsigned minors, | ||
748 | const char *procname, | ||
749 | const char *devname, | ||
750 | const char *devfsname, | ||
751 | const struct gigaset_ops *ops, | ||
752 | struct module *owner); | ||
753 | |||
754 | /* Deallocate driver structure. */ | ||
755 | void gigaset_freedriver(struct gigaset_driver *drv); | ||
756 | void gigaset_debugdrivers(void); | ||
757 | struct cardstate *gigaset_get_cs_by_minor(unsigned minor); | ||
758 | struct cardstate *gigaset_get_cs_by_tty(struct tty_struct *tty); | ||
759 | struct cardstate *gigaset_get_cs_by_id(int id); | ||
760 | |||
761 | /* For drivers without fixed assignment device<->cardstate (usb) */ | ||
762 | struct cardstate *gigaset_getunassignedcs(struct gigaset_driver *drv); | ||
763 | void gigaset_unassign(struct cardstate *cs); | ||
764 | void gigaset_blockdriver(struct gigaset_driver *drv); | ||
765 | |||
766 | /* Allocate and initialize card state. Calls hardware dependent gigaset_init[b]cs(). */ | ||
767 | struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels, | ||
768 | int onechannel, int ignoreframes, | ||
769 | int cidmode, const char *modulename); | ||
770 | |||
771 | /* Free card state. Calls hardware dependent gigaset_free[b]cs(). */ | ||
772 | void gigaset_freecs(struct cardstate *cs); | ||
773 | |||
774 | /* Tell common.c that hardware and driver are ready. */ | ||
775 | int gigaset_start(struct cardstate *cs); | ||
776 | |||
777 | /* Tell common.c that the device is not present any more. */ | ||
778 | void gigaset_stop(struct cardstate *cs); | ||
779 | |||
780 | /* Tell common.c that the driver is being unloaded. */ | ||
781 | void gigaset_shutdown(struct cardstate *cs); | ||
782 | |||
783 | /* Tell common.c that an skb has been sent. */ | ||
784 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); | ||
785 | |||
786 | /* Append event to the queue. | ||
787 | * Returns NULL on failure or a pointer to the event on success. | ||
788 | * ptr must be kmalloc()ed (and not be freed by the caller). | ||
789 | */ | ||
790 | struct event_t *gigaset_add_event(struct cardstate *cs, | ||
791 | struct at_state_t *at_state, int type, | ||
792 | void *ptr, int parameter, void *arg); | ||
793 | |||
794 | /* Called on CONFIG1 command from frontend. */ | ||
795 | int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode | ||
796 | |||
797 | /* cs->lock must not be locked */ | ||
798 | static inline void gigaset_schedule_event(struct cardstate *cs) | ||
799 | { | ||
800 | unsigned long flags; | ||
801 | spin_lock_irqsave(&cs->lock, flags); | ||
802 | if (atomic_read(&cs->running)) | ||
803 | tasklet_schedule(&cs->event_tasklet); | ||
804 | spin_unlock_irqrestore(&cs->lock, flags); | ||
805 | } | ||
806 | |||
807 | /* Tell common.c that B channel has been closed. */ | ||
808 | /* cs->lock must not be locked */ | ||
809 | static inline void gigaset_bchannel_down(struct bc_state *bcs) | ||
810 | { | ||
811 | gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_CLOSED, NULL, 0, NULL); | ||
812 | |||
813 | dbg(DEBUG_CMD, "scheduling BC_CLOSED"); | ||
814 | gigaset_schedule_event(bcs->cs); | ||
815 | } | ||
816 | |||
817 | /* Tell common.c that B channel has been opened. */ | ||
818 | /* cs->lock must not be locked */ | ||
819 | static inline void gigaset_bchannel_up(struct bc_state *bcs) | ||
820 | { | ||
821 | gigaset_add_event(bcs->cs, &bcs->at_state, EV_BC_OPEN, NULL, 0, NULL); | ||
822 | |||
823 | dbg(DEBUG_CMD, "scheduling BC_OPEN"); | ||
824 | gigaset_schedule_event(bcs->cs); | ||
825 | } | ||
826 | |||
827 | /* handling routines for sk_buff */ | ||
828 | /* ============================= */ | ||
829 | |||
830 | /* private version of __skb_put() | ||
831 | * append 'len' bytes to the content of 'skb', already knowing that the | ||
832 | * existing buffer can accommodate them | ||
833 | * returns a pointer to the location where the new bytes should be copied to | ||
834 | * This function does not take any locks so it must be called with the | ||
835 | * appropriate locks held only. | ||
836 | */ | ||
837 | static inline unsigned char *gigaset_skb_put_quick(struct sk_buff *skb, | ||
838 | unsigned int len) | ||
839 | { | ||
840 | unsigned char *tmp = skb->tail; | ||
841 | /*SKB_LINEAR_ASSERT(skb);*/ /* not needed here */ | ||
842 | skb->tail += len; | ||
843 | skb->len += len; | ||
844 | return tmp; | ||
845 | } | ||
846 | |||
847 | /* pass received skb to LL | ||
848 | * Warning: skb must not be accessed anymore! | ||
849 | */ | ||
850 | static inline void gigaset_rcv_skb(struct sk_buff *skb, | ||
851 | struct cardstate *cs, | ||
852 | struct bc_state *bcs) | ||
853 | { | ||
854 | cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb); | ||
855 | bcs->trans_down++; | ||
856 | } | ||
857 | |||
858 | /* handle reception of corrupted skb | ||
859 | * Warning: skb must not be accessed anymore! | ||
860 | */ | ||
861 | static inline void gigaset_rcv_error(struct sk_buff *procskb, | ||
862 | struct cardstate *cs, | ||
863 | struct bc_state *bcs) | ||
864 | { | ||
865 | if (procskb) | ||
866 | dev_kfree_skb(procskb); | ||
867 | |||
868 | if (bcs->ignore) | ||
869 | --bcs->ignore; | ||
870 | else { | ||
871 | ++bcs->corrupted; | ||
872 | gigaset_isdn_rcv_err(bcs); | ||
873 | } | ||
874 | } | ||
875 | |||
876 | |||
877 | /* bitwise byte inversion table */ | ||
878 | extern __u8 gigaset_invtab[]; /* in common.c */ | ||
879 | |||
880 | |||
881 | /* append received bytes to inbuf */ | ||
882 | static inline int gigaset_fill_inbuf(struct inbuf_t *inbuf, | ||
883 | const unsigned char *src, | ||
884 | unsigned numbytes) | ||
885 | { | ||
886 | unsigned n, head, tail, bytesleft; | ||
887 | |||
888 | dbg(DEBUG_INTR, "received %u bytes", numbytes); | ||
889 | |||
890 | if (!numbytes) | ||
891 | return 0; | ||
892 | |||
893 | bytesleft = numbytes; | ||
894 | tail = atomic_read(&inbuf->tail); | ||
895 | head = atomic_read(&inbuf->head); | ||
896 | dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); | ||
897 | |||
898 | while (bytesleft) { | ||
899 | if (head > tail) | ||
900 | n = head - 1 - tail; | ||
901 | else if (head == 0) | ||
902 | n = (RBUFSIZE-1) - tail; | ||
903 | else | ||
904 | n = RBUFSIZE - tail; | ||
905 | if (!n) { | ||
906 | err("buffer overflow (%u bytes lost)", bytesleft); | ||
907 | break; | ||
908 | } | ||
909 | if (n > bytesleft) | ||
910 | n = bytesleft; | ||
911 | memcpy(inbuf->data + tail, src, n); | ||
912 | bytesleft -= n; | ||
913 | tail = (tail + n) % RBUFSIZE; | ||
914 | src += n; | ||
915 | } | ||
916 | dbg(DEBUG_INTR, "setting tail to %u", tail); | ||
917 | atomic_set(&inbuf->tail, tail); | ||
918 | return numbytes != bytesleft; | ||
919 | } | ||
920 | |||
921 | /* =========================================================================== | ||
922 | * Functions implemented in interface.c | ||
923 | */ | ||
924 | |||
925 | /* initialize interface */ | ||
926 | void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, | ||
927 | const char *devname, const char *devfsname); | ||
928 | /* release interface */ | ||
929 | void gigaset_if_freedriver(struct gigaset_driver *drv); | ||
930 | /* add minor */ | ||
931 | void gigaset_if_init(struct cardstate *cs); | ||
932 | /* remove minor */ | ||
933 | void gigaset_if_free(struct cardstate *cs); | ||
934 | /* device received data */ | ||
935 | void gigaset_if_receive(struct cardstate *cs, | ||
936 | unsigned char *buffer, size_t len); | ||
937 | |||
938 | #endif | ||
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c new file mode 100644 index 000000000000..731a675f21b0 --- /dev/null +++ b/drivers/isdn/gigaset/i4l.c | |||
@@ -0,0 +1,567 @@ | |||
1 | /* | ||
2 | * Stuff used by all variants of the driver | ||
3 | * | ||
4 | * Copyright (c) 2001 by Stefan Eilers (Eilers.Stefan@epost.de), | ||
5 | * Hansjoerg Lipp (hjlipp@web.de), | ||
6 | * Tilman Schmidt (tilman@imap.cc). | ||
7 | * | ||
8 | * ===================================================================== | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of | ||
12 | * the License, or (at your option) any later version. | ||
13 | * ===================================================================== | ||
14 | * ToDo: ... | ||
15 | * ===================================================================== | ||
16 | * Version: $Id: i4l.c,v 1.3.2.9 2006/02/04 18:28:16 hjlipp Exp $ | ||
17 | * ===================================================================== | ||
18 | */ | ||
19 | |||
20 | #include "gigaset.h" | ||
21 | |||
22 | /* == Handling of I4L IO ============================================================================*/ | ||
23 | |||
24 | /* writebuf_from_LL | ||
25 | * called by LL to transmit data on an open channel | ||
26 | * inserts the buffer data into the send queue and starts the transmission | ||
27 | * Note that this operation must not sleep! | ||
28 | * When the buffer is processed completely, gigaset_skb_sent() should be called. | ||
29 | * parameters: | ||
30 | * driverID driver ID as assigned by LL | ||
31 | * channel channel number | ||
32 | * ack if != 0 LL wants to be notified on completion via statcallb(ISDN_STAT_BSENT) | ||
33 | * skb skb containing data to send | ||
34 | * return value: | ||
35 | * number of accepted bytes | ||
36 | * 0 if temporarily unable to accept data (out of buffer space) | ||
37 | * <0 on error (eg. -EINVAL) | ||
38 | */ | ||
39 | static int writebuf_from_LL(int driverID, int channel, int ack, struct sk_buff *skb) | ||
40 | { | ||
41 | struct cardstate *cs; | ||
42 | struct bc_state *bcs; | ||
43 | unsigned len; | ||
44 | unsigned skblen; | ||
45 | |||
46 | if (!(cs = gigaset_get_cs_by_id(driverID))) { | ||
47 | err("%s: invalid driver ID (%d)", __func__, driverID); | ||
48 | return -ENODEV; | ||
49 | } | ||
50 | if (channel < 0 || channel >= cs->channels) { | ||
51 | err("%s: invalid channel ID (%d)", __func__, channel); | ||
52 | return -ENODEV; | ||
53 | } | ||
54 | bcs = &cs->bcs[channel]; | ||
55 | len = skb->len; | ||
56 | |||
57 | dbg(DEBUG_LLDATA, | ||
58 | "Receiving data from LL (id: %d, channel: %d, ack: %d, size: %d)", | ||
59 | driverID, channel, ack, len); | ||
60 | |||
61 | if (!len) { | ||
62 | if (ack) | ||
63 | warn("not ACKing empty packet from LL"); | ||
64 | return 0; | ||
65 | } | ||
66 | if (len > MAX_BUF_SIZE) { | ||
67 | err("%s: packet too large (%d bytes)", __func__, channel); | ||
68 | return -EINVAL; | ||
69 | } | ||
70 | |||
71 | if (!atomic_read(&cs->connected)) | ||
72 | return -ENODEV; | ||
73 | |||
74 | skblen = ack ? len : 0; | ||
75 | skb->head[0] = skblen & 0xff; | ||
76 | skb->head[1] = skblen >> 8; | ||
77 | dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", len, skblen, | ||
78 | (unsigned) skb->head[0], (unsigned) skb->head[1]); | ||
79 | |||
80 | /* pass to device-specific module */ | ||
81 | return cs->ops->send_skb(bcs, skb); | ||
82 | } | ||
83 | |||
84 | void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) | ||
85 | { | ||
86 | unsigned len; | ||
87 | isdn_ctrl response; | ||
88 | |||
89 | ++bcs->trans_up; | ||
90 | |||
91 | if (skb->len) | ||
92 | warn("%s: skb->len==%d", __func__, skb->len); | ||
93 | |||
94 | len = (unsigned char) skb->head[0] | | ||
95 | (unsigned) (unsigned char) skb->head[1] << 8; | ||
96 | if (len) { | ||
97 | dbg(DEBUG_MCMD, | ||
98 | "Acknowledge sending to LL (id: %d, channel: %d size: %u)", | ||
99 | bcs->cs->myid, bcs->channel, len); | ||
100 | |||
101 | response.driver = bcs->cs->myid; | ||
102 | response.command = ISDN_STAT_BSENT; | ||
103 | response.arg = bcs->channel; | ||
104 | response.parm.length = len; | ||
105 | bcs->cs->iif.statcallb(&response); | ||
106 | } | ||
107 | } | ||
108 | EXPORT_SYMBOL_GPL(gigaset_skb_sent); | ||
109 | |||
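writebuf_from_LL() above stores the length to be acknowledged in the two HW_HDR_LEN bytes at skb->head, little-endian, before handing the skb to the hardware driver, and gigaset_skb_sent() reads it back once transmission is complete. The helpers below merely restate that convention as a sketch; the driver itself open-codes it as shown.

	/* Sketch only: the 2-byte (HW_HDR_LEN) ack length kept in skb->head. */
	static inline void ack_len_store_sketch(struct sk_buff *skb, unsigned len)
	{
		skb->head[0] = len & 0xff;	/* low byte */
		skb->head[1] = len >> 8;	/* high byte */
	}

	static inline unsigned ack_len_load_sketch(const struct sk_buff *skb)
	{
		return (unsigned char) skb->head[0]
		       | ((unsigned) (unsigned char) skb->head[1] << 8);
	}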
110 | /* This function will be called by LL to send commands | ||
111 | * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL, | ||
112 | * so don't put too much effort into it. | ||
113 | */ | ||
114 | static int command_from_LL(isdn_ctrl *cntrl) | ||
115 | { | ||
116 | struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver); | ||
117 | //isdn_ctrl response; | ||
118 | //unsigned long flags; | ||
119 | struct bc_state *bcs; | ||
120 | int retval = 0; | ||
121 | struct setup_parm *sp; | ||
122 | |||
123 | //dbg(DEBUG_ANY, "Gigaset_HW: Receiving command"); | ||
124 | gigaset_debugdrivers(); | ||
125 | |||
126 | /* Terminate this call if no device is present. But if the command is "ISDN_CMD_LOCK" or | ||
127 | * "ISDN_CMD_UNLOCK", execute it anyway, because these commands are device independent. | ||
128 | */ | ||
129 | //FIXME "remove test for &connected" | ||
130 | if ((!cs || !atomic_read(&cs->connected))) { | ||
131 | warn("LL tried to access unknown device with nr. %d", | ||
132 | cntrl->driver); | ||
133 | return -ENODEV; | ||
134 | } | ||
135 | |||
136 | switch (cntrl->command) { | ||
137 | case ISDN_CMD_IOCTL: | ||
138 | |||
139 | dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver:%d,arg: %ld)", | ||
140 | cntrl->driver, cntrl->arg); | ||
141 | |||
142 | warn("ISDN_CMD_IOCTL is not supported."); | ||
143 | return -EINVAL; | ||
144 | |||
145 | case ISDN_CMD_DIAL: | ||
146 | dbg(DEBUG_ANY, "ISDN_CMD_DIAL (driver: %d, channel: %ld, " | ||
147 | "phone: %s,ownmsn: %s, si1: %d, si2: %d)", | ||
148 | cntrl->driver, cntrl->arg, | ||
149 | cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn, | ||
150 | cntrl->parm.setup.si1, cntrl->parm.setup.si2); | ||
151 | |||
152 | if (cntrl->arg >= cs->channels) { | ||
153 | err("invalid channel (%d)", (int) cntrl->arg); | ||
154 | return -EINVAL; | ||
155 | } | ||
156 | |||
157 | bcs = cs->bcs + cntrl->arg; | ||
158 | |||
159 | if (!gigaset_get_channel(bcs)) { | ||
160 | err("channel not free"); | ||
161 | return -EBUSY; | ||
162 | } | ||
163 | |||
164 | sp = kmalloc(sizeof *sp, GFP_ATOMIC); | ||
165 | if (!sp) { | ||
166 | gigaset_free_channel(bcs); | ||
167 | err("ISDN_CMD_DIAL: out of memory"); | ||
168 | return -ENOMEM; | ||
169 | } | ||
170 | *sp = cntrl->parm.setup; | ||
171 | |||
172 | if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp, | ||
173 | atomic_read(&bcs->at_state.seq_index), | ||
174 | NULL)) { | ||
175 | //FIXME what should we do? | ||
176 | kfree(sp); | ||
177 | gigaset_free_channel(bcs); | ||
178 | return -ENOMEM; | ||
179 | } | ||
180 | |||
181 | dbg(DEBUG_CMD, "scheduling DIAL"); | ||
182 | gigaset_schedule_event(cs); | ||
183 | break; | ||
184 | case ISDN_CMD_ACCEPTD: //FIXME | ||
185 | dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); | ||
186 | |||
187 | if (cntrl->arg >= cs->channels) { | ||
188 | err("invalid channel (%d)", (int) cntrl->arg); | ||
189 | return -EINVAL; | ||
190 | } | ||
191 | |||
192 | if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, | ||
193 | EV_ACCEPT, NULL, 0, NULL)) { | ||
194 | //FIXME what should we do? | ||
195 | return -ENOMEM; | ||
196 | } | ||
197 | |||
198 | dbg(DEBUG_CMD, "scheduling ACCEPT"); | ||
199 | gigaset_schedule_event(cs); | ||
200 | |||
201 | break; | ||
202 | case ISDN_CMD_ACCEPTB: | ||
203 | dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB"); | ||
204 | break; | ||
205 | case ISDN_CMD_HANGUP: | ||
206 | dbg(DEBUG_ANY, | ||
207 | "ISDN_CMD_HANGUP (channel: %d)", (int) cntrl->arg); | ||
208 | |||
209 | if (cntrl->arg >= cs->channels) { | ||
210 | err("ISDN_CMD_HANGUP: invalid channel (%u)", | ||
211 | (unsigned) cntrl->arg); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | |||
215 | if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, | ||
216 | EV_HUP, NULL, 0, NULL)) { | ||
217 | //FIXME what should we do? | ||
218 | return -ENOMEM; | ||
219 | } | ||
220 | |||
221 | dbg(DEBUG_CMD, "scheduling HUP"); | ||
222 | gigaset_schedule_event(cs); | ||
223 | |||
224 | break; | ||
225 | case ISDN_CMD_CLREAZ: /* Do not signal incoming calls */ //FIXME | ||
226 | dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ"); | ||
227 | break; | ||
228 | case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME | ||
229 | dbg(DEBUG_ANY, | ||
230 | "ISDN_CMD_SETEAZ (id:%d, channel: %ld, number: %s)", | ||
231 | cntrl->driver, cntrl->arg, cntrl->parm.num); | ||
232 | break; | ||
233 | case ISDN_CMD_SETL2: /* Set L2 to given protocol */ | ||
234 | dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (Channel: %ld, Proto: %lx)", | ||
235 | cntrl->arg & 0xff, (cntrl->arg >> 8)); | ||
236 | |||
237 | if ((cntrl->arg & 0xff) >= cs->channels) { | ||
238 | err("invalid channel (%u)", | ||
239 | (unsigned) cntrl->arg & 0xff); | ||
240 | return -EINVAL; | ||
241 | } | ||
242 | |||
243 | if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state, | ||
244 | EV_PROTO_L2, NULL, cntrl->arg >> 8, | ||
245 | NULL)) { | ||
246 | //FIXME what should we do? | ||
247 | return -ENOMEM; | ||
248 | } | ||
249 | |||
250 | dbg(DEBUG_CMD, "scheduling PROTO_L2"); | ||
251 | gigaset_schedule_event(cs); | ||
252 | break; | ||
253 | case ISDN_CMD_SETL3: /* Set L3 to given protocol */ | ||
254 | dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (Channel: %ld, Proto: %lx)", | ||
255 | cntrl->arg & 0xff, (cntrl->arg >> 8)); | ||
256 | |||
257 | if ((cntrl->arg & 0xff) >= cs->channels) { | ||
258 | err("invalid channel (%u)", | ||
259 | (unsigned) cntrl->arg & 0xff); | ||
260 | return -EINVAL; | ||
261 | } | ||
262 | |||
263 | if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { | ||
264 | err("invalid protocol %lu", cntrl->arg >> 8); | ||
265 | return -EINVAL; | ||
266 | } | ||
267 | |||
268 | break; | ||
269 | case ISDN_CMD_PROCEED: | ||
270 | dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME | ||
271 | break; | ||
272 | case ISDN_CMD_ALERT: | ||
273 | dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME | ||
274 | if (cntrl->arg >= cs->channels) { | ||
275 | err("invalid channel (%d)", (int) cntrl->arg); | ||
276 | return -EINVAL; | ||
277 | } | ||
278 | //bcs = cs->bcs + cntrl->arg; | ||
279 | //bcs->proto2 = -1; | ||
280 | // FIXME | ||
281 | break; | ||
282 | case ISDN_CMD_REDIR: | ||
283 | dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME | ||
284 | break; | ||
285 | case ISDN_CMD_PROT_IO: | ||
286 | dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO"); | ||
287 | break; | ||
288 | case ISDN_CMD_FAXCMD: | ||
289 | dbg(DEBUG_ANY, "ISDN_CMD_FAXCMD"); | ||
290 | break; | ||
291 | case ISDN_CMD_GETL2: | ||
292 | dbg(DEBUG_ANY, "ISDN_CMD_GETL2"); | ||
293 | break; | ||
294 | case ISDN_CMD_GETL3: | ||
295 | dbg(DEBUG_ANY, "ISDN_CMD_GETL3"); | ||
296 | break; | ||
297 | case ISDN_CMD_GETEAZ: | ||
298 | dbg(DEBUG_ANY, "ISDN_CMD_GETEAZ"); | ||
299 | break; | ||
300 | case ISDN_CMD_SETSIL: | ||
301 | dbg(DEBUG_ANY, "ISDN_CMD_SETSIL"); | ||
302 | break; | ||
303 | case ISDN_CMD_GETSIL: | ||
304 | dbg(DEBUG_ANY, "ISDN_CMD_GETSIL"); | ||
305 | break; | ||
306 | default: | ||
307 | err("unknown command %d from LL", | ||
308 | cntrl->command); | ||
309 | return -EINVAL; | ||
310 | } | ||
311 | |||
312 | return retval; | ||
313 | } | ||
314 | |||
315 | void gigaset_i4l_cmd(struct cardstate *cs, int cmd) | ||
316 | { | ||
317 | isdn_ctrl command; | ||
318 | |||
319 | command.driver = cs->myid; | ||
320 | command.command = cmd; | ||
321 | command.arg = 0; | ||
322 | cs->iif.statcallb(&command); | ||
323 | } | ||
324 | |||
325 | void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd) | ||
326 | { | ||
327 | isdn_ctrl command; | ||
328 | |||
329 | command.driver = bcs->cs->myid; | ||
330 | command.command = cmd; | ||
331 | command.arg = bcs->channel; | ||
332 | bcs->cs->iif.statcallb(&command); | ||
333 | } | ||
334 | |||
335 | int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data) | ||
336 | { | ||
337 | struct bc_state *bcs = at_state->bcs; | ||
338 | unsigned proto; | ||
339 | const char *bc; | ||
340 | size_t length[AT_NUM]; | ||
341 | size_t l; | ||
342 | int i; | ||
343 | struct setup_parm *sp = data; | ||
344 | |||
345 | switch (bcs->proto2) { | ||
346 | case ISDN_PROTO_L2_HDLC: | ||
347 | proto = 1; /* 0: bit synchronous, 1: HDLC, 2: voice */ | ||
348 | break; | ||
349 | case ISDN_PROTO_L2_TRANS: | ||
350 | proto = 2; /* 0: bit synchronous, 1: HDLC, 2: voice */ | ||
351 | break; | ||
352 | default: | ||
353 | err("invalid protocol: %u", bcs->proto2); | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | |||
357 | switch (sp->si1) { | ||
358 | case 1: /* audio */ | ||
359 | bc = "9090A3"; /* 3.1 kHz audio, A-law */ | ||
360 | break; | ||
361 | case 7: /* data */ | ||
362 | default: /* hope the app knows what it is doing */ | ||
363 | bc = "8890"; /* unrestricted digital information */ | ||
364 | } | ||
365 | //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC | ||
366 | |||
367 | length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1; | ||
368 | l = strlen(sp->eazmsn); | ||
369 | length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0; | ||
370 | length[AT_BC ] = 5 + strlen(bc) + 1 + 1; | ||
371 | length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */ | ||
372 | length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */ | ||
373 | length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */ | ||
374 | length[AT_HLC ] = 0; | ||
375 | |||
376 | for (i = 0; i < AT_NUM; ++i) { | ||
377 | kfree(bcs->commands[i]); | ||
378 | bcs->commands[i] = NULL; | ||
379 | if (length[i] && | ||
380 | !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) { | ||
381 | err("out of memory"); | ||
382 | return -ENOMEM; | ||
383 | } | ||
384 | } | ||
385 | |||
386 | /* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */ | ||
387 | if (sp->phone[0] == '*' && sp->phone[1] == '*') { | ||
388 | /* internal call: translate ** prefix to CTP value */ | ||
389 | snprintf(bcs->commands[AT_DIAL], length[AT_DIAL], | ||
390 | "D%s\r", sp->phone+2); | ||
391 | strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]); | ||
392 | } else { | ||
393 | snprintf(bcs->commands[AT_DIAL], length[AT_DIAL], | ||
394 | "D%s\r", sp->phone); | ||
395 | strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]); | ||
396 | } | ||
397 | |||
398 | if (bcs->commands[AT_MSN]) | ||
399 | snprintf(bcs->commands[AT_MSN], length[AT_MSN], "^SMSN=%s\r", sp->eazmsn); | ||
400 | snprintf(bcs->commands[AT_BC ], length[AT_BC ], "^SBC=%s\r", bc); | ||
401 | snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto); | ||
402 | snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned)bcs->channel + 1); | ||
403 | |||
404 | return 0; | ||
405 | } | ||
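Each length[] entry above reserves room for the fixed command prefix, the variable part, the trailing '\r' and the terminating NUL (e.g. "^SMSN=" is six characters, hence 6 + l + 1 + 1 for AT_MSN), so every snprintf() fits exactly. A userspace sketch reproducing the sizing and formatting for an invented dial request; the AT_* indices, phone number and MSN here are local stand-ins, not the driver's values:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    enum { AT_DIAL, AT_MSN, AT_BC, AT_PROTO, AT_ISO, AT_NUM };  /* local stand-ins */

    int main(void)
    {
        const char *phone = "0123456789";   /* invented example number */
        const char *eazmsn = "987654";      /* invented example MSN */
        const char *bc = "8890";            /* unrestricted digital information */
        unsigned proto = 1;                 /* HDLC */
        unsigned channel = 0;
        char *cmd[AT_NUM];
        size_t length[AT_NUM];
        int i;

        length[AT_DIAL]  = 1 + strlen(phone) + 1 + 1;   /* "D" + number + '\r' + NUL */
        length[AT_MSN]   = 6 + strlen(eazmsn) + 1 + 1;  /* "^SMSN=" + MSN + '\r' + NUL */
        length[AT_BC]    = 5 + strlen(bc) + 1 + 1;      /* "^SBC=" + BC + '\r' + NUL */
        length[AT_PROTO] = 6 + 1 + 1 + 1;               /* "^SBPR=" + digit + '\r' + NUL */
        length[AT_ISO]   = 6 + 1 + 1 + 1;               /* "^SISO=" + digit + '\r' + NUL */

        for (i = 0; i < AT_NUM; ++i)
            if (!(cmd[i] = malloc(length[i])))
                return 1;

        snprintf(cmd[AT_DIAL],  length[AT_DIAL],  "D%s\r", phone);
        snprintf(cmd[AT_MSN],   length[AT_MSN],   "^SMSN=%s\r", eazmsn);
        snprintf(cmd[AT_BC],    length[AT_BC],    "^SBC=%s\r", bc);
        snprintf(cmd[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto);
        snprintf(cmd[AT_ISO],   length[AT_ISO],   "^SISO=%u\r", channel + 1);

        for (i = 0; i < AT_NUM; ++i) {
            /* each string fills its buffer exactly, NUL included */
            printf("cmd[%d]: %zu of %zu bytes used\n",
                   i, strlen(cmd[i]) + 1, length[i]);
            free(cmd[i]);
        }
        return 0;
    }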
406 | |||
407 | int gigaset_isdn_setup_accept(struct at_state_t *at_state) | ||
408 | { | ||
409 | unsigned proto; | ||
410 | size_t length[AT_NUM]; | ||
411 | int i; | ||
412 | struct bc_state *bcs = at_state->bcs; | ||
413 | |||
414 | switch (bcs->proto2) { | ||
415 | case ISDN_PROTO_L2_HDLC: | ||
416 | proto = 1; /* 0: bit synchronous, 1: HDLC, 2: voice */ | ||
417 | break; | ||
418 | case ISDN_PROTO_L2_TRANS: | ||
419 | proto = 2; /* 0: bit synchronous, 1: HDLC, 2: voice */ | ||
420 | break; | ||
421 | default: | ||
422 | err("invalid protocol: %u", bcs->proto2); | ||
423 | return -EINVAL; | ||
424 | } | ||
425 | |||
426 | length[AT_DIAL ] = 0; | ||
427 | length[AT_MSN ] = 0; | ||
428 | length[AT_BC ] = 0; | ||
429 | length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */ | ||
430 | length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */ | ||
431 | length[AT_TYPE ] = 0; | ||
432 | length[AT_HLC ] = 0; | ||
433 | |||
434 | for (i = 0; i < AT_NUM; ++i) { | ||
435 | kfree(bcs->commands[i]); | ||
436 | bcs->commands[i] = NULL; | ||
437 | if (length[i] && | ||
438 | !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) { | ||
439 | err("out of memory"); | ||
440 | return -ENOMEM; | ||
441 | } | ||
442 | } | ||
443 | |||
444 | snprintf(bcs->commands[AT_PROTO], length[AT_PROTO], "^SBPR=%u\r", proto); | ||
445 | snprintf(bcs->commands[AT_ISO ], length[AT_ISO ], "^SISO=%u\r", (unsigned) bcs->channel + 1); | ||
446 | |||
447 | return 0; | ||
448 | } | ||
449 | |||
450 | int gigaset_isdn_icall(struct at_state_t *at_state) | ||
451 | { | ||
452 | struct cardstate *cs = at_state->cs; | ||
453 | struct bc_state *bcs = at_state->bcs; | ||
454 | isdn_ctrl response; | ||
455 | int retval; | ||
456 | |||
457 | /* fill ICALL structure */ | ||
458 | response.parm.setup.si1 = 0; /* default: unknown */ | ||
459 | response.parm.setup.si2 = 0; | ||
460 | response.parm.setup.screen = 0; //FIXME how to set these? | ||
461 | response.parm.setup.plan = 0; | ||
462 | if (!at_state->str_var[STR_ZBC]) { | ||
463 | /* no BC (internal call): assume speech, A-law */ | ||
464 | response.parm.setup.si1 = 1; | ||
465 | } else if (!strcmp(at_state->str_var[STR_ZBC], "8890")) { | ||
466 | /* unrestricted digital information */ | ||
467 | response.parm.setup.si1 = 7; | ||
468 | } else if (!strcmp(at_state->str_var[STR_ZBC], "8090A3")) { | ||
469 | /* speech, A-law */ | ||
470 | response.parm.setup.si1 = 1; | ||
471 | } else if (!strcmp(at_state->str_var[STR_ZBC], "9090A3")) { | ||
472 | /* 3.1 kHz audio, A-law */ | ||
473 | response.parm.setup.si1 = 1; | ||
474 | response.parm.setup.si2 = 2; | ||
475 | } else { | ||
476 | warn("RING ignored - unsupported BC %s", | ||
477 | at_state->str_var[STR_ZBC]); | ||
478 | return ICALL_IGNORE; | ||
479 | } | ||
480 | if (at_state->str_var[STR_NMBR]) { | ||
481 | strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR], | ||
482 | sizeof response.parm.setup.phone - 1); | ||
483 | response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0; | ||
484 | } else | ||
485 | response.parm.setup.phone[0] = 0; | ||
486 | if (at_state->str_var[STR_ZCPN]) { | ||
487 | strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN], | ||
488 | sizeof response.parm.setup.eazmsn - 1); | ||
489 | response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0; | ||
490 | } else | ||
491 | response.parm.setup.eazmsn[0] = 0; | ||
492 | |||
493 | if (!bcs) { | ||
494 | notice("no channel for incoming call"); | ||
495 | dbg(DEBUG_CMD, "Sending ICALLW"); | ||
496 | response.command = ISDN_STAT_ICALLW; | ||
497 | response.arg = 0; //FIXME | ||
498 | } else { | ||
499 | dbg(DEBUG_CMD, "Sending ICALL"); | ||
500 | response.command = ISDN_STAT_ICALL; | ||
501 | response.arg = bcs->channel; //FIXME | ||
502 | } | ||
503 | response.driver = cs->myid; | ||
504 | retval = cs->iif.statcallb(&response); | ||
505 | dbg(DEBUG_CMD, "Response: %d", retval); | ||
506 | switch (retval) { | ||
507 | case 0: /* no takers */ | ||
508 | return ICALL_IGNORE; | ||
509 | case 1: /* alerting */ | ||
510 | bcs->chstate |= CHS_NOTIFY_LL; | ||
511 | return ICALL_ACCEPT; | ||
512 | case 2: /* reject */ | ||
513 | return ICALL_REJECT; | ||
514 | case 3: /* incomplete */ | ||
515 | warn("LL requested unsupported feature: Incomplete Number"); | ||
516 | return ICALL_IGNORE; | ||
517 | case 4: /* proceeding */ | ||
518 | /* Gigaset will send ALERTING anyway. | ||
519 | * There doesn't seem to be a way to avoid this. | ||
520 | */ | ||
521 | return ICALL_ACCEPT; | ||
522 | case 5: /* deflect */ | ||
523 | warn("LL requested unsupported feature: Call Deflection"); | ||
524 | return ICALL_IGNORE; | ||
525 | default: | ||
526 | err("LL error %d on ICALL", retval); | ||
527 | return ICALL_IGNORE; | ||
528 | } | ||
529 | } | ||
530 | |||
531 | /* Set Callback function pointer */ | ||
532 | int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid) | ||
533 | { | ||
534 | isdn_if *iif = &cs->iif; | ||
535 | |||
536 | dbg(DEBUG_ANY, "Register driver capabilities to LL"); | ||
537 | |||
538 | //iif->id[sizeof(iif->id) - 1]=0; | ||
539 | //strncpy(iif->id, isdnid, sizeof(iif->id) - 1); | ||
540 | if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) | ||
541 | >= sizeof iif->id) | ||
542 | return -ENOMEM; //FIXME EINVAL/...?? | ||
543 | |||
544 | iif->owner = THIS_MODULE; | ||
545 | iif->channels = cs->channels; /* number of B channels supported */ | ||
546 | iif->maxbufsize = MAX_BUF_SIZE; | ||
547 | iif->features = ISDN_FEATURE_L2_TRANS | /* supported L2/L3 protocols */ | ||
548 | ISDN_FEATURE_L2_HDLC | | ||
549 | #ifdef GIG_X75 | ||
550 | ISDN_FEATURE_L2_X75I | | ||
551 | #endif | ||
552 | ISDN_FEATURE_L3_TRANS | | ||
553 | ISDN_FEATURE_P_EURO; | ||
554 | iif->hl_hdrlen = HW_HDR_LEN; /* Area for storing ack */ | ||
555 | iif->command = command_from_LL; | ||
556 | iif->writebuf_skb = writebuf_from_LL; | ||
557 | iif->writecmd = NULL; /* Don't support isdnctrl */ | ||
558 | iif->readstat = NULL; /* Don't support isdnctrl */ | ||
559 | iif->rcvcallb_skb = NULL; /* Will be set by LL */ | ||
560 | iif->statcallb = NULL; /* Will be set by LL */ | ||
561 | |||
562 | if (!register_isdn(iif)) | ||
563 | return 0; | ||
564 | |||
565 | cs->myid = iif->channels; /* Set my device id */ | ||
566 | return 1; | ||
567 | } | ||
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c new file mode 100644 index 000000000000..3a81d9c65141 --- /dev/null +++ b/drivers/isdn/gigaset/interface.c | |||
@@ -0,0 +1,718 @@ | |||
1 | /* | ||
2 | * interface to user space for the gigaset driver | ||
3 | * | ||
4 | * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> | ||
5 | * | ||
6 | * ===================================================================== | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; either version 2 of | ||
10 | * the License, or (at your option) any later version. | ||
11 | * ===================================================================== | ||
12 | * Version: $Id: interface.c,v 1.14.4.15 2006/02/04 18:28:16 hjlipp Exp $ | ||
13 | * ===================================================================== | ||
14 | */ | ||
15 | |||
16 | #include "gigaset.h" | ||
17 | #include <linux/gigaset_dev.h> | ||
18 | #include <linux/tty.h> | ||
19 | #include <linux/tty_flip.h> | ||
20 | |||
21 | /*** our ioctls ***/ | ||
22 | |||
23 | static int if_lock(struct cardstate *cs, int *arg) | ||
24 | { | ||
25 | int cmd = *arg; | ||
26 | |||
27 | dbg(DEBUG_IF, "%u: if_lock (%d)", cs->minor_index, cmd); | ||
28 | |||
29 | if (cmd > 1) | ||
30 | return -EINVAL; | ||
31 | |||
32 | if (cmd < 0) { | ||
33 | *arg = atomic_read(&cs->mstate) == MS_LOCKED; //FIXME remove? | ||
34 | return 0; | ||
35 | } | ||
36 | |||
37 | if (!cmd && atomic_read(&cs->mstate) == MS_LOCKED | ||
38 | && atomic_read(&cs->connected)) { | ||
39 | cs->ops->set_modem_ctrl(cs, 0, TIOCM_DTR|TIOCM_RTS); | ||
40 | cs->ops->baud_rate(cs, B115200); | ||
41 | cs->ops->set_line_ctrl(cs, CS8); | ||
42 | cs->control_state = TIOCM_DTR|TIOCM_RTS; | ||
43 | } | ||
44 | |||
45 | cs->waiting = 1; | ||
46 | if (!gigaset_add_event(cs, &cs->at_state, EV_IF_LOCK, | ||
47 | NULL, cmd, NULL)) { | ||
48 | cs->waiting = 0; | ||
49 | return -ENOMEM; | ||
50 | } | ||
51 | |||
52 | dbg(DEBUG_CMD, "scheduling IF_LOCK"); | ||
53 | gigaset_schedule_event(cs); | ||
54 | |||
55 | wait_event(cs->waitqueue, !cs->waiting); | ||
56 | |||
57 | if (cs->cmd_result >= 0) { | ||
58 | *arg = cs->cmd_result; | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | return cs->cmd_result; | ||
63 | } | ||
64 | |||
65 | static int if_version(struct cardstate *cs, unsigned arg[4]) | ||
66 | { | ||
67 | static const unsigned version[4] = GIG_VERSION; | ||
68 | static const unsigned compat[4] = GIG_COMPAT; | ||
69 | unsigned cmd = arg[0]; | ||
70 | |||
71 | dbg(DEBUG_IF, "%u: if_version (%d)", cs->minor_index, cmd); | ||
72 | |||
73 | switch (cmd) { | ||
74 | case GIGVER_DRIVER: | ||
75 | memcpy(arg, version, sizeof version); | ||
76 | return 0; | ||
77 | case GIGVER_COMPAT: | ||
78 | memcpy(arg, compat, sizeof compat); | ||
79 | return 0; | ||
80 | case GIGVER_FWBASE: | ||
81 | cs->waiting = 1; | ||
82 | if (!gigaset_add_event(cs, &cs->at_state, EV_IF_VER, | ||
83 | NULL, 0, arg)) { | ||
84 | cs->waiting = 0; | ||
85 | return -ENOMEM; | ||
86 | } | ||
87 | |||
88 | dbg(DEBUG_CMD, "scheduling IF_VER"); | ||
89 | gigaset_schedule_event(cs); | ||
90 | |||
91 | wait_event(cs->waitqueue, !cs->waiting); | ||
92 | |||
93 | if (cs->cmd_result >= 0) | ||
94 | return 0; | ||
95 | |||
96 | return cs->cmd_result; | ||
97 | default: | ||
98 | return -EINVAL; | ||
99 | } | ||
100 | } | ||
101 | |||
102 | static int if_config(struct cardstate *cs, int *arg) | ||
103 | { | ||
104 | dbg(DEBUG_IF, "%u: if_config (%d)", cs->minor_index, *arg); | ||
105 | |||
106 | if (*arg != 1) | ||
107 | return -EINVAL; | ||
108 | |||
109 | if (atomic_read(&cs->mstate) != MS_LOCKED) | ||
110 | return -EBUSY; | ||
111 | |||
112 | *arg = 0; | ||
113 | return gigaset_enterconfigmode(cs); | ||
114 | } | ||
115 | |||
116 | /*** the terminal driver ***/ | ||
117 | /* stolen from usbserial and some other tty drivers */ | ||
118 | |||
119 | static int if_open(struct tty_struct *tty, struct file *filp); | ||
120 | static void if_close(struct tty_struct *tty, struct file *filp); | ||
121 | static int if_ioctl(struct tty_struct *tty, struct file *file, | ||
122 | unsigned int cmd, unsigned long arg); | ||
123 | static int if_write_room(struct tty_struct *tty); | ||
124 | static int if_chars_in_buffer(struct tty_struct *tty); | ||
125 | static void if_throttle(struct tty_struct *tty); | ||
126 | static void if_unthrottle(struct tty_struct *tty); | ||
127 | static void if_set_termios(struct tty_struct *tty, struct termios *old); | ||
128 | static int if_tiocmget(struct tty_struct *tty, struct file *file); | ||
129 | static int if_tiocmset(struct tty_struct *tty, struct file *file, | ||
130 | unsigned int set, unsigned int clear); | ||
131 | static int if_write(struct tty_struct *tty, | ||
132 | const unsigned char *buf, int count); | ||
133 | |||
134 | static struct tty_operations if_ops = { | ||
135 | .open = if_open, | ||
136 | .close = if_close, | ||
137 | .ioctl = if_ioctl, | ||
138 | .write = if_write, | ||
139 | .write_room = if_write_room, | ||
140 | .chars_in_buffer = if_chars_in_buffer, | ||
141 | .set_termios = if_set_termios, | ||
142 | .throttle = if_throttle, | ||
143 | .unthrottle = if_unthrottle, | ||
144 | #if 0 | ||
145 | .break_ctl = serial_break, | ||
146 | #endif | ||
147 | .tiocmget = if_tiocmget, | ||
148 | .tiocmset = if_tiocmset, | ||
149 | }; | ||
150 | |||
151 | static int if_open(struct tty_struct *tty, struct file *filp) | ||
152 | { | ||
153 | struct cardstate *cs; | ||
154 | unsigned long flags; | ||
155 | |||
156 | dbg(DEBUG_IF, "%d+%d: %s()", tty->driver->minor_start, tty->index, | ||
157 | __FUNCTION__); | ||
158 | |||
159 | tty->driver_data = NULL; | ||
160 | |||
161 | cs = gigaset_get_cs_by_tty(tty); | ||
162 | if (!cs) | ||
163 | return -ENODEV; | ||
164 | |||
165 | if (down_interruptible(&cs->sem)) | ||
166 | return -ERESTARTSYS; // FIXME -EINTR? | ||
167 | tty->driver_data = cs; | ||
168 | |||
169 | ++cs->open_count; | ||
170 | |||
171 | if (cs->open_count == 1) { | ||
172 | spin_lock_irqsave(&cs->lock, flags); | ||
173 | cs->tty = tty; | ||
174 | spin_unlock_irqrestore(&cs->lock, flags); | ||
175 | tty->low_latency = 1; //FIXME test | ||
176 | //FIXME | ||
177 | } | ||
178 | |||
179 | up(&cs->sem); | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static void if_close(struct tty_struct *tty, struct file *filp) | ||
184 | { | ||
185 | struct cardstate *cs; | ||
186 | unsigned long flags; | ||
187 | |||
188 | cs = (struct cardstate *) tty->driver_data; | ||
189 | if (!cs) { | ||
190 | err("cs==NULL in %s", __FUNCTION__); | ||
191 | return; | ||
192 | } | ||
193 | |||
194 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
195 | |||
196 | down(&cs->sem); | ||
197 | |||
198 | if (!cs->open_count) | ||
199 | warn("%s: device not opened", __FUNCTION__); | ||
200 | else { | ||
201 | if (!--cs->open_count) { | ||
202 | spin_lock_irqsave(&cs->lock, flags); | ||
203 | cs->tty = NULL; | ||
204 | spin_unlock_irqrestore(&cs->lock, flags); | ||
205 | //FIXME | ||
206 | } | ||
207 | } | ||
208 | |||
209 | up(&cs->sem); | ||
210 | } | ||
211 | |||
212 | static int if_ioctl(struct tty_struct *tty, struct file *file, | ||
213 | unsigned int cmd, unsigned long arg) | ||
214 | { | ||
215 | struct cardstate *cs; | ||
216 | int retval = -ENODEV; | ||
217 | int int_arg; | ||
218 | unsigned char buf[6]; | ||
219 | unsigned version[4]; | ||
220 | |||
221 | cs = (struct cardstate *) tty->driver_data; | ||
222 | if (!cs) { | ||
223 | err("cs==NULL in %s", __FUNCTION__); | ||
224 | return -ENODEV; | ||
225 | } | ||
226 | |||
227 | dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __FUNCTION__, cmd); | ||
228 | |||
229 | if (down_interruptible(&cs->sem)) | ||
230 | return -ERESTARTSYS; // FIXME -EINTR? | ||
231 | |||
232 | if (!cs->open_count) | ||
233 | warn("%s: device not opened", __FUNCTION__); | ||
234 | else { | ||
235 | retval = 0; | ||
236 | switch (cmd) { | ||
237 | case GIGASET_REDIR: | ||
238 | retval = get_user(int_arg, (int __user *) arg); | ||
239 | if (retval >= 0) | ||
240 | retval = if_lock(cs, &int_arg); | ||
241 | if (retval >= 0) | ||
242 | retval = put_user(int_arg, (int __user *) arg); | ||
243 | break; | ||
244 | case GIGASET_CONFIG: | ||
245 | retval = get_user(int_arg, (int __user *) arg); | ||
246 | if (retval >= 0) | ||
247 | retval = if_config(cs, &int_arg); | ||
248 | if (retval >= 0) | ||
249 | retval = put_user(int_arg, (int __user *) arg); | ||
250 | break; | ||
251 | case GIGASET_BRKCHARS: | ||
252 | //FIXME test if MS_LOCKED | ||
253 | gigaset_dbg_buffer(DEBUG_IF, "GIGASET_BRKCHARS", | ||
254 | 6, (const unsigned char *) arg, 1); | ||
255 | if (!atomic_read(&cs->connected)) { | ||
256 | dbg(DEBUG_ANY, "can't communicate with unplugged device"); | ||
257 | retval = -ENODEV; | ||
258 | break; | ||
259 | } | ||
260 | retval = copy_from_user(&buf, | ||
261 | (const unsigned char __user *) arg, 6) | ||
262 | ? -EFAULT : 0; | ||
263 | if (retval >= 0) | ||
264 | retval = cs->ops->brkchars(cs, buf); | ||
265 | break; | ||
266 | case GIGASET_VERSION: | ||
267 | retval = copy_from_user(version, (unsigned __user *) arg, | ||
268 | sizeof version) ? -EFAULT : 0; | ||
269 | if (retval >= 0) | ||
270 | retval = if_version(cs, version); | ||
271 | if (retval >= 0) | ||
272 | retval = copy_to_user((unsigned __user *) arg, version, | ||
273 | sizeof version) | ||
274 | ? -EFAULT : 0; | ||
275 | break; | ||
276 | default: | ||
277 | dbg(DEBUG_ANY, "%s: arg not supported - 0x%04x", | ||
278 | __FUNCTION__, cmd); | ||
279 | retval = -ENOIOCTLCMD; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | up(&cs->sem); | ||
284 | |||
285 | return retval; | ||
286 | } | ||
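From user space these ioctls are issued on the driver's tty node. A minimal sketch of locking the device for raw access via GIGASET_REDIR (handled by if_lock() above); the device path is an assumption, since the actual name depends on the devname prefix the hardware driver passes to gigaset_if_initdriver():

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/gigaset_dev.h>  /* GIGASET_REDIR, GIGASET_CONFIG, ... */

    int main(void)
    {
        int state = 1;  /* 1: lock the device for raw access, 0: unlock */
        int fd = open("/dev/ttyGU0", O_RDWR);  /* assumed device node name */

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, GIGASET_REDIR, &state) < 0)
            perror("GIGASET_REDIR");
        close(fd);
        return 0;
    }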
287 | |||
288 | static int if_tiocmget(struct tty_struct *tty, struct file *file) | ||
289 | { | ||
290 | struct cardstate *cs; | ||
291 | int retval; | ||
292 | |||
293 | cs = (struct cardstate *) tty->driver_data; | ||
294 | if (!cs) { | ||
295 | err("cs==NULL in %s", __FUNCTION__); | ||
296 | return -ENODEV; | ||
297 | } | ||
298 | |||
299 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
300 | |||
301 | if (down_interruptible(&cs->sem)) | ||
302 | return -ERESTARTSYS; // FIXME -EINTR? | ||
303 | |||
304 | // FIXME read from device? | ||
305 | retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR); | ||
306 | |||
307 | up(&cs->sem); | ||
308 | |||
309 | return retval; | ||
310 | } | ||
311 | |||
312 | static int if_tiocmset(struct tty_struct *tty, struct file *file, | ||
313 | unsigned int set, unsigned int clear) | ||
314 | { | ||
315 | struct cardstate *cs; | ||
316 | int retval; | ||
317 | unsigned mc; | ||
318 | |||
319 | cs = (struct cardstate *) tty->driver_data; | ||
320 | if (!cs) { | ||
321 | err("cs==NULL in %s", __FUNCTION__); | ||
322 | return -ENODEV; | ||
323 | } | ||
324 | |||
325 | dbg(DEBUG_IF, | ||
326 | "%u: %s(0x%x, 0x%x)", cs->minor_index, __FUNCTION__, set, clear); | ||
327 | |||
328 | if (down_interruptible(&cs->sem)) | ||
329 | return -ERESTARTSYS; // FIXME -EINTR? | ||
330 | |||
331 | if (!atomic_read(&cs->connected)) { | ||
332 | dbg(DEBUG_ANY, "can't communicate with unplugged device"); | ||
333 | retval = -ENODEV; | ||
334 | } else { | ||
335 | mc = (cs->control_state | set) & ~clear & (TIOCM_RTS|TIOCM_DTR); | ||
336 | retval = cs->ops->set_modem_ctrl(cs, cs->control_state, mc); | ||
337 | cs->control_state = mc; | ||
338 | } | ||
339 | |||
340 | up(&cs->sem); | ||
341 | |||
342 | return retval; | ||
343 | } | ||
344 | |||
345 | static int if_write(struct tty_struct *tty, const unsigned char *buf, int count) | ||
346 | { | ||
347 | struct cardstate *cs; | ||
348 | int retval = -ENODEV; | ||
349 | |||
350 | cs = (struct cardstate *) tty->driver_data; | ||
351 | if (!cs) { | ||
352 | err("cs==NULL in %s", __FUNCTION__); | ||
353 | return -ENODEV; | ||
354 | } | ||
355 | |||
356 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
357 | |||
358 | if (down_interruptible(&cs->sem)) | ||
359 | return -ERESTARTSYS; // FIXME -EINTR? | ||
360 | |||
361 | if (!cs->open_count) | ||
362 | warn("%s: device not opened", __FUNCTION__); | ||
363 | else if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
364 | warn("can't write to unlocked device"); | ||
365 | retval = -EBUSY; | ||
366 | } else if (!atomic_read(&cs->connected)) { | ||
367 | dbg(DEBUG_ANY, "can't write to unplugged device"); | ||
368 | retval = -EBUSY; //FIXME | ||
369 | } else { | ||
370 | retval = cs->ops->write_cmd(cs, buf, count, | ||
371 | &cs->if_wake_tasklet); | ||
372 | } | ||
373 | |||
374 | up(&cs->sem); | ||
375 | |||
376 | return retval; | ||
377 | } | ||
378 | |||
379 | static int if_write_room(struct tty_struct *tty) | ||
380 | { | ||
381 | struct cardstate *cs; | ||
382 | int retval = -ENODEV; | ||
383 | |||
384 | cs = (struct cardstate *) tty->driver_data; | ||
385 | if (!cs) { | ||
386 | err("cs==NULL in %s", __FUNCTION__); | ||
387 | return -ENODEV; | ||
388 | } | ||
389 | |||
390 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
391 | |||
392 | if (down_interruptible(&cs->sem)) | ||
393 | return -ERESTARTSYS; // FIXME -EINTR? | ||
394 | |||
395 | if (!cs->open_count) | ||
396 | warn("%s: device not opened", __FUNCTION__); | ||
397 | else if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
398 | warn("can't write to unlocked device"); | ||
399 | retval = -EBUSY; //FIXME | ||
400 | } else if (!atomic_read(&cs->connected)) { | ||
401 | dbg(DEBUG_ANY, "can't write to unplugged device"); | ||
402 | retval = -EBUSY; //FIXME | ||
403 | } else | ||
404 | retval = cs->ops->write_room(cs); | ||
405 | |||
406 | up(&cs->sem); | ||
407 | |||
408 | return retval; | ||
409 | } | ||
410 | |||
411 | static int if_chars_in_buffer(struct tty_struct *tty) | ||
412 | { | ||
413 | struct cardstate *cs; | ||
414 | int retval = -ENODEV; | ||
415 | |||
416 | cs = (struct cardstate *) tty->driver_data; | ||
417 | if (!cs) { | ||
418 | err("cs==NULL in %s", __FUNCTION__); | ||
419 | return -ENODEV; | ||
420 | } | ||
421 | |||
422 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
423 | |||
424 | if (down_interruptible(&cs->sem)) | ||
425 | return -ERESTARTSYS; // FIXME -EINTR? | ||
426 | |||
427 | if (!cs->open_count) | ||
428 | warn("%s: device not opened", __FUNCTION__); | ||
429 | else if (atomic_read(&cs->mstate) != MS_LOCKED) { | ||
430 | warn("can't write to unlocked device"); | ||
431 | retval = -EBUSY; | ||
432 | } else if (!atomic_read(&cs->connected)) { | ||
433 | dbg(DEBUG_ANY, "can't write to unplugged device"); | ||
434 | retval = -EBUSY; //FIXME | ||
435 | } else | ||
436 | retval = cs->ops->chars_in_buffer(cs); | ||
437 | |||
438 | up(&cs->sem); | ||
439 | |||
440 | return retval; | ||
441 | } | ||
442 | |||
443 | static void if_throttle(struct tty_struct *tty) | ||
444 | { | ||
445 | struct cardstate *cs; | ||
446 | |||
447 | cs = (struct cardstate *) tty->driver_data; | ||
448 | if (!cs) { | ||
449 | err("cs==NULL in %s", __FUNCTION__); | ||
450 | return; | ||
451 | } | ||
452 | |||
453 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
454 | |||
455 | down(&cs->sem); | ||
456 | |||
457 | if (!cs->open_count) | ||
458 | warn("%s: device not opened", __FUNCTION__); | ||
459 | else { | ||
460 | //FIXME | ||
461 | } | ||
462 | |||
463 | up(&cs->sem); | ||
464 | } | ||
465 | |||
466 | static void if_unthrottle(struct tty_struct *tty) | ||
467 | { | ||
468 | struct cardstate *cs; | ||
469 | |||
470 | cs = (struct cardstate *) tty->driver_data; | ||
471 | if (!cs) { | ||
472 | err("cs==NULL in %s", __FUNCTION__); | ||
473 | return; | ||
474 | } | ||
475 | |||
476 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
477 | |||
478 | down(&cs->sem); | ||
479 | |||
480 | if (!cs->open_count) | ||
481 | warn("%s: device not opened", __FUNCTION__); | ||
482 | else { | ||
483 | //FIXME | ||
484 | } | ||
485 | |||
486 | up(&cs->sem); | ||
487 | } | ||
488 | |||
489 | static void if_set_termios(struct tty_struct *tty, struct termios *old) | ||
490 | { | ||
491 | struct cardstate *cs; | ||
492 | unsigned int iflag; | ||
493 | unsigned int cflag; | ||
494 | unsigned int old_cflag; | ||
495 | unsigned int control_state, new_state; | ||
496 | |||
497 | cs = (struct cardstate *) tty->driver_data; | ||
498 | if (!cs) { | ||
499 | err("cs==NULL in %s", __FUNCTION__); | ||
500 | return; | ||
501 | } | ||
502 | |||
503 | dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __FUNCTION__); | ||
504 | |||
505 | down(&cs->sem); | ||
506 | |||
507 | if (!cs->open_count) { | ||
508 | warn("%s: device not opened", __FUNCTION__); | ||
509 | goto out; | ||
510 | } | ||
511 | |||
512 | if (!atomic_read(&cs->connected)) { | ||
513 | dbg(DEBUG_ANY, "can't communicate with unplugged device"); | ||
514 | goto out; | ||
515 | } | ||
516 | |||
517 | // stolen from mct_u232.c | ||
518 | iflag = tty->termios->c_iflag; | ||
519 | cflag = tty->termios->c_cflag; | ||
520 | old_cflag = old ? old->c_cflag : cflag; //FIXME? | ||
521 | dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", cs->minor_index, | ||
522 | iflag, cflag, old_cflag); | ||
523 | |||
524 | /* get a local copy of the current port settings */ | ||
525 | control_state = cs->control_state; | ||
526 | |||
527 | /* | ||
528 | * Update baud rate. | ||
529 | * Do not attempt to cache old rates and skip settings, | ||
530 | * disconnects screw such tricks up completely. | ||
531 | * Premature optimization is the root of all evil. | ||
532 | */ | ||
533 | |||
534 | /* reassert DTR and (maybe) RTS on transition from B0 */ | ||
535 | if ((old_cflag & CBAUD) == B0) { | ||
536 | new_state = control_state | TIOCM_DTR; | ||
537 | /* don't set RTS if using hardware flow control */ | ||
538 | if (!(old_cflag & CRTSCTS)) | ||
539 | new_state |= TIOCM_RTS; | ||
540 | dbg(DEBUG_IF, "%u: from B0 - set DTR%s", cs->minor_index, | ||
541 | (new_state & TIOCM_RTS) ? " only" : "/RTS"); | ||
542 | cs->ops->set_modem_ctrl(cs, control_state, new_state); | ||
543 | control_state = new_state; | ||
544 | } | ||
545 | |||
546 | cs->ops->baud_rate(cs, cflag & CBAUD); | ||
547 | |||
548 | if ((cflag & CBAUD) == B0) { | ||
549 | /* Drop RTS and DTR */ | ||
550 | dbg(DEBUG_IF, "%u: to B0 - drop DTR/RTS", cs->minor_index); | ||
551 | new_state = control_state & ~(TIOCM_DTR | TIOCM_RTS); | ||
552 | cs->ops->set_modem_ctrl(cs, control_state, new_state); | ||
553 | control_state = new_state; | ||
554 | } | ||
555 | |||
556 | /* | ||
557 | * Update line control register (LCR) | ||
558 | */ | ||
559 | |||
560 | cs->ops->set_line_ctrl(cs, cflag); | ||
561 | |||
562 | #if 0 | ||
563 | //FIXME this hangs M101 [ts 2005-03-09] | ||
564 | //FIXME do we need this? | ||
565 | /* | ||
566 | * Set flow control: well, I do not really know how to handle DTR/RTS. | ||
567 | * Just do what we have seen with SniffUSB on Win98. | ||
568 | */ | ||
569 | /* Drop DTR/RTS if no flow control otherwise assert */ | ||
570 | dbg(DEBUG_IF, "%u: control_state %x", cs->minor_index, control_state); | ||
571 | new_state = control_state; | ||
572 | if ((iflag & IXOFF) || (iflag & IXON) || (cflag & CRTSCTS)) | ||
573 | new_state |= TIOCM_DTR | TIOCM_RTS; | ||
574 | else | ||
575 | new_state &= ~(TIOCM_DTR | TIOCM_RTS); | ||
576 | if (new_state != control_state) { | ||
577 | dbg(DEBUG_IF, "%u: new_state %x", cs->minor_index, new_state); | ||
578 | gigaset_set_modem_ctrl(cs, control_state, new_state); // FIXME: mct_u232.c sets the old state here. is this a bug? | ||
579 | control_state = new_state; | ||
580 | } | ||
581 | #endif | ||
582 | |||
583 | /* save off the modified port settings */ | ||
584 | cs->control_state = control_state; | ||
585 | |||
586 | out: | ||
587 | up(&cs->sem); | ||
588 | } | ||
589 | |||
590 | |||
591 | /* wakeup tasklet for the write operation */ | ||
592 | static void if_wake(unsigned long data) | ||
593 | { | ||
594 | struct cardstate *cs = (struct cardstate *) data; | ||
595 | struct tty_struct *tty; | ||
596 | |||
597 | tty = cs->tty; | ||
598 | if (!tty) | ||
599 | return; | ||
600 | |||
601 | if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) && | ||
602 | tty->ldisc.write_wakeup) { | ||
603 | dbg(DEBUG_IF, "write wakeup call"); | ||
604 | tty->ldisc.write_wakeup(tty); | ||
605 | } | ||
606 | |||
607 | wake_up_interruptible(&tty->write_wait); | ||
608 | } | ||
609 | |||
610 | /*** interface to common ***/ | ||
611 | |||
612 | void gigaset_if_init(struct cardstate *cs) | ||
613 | { | ||
614 | struct gigaset_driver *drv; | ||
615 | |||
616 | drv = cs->driver; | ||
617 | if (!drv->have_tty) | ||
618 | return; | ||
619 | |||
620 | tasklet_init(&cs->if_wake_tasklet, &if_wake, (unsigned long) cs); | ||
621 | tty_register_device(drv->tty, cs->minor_index, NULL); | ||
622 | } | ||
623 | |||
624 | void gigaset_if_free(struct cardstate *cs) | ||
625 | { | ||
626 | struct gigaset_driver *drv; | ||
627 | |||
628 | drv = cs->driver; | ||
629 | if (!drv->have_tty) | ||
630 | return; | ||
631 | |||
632 | tasklet_disable(&cs->if_wake_tasklet); | ||
633 | tasklet_kill(&cs->if_wake_tasklet); | ||
634 | tty_unregister_device(drv->tty, cs->minor_index); | ||
635 | } | ||
636 | |||
637 | void gigaset_if_receive(struct cardstate *cs, | ||
638 | unsigned char *buffer, size_t len) | ||
639 | { | ||
640 | unsigned long flags; | ||
641 | struct tty_struct *tty; | ||
642 | |||
643 | spin_lock_irqsave(&cs->lock, flags); | ||
644 | if ((tty = cs->tty) == NULL) | ||
645 | dbg(DEBUG_ANY, "receive on closed device"); | ||
646 | else { | ||
647 | tty_buffer_request_room(tty, len); | ||
648 | tty_insert_flip_string(tty, buffer, len); | ||
649 | tty_flip_buffer_push(tty); | ||
650 | } | ||
651 | spin_unlock_irqrestore(&cs->lock, flags); | ||
652 | } | ||
653 | EXPORT_SYMBOL_GPL(gigaset_if_receive); | ||
654 | |||
655 | /* gigaset_if_initdriver | ||
656 | * Initialize tty interface. | ||
657 | * parameters: | ||
658 | * drv Driver | ||
659 | * procname Name of the driver (e.g. for /proc/tty/drivers) | ||
660 | * devname Name of the device files (prefix without minor number) | ||
661 | * devfsname Devfs name of the device files without %d | ||
662 | */ | ||
663 | void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname, | ||
664 | const char *devname, const char *devfsname) | ||
665 | { | ||
666 | unsigned minors = drv->minors; | ||
667 | int ret; | ||
668 | struct tty_driver *tty; | ||
669 | |||
670 | drv->have_tty = 0; | ||
671 | |||
672 | if ((drv->tty = alloc_tty_driver(minors)) == NULL) | ||
673 | goto enomem; | ||
674 | tty = drv->tty; | ||
675 | |||
676 | tty->magic = TTY_DRIVER_MAGIC; | ||
677 | tty->major = GIG_MAJOR; | ||
678 | tty->type = TTY_DRIVER_TYPE_SERIAL; | ||
679 | tty->subtype = SERIAL_TYPE_NORMAL; | ||
680 | tty->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_NO_DEVFS; | ||
681 | |||
682 | tty->driver_name = procname; | ||
683 | tty->name = devname; | ||
684 | tty->minor_start = drv->minor; | ||
685 | tty->num = drv->minors; | ||
686 | |||
687 | tty->owner = THIS_MODULE; | ||
688 | tty->devfs_name = devfsname; | ||
689 | |||
690 | tty->init_termios = tty_std_termios; //FIXME | ||
691 | tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME | ||
692 | tty_set_operations(tty, &if_ops); | ||
693 | |||
694 | ret = tty_register_driver(tty); | ||
695 | if (ret < 0) { | ||
696 | warn("failed to register tty driver (error %d)", ret); | ||
697 | goto error; | ||
698 | } | ||
699 | dbg(DEBUG_IF, "tty driver initialized"); | ||
700 | drv->have_tty = 1; | ||
701 | return; | ||
702 | |||
703 | enomem: | ||
704 | warn("could not allocate tty structures"); | ||
705 | error: | ||
706 | if (drv->tty) | ||
707 | put_tty_driver(drv->tty); | ||
708 | } | ||
709 | |||
710 | void gigaset_if_freedriver(struct gigaset_driver *drv) | ||
711 | { | ||
712 | if (!drv->have_tty) | ||
713 | return; | ||
714 | |||
715 | drv->have_tty = 0; | ||
716 | tty_unregister_driver(drv->tty); | ||
717 | put_tty_driver(drv->tty); | ||
718 | } | ||
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c new file mode 100644 index 000000000000..5744eb91b315 --- /dev/null +++ b/drivers/isdn/gigaset/isocdata.c | |||
@@ -0,0 +1,1009 @@ | |||
1 | /* | ||
2 | * Common data handling layer for bas_gigaset | ||
3 | * | ||
4 | * Copyright (c) 2005 by Tilman Schmidt <tilman@imap.cc>, | ||
5 | * Hansjoerg Lipp <hjlipp@web.de>. | ||
6 | * | ||
7 | * ===================================================================== | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * ===================================================================== | ||
13 | * ToDo: ... | ||
14 | * ===================================================================== | ||
15 | * Version: $Id: isocdata.c,v 1.2.2.5 2005/11/13 23:05:19 hjlipp Exp $ | ||
16 | * ===================================================================== | ||
17 | */ | ||
18 | |||
19 | #include "gigaset.h" | ||
20 | #include <linux/crc-ccitt.h> | ||
21 | |||
22 | /* access methods for isowbuf_t */ | ||
23 | /* ============================ */ | ||
24 | |||
25 | /* initialize buffer structure | ||
26 | */ | ||
27 | void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle) | ||
28 | { | ||
29 | atomic_set(&iwb->read, 0); | ||
30 | atomic_set(&iwb->nextread, 0); | ||
31 | atomic_set(&iwb->write, 0); | ||
32 | atomic_set(&iwb->writesem, 1); | ||
33 | iwb->wbits = 0; | ||
34 | iwb->idle = idle; | ||
35 | memset(iwb->data + BAS_OUTBUFSIZE, idle, BAS_OUTBUFPAD); | ||
36 | } | ||
37 | |||
38 | /* compute number of bytes which can be appended to buffer | ||
39 | * so that there is still room to append a maximum frame of flags | ||
40 | */ | ||
41 | static inline int isowbuf_freebytes(struct isowbuf_t *iwb) | ||
42 | { | ||
43 | int read, write, freebytes; | ||
44 | |||
45 | read = atomic_read(&iwb->read); | ||
46 | write = atomic_read(&iwb->write); | ||
47 | if ((freebytes = read - write) > 0) { | ||
48 | /* no wraparound: need padding space within regular area */ | ||
49 | return freebytes - BAS_OUTBUFPAD; | ||
50 | } else if (read < BAS_OUTBUFPAD) { | ||
51 | /* wraparound: can use space up to end of regular area */ | ||
52 | return BAS_OUTBUFSIZE - write; | ||
53 | } else { | ||
54 | /* following the wraparound yields more space */ | ||
55 | return freebytes + BAS_OUTBUFSIZE - BAS_OUTBUFPAD; | ||
56 | } | ||
57 | } | ||
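A worked example of the three cases, on plain ints and with purely illustrative buffer constants (not the driver's BAS_OUTBUFSIZE/BAS_OUTBUFPAD values):

    #include <stdio.h>

    #define OUTBUFSIZE 1024  /* illustrative only, not the driver's BAS_OUTBUFSIZE */
    #define OUTBUFPAD    64  /* illustrative only, not the driver's BAS_OUTBUFPAD */

    /* same case analysis as isowbuf_freebytes(), on plain ints */
    static int freebytes(int read, int write)
    {
        int fb = read - write;

        if (fb > 0)
            return fb - OUTBUFPAD;          /* no wraparound: keep pad space free */
        if (read < OUTBUFPAD)
            return OUTBUFSIZE - write;      /* wraparound: stop at end of regular area */
        return fb + OUTBUFSIZE - OUTBUFPAD; /* following the wraparound yields more space */
    }

    int main(void)
    {
        printf("%d\n", freebytes(800, 100)); /* 800 - 100 - 64        = 636 */
        printf("%d\n", freebytes( 20, 500)); /* read < pad: 1024 - 500 = 524 */
        printf("%d\n", freebytes(200, 500)); /* 200 - 500 + 1024 - 64  = 660 */
        return 0;
    }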
58 | |||
59 | /* compare two offsets within the buffer | ||
60 | * The buffer is seen as circular, with the read position as start | ||
61 | * returns -1/0/1 if position a </=/> position b without crossing 'read' | ||
62 | */ | ||
63 | static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b) | ||
64 | { | ||
65 | int read; | ||
66 | if (a == b) | ||
67 | return 0; | ||
68 | read = atomic_read(&iwb->read); | ||
69 | if (a < b) { | ||
70 | if (a < read && read <= b) | ||
71 | return +1; | ||
72 | else | ||
73 | return -1; | ||
74 | } else { | ||
75 | if (b < read && read <= a) | ||
76 | return -1; | ||
77 | else | ||
78 | return +1; | ||
79 | } | ||
80 | } | ||
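A quick worked example of the circular ordering: with read == 10 the buffer is traversed 10, 11, 12, ..., wrap, 0, 1, 2, 3, so offset 12 precedes offset 3. A sketch of the same case analysis on plain ints, with 'read' passed explicitly:

    #include <stdio.h>

    /* same ordering test as isowbuf_poscmp(), with the read position as an argument */
    static int poscmp(int read, int a, int b)
    {
        if (a == b)
            return 0;
        if (a < b)
            return (a < read && read <= b) ? +1 : -1;
        return (b < read && read <= a) ? -1 : +1;
    }

    int main(void)
    {
        /* read = 10: walking order is 10, 11, 12, ..., wrap, 0, 1, 2, 3, ... */
        printf("%d\n", poscmp(10, 12,  3)); /* -1: 12 precedes 3 */
        printf("%d\n", poscmp(10,  3, 12)); /* +1: 3 follows 12 */
        printf("%d\n", poscmp(10, 11, 15)); /* -1: plain ordering, no wrap */
        return 0;
    }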
81 | |||
82 | /* start writing | ||
83 | * acquire the write semaphore | ||
84 | * return true if acquired, false if busy | ||
85 | */ | ||
86 | static inline int isowbuf_startwrite(struct isowbuf_t *iwb) | ||
87 | { | ||
88 | if (!atomic_dec_and_test(&iwb->writesem)) { | ||
89 | atomic_inc(&iwb->writesem); | ||
90 | dbg(DEBUG_ISO, | ||
91 | "%s: couldn't acquire iso write semaphore", __func__); | ||
92 | return 0; | ||
93 | } | ||
94 | #ifdef CONFIG_GIGASET_DEBUG | ||
95 | dbg(DEBUG_ISO, | ||
96 | "%s: acquired iso write semaphore, data[write]=%02x, nbits=%d", | ||
97 | __func__, iwb->data[atomic_read(&iwb->write)], iwb->wbits); | ||
98 | #endif | ||
99 | return 1; | ||
100 | } | ||
101 | |||
102 | /* finish writing | ||
103 | * release the write semaphore | ||
104 | * returns the current write position | ||
105 | */ | ||
106 | static inline int isowbuf_donewrite(struct isowbuf_t *iwb) | ||
107 | { | ||
108 | int write = atomic_read(&iwb->write); | ||
109 | atomic_inc(&iwb->writesem); | ||
110 | return write; | ||
111 | } | ||
112 | |||
113 | /* append bits to buffer without any checks | ||
114 | * - data contains bits to append, starting at LSB | ||
115 | * - nbits is number of bits to append (0..24) | ||
116 | * must be called with the write semaphore held | ||
117 | * If more than nbits bits are set in data, the extraneous bits are set in the | ||
118 | * buffer too, but the write position is only advanced by nbits. | ||
119 | */ | ||
120 | static inline void isowbuf_putbits(struct isowbuf_t *iwb, u32 data, int nbits) | ||
121 | { | ||
122 | int write = atomic_read(&iwb->write); | ||
123 | data <<= iwb->wbits; | ||
124 | data |= iwb->data[write]; | ||
125 | nbits += iwb->wbits; | ||
126 | while (nbits >= 8) { | ||
127 | iwb->data[write++] = data & 0xff; | ||
128 | write %= BAS_OUTBUFSIZE; | ||
129 | data >>= 8; | ||
130 | nbits -= 8; | ||
131 | } | ||
132 | iwb->wbits = nbits; | ||
133 | iwb->data[write] = data & 0xff; | ||
134 | atomic_set(&iwb->write, write); | ||
135 | } | ||
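A worked example of the LSB-first append: with three bits already pending (wbits == 3), nine new bits are shifted up by three, merged with the partial byte, one full byte is committed and four bits remain pending. A sketch on a plain array with invented values; the wraparound modulo of the real buffer is omitted:

    #include <stdio.h>

    /* LSB-first bit append, mirroring isowbuf_putbits() on a plain array */
    static void putbits(unsigned char *buf, int *write, int *wbits,
                        unsigned data, int nbits)
    {
        data <<= *wbits;        /* make room for the pending low bits */
        data |= buf[*write];    /* merge with the partial byte */
        nbits += *wbits;
        while (nbits >= 8) {
            buf[(*write)++] = data & 0xff;  /* commit a full byte */
            data >>= 8;
            nbits -= 8;
        }
        *wbits = nbits;         /* bits left over in the new partial byte */
        buf[*write] = data & 0xff;
    }

    int main(void)
    {
        unsigned char buf[8] = { 0x05 };    /* 3 pending bits: 1,0,1 (LSB first) */
        int write = 0, wbits = 3;

        putbits(buf, &write, &wbits, 0x1ab, 9); /* append 9 bits, LSB first */

        /* prints buf[0]=0x5d buf[1]=0x0d write=1 wbits=4:
         * one full byte committed, four bits left pending */
        printf("buf[0]=0x%02x buf[1]=0x%02x write=%d wbits=%d\n",
               buf[0], buf[1], write, wbits);
        return 0;
    }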
136 | |||
137 | /* put final flag on HDLC bitstream | ||
138 | * also sets the idle fill byte to the correspondingly shifted flag pattern | ||
139 | * must be called with the write semaphore held | ||
140 | */ | ||
141 | static inline void isowbuf_putflag(struct isowbuf_t *iwb) | ||
142 | { | ||
143 | int write; | ||
144 | |||
145 | /* add two flags, thus reliably covering one byte */ | ||
146 | isowbuf_putbits(iwb, 0x7e7e, 8); | ||
147 | /* recover the idle flag byte */ | ||
148 | write = atomic_read(&iwb->write); | ||
149 | iwb->idle = iwb->data[write]; | ||
150 | dbg(DEBUG_ISO, "idle fill byte %02x", iwb->idle); | ||
151 | /* mask extraneous bits in buffer */ | ||
152 | iwb->data[write] &= (1 << iwb->wbits) - 1; | ||
153 | } | ||
154 | |||
155 | /* retrieve a block of bytes for sending | ||
156 | * The requested number of bytes is provided as a contiguous block. | ||
157 | * If necessary, the frame is filled to the requested number of bytes | ||
158 | * with the idle value. | ||
159 | * returns offset to frame, < 0 on busy or error | ||
160 | */ | ||
161 | int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size) | ||
162 | { | ||
163 | int read, write, limit, src, dst; | ||
164 | unsigned char pbyte; | ||
165 | |||
166 | read = atomic_read(&iwb->nextread); | ||
167 | write = atomic_read(&iwb->write); | ||
168 | if (likely(read == write)) { | ||
169 | //dbg(DEBUG_STREAM, "%s: send buffer empty", __func__); | ||
170 | /* return idle frame */ | ||
171 | return read < BAS_OUTBUFPAD ? | ||
172 | BAS_OUTBUFSIZE : read - BAS_OUTBUFPAD; | ||
173 | } | ||
174 | |||
175 | limit = read + size; | ||
176 | dbg(DEBUG_STREAM, | ||
177 | "%s: read=%d write=%d limit=%d", __func__, read, write, limit); | ||
178 | #ifdef CONFIG_GIGASET_DEBUG | ||
179 | if (unlikely(size < 0 || size > BAS_OUTBUFPAD)) { | ||
180 | err("invalid size %d", size); | ||
181 | return -EINVAL; | ||
182 | } | ||
183 | src = atomic_read(&iwb->read); | ||
184 | if (unlikely(limit > BAS_OUTBUFSIZE + BAS_OUTBUFPAD || | ||
185 | (read < src && limit >= src))) { | ||
186 | err("isoc write buffer frame reservation violated"); | ||
187 | return -EFAULT; | ||
188 | } | ||
189 | #endif | ||
190 | |||
191 | if (read < write) { | ||
192 | /* no wraparound in valid data */ | ||
193 | if (limit >= write) { | ||
194 | /* append idle frame */ | ||
195 | if (!isowbuf_startwrite(iwb)) | ||
196 | return -EBUSY; | ||
197 | /* write position could have changed */ | ||
198 | if (limit >= (write = atomic_read(&iwb->write))) { | ||
199 | pbyte = iwb->data[write]; /* save partial byte */ | ||
200 | limit = write + BAS_OUTBUFPAD; | ||
201 | dbg(DEBUG_STREAM, | ||
202 | "%s: filling %d->%d with %02x", | ||
203 | __func__, write, limit, iwb->idle); | ||
204 | if (write + BAS_OUTBUFPAD < BAS_OUTBUFSIZE) | ||
205 | memset(iwb->data + write, iwb->idle, | ||
206 | BAS_OUTBUFPAD); | ||
207 | else { | ||
208 | /* wraparound, fill entire pad area */ | ||
209 | memset(iwb->data + write, iwb->idle, | ||
210 | BAS_OUTBUFSIZE + BAS_OUTBUFPAD | ||
211 | - write); | ||
212 | limit = 0; | ||
213 | } | ||
214 | dbg(DEBUG_STREAM, "%s: restoring %02x at %d", | ||
215 | __func__, pbyte, limit); | ||
216 | iwb->data[limit] = pbyte; /* restore partial byte */ | ||
217 | atomic_set(&iwb->write, limit); | ||
218 | } | ||
219 | isowbuf_donewrite(iwb); | ||
220 | } | ||
221 | } else { | ||
222 | /* valid data wraparound */ | ||
223 | if (limit >= BAS_OUTBUFSIZE) { | ||
224 | /* copy wrapped part into pad area */ | ||
225 | src = 0; | ||
226 | dst = BAS_OUTBUFSIZE; | ||
227 | while (dst < limit && src < write) | ||
228 | iwb->data[dst++] = iwb->data[src++]; | ||
229 | if (dst <= limit) { | ||
230 | /* fill pad area with idle byte */ | ||
231 | memset(iwb->data + dst, iwb->idle, | ||
232 | BAS_OUTBUFSIZE + BAS_OUTBUFPAD - dst); | ||
233 | } | ||
234 | limit = src; | ||
235 | } | ||
236 | } | ||
237 | atomic_set(&iwb->nextread, limit); | ||
238 | return read; | ||
239 | } | ||
240 | |||
241 | /* dump_bytes | ||
242 | * write hex bytes to syslog for debugging | ||
243 | */ | ||
244 | static inline void dump_bytes(enum debuglevel level, const char *tag, | ||
245 | unsigned char *bytes, int count) | ||
246 | { | ||
247 | #ifdef CONFIG_GIGASET_DEBUG | ||
248 | unsigned char c; | ||
249 | static char dbgline[3 * 32 + 1]; | ||
250 | static const char hexdigit[] = "0123456789abcdef"; | ||
251 | int i = 0; | ||
252 | IFNULLRET(tag); | ||
253 | IFNULLRET(bytes); | ||
254 | while (count-- > 0) { | ||
255 | if (i > sizeof(dbgline) - 4) { | ||
256 | dbgline[i] = '\0'; | ||
257 | dbg(level, "%s:%s", tag, dbgline); | ||
258 | i = 0; | ||
259 | } | ||
260 | c = *bytes++; | ||
261 | dbgline[i] = (i && !(i % 12)) ? '-' : ' '; | ||
262 | i++; | ||
263 | dbgline[i++] = hexdigit[(c >> 4) & 0x0f]; | ||
264 | dbgline[i++] = hexdigit[c & 0x0f]; | ||
265 | } | ||
266 | dbgline[i] = '\0'; | ||
267 | dbg(level, "%s:%s", tag, dbgline); | ||
268 | #endif | ||
269 | } | ||
270 | |||
271 | /*============================================================================*/ | ||
272 | |||
273 | /* bytewise HDLC bitstuffing via table lookup | ||
274 | * lookup table: 5 subtables for 0..4 preceding consecutive '1' bits | ||
275 | * index: 256*(number of preceding '1' bits) + (next byte to stuff) | ||
276 | * value: bit 9.. 0 = result bits | ||
277 | * bit 12..10 = number of trailing '1' bits in result | ||
278 | * bit 14..13 = number of bits added by stuffing | ||
279 | */ | ||
280 | static u16 stufftab[5 * 256] = { | ||
281 | // previous 1s = 0: | ||
282 | 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, | ||
283 | 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, | ||
284 | 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, | ||
285 | 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x205f, | ||
286 | 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, | ||
287 | 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x209f, | ||
288 | 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, | ||
289 | 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20df, | ||
290 | 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x048f, | ||
291 | 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x251f, | ||
292 | 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x04af, | ||
293 | 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x255f, | ||
294 | 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x08cf, | ||
295 | 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x299f, | ||
296 | 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, | ||
297 | 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, | ||
298 | |||
299 | // previous 1s = 1: | ||
300 | 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, | ||
301 | 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, | ||
302 | 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f, | ||
303 | 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x206f, | ||
304 | 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x208f, | ||
305 | 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20af, | ||
306 | 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20cf, | ||
307 | 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20ef, | ||
308 | 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x0487, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x250f, | ||
309 | 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x0497, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x252f, | ||
310 | 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x04a7, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x254f, | ||
311 | 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x04b7, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x256f, | ||
312 | 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x08c7, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x298f, | ||
313 | 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x08d7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29af, | ||
314 | 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, | ||
315 | 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, | ||
316 | |||
317 | // previous 1s = 2: | ||
318 | 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, | ||
319 | 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, | ||
320 | 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, | ||
321 | 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x2067, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x203e, 0x2077, | ||
322 | 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x2087, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x2097, | ||
323 | 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x20a7, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x20b7, | ||
324 | 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x20c7, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x20d7, | ||
325 | 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x20e7, 0x0078, 0x0079, 0x007a, 0x007b, 0x207c, 0x207d, 0x20be, 0x20f7, | ||
326 | 0x0480, 0x0481, 0x0482, 0x0483, 0x0484, 0x0485, 0x0486, 0x2507, 0x0488, 0x0489, 0x048a, 0x048b, 0x048c, 0x048d, 0x048e, 0x2517, | ||
327 | 0x0490, 0x0491, 0x0492, 0x0493, 0x0494, 0x0495, 0x0496, 0x2527, 0x0498, 0x0499, 0x049a, 0x049b, 0x049c, 0x049d, 0x049e, 0x2537, | ||
328 | 0x04a0, 0x04a1, 0x04a2, 0x04a3, 0x04a4, 0x04a5, 0x04a6, 0x2547, 0x04a8, 0x04a9, 0x04aa, 0x04ab, 0x04ac, 0x04ad, 0x04ae, 0x2557, | ||
329 | 0x04b0, 0x04b1, 0x04b2, 0x04b3, 0x04b4, 0x04b5, 0x04b6, 0x2567, 0x04b8, 0x04b9, 0x04ba, 0x04bb, 0x04bc, 0x04bd, 0x253e, 0x2577, | ||
330 | 0x08c0, 0x08c1, 0x08c2, 0x08c3, 0x08c4, 0x08c5, 0x08c6, 0x2987, 0x08c8, 0x08c9, 0x08ca, 0x08cb, 0x08cc, 0x08cd, 0x08ce, 0x2997, | ||
331 | 0x08d0, 0x08d1, 0x08d2, 0x08d3, 0x08d4, 0x08d5, 0x08d6, 0x29a7, 0x08d8, 0x08d9, 0x08da, 0x08db, 0x08dc, 0x08dd, 0x08de, 0x29b7, | ||
332 | 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, | ||
333 | 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, | ||
334 | |||
335 | // previous 1s = 3: | ||
336 | 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, | ||
337 | 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, | ||
338 | 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, | ||
339 | 0x0030, 0x0031, 0x0032, 0x2063, 0x0034, 0x0035, 0x0036, 0x206b, 0x0038, 0x0039, 0x003a, 0x2073, 0x003c, 0x003d, 0x203e, 0x207b, | ||
340 | 0x0040, 0x0041, 0x0042, 0x2083, 0x0044, 0x0045, 0x0046, 0x208b, 0x0048, 0x0049, 0x004a, 0x2093, 0x004c, 0x004d, 0x004e, 0x209b, | ||
341 | 0x0050, 0x0051, 0x0052, 0x20a3, 0x0054, 0x0055, 0x0056, 0x20ab, 0x0058, 0x0059, 0x005a, 0x20b3, 0x005c, 0x005d, 0x005e, 0x20bb, | ||
342 | 0x0060, 0x0061, 0x0062, 0x20c3, 0x0064, 0x0065, 0x0066, 0x20cb, 0x0068, 0x0069, 0x006a, 0x20d3, 0x006c, 0x006d, 0x006e, 0x20db, | ||
343 | 0x0070, 0x0071, 0x0072, 0x20e3, 0x0074, 0x0075, 0x0076, 0x20eb, 0x0078, 0x0079, 0x007a, 0x20f3, 0x207c, 0x207d, 0x20be, 0x40fb, | ||
344 | 0x0480, 0x0481, 0x0482, 0x2503, 0x0484, 0x0485, 0x0486, 0x250b, 0x0488, 0x0489, 0x048a, 0x2513, 0x048c, 0x048d, 0x048e, 0x251b, | ||
345 | 0x0490, 0x0491, 0x0492, 0x2523, 0x0494, 0x0495, 0x0496, 0x252b, 0x0498, 0x0499, 0x049a, 0x2533, 0x049c, 0x049d, 0x049e, 0x253b, | ||
346 | 0x04a0, 0x04a1, 0x04a2, 0x2543, 0x04a4, 0x04a5, 0x04a6, 0x254b, 0x04a8, 0x04a9, 0x04aa, 0x2553, 0x04ac, 0x04ad, 0x04ae, 0x255b, | ||
347 | 0x04b0, 0x04b1, 0x04b2, 0x2563, 0x04b4, 0x04b5, 0x04b6, 0x256b, 0x04b8, 0x04b9, 0x04ba, 0x2573, 0x04bc, 0x04bd, 0x253e, 0x257b, | ||
348 | 0x08c0, 0x08c1, 0x08c2, 0x2983, 0x08c4, 0x08c5, 0x08c6, 0x298b, 0x08c8, 0x08c9, 0x08ca, 0x2993, 0x08cc, 0x08cd, 0x08ce, 0x299b, | ||
349 | 0x08d0, 0x08d1, 0x08d2, 0x29a3, 0x08d4, 0x08d5, 0x08d6, 0x29ab, 0x08d8, 0x08d9, 0x08da, 0x29b3, 0x08dc, 0x08dd, 0x08de, 0x29bb, | ||
350 | 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, | ||
351 | 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, | ||
352 | |||
353 | // previous 1s = 4: | ||
354 | 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, | ||
355 | 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, | ||
356 | 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, | ||
357 | 0x0030, 0x2061, 0x0032, 0x2065, 0x0034, 0x2069, 0x0036, 0x206d, 0x0038, 0x2071, 0x003a, 0x2075, 0x003c, 0x2079, 0x203e, 0x407d, | ||
358 | 0x0040, 0x2081, 0x0042, 0x2085, 0x0044, 0x2089, 0x0046, 0x208d, 0x0048, 0x2091, 0x004a, 0x2095, 0x004c, 0x2099, 0x004e, 0x209d, | ||
359 | 0x0050, 0x20a1, 0x0052, 0x20a5, 0x0054, 0x20a9, 0x0056, 0x20ad, 0x0058, 0x20b1, 0x005a, 0x20b5, 0x005c, 0x20b9, 0x005e, 0x20bd, | ||
360 | 0x0060, 0x20c1, 0x0062, 0x20c5, 0x0064, 0x20c9, 0x0066, 0x20cd, 0x0068, 0x20d1, 0x006a, 0x20d5, 0x006c, 0x20d9, 0x006e, 0x20dd, | ||
361 | 0x0070, 0x20e1, 0x0072, 0x20e5, 0x0074, 0x20e9, 0x0076, 0x20ed, 0x0078, 0x20f1, 0x007a, 0x20f5, 0x207c, 0x40f9, 0x20be, 0x417d, | ||
362 | 0x0480, 0x2501, 0x0482, 0x2505, 0x0484, 0x2509, 0x0486, 0x250d, 0x0488, 0x2511, 0x048a, 0x2515, 0x048c, 0x2519, 0x048e, 0x251d, | ||
363 | 0x0490, 0x2521, 0x0492, 0x2525, 0x0494, 0x2529, 0x0496, 0x252d, 0x0498, 0x2531, 0x049a, 0x2535, 0x049c, 0x2539, 0x049e, 0x253d, | ||
364 | 0x04a0, 0x2541, 0x04a2, 0x2545, 0x04a4, 0x2549, 0x04a6, 0x254d, 0x04a8, 0x2551, 0x04aa, 0x2555, 0x04ac, 0x2559, 0x04ae, 0x255d, | ||
365 | 0x04b0, 0x2561, 0x04b2, 0x2565, 0x04b4, 0x2569, 0x04b6, 0x256d, 0x04b8, 0x2571, 0x04ba, 0x2575, 0x04bc, 0x2579, 0x253e, 0x467d, | ||
366 | 0x08c0, 0x2981, 0x08c2, 0x2985, 0x08c4, 0x2989, 0x08c6, 0x298d, 0x08c8, 0x2991, 0x08ca, 0x2995, 0x08cc, 0x2999, 0x08ce, 0x299d, | ||
367 | 0x08d0, 0x29a1, 0x08d2, 0x29a5, 0x08d4, 0x29a9, 0x08d6, 0x29ad, 0x08d8, 0x29b1, 0x08da, 0x29b5, 0x08dc, 0x29b9, 0x08de, 0x29bd, | ||
368 | 0x0ce0, 0x2dc1, 0x0ce2, 0x2dc5, 0x0ce4, 0x2dc9, 0x0ce6, 0x2dcd, 0x0ce8, 0x2dd1, 0x0cea, 0x2dd5, 0x0cec, 0x2dd9, 0x0cee, 0x2ddd, | ||
369 | 0x10f0, 0x31e1, 0x10f2, 0x31e5, 0x10f4, 0x31e9, 0x10f6, 0x31ed, 0x20f8, 0x41f1, 0x20fa, 0x41f5, 0x257c, 0x46f9, 0x29be, 0x4b7d | ||
370 | }; | ||
371 | |||
372 | /* hdlc_bitstuff_byte | ||
373 | * perform HDLC bitstuffing for one input byte (8 bits, LSB first) | ||
374 | * parameters: | ||
375 | * cin input byte | ||
376 | * ones number of trailing '1' bits in result before this step | ||
377 | * iwb pointer to output buffer structure (write semaphore must be held) | ||
378 | * return value: | ||
379 | * number of trailing '1' bits in result after this step | ||
380 | */ | ||
381 | |||
382 | static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin, | ||
383 | int ones) | ||
384 | { | ||
385 | u16 stuff; | ||
386 | int shiftinc, newones; | ||
387 | |||
388 | /* get stuffing information for input byte | ||
389 | * value: bit 9.. 0 = result bits | ||
390 | * bit 12..10 = number of trailing '1' bits in result | ||
391 | * bit 14..13 = number of bits added by stuffing | ||
392 | */ | ||
393 | stuff = stufftab[256 * ones + cin]; | ||
394 | shiftinc = (stuff >> 13) & 3; | ||
395 | newones = (stuff >> 10) & 7; | ||
396 | stuff &= 0x3ff; | ||
397 | |||
398 | /* append stuffed byte to output stream */ | ||
399 | isowbuf_putbits(iwb, stuff, 8 + shiftinc); | ||
400 | return newones; | ||
401 | } | ||
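As an aside for readers of the table-driven encoder above: the following user-space sketch (illustration only, not driver code) performs one step of LSB-first HDLC bit stuffing naively and reports the same three quantities that hdlc_bitstuff_byte() unpacks from a stufftab entry. The chosen input, cin = 0x7c with three previous '1' bits, corresponds to stufftab[256*3 + 0x7c] = 0x207c in the "previous 1s = 3" block above, which decodes to result bits 0x07c, one stuffed bit, and zero trailing '1' bits.

#include <stdio.h>

/* naive reference for one step of LSB-first HDLC bit stuffing:
 * returns the new trailing-ones count, stores the stuffed bits
 * (LSB first) in *out and the number of output bits in *nbits */
static int stuff_byte_ref(unsigned char cin, int ones,
                          unsigned *out, int *nbits)
{
        unsigned result = 0;
        int outpos = 0;
        int i;

        for (i = 0; i < 8; i++) {
                int bit = (cin >> i) & 1;

                result |= (unsigned) bit << outpos++;
                if (!bit) {
                        ones = 0;
                } else if (++ones == 5) {
                        outpos++;       /* insert a '0' after five '1's */
                        ones = 0;
                }
        }
        *out = result;
        *nbits = outpos;
        return ones;
}

int main(void)
{
        unsigned out;
        int nbits;
        int newones = stuff_byte_ref(0x7c, 3, &out, &nbits);

        /* compare with the decoded stufftab[256*3 + 0x7c] == 0x207c:
         * result bits 0x07c, one bit added, zero trailing '1' bits */
        printf("bits 0x%03x, %d added, %d trailing ones\n",
               out, nbits - 8, newones);
        return 0;
}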
402 | |||
403 | /* hdlc_buildframe | ||
404 | * Perform HDLC framing with bitstuffing on a byte buffer | ||
405 | * The input buffer is regarded as a sequence of bits, starting with the least | ||
406 | * significant bit of the first byte and ending with the most significant bit | ||
407 | * of the last byte. A 16 bit FCS is appended as defined by RFC 1662. | ||
408 | * Whenever five consecutive '1' bits appear in the resulting bit sequence, a | ||
409 | * '0' bit is inserted after them. | ||
410 | * The resulting bit string and a closing flag pattern (PPP_FLAG, '01111110') | ||
411 | * are appended to the output buffer starting at the given bit position, which | ||
412 | * is assumed to already contain a leading flag. | ||
413 | * The output buffer must have sufficient length; count + count/5 + 6 bytes | ||
414 | * of free space are needed, and their availability is verified before writing. | ||
415 | * parameters: | ||
416 | * in input buffer | ||
417 | * count number of bytes in input buffer | ||
418 | * iwb pointer to output buffer structure (write semaphore must be held) | ||
419 | * return value: | ||
420 | * position of end of packet in output buffer on success, | ||
421 | * -EAGAIN if write semaphore busy or buffer full | ||
422 | */ | ||
423 | |||
424 | static inline int hdlc_buildframe(struct isowbuf_t *iwb, | ||
425 | unsigned char *in, int count) | ||
426 | { | ||
427 | int ones; | ||
428 | u16 fcs; | ||
429 | int end; | ||
430 | unsigned char c; | ||
431 | |||
432 | if (isowbuf_freebytes(iwb) < count + count / 5 + 6 || | ||
433 | !isowbuf_startwrite(iwb)) { | ||
434 | dbg(DEBUG_ISO, "%s: %d bytes free -> -EAGAIN", | ||
435 | __func__, isowbuf_freebytes(iwb)); | ||
436 | return -EAGAIN; | ||
437 | } | ||
438 | |||
439 | dump_bytes(DEBUG_STREAM, "snd data", in, count); | ||
440 | |||
441 | /* bitstuff and checksum input data */ | ||
442 | fcs = PPP_INITFCS; | ||
443 | ones = 0; | ||
444 | while (count-- > 0) { | ||
445 | c = *in++; | ||
446 | ones = hdlc_bitstuff_byte(iwb, c, ones); | ||
447 | fcs = crc_ccitt_byte(fcs, c); | ||
448 | } | ||
449 | |||
450 | /* bitstuff and append FCS (complemented, least significant byte first) */ | ||
451 | fcs ^= 0xffff; | ||
452 | ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); | ||
453 | ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); | ||
454 | |||
455 | /* put closing flag and repeat byte for flag idle */ | ||
456 | isowbuf_putflag(iwb); | ||
457 | end = isowbuf_donewrite(iwb); | ||
458 | dump_bytes(DEBUG_STREAM_DUMP, "isowbuf", iwb->data, end + 1); | ||
459 | return end; | ||
460 | } | ||
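The FCS handling above follows RFC 1662: the CRC runs over the payload, is complemented, and is appended least significant byte first; on the receive side, running the same CRC over payload plus appended FCS yields the fixed value PPP_GOODFCS (0xf0b8) that hdlc_done() tests for. A self-contained user-space sketch of that property follows; the crc_ccitt_byte() here is a plain bitwise stand-in for the kernel's table-driven crc_ccitt (assumed reflected polynomial 0x8408), not the driver's code.

#include <stdio.h>

#define PPP_INITFCS     0xffff
#define PPP_GOODFCS     0xf0b8

/* bitwise stand-in for the kernel's crc_ccitt_byte() */
static unsigned short crc_ccitt_byte(unsigned short fcs, unsigned char c)
{
        int i;

        fcs ^= c;
        for (i = 0; i < 8; i++)
                fcs = (fcs & 1) ? (fcs >> 1) ^ 0x8408 : fcs >> 1;
        return fcs;
}

int main(void)
{
        unsigned char data[] = { 0x7d, 0x23, 0x45 };    /* arbitrary payload */
        unsigned short fcs = PPP_INITFCS;
        unsigned short check = PPP_INITFCS;
        unsigned i;

        /* sender: CRC over payload, then complement (as in hdlc_buildframe) */
        for (i = 0; i < sizeof(data); i++)
                fcs = crc_ccitt_byte(fcs, data[i]);
        fcs ^= 0xffff;

        /* receiver: CRC over payload plus appended FCS, LSB first */
        for (i = 0; i < sizeof(data); i++)
                check = crc_ccitt_byte(check, data[i]);
        check = crc_ccitt_byte(check, fcs & 0xff);
        check = crc_ccitt_byte(check, fcs >> 8);

        printf("check = 0x%04x (PPP_GOODFCS = 0x%04x)\n", check, PPP_GOODFCS);
        return 0;
}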
461 | |||
462 | /* trans_buildframe | ||
463 | * Append a block of 'transparent' data to the output buffer, | ||
464 | * inverting the bytes. | ||
465 | * The output buffer must have sufficient length; count bytes of free | ||
466 | * space are needed, and their availability is verified before writing. | ||
467 | * parameters: | ||
468 | * in input buffer | ||
469 | * count number of bytes in input buffer | ||
470 | * iwb pointer to output buffer structure (write semaphore must be held) | ||
471 | * return value: | ||
472 | * position of end of packet in output buffer on success, | ||
473 | * -EAGAIN if write semaphore busy or buffer full | ||
474 | */ | ||
475 | |||
476 | static inline int trans_buildframe(struct isowbuf_t *iwb, | ||
477 | unsigned char *in, int count) | ||
478 | { | ||
479 | int write; | ||
480 | unsigned char c; | ||
481 | |||
482 | if (unlikely(count <= 0)) | ||
483 | return atomic_read(&iwb->write); /* better ideas? */ | ||
484 | |||
485 | if (isowbuf_freebytes(iwb) < count || | ||
486 | !isowbuf_startwrite(iwb)) { | ||
487 | dbg(DEBUG_ISO, "can't put %d bytes", count); | ||
488 | return -EAGAIN; | ||
489 | } | ||
490 | |||
491 | dbg(DEBUG_STREAM, "put %d bytes", count); | ||
492 | write = atomic_read(&iwb->write); | ||
493 | do { | ||
494 | c = gigaset_invtab[*in++]; | ||
495 | iwb->data[write++] = c; | ||
496 | write %= BAS_OUTBUFSIZE; | ||
497 | } while (--count > 0); | ||
498 | atomic_set(&iwb->write, write); | ||
499 | iwb->idle = c; | ||
500 | |||
501 | return isowbuf_donewrite(iwb); | ||
502 | } | ||
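The "inverting" above goes through gigaset_invtab, which is defined elsewhere in the driver. Assuming, purely as an illustration, that it is a 256-entry bit-order reversal table, a table like it could be generated and spot-checked with the following user-space sketch (not driver code):

#include <stdio.h>

static unsigned char invtab[256];

static void build_invtab(void)
{
        int i, b;

        for (i = 0; i < 256; i++) {
                unsigned char r = 0;

                for (b = 0; b < 8; b++)
                        if (i & (1 << b))
                                r |= 0x80 >> b;   /* mirror bit b to bit 7-b */
                invtab[i] = r;
        }
}

int main(void)
{
        build_invtab();
        printf("invtab[0x01] = 0x%02x, invtab[0xa5] = 0x%02x\n",
               invtab[0x01], invtab[0xa5]);       /* expect 0x80 and 0xa5 */
        return 0;
}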
503 | |||
504 | int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len) | ||
505 | { | ||
506 | int result; | ||
507 | |||
508 | switch (bcs->proto2) { | ||
509 | case ISDN_PROTO_L2_HDLC: | ||
510 | result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len); | ||
511 | dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", __func__, len, result); | ||
512 | break; | ||
513 | default: /* assume transparent */ | ||
514 | result = trans_buildframe(bcs->hw.bas->isooutbuf, in, len); | ||
515 | dbg(DEBUG_ISO, "%s: %d bytes trans -> %d", __func__, len, result); | ||
516 | } | ||
517 | return result; | ||
518 | } | ||
519 | |||
520 | /* hdlc_putbyte | ||
521 | * append byte c to current skb of B channel structure *bcs, updating fcs | ||
522 | */ | ||
523 | static inline void hdlc_putbyte(unsigned char c, struct bc_state *bcs) | ||
524 | { | ||
525 | bcs->fcs = crc_ccitt_byte(bcs->fcs, c); | ||
526 | if (unlikely(bcs->skb == NULL)) { | ||
527 | /* skipping */ | ||
528 | return; | ||
529 | } | ||
530 | if (unlikely(bcs->skb->len == SBUFSIZE)) { | ||
531 | warn("received oversized packet discarded"); | ||
532 | bcs->hw.bas->giants++; | ||
533 | dev_kfree_skb_any(bcs->skb); | ||
534 | bcs->skb = NULL; | ||
535 | return; | ||
536 | } | ||
537 | *gigaset_skb_put_quick(bcs->skb, 1) = c; | ||
538 | } | ||
539 | |||
540 | /* hdlc_flush | ||
541 | * drop partial HDLC data packet | ||
542 | */ | ||
543 | static inline void hdlc_flush(struct bc_state *bcs) | ||
544 | { | ||
545 | /* clear skb or allocate new if not skipping */ | ||
546 | if (likely(bcs->skb != NULL)) | ||
547 | skb_trim(bcs->skb, 0); | ||
548 | else if (!bcs->ignore) { | ||
549 | if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) | ||
550 | skb_reserve(bcs->skb, HW_HDR_LEN); | ||
551 | else | ||
552 | err("could not allocate skb"); | ||
553 | } | ||
554 | |||
555 | /* reset packet state */ | ||
556 | bcs->fcs = PPP_INITFCS; | ||
557 | } | ||
558 | |||
559 | /* hdlc_done | ||
560 | * process completed HDLC data packet | ||
561 | */ | ||
562 | static inline void hdlc_done(struct bc_state *bcs) | ||
563 | { | ||
564 | struct sk_buff *procskb; | ||
565 | |||
566 | if (unlikely(bcs->ignore)) { | ||
567 | bcs->ignore--; | ||
568 | hdlc_flush(bcs); | ||
569 | return; | ||
570 | } | ||
571 | |||
572 | if ((procskb = bcs->skb) == NULL) { | ||
573 | /* previous error */ | ||
574 | dbg(DEBUG_ISO, "%s: skb=NULL", __func__); | ||
575 | gigaset_rcv_error(NULL, bcs->cs, bcs); | ||
576 | } else if (procskb->len < 2) { | ||
577 | notice("received short frame (%d octets)", procskb->len); | ||
578 | bcs->hw.bas->runts++; | ||
579 | gigaset_rcv_error(procskb, bcs->cs, bcs); | ||
580 | } else if (bcs->fcs != PPP_GOODFCS) { | ||
581 | notice("frame check error (0x%04x)", bcs->fcs); | ||
582 | bcs->hw.bas->fcserrs++; | ||
583 | gigaset_rcv_error(procskb, bcs->cs, bcs); | ||
584 | } else { | ||
585 | procskb->len -= 2; /* subtract FCS */ | ||
586 | procskb->tail -= 2; | ||
587 | dbg(DEBUG_ISO, | ||
588 | "%s: good frame (%d octets)", __func__, procskb->len); | ||
589 | dump_bytes(DEBUG_STREAM, | ||
590 | "rcv data", procskb->data, procskb->len); | ||
591 | bcs->hw.bas->goodbytes += procskb->len; | ||
592 | gigaset_rcv_skb(procskb, bcs->cs, bcs); | ||
593 | } | ||
594 | |||
595 | if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) | ||
596 | skb_reserve(bcs->skb, HW_HDR_LEN); | ||
597 | else | ||
598 | err("could not allocate skb"); | ||
599 | bcs->fcs = PPP_INITFCS; | ||
600 | } | ||
601 | |||
602 | /* hdlc_frag | ||
603 | * drop HDLC data packet with non-integral last byte | ||
604 | */ | ||
605 | static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits) | ||
606 | { | ||
607 | if (unlikely(bcs->ignore)) { | ||
608 | bcs->ignore--; | ||
609 | hdlc_flush(bcs); | ||
610 | return; | ||
611 | } | ||
612 | |||
613 | notice("received partial byte (%d bits)", inbits); | ||
614 | bcs->hw.bas->alignerrs++; | ||
615 | gigaset_rcv_error(bcs->skb, bcs->cs, bcs); | ||
616 | |||
617 | if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) | ||
618 | skb_reserve(bcs->skb, HW_HDR_LEN); | ||
619 | else | ||
620 | err("could not allocate skb"); | ||
621 | bcs->fcs = PPP_INITFCS; | ||
622 | } | ||
623 | |||
624 | /* bit counts lookup table for HDLC bit unstuffing | ||
625 | * index: input byte | ||
626 | * value: bit 0..3 = number of consecutive '1' bits starting from LSB | ||
627 | * bit 4..6 = number of consecutive '1' bits starting from MSB | ||
628 | * (replacing 8 by 7 to make it fit; the algorithm won't care) | ||
629 | * bit 7 set if there are 5 or more "interior" consecutive '1' bits | ||
630 | */ | ||
631 | static unsigned char bitcounts[256] = { | ||
632 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, | ||
633 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, | ||
634 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, | ||
635 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x80, 0x06, | ||
636 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, | ||
637 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x05, | ||
638 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x04, | ||
639 | 0x00, 0x01, 0x00, 0x02, 0x00, 0x01, 0x00, 0x03, 0x00, 0x01, 0x00, 0x02, 0x80, 0x81, 0x80, 0x07, | ||
640 | 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, | ||
641 | 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x15, | ||
642 | 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x14, | ||
643 | 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x10, 0x13, 0x10, 0x11, 0x10, 0x12, 0x10, 0x11, 0x90, 0x16, | ||
644 | 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x24, | ||
645 | 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x23, 0x20, 0x21, 0x20, 0x22, 0x20, 0x21, 0x20, 0x25, | ||
646 | 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x33, 0x30, 0x31, 0x30, 0x32, 0x30, 0x31, 0x30, 0x34, | ||
647 | 0x40, 0x41, 0x40, 0x42, 0x40, 0x41, 0x40, 0x43, 0x50, 0x51, 0x50, 0x52, 0x60, 0x61, 0x70, 0x78 | ||
648 | }; | ||
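The table above can be regenerated or spot-checked from its description. The user-space sketch below (illustration only, not driver code) does exactly that, reading "interior" as a run of '1' bits that neither starts at bit 0 nor ends at bit 7; the printed values can be compared against the entries for 0x3e, 0xbe and 0xff in the table.

#include <stdio.h>

static unsigned char bitcount_entry(unsigned c)
{
        unsigned lead = 0, trail = 0, entry;
        int start, len, i;

        while (lead < 8 && (c & (1u << lead)))          /* ones from LSB */
                lead++;
        while (trail < 8 && (c & (0x80u >> trail)))     /* ones from MSB */
                trail++;
        if (trail == 8)
                trail = 7;              /* 8 -> 7 so it fits in 3 bits */

        entry = (trail << 4) | lead;

        /* look for an "interior" run of >= 5 ones: a run of '1' bits
         * that neither starts at bit 0 nor ends at bit 7 */
        for (start = 0; start < 8; start++) {
                if (!(c & (1u << start)))
                        continue;
                len = 0;
                for (i = start; i < 8 && (c & (1u << i)); i++)
                        len++;
                if (start != 0 && start + len - 1 != 7 && len >= 5)
                        entry |= 0x80;
                start += len;           /* skip past this run */
        }
        return entry;
}

int main(void)
{
        /* spot checks against the table above */
        printf("0x3e -> 0x%02x, 0xbe -> 0x%02x, 0xff -> 0x%02x\n",
               bitcount_entry(0x3e), bitcount_entry(0xbe),
               bitcount_entry(0xff));
        return 0;
}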
649 | |||
650 | /* hdlc_unpack | ||
651 | * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation) | ||
652 | * on a sequence of received data bytes (8 bits each, LSB first) | ||
653 | * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb | ||
654 | * notify of errors via gigaset_rcv_error | ||
655 | * tally frames, errors etc. in BC structure counters | ||
656 | * parameters: | ||
657 | * src received data | ||
658 | * count number of received bytes | ||
659 | * bcs receiving B channel structure | ||
660 | */ | ||
661 | static inline void hdlc_unpack(unsigned char *src, unsigned count, | ||
662 | struct bc_state *bcs) | ||
663 | { | ||
664 | struct bas_bc_state *ubc; | ||
665 | int inputstate; | ||
666 | unsigned seqlen, inbyte, inbits; | ||
667 | |||
668 | IFNULLRET(bcs); | ||
669 | ubc = bcs->hw.bas; | ||
670 | IFNULLRET(ubc); | ||
671 | |||
672 | /* load previous state: | ||
673 | * inputstate = set of flag bits: | ||
674 | * - INS_flag_hunt: no complete opening flag received since connection setup or last abort | ||
675 | * - INS_have_data: at least one complete data byte received since last flag | ||
676 | * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7) | ||
677 | * inbyte = accumulated partial data byte (if !INS_flag_hunt) | ||
678 | * inbits = number of valid bits in inbyte, starting at LSB (0..6) | ||
679 | */ | ||
680 | inputstate = bcs->inputstate; | ||
681 | seqlen = ubc->seqlen; | ||
682 | inbyte = ubc->inbyte; | ||
683 | inbits = ubc->inbits; | ||
684 | |||
685 | /* bit unstuffing, one byte at a time | ||
686 | * Take your time to understand this; it's straightforward but tedious. | ||
687 | * The "bitcounts" lookup table is used to speed up the counting of | ||
688 | * leading and trailing '1' bits. | ||
689 | */ | ||
690 | while (count--) { | ||
691 | unsigned char c = *src++; | ||
692 | unsigned char tabentry = bitcounts[c]; | ||
693 | unsigned lead1 = tabentry & 0x0f; | ||
694 | unsigned trail1 = (tabentry >> 4) & 0x0f; | ||
695 | |||
696 | seqlen += lead1; | ||
697 | |||
698 | if (unlikely(inputstate & INS_flag_hunt)) { | ||
699 | if (c == PPP_FLAG) { | ||
700 | /* flag-in-one */ | ||
701 | inputstate &= ~(INS_flag_hunt | INS_have_data); | ||
702 | inbyte = 0; | ||
703 | inbits = 0; | ||
704 | } else if (seqlen == 6 && trail1 != 7) { | ||
705 | /* flag completed & not followed by abort */ | ||
706 | inputstate &= ~(INS_flag_hunt | INS_have_data); | ||
707 | inbyte = c >> (lead1 + 1); | ||
708 | inbits = 7 - lead1; | ||
709 | if (trail1 >= 8) { | ||
710 | /* interior stuffing: omitting the MSB handles most cases */ | ||
711 | inbits--; | ||
712 | /* correct the incorrectly handled cases individually */ | ||
713 | switch (c) { | ||
714 | case 0xbe: | ||
715 | inbyte = 0x3f; | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | } | ||
720 | /* else: continue flag-hunting */ | ||
721 | } else if (likely(seqlen < 5 && trail1 < 7)) { | ||
722 | /* streamlined case: 8 data bits, no stuffing */ | ||
723 | inbyte |= c << inbits; | ||
724 | hdlc_putbyte(inbyte & 0xff, bcs); | ||
725 | inputstate |= INS_have_data; | ||
726 | inbyte >>= 8; | ||
727 | /* inbits unchanged */ | ||
728 | } else if (likely(seqlen == 6 && inbits == 7 - lead1 && | ||
729 | trail1 + 1 == inbits && | ||
730 | !(inputstate & INS_have_data))) { | ||
731 | /* streamlined case: flag idle - state unchanged */ | ||
732 | } else if (unlikely(seqlen > 6)) { | ||
733 | /* abort sequence */ | ||
734 | ubc->aborts++; | ||
735 | hdlc_flush(bcs); | ||
736 | inputstate |= INS_flag_hunt; | ||
737 | } else if (seqlen == 6) { | ||
738 | /* closing flag, including (6 - lead1) '1's and one '0' from inbits */ | ||
739 | if (inbits > 7 - lead1) { | ||
740 | hdlc_frag(bcs, inbits + lead1 - 7); | ||
741 | inputstate &= ~INS_have_data; | ||
742 | } else { | ||
743 | if (inbits < 7 - lead1) | ||
744 | ubc->stolen0s ++; | ||
745 | if (inputstate & INS_have_data) { | ||
746 | hdlc_done(bcs); | ||
747 | inputstate &= ~INS_have_data; | ||
748 | } | ||
749 | } | ||
750 | |||
751 | if (c == PPP_FLAG) { | ||
752 | /* complete flag, LSB overlaps preceding flag */ | ||
753 | ubc->shared0s ++; | ||
754 | inbits = 0; | ||
755 | inbyte = 0; | ||
756 | } else if (trail1 != 7) { | ||
757 | /* remaining bits */ | ||
758 | inbyte = c >> (lead1 + 1); | ||
759 | inbits = 7 - lead1; | ||
760 | if (trail1 >= 8) { | ||
761 | /* interior stuffing: omitting the MSB handles most cases */ | ||
762 | inbits--; | ||
763 | /* correct the incorrectly handled cases individually */ | ||
764 | switch (c) { | ||
765 | case 0xbe: | ||
766 | inbyte = 0x3f; | ||
767 | break; | ||
768 | } | ||
769 | } | ||
770 | } else { | ||
771 | /* abort sequence follows, skb already empty anyway */ | ||
772 | ubc->aborts++; | ||
773 | inputstate |= INS_flag_hunt; | ||
774 | } | ||
775 | } else { /* (seqlen < 6) && (seqlen == 5 || trail1 >= 7) */ | ||
776 | |||
777 | if (c == PPP_FLAG) { | ||
778 | /* complete flag */ | ||
779 | if (seqlen == 5) | ||
780 | ubc->stolen0s++; | ||
781 | if (inbits) { | ||
782 | hdlc_frag(bcs, inbits); | ||
783 | inbits = 0; | ||
784 | inbyte = 0; | ||
785 | } else if (inputstate & INS_have_data) | ||
786 | hdlc_done(bcs); | ||
787 | inputstate &= ~INS_have_data; | ||
788 | } else if (trail1 == 7) { | ||
789 | /* abort sequence */ | ||
790 | ubc->aborts++; | ||
791 | hdlc_flush(bcs); | ||
792 | inputstate |= INS_flag_hunt; | ||
793 | } else { | ||
794 | /* stuffed data */ | ||
795 | if (trail1 < 7) { /* => seqlen == 5 */ | ||
796 | /* stuff bit at position lead1, no interior stuffing */ | ||
797 | unsigned char mask = (1 << lead1) - 1; | ||
798 | c = (c & mask) | ((c & ~mask) >> 1); | ||
799 | inbyte |= c << inbits; | ||
800 | inbits += 7; | ||
801 | } else if (seqlen < 5) { /* trail1 >= 8 */ | ||
802 | /* interior stuffing: omitting the MSB handles most cases */ | ||
803 | /* correct the incorrectly handled cases individually */ | ||
804 | switch (c) { | ||
805 | case 0xbe: | ||
806 | c = 0x7e; | ||
807 | break; | ||
808 | } | ||
809 | inbyte |= c << inbits; | ||
810 | inbits += 7; | ||
811 | } else { /* seqlen == 5 && trail1 >= 8 */ | ||
812 | |||
813 | /* stuff bit at lead1 *and* interior stuffing */ | ||
814 | switch (c) { /* unstuff individually */ | ||
815 | case 0x7d: | ||
816 | c = 0x3f; | ||
817 | break; | ||
818 | case 0xbe: | ||
819 | c = 0x3f; | ||
820 | break; | ||
821 | case 0x3e: | ||
822 | c = 0x1f; | ||
823 | break; | ||
824 | case 0x7c: | ||
825 | c = 0x3e; | ||
826 | break; | ||
827 | } | ||
828 | inbyte |= c << inbits; | ||
829 | inbits += 6; | ||
830 | } | ||
831 | if (inbits >= 8) { | ||
832 | inbits -= 8; | ||
833 | hdlc_putbyte(inbyte & 0xff, bcs); | ||
834 | inputstate |= INS_have_data; | ||
835 | inbyte >>= 8; | ||
836 | } | ||
837 | } | ||
838 | } | ||
839 | seqlen = trail1 & 7; | ||
840 | } | ||
841 | |||
842 | /* save new state */ | ||
843 | bcs->inputstate = inputstate; | ||
844 | ubc->seqlen = seqlen; | ||
845 | ubc->inbyte = inbyte; | ||
846 | ubc->inbits = inbits; | ||
847 | } | ||
848 | |||
849 | /* trans_receive | ||
850 | * pass on received USB frame transparently as SKB via gigaset_rcv_skb | ||
851 | * invert bytes | ||
852 | * tally frames, errors etc. in BC structure counters | ||
853 | * parameters: | ||
854 | * src received data | ||
855 | * count number of received bytes | ||
856 | * bcs receiving B channel structure | ||
857 | */ | ||
858 | static inline void trans_receive(unsigned char *src, unsigned count, | ||
859 | struct bc_state *bcs) | ||
860 | { | ||
861 | struct sk_buff *skb; | ||
862 | int dobytes; | ||
863 | unsigned char *dst; | ||
864 | |||
865 | if (unlikely(bcs->ignore)) { | ||
866 | bcs->ignore--; | ||
867 | hdlc_flush(bcs); | ||
868 | return; | ||
869 | } | ||
870 | if (unlikely((skb = bcs->skb) == NULL)) { | ||
871 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); | ||
872 | if (!skb) { | ||
873 | err("could not allocate skb"); | ||
874 | return; | ||
875 | } | ||
876 | skb_reserve(skb, HW_HDR_LEN); | ||
877 | } | ||
878 | bcs->hw.bas->goodbytes += skb->len; | ||
879 | dobytes = TRANSBUFSIZE - skb->len; | ||
880 | while (count > 0) { | ||
881 | dst = skb_put(skb, count < dobytes ? count : dobytes); | ||
882 | while (count > 0 && dobytes > 0) { | ||
883 | *dst++ = gigaset_invtab[*src++]; | ||
884 | count--; | ||
885 | dobytes--; | ||
886 | } | ||
887 | if (dobytes == 0) { | ||
888 | gigaset_rcv_skb(skb, bcs->cs, bcs); | ||
889 | bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); | ||
890 | if (!skb) { | ||
891 | err("could not allocate skb"); | ||
892 | return; | ||
893 | } | ||
894 | skb_reserve(bcs->skb, HW_HDR_LEN); | ||
895 | dobytes = TRANSBUFSIZE; | ||
896 | } | ||
897 | } | ||
898 | } | ||
899 | |||
900 | void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs) | ||
901 | { | ||
902 | switch (bcs->proto2) { | ||
903 | case ISDN_PROTO_L2_HDLC: | ||
904 | hdlc_unpack(src, count, bcs); | ||
905 | break; | ||
906 | default: /* assume transparent */ | ||
907 | trans_receive(src, count, bcs); | ||
908 | } | ||
909 | } | ||
910 | |||
911 | /* == data input =========================================================== */ | ||
912 | |||
913 | static void cmd_loop(unsigned char *src, int numbytes, struct inbuf_t *inbuf) | ||
914 | { | ||
915 | struct cardstate *cs = inbuf->cs; | ||
916 | unsigned cbytes = cs->cbytes; | ||
917 | |||
918 | while (numbytes--) { | ||
919 | /* copy next character, check for end of line */ | ||
920 | switch (cs->respdata[cbytes] = *src++) { | ||
921 | case '\r': | ||
922 | case '\n': | ||
923 | /* end of line */ | ||
924 | dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", | ||
925 | __func__, cbytes); | ||
926 | cs->cbytes = cbytes; | ||
927 | gigaset_handle_modem_response(cs); | ||
928 | cbytes = 0; | ||
929 | break; | ||
930 | default: | ||
931 | /* advance in line buffer, checking for overflow */ | ||
932 | if (cbytes < MAX_RESP_SIZE - 1) | ||
933 | cbytes++; | ||
934 | else | ||
935 | warn("response too large"); | ||
936 | } | ||
937 | } | ||
938 | |||
939 | /* save state */ | ||
940 | cs->cbytes = cbytes; | ||
941 | } | ||
942 | |||
943 | |||
944 | /* process a block of data received through the control channel | ||
945 | */ | ||
946 | void gigaset_isoc_input(struct inbuf_t *inbuf) | ||
947 | { | ||
948 | struct cardstate *cs = inbuf->cs; | ||
949 | unsigned tail, head, numbytes; | ||
950 | unsigned char *src; | ||
951 | |||
952 | head = atomic_read(&inbuf->head); | ||
953 | while (head != (tail = atomic_read(&inbuf->tail))) { | ||
954 | dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); | ||
955 | if (head > tail) | ||
956 | tail = RBUFSIZE; | ||
957 | src = inbuf->data + head; | ||
958 | numbytes = tail - head; | ||
959 | dbg(DEBUG_INTR, "processing %u bytes", numbytes); | ||
960 | |||
961 | if (atomic_read(&cs->mstate) == MS_LOCKED) { | ||
962 | gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", | ||
963 | numbytes, src, 0); | ||
964 | gigaset_if_receive(inbuf->cs, src, numbytes); | ||
965 | } else { | ||
966 | gigaset_dbg_buffer(DEBUG_CMD, "received response", | ||
967 | numbytes, src, 0); | ||
968 | cmd_loop(src, numbytes, inbuf); | ||
969 | } | ||
970 | |||
971 | head += numbytes; | ||
972 | if (head == RBUFSIZE) | ||
973 | head = 0; | ||
974 | dbg(DEBUG_INTR, "setting head to %u", head); | ||
975 | atomic_set(&inbuf->head, head); | ||
976 | } | ||
977 | } | ||
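The loop above drains a circular buffer in at most two contiguous chunks per wrap: when the data wraps past the end of inbuf->data, tail is temporarily clamped to RBUFSIZE so the first chunk runs to the end of the array, and the next iteration picks up the remainder from offset 0. A minimal user-space sketch of the same pattern follows; names, sizes and the callback are illustrative, not the driver's.

#include <stdio.h>
#include <string.h>

#define RBUFSIZE 16

struct ringbuf {
        unsigned char data[RBUFSIZE];
        unsigned head;          /* next byte to read  */
        unsigned tail;          /* next byte to write */
};

static void consume(struct ringbuf *rb,
                    void (*handle)(unsigned char *src, unsigned n))
{
        unsigned head = rb->head;
        unsigned tail, numbytes;

        while (head != (tail = rb->tail)) {
                if (head > tail)        /* wrapped: first go to end of array */
                        tail = RBUFSIZE;
                numbytes = tail - head;
                handle(rb->data + head, numbytes);
                head += numbytes;
                if (head == RBUFSIZE)
                        head = 0;
                rb->head = head;
        }
}

static void print_chunk(unsigned char *src, unsigned n)
{
        printf("chunk of %u bytes: %.*s\n", n, (int) n, src);
}

int main(void)
{
        struct ringbuf rb = { .head = 12, .tail = 12 };

        /* store 8 bytes wrapping around the end of the buffer */
        memcpy(rb.data + 12, "ABCD", 4);
        memcpy(rb.data, "EFGH", 4);
        rb.tail = 4;

        consume(&rb, print_chunk);      /* prints "ABCD" then "EFGH" */
        return 0;
}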
978 | |||
979 | |||
980 | /* == data output ========================================================== */ | ||
981 | |||
982 | /* gigaset_send_skb | ||
983 | * called by common.c to queue an skb for sending | ||
984 | * and start transmission if necessary | ||
985 | * parameters: | ||
986 | * B Channel control structure | ||
987 | * skb | ||
988 | * return value: | ||
989 | * number of bytes accepted for sending | ||
990 | * (skb->len if ok, 0 if out of buffer space) | ||
991 | * or error code (< 0, eg. -EINVAL) | ||
992 | */ | ||
993 | int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb) | ||
994 | { | ||
995 | int len; | ||
996 | |||
997 | IFNULLRETVAL(bcs, -EFAULT); | ||
998 | IFNULLRETVAL(skb, -EFAULT); | ||
999 | len = skb->len; | ||
1000 | |||
1001 | skb_queue_tail(&bcs->squeue, skb); | ||
1002 | dbg(DEBUG_ISO, | ||
1003 | "%s: skb queued, qlen=%d", __func__, skb_queue_len(&bcs->squeue)); | ||
1004 | |||
1005 | /* tasklet submits URB if necessary */ | ||
1006 | tasklet_schedule(&bcs->hw.bas->sent_tasklet); | ||
1007 | |||
1008 | return len; /* ok so far */ | ||
1009 | } | ||
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c new file mode 100644 index 000000000000..c6915fa2be6c --- /dev/null +++ b/drivers/isdn/gigaset/proc.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Stuff used by all variants of the driver | ||
3 | * | ||
4 | * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de>, | ||
5 | * Hansjoerg Lipp <hjlipp@web.de>, | ||
6 | * Tilman Schmidt <tilman@imap.cc>. | ||
7 | * | ||
8 | * ===================================================================== | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License as | ||
11 | * published by the Free Software Foundation; either version 2 of | ||
12 | * the License, or (at your option) any later version. | ||
13 | * ===================================================================== | ||
14 | * ToDo: ... | ||
15 | * ===================================================================== | ||
16 | * Version: $Id: proc.c,v 1.5.2.13 2006/02/04 18:28:16 hjlipp Exp $ | ||
17 | * ===================================================================== | ||
18 | */ | ||
19 | |||
20 | #include "gigaset.h" | ||
21 | #include <linux/ctype.h> | ||
22 | |||
23 | static ssize_t show_cidmode(struct device *dev, struct device_attribute *attr, char *buf) | ||
24 | { | ||
25 | struct usb_interface *intf = to_usb_interface(dev); | ||
26 | struct cardstate *cs = usb_get_intfdata(intf); | ||
27 | return sprintf(buf, "%d\n", atomic_read(&cs->cidmode)); // FIXME use scnprintf for 13607 bit architectures (if PAGE_SIZE==4096) | ||
28 | } | ||
29 | |||
30 | static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | ||
31 | { | ||
32 | struct usb_interface *intf = to_usb_interface(dev); | ||
33 | struct cardstate *cs = usb_get_intfdata(intf); | ||
34 | long int value; | ||
35 | char *end; | ||
36 | |||
37 | value = simple_strtol(buf, &end, 0); | ||
38 | while (*end) | ||
39 | if (!isspace(*end++)) | ||
40 | return -EINVAL; | ||
41 | if (value < 0 || value > 1) | ||
42 | return -EINVAL; | ||
43 | |||
44 | if (down_interruptible(&cs->sem)) | ||
45 | return -ERESTARTSYS; // FIXME -EINTR? | ||
46 | |||
47 | cs->waiting = 1; | ||
48 | if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE, | ||
49 | NULL, value, NULL)) { | ||
50 | cs->waiting = 0; | ||
51 | up(&cs->sem); | ||
52 | return -ENOMEM; | ||
53 | } | ||
54 | |||
55 | dbg(DEBUG_CMD, "scheduling PROC_CIDMODE"); | ||
56 | gigaset_schedule_event(cs); | ||
57 | |||
58 | wait_event(cs->waitqueue, !cs->waiting); | ||
59 | |||
60 | up(&cs->sem); | ||
61 | |||
62 | return count; | ||
63 | } | ||
64 | |||
65 | static DEVICE_ATTR(cidmode, S_IRUGO|S_IWUSR, show_cidmode, set_cidmode); | ||
66 | |||
67 | /* free sysfs for device */ | ||
68 | void gigaset_free_dev_sysfs(struct usb_interface *interface) | ||
69 | { | ||
70 | dbg(DEBUG_INIT, "removing sysfs entries"); | ||
71 | device_remove_file(&interface->dev, &dev_attr_cidmode); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(gigaset_free_dev_sysfs); | ||
74 | |||
75 | /* initialize sysfs for device */ | ||
76 | void gigaset_init_dev_sysfs(struct usb_interface *interface) | ||
77 | { | ||
78 | dbg(DEBUG_INIT, "setting up sysfs"); | ||
79 | device_create_file(&interface->dev, &dev_attr_cidmode); | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(gigaset_init_dev_sysfs); | ||
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c new file mode 100644 index 000000000000..323fc7349dec --- /dev/null +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -0,0 +1,1008 @@ | |||
1 | /* | ||
2 | * USB driver for Gigaset 307x directly or using M105 Data. | ||
3 | * | ||
4 | * Copyright (c) 2001 by Stefan Eilers <Eilers.Stefan@epost.de> | ||
5 | * and Hansjoerg Lipp <hjlipp@web.de>. | ||
6 | * | ||
7 | * This driver was derived from the USB skeleton driver by | ||
8 | * Greg Kroah-Hartman <greg@kroah.com> | ||
9 | * | ||
10 | * ===================================================================== | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License as | ||
13 | * published by the Free Software Foundation; either version 2 of | ||
14 | * the License, or (at your option) any later version. | ||
15 | * ===================================================================== | ||
16 | * ToDo: ... | ||
17 | * ===================================================================== | ||
18 | * Version: $Id: usb-gigaset.c,v 1.85.4.18 2006/02/04 18:28:16 hjlipp Exp $ | ||
19 | * ===================================================================== | ||
20 | */ | ||
21 | |||
22 | #include "gigaset.h" | ||
23 | |||
24 | #include <linux/errno.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/slab.h> | ||
27 | #include <linux/usb.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/moduleparam.h> | ||
30 | |||
31 | /* Version Information */ | ||
32 | #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers <Eilers.Stefan@epost.de>" | ||
33 | #define DRIVER_DESC "USB Driver for Gigaset 307x using M105" | ||
34 | |||
35 | /* Module parameters */ | ||
36 | |||
37 | static int startmode = SM_ISDN; | ||
38 | static int cidmode = 1; | ||
39 | |||
40 | module_param(startmode, int, S_IRUGO); | ||
41 | module_param(cidmode, int, S_IRUGO); | ||
42 | MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); | ||
43 | MODULE_PARM_DESC(cidmode, "Call-ID mode"); | ||
44 | |||
45 | #define GIGASET_MINORS 1 | ||
46 | #define GIGASET_MINOR 8 | ||
47 | #define GIGASET_MODULENAME "usb_gigaset" | ||
48 | #define GIGASET_DEVFSNAME "gig/usb/" | ||
49 | #define GIGASET_DEVNAME "ttyGU" | ||
50 | |||
51 | #define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256 | ||
52 | |||
53 | /* Values for the Gigaset M105 Data */ | ||
54 | #define USB_M105_VENDOR_ID 0x0681 | ||
55 | #define USB_M105_PRODUCT_ID 0x0009 | ||
56 | |||
57 | /* table of devices that work with this driver */ | ||
58 | static struct usb_device_id gigaset_table [] = { | ||
59 | { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, | ||
60 | { } /* Terminating entry */ | ||
61 | }; | ||
62 | |||
63 | MODULE_DEVICE_TABLE(usb, gigaset_table); | ||
64 | |||
65 | /* Get a minor range for your devices from the usb maintainer */ | ||
66 | #define USB_SKEL_MINOR_BASE 200 | ||
67 | |||
68 | |||
69 | /* | ||
70 | * Control requests (empty fields: 00) | ||
71 | * | ||
72 | * RT|RQ|VALUE|INDEX|LEN |DATA | ||
73 | * In: | ||
74 | * C1 08 01 | ||
75 | * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:? | ||
76 | * C1 0F ll ll | ||
77 | * Get device information/status (llll: 0x200 and 0x40 seen). | ||
78 | * Real size: I only saw MIN(llll,0x64). | ||
79 | * Contents: seems to be always the same... | ||
80 | * offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes) | ||
81 | * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0" | ||
82 | * rest: ? | ||
83 | * Out: | ||
84 | * 41 11 | ||
85 | * Initialize/reset device ? | ||
86 | * 41 00 xx 00 | ||
87 | * ? (xx=00 or 01; 01 on start, 00 on close) | ||
88 | * 41 07 vv mm | ||
89 | * Set/clear flags vv=value, mm=mask (see RQ 08) | ||
90 | * 41 12 xx | ||
91 | * Used before the following configuration requests are issued | ||
92 | * (with xx=0x0f). I've seen other values<0xf, though. | ||
93 | * 41 01 xx xx | ||
94 | * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1. | ||
95 | * 41 03 ps bb | ||
96 | * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity | ||
97 | * [ 0x30: m, 0x40: s ] | ||
98 | * [s: 0: 1 stop bit; 1: 1.5; 2: 2] | ||
99 | * bb: bits/byte (seen 7 and 8) | ||
100 | * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00 | ||
101 | * ?? | ||
102 | * Initialization: 01, 40, 00, 00 | ||
103 | * Open device: 00, 40, 00, 00 | ||
104 | * yy and zz seem to be equal, either 0x00 or 0x0a | ||
105 | * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80) | ||
106 | * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 | ||
107 | * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). | ||
108 | * xx is usually 0x00 but was 0x7e before starting data transfer | ||
109 | * in unimodem mode. So, this might be an array of characters that need | ||
110 | * special treatment ("commit all buffered data"?), 11=^Q, 13=^S. | ||
111 | * | ||
112 | * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two | ||
113 | * flags per packet. | ||
114 | */ | ||
115 | |||
116 | static int gigaset_probe(struct usb_interface *interface, | ||
117 | const struct usb_device_id *id); | ||
118 | static void gigaset_disconnect(struct usb_interface *interface); | ||
119 | |||
120 | static struct gigaset_driver *driver = NULL; | ||
121 | static struct cardstate *cardstate = NULL; | ||
122 | |||
123 | /* usb specific object needed to register this driver with the usb subsystem */ | ||
124 | static struct usb_driver gigaset_usb_driver = { | ||
125 | .name = GIGASET_MODULENAME, | ||
126 | .probe = gigaset_probe, | ||
127 | .disconnect = gigaset_disconnect, | ||
128 | .id_table = gigaset_table, | ||
129 | }; | ||
130 | |||
131 | struct usb_cardstate { | ||
132 | struct usb_device *udev; /* save off the usb device pointer */ | ||
133 | struct usb_interface *interface; /* the interface for this device */ | ||
134 | atomic_t busy; /* bulk output in progress */ | ||
135 | |||
136 | /* Output buffer for commands (M105: and data)*/ | ||
137 | unsigned char *bulk_out_buffer; /* the buffer to send data */ | ||
138 | int bulk_out_size; /* the size of the send buffer */ | ||
139 | __u8 bulk_out_endpointAddr; /* the address of the bulk out endpoint */ | ||
140 | struct urb *bulk_out_urb; /* the urb used to transmit data */ | ||
141 | |||
142 | /* Input buffer for command responses (M105: and data)*/ | ||
143 | int rcvbuf_size; /* the size of the receive buffer */ | ||
144 | struct urb *read_urb; /* the urb used to receive data */ | ||
145 | __u8 int_in_endpointAddr; /* the address of the bulk in endpoint */ | ||
146 | |||
147 | char bchars[6]; /* req. 0x19 */ | ||
148 | }; | ||
149 | |||
150 | struct usb_bc_state {}; | ||
151 | |||
152 | static inline unsigned tiocm_to_gigaset(unsigned state) | ||
153 | { | ||
154 | return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0); | ||
155 | } | ||
156 | |||
157 | #ifdef CONFIG_GIGASET_UNDOCREQ | ||
158 | /* WARNING: EXPERIMENTAL! */ | ||
159 | static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, | ||
160 | unsigned new_state) | ||
161 | { | ||
162 | unsigned mask, val; | ||
163 | int r; | ||
164 | |||
165 | mask = tiocm_to_gigaset(old_state ^ new_state); | ||
166 | val = tiocm_to_gigaset(new_state); | ||
167 | |||
168 | dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); | ||
169 | r = usb_control_msg(cs->hw.usb->udev, | ||
170 | usb_sndctrlpipe(cs->hw.usb->udev, 0), 7, 0x41, | ||
171 | (val & 0xff) | ((mask & 0xff) << 8), 0, | ||
172 | NULL, 0, 2000 /*timeout??*/); // don't use this in an interrupt/BH | ||
173 | if (r < 0) | ||
174 | return r; | ||
175 | //.. | ||
176 | return 0; | ||
177 | } | ||
178 | |||
179 | static int set_value(struct cardstate *cs, u8 req, u16 val) | ||
180 | { | ||
181 | int r, r2; | ||
182 | |||
183 | dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val); | ||
184 | r = usb_control_msg(cs->hw.usb->udev, | ||
185 | usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x12, 0x41, | ||
186 | 0xf /*?*/, 0, | ||
187 | NULL, 0, 2000 /*?*/); /* no idea, what this does */ | ||
188 | if (r < 0) { | ||
189 | err("error %d on request 0x12", -r); | ||
190 | return r; | ||
191 | } | ||
192 | |||
193 | r = usb_control_msg(cs->hw.usb->udev, | ||
194 | usb_sndctrlpipe(cs->hw.usb->udev, 0), req, 0x41, | ||
195 | val, 0, | ||
196 | NULL, 0, 2000 /*?*/); | ||
197 | if (r < 0) | ||
198 | err("error %d on request 0x%02x", -r, (unsigned)req); | ||
199 | |||
200 | r2 = usb_control_msg(cs->hw.usb->udev, | ||
201 | usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41, | ||
202 | 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/); | ||
203 | if (r2 < 0) | ||
204 | err("error %d on request 0x19", -r2); | ||
205 | |||
206 | return r < 0 ? r : (r2 < 0 ? r2 : 0); | ||
207 | } | ||
208 | |||
209 | /* WARNING: HIGHLY EXPERIMENTAL! */ | ||
210 | // don't use this in an interrupt/BH | ||
211 | static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) | ||
212 | { | ||
213 | u16 val; | ||
214 | u32 rate; | ||
215 | |||
216 | cflag &= CBAUD; | ||
217 | |||
218 | switch (cflag) { | ||
219 | //FIXME more values? | ||
220 | case B300: rate = 300; break; | ||
221 | case B600: rate = 600; break; | ||
222 | case B1200: rate = 1200; break; | ||
223 | case B2400: rate = 2400; break; | ||
224 | case B4800: rate = 4800; break; | ||
225 | case B9600: rate = 9600; break; | ||
226 | case B19200: rate = 19200; break; | ||
227 | case B38400: rate = 38400; break; | ||
228 | case B57600: rate = 57600; break; | ||
229 | case B115200: rate = 115200; break; | ||
230 | default: | ||
231 | rate = 9600; | ||
232 | err("unsupported baudrate request 0x%x," | ||
233 | " using default of B9600", cflag); | ||
234 | } | ||
235 | |||
236 | val = 0x383fff / rate + 1; | ||
237 | |||
238 | return set_value(cs, 1, val); | ||
239 | } | ||
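The divisor computed here matches the formula noted in the control-request documentation above: trunc(0x383fff/rate) + 1 equals ceil(0x384000/rate) for any positive rate. A small user-space check (illustration only, not driver code) confirms this for the supported rates:

#include <stdio.h>

int main(void)
{
        unsigned rates[] = { 2400, 9600, 38400, 57600, 115200 };
        unsigned i;

        for (i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
                unsigned val = 0x383fff / rates[i] + 1;
                unsigned up  = (0x384000 + rates[i] - 1) / rates[i];

                printf("rate %6u -> divisor 0x%04x (%s ceil)\n",
                       rates[i], val, val == up ? "equals" : "differs from");
        }
        return 0;
}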
240 | |||
241 | /* WARNING: HIGHLY EXPERIMENTAL! */ | ||
242 | // don't use this in an interrupt/BH | ||
243 | static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) | ||
244 | { | ||
245 | u16 val = 0; | ||
246 | |||
247 | /* set the parity */ | ||
248 | if (cflag & PARENB) | ||
249 | val |= (cflag & PARODD) ? 0x10 : 0x20; | ||
250 | |||
251 | /* set the number of data bits */ | ||
252 | switch (cflag & CSIZE) { | ||
253 | case CS5: | ||
254 | val |= 5 << 8; break; | ||
255 | case CS6: | ||
256 | val |= 6 << 8; break; | ||
257 | case CS7: | ||
258 | val |= 7 << 8; break; | ||
259 | case CS8: | ||
260 | val |= 8 << 8; break; | ||
261 | default: | ||
262 | err("CSIZE was not CS5-CS8, using default of 8"); | ||
263 | val |= 8 << 8; | ||
264 | break; | ||
265 | } | ||
266 | |||
267 | /* set the number of stop bits */ | ||
268 | if (cflag & CSTOPB) { | ||
269 | if ((cflag & CSIZE) == CS5) | ||
270 | val |= 1; /* 1.5 stop bits */ //FIXME is this okay? | ||
271 | else | ||
272 | val |= 2; /* 2 stop bits */ | ||
273 | } | ||
274 | |||
275 | return set_value(cs, 3, val); | ||
276 | } | ||
277 | |||
278 | #else | ||
279 | static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, | ||
280 | unsigned new_state) | ||
281 | { | ||
282 | return -EINVAL; | ||
283 | } | ||
284 | |||
285 | static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) | ||
286 | { | ||
287 | return -EINVAL; | ||
288 | } | ||
289 | |||
290 | static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) | ||
291 | { | ||
292 | return -EINVAL; | ||
293 | } | ||
294 | #endif | ||
295 | |||
296 | |||
297 | /*================================================================================================================*/ | ||
298 | static int gigaset_init_bchannel(struct bc_state *bcs) | ||
299 | { | ||
300 | /* nothing to do for M10x */ | ||
301 | gigaset_bchannel_up(bcs); | ||
302 | return 0; | ||
303 | } | ||
304 | |||
305 | static int gigaset_close_bchannel(struct bc_state *bcs) | ||
306 | { | ||
307 | /* nothing to do for M10x */ | ||
308 | gigaset_bchannel_down(bcs); | ||
309 | return 0; | ||
310 | } | ||
311 | |||
312 | //void send_ack_to_LL(void *data); | ||
313 | static int write_modem(struct cardstate *cs); | ||
314 | static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb); | ||
315 | |||
316 | |||
317 | /* Handling of the send queue. If there is already an skb open, put its data into | ||
318 | * the transfer buffer by calling "write_modem". Otherwise take a new skb out of the queue. | ||
319 | * This function will be called by the ISR via "transmit_chars" (USB: B-Channel Bulk callback handler | ||
320 | * via immediate task queue) or by writebuf_from_LL if the LL wants to transmit data. | ||
321 | */ | ||
322 | static void gigaset_modem_fill(unsigned long data) | ||
323 | { | ||
324 | struct cardstate *cs = (struct cardstate *) data; | ||
325 | struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ | ||
326 | struct cmdbuf_t *cb; | ||
327 | unsigned long flags; | ||
328 | int again; | ||
329 | |||
330 | dbg(DEBUG_OUTPUT, "modem_fill"); | ||
331 | |||
332 | if (atomic_read(&cs->hw.usb->busy)) { | ||
333 | dbg(DEBUG_OUTPUT, "modem_fill: busy"); | ||
334 | return; | ||
335 | } | ||
336 | |||
337 | do { | ||
338 | again = 0; | ||
339 | if (!bcs->tx_skb) { /* no skb is being sent */ | ||
340 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
341 | cb = cs->cmdbuf; | ||
342 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
343 | if (cb) { /* commands to send? */ | ||
344 | dbg(DEBUG_OUTPUT, "modem_fill: cb"); | ||
345 | if (send_cb(cs, cb) < 0) { | ||
346 | dbg(DEBUG_OUTPUT, | ||
347 | "modem_fill: send_cb failed"); | ||
348 | again = 1; /* no callback will be called! */ | ||
349 | } | ||
350 | } else { /* skbs to send? */ | ||
351 | bcs->tx_skb = skb_dequeue(&bcs->squeue); | ||
352 | if (bcs->tx_skb) | ||
353 | dbg(DEBUG_INTR, | ||
354 | "Dequeued skb (Adr: %lx)!", | ||
355 | (unsigned long) bcs->tx_skb); | ||
356 | } | ||
357 | } | ||
358 | |||
359 | if (bcs->tx_skb) { | ||
360 | dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); | ||
361 | if (write_modem(cs) < 0) { | ||
362 | dbg(DEBUG_OUTPUT, | ||
363 | "modem_fill: write_modem failed"); | ||
364 | // FIXME should we tell the LL? | ||
365 | again = 1; /* no callback will be called! */ | ||
366 | } | ||
367 | } | ||
368 | } while (again); | ||
369 | } | ||
370 | |||
371 | /** | ||
372 | * gigaset_read_int_callback | ||
373 | * | ||
374 | * Called when data has been received from the device. This is nearly analogous to | ||
375 | * the interrupt service routine of the serial driver. | ||
376 | */ | ||
377 | static void gigaset_read_int_callback(struct urb *urb, struct pt_regs *regs) | ||
378 | { | ||
379 | int resubmit = 0; | ||
380 | int r; | ||
381 | struct cardstate *cs; | ||
382 | unsigned numbytes; | ||
383 | unsigned char *src; | ||
384 | //unsigned long flags; | ||
385 | struct inbuf_t *inbuf; | ||
386 | |||
387 | IFNULLRET(urb); | ||
388 | inbuf = (struct inbuf_t *) urb->context; | ||
389 | IFNULLRET(inbuf); | ||
390 | //spin_lock_irqsave(&inbuf->lock, flags); | ||
391 | cs = inbuf->cs; | ||
392 | IFNULLGOTO(cs, exit); | ||
393 | IFNULLGOTO(cardstate, exit); | ||
394 | |||
395 | if (!atomic_read(&cs->connected)) { | ||
396 | err("%s: disconnected", __func__); | ||
397 | goto exit; | ||
398 | } | ||
399 | |||
400 | if (!urb->status) { | ||
401 | numbytes = urb->actual_length; | ||
402 | |||
403 | if (numbytes) { | ||
404 | src = inbuf->rcvbuf; | ||
405 | if (unlikely(*src)) | ||
406 | warn("%s: There was no leading 0, but 0x%02x!", | ||
407 | __func__, (unsigned) *src); | ||
408 | ++src; /* skip leading 0x00 */ | ||
409 | --numbytes; | ||
410 | if (gigaset_fill_inbuf(inbuf, src, numbytes)) { | ||
411 | dbg(DEBUG_INTR, "%s-->BH", __func__); | ||
412 | gigaset_schedule_event(inbuf->cs); | ||
413 | } | ||
414 | } else | ||
415 | dbg(DEBUG_INTR, "Received zero block length"); | ||
416 | resubmit = 1; | ||
417 | } else { | ||
418 | /* The urb might have been killed. */ | ||
419 | dbg(DEBUG_ANY, "%s - nonzero read bulk status received: %d", | ||
420 | __func__, urb->status); | ||
421 | if (urb->status != -ENOENT) /* not killed */ | ||
422 | resubmit = 1; | ||
423 | } | ||
424 | exit: | ||
425 | //spin_unlock_irqrestore(&inbuf->lock, flags); | ||
426 | if (resubmit) { | ||
427 | r = usb_submit_urb(urb, SLAB_ATOMIC); | ||
428 | if (r) | ||
429 | err("error %d when resubmitting urb.", -r); | ||
430 | } | ||
431 | } | ||
432 | |||
433 | |||
434 | /* This callback routine is called when data has been transmitted to a B channel. | ||
435 | * It therefore has to check whether there is still data to transmit, which | ||
436 | * is done by calling modem_fill via the task queue. | ||
437 | * | ||
438 | */ | ||
439 | static void gigaset_write_bulk_callback(struct urb *urb, struct pt_regs *regs) | ||
440 | { | ||
441 | struct cardstate *cs = (struct cardstate *) urb->context; | ||
442 | |||
443 | IFNULLRET(cs); | ||
444 | #ifdef CONFIG_GIGASET_DEBUG | ||
445 | if (!atomic_read(&cs->connected)) { | ||
446 | err("%s:not connected", __func__); | ||
447 | return; | ||
448 | } | ||
449 | #endif | ||
450 | if (urb->status) | ||
451 | err("bulk transfer failed (status %d)", -urb->status); /* That's all we can do. Communication problems | ||
452 | are handled by timeouts or network protocols */ | ||
453 | |||
454 | atomic_set(&cs->hw.usb->busy, 0); | ||
455 | tasklet_schedule(&cs->write_tasklet); | ||
456 | } | ||
457 | |||
458 | static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) | ||
459 | { | ||
460 | struct cmdbuf_t *tcb; | ||
461 | unsigned long flags; | ||
462 | int count; | ||
463 | int status = -ENOENT; // FIXME | ||
464 | struct usb_cardstate *ucs = cs->hw.usb; | ||
465 | |||
466 | do { | ||
467 | if (!cb->len) { | ||
468 | tcb = cb; | ||
469 | |||
470 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
471 | cs->cmdbytes -= cs->curlen; | ||
472 | dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left", | ||
473 | cs->curlen, cs->cmdbytes); | ||
474 | cs->cmdbuf = cb = cb->next; | ||
475 | if (cb) { | ||
476 | cb->prev = NULL; | ||
477 | cs->curlen = cb->len; | ||
478 | } else { | ||
479 | cs->lastcmdbuf = NULL; | ||
480 | cs->curlen = 0; | ||
481 | } | ||
482 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
483 | |||
484 | if (tcb->wake_tasklet) | ||
485 | tasklet_schedule(tcb->wake_tasklet); | ||
486 | kfree(tcb); | ||
487 | } | ||
488 | if (cb) { | ||
489 | count = min(cb->len, ucs->bulk_out_size); | ||
490 | usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, | ||
491 | usb_sndbulkpipe(ucs->udev, | ||
492 | ucs->bulk_out_endpointAddr & 0x0f), | ||
493 | cb->buf + cb->offset, count, | ||
494 | gigaset_write_bulk_callback, cs); | ||
495 | |||
496 | cb->offset += count; | ||
497 | cb->len -= count; | ||
498 | atomic_set(&ucs->busy, 1); | ||
499 | dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count); | ||
500 | |||
501 | status = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC); | ||
502 | if (status) { | ||
503 | atomic_set(&ucs->busy, 0); | ||
504 | err("could not submit urb (error %d).", | ||
505 | -status); | ||
506 | cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */ | ||
507 | } | ||
508 | } | ||
509 | } while (cb && status); /* on error, try the next command //FIXME: is that OK? */ | ||
510 | |||
511 | return status; | ||
512 | } | ||
513 | |||
514 | /* Write string into transbuf and send it to modem. | ||
515 | */ | ||
516 | static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, | ||
517 | int len, struct tasklet_struct *wake_tasklet) | ||
518 | { | ||
519 | struct cmdbuf_t *cb; | ||
520 | unsigned long flags; | ||
521 | |||
522 | gigaset_dbg_buffer(atomic_read(&cs->mstate) != MS_LOCKED ? | ||
523 | DEBUG_TRANSCMD : DEBUG_LOCKCMD, | ||
524 | "CMD Transmit", len, buf, 0); | ||
525 | |||
526 | if (!atomic_read(&cs->connected)) { | ||
527 | err("%s: not connected", __func__); | ||
528 | return -ENODEV; | ||
529 | } | ||
530 | |||
531 | if (len <= 0) | ||
532 | return 0; | ||
533 | |||
534 | if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { | ||
535 | err("%s: out of memory", __func__); | ||
536 | return -ENOMEM; | ||
537 | } | ||
538 | |||
539 | memcpy(cb->buf, buf, len); | ||
540 | cb->len = len; | ||
541 | cb->offset = 0; | ||
542 | cb->next = NULL; | ||
543 | cb->wake_tasklet = wake_tasklet; | ||
544 | |||
545 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
546 | cb->prev = cs->lastcmdbuf; | ||
547 | if (cs->lastcmdbuf) | ||
548 | cs->lastcmdbuf->next = cb; | ||
549 | else { | ||
550 | cs->cmdbuf = cb; | ||
551 | cs->curlen = len; | ||
552 | } | ||
553 | cs->cmdbytes += len; | ||
554 | cs->lastcmdbuf = cb; | ||
555 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
556 | |||
557 | tasklet_schedule(&cs->write_tasklet); | ||
558 | return len; | ||
559 | } | ||
560 | |||
561 | static int gigaset_write_room(struct cardstate *cs) | ||
562 | { | ||
563 | unsigned long flags; | ||
564 | unsigned bytes; | ||
565 | |||
566 | spin_lock_irqsave(&cs->cmdlock, flags); | ||
567 | bytes = cs->cmdbytes; | ||
568 | spin_unlock_irqrestore(&cs->cmdlock, flags); | ||
569 | |||
570 | return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0; | ||
571 | } | ||
572 | |||
573 | static int gigaset_chars_in_buffer(struct cardstate *cs) | ||
574 | { | ||
575 | return cs->cmdbytes; | ||
576 | } | ||
577 | |||
578 | static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) | ||
579 | { | ||
580 | #ifdef CONFIG_GIGASET_UNDOCREQ | ||
581 | gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf, 0); | ||
582 | memcpy(cs->hw.usb->bchars, buf, 6); | ||
583 | return usb_control_msg(cs->hw.usb->udev, | ||
584 | usb_sndctrlpipe(cs->hw.usb->udev, 0), 0x19, 0x41, | ||
585 | 0, 0, cs->hw.usb->bchars, 6, 2000); | ||
586 | #else | ||
587 | return -EINVAL; | ||
588 | #endif | ||
589 | } | ||
590 | |||
591 | static int gigaset_freebcshw(struct bc_state *bcs) | ||
592 | { | ||
593 | if (!bcs->hw.usb) | ||
594 | return 0; | ||
595 | //FIXME | ||
596 | kfree(bcs->hw.usb); | ||
597 | return 1; | ||
598 | } | ||
599 | |||
600 | /* Initialize the b-channel structure */ | ||
601 | static int gigaset_initbcshw(struct bc_state *bcs) | ||
602 | { | ||
603 | bcs->hw.usb = kmalloc(sizeof(struct usb_bc_state), GFP_KERNEL); | ||
604 | if (!bcs->hw.usb) | ||
605 | return 0; | ||
606 | |||
607 | //bcs->hw.usb->trans_flg = READY_TO_TRNSMIT; /* B-Channel ready to transmit */ | ||
608 | return 1; | ||
609 | } | ||
610 | |||
611 | static void gigaset_reinitbcshw(struct bc_state *bcs) | ||
612 | { | ||
613 | } | ||
614 | |||
615 | static void gigaset_freecshw(struct cardstate *cs) | ||
616 | { | ||
617 | //FIXME | ||
618 | tasklet_kill(&cs->write_tasklet); | ||
619 | kfree(cs->hw.usb); | ||
620 | } | ||
621 | |||
622 | static int gigaset_initcshw(struct cardstate *cs) | ||
623 | { | ||
624 | struct usb_cardstate *ucs; | ||
625 | |||
626 | cs->hw.usb = ucs = | ||
627 | kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); | ||
628 | if (!ucs) | ||
629 | return 0; | ||
630 | |||
631 | ucs->bchars[0] = 0; | ||
632 | ucs->bchars[1] = 0; | ||
633 | ucs->bchars[2] = 0; | ||
634 | ucs->bchars[3] = 0; | ||
635 | ucs->bchars[4] = 0x11; | ||
636 | ucs->bchars[5] = 0x13; | ||
637 | ucs->bulk_out_buffer = NULL; | ||
638 | ucs->bulk_out_urb = NULL; | ||
639 | //ucs->urb_cmd_out = NULL; | ||
640 | ucs->read_urb = NULL; | ||
641 | tasklet_init(&cs->write_tasklet, | ||
642 | &gigaset_modem_fill, (unsigned long) cs); | ||
643 | |||
644 | return 1; | ||
645 | } | ||
646 | |||
647 | /* Write the data of the currently open skb into the modem. | ||
648 | * We have to protect against multiple calls until the | ||
649 | * completion callback has run, because we may only submit | ||
650 | * data to an endpoint once at a time. Therefore | ||
651 | * we use the "busy" flag to synchronize ... | ||
652 | */ | ||
653 | static int write_modem(struct cardstate *cs) | ||
654 | { | ||
655 | int ret; | ||
656 | int count; | ||
657 | struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ | ||
658 | struct usb_cardstate *ucs = cs->hw.usb; | ||
659 | //unsigned long flags; | ||
660 | |||
661 | IFNULLRETVAL(bcs->tx_skb, -EINVAL); | ||
662 | |||
663 | dbg(DEBUG_WRITE, "len: %d...", bcs->tx_skb->len); | ||
664 | |||
665 | ret = -ENODEV; | ||
666 | IFNULLGOTO(ucs->bulk_out_buffer, error); | ||
667 | IFNULLGOTO(ucs->bulk_out_urb, error); | ||
668 | ret = 0; | ||
669 | |||
670 | if (!bcs->tx_skb->len) { | ||
671 | dev_kfree_skb_any(bcs->tx_skb); | ||
672 | bcs->tx_skb = NULL; | ||
673 | return -EINVAL; | ||
674 | } | ||
675 | |||
676 | /* Copy data to bulk out buffer and // FIXME copying not necessary | ||
677 | * transmit data | ||
678 | */ | ||
679 | count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); | ||
680 | memcpy(ucs->bulk_out_buffer, bcs->tx_skb->data, count); | ||
681 | skb_pull(bcs->tx_skb, count); | ||
682 | |||
683 | usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, | ||
684 | usb_sndbulkpipe(ucs->udev, | ||
685 | ucs->bulk_out_endpointAddr & 0x0f), | ||
686 | ucs->bulk_out_buffer, count, | ||
687 | gigaset_write_bulk_callback, cs); | ||
688 | atomic_set(&ucs->busy, 1); | ||
689 | dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); | ||
690 | |||
691 | ret = usb_submit_urb(ucs->bulk_out_urb, SLAB_ATOMIC); | ||
692 | if (ret) { | ||
693 | err("could not submit urb (error %d).", -ret); | ||
694 | atomic_set(&ucs->busy, 0); | ||
695 | } | ||
696 | if (!bcs->tx_skb->len) { | ||
697 | /* skb sent completely */ | ||
698 | gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0? | ||
699 | |||
700 | dbg(DEBUG_INTR, | ||
701 | "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb); | ||
702 | dev_kfree_skb_any(bcs->tx_skb); | ||
703 | bcs->tx_skb = NULL; | ||
704 | } | ||
705 | |||
706 | return ret; | ||
707 | error: | ||
708 | dev_kfree_skb_any(bcs->tx_skb); | ||
709 | bcs->tx_skb = NULL; | ||
710 | return ret; | ||
711 | |||
712 | } | ||
713 | |||
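The synchronization described in the comment above depends on the bulk-out completion callback clearing the busy flag and rescheduling the write tasklet so the rest of the current skb gets sent. The real gigaset_write_bulk_callback() is defined elsewhere in this file; the following is only a sketch of its expected shape (the pt_regs parameter matches URB callbacks of this kernel generation and is an assumption here).

/* Sketch only: completion handler for the bulk-out URB submitted by
 * write_modem().  Clears the busy flag and kicks the write tasklet
 * so any remaining skb data is transmitted. */
static void write_bulk_callback_sketch(struct urb *urb, struct pt_regs *regs)
{
	struct cardstate *cs = urb->context;

	if (urb->status)
		err("bulk transfer failed (status %d)", -urb->status);

	atomic_set(&cs->hw.usb->busy, 0);
	tasklet_schedule(&cs->write_tasklet);
}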
714 | static int gigaset_probe(struct usb_interface *interface, | ||
715 | const struct usb_device_id *id) | ||
716 | { | ||
717 | int retval; | ||
718 | struct usb_device *udev = interface_to_usbdev(interface); | ||
719 | unsigned int ifnum; | ||
720 | struct usb_host_interface *hostif; | ||
721 | struct cardstate *cs = NULL; | ||
722 | struct usb_cardstate *ucs = NULL; | ||
723 | //struct usb_interface_descriptor *iface_desc; | ||
724 | struct usb_endpoint_descriptor *endpoint; | ||
725 | //isdn_ctrl command; | ||
726 | int buffer_size; | ||
727 | int alt; | ||
728 | //unsigned long flags; | ||
729 | |||
730 | info("%s: Check if device matches .. (Vendor: 0x%x, Product: 0x%x)", | ||
731 | __func__, le16_to_cpu(udev->descriptor.idVendor), | ||
732 | le16_to_cpu(udev->descriptor.idProduct)); | ||
733 | |||
734 | retval = -ENODEV; //FIXME | ||
735 | |||
736 | /* See if the device offered us matches what we can accept */ | ||
737 | if (le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID || | ||
738 | le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID) | ||
739 | return -ENODEV; | ||
740 | |||
741 | /* this starts to become ascii art... */ | ||
742 | hostif = interface->cur_altsetting; | ||
743 | alt = hostif->desc.bAlternateSetting; | ||
744 | ifnum = hostif->desc.bInterfaceNumber; // FIXME ? | ||
745 | |||
746 | if (alt != 0 || ifnum != 0) { | ||
747 | warn("ifnum %d, alt %d", ifnum, alt); | ||
748 | return -ENODEV; | ||
749 | } | ||
750 | |||
751 | /* Reject application-specific interfaces | ||
752 | * | ||
753 | */ | ||
754 | if (hostif->desc.bInterfaceClass != 255) { | ||
755 | info("%s: Device matched, but iface_desc[%d]->bInterfaceClass==%d !", | ||
756 | __func__, ifnum, hostif->desc.bInterfaceClass); | ||
757 | return -ENODEV; | ||
758 | } | ||
759 | |||
760 | info("%s: Device matched ... !", __func__); | ||
761 | |||
762 | cs = gigaset_getunassignedcs(driver); | ||
763 | if (!cs) { | ||
764 | warn("No free cardstate!"); | ||
765 | return -ENODEV; | ||
766 | } | ||
767 | ucs = cs->hw.usb; | ||
768 | |||
769 | #if 0 | ||
770 | if (usb_set_configuration(udev, udev->config[0].desc.bConfigurationValue) < 0) { | ||
771 | warn("set_configuration failed"); | ||
772 | goto error; | ||
773 | } | ||
774 | |||
775 | |||
776 | if (usb_set_interface(udev, ifnum/*==0*/, alt/*==0*/) < 0) { | ||
777 | warn("usb_set_interface failed, device %d interface %d altsetting %d", | ||
778 | udev->devnum, ifnum, alt); | ||
779 | goto error; | ||
780 | } | ||
781 | #endif | ||
782 | |||
783 | /* set up the endpoint information */ | ||
784 | /* check out the endpoints */ | ||
785 | * We will get 2 endpoints: one for sending commands to the device (bulk out) and one for | ||
786 | * polling messages from the device (int in). | ||
787 | * This is almost the same situation as with our serial port handler. | ||
788 | * If a connection is established, we will have to create data in/out pipes | ||
789 | * dynamically... | ||
790 | */ | ||
791 | |||
792 | endpoint = &hostif->endpoint[0].desc; | ||
793 | |||
794 | buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); | ||
795 | ucs->bulk_out_size = buffer_size; | ||
796 | ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress; | ||
797 | ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); | ||
798 | if (!ucs->bulk_out_buffer) { | ||
799 | err("Couldn't allocate bulk_out_buffer"); | ||
800 | retval = -ENOMEM; | ||
801 | goto error; | ||
802 | } | ||
803 | |||
804 | ucs->bulk_out_urb = usb_alloc_urb(0, SLAB_KERNEL); | ||
805 | if (!ucs->bulk_out_urb) { | ||
806 | err("Couldn't allocate bulk_out_buffer"); | ||
807 | retval = -ENOMEM; | ||
808 | goto error; | ||
809 | } | ||
810 | |||
811 | endpoint = &hostif->endpoint[1].desc; | ||
812 | |||
813 | atomic_set(&ucs->busy, 0); | ||
814 | ucs->udev = udev; | ||
815 | ucs->interface = interface; | ||
816 | |||
817 | ucs->read_urb = usb_alloc_urb(0, SLAB_KERNEL); | ||
818 | if (!ucs->read_urb) { | ||
819 | err("No free urbs available"); | ||
820 | retval = -ENOMEM; | ||
821 | goto error; | ||
822 | } | ||
823 | buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); | ||
824 | ucs->rcvbuf_size = buffer_size; | ||
825 | ucs->int_in_endpointAddr = endpoint->bEndpointAddress; | ||
826 | cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL); | ||
827 | if (!cs->inbuf[0].rcvbuf) { | ||
828 | err("Couldn't allocate rcvbuf"); | ||
829 | retval = -ENOMEM; | ||
830 | goto error; | ||
831 | } | ||
832 | /* Fill the interrupt urb and send it to the core */ | ||
833 | usb_fill_int_urb(ucs->read_urb, udev, | ||
834 | usb_rcvintpipe(udev, | ||
835 | endpoint->bEndpointAddress & 0x0f), | ||
836 | cs->inbuf[0].rcvbuf, buffer_size, | ||
837 | gigaset_read_int_callback, | ||
838 | cs->inbuf + 0, endpoint->bInterval); | ||
839 | |||
840 | retval = usb_submit_urb(ucs->read_urb, SLAB_KERNEL); | ||
841 | if (retval) { | ||
842 | err("Could not submit URB!"); | ||
843 | goto error; | ||
844 | } | ||
845 | |||
846 | /* tell common part that the device is ready */ | ||
847 | if (startmode == SM_LOCKED) | ||
848 | atomic_set(&cs->mstate, MS_LOCKED); | ||
849 | if (!gigaset_start(cs)) { | ||
850 | tasklet_kill(&cs->write_tasklet); | ||
851 | retval = -ENODEV; //FIXME | ||
852 | goto error; | ||
853 | } | ||
854 | |||
855 | /* save address of controller structure */ | ||
856 | usb_set_intfdata(interface, cs); | ||
857 | |||
858 | /* set up device sysfs */ | ||
859 | gigaset_init_dev_sysfs(interface); | ||
860 | return 0; | ||
861 | |||
862 | error: | ||
863 | if (ucs->read_urb) | ||
864 | usb_kill_urb(ucs->read_urb); | ||
865 | kfree(ucs->bulk_out_buffer); | ||
866 | if (ucs->bulk_out_urb != NULL) | ||
867 | usb_free_urb(ucs->bulk_out_urb); | ||
868 | kfree(cs->inbuf[0].rcvbuf); | ||
869 | if (ucs->read_urb != NULL) | ||
870 | usb_free_urb(ucs->read_urb); | ||
871 | ucs->read_urb = ucs->bulk_out_urb = NULL; | ||
872 | cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; | ||
873 | gigaset_unassign(cs); | ||
874 | return retval; | ||
875 | } | ||
876 | |||
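Note that the probe routine above uses endpoint[0] as the bulk-out and endpoint[1] as the interrupt-in endpoint without inspecting their descriptors. A defensive variant could verify the layout first; the sketch below assumes the usb_endpoint_is_bulk_out()/usb_endpoint_is_int_in() helpers, which may not exist in the kernel version this patch targets.

/* Sketch only: verify the expected endpoint layout before using it. */
static int check_endpoints_sketch(struct usb_host_interface *hostif)
{
	struct usb_endpoint_descriptor *out = &hostif->endpoint[0].desc;
	struct usb_endpoint_descriptor *in  = &hostif->endpoint[1].desc;

	if (hostif->desc.bNumEndpoints < 2)
		return -ENODEV;
	if (!usb_endpoint_is_bulk_out(out) || !usb_endpoint_is_int_in(in))
		return -ENODEV;		/* unexpected layout, refuse to bind */
	return 0;
}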
877 | /** | ||
878 | * gigaset_disconnect | ||
879 | */ | ||
880 | static void gigaset_disconnect(struct usb_interface *interface) | ||
881 | { | ||
882 | struct cardstate *cs; | ||
883 | struct usb_cardstate *ucs; | ||
884 | |||
885 | cs = usb_get_intfdata(interface); | ||
886 | |||
887 | /* clear device sysfs */ | ||
888 | gigaset_free_dev_sysfs(interface); | ||
889 | |||
890 | usb_set_intfdata(interface, NULL); | ||
891 | ucs = cs->hw.usb; | ||
892 | usb_kill_urb(ucs->read_urb); | ||
893 | //info("GigaSet USB device #%d will be disconnected", minor); | ||
894 | |||
895 | gigaset_stop(cs); | ||
896 | |||
897 | tasklet_kill(&cs->write_tasklet); | ||
898 | |||
899 | usb_kill_urb(ucs->bulk_out_urb); /* FIXME: only if necessary */ | ||
900 | //usb_kill_urb(ucs->urb_cmd_out); /* FIXME: only if necessary */ | ||
901 | |||
902 | kfree(ucs->bulk_out_buffer); | ||
903 | if (ucs->bulk_out_urb != NULL) | ||
904 | usb_free_urb(ucs->bulk_out_urb); | ||
905 | //if(ucs->urb_cmd_out != NULL) | ||
906 | // usb_free_urb(ucs->urb_cmd_out); | ||
907 | kfree(cs->inbuf[0].rcvbuf); | ||
908 | if (ucs->read_urb != NULL) | ||
909 | usb_free_urb(ucs->read_urb); | ||
910 | ucs->read_urb = ucs->bulk_out_urb /* = ucs->urb_cmd_out */ = NULL; | ||
911 | cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; | ||
912 | |||
913 | gigaset_unassign(cs); | ||
914 | } | ||
915 | |||
916 | static struct gigaset_ops ops = { | ||
917 | gigaset_write_cmd, | ||
918 | gigaset_write_room, | ||
919 | gigaset_chars_in_buffer, | ||
920 | gigaset_brkchars, | ||
921 | gigaset_init_bchannel, | ||
922 | gigaset_close_bchannel, | ||
923 | gigaset_initbcshw, | ||
924 | gigaset_freebcshw, | ||
925 | gigaset_reinitbcshw, | ||
926 | gigaset_initcshw, | ||
927 | gigaset_freecshw, | ||
928 | gigaset_set_modem_ctrl, | ||
929 | gigaset_baud_rate, | ||
930 | gigaset_set_line_ctrl, | ||
931 | gigaset_m10x_send_skb, | ||
932 | gigaset_m10x_input, | ||
933 | }; | ||
934 | |||
935 | /** | ||
936 | * usb_gigaset_init | ||
937 | * This function is called when the kernel module is loaded | ||
938 | */ | ||
939 | static int __init usb_gigaset_init(void) | ||
940 | { | ||
941 | int result; | ||
942 | |||
943 | /* allocate memory for our driver state and initialize it */ | ||
944 | if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, | ||
945 | GIGASET_MODULENAME, GIGASET_DEVNAME, | ||
946 | GIGASET_DEVFSNAME, &ops, | ||
947 | THIS_MODULE)) == NULL) | ||
948 | goto error; | ||
949 | |||
950 | /* allocate memory for our device state and initialize it */ | ||
951 | cardstate = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); | ||
952 | if (!cardstate) | ||
953 | goto error; | ||
954 | |||
955 | /* register this driver with the USB subsystem */ | ||
956 | result = usb_register(&gigaset_usb_driver); | ||
957 | if (result < 0) { | ||
958 | err("usb_gigaset: usb_register failed (error %d)", | ||
959 | -result); | ||
960 | goto error; | ||
961 | } | ||
962 | |||
963 | info(DRIVER_AUTHOR); | ||
964 | info(DRIVER_DESC); | ||
965 | return 0; | ||
966 | |||
967 | error: if (cardstate) | ||
968 | gigaset_freecs(cardstate); | ||
969 | cardstate = NULL; | ||
970 | if (driver) | ||
971 | gigaset_freedriver(driver); | ||
972 | driver = NULL; | ||
973 | return -1; | ||
974 | } | ||
975 | |||
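usb_register() above hands gigaset_probe() and gigaset_disconnect() to the USB core through gigaset_usb_driver and a device ID table, both defined earlier in this file and not visible in this hunk. For orientation only, such glue typically has the following shape; the field values shown are assumptions.

/* Sketch only: the driver/ID glue consumed by usb_register().
 * The real definitions live earlier in usb-gigaset.c. */
static const struct usb_device_id gigaset_table_sketch[] = {
	{ USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
	{ }					/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, gigaset_table_sketch);

static struct usb_driver gigaset_usb_driver_sketch = {
	.name		= GIGASET_MODULENAME,
	.probe		= gigaset_probe,
	.disconnect	= gigaset_disconnect,
	.id_table	= gigaset_table_sketch,
};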
976 | |||
977 | /** | ||
978 | * usb_gigaset_exit | ||
979 | * This function is called when the kernel module is unloaded | ||
980 | */ | ||
981 | static void __exit usb_gigaset_exit(void) | ||
982 | { | ||
983 | gigaset_blockdriver(driver); /* => probe will fail | ||
984 | * => no gigaset_start any more | ||
985 | */ | ||
986 | |||
987 | gigaset_shutdown(cardstate); | ||
988 | /* from now on, no isdn callback should be possible */ | ||
989 | |||
990 | /* deregister this driver with the USB subsystem */ | ||
991 | usb_deregister(&gigaset_usb_driver); | ||
992 | /* this will call the disconnect-callback */ | ||
993 | /* from now on, no disconnect/probe callback should be running */ | ||
994 | |||
995 | gigaset_freecs(cardstate); | ||
996 | cardstate = NULL; | ||
997 | gigaset_freedriver(driver); | ||
998 | driver = NULL; | ||
999 | } | ||
1000 | |||
1001 | |||
1002 | module_init(usb_gigaset_init); | ||
1003 | module_exit(usb_gigaset_exit); | ||
1004 | |||
1005 | MODULE_AUTHOR(DRIVER_AUTHOR); | ||
1006 | MODULE_DESCRIPTION(DRIVER_DESC); | ||
1007 | |||
1008 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/isdn/hardware/avm/avmcard.h b/drivers/isdn/hardware/avm/avmcard.h index 296d6a6f749f..3b431723c7cb 100644 --- a/drivers/isdn/hardware/avm/avmcard.h +++ b/drivers/isdn/hardware/avm/avmcard.h | |||
@@ -437,9 +437,7 @@ static inline unsigned int t1_get_slice(unsigned int base, | |||
437 | #endif | 437 | #endif |
438 | dp += i; | 438 | dp += i; |
439 | i = 0; | 439 | i = 0; |
440 | if (i == 0) | 440 | break; |
441 | break; | ||
442 | /* fall through */ | ||
443 | default: | 441 | default: |
444 | *dp++ = b1_get_byte(base); | 442 | *dp++ = b1_get_byte(base); |
445 | i--; | 443 | i--; |
diff --git a/drivers/isdn/i4l/Kconfig b/drivers/isdn/i4l/Kconfig index 1789b607f090..a4f7288a1fc8 100644 --- a/drivers/isdn/i4l/Kconfig +++ b/drivers/isdn/i4l/Kconfig | |||
@@ -139,3 +139,4 @@ source "drivers/isdn/hysdn/Kconfig" | |||
139 | 139 | ||
140 | endmenu | 140 | endmenu |
141 | 141 | ||
142 | source "drivers/isdn/gigaset/Kconfig" | ||
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 4eb05d7143d8..f4516ca7aa3a 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/sysdev.h> | 36 | #include <linux/sysdev.h> |
37 | #include <linux/poll.h> | 37 | #include <linux/poll.h> |
38 | #include <linux/mutex.h> | ||
38 | 39 | ||
39 | #include <asm/byteorder.h> | 40 | #include <asm/byteorder.h> |
40 | #include <asm/io.h> | 41 | #include <asm/io.h> |
@@ -92,7 +93,7 @@ struct smu_device { | |||
92 | * for now, just hard code that | 93 | * for now, just hard code that |
93 | */ | 94 | */ |
94 | static struct smu_device *smu; | 95 | static struct smu_device *smu; |
95 | static DECLARE_MUTEX(smu_part_access); | 96 | static DEFINE_MUTEX(smu_part_access); |
96 | 97 | ||
97 | static void smu_i2c_retry(unsigned long data); | 98 | static void smu_i2c_retry(unsigned long data); |
98 | 99 | ||
@@ -976,11 +977,11 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, | |||
976 | 977 | ||
977 | if (interruptible) { | 978 | if (interruptible) { |
978 | int rc; | 979 | int rc; |
979 | rc = down_interruptible(&smu_part_access); | 980 | rc = mutex_lock_interruptible(&smu_part_access); |
980 | if (rc) | 981 | if (rc) |
981 | return ERR_PTR(rc); | 982 | return ERR_PTR(rc); |
982 | } else | 983 | } else |
983 | down(&smu_part_access); | 984 | mutex_lock(&smu_part_access); |
984 | 985 | ||
985 | part = (struct smu_sdbp_header *)get_property(smu->of_node, | 986 | part = (struct smu_sdbp_header *)get_property(smu->of_node, |
986 | pname, size); | 987 | pname, size); |
@@ -990,7 +991,7 @@ struct smu_sdbp_header *__smu_get_sdb_partition(int id, unsigned int *size, | |||
990 | if (part != NULL && size) | 991 | if (part != NULL && size) |
991 | *size = part->len << 2; | 992 | *size = part->len << 2; |
992 | } | 993 | } |
993 | up(&smu_part_access); | 994 | mutex_unlock(&smu_part_access); |
994 | return part; | 995 | return part; |
995 | } | 996 | } |
996 | 997 | ||
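The smu.c hunk above, like the kcopyd and dscc4 hunks further down, applies the same mechanical conversion of a binary semaphore used purely as a lock into a proper mutex. A self-contained sketch of the resulting pattern, with names invented for the example:

#include <linux/mutex.h>

/* DECLARE_MUTEX()/down()/up() on a semaphore becomes
 * DEFINE_MUTEX()/mutex_lock()/mutex_unlock(). */
static DEFINE_MUTEX(example_lock);	/* was: static DECLARE_MUTEX(example_lock); */

static int example_critical_section(int interruptible)
{
	if (interruptible) {
		int rc = mutex_lock_interruptible(&example_lock); /* was: down_interruptible() */
		if (rc)
			return rc;
	} else
		mutex_lock(&example_lock);			  /* was: down() */

	/* ... touch the shared state here ... */

	mutex_unlock(&example_lock);				  /* was: up() */
	return 0;
}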
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index e1c18aa1d712..f8ffaee20ff8 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c | |||
@@ -89,16 +89,6 @@ int bitmap_active(struct bitmap *bitmap) | |||
89 | } | 89 | } |
90 | 90 | ||
91 | #define WRITE_POOL_SIZE 256 | 91 | #define WRITE_POOL_SIZE 256 |
92 | /* mempool for queueing pending writes on the bitmap file */ | ||
93 | static void *write_pool_alloc(gfp_t gfp_flags, void *data) | ||
94 | { | ||
95 | return kmalloc(sizeof(struct page_list), gfp_flags); | ||
96 | } | ||
97 | |||
98 | static void write_pool_free(void *ptr, void *data) | ||
99 | { | ||
100 | kfree(ptr); | ||
101 | } | ||
102 | 92 | ||
103 | /* | 93 | /* |
104 | * just a placeholder - calls kmalloc for bitmap pages | 94 | * just a placeholder - calls kmalloc for bitmap pages |
@@ -1564,8 +1554,8 @@ int bitmap_create(mddev_t *mddev) | |||
1564 | spin_lock_init(&bitmap->write_lock); | 1554 | spin_lock_init(&bitmap->write_lock); |
1565 | INIT_LIST_HEAD(&bitmap->complete_pages); | 1555 | INIT_LIST_HEAD(&bitmap->complete_pages); |
1566 | init_waitqueue_head(&bitmap->write_wait); | 1556 | init_waitqueue_head(&bitmap->write_wait); |
1567 | bitmap->write_pool = mempool_create(WRITE_POOL_SIZE, write_pool_alloc, | 1557 | bitmap->write_pool = mempool_create_kmalloc_pool(WRITE_POOL_SIZE, |
1568 | write_pool_free, NULL); | 1558 | sizeof(struct page_list)); |
1569 | err = -ENOMEM; | 1559 | err = -ENOMEM; |
1570 | if (!bitmap->write_pool) | 1560 | if (!bitmap->write_pool) |
1571 | goto error; | 1561 | goto error; |
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index e7a650f9ca07..259e86f26549 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -94,20 +94,6 @@ struct crypt_config { | |||
94 | static kmem_cache_t *_crypt_io_pool; | 94 | static kmem_cache_t *_crypt_io_pool; |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Mempool alloc and free functions for the page | ||
98 | */ | ||
99 | static void *mempool_alloc_page(gfp_t gfp_mask, void *data) | ||
100 | { | ||
101 | return alloc_page(gfp_mask); | ||
102 | } | ||
103 | |||
104 | static void mempool_free_page(void *page, void *data) | ||
105 | { | ||
106 | __free_page(page); | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * Different IV generation algorithms: | 97 | * Different IV generation algorithms: |
112 | * | 98 | * |
113 | * plain: the initial vector is the 32-bit low-endian version of the sector | 99 | * plain: the initial vector is the 32-bit low-endian version of the sector |
@@ -630,15 +616,13 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
630 | } | 616 | } |
631 | } | 617 | } |
632 | 618 | ||
633 | cc->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | 619 | cc->io_pool = mempool_create_slab_pool(MIN_IOS, _crypt_io_pool); |
634 | mempool_free_slab, _crypt_io_pool); | ||
635 | if (!cc->io_pool) { | 620 | if (!cc->io_pool) { |
636 | ti->error = PFX "Cannot allocate crypt io mempool"; | 621 | ti->error = PFX "Cannot allocate crypt io mempool"; |
637 | goto bad3; | 622 | goto bad3; |
638 | } | 623 | } |
639 | 624 | ||
640 | cc->page_pool = mempool_create(MIN_POOL_PAGES, mempool_alloc_page, | 625 | cc->page_pool = mempool_create_page_pool(MIN_POOL_PAGES, 0); |
641 | mempool_free_page, NULL); | ||
642 | if (!cc->page_pool) { | 626 | if (!cc->page_pool) { |
643 | ti->error = PFX "Cannot allocate page mempool"; | 627 | ti->error = PFX "Cannot allocate page mempool"; |
644 | goto bad4; | 628 | goto bad4; |
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 4809b209fbb1..da663d2ff552 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -32,16 +32,6 @@ struct io { | |||
32 | static unsigned _num_ios; | 32 | static unsigned _num_ios; |
33 | static mempool_t *_io_pool; | 33 | static mempool_t *_io_pool; |
34 | 34 | ||
35 | static void *alloc_io(gfp_t gfp_mask, void *pool_data) | ||
36 | { | ||
37 | return kmalloc(sizeof(struct io), gfp_mask); | ||
38 | } | ||
39 | |||
40 | static void free_io(void *element, void *pool_data) | ||
41 | { | ||
42 | kfree(element); | ||
43 | } | ||
44 | |||
45 | static unsigned int pages_to_ios(unsigned int pages) | 35 | static unsigned int pages_to_ios(unsigned int pages) |
46 | { | 36 | { |
47 | return 4 * pages; /* too many ? */ | 37 | return 4 * pages; /* too many ? */ |
@@ -65,7 +55,8 @@ static int resize_pool(unsigned int new_ios) | |||
65 | 55 | ||
66 | } else { | 56 | } else { |
67 | /* create new pool */ | 57 | /* create new pool */ |
68 | _io_pool = mempool_create(new_ios, alloc_io, free_io, NULL); | 58 | _io_pool = mempool_create_kmalloc_pool(new_ios, |
59 | sizeof(struct io)); | ||
69 | if (!_io_pool) | 60 | if (!_io_pool) |
70 | return -ENOMEM; | 61 | return -ENOMEM; |
71 | 62 | ||
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index f72a82fb9434..1816f30678ed 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c | |||
@@ -179,8 +179,7 @@ static struct multipath *alloc_multipath(void) | |||
179 | m->queue_io = 1; | 179 | m->queue_io = 1; |
180 | INIT_WORK(&m->process_queued_ios, process_queued_ios, m); | 180 | INIT_WORK(&m->process_queued_ios, process_queued_ios, m); |
181 | INIT_WORK(&m->trigger_event, trigger_event, m); | 181 | INIT_WORK(&m->trigger_event, trigger_event, m); |
182 | m->mpio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | 182 | m->mpio_pool = mempool_create_slab_pool(MIN_IOS, _mpio_cache); |
183 | mempool_free_slab, _mpio_cache); | ||
184 | if (!m->mpio_pool) { | 183 | if (!m->mpio_pool) { |
185 | kfree(m); | 184 | kfree(m); |
186 | return NULL; | 185 | return NULL; |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 6cfa8d435d55..4e90f231fbfb 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -122,16 +122,6 @@ static inline sector_t region_to_sector(struct region_hash *rh, region_t region) | |||
122 | /* FIXME move this */ | 122 | /* FIXME move this */ |
123 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); | 123 | static void queue_bio(struct mirror_set *ms, struct bio *bio, int rw); |
124 | 124 | ||
125 | static void *region_alloc(gfp_t gfp_mask, void *pool_data) | ||
126 | { | ||
127 | return kmalloc(sizeof(struct region), gfp_mask); | ||
128 | } | ||
129 | |||
130 | static void region_free(void *element, void *pool_data) | ||
131 | { | ||
132 | kfree(element); | ||
133 | } | ||
134 | |||
135 | #define MIN_REGIONS 64 | 125 | #define MIN_REGIONS 64 |
136 | #define MAX_RECOVERY 1 | 126 | #define MAX_RECOVERY 1 |
137 | static int rh_init(struct region_hash *rh, struct mirror_set *ms, | 127 | static int rh_init(struct region_hash *rh, struct mirror_set *ms, |
@@ -173,8 +163,8 @@ static int rh_init(struct region_hash *rh, struct mirror_set *ms, | |||
173 | INIT_LIST_HEAD(&rh->quiesced_regions); | 163 | INIT_LIST_HEAD(&rh->quiesced_regions); |
174 | INIT_LIST_HEAD(&rh->recovered_regions); | 164 | INIT_LIST_HEAD(&rh->recovered_regions); |
175 | 165 | ||
176 | rh->region_pool = mempool_create(MIN_REGIONS, region_alloc, | 166 | rh->region_pool = mempool_create_kmalloc_pool(MIN_REGIONS, |
177 | region_free, NULL); | 167 | sizeof(struct region)); |
178 | if (!rh->region_pool) { | 168 | if (!rh->region_pool) { |
179 | vfree(rh->buckets); | 169 | vfree(rh->buckets); |
180 | rh->buckets = NULL; | 170 | rh->buckets = NULL; |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f3759dd7828e..7401540086df 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1174,8 +1174,7 @@ static int __init dm_snapshot_init(void) | |||
1174 | goto bad4; | 1174 | goto bad4; |
1175 | } | 1175 | } |
1176 | 1176 | ||
1177 | pending_pool = mempool_create(128, mempool_alloc_slab, | 1177 | pending_pool = mempool_create_slab_pool(128, pending_cache); |
1178 | mempool_free_slab, pending_cache); | ||
1179 | if (!pending_pool) { | 1178 | if (!pending_pool) { |
1180 | DMERR("Couldn't create pending pool."); | 1179 | DMERR("Couldn't create pending pool."); |
1181 | r = -ENOMEM; | 1180 | r = -ENOMEM; |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 8c82373f7ff3..a64798ef481e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -823,13 +823,11 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) | |||
823 | md->queue->unplug_fn = dm_unplug_all; | 823 | md->queue->unplug_fn = dm_unplug_all; |
824 | md->queue->issue_flush_fn = dm_flush_all; | 824 | md->queue->issue_flush_fn = dm_flush_all; |
825 | 825 | ||
826 | md->io_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | 826 | md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache); |
827 | mempool_free_slab, _io_cache); | ||
828 | if (!md->io_pool) | 827 | if (!md->io_pool) |
829 | goto bad2; | 828 | goto bad2; |
830 | 829 | ||
831 | md->tio_pool = mempool_create(MIN_IOS, mempool_alloc_slab, | 830 | md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache); |
832 | mempool_free_slab, _tio_cache); | ||
833 | if (!md->tio_pool) | 831 | if (!md->tio_pool) |
834 | goto bad3; | 832 | goto bad3; |
835 | 833 | ||
diff --git a/drivers/md/kcopyd.c b/drivers/md/kcopyd.c index 8b3515f394a6..9dcb2c8a3853 100644 --- a/drivers/md/kcopyd.c +++ b/drivers/md/kcopyd.c | |||
@@ -227,8 +227,7 @@ static int jobs_init(void) | |||
227 | if (!_job_cache) | 227 | if (!_job_cache) |
228 | return -ENOMEM; | 228 | return -ENOMEM; |
229 | 229 | ||
230 | _job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab, | 230 | _job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache); |
231 | mempool_free_slab, _job_cache); | ||
232 | if (!_job_pool) { | 231 | if (!_job_pool) { |
233 | kmem_cache_destroy(_job_cache); | 232 | kmem_cache_destroy(_job_cache); |
234 | return -ENOMEM; | 233 | return -ENOMEM; |
@@ -590,51 +589,51 @@ static void client_del(struct kcopyd_client *kc) | |||
590 | up(&_client_lock); | 589 | up(&_client_lock); |
591 | } | 590 | } |
592 | 591 | ||
593 | static DECLARE_MUTEX(kcopyd_init_lock); | 592 | static DEFINE_MUTEX(kcopyd_init_lock); |
594 | static int kcopyd_clients = 0; | 593 | static int kcopyd_clients = 0; |
595 | 594 | ||
596 | static int kcopyd_init(void) | 595 | static int kcopyd_init(void) |
597 | { | 596 | { |
598 | int r; | 597 | int r; |
599 | 598 | ||
600 | down(&kcopyd_init_lock); | 599 | mutex_lock(&kcopyd_init_lock); |
601 | 600 | ||
602 | if (kcopyd_clients) { | 601 | if (kcopyd_clients) { |
603 | /* Already initialized. */ | 602 | /* Already initialized. */ |
604 | kcopyd_clients++; | 603 | kcopyd_clients++; |
605 | up(&kcopyd_init_lock); | 604 | mutex_unlock(&kcopyd_init_lock); |
606 | return 0; | 605 | return 0; |
607 | } | 606 | } |
608 | 607 | ||
609 | r = jobs_init(); | 608 | r = jobs_init(); |
610 | if (r) { | 609 | if (r) { |
611 | up(&kcopyd_init_lock); | 610 | mutex_unlock(&kcopyd_init_lock); |
612 | return r; | 611 | return r; |
613 | } | 612 | } |
614 | 613 | ||
615 | _kcopyd_wq = create_singlethread_workqueue("kcopyd"); | 614 | _kcopyd_wq = create_singlethread_workqueue("kcopyd"); |
616 | if (!_kcopyd_wq) { | 615 | if (!_kcopyd_wq) { |
617 | jobs_exit(); | 616 | jobs_exit(); |
618 | up(&kcopyd_init_lock); | 617 | mutex_unlock(&kcopyd_init_lock); |
619 | return -ENOMEM; | 618 | return -ENOMEM; |
620 | } | 619 | } |
621 | 620 | ||
622 | kcopyd_clients++; | 621 | kcopyd_clients++; |
623 | INIT_WORK(&_kcopyd_work, do_work, NULL); | 622 | INIT_WORK(&_kcopyd_work, do_work, NULL); |
624 | up(&kcopyd_init_lock); | 623 | mutex_unlock(&kcopyd_init_lock); |
625 | return 0; | 624 | return 0; |
626 | } | 625 | } |
627 | 626 | ||
628 | static void kcopyd_exit(void) | 627 | static void kcopyd_exit(void) |
629 | { | 628 | { |
630 | down(&kcopyd_init_lock); | 629 | mutex_lock(&kcopyd_init_lock); |
631 | kcopyd_clients--; | 630 | kcopyd_clients--; |
632 | if (!kcopyd_clients) { | 631 | if (!kcopyd_clients) { |
633 | jobs_exit(); | 632 | jobs_exit(); |
634 | destroy_workqueue(_kcopyd_wq); | 633 | destroy_workqueue(_kcopyd_wq); |
635 | _kcopyd_wq = NULL; | 634 | _kcopyd_wq = NULL; |
636 | } | 635 | } |
637 | up(&kcopyd_init_lock); | 636 | mutex_unlock(&kcopyd_init_lock); |
638 | } | 637 | } |
639 | 638 | ||
640 | int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) | 639 | int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result) |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 96f7af4ae400..1cc9de44ce86 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -35,18 +35,6 @@ | |||
35 | #define NR_RESERVED_BUFS 32 | 35 | #define NR_RESERVED_BUFS 32 |
36 | 36 | ||
37 | 37 | ||
38 | static void *mp_pool_alloc(gfp_t gfp_flags, void *data) | ||
39 | { | ||
40 | struct multipath_bh *mpb; | ||
41 | mpb = kzalloc(sizeof(*mpb), gfp_flags); | ||
42 | return mpb; | ||
43 | } | ||
44 | |||
45 | static void mp_pool_free(void *mpb, void *data) | ||
46 | { | ||
47 | kfree(mpb); | ||
48 | } | ||
49 | |||
50 | static int multipath_map (multipath_conf_t *conf) | 38 | static int multipath_map (multipath_conf_t *conf) |
51 | { | 39 | { |
52 | int i, disks = conf->raid_disks; | 40 | int i, disks = conf->raid_disks; |
@@ -494,9 +482,8 @@ static int multipath_run (mddev_t *mddev) | |||
494 | } | 482 | } |
495 | mddev->degraded = conf->raid_disks = conf->working_disks; | 483 | mddev->degraded = conf->raid_disks = conf->working_disks; |
496 | 484 | ||
497 | conf->pool = mempool_create(NR_RESERVED_BUFS, | 485 | conf->pool = mempool_create_kzalloc_pool(NR_RESERVED_BUFS, |
498 | mp_pool_alloc, mp_pool_free, | 486 | sizeof(struct multipath_bh)); |
499 | NULL); | ||
500 | if (conf->pool == NULL) { | 487 | if (conf->pool == NULL) { |
501 | printk(KERN_ERR | 488 | printk(KERN_ERR |
502 | "multipath: couldn't allocate memory for %s\n", | 489 | "multipath: couldn't allocate memory for %s\n", |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index b09fb6307153..7d4c5497785b 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -1179,10 +1179,9 @@ static int __init i2o_block_init(void) | |||
1179 | goto exit; | 1179 | goto exit; |
1180 | } | 1180 | } |
1181 | 1181 | ||
1182 | i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, | 1182 | i2o_blk_req_pool.pool = |
1183 | mempool_alloc_slab, | 1183 | mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE, |
1184 | mempool_free_slab, | 1184 | i2o_blk_req_pool.slab); |
1185 | i2o_blk_req_pool.slab); | ||
1186 | if (!i2o_blk_req_pool.pool) { | 1185 | if (!i2o_blk_req_pool.pool) { |
1187 | osm_err("can't init request mempool\n"); | 1186 | osm_err("can't init request mempool\n"); |
1188 | rc = -ENOMEM; | 1187 | rc = -ENOMEM; |
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index d339308539fa..70f63891b19c 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c | |||
@@ -196,8 +196,6 @@ | |||
196 | 196 | ||
197 | 197 | ||
198 | #define DRV_NAME "3c59x" | 198 | #define DRV_NAME "3c59x" |
199 | #define DRV_VERSION "LK1.1.19" | ||
200 | #define DRV_RELDATE "10 Nov 2002" | ||
201 | 199 | ||
202 | 200 | ||
203 | 201 | ||
@@ -275,10 +273,8 @@ static char version[] __devinitdata = | |||
275 | DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; | 273 | DRV_NAME ": Donald Becker and others. www.scyld.com/network/vortex.html\n"; |
276 | 274 | ||
277 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); | 275 | MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); |
278 | MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver " | 276 | MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver "); |
279 | DRV_VERSION " " DRV_RELDATE); | ||
280 | MODULE_LICENSE("GPL"); | 277 | MODULE_LICENSE("GPL"); |
281 | MODULE_VERSION(DRV_VERSION); | ||
282 | 278 | ||
283 | 279 | ||
284 | /* Operational parameter that usually are not changed. */ | 280 | /* Operational parameter that usually are not changed. */ |
@@ -904,7 +900,6 @@ static void acpi_set_WOL(struct net_device *dev); | |||
904 | static struct ethtool_ops vortex_ethtool_ops; | 900 | static struct ethtool_ops vortex_ethtool_ops; |
905 | static void set_8021q_mode(struct net_device *dev, int enable); | 901 | static void set_8021q_mode(struct net_device *dev, int enable); |
906 | 902 | ||
907 | |||
908 | /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ | 903 | /* This driver uses 'options' to pass the media type, full-duplex flag, etc. */ |
909 | /* Option count limit only -- unlimited interfaces are supported. */ | 904 | /* Option count limit only -- unlimited interfaces are supported. */ |
910 | #define MAX_UNITS 8 | 905 | #define MAX_UNITS 8 |
@@ -919,8 +914,6 @@ static int global_full_duplex = -1; | |||
919 | static int global_enable_wol = -1; | 914 | static int global_enable_wol = -1; |
920 | static int global_use_mmio = -1; | 915 | static int global_use_mmio = -1; |
921 | 916 | ||
922 | /* #define dev_alloc_skb dev_alloc_skb_debug */ | ||
923 | |||
924 | /* Variables to work-around the Compaq PCI BIOS32 problem. */ | 917 | /* Variables to work-around the Compaq PCI BIOS32 problem. */ |
925 | static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; | 918 | static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900; |
926 | static struct net_device *compaq_net_device; | 919 | static struct net_device *compaq_net_device; |
@@ -976,7 +969,7 @@ static void poll_vortex(struct net_device *dev) | |||
976 | 969 | ||
977 | #ifdef CONFIG_PM | 970 | #ifdef CONFIG_PM |
978 | 971 | ||
979 | static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) | 972 | static int vortex_suspend(struct pci_dev *pdev, pm_message_t state) |
980 | { | 973 | { |
981 | struct net_device *dev = pci_get_drvdata(pdev); | 974 | struct net_device *dev = pci_get_drvdata(pdev); |
982 | 975 | ||
@@ -994,7 +987,7 @@ static int vortex_suspend (struct pci_dev *pdev, pm_message_t state) | |||
994 | return 0; | 987 | return 0; |
995 | } | 988 | } |
996 | 989 | ||
997 | static int vortex_resume (struct pci_dev *pdev) | 990 | static int vortex_resume(struct pci_dev *pdev) |
998 | { | 991 | { |
999 | struct net_device *dev = pci_get_drvdata(pdev); | 992 | struct net_device *dev = pci_get_drvdata(pdev); |
1000 | struct vortex_private *vp = netdev_priv(dev); | 993 | struct vortex_private *vp = netdev_priv(dev); |
@@ -1027,8 +1020,8 @@ static struct eisa_device_id vortex_eisa_ids[] = { | |||
1027 | { "" } | 1020 | { "" } |
1028 | }; | 1021 | }; |
1029 | 1022 | ||
1030 | static int vortex_eisa_probe (struct device *device); | 1023 | static int vortex_eisa_probe(struct device *device); |
1031 | static int vortex_eisa_remove (struct device *device); | 1024 | static int vortex_eisa_remove(struct device *device); |
1032 | 1025 | ||
1033 | static struct eisa_driver vortex_eisa_driver = { | 1026 | static struct eisa_driver vortex_eisa_driver = { |
1034 | .id_table = vortex_eisa_ids, | 1027 | .id_table = vortex_eisa_ids, |
@@ -1039,12 +1032,12 @@ static struct eisa_driver vortex_eisa_driver = { | |||
1039 | } | 1032 | } |
1040 | }; | 1033 | }; |
1041 | 1034 | ||
1042 | static int vortex_eisa_probe (struct device *device) | 1035 | static int vortex_eisa_probe(struct device *device) |
1043 | { | 1036 | { |
1044 | void __iomem *ioaddr; | 1037 | void __iomem *ioaddr; |
1045 | struct eisa_device *edev; | 1038 | struct eisa_device *edev; |
1046 | 1039 | ||
1047 | edev = to_eisa_device (device); | 1040 | edev = to_eisa_device(device); |
1048 | 1041 | ||
1049 | if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) | 1042 | if (!request_region(edev->base_addr, VORTEX_TOTAL_SIZE, DRV_NAME)) |
1050 | return -EBUSY; | 1043 | return -EBUSY; |
@@ -1053,7 +1046,7 @@ static int vortex_eisa_probe (struct device *device) | |||
1053 | 1046 | ||
1054 | if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, | 1047 | if (vortex_probe1(device, ioaddr, ioread16(ioaddr + 0xC88) >> 12, |
1055 | edev->id.driver_data, vortex_cards_found)) { | 1048 | edev->id.driver_data, vortex_cards_found)) { |
1056 | release_region (edev->base_addr, VORTEX_TOTAL_SIZE); | 1049 | release_region(edev->base_addr, VORTEX_TOTAL_SIZE); |
1057 | return -ENODEV; | 1050 | return -ENODEV; |
1058 | } | 1051 | } |
1059 | 1052 | ||
@@ -1062,15 +1055,15 @@ static int vortex_eisa_probe (struct device *device) | |||
1062 | return 0; | 1055 | return 0; |
1063 | } | 1056 | } |
1064 | 1057 | ||
1065 | static int vortex_eisa_remove (struct device *device) | 1058 | static int vortex_eisa_remove(struct device *device) |
1066 | { | 1059 | { |
1067 | struct eisa_device *edev; | 1060 | struct eisa_device *edev; |
1068 | struct net_device *dev; | 1061 | struct net_device *dev; |
1069 | struct vortex_private *vp; | 1062 | struct vortex_private *vp; |
1070 | void __iomem *ioaddr; | 1063 | void __iomem *ioaddr; |
1071 | 1064 | ||
1072 | edev = to_eisa_device (device); | 1065 | edev = to_eisa_device(device); |
1073 | dev = eisa_get_drvdata (edev); | 1066 | dev = eisa_get_drvdata(edev); |
1074 | 1067 | ||
1075 | if (!dev) { | 1068 | if (!dev) { |
1076 | printk("vortex_eisa_remove called for Compaq device!\n"); | 1069 | printk("vortex_eisa_remove called for Compaq device!\n"); |
@@ -1080,17 +1073,17 @@ static int vortex_eisa_remove (struct device *device) | |||
1080 | vp = netdev_priv(dev); | 1073 | vp = netdev_priv(dev); |
1081 | ioaddr = vp->ioaddr; | 1074 | ioaddr = vp->ioaddr; |
1082 | 1075 | ||
1083 | unregister_netdev (dev); | 1076 | unregister_netdev(dev); |
1084 | iowrite16 (TotalReset|0x14, ioaddr + EL3_CMD); | 1077 | iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); |
1085 | release_region (dev->base_addr, VORTEX_TOTAL_SIZE); | 1078 | release_region(dev->base_addr, VORTEX_TOTAL_SIZE); |
1086 | 1079 | ||
1087 | free_netdev (dev); | 1080 | free_netdev(dev); |
1088 | return 0; | 1081 | return 0; |
1089 | } | 1082 | } |
1090 | #endif | 1083 | #endif |
1091 | 1084 | ||
1092 | /* returns count found (>= 0), or negative on error */ | 1085 | /* returns count found (>= 0), or negative on error */ |
1093 | static int __init vortex_eisa_init (void) | 1086 | static int __init vortex_eisa_init(void) |
1094 | { | 1087 | { |
1095 | int eisa_found = 0; | 1088 | int eisa_found = 0; |
1096 | int orig_cards_found = vortex_cards_found; | 1089 | int orig_cards_found = vortex_cards_found; |
@@ -1121,7 +1114,7 @@ static int __init vortex_eisa_init (void) | |||
1121 | } | 1114 | } |
1122 | 1115 | ||
1123 | /* returns count (>= 0), or negative on error */ | 1116 | /* returns count (>= 0), or negative on error */ |
1124 | static int __devinit vortex_init_one (struct pci_dev *pdev, | 1117 | static int __devinit vortex_init_one(struct pci_dev *pdev, |
1125 | const struct pci_device_id *ent) | 1118 | const struct pci_device_id *ent) |
1126 | { | 1119 | { |
1127 | int rc, unit, pci_bar; | 1120 | int rc, unit, pci_bar; |
@@ -1129,7 +1122,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev, | |||
1129 | void __iomem *ioaddr; | 1122 | void __iomem *ioaddr; |
1130 | 1123 | ||
1131 | /* wake up and enable device */ | 1124 | /* wake up and enable device */ |
1132 | rc = pci_enable_device (pdev); | 1125 | rc = pci_enable_device(pdev); |
1133 | if (rc < 0) | 1126 | if (rc < 0) |
1134 | goto out; | 1127 | goto out; |
1135 | 1128 | ||
@@ -1151,7 +1144,7 @@ static int __devinit vortex_init_one (struct pci_dev *pdev, | |||
1151 | rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, | 1144 | rc = vortex_probe1(&pdev->dev, ioaddr, pdev->irq, |
1152 | ent->driver_data, unit); | 1145 | ent->driver_data, unit); |
1153 | if (rc < 0) { | 1146 | if (rc < 0) { |
1154 | pci_disable_device (pdev); | 1147 | pci_disable_device(pdev); |
1155 | goto out; | 1148 | goto out; |
1156 | } | 1149 | } |
1157 | 1150 | ||
@@ -1236,7 +1229,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1236 | if (print_info) | 1229 | if (print_info) |
1237 | printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); | 1230 | printk (KERN_INFO "See Documentation/networking/vortex.txt\n"); |
1238 | 1231 | ||
1239 | printk(KERN_INFO "%s: 3Com %s %s at %p. Vers " DRV_VERSION "\n", | 1232 | printk(KERN_INFO "%s: 3Com %s %s at %p.\n", |
1240 | print_name, | 1233 | print_name, |
1241 | pdev ? "PCI" : "EISA", | 1234 | pdev ? "PCI" : "EISA", |
1242 | vci->name, | 1235 | vci->name, |
@@ -1266,7 +1259,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1266 | 1259 | ||
1267 | /* enable bus-mastering if necessary */ | 1260 | /* enable bus-mastering if necessary */ |
1268 | if (vci->flags & PCI_USES_MASTER) | 1261 | if (vci->flags & PCI_USES_MASTER) |
1269 | pci_set_master (pdev); | 1262 | pci_set_master(pdev); |
1270 | 1263 | ||
1271 | if (vci->drv_flags & IS_VORTEX) { | 1264 | if (vci->drv_flags & IS_VORTEX) { |
1272 | u8 pci_latency; | 1265 | u8 pci_latency; |
@@ -1310,7 +1303,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1310 | if (pdev) | 1303 | if (pdev) |
1311 | pci_set_drvdata(pdev, dev); | 1304 | pci_set_drvdata(pdev, dev); |
1312 | if (edev) | 1305 | if (edev) |
1313 | eisa_set_drvdata (edev, dev); | 1306 | eisa_set_drvdata(edev, dev); |
1314 | 1307 | ||
1315 | vp->media_override = 7; | 1308 | vp->media_override = 7; |
1316 | if (option >= 0) { | 1309 | if (option >= 0) { |
@@ -1335,7 +1328,7 @@ static int __devinit vortex_probe1(struct device *gendev, | |||
1335 | vp->enable_wol = 1; | 1328 | vp->enable_wol = 1; |
1336 | } | 1329 | } |
1337 | 1330 | ||
1338 | vp->force_fd = vp->full_duplex; | 1331 | vp->mii.force_media = vp->full_duplex; |
1339 | vp->options = option; | 1332 | vp->options = option; |
1340 | /* Read the station address from the EEPROM. */ | 1333 | /* Read the station address from the EEPROM. */ |
1341 | EL3WINDOW(0); | 1334 | EL3WINDOW(0); |
@@ -1625,6 +1618,46 @@ issue_and_wait(struct net_device *dev, int cmd) | |||
1625 | } | 1618 | } |
1626 | 1619 | ||
1627 | static void | 1620 | static void |
1621 | vortex_set_duplex(struct net_device *dev) | ||
1622 | { | ||
1623 | struct vortex_private *vp = netdev_priv(dev); | ||
1624 | void __iomem *ioaddr = vp->ioaddr; | ||
1625 | |||
1626 | printk(KERN_INFO "%s: setting %s-duplex.\n", | ||
1627 | dev->name, (vp->full_duplex) ? "full" : "half"); | ||
1628 | |||
1629 | EL3WINDOW(3); | ||
1630 | /* Set the full-duplex bit. */ | ||
1631 | iowrite16(((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | | ||
1632 | (vp->large_frames ? 0x40 : 0) | | ||
1633 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? | ||
1634 | 0x100 : 0), | ||
1635 | ioaddr + Wn3_MAC_Ctrl); | ||
1636 | |||
1637 | issue_and_wait(dev, TxReset); | ||
1638 | /* | ||
1639 | * Don't reset the PHY - that upsets autonegotiation during DHCP operations. | ||
1640 | */ | ||
1641 | issue_and_wait(dev, RxReset|0x04); | ||
1642 | } | ||
1643 | |||
1644 | static void vortex_check_media(struct net_device *dev, unsigned int init) | ||
1645 | { | ||
1646 | struct vortex_private *vp = netdev_priv(dev); | ||
1647 | unsigned int ok_to_print = 0; | ||
1648 | |||
1649 | if (vortex_debug > 3) | ||
1650 | ok_to_print = 1; | ||
1651 | |||
1652 | if (mii_check_media(&vp->mii, ok_to_print, init)) { | ||
1653 | vp->full_duplex = vp->mii.full_duplex; | ||
1654 | vortex_set_duplex(dev); | ||
1655 | } else if (init) { | ||
1656 | vortex_set_duplex(dev); | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | static void | ||
1628 | vortex_up(struct net_device *dev) | 1661 | vortex_up(struct net_device *dev) |
1629 | { | 1662 | { |
1630 | struct vortex_private *vp = netdev_priv(dev); | 1663 | struct vortex_private *vp = netdev_priv(dev); |
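The vortex_check_media() helper added in the hunk above delegates link and duplex detection to the generic MII layer. For mii_check_media() to work, the driver's mii_if_info has to be wired to its MDIO accessors at probe time; the sketch below shows that setup in generic terms, with values that are assumptions rather than the ones 3c59x actually uses.

#include <linux/mii.h>
#include <linux/netdevice.h>

/* Sketch only: hook a driver's MDIO routines into the generic MII helper
 * so mii_check_media() can poll BMSR/LPA and report duplex changes. */
static void example_mii_setup(struct net_device *dev, struct mii_if_info *mii,
			      int phy_id,
			      int (*mdio_read)(struct net_device *, int, int),
			      void (*mdio_write)(struct net_device *, int, int, int))
{
	mii->dev          = dev;
	mii->mdio_read    = mdio_read;
	mii->mdio_write   = mdio_write;
	mii->phy_id       = phy_id;
	mii->phy_id_mask  = 0x1f;
	mii->reg_num_mask = 0x1f;

	/* Later, e.g. from a media timer:
	 *   if (mii_check_media(mii, ok_to_print, init))
	 *           reprogram the duplex setting in hardware;
	 */
}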
@@ -1684,53 +1717,20 @@ vortex_up(struct net_device *dev) | |||
1684 | printk(KERN_DEBUG "%s: Initial media type %s.\n", | 1717 | printk(KERN_DEBUG "%s: Initial media type %s.\n", |
1685 | dev->name, media_tbl[dev->if_port].name); | 1718 | dev->name, media_tbl[dev->if_port].name); |
1686 | 1719 | ||
1687 | vp->full_duplex = vp->force_fd; | 1720 | vp->full_duplex = vp->mii.force_media; |
1688 | config = BFINS(config, dev->if_port, 20, 4); | 1721 | config = BFINS(config, dev->if_port, 20, 4); |
1689 | if (vortex_debug > 6) | 1722 | if (vortex_debug > 6) |
1690 | printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); | 1723 | printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config); |
1691 | iowrite32(config, ioaddr + Wn3_Config); | 1724 | iowrite32(config, ioaddr + Wn3_Config); |
1692 | 1725 | ||
1726 | netif_carrier_off(dev); | ||
1693 | if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { | 1727 | if (dev->if_port == XCVR_MII || dev->if_port == XCVR_NWAY) { |
1694 | int mii_reg1, mii_reg5; | ||
1695 | EL3WINDOW(4); | 1728 | EL3WINDOW(4); |
1696 | /* Read BMSR (reg1) only to clear old status. */ | 1729 | vortex_check_media(dev, 1); |
1697 | mii_reg1 = mdio_read(dev, vp->phys[0], MII_BMSR); | ||
1698 | mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); | ||
1699 | if (mii_reg5 == 0xffff || mii_reg5 == 0x0000) { | ||
1700 | netif_carrier_off(dev); /* No MII device or no link partner report */ | ||
1701 | } else { | ||
1702 | mii_reg5 &= vp->advertising; | ||
1703 | if ((mii_reg5 & 0x0100) != 0 /* 100baseTx-FD */ | ||
1704 | || (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */ | ||
1705 | vp->full_duplex = 1; | ||
1706 | netif_carrier_on(dev); | ||
1707 | } | ||
1708 | vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0); | ||
1709 | if (vortex_debug > 1) | ||
1710 | printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x," | ||
1711 | " info1 %04x, setting %s-duplex.\n", | ||
1712 | dev->name, vp->phys[0], | ||
1713 | mii_reg1, mii_reg5, | ||
1714 | vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half"); | ||
1715 | EL3WINDOW(3); | ||
1716 | } | ||
1717 | |||
1718 | /* Set the full-duplex bit. */ | ||
1719 | iowrite16( ((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) | | ||
1720 | (vp->large_frames ? 0x40 : 0) | | ||
1721 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), | ||
1722 | ioaddr + Wn3_MAC_Ctrl); | ||
1723 | |||
1724 | if (vortex_debug > 1) { | ||
1725 | printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n", | ||
1726 | dev->name, config); | ||
1727 | } | 1730 | } |
1731 | else | ||
1732 | vortex_set_duplex(dev); | ||
1728 | 1733 | ||
1729 | issue_and_wait(dev, TxReset); | ||
1730 | /* | ||
1731 | * Don't reset the PHY - that upsets autonegotiation during DHCP operations. | ||
1732 | */ | ||
1733 | issue_and_wait(dev, RxReset|0x04); | ||
1734 | 1734 | ||
1735 | iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); | 1735 | iowrite16(SetStatusEnb | 0x00, ioaddr + EL3_CMD); |
1736 | 1736 | ||
@@ -1805,7 +1805,6 @@ vortex_up(struct net_device *dev) | |||
1805 | set_8021q_mode(dev, 1); | 1805 | set_8021q_mode(dev, 1); |
1806 | iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ | 1806 | iowrite16(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */ |
1807 | 1807 | ||
1808 | // issue_and_wait(dev, SetTxStart|0x07ff); | ||
1809 | iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ | 1808 | iowrite16(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */ |
1810 | iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ | 1809 | iowrite16(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */ |
1811 | /* Allow status bits to be seen. */ | 1810 | /* Allow status bits to be seen. */ |
@@ -1892,7 +1891,7 @@ vortex_timer(unsigned long data) | |||
1892 | void __iomem *ioaddr = vp->ioaddr; | 1891 | void __iomem *ioaddr = vp->ioaddr; |
1893 | int next_tick = 60*HZ; | 1892 | int next_tick = 60*HZ; |
1894 | int ok = 0; | 1893 | int ok = 0; |
1895 | int media_status, mii_status, old_window; | 1894 | int media_status, old_window; |
1896 | 1895 | ||
1897 | if (vortex_debug > 2) { | 1896 | if (vortex_debug > 2) { |
1898 | printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", | 1897 | printk(KERN_DEBUG "%s: Media selection timer tick happened, %s.\n", |
@@ -1900,8 +1899,6 @@ vortex_timer(unsigned long data) | |||
1900 | printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); | 1899 | printk(KERN_DEBUG "dev->watchdog_timeo=%d\n", dev->watchdog_timeo); |
1901 | } | 1900 | } |
1902 | 1901 | ||
1903 | if (vp->medialock) | ||
1904 | goto leave_media_alone; | ||
1905 | disable_irq(dev->irq); | 1902 | disable_irq(dev->irq); |
1906 | old_window = ioread16(ioaddr + EL3_CMD) >> 13; | 1903 | old_window = ioread16(ioaddr + EL3_CMD) >> 13; |
1907 | EL3WINDOW(4); | 1904 | EL3WINDOW(4); |
@@ -1924,44 +1921,9 @@ vortex_timer(unsigned long data) | |||
1924 | break; | 1921 | break; |
1925 | case XCVR_MII: case XCVR_NWAY: | 1922 | case XCVR_MII: case XCVR_NWAY: |
1926 | { | 1923 | { |
1927 | spin_lock_bh(&vp->lock); | ||
1928 | mii_status = mdio_read(dev, vp->phys[0], MII_BMSR); | ||
1929 | if (!(mii_status & BMSR_LSTATUS)) { | ||
1930 | /* Re-read to get actual link status */ | ||
1931 | mii_status = mdio_read(dev, vp->phys[0], MII_BMSR); | ||
1932 | } | ||
1933 | ok = 1; | 1924 | ok = 1; |
1934 | if (vortex_debug > 2) | 1925 | spin_lock_bh(&vp->lock); |
1935 | printk(KERN_DEBUG "%s: MII transceiver has status %4.4x.\n", | 1926 | vortex_check_media(dev, 0); |
1936 | dev->name, mii_status); | ||
1937 | if (mii_status & BMSR_LSTATUS) { | ||
1938 | int mii_reg5 = mdio_read(dev, vp->phys[0], MII_LPA); | ||
1939 | if (! vp->force_fd && mii_reg5 != 0xffff) { | ||
1940 | int duplex; | ||
1941 | |||
1942 | mii_reg5 &= vp->advertising; | ||
1943 | duplex = (mii_reg5&0x0100) || (mii_reg5 & 0x01C0) == 0x0040; | ||
1944 | if (vp->full_duplex != duplex) { | ||
1945 | vp->full_duplex = duplex; | ||
1946 | printk(KERN_INFO "%s: Setting %s-duplex based on MII " | ||
1947 | "#%d link partner capability of %4.4x.\n", | ||
1948 | dev->name, vp->full_duplex ? "full" : "half", | ||
1949 | vp->phys[0], mii_reg5); | ||
1950 | /* Set the full-duplex bit. */ | ||
1951 | EL3WINDOW(3); | ||
1952 | iowrite16( (vp->full_duplex ? 0x20 : 0) | | ||
1953 | (vp->large_frames ? 0x40 : 0) | | ||
1954 | ((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0), | ||
1955 | ioaddr + Wn3_MAC_Ctrl); | ||
1956 | if (vortex_debug > 1) | ||
1957 | printk(KERN_DEBUG "Setting duplex in Wn3_MAC_Ctrl\n"); | ||
1958 | /* AKPM: bug: should reset Tx and Rx after setting Duplex. Page 180 */ | ||
1959 | } | ||
1960 | } | ||
1961 | netif_carrier_on(dev); | ||
1962 | } else { | ||
1963 | netif_carrier_off(dev); | ||
1964 | } | ||
1965 | spin_unlock_bh(&vp->lock); | 1927 | spin_unlock_bh(&vp->lock); |
1966 | } | 1928 | } |
1967 | break; | 1929 | break; |
@@ -1971,7 +1933,14 @@ vortex_timer(unsigned long data) | |||
1971 | dev->name, media_tbl[dev->if_port].name, media_status); | 1933 | dev->name, media_tbl[dev->if_port].name, media_status); |
1972 | ok = 1; | 1934 | ok = 1; |
1973 | } | 1935 | } |
1974 | if ( ! ok) { | 1936 | |
1937 | if (!netif_carrier_ok(dev)) | ||
1938 | next_tick = 5*HZ; | ||
1939 | |||
1940 | if (vp->medialock) | ||
1941 | goto leave_media_alone; | ||
1942 | |||
1943 | if (!ok) { | ||
1975 | unsigned int config; | 1944 | unsigned int config; |
1976 | 1945 | ||
1977 | do { | 1946 | do { |
@@ -2004,14 +1973,14 @@ vortex_timer(unsigned long data) | |||
2004 | printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); | 1973 | printk(KERN_DEBUG "wrote 0x%08x to Wn3_Config\n", config); |
2005 | /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ | 1974 | /* AKPM: FIXME: Should reset Rx & Tx here. P60 of 3c90xc.pdf */ |
2006 | } | 1975 | } |
2007 | EL3WINDOW(old_window); | ||
2008 | enable_irq(dev->irq); | ||
2009 | 1976 | ||
2010 | leave_media_alone: | 1977 | leave_media_alone: |
2011 | if (vortex_debug > 2) | 1978 | if (vortex_debug > 2) |
2012 | printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", | 1979 | printk(KERN_DEBUG "%s: Media selection timer finished, %s.\n", |
2013 | dev->name, media_tbl[dev->if_port].name); | 1980 | dev->name, media_tbl[dev->if_port].name); |
2014 | 1981 | ||
1982 | EL3WINDOW(old_window); | ||
1983 | enable_irq(dev->irq); | ||
2015 | mod_timer(&vp->timer, RUN_AT(next_tick)); | 1984 | mod_timer(&vp->timer, RUN_AT(next_tick)); |
2016 | if (vp->deferred) | 1985 | if (vp->deferred) |
2017 | iowrite16(FakeIntr, ioaddr + EL3_CMD); | 1986 | iowrite16(FakeIntr, ioaddr + EL3_CMD); |
@@ -2206,7 +2175,7 @@ vortex_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2206 | if (vp->bus_master) { | 2175 | if (vp->bus_master) { |
2207 | /* Set the bus-master controller to transfer the packet. */ | 2176 | /* Set the bus-master controller to transfer the packet. */ |
2208 | int len = (skb->len + 3) & ~3; | 2177 | int len = (skb->len + 3) & ~3; |
2209 | iowrite32( vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), | 2178 | iowrite32(vp->tx_skb_dma = pci_map_single(VORTEX_PCI(vp), skb->data, len, PCI_DMA_TODEVICE), |
2210 | ioaddr + Wn7_MasterAddr); | 2179 | ioaddr + Wn7_MasterAddr); |
2211 | iowrite16(len, ioaddr + Wn7_MasterLen); | 2180 | iowrite16(len, ioaddr + Wn7_MasterLen); |
2212 | vp->tx_skb = skb; | 2181 | vp->tx_skb = skb; |
@@ -2983,20 +2952,6 @@ static int vortex_nway_reset(struct net_device *dev) | |||
2983 | return rc; | 2952 | return rc; |
2984 | } | 2953 | } |
2985 | 2954 | ||
2986 | static u32 vortex_get_link(struct net_device *dev) | ||
2987 | { | ||
2988 | struct vortex_private *vp = netdev_priv(dev); | ||
2989 | void __iomem *ioaddr = vp->ioaddr; | ||
2990 | unsigned long flags; | ||
2991 | int rc; | ||
2992 | |||
2993 | spin_lock_irqsave(&vp->lock, flags); | ||
2994 | EL3WINDOW(4); | ||
2995 | rc = mii_link_ok(&vp->mii); | ||
2996 | spin_unlock_irqrestore(&vp->lock, flags); | ||
2997 | return rc; | ||
2998 | } | ||
2999 | |||
3000 | static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | 2955 | static int vortex_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) |
3001 | { | 2956 | { |
3002 | struct vortex_private *vp = netdev_priv(dev); | 2957 | struct vortex_private *vp = netdev_priv(dev); |
@@ -3077,7 +3032,6 @@ static void vortex_get_drvinfo(struct net_device *dev, | |||
3077 | struct vortex_private *vp = netdev_priv(dev); | 3032 | struct vortex_private *vp = netdev_priv(dev); |
3078 | 3033 | ||
3079 | strcpy(info->driver, DRV_NAME); | 3034 | strcpy(info->driver, DRV_NAME); |
3080 | strcpy(info->version, DRV_VERSION); | ||
3081 | if (VORTEX_PCI(vp)) { | 3035 | if (VORTEX_PCI(vp)) { |
3082 | strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); | 3036 | strcpy(info->bus_info, pci_name(VORTEX_PCI(vp))); |
3083 | } else { | 3037 | } else { |
@@ -3098,9 +3052,9 @@ static struct ethtool_ops vortex_ethtool_ops = { | |||
3098 | .get_stats_count = vortex_get_stats_count, | 3052 | .get_stats_count = vortex_get_stats_count, |
3099 | .get_settings = vortex_get_settings, | 3053 | .get_settings = vortex_get_settings, |
3100 | .set_settings = vortex_set_settings, | 3054 | .set_settings = vortex_set_settings, |
3101 | .get_link = vortex_get_link, | 3055 | .get_link = ethtool_op_get_link, |
3102 | .nway_reset = vortex_nway_reset, | 3056 | .nway_reset = vortex_nway_reset, |
3103 | .get_perm_addr = ethtool_op_get_perm_addr, | 3057 | .get_perm_addr = ethtool_op_get_perm_addr, |
3104 | }; | 3058 | }; |
3105 | 3059 | ||
3106 | #ifdef CONFIG_PCI | 3060 | #ifdef CONFIG_PCI |
@@ -3301,7 +3255,7 @@ static void mdio_write(struct net_device *dev, int phy_id, int location, int val | |||
3301 | } | 3255 | } |
3302 | return; | 3256 | return; |
3303 | } | 3257 | } |
3304 | 3258 | ||
3305 | /* ACPI: Advanced Configuration and Power Interface. */ | 3259 | /* ACPI: Advanced Configuration and Power Interface. */ |
3306 | /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ | 3260 | /* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */ |
3307 | static void acpi_set_WOL(struct net_device *dev) | 3261 | static void acpi_set_WOL(struct net_device *dev) |
@@ -3325,7 +3279,7 @@ static void acpi_set_WOL(struct net_device *dev) | |||
3325 | } | 3279 | } |
3326 | 3280 | ||
3327 | 3281 | ||
3328 | static void __devexit vortex_remove_one (struct pci_dev *pdev) | 3282 | static void __devexit vortex_remove_one(struct pci_dev *pdev) |
3329 | { | 3283 | { |
3330 | struct net_device *dev = pci_get_drvdata(pdev); | 3284 | struct net_device *dev = pci_get_drvdata(pdev); |
3331 | struct vortex_private *vp; | 3285 | struct vortex_private *vp; |
@@ -3381,7 +3335,7 @@ static int vortex_have_pci; | |||
3381 | static int vortex_have_eisa; | 3335 | static int vortex_have_eisa; |
3382 | 3336 | ||
3383 | 3337 | ||
3384 | static int __init vortex_init (void) | 3338 | static int __init vortex_init(void) |
3385 | { | 3339 | { |
3386 | int pci_rc, eisa_rc; | 3340 | int pci_rc, eisa_rc; |
3387 | 3341 | ||
@@ -3397,14 +3351,14 @@ static int __init vortex_init (void) | |||
3397 | } | 3351 | } |
3398 | 3352 | ||
3399 | 3353 | ||
3400 | static void __exit vortex_eisa_cleanup (void) | 3354 | static void __exit vortex_eisa_cleanup(void) |
3401 | { | 3355 | { |
3402 | struct vortex_private *vp; | 3356 | struct vortex_private *vp; |
3403 | void __iomem *ioaddr; | 3357 | void __iomem *ioaddr; |
3404 | 3358 | ||
3405 | #ifdef CONFIG_EISA | 3359 | #ifdef CONFIG_EISA |
3406 | /* Take care of the EISA devices */ | 3360 | /* Take care of the EISA devices */ |
3407 | eisa_driver_unregister (&vortex_eisa_driver); | 3361 | eisa_driver_unregister(&vortex_eisa_driver); |
3408 | #endif | 3362 | #endif |
3409 | 3363 | ||
3410 | if (compaq_net_device) { | 3364 | if (compaq_net_device) { |
@@ -3412,33 +3366,24 @@ static void __exit vortex_eisa_cleanup (void) | |||
3412 | ioaddr = ioport_map(compaq_net_device->base_addr, | 3366 | ioaddr = ioport_map(compaq_net_device->base_addr, |
3413 | VORTEX_TOTAL_SIZE); | 3367 | VORTEX_TOTAL_SIZE); |
3414 | 3368 | ||
3415 | unregister_netdev (compaq_net_device); | 3369 | unregister_netdev(compaq_net_device); |
3416 | iowrite16 (TotalReset, ioaddr + EL3_CMD); | 3370 | iowrite16(TotalReset, ioaddr + EL3_CMD); |
3417 | release_region(compaq_net_device->base_addr, | 3371 | release_region(compaq_net_device->base_addr, |
3418 | VORTEX_TOTAL_SIZE); | 3372 | VORTEX_TOTAL_SIZE); |
3419 | 3373 | ||
3420 | free_netdev (compaq_net_device); | 3374 | free_netdev(compaq_net_device); |
3421 | } | 3375 | } |
3422 | } | 3376 | } |
3423 | 3377 | ||
3424 | 3378 | ||
3425 | static void __exit vortex_cleanup (void) | 3379 | static void __exit vortex_cleanup(void) |
3426 | { | 3380 | { |
3427 | if (vortex_have_pci) | 3381 | if (vortex_have_pci) |
3428 | pci_unregister_driver (&vortex_driver); | 3382 | pci_unregister_driver(&vortex_driver); |
3429 | if (vortex_have_eisa) | 3383 | if (vortex_have_eisa) |
3430 | vortex_eisa_cleanup (); | 3384 | vortex_eisa_cleanup(); |
3431 | } | 3385 | } |
3432 | 3386 | ||
3433 | 3387 | ||
3434 | module_init(vortex_init); | 3388 | module_init(vortex_init); |
3435 | module_exit(vortex_cleanup); | 3389 | module_exit(vortex_cleanup); |
3436 | |||
3437 | |||
3438 | /* | ||
3439 | * Local variables: | ||
3440 | * c-indent-level: 4 | ||
3441 | * c-basic-offset: 4 | ||
3442 | * tab-width: 4 | ||
3443 | * End: | ||
3444 | */ | ||
diff --git a/drivers/net/sis900.c b/drivers/net/sis900.c index 253440a98022..8429ceb01389 100644 --- a/drivers/net/sis900.c +++ b/drivers/net/sis900.c | |||
@@ -1693,7 +1693,7 @@ static irqreturn_t sis900_interrupt(int irq, void *dev_instance, struct pt_regs | |||
1693 | * | 1693 | * |
1694 | * Process receive interrupt events, | 1694 | * Process receive interrupt events, |
1695 | * put buffer to higher layer and refill buffer pool | 1695 | * put buffer to higher layer and refill buffer pool |
1696 | * Note: This fucntion is called by interrupt handler, | 1696 | * Note: This function is called by interrupt handler, |
1697 | * don't do "too much" work here | 1697 | * don't do "too much" work here |
1698 | */ | 1698 | */ |
1699 | 1699 | ||
@@ -1840,7 +1840,7 @@ static int sis900_rx(struct net_device *net_dev) | |||
1840 | * | 1840 | * |
1841 | * Check for error condition and free socket buffer etc | 1841 | * Check for error condition and free socket buffer etc |
1842 | * schedule for more transmission as needed | 1842 | * schedule for more transmission as needed |
1843 | * Note: This fucntion is called by interrupt handler, | 1843 | * Note: This function is called by interrupt handler, |
1844 | * don't do "too much" work here | 1844 | * don't do "too much" work here |
1845 | */ | 1845 | */ |
1846 | 1846 | ||
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c index 1ff5de076d21..4505540e3c59 100644 --- a/drivers/net/wan/dscc4.c +++ b/drivers/net/wan/dscc4.c | |||
@@ -105,6 +105,7 @@ | |||
105 | #include <linux/delay.h> | 105 | #include <linux/delay.h> |
106 | #include <net/syncppp.h> | 106 | #include <net/syncppp.h> |
107 | #include <linux/hdlc.h> | 107 | #include <linux/hdlc.h> |
108 | #include <linux/mutex.h> | ||
108 | 109 | ||
109 | /* Version */ | 110 | /* Version */ |
110 | static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; | 111 | static const char version[] = "$Id: dscc4.c,v 1.173 2003/09/20 23:55:34 romieu Exp $ for Linux\n"; |
@@ -112,7 +113,7 @@ static int debug; | |||
112 | static int quartz; | 113 | static int quartz; |
113 | 114 | ||
114 | #ifdef CONFIG_DSCC4_PCI_RST | 115 | #ifdef CONFIG_DSCC4_PCI_RST |
115 | static DECLARE_MUTEX(dscc4_sem); | 116 | static DEFINE_MUTEX(dscc4_mutex); |
116 | static u32 dscc4_pci_config_store[16]; | 117 | static u32 dscc4_pci_config_store[16]; |
117 | #endif | 118 | #endif |
118 | 119 | ||
@@ -1018,7 +1019,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) | |||
1018 | { | 1019 | { |
1019 | int i; | 1020 | int i; |
1020 | 1021 | ||
1021 | down(&dscc4_sem); | 1022 | mutex_lock(&dscc4_mutex); |
1022 | for (i = 0; i < 16; i++) | 1023 | for (i = 0; i < 16; i++) |
1023 | pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); | 1024 | pci_read_config_dword(pdev, i << 2, dscc4_pci_config_store + i); |
1024 | 1025 | ||
@@ -1039,7 +1040,7 @@ static void dscc4_pci_reset(struct pci_dev *pdev, void __iomem *ioaddr) | |||
1039 | 1040 | ||
1040 | for (i = 0; i < 16; i++) | 1041 | for (i = 0; i < 16; i++) |
1041 | pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); | 1042 | pci_write_config_dword(pdev, i << 2, dscc4_pci_config_store[i]); |
1042 | up(&dscc4_sem); | 1043 | mutex_unlock(&dscc4_mutex); |
1043 | } | 1044 | } |
1044 | #else | 1045 | #else |
1045 | #define dscc4_pci_reset(pdev,ioaddr) do {} while (0) | 1046 | #define dscc4_pci_reset(pdev,ioaddr) do {} while (0) |
diff --git a/drivers/parport/share.c b/drivers/parport/share.c index ea62bed6bc83..bbbfd79adbaf 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/kmod.h> | 32 | #include <linux/kmod.h> |
33 | 33 | ||
34 | #include <linux/spinlock.h> | 34 | #include <linux/spinlock.h> |
35 | #include <linux/mutex.h> | ||
35 | #include <asm/irq.h> | 36 | #include <asm/irq.h> |
36 | 37 | ||
37 | #undef PARPORT_PARANOID | 38 | #undef PARPORT_PARANOID |
@@ -50,7 +51,7 @@ static DEFINE_SPINLOCK(full_list_lock); | |||
50 | 51 | ||
51 | static LIST_HEAD(drivers); | 52 | static LIST_HEAD(drivers); |
52 | 53 | ||
53 | static DECLARE_MUTEX(registration_lock); | 54 | static DEFINE_MUTEX(registration_lock); |
54 | 55 | ||
55 | /* What you can do to a port that's gone away.. */ | 56 | /* What you can do to a port that's gone away.. */ |
56 | static void dead_write_lines (struct parport *p, unsigned char b){} | 57 | static void dead_write_lines (struct parport *p, unsigned char b){} |
@@ -158,11 +159,11 @@ int parport_register_driver (struct parport_driver *drv) | |||
158 | if (list_empty(&portlist)) | 159 | if (list_empty(&portlist)) |
159 | get_lowlevel_driver (); | 160 | get_lowlevel_driver (); |
160 | 161 | ||
161 | down(®istration_lock); | 162 | mutex_lock(®istration_lock); |
162 | list_for_each_entry(port, &portlist, list) | 163 | list_for_each_entry(port, &portlist, list) |
163 | drv->attach(port); | 164 | drv->attach(port); |
164 | list_add(&drv->list, &drivers); | 165 | list_add(&drv->list, &drivers); |
165 | up(®istration_lock); | 166 | mutex_unlock(®istration_lock); |
166 | 167 | ||
167 | return 0; | 168 | return 0; |
168 | } | 169 | } |
@@ -188,11 +189,11 @@ void parport_unregister_driver (struct parport_driver *drv) | |||
188 | { | 189 | { |
189 | struct parport *port; | 190 | struct parport *port; |
190 | 191 | ||
191 | down(®istration_lock); | 192 | mutex_lock(®istration_lock); |
192 | list_del_init(&drv->list); | 193 | list_del_init(&drv->list); |
193 | list_for_each_entry(port, &portlist, list) | 194 | list_for_each_entry(port, &portlist, list) |
194 | drv->detach(port); | 195 | drv->detach(port); |
195 | up(®istration_lock); | 196 | mutex_unlock(®istration_lock); |
196 | } | 197 | } |
197 | 198 | ||
198 | static void free_port (struct parport *port) | 199 | static void free_port (struct parport *port) |
@@ -366,7 +367,7 @@ void parport_announce_port (struct parport *port) | |||
366 | #endif | 367 | #endif |
367 | 368 | ||
368 | parport_proc_register(port); | 369 | parport_proc_register(port); |
369 | down(®istration_lock); | 370 | mutex_lock(®istration_lock); |
370 | spin_lock_irq(&parportlist_lock); | 371 | spin_lock_irq(&parportlist_lock); |
371 | list_add_tail(&port->list, &portlist); | 372 | list_add_tail(&port->list, &portlist); |
372 | for (i = 1; i < 3; i++) { | 373 | for (i = 1; i < 3; i++) { |
@@ -383,7 +384,7 @@ void parport_announce_port (struct parport *port) | |||
383 | if (slave) | 384 | if (slave) |
384 | attach_driver_chain(slave); | 385 | attach_driver_chain(slave); |
385 | } | 386 | } |
386 | up(®istration_lock); | 387 | mutex_unlock(®istration_lock); |
387 | } | 388 | } |
388 | 389 | ||
389 | /** | 390 | /** |
@@ -409,7 +410,7 @@ void parport_remove_port(struct parport *port) | |||
409 | { | 410 | { |
410 | int i; | 411 | int i; |
411 | 412 | ||
412 | down(®istration_lock); | 413 | mutex_lock(®istration_lock); |
413 | 414 | ||
414 | /* Spread the word. */ | 415 | /* Spread the word. */ |
415 | detach_driver_chain (port); | 416 | detach_driver_chain (port); |
@@ -436,7 +437,7 @@ void parport_remove_port(struct parport *port) | |||
436 | } | 437 | } |
437 | spin_unlock(&parportlist_lock); | 438 | spin_unlock(&parportlist_lock); |
438 | 439 | ||
439 | up(®istration_lock); | 440 | mutex_unlock(®istration_lock); |
440 | 441 | ||
441 | parport_proc_unregister(port); | 442 | parport_proc_unregister(port); |
442 | 443 | ||
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c index 3eefe2cec72d..46825fee3ae4 100644 --- a/drivers/pci/hotplug/rpadlpar_core.c +++ b/drivers/pci/hotplug/rpadlpar_core.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/string.h> | 19 | #include <linux/string.h> |
20 | 20 | ||
21 | #include <asm/pci-bridge.h> | 21 | #include <asm/pci-bridge.h> |
22 | #include <asm/semaphore.h> | 22 | #include <linux/mutex.h> |
23 | #include <asm/rtas.h> | 23 | #include <asm/rtas.h> |
24 | #include <asm/vio.h> | 24 | #include <asm/vio.h> |
25 | 25 | ||
@@ -27,7 +27,7 @@ | |||
27 | #include "rpaphp.h" | 27 | #include "rpaphp.h" |
28 | #include "rpadlpar.h" | 28 | #include "rpadlpar.h" |
29 | 29 | ||
30 | static DECLARE_MUTEX(rpadlpar_sem); | 30 | static DEFINE_MUTEX(rpadlpar_mutex); |
31 | 31 | ||
32 | #define DLPAR_MODULE_NAME "rpadlpar_io" | 32 | #define DLPAR_MODULE_NAME "rpadlpar_io" |
33 | 33 | ||
@@ -300,7 +300,7 @@ int dlpar_add_slot(char *drc_name) | |||
300 | int node_type; | 300 | int node_type; |
301 | int rc = -EIO; | 301 | int rc = -EIO; |
302 | 302 | ||
303 | if (down_interruptible(&rpadlpar_sem)) | 303 | if (mutex_lock_interruptible(&rpadlpar_mutex)) |
304 | return -ERESTARTSYS; | 304 | return -ERESTARTSYS; |
305 | 305 | ||
306 | /* Find newly added node */ | 306 | /* Find newly added node */ |
@@ -324,7 +324,7 @@ int dlpar_add_slot(char *drc_name) | |||
324 | 324 | ||
325 | printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); | 325 | printk(KERN_INFO "%s: slot %s added\n", DLPAR_MODULE_NAME, drc_name); |
326 | exit: | 326 | exit: |
327 | up(&rpadlpar_sem); | 327 | mutex_unlock(&rpadlpar_mutex); |
328 | return rc; | 328 | return rc; |
329 | } | 329 | } |
330 | 330 | ||
@@ -417,7 +417,7 @@ int dlpar_remove_slot(char *drc_name) | |||
417 | int node_type; | 417 | int node_type; |
418 | int rc = 0; | 418 | int rc = 0; |
419 | 419 | ||
420 | if (down_interruptible(&rpadlpar_sem)) | 420 | if (mutex_lock_interruptible(&rpadlpar_mutex)) |
421 | return -ERESTARTSYS; | 421 | return -ERESTARTSYS; |
422 | 422 | ||
423 | dn = find_dlpar_node(drc_name, &node_type); | 423 | dn = find_dlpar_node(drc_name, &node_type); |
@@ -439,7 +439,7 @@ int dlpar_remove_slot(char *drc_name) | |||
439 | } | 439 | } |
440 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); | 440 | printk(KERN_INFO "%s: slot %s removed\n", DLPAR_MODULE_NAME, drc_name); |
441 | exit: | 441 | exit: |
442 | up(&rpadlpar_sem); | 442 | mutex_unlock(&rpadlpar_mutex); |
443 | return rc; | 443 | return rc; |
444 | } | 444 | } |
445 | 445 | ||
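[Annotation] The dscc4, parport and rpadlpar hunks above all apply the same conversion: a semaphore declared with DECLARE_MUTEX() and used only for mutual exclusion becomes a struct mutex, with down()/up() replaced by mutex_lock()/mutex_unlock() and down_interruptible() by mutex_lock_interruptible(). A minimal sketch of the pattern follows; my_lock and my_add_slot are placeholder names, not symbols from the drivers above.

	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_lock);

	static int my_add_slot(void)
	{
		/* interruptible variant: give up with -ERESTARTSYS on a signal */
		if (mutex_lock_interruptible(&my_lock))
			return -ERESTARTSYS;

		/* ... touch the data the lock protects ... */

		mutex_unlock(&my_lock);	/* plain lock sites use mutex_lock() */
		return 0;
	}

The conversion is only valid where the semaphore was already used as a simple lock: unlike a semaphore, a mutex must be unlocked by the task that locked it.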
diff --git a/drivers/pci/hotplug/sgi_hotplug.c b/drivers/pci/hotplug/sgi_hotplug.c index c402da8e78ae..8cb9abde736b 100644 --- a/drivers/pci/hotplug/sgi_hotplug.c +++ b/drivers/pci/hotplug/sgi_hotplug.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/pci.h> | 15 | #include <linux/pci.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | #include <linux/mutex.h> | ||
18 | 19 | ||
19 | #include <asm/sn/addrs.h> | 20 | #include <asm/sn/addrs.h> |
20 | #include <asm/sn/l1.h> | 21 | #include <asm/sn/l1.h> |
@@ -81,7 +82,7 @@ static struct hotplug_slot_ops sn_hotplug_slot_ops = { | |||
81 | .get_power_status = get_power_status, | 82 | .get_power_status = get_power_status, |
82 | }; | 83 | }; |
83 | 84 | ||
84 | static DECLARE_MUTEX(sn_hotplug_sem); | 85 | static DEFINE_MUTEX(sn_hotplug_mutex); |
85 | 86 | ||
86 | static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, | 87 | static ssize_t path_show (struct hotplug_slot *bss_hotplug_slot, |
87 | char *buf) | 88 | char *buf) |
@@ -346,7 +347,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
346 | int rc; | 347 | int rc; |
347 | 348 | ||
348 | /* Serialize the Linux PCI infrastructure */ | 349 | /* Serialize the Linux PCI infrastructure */ |
349 | down(&sn_hotplug_sem); | 350 | mutex_lock(&sn_hotplug_mutex); |
350 | 351 | ||
351 | /* | 352 | /* |
352 | * Power-on and initialize the slot in the SN | 353 | * Power-on and initialize the slot in the SN |
@@ -354,7 +355,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
354 | */ | 355 | */ |
355 | rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); | 356 | rc = sn_slot_enable(bss_hotplug_slot, slot->device_num); |
356 | if (rc) { | 357 | if (rc) { |
357 | up(&sn_hotplug_sem); | 358 | mutex_unlock(&sn_hotplug_mutex); |
358 | return rc; | 359 | return rc; |
359 | } | 360 | } |
360 | 361 | ||
@@ -362,7 +363,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
362 | PCI_DEVFN(slot->device_num + 1, 0)); | 363 | PCI_DEVFN(slot->device_num + 1, 0)); |
363 | if (!num_funcs) { | 364 | if (!num_funcs) { |
364 | dev_dbg(slot->pci_bus->self, "no device in slot\n"); | 365 | dev_dbg(slot->pci_bus->self, "no device in slot\n"); |
365 | up(&sn_hotplug_sem); | 366 | mutex_unlock(&sn_hotplug_mutex); |
366 | return -ENODEV; | 367 | return -ENODEV; |
367 | } | 368 | } |
368 | 369 | ||
@@ -402,7 +403,7 @@ static int enable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
402 | if (new_ppb) | 403 | if (new_ppb) |
403 | pci_bus_add_devices(new_bus); | 404 | pci_bus_add_devices(new_bus); |
404 | 405 | ||
405 | up(&sn_hotplug_sem); | 406 | mutex_unlock(&sn_hotplug_mutex); |
406 | 407 | ||
407 | if (rc == 0) | 408 | if (rc == 0) |
408 | dev_dbg(slot->pci_bus->self, | 409 | dev_dbg(slot->pci_bus->self, |
@@ -422,7 +423,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
422 | int rc; | 423 | int rc; |
423 | 424 | ||
424 | /* Acquire update access to the bus */ | 425 | /* Acquire update access to the bus */ |
425 | down(&sn_hotplug_sem); | 426 | mutex_lock(&sn_hotplug_mutex); |
426 | 427 | ||
427 | /* is it okay to bring this slot down? */ | 428 | /* is it okay to bring this slot down? */ |
428 | rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, | 429 | rc = sn_slot_disable(bss_hotplug_slot, slot->device_num, |
@@ -450,7 +451,7 @@ static int disable_slot(struct hotplug_slot *bss_hotplug_slot) | |||
450 | PCI_REQ_SLOT_DISABLE); | 451 | PCI_REQ_SLOT_DISABLE); |
451 | leaving: | 452 | leaving: |
452 | /* Release the bus lock */ | 453 | /* Release the bus lock */ |
453 | up(&sn_hotplug_sem); | 454 | mutex_unlock(&sn_hotplug_mutex); |
454 | 455 | ||
455 | return rc; | 456 | return rc; |
456 | } | 457 | } |
@@ -462,9 +463,9 @@ static inline int get_power_status(struct hotplug_slot *bss_hotplug_slot, | |||
462 | struct pcibus_info *pcibus_info; | 463 | struct pcibus_info *pcibus_info; |
463 | 464 | ||
464 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); | 465 | pcibus_info = SN_PCIBUS_BUSSOFT_INFO(slot->pci_bus); |
465 | down(&sn_hotplug_sem); | 466 | mutex_lock(&sn_hotplug_mutex); |
466 | *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); | 467 | *value = pcibus_info->pbi_enabled_devices & (1 << slot->device_num); |
467 | up(&sn_hotplug_sem); | 468 | mutex_unlock(&sn_hotplug_mutex); |
468 | return 0; | 469 | return 0; |
469 | } | 470 | } |
470 | 471 | ||
diff --git a/drivers/pnp/isapnp/core.c b/drivers/pnp/isapnp/core.c index b1b4b683cbdd..ac7c2bb6c69e 100644 --- a/drivers/pnp/isapnp/core.c +++ b/drivers/pnp/isapnp/core.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/delay.h> | 42 | #include <linux/delay.h> |
43 | #include <linux/init.h> | 43 | #include <linux/init.h> |
44 | #include <linux/isapnp.h> | 44 | #include <linux/isapnp.h> |
45 | #include <linux/mutex.h> | ||
45 | #include <asm/io.h> | 46 | #include <asm/io.h> |
46 | 47 | ||
47 | #if 0 | 48 | #if 0 |
@@ -92,7 +93,7 @@ MODULE_LICENSE("GPL"); | |||
92 | #define _LTAG_FIXEDMEM32RANGE 0x86 | 93 | #define _LTAG_FIXEDMEM32RANGE 0x86 |
93 | 94 | ||
94 | static unsigned char isapnp_checksum_value; | 95 | static unsigned char isapnp_checksum_value; |
95 | static DECLARE_MUTEX(isapnp_cfg_mutex); | 96 | static DEFINE_MUTEX(isapnp_cfg_mutex); |
96 | static int isapnp_detected; | 97 | static int isapnp_detected; |
97 | static int isapnp_csn_count; | 98 | static int isapnp_csn_count; |
98 | 99 | ||
@@ -903,7 +904,7 @@ int isapnp_cfg_begin(int csn, int logdev) | |||
903 | { | 904 | { |
904 | if (csn < 1 || csn > isapnp_csn_count || logdev > 10) | 905 | if (csn < 1 || csn > isapnp_csn_count || logdev > 10) |
905 | return -EINVAL; | 906 | return -EINVAL; |
906 | down(&isapnp_cfg_mutex); | 907 | mutex_lock(&isapnp_cfg_mutex); |
907 | isapnp_wait(); | 908 | isapnp_wait(); |
908 | isapnp_key(); | 909 | isapnp_key(); |
909 | isapnp_wake(csn); | 910 | isapnp_wake(csn); |
@@ -929,7 +930,7 @@ int isapnp_cfg_begin(int csn, int logdev) | |||
929 | int isapnp_cfg_end(void) | 930 | int isapnp_cfg_end(void) |
930 | { | 931 | { |
931 | isapnp_wait(); | 932 | isapnp_wait(); |
932 | up(&isapnp_cfg_mutex); | 933 | mutex_unlock(&isapnp_cfg_mutex); |
933 | return 0; | 934 | return 0; |
934 | } | 935 | } |
935 | 936 | ||
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index bd06607a5dcc..eecb2afad5c2 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/major.h> | 28 | #include <linux/major.h> |
29 | #include <linux/kdev_t.h> | 29 | #include <linux/kdev_t.h> |
30 | #include <linux/device.h> | 30 | #include <linux/device.h> |
31 | #include <linux/mutex.h> | ||
31 | 32 | ||
32 | struct class *class3270; | 33 | struct class *class3270; |
33 | 34 | ||
@@ -59,7 +60,7 @@ struct raw3270 { | |||
59 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ | 60 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ |
60 | 61 | ||
61 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ | 62 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ |
62 | static DECLARE_MUTEX(raw3270_sem); | 63 | static DEFINE_MUTEX(raw3270_mutex); |
63 | 64 | ||
64 | /* List of 3270 devices. */ | 65 | /* List of 3270 devices. */ |
65 | static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); | 66 | static struct list_head raw3270_devices = LIST_HEAD_INIT(raw3270_devices); |
@@ -815,7 +816,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) | |||
815 | * number for it. Note: there is no device with minor 0, | 816 | * number for it. Note: there is no device with minor 0, |
816 | * see special case for fs3270.c:fs3270_open(). | 817 | * see special case for fs3270.c:fs3270_open(). |
817 | */ | 818 | */ |
818 | down(&raw3270_sem); | 819 | mutex_lock(&raw3270_mutex); |
819 | /* Keep the list sorted. */ | 820 | /* Keep the list sorted. */ |
820 | minor = RAW3270_FIRSTMINOR; | 821 | minor = RAW3270_FIRSTMINOR; |
821 | rp->minor = -1; | 822 | rp->minor = -1; |
@@ -832,7 +833,7 @@ raw3270_setup_device(struct ccw_device *cdev, struct raw3270 *rp, char *ascebc) | |||
832 | rp->minor = minor; | 833 | rp->minor = minor; |
833 | list_add_tail(&rp->list, &raw3270_devices); | 834 | list_add_tail(&rp->list, &raw3270_devices); |
834 | } | 835 | } |
835 | up(&raw3270_sem); | 836 | mutex_unlock(&raw3270_mutex); |
836 | /* No free minor number? Then give up. */ | 837 | /* No free minor number? Then give up. */ |
837 | if (rp->minor == -1) | 838 | if (rp->minor == -1) |
838 | return -EUSERS; | 839 | return -EUSERS; |
@@ -1003,7 +1004,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | |||
1003 | 1004 | ||
1004 | if (minor <= 0) | 1005 | if (minor <= 0) |
1005 | return -ENODEV; | 1006 | return -ENODEV; |
1006 | down(&raw3270_sem); | 1007 | mutex_lock(&raw3270_mutex); |
1007 | rc = -ENODEV; | 1008 | rc = -ENODEV; |
1008 | list_for_each_entry(rp, &raw3270_devices, list) { | 1009 | list_for_each_entry(rp, &raw3270_devices, list) { |
1009 | if (rp->minor != minor) | 1010 | if (rp->minor != minor) |
@@ -1024,7 +1025,7 @@ raw3270_add_view(struct raw3270_view *view, struct raw3270_fn *fn, int minor) | |||
1024 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 1025 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
1025 | break; | 1026 | break; |
1026 | } | 1027 | } |
1027 | up(&raw3270_sem); | 1028 | mutex_unlock(&raw3270_mutex); |
1028 | return rc; | 1029 | return rc; |
1029 | } | 1030 | } |
1030 | 1031 | ||
@@ -1038,7 +1039,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) | |||
1038 | struct raw3270_view *view, *tmp; | 1039 | struct raw3270_view *view, *tmp; |
1039 | unsigned long flags; | 1040 | unsigned long flags; |
1040 | 1041 | ||
1041 | down(&raw3270_sem); | 1042 | mutex_lock(&raw3270_mutex); |
1042 | view = ERR_PTR(-ENODEV); | 1043 | view = ERR_PTR(-ENODEV); |
1043 | list_for_each_entry(rp, &raw3270_devices, list) { | 1044 | list_for_each_entry(rp, &raw3270_devices, list) { |
1044 | if (rp->minor != minor) | 1045 | if (rp->minor != minor) |
@@ -1057,7 +1058,7 @@ raw3270_find_view(struct raw3270_fn *fn, int minor) | |||
1057 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 1058 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
1058 | break; | 1059 | break; |
1059 | } | 1060 | } |
1060 | up(&raw3270_sem); | 1061 | mutex_unlock(&raw3270_mutex); |
1061 | return view; | 1062 | return view; |
1062 | } | 1063 | } |
1063 | 1064 | ||
@@ -1104,7 +1105,7 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1104 | struct ccw_device *cdev; | 1105 | struct ccw_device *cdev; |
1105 | 1106 | ||
1106 | /* Remove from device chain. */ | 1107 | /* Remove from device chain. */ |
1107 | down(&raw3270_sem); | 1108 | mutex_lock(&raw3270_mutex); |
1108 | if (rp->clttydev) | 1109 | if (rp->clttydev) |
1109 | class_device_destroy(class3270, | 1110 | class_device_destroy(class3270, |
1110 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); | 1111 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); |
@@ -1112,7 +1113,7 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1112 | class_device_destroy(class3270, | 1113 | class_device_destroy(class3270, |
1113 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); | 1114 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); |
1114 | list_del_init(&rp->list); | 1115 | list_del_init(&rp->list); |
1115 | up(&raw3270_sem); | 1116 | mutex_unlock(&raw3270_mutex); |
1116 | 1117 | ||
1117 | /* Disconnect from ccw_device. */ | 1118 | /* Disconnect from ccw_device. */ |
1118 | cdev = rp->cdev; | 1119 | cdev = rp->cdev; |
@@ -1208,13 +1209,13 @@ int raw3270_register_notifier(void (*notifier)(int, int)) | |||
1208 | if (!np) | 1209 | if (!np) |
1209 | return -ENOMEM; | 1210 | return -ENOMEM; |
1210 | np->notifier = notifier; | 1211 | np->notifier = notifier; |
1211 | down(&raw3270_sem); | 1212 | mutex_lock(&raw3270_mutex); |
1212 | list_add_tail(&np->list, &raw3270_notifier); | 1213 | list_add_tail(&np->list, &raw3270_notifier); |
1213 | list_for_each_entry(rp, &raw3270_devices, list) { | 1214 | list_for_each_entry(rp, &raw3270_devices, list) { |
1214 | get_device(&rp->cdev->dev); | 1215 | get_device(&rp->cdev->dev); |
1215 | notifier(rp->minor, 1); | 1216 | notifier(rp->minor, 1); |
1216 | } | 1217 | } |
1217 | up(&raw3270_sem); | 1218 | mutex_unlock(&raw3270_mutex); |
1218 | return 0; | 1219 | return 0; |
1219 | } | 1220 | } |
1220 | 1221 | ||
@@ -1222,14 +1223,14 @@ void raw3270_unregister_notifier(void (*notifier)(int, int)) | |||
1222 | { | 1223 | { |
1223 | struct raw3270_notifier *np; | 1224 | struct raw3270_notifier *np; |
1224 | 1225 | ||
1225 | down(&raw3270_sem); | 1226 | mutex_lock(&raw3270_mutex); |
1226 | list_for_each_entry(np, &raw3270_notifier, list) | 1227 | list_for_each_entry(np, &raw3270_notifier, list) |
1227 | if (np->notifier == notifier) { | 1228 | if (np->notifier == notifier) { |
1228 | list_del(&np->list); | 1229 | list_del(&np->list); |
1229 | kfree(np); | 1230 | kfree(np); |
1230 | break; | 1231 | break; |
1231 | } | 1232 | } |
1232 | up(&raw3270_sem); | 1233 | mutex_unlock(&raw3270_mutex); |
1233 | } | 1234 | } |
1234 | 1235 | ||
1235 | /* | 1236 | /* |
@@ -1256,10 +1257,10 @@ raw3270_set_online (struct ccw_device *cdev) | |||
1256 | goto failure; | 1257 | goto failure; |
1257 | raw3270_create_attributes(rp); | 1258 | raw3270_create_attributes(rp); |
1258 | set_bit(RAW3270_FLAGS_READY, &rp->flags); | 1259 | set_bit(RAW3270_FLAGS_READY, &rp->flags); |
1259 | down(&raw3270_sem); | 1260 | mutex_lock(&raw3270_mutex); |
1260 | list_for_each_entry(np, &raw3270_notifier, list) | 1261 | list_for_each_entry(np, &raw3270_notifier, list) |
1261 | np->notifier(rp->minor, 1); | 1262 | np->notifier(rp->minor, 1); |
1262 | up(&raw3270_sem); | 1263 | mutex_unlock(&raw3270_mutex); |
1263 | return 0; | 1264 | return 0; |
1264 | 1265 | ||
1265 | failure: | 1266 | failure: |
@@ -1307,10 +1308,10 @@ raw3270_remove (struct ccw_device *cdev) | |||
1307 | } | 1308 | } |
1308 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | 1309 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); |
1309 | 1310 | ||
1310 | down(&raw3270_sem); | 1311 | mutex_lock(&raw3270_mutex); |
1311 | list_for_each_entry(np, &raw3270_notifier, list) | 1312 | list_for_each_entry(np, &raw3270_notifier, list) |
1312 | np->notifier(rp->minor, 0); | 1313 | np->notifier(rp->minor, 0); |
1313 | up(&raw3270_sem); | 1314 | mutex_unlock(&raw3270_mutex); |
1314 | 1315 | ||
1315 | /* Reset 3270 device. */ | 1316 | /* Reset 3270 device. */ |
1316 | raw3270_reset_device(rp); | 1317 | raw3270_reset_device(rp); |
@@ -1370,13 +1371,13 @@ raw3270_init(void) | |||
1370 | rc = ccw_driver_register(&raw3270_ccw_driver); | 1371 | rc = ccw_driver_register(&raw3270_ccw_driver); |
1371 | if (rc == 0) { | 1372 | if (rc == 0) { |
1372 | /* Create attributes for early (= console) device. */ | 1373 | /* Create attributes for early (= console) device. */ |
1373 | down(&raw3270_sem); | 1374 | mutex_lock(&raw3270_mutex); |
1374 | class3270 = class_create(THIS_MODULE, "3270"); | 1375 | class3270 = class_create(THIS_MODULE, "3270"); |
1375 | list_for_each_entry(rp, &raw3270_devices, list) { | 1376 | list_for_each_entry(rp, &raw3270_devices, list) { |
1376 | get_device(&rp->cdev->dev); | 1377 | get_device(&rp->cdev->dev); |
1377 | raw3270_create_attributes(rp); | 1378 | raw3270_create_attributes(rp); |
1378 | } | 1379 | } |
1379 | up(&raw3270_sem); | 1380 | mutex_unlock(&raw3270_mutex); |
1380 | } | 1381 | } |
1381 | return rc; | 1382 | return rc; |
1382 | } | 1383 | } |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 95b92f317b6f..395cfc6a344f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -829,18 +829,6 @@ zfcp_unit_dequeue(struct zfcp_unit *unit) | |||
829 | device_unregister(&unit->sysfs_device); | 829 | device_unregister(&unit->sysfs_device); |
830 | } | 830 | } |
831 | 831 | ||
832 | static void * | ||
833 | zfcp_mempool_alloc(gfp_t gfp_mask, void *size) | ||
834 | { | ||
835 | return kmalloc((size_t) size, gfp_mask); | ||
836 | } | ||
837 | |||
838 | static void | ||
839 | zfcp_mempool_free(void *element, void *size) | ||
840 | { | ||
841 | kfree(element); | ||
842 | } | ||
843 | |||
844 | /* | 832 | /* |
845 | * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI | 833 | * Allocates a combined QTCB/fsf_req buffer for erp actions and fcp/SCSI |
846 | * commands. | 834 | * commands. |
@@ -853,51 +841,39 @@ static int | |||
853 | zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) | 841 | zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter) |
854 | { | 842 | { |
855 | adapter->pool.fsf_req_erp = | 843 | adapter->pool.fsf_req_erp = |
856 | mempool_create(ZFCP_POOL_FSF_REQ_ERP_NR, | 844 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ERP_NR, |
857 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 845 | sizeof(struct zfcp_fsf_req_pool_element)); |
858 | sizeof(struct zfcp_fsf_req_pool_element)); | 846 | if (!adapter->pool.fsf_req_erp) |
859 | |||
860 | if (NULL == adapter->pool.fsf_req_erp) | ||
861 | return -ENOMEM; | 847 | return -ENOMEM; |
862 | 848 | ||
863 | adapter->pool.fsf_req_scsi = | 849 | adapter->pool.fsf_req_scsi = |
864 | mempool_create(ZFCP_POOL_FSF_REQ_SCSI_NR, | 850 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_SCSI_NR, |
865 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 851 | sizeof(struct zfcp_fsf_req_pool_element)); |
866 | sizeof(struct zfcp_fsf_req_pool_element)); | 852 | if (!adapter->pool.fsf_req_scsi) |
867 | |||
868 | if (NULL == adapter->pool.fsf_req_scsi) | ||
869 | return -ENOMEM; | 853 | return -ENOMEM; |
870 | 854 | ||
871 | adapter->pool.fsf_req_abort = | 855 | adapter->pool.fsf_req_abort = |
872 | mempool_create(ZFCP_POOL_FSF_REQ_ABORT_NR, | 856 | mempool_create_kmalloc_pool(ZFCP_POOL_FSF_REQ_ABORT_NR, |
873 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 857 | sizeof(struct zfcp_fsf_req_pool_element)); |
874 | sizeof(struct zfcp_fsf_req_pool_element)); | 858 | if (!adapter->pool.fsf_req_abort) |
875 | |||
876 | if (NULL == adapter->pool.fsf_req_abort) | ||
877 | return -ENOMEM; | 859 | return -ENOMEM; |
878 | 860 | ||
879 | adapter->pool.fsf_req_status_read = | 861 | adapter->pool.fsf_req_status_read = |
880 | mempool_create(ZFCP_POOL_STATUS_READ_NR, | 862 | mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, |
881 | zfcp_mempool_alloc, zfcp_mempool_free, | 863 | sizeof(struct zfcp_fsf_req)); |
882 | (void *) sizeof(struct zfcp_fsf_req)); | 864 | if (!adapter->pool.fsf_req_status_read) |
883 | |||
884 | if (NULL == adapter->pool.fsf_req_status_read) | ||
885 | return -ENOMEM; | 865 | return -ENOMEM; |
886 | 866 | ||
887 | adapter->pool.data_status_read = | 867 | adapter->pool.data_status_read = |
888 | mempool_create(ZFCP_POOL_STATUS_READ_NR, | 868 | mempool_create_kmalloc_pool(ZFCP_POOL_STATUS_READ_NR, |
889 | zfcp_mempool_alloc, zfcp_mempool_free, | 869 | sizeof(struct fsf_status_read_buffer)); |
890 | (void *) sizeof(struct fsf_status_read_buffer)); | 870 | if (!adapter->pool.data_status_read) |
891 | |||
892 | if (NULL == adapter->pool.data_status_read) | ||
893 | return -ENOMEM; | 871 | return -ENOMEM; |
894 | 872 | ||
895 | adapter->pool.data_gid_pn = | 873 | adapter->pool.data_gid_pn = |
896 | mempool_create(ZFCP_POOL_DATA_GID_PN_NR, | 874 | mempool_create_kmalloc_pool(ZFCP_POOL_DATA_GID_PN_NR, |
897 | zfcp_mempool_alloc, zfcp_mempool_free, (void *) | 875 | sizeof(struct zfcp_gid_pn_data)); |
898 | sizeof(struct zfcp_gid_pn_data)); | 876 | if (!adapter->pool.data_gid_pn) |
899 | |||
900 | if (NULL == adapter->pool.data_gid_pn) | ||
901 | return -ENOMEM; | 877 | return -ENOMEM; |
902 | 878 | ||
903 | return 0; | 879 | return 0; |
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index 7b82ff090d42..2068b66822b7 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c | |||
@@ -3200,8 +3200,8 @@ iscsi_r2tpool_alloc(struct iscsi_session *session) | |||
3200 | * Data-Out PDU's within R2T-sequence can be quite big; | 3200 | * Data-Out PDU's within R2T-sequence can be quite big; |
3201 | * using mempool | 3201 | * using mempool |
3202 | */ | 3202 | */ |
3203 | ctask->datapool = mempool_create(ISCSI_DTASK_DEFAULT_MAX, | 3203 | ctask->datapool = mempool_create_slab_pool(ISCSI_DTASK_DEFAULT_MAX, |
3204 | mempool_alloc_slab, mempool_free_slab, taskcache); | 3204 | taskcache); |
3205 | if (ctask->datapool == NULL) { | 3205 | if (ctask->datapool == NULL) { |
3206 | kfifo_free(ctask->r2tqueue); | 3206 | kfifo_free(ctask->r2tqueue); |
3207 | iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); | 3207 | iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts); |
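[Annotation] The zfcp and iscsi hunks above, and the lpfc/qla2xxx/scsi_lib/cifs/bio hunks that follow, replace open-coded mempool_create() callbacks with two convenience constructors: mempool_create_kmalloc_pool() for pools of fixed-size kmalloc'd buffers and mempool_create_slab_pool() for pools backed by an existing slab cache. Both are thin wrappers around mempool_create() with the stock alloc/free helpers, so behaviour is unchanged and the per-driver wrapper functions become dead code. A hedged sketch, with struct my_req and my_cache standing in for whatever a caller actually pools:

	#include <linux/mempool.h>
	#include <linux/slab.h>

	struct my_req {
		int tag;		/* placeholder pooled object */
	};

	static mempool_t *req_pool;	/* kmalloc-backed */
	static mempool_t *obj_pool;	/* slab-backed */

	static int my_create_pools(kmem_cache_t *my_cache)
	{
		/* at least 16 objects of sizeof(struct my_req) bytes each */
		req_pool = mempool_create_kmalloc_pool(16, sizeof(struct my_req));
		if (!req_pool)
			return -ENOMEM;

		/* at least 16 objects drawn from an existing slab cache */
		obj_pool = mempool_create_slab_pool(16, my_cache);
		if (!obj_pool) {
			mempool_destroy(req_pool);
			return -ENOMEM;
		}
		return 0;
	}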
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c index 352df47bcaca..07017658ac56 100644 --- a/drivers/scsi/lpfc/lpfc_mem.c +++ b/drivers/scsi/lpfc/lpfc_mem.c | |||
@@ -38,18 +38,6 @@ | |||
38 | #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ | 38 | #define LPFC_MBUF_POOL_SIZE 64 /* max elements in MBUF safety pool */ |
39 | #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ | 39 | #define LPFC_MEM_POOL_SIZE 64 /* max elem in non-DMA safety pool */ |
40 | 40 | ||
41 | static void * | ||
42 | lpfc_pool_kmalloc(gfp_t gfp_flags, void *data) | ||
43 | { | ||
44 | return kmalloc((unsigned long)data, gfp_flags); | ||
45 | } | ||
46 | |||
47 | static void | ||
48 | lpfc_pool_kfree(void *obj, void *data) | ||
49 | { | ||
50 | kfree(obj); | ||
51 | } | ||
52 | |||
53 | int | 41 | int |
54 | lpfc_mem_alloc(struct lpfc_hba * phba) | 42 | lpfc_mem_alloc(struct lpfc_hba * phba) |
55 | { | 43 | { |
@@ -79,15 +67,13 @@ lpfc_mem_alloc(struct lpfc_hba * phba) | |||
79 | pool->current_count++; | 67 | pool->current_count++; |
80 | } | 68 | } |
81 | 69 | ||
82 | phba->mbox_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, | 70 | phba->mbox_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, |
83 | lpfc_pool_kmalloc, lpfc_pool_kfree, | 71 | sizeof(LPFC_MBOXQ_t)); |
84 | (void *)(unsigned long)sizeof(LPFC_MBOXQ_t)); | ||
85 | if (!phba->mbox_mem_pool) | 72 | if (!phba->mbox_mem_pool) |
86 | goto fail_free_mbuf_pool; | 73 | goto fail_free_mbuf_pool; |
87 | 74 | ||
88 | phba->nlp_mem_pool = mempool_create(LPFC_MEM_POOL_SIZE, | 75 | phba->nlp_mem_pool = mempool_create_kmalloc_pool(LPFC_MEM_POOL_SIZE, |
89 | lpfc_pool_kmalloc, lpfc_pool_kfree, | 76 | sizeof(struct lpfc_nodelist)); |
90 | (void *)(unsigned long)sizeof(struct lpfc_nodelist)); | ||
91 | if (!phba->nlp_mem_pool) | 77 | if (!phba->nlp_mem_pool) |
92 | goto fail_free_mbox_pool; | 78 | goto fail_free_mbox_pool; |
93 | 79 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 029bbf461bb2..017729c59a49 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -2154,8 +2154,7 @@ qla2x00_allocate_sp_pool(scsi_qla_host_t *ha) | |||
2154 | int rval; | 2154 | int rval; |
2155 | 2155 | ||
2156 | rval = QLA_SUCCESS; | 2156 | rval = QLA_SUCCESS; |
2157 | ha->srb_mempool = mempool_create(SRB_MIN_REQ, mempool_alloc_slab, | 2157 | ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); |
2158 | mempool_free_slab, srb_cachep); | ||
2159 | if (ha->srb_mempool == NULL) { | 2158 | if (ha->srb_mempool == NULL) { |
2160 | qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); | 2159 | qla_printk(KERN_INFO, ha, "Unable to allocate SRB mempool.\n"); |
2161 | rval = QLA_FUNCTION_FAILED; | 2160 | rval = QLA_FUNCTION_FAILED; |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index ede158d08d9d..8f010a314a3d 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -1787,9 +1787,8 @@ int __init scsi_init_queue(void) | |||
1787 | sgp->name); | 1787 | sgp->name); |
1788 | } | 1788 | } |
1789 | 1789 | ||
1790 | sgp->pool = mempool_create(SG_MEMPOOL_SIZE, | 1790 | sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, |
1791 | mempool_alloc_slab, mempool_free_slab, | 1791 | sgp->slab); |
1792 | sgp->slab); | ||
1793 | if (!sgp->pool) { | 1792 | if (!sgp->pool) { |
1794 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", | 1793 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", |
1795 | sgp->name); | 1794 | sgp->name); |
diff --git a/drivers/telephony/phonedev.c b/drivers/telephony/phonedev.c index 3c987f49f6b4..7a6db1c5c8c5 100644 --- a/drivers/telephony/phonedev.c +++ b/drivers/telephony/phonedev.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/kmod.h> | 29 | #include <linux/kmod.h> |
30 | #include <linux/sem.h> | 30 | #include <linux/sem.h> |
31 | #include <linux/devfs_fs_kernel.h> | 31 | #include <linux/devfs_fs_kernel.h> |
32 | #include <linux/mutex.h> | ||
32 | 33 | ||
33 | #define PHONE_NUM_DEVICES 256 | 34 | #define PHONE_NUM_DEVICES 256 |
34 | 35 | ||
@@ -37,7 +38,7 @@ | |||
37 | */ | 38 | */ |
38 | 39 | ||
39 | static struct phone_device *phone_device[PHONE_NUM_DEVICES]; | 40 | static struct phone_device *phone_device[PHONE_NUM_DEVICES]; |
40 | static DECLARE_MUTEX(phone_lock); | 41 | static DEFINE_MUTEX(phone_lock); |
41 | 42 | ||
42 | /* | 43 | /* |
43 | * Open a phone device. | 44 | * Open a phone device. |
@@ -53,14 +54,14 @@ static int phone_open(struct inode *inode, struct file *file) | |||
53 | if (minor >= PHONE_NUM_DEVICES) | 54 | if (minor >= PHONE_NUM_DEVICES) |
54 | return -ENODEV; | 55 | return -ENODEV; |
55 | 56 | ||
56 | down(&phone_lock); | 57 | mutex_lock(&phone_lock); |
57 | p = phone_device[minor]; | 58 | p = phone_device[minor]; |
58 | if (p) | 59 | if (p) |
59 | new_fops = fops_get(p->f_op); | 60 | new_fops = fops_get(p->f_op); |
60 | if (!new_fops) { | 61 | if (!new_fops) { |
61 | up(&phone_lock); | 62 | mutex_unlock(&phone_lock); |
62 | request_module("char-major-%d-%d", PHONE_MAJOR, minor); | 63 | request_module("char-major-%d-%d", PHONE_MAJOR, minor); |
63 | down(&phone_lock); | 64 | mutex_lock(&phone_lock); |
64 | p = phone_device[minor]; | 65 | p = phone_device[minor]; |
65 | if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) | 66 | if (p == NULL || (new_fops = fops_get(p->f_op)) == NULL) |
66 | { | 67 | { |
@@ -78,7 +79,7 @@ static int phone_open(struct inode *inode, struct file *file) | |||
78 | } | 79 | } |
79 | fops_put(old_fops); | 80 | fops_put(old_fops); |
80 | end: | 81 | end: |
81 | up(&phone_lock); | 82 | mutex_unlock(&phone_lock); |
82 | return err; | 83 | return err; |
83 | } | 84 | } |
84 | 85 | ||
@@ -100,18 +101,18 @@ int phone_register_device(struct phone_device *p, int unit) | |||
100 | end = unit + 1; /* enter the loop at least one time */ | 101 | end = unit + 1; /* enter the loop at least one time */ |
101 | } | 102 | } |
102 | 103 | ||
103 | down(&phone_lock); | 104 | mutex_lock(&phone_lock); |
104 | for (i = base; i < end; i++) { | 105 | for (i = base; i < end; i++) { |
105 | if (phone_device[i] == NULL) { | 106 | if (phone_device[i] == NULL) { |
106 | phone_device[i] = p; | 107 | phone_device[i] = p; |
107 | p->minor = i; | 108 | p->minor = i; |
108 | devfs_mk_cdev(MKDEV(PHONE_MAJOR,i), | 109 | devfs_mk_cdev(MKDEV(PHONE_MAJOR,i), |
109 | S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i); | 110 | S_IFCHR|S_IRUSR|S_IWUSR, "phone/%d", i); |
110 | up(&phone_lock); | 111 | mutex_unlock(&phone_lock); |
111 | return 0; | 112 | return 0; |
112 | } | 113 | } |
113 | } | 114 | } |
114 | up(&phone_lock); | 115 | mutex_unlock(&phone_lock); |
115 | return -ENFILE; | 116 | return -ENFILE; |
116 | } | 117 | } |
117 | 118 | ||
@@ -121,12 +122,12 @@ int phone_register_device(struct phone_device *p, int unit) | |||
121 | 122 | ||
122 | void phone_unregister_device(struct phone_device *pfd) | 123 | void phone_unregister_device(struct phone_device *pfd) |
123 | { | 124 | { |
124 | down(&phone_lock); | 125 | mutex_lock(&phone_lock); |
125 | if (phone_device[pfd->minor] != pfd) | 126 | if (phone_device[pfd->minor] != pfd) |
126 | panic("phone: bad unregister"); | 127 | panic("phone: bad unregister"); |
127 | devfs_remove("phone/%d", pfd->minor); | 128 | devfs_remove("phone/%d", pfd->minor); |
128 | phone_device[pfd->minor] = NULL; | 129 | phone_device[pfd->minor] = NULL; |
129 | up(&phone_lock); | 130 | mutex_unlock(&phone_lock); |
130 | } | 131 | } |
131 | 132 | ||
132 | 133 | ||
diff --git a/fs/afs/file.c b/fs/afs/file.c index 150b19227922..7bb716887e29 100644 --- a/fs/afs/file.c +++ b/fs/afs/file.c | |||
@@ -28,7 +28,7 @@ static int afs_file_release(struct inode *inode, struct file *file); | |||
28 | #endif | 28 | #endif |
29 | 29 | ||
30 | static int afs_file_readpage(struct file *file, struct page *page); | 30 | static int afs_file_readpage(struct file *file, struct page *page); |
31 | static int afs_file_invalidatepage(struct page *page, unsigned long offset); | 31 | static void afs_file_invalidatepage(struct page *page, unsigned long offset); |
32 | static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); | 32 | static int afs_file_releasepage(struct page *page, gfp_t gfp_flags); |
33 | 33 | ||
34 | struct inode_operations afs_file_inode_operations = { | 34 | struct inode_operations afs_file_inode_operations = { |
@@ -212,7 +212,7 @@ int afs_cache_get_page_cookie(struct page *page, | |||
212 | /* | 212 | /* |
213 | * invalidate part or all of a page | 213 | * invalidate part or all of a page |
214 | */ | 214 | */ |
215 | static int afs_file_invalidatepage(struct page *page, unsigned long offset) | 215 | static void afs_file_invalidatepage(struct page *page, unsigned long offset) |
216 | { | 216 | { |
217 | int ret = 1; | 217 | int ret = 1; |
218 | 218 | ||
@@ -238,11 +238,11 @@ static int afs_file_invalidatepage(struct page *page, unsigned long offset) | |||
238 | if (!PageWriteback(page)) | 238 | if (!PageWriteback(page)) |
239 | ret = page->mapping->a_ops->releasepage(page, | 239 | ret = page->mapping->a_ops->releasepage(page, |
240 | 0); | 240 | 0); |
241 | /* possibly should BUG_ON(!ret); - neilb */ | ||
241 | } | 242 | } |
242 | } | 243 | } |
243 | 244 | ||
244 | _leave(" = %d", ret); | 245 | _leave(" = %d", ret); |
245 | return ret; | ||
246 | } /* end afs_file_invalidatepage() */ | 246 | } /* end afs_file_invalidatepage() */ |
247 | 247 | ||
248 | /*****************************************************************************/ | 248 | /*****************************************************************************/ |
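[Annotation] The ->invalidatepage() change in the afs hunk above, and the matching block_invalidatepage()/do_invalidatepage() hunks in fs/buffer.c further down, are a prototype cleanup: no caller ever used the return value, so the address_space operation now returns void. A sketch of a post-conversion implementation; my_invalidatepage and my_aops are invented names, not the afs code itself:

	static void my_invalidatepage(struct page *page, unsigned long offset)
	{
		/* offset == 0 means the whole page is being thrown away */
		if (offset == 0 && !PageWriteback(page))
			try_to_release_page(page, 0);
		/* no return value: callers no longer inspect a result */
	}

	static struct address_space_operations my_aops = {
		.invalidatepage	= my_invalidatepage,
		/* .readpage, .writepage, ... */
	};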
@@ -30,7 +30,7 @@ | |||
30 | 30 | ||
31 | #define BIO_POOL_SIZE 256 | 31 | #define BIO_POOL_SIZE 256 |
32 | 32 | ||
33 | static kmem_cache_t *bio_slab; | 33 | static kmem_cache_t *bio_slab __read_mostly; |
34 | 34 | ||
35 | #define BIOVEC_NR_POOLS 6 | 35 | #define BIOVEC_NR_POOLS 6 |
36 | 36 | ||
@@ -39,7 +39,7 @@ static kmem_cache_t *bio_slab; | |||
39 | * basically we just need to survive | 39 | * basically we just need to survive |
40 | */ | 40 | */ |
41 | #define BIO_SPLIT_ENTRIES 8 | 41 | #define BIO_SPLIT_ENTRIES 8 |
42 | mempool_t *bio_split_pool; | 42 | mempool_t *bio_split_pool __read_mostly; |
43 | 43 | ||
44 | struct biovec_slab { | 44 | struct biovec_slab { |
45 | int nr_vecs; | 45 | int nr_vecs; |
@@ -1125,16 +1125,6 @@ struct bio_pair *bio_split(struct bio *bi, mempool_t *pool, int first_sectors) | |||
1125 | return bp; | 1125 | return bp; |
1126 | } | 1126 | } |
1127 | 1127 | ||
1128 | static void *bio_pair_alloc(gfp_t gfp_flags, void *data) | ||
1129 | { | ||
1130 | return kmalloc(sizeof(struct bio_pair), gfp_flags); | ||
1131 | } | ||
1132 | |||
1133 | static void bio_pair_free(void *bp, void *data) | ||
1134 | { | ||
1135 | kfree(bp); | ||
1136 | } | ||
1137 | |||
1138 | 1128 | ||
1139 | /* | 1129 | /* |
1140 | * create memory pools for biovec's in a bio_set. | 1130 | * create memory pools for biovec's in a bio_set. |
@@ -1151,8 +1141,7 @@ static int biovec_create_pools(struct bio_set *bs, int pool_entries, int scale) | |||
1151 | if (i >= scale) | 1141 | if (i >= scale) |
1152 | pool_entries >>= 1; | 1142 | pool_entries >>= 1; |
1153 | 1143 | ||
1154 | *bvp = mempool_create(pool_entries, mempool_alloc_slab, | 1144 | *bvp = mempool_create_slab_pool(pool_entries, bp->slab); |
1155 | mempool_free_slab, bp->slab); | ||
1156 | if (!*bvp) | 1145 | if (!*bvp) |
1157 | return -ENOMEM; | 1146 | return -ENOMEM; |
1158 | } | 1147 | } |
@@ -1189,9 +1178,7 @@ struct bio_set *bioset_create(int bio_pool_size, int bvec_pool_size, int scale) | |||
1189 | if (!bs) | 1178 | if (!bs) |
1190 | return NULL; | 1179 | return NULL; |
1191 | 1180 | ||
1192 | bs->bio_pool = mempool_create(bio_pool_size, mempool_alloc_slab, | 1181 | bs->bio_pool = mempool_create_slab_pool(bio_pool_size, bio_slab); |
1193 | mempool_free_slab, bio_slab); | ||
1194 | |||
1195 | if (!bs->bio_pool) | 1182 | if (!bs->bio_pool) |
1196 | goto bad; | 1183 | goto bad; |
1197 | 1184 | ||
@@ -1254,8 +1241,8 @@ static int __init init_bio(void) | |||
1254 | if (!fs_bio_set) | 1241 | if (!fs_bio_set) |
1255 | panic("bio: can't allocate bios\n"); | 1242 | panic("bio: can't allocate bios\n"); |
1256 | 1243 | ||
1257 | bio_split_pool = mempool_create(BIO_SPLIT_ENTRIES, | 1244 | bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES, |
1258 | bio_pair_alloc, bio_pair_free, NULL); | 1245 | sizeof(struct bio_pair)); |
1259 | if (!bio_split_pool) | 1246 | if (!bio_split_pool) |
1260 | panic("bio: can't create split pool\n"); | 1247 | panic("bio: can't create split pool\n"); |
1261 | 1248 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 573fc8e0b67a..5983d42df015 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -131,9 +131,10 @@ blkdev_get_block(struct inode *inode, sector_t iblock, | |||
131 | 131 | ||
132 | static int | 132 | static int |
133 | blkdev_get_blocks(struct inode *inode, sector_t iblock, | 133 | blkdev_get_blocks(struct inode *inode, sector_t iblock, |
134 | unsigned long max_blocks, struct buffer_head *bh, int create) | 134 | struct buffer_head *bh, int create) |
135 | { | 135 | { |
136 | sector_t end_block = max_block(I_BDEV(inode)); | 136 | sector_t end_block = max_block(I_BDEV(inode)); |
137 | unsigned long max_blocks = bh->b_size >> inode->i_blkbits; | ||
137 | 138 | ||
138 | if ((iblock + max_blocks) > end_block) { | 139 | if ((iblock + max_blocks) > end_block) { |
139 | max_blocks = end_block - iblock; | 140 | max_blocks = end_block - iblock; |
@@ -234,7 +235,7 @@ static int block_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
234 | */ | 235 | */ |
235 | 236 | ||
236 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); | 237 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(bdev_lock); |
237 | static kmem_cache_t * bdev_cachep; | 238 | static kmem_cache_t * bdev_cachep __read_mostly; |
238 | 239 | ||
239 | static struct inode *bdev_alloc_inode(struct super_block *sb) | 240 | static struct inode *bdev_alloc_inode(struct super_block *sb) |
240 | { | 241 | { |
@@ -308,7 +309,7 @@ static struct file_system_type bd_type = { | |||
308 | .kill_sb = kill_anon_super, | 309 | .kill_sb = kill_anon_super, |
309 | }; | 310 | }; |
310 | 311 | ||
311 | static struct vfsmount *bd_mnt; | 312 | static struct vfsmount *bd_mnt __read_mostly; |
312 | struct super_block *blockdev_superblock; | 313 | struct super_block *blockdev_superblock; |
313 | 314 | ||
314 | void __init bdev_cache_init(void) | 315 | void __init bdev_cache_init(void) |
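[Annotation] Several of the globals in these fs/ hunks gain a __read_mostly annotation (bio_slab, bio_split_pool, bdev_cachep, bd_mnt). The tag places a variable that is written once at initialisation and then only read into a separate data section, so it stops sharing cache lines with frequently written data. It is a layout hint only, added at the definition; my_cachep below is an invented example, not one of the symbols above:

	#include <linux/cache.h>
	#include <linux/slab.h>

	/* set up once in the init path, read on every request afterwards */
	static kmem_cache_t *my_cachep __read_mostly;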
diff --git a/fs/buffer.c b/fs/buffer.c index 4342ab0ad99a..d597758dd129 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -426,8 +426,10 @@ __find_get_block_slow(struct block_device *bdev, sector_t block) | |||
426 | if (all_mapped) { | 426 | if (all_mapped) { |
427 | printk("__find_get_block_slow() failed. " | 427 | printk("__find_get_block_slow() failed. " |
428 | "block=%llu, b_blocknr=%llu\n", | 428 | "block=%llu, b_blocknr=%llu\n", |
429 | (unsigned long long)block, (unsigned long long)bh->b_blocknr); | 429 | (unsigned long long)block, |
430 | printk("b_state=0x%08lx, b_size=%u\n", bh->b_state, bh->b_size); | 430 | (unsigned long long)bh->b_blocknr); |
431 | printk("b_state=0x%08lx, b_size=%zu\n", | ||
432 | bh->b_state, bh->b_size); | ||
431 | printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); | 433 | printk("device blocksize: %d\n", 1 << bd_inode->i_blkbits); |
432 | } | 434 | } |
433 | out_unlock: | 435 | out_unlock: |
@@ -1590,11 +1592,10 @@ EXPORT_SYMBOL(try_to_release_page); | |||
1590 | * point. Because the caller is about to free (and possibly reuse) those | 1592 | * point. Because the caller is about to free (and possibly reuse) those |
1591 | * blocks on-disk. | 1593 | * blocks on-disk. |
1592 | */ | 1594 | */ |
1593 | int block_invalidatepage(struct page *page, unsigned long offset) | 1595 | void block_invalidatepage(struct page *page, unsigned long offset) |
1594 | { | 1596 | { |
1595 | struct buffer_head *head, *bh, *next; | 1597 | struct buffer_head *head, *bh, *next; |
1596 | unsigned int curr_off = 0; | 1598 | unsigned int curr_off = 0; |
1597 | int ret = 1; | ||
1598 | 1599 | ||
1599 | BUG_ON(!PageLocked(page)); | 1600 | BUG_ON(!PageLocked(page)); |
1600 | if (!page_has_buffers(page)) | 1601 | if (!page_has_buffers(page)) |
@@ -1621,19 +1622,18 @@ int block_invalidatepage(struct page *page, unsigned long offset) | |||
1621 | * so real IO is not possible anymore. | 1622 | * so real IO is not possible anymore. |
1622 | */ | 1623 | */ |
1623 | if (offset == 0) | 1624 | if (offset == 0) |
1624 | ret = try_to_release_page(page, 0); | 1625 | try_to_release_page(page, 0); |
1625 | out: | 1626 | out: |
1626 | return ret; | 1627 | return; |
1627 | } | 1628 | } |
1628 | EXPORT_SYMBOL(block_invalidatepage); | 1629 | EXPORT_SYMBOL(block_invalidatepage); |
1629 | 1630 | ||
1630 | int do_invalidatepage(struct page *page, unsigned long offset) | 1631 | void do_invalidatepage(struct page *page, unsigned long offset) |
1631 | { | 1632 | { |
1632 | int (*invalidatepage)(struct page *, unsigned long); | 1633 | void (*invalidatepage)(struct page *, unsigned long); |
1633 | invalidatepage = page->mapping->a_ops->invalidatepage; | 1634 | invalidatepage = page->mapping->a_ops->invalidatepage ? : |
1634 | if (invalidatepage == NULL) | 1635 | block_invalidatepage; |
1635 | invalidatepage = block_invalidatepage; | 1636 | (*invalidatepage)(page, offset); |
1636 | return (*invalidatepage)(page, offset); | ||
1637 | } | 1637 | } |
1638 | 1638 | ||
1639 | /* | 1639 | /* |
@@ -1735,6 +1735,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1735 | sector_t block; | 1735 | sector_t block; |
1736 | sector_t last_block; | 1736 | sector_t last_block; |
1737 | struct buffer_head *bh, *head; | 1737 | struct buffer_head *bh, *head; |
1738 | const unsigned blocksize = 1 << inode->i_blkbits; | ||
1738 | int nr_underway = 0; | 1739 | int nr_underway = 0; |
1739 | 1740 | ||
1740 | BUG_ON(!PageLocked(page)); | 1741 | BUG_ON(!PageLocked(page)); |
@@ -1742,7 +1743,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1742 | last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; | 1743 | last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; |
1743 | 1744 | ||
1744 | if (!page_has_buffers(page)) { | 1745 | if (!page_has_buffers(page)) { |
1745 | create_empty_buffers(page, 1 << inode->i_blkbits, | 1746 | create_empty_buffers(page, blocksize, |
1746 | (1 << BH_Dirty)|(1 << BH_Uptodate)); | 1747 | (1 << BH_Dirty)|(1 << BH_Uptodate)); |
1747 | } | 1748 | } |
1748 | 1749 | ||
@@ -1777,6 +1778,7 @@ static int __block_write_full_page(struct inode *inode, struct page *page, | |||
1777 | clear_buffer_dirty(bh); | 1778 | clear_buffer_dirty(bh); |
1778 | set_buffer_uptodate(bh); | 1779 | set_buffer_uptodate(bh); |
1779 | } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { | 1780 | } else if (!buffer_mapped(bh) && buffer_dirty(bh)) { |
1781 | WARN_ON(bh->b_size != blocksize); | ||
1780 | err = get_block(inode, block, bh, 1); | 1782 | err = get_block(inode, block, bh, 1); |
1781 | if (err) | 1783 | if (err) |
1782 | goto recover; | 1784 | goto recover; |
@@ -1930,6 +1932,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page, | |||
1930 | if (buffer_new(bh)) | 1932 | if (buffer_new(bh)) |
1931 | clear_buffer_new(bh); | 1933 | clear_buffer_new(bh); |
1932 | if (!buffer_mapped(bh)) { | 1934 | if (!buffer_mapped(bh)) { |
1935 | WARN_ON(bh->b_size != blocksize); | ||
1933 | err = get_block(inode, block, bh, 1); | 1936 | err = get_block(inode, block, bh, 1); |
1934 | if (err) | 1937 | if (err) |
1935 | break; | 1938 | break; |
@@ -2085,6 +2088,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block) | |||
2085 | 2088 | ||
2086 | fully_mapped = 0; | 2089 | fully_mapped = 0; |
2087 | if (iblock < lblock) { | 2090 | if (iblock < lblock) { |
2091 | WARN_ON(bh->b_size != blocksize); | ||
2088 | err = get_block(inode, iblock, bh, 0); | 2092 | err = get_block(inode, iblock, bh, 0); |
2089 | if (err) | 2093 | if (err) |
2090 | SetPageError(page); | 2094 | SetPageError(page); |
@@ -2406,6 +2410,7 @@ int nobh_prepare_write(struct page *page, unsigned from, unsigned to, | |||
2406 | create = 1; | 2410 | create = 1; |
2407 | if (block_start >= to) | 2411 | if (block_start >= to) |
2408 | create = 0; | 2412 | create = 0; |
2413 | map_bh.b_size = blocksize; | ||
2409 | ret = get_block(inode, block_in_file + block_in_page, | 2414 | ret = get_block(inode, block_in_file + block_in_page, |
2410 | &map_bh, create); | 2415 | &map_bh, create); |
2411 | if (ret) | 2416 | if (ret) |
@@ -2666,6 +2671,7 @@ int block_truncate_page(struct address_space *mapping, | |||
2666 | 2671 | ||
2667 | err = 0; | 2672 | err = 0; |
2668 | if (!buffer_mapped(bh)) { | 2673 | if (!buffer_mapped(bh)) { |
2674 | WARN_ON(bh->b_size != blocksize); | ||
2669 | err = get_block(inode, iblock, bh, 0); | 2675 | err = get_block(inode, iblock, bh, 0); |
2670 | if (err) | 2676 | if (err) |
2671 | goto unlock; | 2677 | goto unlock; |
@@ -2752,6 +2758,7 @@ sector_t generic_block_bmap(struct address_space *mapping, sector_t block, | |||
2752 | struct inode *inode = mapping->host; | 2758 | struct inode *inode = mapping->host; |
2753 | tmp.b_state = 0; | 2759 | tmp.b_state = 0; |
2754 | tmp.b_blocknr = 0; | 2760 | tmp.b_blocknr = 0; |
2761 | tmp.b_size = 1 << inode->i_blkbits; | ||
2755 | get_block(inode, block, &tmp, 0); | 2762 | get_block(inode, block, &tmp, 0); |
2756 | return tmp.b_blocknr; | 2763 | return tmp.b_blocknr; |
2757 | } | 2764 | } |
@@ -3004,7 +3011,7 @@ out: | |||
3004 | } | 3011 | } |
3005 | EXPORT_SYMBOL(try_to_free_buffers); | 3012 | EXPORT_SYMBOL(try_to_free_buffers); |
3006 | 3013 | ||
3007 | int block_sync_page(struct page *page) | 3014 | void block_sync_page(struct page *page) |
3008 | { | 3015 | { |
3009 | struct address_space *mapping; | 3016 | struct address_space *mapping; |
3010 | 3017 | ||
@@ -3012,7 +3019,6 @@ int block_sync_page(struct page *page) | |||
3012 | mapping = page_mapping(page); | 3019 | mapping = page_mapping(page); |
3013 | if (mapping) | 3020 | if (mapping) |
3014 | blk_run_backing_dev(mapping->backing_dev_info, page); | 3021 | blk_run_backing_dev(mapping->backing_dev_info, page); |
3015 | return 0; | ||
3016 | } | 3022 | } |
3017 | 3023 | ||
3018 | /* | 3024 | /* |
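[Annotation] The new WARN_ON(bh->b_size != blocksize) checks, the map_bh.b_size and tmp.b_size assignments, and the dropped max_blocks parameter of blkdev_get_blocks() all reflect one interface change: the size of the requested mapping now travels to a get_block_t callback in bh->b_size instead of as a separate argument, and the callback may reduce it to what it actually mapped. A sketch of a caller under that convention, modelled loosely on the generic_block_bmap() hunk above; my_map_block is a placeholder helper, not kernel API:

	#include <linux/fs.h>
	#include <linux/buffer_head.h>

	static sector_t my_map_block(struct inode *inode, sector_t block,
				     get_block_t *get_block)
	{
		struct buffer_head tmp;

		tmp.b_state = 0;
		tmp.b_blocknr = 0;
		tmp.b_size = 1 << inode->i_blkbits;	/* bytes requested */
		get_block(inode, block, &tmp, 0);	/* 0 = do not allocate */
		return tmp.b_blocknr;			/* b_size now holds bytes mapped */
	}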
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 221b3334b737..6b99b51d6694 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -738,10 +738,8 @@ cifs_init_request_bufs(void) | |||
738 | cERROR(1,("cifs_min_rcv set to maximum (64)")); | 738 | cERROR(1,("cifs_min_rcv set to maximum (64)")); |
739 | } | 739 | } |
740 | 740 | ||
741 | cifs_req_poolp = mempool_create(cifs_min_rcv, | 741 | cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, |
742 | mempool_alloc_slab, | 742 | cifs_req_cachep); |
743 | mempool_free_slab, | ||
744 | cifs_req_cachep); | ||
745 | 743 | ||
746 | if(cifs_req_poolp == NULL) { | 744 | if(cifs_req_poolp == NULL) { |
747 | kmem_cache_destroy(cifs_req_cachep); | 745 | kmem_cache_destroy(cifs_req_cachep); |
@@ -771,10 +769,8 @@ cifs_init_request_bufs(void) | |||
771 | cFYI(1,("cifs_min_small set to maximum (256)")); | 769 | cFYI(1,("cifs_min_small set to maximum (256)")); |
772 | } | 770 | } |
773 | 771 | ||
774 | cifs_sm_req_poolp = mempool_create(cifs_min_small, | 772 | cifs_sm_req_poolp = mempool_create_slab_pool(cifs_min_small, |
775 | mempool_alloc_slab, | 773 | cifs_sm_req_cachep); |
776 | mempool_free_slab, | ||
777 | cifs_sm_req_cachep); | ||
778 | 774 | ||
779 | if(cifs_sm_req_poolp == NULL) { | 775 | if(cifs_sm_req_poolp == NULL) { |
780 | mempool_destroy(cifs_req_poolp); | 776 | mempool_destroy(cifs_req_poolp); |
@@ -808,10 +804,8 @@ cifs_init_mids(void) | |||
808 | if (cifs_mid_cachep == NULL) | 804 | if (cifs_mid_cachep == NULL) |
809 | return -ENOMEM; | 805 | return -ENOMEM; |
810 | 806 | ||
811 | cifs_mid_poolp = mempool_create(3 /* a reasonable min simultan opers */, | 807 | /* 3 is a reasonable minimum number of simultaneous operations */ |
812 | mempool_alloc_slab, | 808 | cifs_mid_poolp = mempool_create_slab_pool(3, cifs_mid_cachep); |
813 | mempool_free_slab, | ||
814 | cifs_mid_cachep); | ||
815 | if(cifs_mid_poolp == NULL) { | 809 | if(cifs_mid_poolp == NULL) { |
816 | kmem_cache_destroy(cifs_mid_cachep); | 810 | kmem_cache_destroy(cifs_mid_cachep); |
817 | return -ENOMEM; | 811 | return -ENOMEM; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 165d67426381..fb49aef1f2ec 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -1339,7 +1339,7 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1339 | return rc; | 1339 | return rc; |
1340 | } | 1340 | } |
1341 | 1341 | ||
1342 | /* static int cifs_sync_page(struct page *page) | 1342 | /* static void cifs_sync_page(struct page *page) |
1343 | { | 1343 | { |
1344 | struct address_space *mapping; | 1344 | struct address_space *mapping; |
1345 | struct inode *inode; | 1345 | struct inode *inode; |
@@ -1353,16 +1353,18 @@ int cifs_fsync(struct file *file, struct dentry *dentry, int datasync) | |||
1353 | return 0; | 1353 | return 0; |
1354 | inode = mapping->host; | 1354 | inode = mapping->host; |
1355 | if (!inode) | 1355 | if (!inode) |
1356 | return 0; */ | 1356 | return; */ |
1357 | 1357 | ||
1358 | /* fill in rpages then | 1358 | /* fill in rpages then |
1359 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ | 1359 | result = cifs_pagein_inode(inode, index, rpages); */ /* BB finish */ |
1360 | 1360 | ||
1361 | /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); | 1361 | /* cFYI(1, ("rpages is %d for sync page of Index %ld ", rpages, index)); |
1362 | 1362 | ||
1363 | #if 0 | ||
1363 | if (rc < 0) | 1364 | if (rc < 0) |
1364 | return rc; | 1365 | return rc; |
1365 | return 0; | 1366 | return 0; |
1367 | #endif | ||
1366 | } */ | 1368 | } */ |
1367 | 1369 | ||
1368 | /* | 1370 | /* |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index ff93a9f81d1c..598eec9778f6 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c | |||
@@ -163,9 +163,9 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
163 | 163 | ||
164 | if (num_of_bytes < end_of_file) | 164 | if (num_of_bytes < end_of_file) |
165 | cFYI(1, ("allocation size less than end of file")); | 165 | cFYI(1, ("allocation size less than end of file")); |
166 | cFYI(1, | 166 | cFYI(1, ("Size %ld and blocks %llu", |
167 | ("Size %ld and blocks %ld", | 167 | (unsigned long) inode->i_size, |
168 | (unsigned long) inode->i_size, inode->i_blocks)); | 168 | (unsigned long long)inode->i_blocks)); |
169 | if (S_ISREG(inode->i_mode)) { | 169 | if (S_ISREG(inode->i_mode)) { |
170 | cFYI(1, ("File inode")); | 170 | cFYI(1, ("File inode")); |
171 | inode->i_op = &cifs_file_inode_ops; | 171 | inode->i_op = &cifs_file_inode_ops; |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c index edb3b6eb34bc..488bd0d81dcf 100644 --- a/fs/cifs/readdir.c +++ b/fs/cifs/readdir.c | |||
@@ -197,10 +197,10 @@ static void fill_in_inode(struct inode *tmp_inode, | |||
197 | 197 | ||
198 | if (allocation_size < end_of_file) | 198 | if (allocation_size < end_of_file) |
199 | cFYI(1, ("May be sparse file, allocation less than file size")); | 199 | cFYI(1, ("May be sparse file, allocation less than file size")); |
200 | cFYI(1, | 200 | cFYI(1, ("File Size %ld and blocks %llu and blocksize %ld", |
201 | ("File Size %ld and blocks %ld and blocksize %ld", | 201 | (unsigned long)tmp_inode->i_size, |
202 | (unsigned long)tmp_inode->i_size, tmp_inode->i_blocks, | 202 | (unsigned long long)tmp_inode->i_blocks, |
203 | tmp_inode->i_blksize)); | 203 | tmp_inode->i_blksize)); |
204 | if (S_ISREG(tmp_inode->i_mode)) { | 204 | if (S_ISREG(tmp_inode->i_mode)) { |
205 | cFYI(1, ("File inode")); | 205 | cFYI(1, ("File inode")); |
206 | tmp_inode->i_op = &cifs_file_inode_ops; | 206 | tmp_inode->i_op = &cifs_file_inode_ops; |
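The two cFYI() fixes in fs/cifs/inode.c and fs/cifs/readdir.c address format-string mismatches: i_blocks is not an unsigned long on every configuration, so it is now cast to unsigned long long and printed with %llu. A minimal sketch of the same pattern using plain printk(); print_inode_sizes is a hypothetical debug helper, not part of this patch:

#include <linux/kernel.h>
#include <linux/fs.h>

/* hypothetical helper: print sizes with width-safe casts */
static void print_inode_sizes(struct inode *inode)
{
	printk(KERN_DEBUG "size %ld and blocks %llu\n",
	       (unsigned long) inode->i_size,
	       (unsigned long long) inode->i_blocks);
}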
diff --git a/fs/cramfs/inode.c b/fs/cramfs/inode.c index 8ad52f5bf255..acc1b2c10a86 100644 --- a/fs/cramfs/inode.c +++ b/fs/cramfs/inode.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/cramfs_fs_sb.h> | 22 | #include <linux/cramfs_fs_sb.h> |
23 | #include <linux/buffer_head.h> | 23 | #include <linux/buffer_head.h> |
24 | #include <linux/vfs.h> | 24 | #include <linux/vfs.h> |
25 | #include <linux/mutex.h> | ||
25 | #include <asm/semaphore.h> | 26 | #include <asm/semaphore.h> |
26 | 27 | ||
27 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
@@ -31,7 +32,7 @@ static struct inode_operations cramfs_dir_inode_operations; | |||
31 | static struct file_operations cramfs_directory_operations; | 32 | static struct file_operations cramfs_directory_operations; |
32 | static struct address_space_operations cramfs_aops; | 33 | static struct address_space_operations cramfs_aops; |
33 | 34 | ||
34 | static DECLARE_MUTEX(read_mutex); | 35 | static DEFINE_MUTEX(read_mutex); |
35 | 36 | ||
36 | 37 | ||
37 | /* These two macros may change in future, to provide better st_ino | 38 | /* These two macros may change in future, to provide better st_ino |
@@ -250,20 +251,20 @@ static int cramfs_fill_super(struct super_block *sb, void *data, int silent) | |||
250 | memset(sbi, 0, sizeof(struct cramfs_sb_info)); | 251 | memset(sbi, 0, sizeof(struct cramfs_sb_info)); |
251 | 252 | ||
252 | /* Invalidate the read buffers on mount: think disk change.. */ | 253 | /* Invalidate the read buffers on mount: think disk change.. */ |
253 | down(&read_mutex); | 254 | mutex_lock(&read_mutex); |
254 | for (i = 0; i < READ_BUFFERS; i++) | 255 | for (i = 0; i < READ_BUFFERS; i++) |
255 | buffer_blocknr[i] = -1; | 256 | buffer_blocknr[i] = -1; |
256 | 257 | ||
257 | /* Read the first block and get the superblock from it */ | 258 | /* Read the first block and get the superblock from it */ |
258 | memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super)); | 259 | memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super)); |
259 | up(&read_mutex); | 260 | mutex_unlock(&read_mutex); |
260 | 261 | ||
261 | /* Do sanity checks on the superblock */ | 262 | /* Do sanity checks on the superblock */ |
262 | if (super.magic != CRAMFS_MAGIC) { | 263 | if (super.magic != CRAMFS_MAGIC) { |
263 | /* check at 512 byte offset */ | 264 | /* check at 512 byte offset */ |
264 | down(&read_mutex); | 265 | mutex_lock(&read_mutex); |
265 | memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super)); | 266 | memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super)); |
266 | up(&read_mutex); | 267 | mutex_unlock(&read_mutex); |
267 | if (super.magic != CRAMFS_MAGIC) { | 268 | if (super.magic != CRAMFS_MAGIC) { |
268 | if (!silent) | 269 | if (!silent) |
269 | printk(KERN_ERR "cramfs: wrong magic\n"); | 270 | printk(KERN_ERR "cramfs: wrong magic\n"); |
@@ -366,7 +367,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
366 | mode_t mode; | 367 | mode_t mode; |
367 | int namelen, error; | 368 | int namelen, error; |
368 | 369 | ||
369 | down(&read_mutex); | 370 | mutex_lock(&read_mutex); |
370 | de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256); | 371 | de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+256); |
371 | name = (char *)(de+1); | 372 | name = (char *)(de+1); |
372 | 373 | ||
@@ -379,7 +380,7 @@ static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir) | |||
379 | memcpy(buf, name, namelen); | 380 | memcpy(buf, name, namelen); |
380 | ino = CRAMINO(de); | 381 | ino = CRAMINO(de); |
381 | mode = de->mode; | 382 | mode = de->mode; |
382 | up(&read_mutex); | 383 | mutex_unlock(&read_mutex); |
383 | nextoffset = offset + sizeof(*de) + namelen; | 384 | nextoffset = offset + sizeof(*de) + namelen; |
384 | for (;;) { | 385 | for (;;) { |
385 | if (!namelen) { | 386 | if (!namelen) { |
@@ -410,7 +411,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s | |||
410 | unsigned int offset = 0; | 411 | unsigned int offset = 0; |
411 | int sorted; | 412 | int sorted; |
412 | 413 | ||
413 | down(&read_mutex); | 414 | mutex_lock(&read_mutex); |
414 | sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS; | 415 | sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS; |
415 | while (offset < dir->i_size) { | 416 | while (offset < dir->i_size) { |
416 | struct cramfs_inode *de; | 417 | struct cramfs_inode *de; |
@@ -433,7 +434,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s | |||
433 | 434 | ||
434 | for (;;) { | 435 | for (;;) { |
435 | if (!namelen) { | 436 | if (!namelen) { |
436 | up(&read_mutex); | 437 | mutex_unlock(&read_mutex); |
437 | return ERR_PTR(-EIO); | 438 | return ERR_PTR(-EIO); |
438 | } | 439 | } |
439 | if (name[namelen-1]) | 440 | if (name[namelen-1]) |
@@ -447,7 +448,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s | |||
447 | continue; | 448 | continue; |
448 | if (!retval) { | 449 | if (!retval) { |
449 | struct cramfs_inode entry = *de; | 450 | struct cramfs_inode entry = *de; |
450 | up(&read_mutex); | 451 | mutex_unlock(&read_mutex); |
451 | d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); | 452 | d_add(dentry, get_cramfs_inode(dir->i_sb, &entry)); |
452 | return NULL; | 453 | return NULL; |
453 | } | 454 | } |
@@ -455,7 +456,7 @@ static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, s | |||
455 | if (sorted) | 456 | if (sorted) |
456 | break; | 457 | break; |
457 | } | 458 | } |
458 | up(&read_mutex); | 459 | mutex_unlock(&read_mutex); |
459 | d_add(dentry, NULL); | 460 | d_add(dentry, NULL); |
460 | return NULL; | 461 | return NULL; |
461 | } | 462 | } |
@@ -474,21 +475,21 @@ static int cramfs_readpage(struct file *file, struct page * page) | |||
474 | u32 start_offset, compr_len; | 475 | u32 start_offset, compr_len; |
475 | 476 | ||
476 | start_offset = OFFSET(inode) + maxblock*4; | 477 | start_offset = OFFSET(inode) + maxblock*4; |
477 | down(&read_mutex); | 478 | mutex_lock(&read_mutex); |
478 | if (page->index) | 479 | if (page->index) |
479 | start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4); | 480 | start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4, 4); |
480 | compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset); | 481 | compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) - start_offset); |
481 | up(&read_mutex); | 482 | mutex_unlock(&read_mutex); |
482 | pgdata = kmap(page); | 483 | pgdata = kmap(page); |
483 | if (compr_len == 0) | 484 | if (compr_len == 0) |
484 | ; /* hole */ | 485 | ; /* hole */ |
485 | else { | 486 | else { |
486 | down(&read_mutex); | 487 | mutex_lock(&read_mutex); |
487 | bytes_filled = cramfs_uncompress_block(pgdata, | 488 | bytes_filled = cramfs_uncompress_block(pgdata, |
488 | PAGE_CACHE_SIZE, | 489 | PAGE_CACHE_SIZE, |
489 | cramfs_read(sb, start_offset, compr_len), | 490 | cramfs_read(sb, start_offset, compr_len), |
490 | compr_len); | 491 | compr_len); |
491 | up(&read_mutex); | 492 | mutex_unlock(&read_mutex); |
492 | } | 493 | } |
493 | } else | 494 | } else |
494 | pgdata = kmap(page); | 495 | pgdata = kmap(page); |
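The cramfs hunks convert read_mutex from a semaphore used as a mutex (DECLARE_MUTEX/down/up) to the real mutex primitive (DEFINE_MUTEX/mutex_lock/mutex_unlock), the same conversion applied elsewhere in this series. A minimal sketch of the pattern on a hypothetical lock:

#include <linux/mutex.h>

/* was: static DECLARE_MUTEX(my_lock); guarded with down()/up().
 * my_lock and update_shared_buffers() are hypothetical names. */
static DEFINE_MUTEX(my_lock);

static void update_shared_buffers(void)
{
	mutex_lock(&my_lock);
	/* ... read or update the state guarded by my_lock ... */
	mutex_unlock(&my_lock);
}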
diff --git a/fs/dcache.c b/fs/dcache.c index 0778f49f993b..19458d399502 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/bootmem.h> | 35 | #include <linux/bootmem.h> |
36 | 36 | ||
37 | 37 | ||
38 | int sysctl_vfs_cache_pressure = 100; | 38 | int sysctl_vfs_cache_pressure __read_mostly = 100; |
39 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); | 39 | EXPORT_SYMBOL_GPL(sysctl_vfs_cache_pressure); |
40 | 40 | ||
41 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); | 41 | __cacheline_aligned_in_smp DEFINE_SPINLOCK(dcache_lock); |
@@ -43,7 +43,7 @@ static seqlock_t rename_lock __cacheline_aligned_in_smp = SEQLOCK_UNLOCKED; | |||
43 | 43 | ||
44 | EXPORT_SYMBOL(dcache_lock); | 44 | EXPORT_SYMBOL(dcache_lock); |
45 | 45 | ||
46 | static kmem_cache_t *dentry_cache; | 46 | static kmem_cache_t *dentry_cache __read_mostly; |
47 | 47 | ||
48 | #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) | 48 | #define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname)) |
49 | 49 | ||
@@ -58,9 +58,9 @@ static kmem_cache_t *dentry_cache; | |||
58 | #define D_HASHBITS d_hash_shift | 58 | #define D_HASHBITS d_hash_shift |
59 | #define D_HASHMASK d_hash_mask | 59 | #define D_HASHMASK d_hash_mask |
60 | 60 | ||
61 | static unsigned int d_hash_mask; | 61 | static unsigned int d_hash_mask __read_mostly; |
62 | static unsigned int d_hash_shift; | 62 | static unsigned int d_hash_shift __read_mostly; |
63 | static struct hlist_head *dentry_hashtable; | 63 | static struct hlist_head *dentry_hashtable __read_mostly; |
64 | static LIST_HEAD(dentry_unused); | 64 | static LIST_HEAD(dentry_unused); |
65 | 65 | ||
66 | /* Statistics gathering. */ | 66 | /* Statistics gathering. */ |
@@ -1710,10 +1710,10 @@ static void __init dcache_init(unsigned long mempages) | |||
1710 | } | 1710 | } |
1711 | 1711 | ||
1712 | /* SLAB cache for __getname() consumers */ | 1712 | /* SLAB cache for __getname() consumers */ |
1713 | kmem_cache_t *names_cachep; | 1713 | kmem_cache_t *names_cachep __read_mostly; |
1714 | 1714 | ||
1715 | /* SLAB cache for file structures */ | 1715 | /* SLAB cache for file structures */ |
1716 | kmem_cache_t *filp_cachep; | 1716 | kmem_cache_t *filp_cachep __read_mostly; |
1717 | 1717 | ||
1718 | EXPORT_SYMBOL(d_genocide); | 1718 | EXPORT_SYMBOL(d_genocide); |
1719 | 1719 | ||
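The dcache.c hunks (like the dcookies, dnotify and eventpoll ones that follow) only add the __read_mostly section annotation to globals that are written once at boot and then read on hot paths, so they no longer share cache lines with frequently written data; semantics are unchanged. A tiny sketch with hypothetical names:

#include <linux/cache.h>
#include <linux/slab.h>

/* hypothetical globals: initialised once in an init routine,
 * then only read on every lookup afterwards */
static kmem_cache_t *example_cachep __read_mostly;
static unsigned int example_hash_shift __read_mostly;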
diff --git a/fs/dcookies.c b/fs/dcookies.c index f8274a8f83bd..8749339bf4f6 100644 --- a/fs/dcookies.c +++ b/fs/dcookies.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/errno.h> | 24 | #include <linux/errno.h> |
25 | #include <linux/dcookies.h> | 25 | #include <linux/dcookies.h> |
26 | #include <linux/mutex.h> | ||
26 | #include <asm/uaccess.h> | 27 | #include <asm/uaccess.h> |
27 | 28 | ||
28 | /* The dcookies are allocated from a kmem_cache and | 29 | /* The dcookies are allocated from a kmem_cache and |
@@ -36,10 +37,10 @@ struct dcookie_struct { | |||
36 | }; | 37 | }; |
37 | 38 | ||
38 | static LIST_HEAD(dcookie_users); | 39 | static LIST_HEAD(dcookie_users); |
39 | static DECLARE_MUTEX(dcookie_sem); | 40 | static DEFINE_MUTEX(dcookie_mutex); |
40 | static kmem_cache_t * dcookie_cache; | 41 | static kmem_cache_t *dcookie_cache __read_mostly; |
41 | static struct list_head * dcookie_hashtable; | 42 | static struct list_head *dcookie_hashtable __read_mostly; |
42 | static size_t hash_size; | 43 | static size_t hash_size __read_mostly; |
43 | 44 | ||
44 | static inline int is_live(void) | 45 | static inline int is_live(void) |
45 | { | 46 | { |
@@ -114,7 +115,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, | |||
114 | int err = 0; | 115 | int err = 0; |
115 | struct dcookie_struct * dcs; | 116 | struct dcookie_struct * dcs; |
116 | 117 | ||
117 | down(&dcookie_sem); | 118 | mutex_lock(&dcookie_mutex); |
118 | 119 | ||
119 | if (!is_live()) { | 120 | if (!is_live()) { |
120 | err = -EINVAL; | 121 | err = -EINVAL; |
@@ -134,7 +135,7 @@ int get_dcookie(struct dentry * dentry, struct vfsmount * vfsmnt, | |||
134 | *cookie = dcookie_value(dcs); | 135 | *cookie = dcookie_value(dcs); |
135 | 136 | ||
136 | out: | 137 | out: |
137 | up(&dcookie_sem); | 138 | mutex_unlock(&dcookie_mutex); |
138 | return err; | 139 | return err; |
139 | } | 140 | } |
140 | 141 | ||
@@ -157,7 +158,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len) | |||
157 | if (!capable(CAP_SYS_ADMIN)) | 158 | if (!capable(CAP_SYS_ADMIN)) |
158 | return -EPERM; | 159 | return -EPERM; |
159 | 160 | ||
160 | down(&dcookie_sem); | 161 | mutex_lock(&dcookie_mutex); |
161 | 162 | ||
162 | if (!is_live()) { | 163 | if (!is_live()) { |
163 | err = -EINVAL; | 164 | err = -EINVAL; |
@@ -192,7 +193,7 @@ asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user * buf, size_t len) | |||
192 | out_free: | 193 | out_free: |
193 | kfree(kbuf); | 194 | kfree(kbuf); |
194 | out: | 195 | out: |
195 | up(&dcookie_sem); | 196 | mutex_unlock(&dcookie_mutex); |
196 | return err; | 197 | return err; |
197 | } | 198 | } |
198 | 199 | ||
@@ -290,7 +291,7 @@ struct dcookie_user * dcookie_register(void) | |||
290 | { | 291 | { |
291 | struct dcookie_user * user; | 292 | struct dcookie_user * user; |
292 | 293 | ||
293 | down(&dcookie_sem); | 294 | mutex_lock(&dcookie_mutex); |
294 | 295 | ||
295 | user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL); | 296 | user = kmalloc(sizeof(struct dcookie_user), GFP_KERNEL); |
296 | if (!user) | 297 | if (!user) |
@@ -302,7 +303,7 @@ struct dcookie_user * dcookie_register(void) | |||
302 | list_add(&user->next, &dcookie_users); | 303 | list_add(&user->next, &dcookie_users); |
303 | 304 | ||
304 | out: | 305 | out: |
305 | up(&dcookie_sem); | 306 | mutex_unlock(&dcookie_mutex); |
306 | return user; | 307 | return user; |
307 | out_free: | 308 | out_free: |
308 | kfree(user); | 309 | kfree(user); |
@@ -313,7 +314,7 @@ out_free: | |||
313 | 314 | ||
314 | void dcookie_unregister(struct dcookie_user * user) | 315 | void dcookie_unregister(struct dcookie_user * user) |
315 | { | 316 | { |
316 | down(&dcookie_sem); | 317 | mutex_lock(&dcookie_mutex); |
317 | 318 | ||
318 | list_del(&user->next); | 319 | list_del(&user->next); |
319 | kfree(user); | 320 | kfree(user); |
@@ -321,7 +322,7 @@ void dcookie_unregister(struct dcookie_user * user) | |||
321 | if (!is_live()) | 322 | if (!is_live()) |
322 | dcookie_exit(); | 323 | dcookie_exit(); |
323 | 324 | ||
324 | up(&dcookie_sem); | 325 | mutex_unlock(&dcookie_mutex); |
325 | } | 326 | } |
326 | 327 | ||
327 | EXPORT_SYMBOL_GPL(dcookie_register); | 328 | EXPORT_SYMBOL_GPL(dcookie_register); |
diff --git a/fs/direct-io.c b/fs/direct-io.c index 235ed8d1f11e..9d1d2aa73e42 100644 --- a/fs/direct-io.c +++ b/fs/direct-io.c | |||
@@ -86,12 +86,12 @@ struct dio { | |||
86 | unsigned first_block_in_page; /* doesn't change, Used only once */ | 86 | unsigned first_block_in_page; /* doesn't change, Used only once */ |
87 | int boundary; /* prev block is at a boundary */ | 87 | int boundary; /* prev block is at a boundary */ |
88 | int reap_counter; /* rate limit reaping */ | 88 | int reap_counter; /* rate limit reaping */ |
89 | get_blocks_t *get_blocks; /* block mapping function */ | 89 | get_block_t *get_block; /* block mapping function */ |
90 | dio_iodone_t *end_io; /* IO completion function */ | 90 | dio_iodone_t *end_io; /* IO completion function */ |
91 | sector_t final_block_in_bio; /* current final block in bio + 1 */ | 91 | sector_t final_block_in_bio; /* current final block in bio + 1 */ |
92 | sector_t next_block_for_io; /* next block to be put under IO, | 92 | sector_t next_block_for_io; /* next block to be put under IO, |
93 | in dio_blocks units */ | 93 | in dio_blocks units */ |
94 | struct buffer_head map_bh; /* last get_blocks() result */ | 94 | struct buffer_head map_bh; /* last get_block() result */ |
95 | 95 | ||
96 | /* | 96 | /* |
97 | * Deferred addition of a page to the dio. These variables are | 97 | * Deferred addition of a page to the dio. These variables are |
@@ -211,9 +211,9 @@ static struct page *dio_get_page(struct dio *dio) | |||
211 | 211 | ||
212 | /* | 212 | /* |
213 | * Called when all DIO BIO I/O has been completed - let the filesystem | 213 | * Called when all DIO BIO I/O has been completed - let the filesystem |
214 | * know, if it registered an interest earlier via get_blocks. Pass the | 214 | * know, if it registered an interest earlier via get_block. Pass the |
215 | * private field of the map buffer_head so that filesystems can use it | 215 | * private field of the map buffer_head so that filesystems can use it |
216 | * to hold additional state between get_blocks calls and dio_complete. | 216 | * to hold additional state between get_block calls and dio_complete. |
217 | */ | 217 | */ |
218 | static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) | 218 | static void dio_complete(struct dio *dio, loff_t offset, ssize_t bytes) |
219 | { | 219 | { |
@@ -493,7 +493,7 @@ static int dio_bio_reap(struct dio *dio) | |||
493 | * The fs is allowed to map lots of blocks at once. If it wants to do that, | 493 | * The fs is allowed to map lots of blocks at once. If it wants to do that, |
494 | * it uses the passed inode-relative block number as the file offset, as usual. | 494 | * it uses the passed inode-relative block number as the file offset, as usual. |
495 | * | 495 | * |
496 | * get_blocks() is passed the number of i_blkbits-sized blocks which direct_io | 496 | * get_block() is passed the number of i_blkbits-sized blocks which direct_io |
497 | * has remaining to do. The fs should not map more than this number of blocks. | 497 | * has remaining to do. The fs should not map more than this number of blocks. |
498 | * | 498 | * |
499 | * If the fs has mapped a lot of blocks, it should populate bh->b_size to | 499 | * If the fs has mapped a lot of blocks, it should populate bh->b_size to |
@@ -506,7 +506,7 @@ static int dio_bio_reap(struct dio *dio) | |||
506 | * In the case of filesystem holes: the fs may return an arbitrarily-large | 506 | * In the case of filesystem holes: the fs may return an arbitrarily-large |
507 | * hole by returning an appropriate value in b_size and by clearing | 507 | * hole by returning an appropriate value in b_size and by clearing |
508 | * buffer_mapped(). However the direct-io code will only process holes one | 508 | * buffer_mapped(). However the direct-io code will only process holes one |
509 | * block at a time - it will repeatedly call get_blocks() as it walks the hole. | 509 | * block at a time - it will repeatedly call get_block() as it walks the hole. |
510 | */ | 510 | */ |
511 | static int get_more_blocks(struct dio *dio) | 511 | static int get_more_blocks(struct dio *dio) |
512 | { | 512 | { |
@@ -548,7 +548,8 @@ static int get_more_blocks(struct dio *dio) | |||
548 | * at a higher level for inside-i_size block-instantiating | 548 | * at a higher level for inside-i_size block-instantiating |
549 | * writes. | 549 | * writes. |
550 | */ | 550 | */ |
551 | ret = (*dio->get_blocks)(dio->inode, fs_startblk, fs_count, | 551 | map_bh->b_size = fs_count << dio->blkbits; |
552 | ret = (*dio->get_block)(dio->inode, fs_startblk, | ||
552 | map_bh, create); | 553 | map_bh, create); |
553 | } | 554 | } |
554 | return ret; | 555 | return ret; |
@@ -783,11 +784,11 @@ static void dio_zero_block(struct dio *dio, int end) | |||
783 | * happily perform page-sized but 512-byte aligned IOs. It is important that | 784 | * happily perform page-sized but 512-byte aligned IOs. It is important that |
784 | * blockdev IO be able to have fine alignment and large sizes. | 785 | * blockdev IO be able to have fine alignment and large sizes. |
785 | * | 786 | * |
786 | * So what we do is to permit the ->get_blocks function to populate bh.b_size | 787 | * So what we do is to permit the ->get_block function to populate bh.b_size |
787 | * with the size of IO which is permitted at this offset and this i_blkbits. | 788 | * with the size of IO which is permitted at this offset and this i_blkbits. |
788 | * | 789 | * |
789 | * For best results, the blockdev should be set up with 512-byte i_blkbits and | 790 | * For best results, the blockdev should be set up with 512-byte i_blkbits and |
790 | * it should set b_size to PAGE_SIZE or more inside get_blocks(). This gives | 791 | * it should set b_size to PAGE_SIZE or more inside get_block(). This gives |
791 | * fine alignment but still allows this function to work in PAGE_SIZE units. | 792 | * fine alignment but still allows this function to work in PAGE_SIZE units. |
792 | */ | 793 | */ |
793 | static int do_direct_IO(struct dio *dio) | 794 | static int do_direct_IO(struct dio *dio) |
@@ -947,7 +948,7 @@ out: | |||
947 | static ssize_t | 948 | static ssize_t |
948 | direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | 949 | direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, |
949 | const struct iovec *iov, loff_t offset, unsigned long nr_segs, | 950 | const struct iovec *iov, loff_t offset, unsigned long nr_segs, |
950 | unsigned blkbits, get_blocks_t get_blocks, dio_iodone_t end_io, | 951 | unsigned blkbits, get_block_t get_block, dio_iodone_t end_io, |
951 | struct dio *dio) | 952 | struct dio *dio) |
952 | { | 953 | { |
953 | unsigned long user_addr; | 954 | unsigned long user_addr; |
@@ -969,7 +970,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | |||
969 | 970 | ||
970 | dio->boundary = 0; | 971 | dio->boundary = 0; |
971 | dio->reap_counter = 0; | 972 | dio->reap_counter = 0; |
972 | dio->get_blocks = get_blocks; | 973 | dio->get_block = get_block; |
973 | dio->end_io = end_io; | 974 | dio->end_io = end_io; |
974 | dio->map_bh.b_private = NULL; | 975 | dio->map_bh.b_private = NULL; |
975 | dio->final_block_in_bio = -1; | 976 | dio->final_block_in_bio = -1; |
@@ -1177,7 +1178,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, | |||
1177 | ssize_t | 1178 | ssize_t |
1178 | __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1179 | __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
1179 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | 1180 | struct block_device *bdev, const struct iovec *iov, loff_t offset, |
1180 | unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, | 1181 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, |
1181 | int dio_lock_type) | 1182 | int dio_lock_type) |
1182 | { | 1183 | { |
1183 | int seg; | 1184 | int seg; |
@@ -1273,7 +1274,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | |||
1273 | (end > i_size_read(inode))); | 1274 | (end > i_size_read(inode))); |
1274 | 1275 | ||
1275 | retval = direct_io_worker(rw, iocb, inode, iov, offset, | 1276 | retval = direct_io_worker(rw, iocb, inode, iov, offset, |
1276 | nr_segs, blkbits, get_blocks, end_io, dio); | 1277 | nr_segs, blkbits, get_block, end_io, dio); |
1277 | 1278 | ||
1278 | if (rw == READ && dio_lock_type == DIO_LOCKING) | 1279 | if (rw == READ && dio_lock_type == DIO_LOCKING) |
1279 | release_i_mutex = 0; | 1280 | release_i_mutex = 0; |
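The fs/direct-io.c changes retire the get_blocks_t callback: __blockdev_direct_IO() now takes an ordinary get_block_t, passes the permitted mapping size in through map_bh->b_size (as the get_more_blocks() hunk shows), and expects the filesystem to report the size it actually mapped back through b_size. A hedged sketch of a get_block under that convention; myfs_get_block and MYFS_DATA_START are hypothetical, and the contiguous layout exists only to keep the example self-contained:

#include <linux/fs.h>
#include <linux/buffer_head.h>

#define MYFS_DATA_START 128	/* toy layout: file data starts at this block */

static int myfs_get_block(struct inode *inode, sector_t iblock,
			  struct buffer_head *bh_result, int create)
{
	/* on entry, b_size holds the largest mapping the caller accepts */
	unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

	/* toy contiguous layout: file block N lives at DATA_START + N,
	 * so the whole requested span can be mapped in one call */
	map_bh(bh_result, inode->i_sb, MYFS_DATA_START + iblock);
	bh_result->b_size = max_blocks << inode->i_blkbits;
	return 0;
}

A real filesystem would map only as many contiguous blocks as it actually has, and for a hole it would leave the buffer unmapped and set b_size to the size of the hole.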
diff --git a/fs/dnotify.c b/fs/dnotify.c index f3b540dd5d11..f932591df5a4 100644 --- a/fs/dnotify.c +++ b/fs/dnotify.c | |||
@@ -21,9 +21,9 @@ | |||
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | 23 | ||
24 | int dir_notify_enable = 1; | 24 | int dir_notify_enable __read_mostly = 1; |
25 | 25 | ||
26 | static kmem_cache_t *dn_cache; | 26 | static kmem_cache_t *dn_cache __read_mostly; |
27 | 27 | ||
28 | static void redo_inode_mask(struct inode *inode) | 28 | static void redo_inode_mask(struct inode *inode) |
29 | { | 29 | { |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a0f682cdd03e..e067a06c6464 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -281,13 +281,13 @@ static struct mutex epmutex; | |||
281 | static struct poll_safewake psw; | 281 | static struct poll_safewake psw; |
282 | 282 | ||
283 | /* Slab cache used to allocate "struct epitem" */ | 283 | /* Slab cache used to allocate "struct epitem" */ |
284 | static kmem_cache_t *epi_cache; | 284 | static kmem_cache_t *epi_cache __read_mostly; |
285 | 285 | ||
286 | /* Slab cache used to allocate "struct eppoll_entry" */ | 286 | /* Slab cache used to allocate "struct eppoll_entry" */ |
287 | static kmem_cache_t *pwq_cache; | 287 | static kmem_cache_t *pwq_cache __read_mostly; |
288 | 288 | ||
289 | /* Virtual fs used to allocate inodes for eventpoll files */ | 289 | /* Virtual fs used to allocate inodes for eventpoll files */ |
290 | static struct vfsmount *eventpoll_mnt; | 290 | static struct vfsmount *eventpoll_mnt __read_mostly; |
291 | 291 | ||
292 | /* File callbacks that implement the eventpoll file behaviour */ | 292 | /* File callbacks that implement the eventpoll file behaviour */ |
293 | static struct file_operations eventpoll_fops = { | 293 | static struct file_operations eventpoll_fops = { |
@@ -632,7 +632,7 @@ static int de_thread(struct task_struct *tsk) | |||
632 | * synchronize with any firing (by calling del_timer_sync) | 632 | * synchronize with any firing (by calling del_timer_sync) |
633 | * before we can safely let the old group leader die. | 633 | * before we can safely let the old group leader die. |
634 | */ | 634 | */ |
635 | sig->real_timer.data = current; | 635 | sig->tsk = current; |
636 | spin_unlock_irq(lock); | 636 | spin_unlock_irq(lock); |
637 | if (hrtimer_cancel(&sig->real_timer)) | 637 | if (hrtimer_cancel(&sig->real_timer)) |
638 | hrtimer_restart(&sig->real_timer); | 638 | hrtimer_restart(&sig->real_timer); |
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c index a717837f272e..04af9c45dce2 100644 --- a/fs/ext2/inode.c +++ b/fs/ext2/inode.c | |||
@@ -667,18 +667,6 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block) | |||
667 | return generic_block_bmap(mapping,block,ext2_get_block); | 667 | return generic_block_bmap(mapping,block,ext2_get_block); |
668 | } | 668 | } |
669 | 669 | ||
670 | static int | ||
671 | ext2_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, | ||
672 | struct buffer_head *bh_result, int create) | ||
673 | { | ||
674 | int ret; | ||
675 | |||
676 | ret = ext2_get_block(inode, iblock, bh_result, create); | ||
677 | if (ret == 0) | ||
678 | bh_result->b_size = (1 << inode->i_blkbits); | ||
679 | return ret; | ||
680 | } | ||
681 | |||
682 | static ssize_t | 670 | static ssize_t |
683 | ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | 671 | ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, |
684 | loff_t offset, unsigned long nr_segs) | 672 | loff_t offset, unsigned long nr_segs) |
@@ -687,7 +675,7 @@ ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, | |||
687 | struct inode *inode = file->f_mapping->host; | 675 | struct inode *inode = file->f_mapping->host; |
688 | 676 | ||
689 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 677 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
690 | offset, nr_segs, ext2_get_blocks, NULL); | 678 | offset, nr_segs, ext2_get_block, NULL); |
691 | } | 679 | } |
692 | 680 | ||
693 | static int | 681 | static int |
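With its local wrapper gone, ext2 hands ext2_get_block straight to blockdev_direct_IO(). The caller side of the new convention looks like this sketch, reusing the hypothetical myfs_get_block from the note after the fs/direct-io.c hunk above:

#include <linux/fs.h>
#include <linux/uio.h>

static ssize_t myfs_direct_IO(int rw, struct kiocb *iocb,
			      const struct iovec *iov, loff_t offset,
			      unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* the get_block_t callback goes where the old get_blocks_t went */
	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs,
				  myfs_get_block, NULL);
}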
diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c index 46623f77666b..77927d6938f6 100644 --- a/fs/ext3/balloc.c +++ b/fs/ext3/balloc.c | |||
@@ -653,9 +653,11 @@ claim_block(spinlock_t *lock, int block, struct buffer_head *bh) | |||
653 | */ | 653 | */ |
654 | static int | 654 | static int |
655 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, | 655 | ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group, |
656 | struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv) | 656 | struct buffer_head *bitmap_bh, int goal, |
657 | unsigned long *count, struct ext3_reserve_window *my_rsv) | ||
657 | { | 658 | { |
658 | int group_first_block, start, end; | 659 | int group_first_block, start, end; |
660 | unsigned long num = 0; | ||
659 | 661 | ||
660 | /* we do allocation within the reservation window if we have a window */ | 662 | /* we do allocation within the reservation window if we have a window */ |
661 | if (my_rsv) { | 663 | if (my_rsv) { |
@@ -713,8 +715,18 @@ repeat: | |||
713 | goto fail_access; | 715 | goto fail_access; |
714 | goto repeat; | 716 | goto repeat; |
715 | } | 717 | } |
716 | return goal; | 718 | num++; |
719 | goal++; | ||
720 | while (num < *count && goal < end | ||
721 | && ext3_test_allocatable(goal, bitmap_bh) | ||
722 | && claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) { | ||
723 | num++; | ||
724 | goal++; | ||
725 | } | ||
726 | *count = num; | ||
727 | return goal - num; | ||
717 | fail_access: | 728 | fail_access: |
729 | *count = num; | ||
718 | return -1; | 730 | return -1; |
719 | } | 731 | } |
720 | 732 | ||
@@ -999,6 +1011,31 @@ retry: | |||
999 | goto retry; | 1011 | goto retry; |
1000 | } | 1012 | } |
1001 | 1013 | ||
1014 | static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv, | ||
1015 | struct super_block *sb, int size) | ||
1016 | { | ||
1017 | struct ext3_reserve_window_node *next_rsv; | ||
1018 | struct rb_node *next; | ||
1019 | spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock; | ||
1020 | |||
1021 | if (!spin_trylock(rsv_lock)) | ||
1022 | return; | ||
1023 | |||
1024 | next = rb_next(&my_rsv->rsv_node); | ||
1025 | |||
1026 | if (!next) | ||
1027 | my_rsv->rsv_end += size; | ||
1028 | else { | ||
1029 | next_rsv = list_entry(next, struct ext3_reserve_window_node, rsv_node); | ||
1030 | |||
1031 | if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size) | ||
1032 | my_rsv->rsv_end += size; | ||
1033 | else | ||
1034 | my_rsv->rsv_end = next_rsv->rsv_start - 1; | ||
1035 | } | ||
1036 | spin_unlock(rsv_lock); | ||
1037 | } | ||
1038 | |||
1002 | /* | 1039 | /* |
1003 | * This is the main function used to allocate a new block and its reservation | 1040 | * This is the main function used to allocate a new block and its reservation |
1004 | * window. | 1041 | * window. |
@@ -1024,11 +1061,12 @@ static int | |||
1024 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | 1061 | ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, |
1025 | unsigned int group, struct buffer_head *bitmap_bh, | 1062 | unsigned int group, struct buffer_head *bitmap_bh, |
1026 | int goal, struct ext3_reserve_window_node * my_rsv, | 1063 | int goal, struct ext3_reserve_window_node * my_rsv, |
1027 | int *errp) | 1064 | unsigned long *count, int *errp) |
1028 | { | 1065 | { |
1029 | unsigned long group_first_block; | 1066 | unsigned long group_first_block; |
1030 | int ret = 0; | 1067 | int ret = 0; |
1031 | int fatal; | 1068 | int fatal; |
1069 | unsigned long num = *count; | ||
1032 | 1070 | ||
1033 | *errp = 0; | 1071 | *errp = 0; |
1034 | 1072 | ||
@@ -1051,7 +1089,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1051 | * or last attempt to allocate a block with reservation turned on failed | 1089 | * or last attempt to allocate a block with reservation turned on failed |
1052 | */ | 1090 | */ |
1053 | if (my_rsv == NULL ) { | 1091 | if (my_rsv == NULL ) { |
1054 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL); | 1092 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, |
1093 | goal, count, NULL); | ||
1055 | goto out; | 1094 | goto out; |
1056 | } | 1095 | } |
1057 | /* | 1096 | /* |
@@ -1081,6 +1120,8 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1081 | while (1) { | 1120 | while (1) { |
1082 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || | 1121 | if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) || |
1083 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { | 1122 | !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) { |
1123 | if (my_rsv->rsv_goal_size < *count) | ||
1124 | my_rsv->rsv_goal_size = *count; | ||
1084 | ret = alloc_new_reservation(my_rsv, goal, sb, | 1125 | ret = alloc_new_reservation(my_rsv, goal, sb, |
1085 | group, bitmap_bh); | 1126 | group, bitmap_bh); |
1086 | if (ret < 0) | 1127 | if (ret < 0) |
@@ -1088,16 +1129,21 @@ ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle, | |||
1088 | 1129 | ||
1089 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) | 1130 | if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) |
1090 | goal = -1; | 1131 | goal = -1; |
1091 | } | 1132 | } else if (goal > 0 && (my_rsv->rsv_end-goal+1) < *count) |
1133 | try_to_extend_reservation(my_rsv, sb, | ||
1134 | *count-my_rsv->rsv_end + goal - 1); | ||
1135 | |||
1092 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) | 1136 | if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb)) |
1093 | || (my_rsv->rsv_end < group_first_block)) | 1137 | || (my_rsv->rsv_end < group_first_block)) |
1094 | BUG(); | 1138 | BUG(); |
1095 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, | 1139 | ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, |
1096 | &my_rsv->rsv_window); | 1140 | &num, &my_rsv->rsv_window); |
1097 | if (ret >= 0) { | 1141 | if (ret >= 0) { |
1098 | my_rsv->rsv_alloc_hit++; | 1142 | my_rsv->rsv_alloc_hit += num; |
1143 | *count = num; | ||
1099 | break; /* succeed */ | 1144 | break; /* succeed */ |
1100 | } | 1145 | } |
1146 | num = *count; | ||
1101 | } | 1147 | } |
1102 | out: | 1148 | out: |
1103 | if (ret >= 0) { | 1149 | if (ret >= 0) { |
@@ -1154,8 +1200,8 @@ int ext3_should_retry_alloc(struct super_block *sb, int *retries) | |||
1154 | * bitmap, and then for any free bit if that fails. | 1200 | * bitmap, and then for any free bit if that fails. |
1155 | * This function also updates quota and i_blocks field. | 1201 | * This function also updates quota and i_blocks field. |
1156 | */ | 1202 | */ |
1157 | int ext3_new_block(handle_t *handle, struct inode *inode, | 1203 | int ext3_new_blocks(handle_t *handle, struct inode *inode, |
1158 | unsigned long goal, int *errp) | 1204 | unsigned long goal, unsigned long *count, int *errp) |
1159 | { | 1205 | { |
1160 | struct buffer_head *bitmap_bh = NULL; | 1206 | struct buffer_head *bitmap_bh = NULL; |
1161 | struct buffer_head *gdp_bh; | 1207 | struct buffer_head *gdp_bh; |
@@ -1178,6 +1224,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, | |||
1178 | static int goal_hits, goal_attempts; | 1224 | static int goal_hits, goal_attempts; |
1179 | #endif | 1225 | #endif |
1180 | unsigned long ngroups; | 1226 | unsigned long ngroups; |
1227 | unsigned long num = *count; | ||
1181 | 1228 | ||
1182 | *errp = -ENOSPC; | 1229 | *errp = -ENOSPC; |
1183 | sb = inode->i_sb; | 1230 | sb = inode->i_sb; |
@@ -1189,7 +1236,7 @@ int ext3_new_block(handle_t *handle, struct inode *inode, | |||
1189 | /* | 1236 | /* |
1190 | * Check quota for allocation of this block. | 1237 | * Check quota for allocation of this block. |
1191 | */ | 1238 | */ |
1192 | if (DQUOT_ALLOC_BLOCK(inode, 1)) { | 1239 | if (DQUOT_ALLOC_BLOCK(inode, num)) { |
1193 | *errp = -EDQUOT; | 1240 | *errp = -EDQUOT; |
1194 | return 0; | 1241 | return 0; |
1195 | } | 1242 | } |
@@ -1244,7 +1291,7 @@ retry: | |||
1244 | if (!bitmap_bh) | 1291 | if (!bitmap_bh) |
1245 | goto io_error; | 1292 | goto io_error; |
1246 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | 1293 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, |
1247 | bitmap_bh, ret_block, my_rsv, &fatal); | 1294 | bitmap_bh, ret_block, my_rsv, &num, &fatal); |
1248 | if (fatal) | 1295 | if (fatal) |
1249 | goto out; | 1296 | goto out; |
1250 | if (ret_block >= 0) | 1297 | if (ret_block >= 0) |
@@ -1281,7 +1328,7 @@ retry: | |||
1281 | if (!bitmap_bh) | 1328 | if (!bitmap_bh) |
1282 | goto io_error; | 1329 | goto io_error; |
1283 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, | 1330 | ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no, |
1284 | bitmap_bh, -1, my_rsv, &fatal); | 1331 | bitmap_bh, -1, my_rsv, &num, &fatal); |
1285 | if (fatal) | 1332 | if (fatal) |
1286 | goto out; | 1333 | goto out; |
1287 | if (ret_block >= 0) | 1334 | if (ret_block >= 0) |
@@ -1316,13 +1363,15 @@ allocated: | |||
1316 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) | 1363 | target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb) |
1317 | + le32_to_cpu(es->s_first_data_block); | 1364 | + le32_to_cpu(es->s_first_data_block); |
1318 | 1365 | ||
1319 | if (target_block == le32_to_cpu(gdp->bg_block_bitmap) || | 1366 | if (in_range(le32_to_cpu(gdp->bg_block_bitmap), target_block, num) || |
1320 | target_block == le32_to_cpu(gdp->bg_inode_bitmap) || | 1367 | in_range(le32_to_cpu(gdp->bg_inode_bitmap), target_block, num) || |
1321 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), | 1368 | in_range(target_block, le32_to_cpu(gdp->bg_inode_table), |
1369 | EXT3_SB(sb)->s_itb_per_group) || | ||
1370 | in_range(target_block + num - 1, le32_to_cpu(gdp->bg_inode_table), | ||
1322 | EXT3_SB(sb)->s_itb_per_group)) | 1371 | EXT3_SB(sb)->s_itb_per_group)) |
1323 | ext3_error(sb, "ext3_new_block", | 1372 | ext3_error(sb, "ext3_new_block", |
1324 | "Allocating block in system zone - " | 1373 | "Allocating block in system zone - " |
1325 | "block = %u", target_block); | 1374 | "blocks from %u, length %lu", target_block, num); |
1326 | 1375 | ||
1327 | performed_allocation = 1; | 1376 | performed_allocation = 1; |
1328 | 1377 | ||
@@ -1341,10 +1390,14 @@ allocated: | |||
1341 | jbd_lock_bh_state(bitmap_bh); | 1390 | jbd_lock_bh_state(bitmap_bh); |
1342 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1391 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1343 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { | 1392 | if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) { |
1344 | if (ext3_test_bit(ret_block, | 1393 | int i; |
1345 | bh2jh(bitmap_bh)->b_committed_data)) { | 1394 | |
1346 | printk("%s: block was unexpectedly set in " | 1395 | for (i = 0; i < num; i++) { |
1347 | "b_committed_data\n", __FUNCTION__); | 1396 | if (ext3_test_bit(ret_block, |
1397 | bh2jh(bitmap_bh)->b_committed_data)) { | ||
1398 | printk("%s: block was unexpectedly set in " | ||
1399 | "b_committed_data\n", __FUNCTION__); | ||
1400 | } | ||
1348 | } | 1401 | } |
1349 | } | 1402 | } |
1350 | ext3_debug("found bit %d\n", ret_block); | 1403 | ext3_debug("found bit %d\n", ret_block); |
@@ -1355,7 +1408,7 @@ allocated: | |||
1355 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ | 1408 | /* ret_block was blockgroup-relative. Now it becomes fs-relative */ |
1356 | ret_block = target_block; | 1409 | ret_block = target_block; |
1357 | 1410 | ||
1358 | if (ret_block >= le32_to_cpu(es->s_blocks_count)) { | 1411 | if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) { |
1359 | ext3_error(sb, "ext3_new_block", | 1412 | ext3_error(sb, "ext3_new_block", |
1360 | "block(%d) >= blocks count(%d) - " | 1413 | "block(%d) >= blocks count(%d) - " |
1361 | "block_group = %d, es == %p ", ret_block, | 1414 | "block_group = %d, es == %p ", ret_block, |
@@ -1373,9 +1426,9 @@ allocated: | |||
1373 | 1426 | ||
1374 | spin_lock(sb_bgl_lock(sbi, group_no)); | 1427 | spin_lock(sb_bgl_lock(sbi, group_no)); |
1375 | gdp->bg_free_blocks_count = | 1428 | gdp->bg_free_blocks_count = |
1376 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1); | 1429 | cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - num); |
1377 | spin_unlock(sb_bgl_lock(sbi, group_no)); | 1430 | spin_unlock(sb_bgl_lock(sbi, group_no)); |
1378 | percpu_counter_mod(&sbi->s_freeblocks_counter, -1); | 1431 | percpu_counter_mod(&sbi->s_freeblocks_counter, -num); |
1379 | 1432 | ||
1380 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); | 1433 | BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor"); |
1381 | err = ext3_journal_dirty_metadata(handle, gdp_bh); | 1434 | err = ext3_journal_dirty_metadata(handle, gdp_bh); |
@@ -1388,6 +1441,8 @@ allocated: | |||
1388 | 1441 | ||
1389 | *errp = 0; | 1442 | *errp = 0; |
1390 | brelse(bitmap_bh); | 1443 | brelse(bitmap_bh); |
1444 | DQUOT_FREE_BLOCK(inode, *count-num); | ||
1445 | *count = num; | ||
1391 | return ret_block; | 1446 | return ret_block; |
1392 | 1447 | ||
1393 | io_error: | 1448 | io_error: |
@@ -1401,11 +1456,19 @@ out: | |||
1401 | * Undo the block allocation | 1456 | * Undo the block allocation |
1402 | */ | 1457 | */ |
1403 | if (!performed_allocation) | 1458 | if (!performed_allocation) |
1404 | DQUOT_FREE_BLOCK(inode, 1); | 1459 | DQUOT_FREE_BLOCK(inode, *count); |
1405 | brelse(bitmap_bh); | 1460 | brelse(bitmap_bh); |
1406 | return 0; | 1461 | return 0; |
1407 | } | 1462 | } |
1408 | 1463 | ||
1464 | int ext3_new_block(handle_t *handle, struct inode *inode, | ||
1465 | unsigned long goal, int *errp) | ||
1466 | { | ||
1467 | unsigned long count = 1; | ||
1468 | |||
1469 | return ext3_new_blocks(handle, inode, goal, &count, errp); | ||
1470 | } | ||
1471 | |||
1409 | unsigned long ext3_count_free_blocks(struct super_block *sb) | 1472 | unsigned long ext3_count_free_blocks(struct super_block *sb) |
1410 | { | 1473 | { |
1411 | unsigned long desc_count; | 1474 | unsigned long desc_count; |
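fs/ext3/balloc.c grows a multi-block allocator: ext3_new_blocks() takes an in/out *count, tries to extend the reservation window to cover it, and ext3_new_block() is kept as a one-block wrapper around it. A hedged sketch of a caller of the new interface; alloc_run() and the choice of 8 blocks are hypothetical:

#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>

/* hypothetical caller: ask for up to 8 contiguous blocks near goal */
static int alloc_run(handle_t *handle, struct inode *inode,
		     unsigned long goal)
{
	unsigned long count = 8;
	int err;
	int first;

	first = ext3_new_blocks(handle, inode, goal, &count, &err);
	if (first == 0)
		return err;	/* nothing allocated; err explains why */

	/* blocks first .. first + count - 1 now belong to this inode */
	return 0;
}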
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c index 773459164bb2..38bd3f6ec147 100644 --- a/fs/ext3/dir.c +++ b/fs/ext3/dir.c | |||
@@ -131,8 +131,9 @@ static int ext3_readdir(struct file * filp, | |||
131 | struct buffer_head *bh = NULL; | 131 | struct buffer_head *bh = NULL; |
132 | 132 | ||
133 | map_bh.b_state = 0; | 133 | map_bh.b_state = 0; |
134 | err = ext3_get_block_handle(NULL, inode, blk, &map_bh, 0, 0); | 134 | err = ext3_get_blocks_handle(NULL, inode, blk, 1, |
135 | if (!err) { | 135 | &map_bh, 0, 0); |
136 | if (err > 0) { | ||
136 | page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, | 137 | page_cache_readahead(sb->s_bdev->bd_inode->i_mapping, |
137 | &filp->f_ra, | 138 | &filp->f_ra, |
138 | filp, | 139 | filp, |
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c index 2c361377e0a5..48ae0339af17 100644 --- a/fs/ext3/inode.c +++ b/fs/ext3/inode.c | |||
@@ -44,16 +44,16 @@ static int ext3_writepage_trans_blocks(struct inode *inode); | |||
44 | /* | 44 | /* |
45 | * Test whether an inode is a fast symlink. | 45 | * Test whether an inode is a fast symlink. |
46 | */ | 46 | */ |
47 | static inline int ext3_inode_is_fast_symlink(struct inode *inode) | 47 | static int ext3_inode_is_fast_symlink(struct inode *inode) |
48 | { | 48 | { |
49 | int ea_blocks = EXT3_I(inode)->i_file_acl ? | 49 | int ea_blocks = EXT3_I(inode)->i_file_acl ? |
50 | (inode->i_sb->s_blocksize >> 9) : 0; | 50 | (inode->i_sb->s_blocksize >> 9) : 0; |
51 | 51 | ||
52 | return (S_ISLNK(inode->i_mode) && | 52 | return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0); |
53 | inode->i_blocks - ea_blocks == 0); | ||
54 | } | 53 | } |
55 | 54 | ||
56 | /* The ext3 forget function must perform a revoke if we are freeing data | 55 | /* |
56 | * The ext3 forget function must perform a revoke if we are freeing data | ||
57 | * which has been journaled. Metadata (eg. indirect blocks) must be | 57 | * which has been journaled. Metadata (eg. indirect blocks) must be |
58 | * revoked in all cases. | 58 | * revoked in all cases. |
59 | * | 59 | * |
@@ -61,10 +61,8 @@ static inline int ext3_inode_is_fast_symlink(struct inode *inode) | |||
61 | * but there may still be a record of it in the journal, and that record | 61 | * but there may still be a record of it in the journal, and that record |
62 | * still needs to be revoked. | 62 | * still needs to be revoked. |
63 | */ | 63 | */ |
64 | 64 | int ext3_forget(handle_t *handle, int is_metadata, struct inode *inode, | |
65 | int ext3_forget(handle_t *handle, int is_metadata, | 65 | struct buffer_head *bh, int blocknr) |
66 | struct inode *inode, struct buffer_head *bh, | ||
67 | int blocknr) | ||
68 | { | 66 | { |
69 | int err; | 67 | int err; |
70 | 68 | ||
@@ -104,10 +102,9 @@ int ext3_forget(handle_t *handle, int is_metadata, | |||
104 | } | 102 | } |
105 | 103 | ||
106 | /* | 104 | /* |
107 | * Work out how many blocks we need to progress with the next chunk of a | 105 | * Work out how many blocks we need to proceed with the next chunk of a |
108 | * truncate transaction. | 106 | * truncate transaction. |
109 | */ | 107 | */ |
110 | |||
111 | static unsigned long blocks_for_truncate(struct inode *inode) | 108 | static unsigned long blocks_for_truncate(struct inode *inode) |
112 | { | 109 | { |
113 | unsigned long needed; | 110 | unsigned long needed; |
@@ -141,7 +138,6 @@ static unsigned long blocks_for_truncate(struct inode *inode) | |||
141 | * extend fails, we need to propagate the failure up and restart the | 138 | * extend fails, we need to propagate the failure up and restart the |
142 | * transaction in the top-level truncate loop. --sct | 139 | * transaction in the top-level truncate loop. --sct |
143 | */ | 140 | */ |
144 | |||
145 | static handle_t *start_transaction(struct inode *inode) | 141 | static handle_t *start_transaction(struct inode *inode) |
146 | { | 142 | { |
147 | handle_t *result; | 143 | handle_t *result; |
@@ -194,9 +190,11 @@ void ext3_delete_inode (struct inode * inode) | |||
194 | 190 | ||
195 | handle = start_transaction(inode); | 191 | handle = start_transaction(inode); |
196 | if (IS_ERR(handle)) { | 192 | if (IS_ERR(handle)) { |
197 | /* If we're going to skip the normal cleanup, we still | 193 | /* |
198 | * need to make sure that the in-core orphan linked list | 194 | * If we're going to skip the normal cleanup, we still need to |
199 | * is properly cleaned up. */ | 195 | * make sure that the in-core orphan linked list is properly |
196 | * cleaned up. | ||
197 | */ | ||
200 | ext3_orphan_del(NULL, inode); | 198 | ext3_orphan_del(NULL, inode); |
201 | goto no_delete; | 199 | goto no_delete; |
202 | } | 200 | } |
@@ -235,16 +233,6 @@ no_delete: | |||
235 | clear_inode(inode); /* We must guarantee clearing of inode... */ | 233 | clear_inode(inode); /* We must guarantee clearing of inode... */ |
236 | } | 234 | } |
237 | 235 | ||
238 | static int ext3_alloc_block (handle_t *handle, | ||
239 | struct inode * inode, unsigned long goal, int *err) | ||
240 | { | ||
241 | unsigned long result; | ||
242 | |||
243 | result = ext3_new_block(handle, inode, goal, err); | ||
244 | return result; | ||
245 | } | ||
246 | |||
247 | |||
248 | typedef struct { | 236 | typedef struct { |
249 | __le32 *p; | 237 | __le32 *p; |
250 | __le32 key; | 238 | __le32 key; |
@@ -257,7 +245,7 @@ static inline void add_chain(Indirect *p, struct buffer_head *bh, __le32 *v) | |||
257 | p->bh = bh; | 245 | p->bh = bh; |
258 | } | 246 | } |
259 | 247 | ||
260 | static inline int verify_chain(Indirect *from, Indirect *to) | 248 | static int verify_chain(Indirect *from, Indirect *to) |
261 | { | 249 | { |
262 | while (from <= to && from->key == *from->p) | 250 | while (from <= to && from->key == *from->p) |
263 | from++; | 251 | from++; |
@@ -327,10 +315,10 @@ static int ext3_block_to_path(struct inode *inode, | |||
327 | offsets[n++] = i_block & (ptrs - 1); | 315 | offsets[n++] = i_block & (ptrs - 1); |
328 | final = ptrs; | 316 | final = ptrs; |
329 | } else { | 317 | } else { |
330 | ext3_warning (inode->i_sb, "ext3_block_to_path", "block > big"); | 318 | ext3_warning(inode->i_sb, "ext3_block_to_path", "block > big"); |
331 | } | 319 | } |
332 | if (boundary) | 320 | if (boundary) |
333 | *boundary = (i_block & (ptrs - 1)) == (final - 1); | 321 | *boundary = final - 1 - (i_block & (ptrs - 1)); |
334 | return n; | 322 | return n; |
335 | } | 323 | } |
336 | 324 | ||
@@ -419,7 +407,6 @@ no_block: | |||
419 | * | 407 | * |
420 | * Caller must make sure that @ind is valid and will stay that way. | 408 | * Caller must make sure that @ind is valid and will stay that way. |
421 | */ | 409 | */ |
422 | |||
423 | static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | 410 | static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) |
424 | { | 411 | { |
425 | struct ext3_inode_info *ei = EXT3_I(inode); | 412 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -429,17 +416,18 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | |||
429 | unsigned long colour; | 416 | unsigned long colour; |
430 | 417 | ||
431 | /* Try to find previous block */ | 418 | /* Try to find previous block */ |
432 | for (p = ind->p - 1; p >= start; p--) | 419 | for (p = ind->p - 1; p >= start; p--) { |
433 | if (*p) | 420 | if (*p) |
434 | return le32_to_cpu(*p); | 421 | return le32_to_cpu(*p); |
422 | } | ||
435 | 423 | ||
436 | /* No such thing, so let's try location of indirect block */ | 424 | /* No such thing, so let's try location of indirect block */ |
437 | if (ind->bh) | 425 | if (ind->bh) |
438 | return ind->bh->b_blocknr; | 426 | return ind->bh->b_blocknr; |
439 | 427 | ||
440 | /* | 428 | /* |
441 | * It is going to be refered from inode itself? OK, just put it into | 429 | * It is going to be referred to from the inode itself? OK, just put it |
442 | * the same cylinder group then. | 430 | * into the same cylinder group then. |
443 | */ | 431 | */ |
444 | bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + | 432 | bg_start = (ei->i_block_group * EXT3_BLOCKS_PER_GROUP(inode->i_sb)) + |
445 | le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); | 433 | le32_to_cpu(EXT3_SB(inode->i_sb)->s_es->s_first_data_block); |
@@ -463,7 +451,9 @@ static unsigned long ext3_find_near(struct inode *inode, Indirect *ind) | |||
463 | static unsigned long ext3_find_goal(struct inode *inode, long block, | 451 | static unsigned long ext3_find_goal(struct inode *inode, long block, |
464 | Indirect chain[4], Indirect *partial) | 452 | Indirect chain[4], Indirect *partial) |
465 | { | 453 | { |
466 | struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; | 454 | struct ext3_block_alloc_info *block_i; |
455 | |||
456 | block_i = EXT3_I(inode)->i_block_alloc_info; | ||
467 | 457 | ||
468 | /* | 458 | /* |
469 | * try the heuristic for sequential allocation, | 459 | * try the heuristic for sequential allocation, |
@@ -478,13 +468,113 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, | |||
478 | } | 468 | } |
479 | 469 | ||
480 | /** | 470 | /** |
471 | * ext3_blks_to_allocate: Look up the block map and count the number | ||
472 | * of direct blocks that need to be allocated for the given branch. | ||
473 | * | ||
474 | * @branch: chain of indirect blocks | ||
475 | * @k: number of blocks needed for indirect blocks | ||
476 | * @blks: number of data blocks to be mapped. | ||
477 | * @blocks_to_boundary: the offset in the indirect block | ||
478 | * | ||
479 | * return the total number of blocks to be allocated, including the | ||
480 | * direct and indirect blocks. | ||
481 | */ | ||
482 | static int ext3_blks_to_allocate(Indirect *branch, int k, unsigned long blks, | ||
483 | int blocks_to_boundary) | ||
484 | { | ||
485 | unsigned long count = 0; | ||
486 | |||
487 | /* | ||
488 | * Simple case: the [t,d]Indirect block(s) have not been allocated yet, | ||
489 | * so clearly the blocks on that path have not been allocated either | ||
490 | */ | ||
491 | if (k > 0) { | ||
492 | /* right now we don't handle cross boundary allocation */ | ||
493 | if (blks < blocks_to_boundary + 1) | ||
494 | count += blks; | ||
495 | else | ||
496 | count += blocks_to_boundary + 1; | ||
497 | return count; | ||
498 | } | ||
499 | |||
500 | count++; | ||
501 | while (count < blks && count <= blocks_to_boundary && | ||
502 | le32_to_cpu(*(branch[0].p + count)) == 0) { | ||
503 | count++; | ||
504 | } | ||
505 | return count; | ||
506 | } | ||
507 | |||
508 | /** | ||
509 | * ext3_alloc_blocks: multiple allocate blocks needed for a branch | ||
510 | * @indirect_blks: the number of blocks that need to be allocated for | ||
511 | * indirect blocks | ||
512 | * | ||
513 | * @new_blocks: on return it will store the new block numbers for | ||
514 | * the indirect blocks(if needed) and the first direct block, | ||
515 | * @blks: on return it will store the total number of allocated | ||
516 | * direct blocks | ||
517 | */ | ||
518 | static int ext3_alloc_blocks(handle_t *handle, struct inode *inode, | ||
519 | unsigned long goal, int indirect_blks, int blks, | ||
520 | unsigned long long new_blocks[4], int *err) | ||
521 | { | ||
522 | int target, i; | ||
523 | unsigned long count = 0; | ||
524 | int index = 0; | ||
525 | unsigned long current_block = 0; | ||
526 | int ret = 0; | ||
527 | |||
528 | /* | ||
529 | * Here we try to allocate the requested multiple blocks at once, | ||
530 | * on a best-effort basis. | ||
531 | * To build a branch, we should allocate blocks for | ||
532 | * the indirect blocks(if not allocated yet), and at least | ||
533 | * the first direct block of this branch. That's the | ||
534 | * minimum number of blocks we need to allocate (required) | ||
535 | */ | ||
536 | target = blks + indirect_blks; | ||
537 | |||
538 | while (1) { | ||
539 | count = target; | ||
540 | /* allocating blocks for indirect blocks and direct blocks */ | ||
541 | current_block = ext3_new_blocks(handle,inode,goal,&count,err); | ||
542 | if (*err) | ||
543 | goto failed_out; | ||
544 | |||
545 | target -= count; | ||
546 | /* allocate blocks for indirect blocks */ | ||
547 | while (index < indirect_blks && count) { | ||
548 | new_blocks[index++] = current_block++; | ||
549 | count--; | ||
550 | } | ||
551 | |||
552 | if (count > 0) | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | /* save the new block number for the first direct block */ | ||
557 | new_blocks[index] = current_block; | ||
558 | |||
559 | /* total number of blocks allocated for direct blocks */ | ||
560 | ret = count; | ||
561 | *err = 0; | ||
562 | return ret; | ||
563 | failed_out: | ||
564 | for (i = 0; i <index; i++) | ||
565 | ext3_free_blocks(handle, inode, new_blocks[i], 1); | ||
566 | return ret; | ||
567 | } | ||
568 | |||
569 | /** | ||
481 | * ext3_alloc_branch - allocate and set up a chain of blocks. | 570 | * ext3_alloc_branch - allocate and set up a chain of blocks. |
482 | * @inode: owner | 571 | * @inode: owner |
483 | * @num: depth of the chain (number of blocks to allocate) | 572 | * @indirect_blks: number of allocated indirect blocks |
573 | * @blks: number of allocated direct blocks | ||
484 | * @offsets: offsets (in the blocks) to store the pointers to next. | 574 | * @offsets: offsets (in the blocks) to store the pointers to next. |
485 | * @branch: place to store the chain in. | 575 | * @branch: place to store the chain in. |
486 | * | 576 | * |
487 | * This function allocates @num blocks, zeroes out all but the last one, | 577 | * This function allocates blocks, zeroes out all but the last one, |
488 | * links them into chain and (if we are synchronous) writes them to disk. | 578 | * links them into chain and (if we are synchronous) writes them to disk. |
489 | * In other words, it prepares a branch that can be spliced onto the | 579 | * In other words, it prepares a branch that can be spliced onto the |
490 | * inode. It stores the information about that chain in the branch[], in | 580 | * inode. It stores the information about that chain in the branch[], in |
@@ -501,97 +591,106 @@ static unsigned long ext3_find_goal(struct inode *inode, long block, | |||
501 | * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain | 591 | * ext3_alloc_block() (normally -ENOSPC). Otherwise we set the chain |
502 | * as described above and return 0. | 592 | * as described above and return 0. |
503 | */ | 593 | */ |
504 | |||
505 | static int ext3_alloc_branch(handle_t *handle, struct inode *inode, | 594 | static int ext3_alloc_branch(handle_t *handle, struct inode *inode, |
506 | int num, | 595 | int indirect_blks, int *blks, unsigned long goal, |
507 | unsigned long goal, | 596 | int *offsets, Indirect *branch) |
508 | int *offsets, | ||
509 | Indirect *branch) | ||
510 | { | 597 | { |
511 | int blocksize = inode->i_sb->s_blocksize; | 598 | int blocksize = inode->i_sb->s_blocksize; |
512 | int n = 0, keys = 0; | 599 | int i, n = 0; |
513 | int err = 0; | 600 | int err = 0; |
514 | int i; | 601 | struct buffer_head *bh; |
515 | int parent = ext3_alloc_block(handle, inode, goal, &err); | 602 | int num; |
516 | 603 | unsigned long long new_blocks[4]; | |
517 | branch[0].key = cpu_to_le32(parent); | 604 | unsigned long long current_block; |
518 | if (parent) { | ||
519 | for (n = 1; n < num; n++) { | ||
520 | struct buffer_head *bh; | ||
521 | /* Allocate the next block */ | ||
522 | int nr = ext3_alloc_block(handle, inode, parent, &err); | ||
523 | if (!nr) | ||
524 | break; | ||
525 | branch[n].key = cpu_to_le32(nr); | ||
526 | 605 | ||
527 | /* | 606 | num = ext3_alloc_blocks(handle, inode, goal, indirect_blks, |
528 | * Get buffer_head for parent block, zero it out | 607 | *blks, new_blocks, &err); |
529 | * and set the pointer to new one, then send | 608 | if (err) |
530 | * parent to disk. | 609 | return err; |
531 | */ | ||
532 | bh = sb_getblk(inode->i_sb, parent); | ||
533 | if (!bh) | ||
534 | break; | ||
535 | keys = n+1; | ||
536 | branch[n].bh = bh; | ||
537 | lock_buffer(bh); | ||
538 | BUFFER_TRACE(bh, "call get_create_access"); | ||
539 | err = ext3_journal_get_create_access(handle, bh); | ||
540 | if (err) { | ||
541 | unlock_buffer(bh); | ||
542 | brelse(bh); | ||
543 | break; | ||
544 | } | ||
545 | 610 | ||
546 | memset(bh->b_data, 0, blocksize); | 611 | branch[0].key = cpu_to_le32(new_blocks[0]); |
547 | branch[n].p = (__le32*) bh->b_data + offsets[n]; | 612 | /* |
548 | *branch[n].p = branch[n].key; | 613 | * metadata blocks and data blocks are allocated. |
549 | BUFFER_TRACE(bh, "marking uptodate"); | 614 | */ |
550 | set_buffer_uptodate(bh); | 615 | for (n = 1; n <= indirect_blks; n++) { |
616 | /* | ||
617 | * Get buffer_head for parent block, zero it out | ||
618 | * and set the pointer to new one, then send | ||
619 | * parent to disk. | ||
620 | */ | ||
621 | bh = sb_getblk(inode->i_sb, new_blocks[n-1]); | ||
622 | branch[n].bh = bh; | ||
623 | lock_buffer(bh); | ||
624 | BUFFER_TRACE(bh, "call get_create_access"); | ||
625 | err = ext3_journal_get_create_access(handle, bh); | ||
626 | if (err) { | ||
551 | unlock_buffer(bh); | 627 | unlock_buffer(bh); |
628 | brelse(bh); | ||
629 | goto failed; | ||
630 | } | ||
552 | 631 | ||
553 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | 632 | memset(bh->b_data, 0, blocksize); |
554 | err = ext3_journal_dirty_metadata(handle, bh); | 633 | branch[n].p = (__le32 *) bh->b_data + offsets[n]; |
555 | if (err) | 634 | branch[n].key = cpu_to_le32(new_blocks[n]); |
556 | break; | 635 | *branch[n].p = branch[n].key; |
557 | 636 | if ( n == indirect_blks) { | |
558 | parent = nr; | 637 | current_block = new_blocks[n]; |
638 | /* | ||
639 | * End of chain, update the last new metablock of | ||
640 | * the chain to point to the new allocated | ||
641 | * data blocks numbers | ||
642 | */ | ||
643 | for (i=1; i < num; i++) | ||
644 | *(branch[n].p + i) = cpu_to_le32(++current_block); | ||
559 | } | 645 | } |
560 | } | 646 | BUFFER_TRACE(bh, "marking uptodate"); |
561 | if (n == num) | 647 | set_buffer_uptodate(bh); |
562 | return 0; | 648 | unlock_buffer(bh); |
563 | 649 | ||
650 | BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata"); | ||
651 | err = ext3_journal_dirty_metadata(handle, bh); | ||
652 | if (err) | ||
653 | goto failed; | ||
654 | } | ||
655 | *blks = num; | ||
656 | return err; | ||
657 | failed: | ||
564 | /* Allocation failed, free what we already allocated */ | 658 | /* Allocation failed, free what we already allocated */ |
565 | for (i = 1; i < keys; i++) { | 659 | for (i = 1; i <= n ; i++) { |
566 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); | 660 | BUFFER_TRACE(branch[i].bh, "call journal_forget"); |
567 | ext3_journal_forget(handle, branch[i].bh); | 661 | ext3_journal_forget(handle, branch[i].bh); |
568 | } | 662 | } |
569 | for (i = 0; i < keys; i++) | 663 | for (i = 0; i <indirect_blks; i++) |
570 | ext3_free_blocks(handle, inode, le32_to_cpu(branch[i].key), 1); | 664 | ext3_free_blocks(handle, inode, new_blocks[i], 1); |
665 | |||
666 | ext3_free_blocks(handle, inode, new_blocks[i], num); | ||
667 | |||
571 | return err; | 668 | return err; |
572 | } | 669 | } |
573 | 670 | ||
574 | /** | 671 | /** |
575 | * ext3_splice_branch - splice the allocated branch onto inode. | 672 | * ext3_splice_branch - splice the allocated branch onto inode. |
576 | * @inode: owner | 673 | * @inode: owner |
577 | * @block: (logical) number of block we are adding | 674 | * @block: (logical) number of block we are adding |
578 | * @chain: chain of indirect blocks (with a missing link - see | 675 | * @chain: chain of indirect blocks (with a missing link - see |
579 | * ext3_alloc_branch) | 676 | * ext3_alloc_branch) |
580 | * @where: location of missing link | 677 | * @where: location of missing link |
581 | * @num: number of blocks we are adding | 678 | * @num: number of indirect blocks we are adding |
582 | * | 679 | * @blks: number of direct blocks we are adding |
583 | * This function fills the missing link and does all housekeeping needed in | 680 | * |
584 | * inode (->i_blocks, etc.). In case of success we end up with the full | 681 | * This function fills the missing link and does all housekeeping needed in |
585 | * chain to new block and return 0. | 682 | * inode (->i_blocks, etc.). In case of success we end up with the full |
683 | * chain to new block and return 0. | ||
586 | */ | 684 | */ |
587 | 685 | static int ext3_splice_branch(handle_t *handle, struct inode *inode, | |
588 | static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | 686 | long block, Indirect *where, int num, int blks) |
589 | Indirect chain[4], Indirect *where, int num) | ||
590 | { | 687 | { |
591 | int i; | 688 | int i; |
592 | int err = 0; | 689 | int err = 0; |
593 | struct ext3_block_alloc_info *block_i = EXT3_I(inode)->i_block_alloc_info; | 690 | struct ext3_block_alloc_info *block_i; |
691 | unsigned long current_block; | ||
594 | 692 | ||
693 | block_i = EXT3_I(inode)->i_block_alloc_info; | ||
595 | /* | 694 | /* |
596 | * If we're splicing into a [td]indirect block (as opposed to the | 695 | * If we're splicing into a [td]indirect block (as opposed to the |
597 | * inode) then we need to get write access to the [td]indirect block | 696 | * inode) then we need to get write access to the [td]indirect block |
@@ -608,13 +707,24 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
608 | *where->p = where->key; | 707 | *where->p = where->key; |
609 | 708 | ||
610 | /* | 709 | /* |
710 | * Update the host buffer_head or inode to point to the remaining | ||
711 | * just-allocated direct blocks | ||
712 | */ | ||
713 | if (num == 0 && blks > 1) { | ||
714 | current_block = le32_to_cpu(where->key) + 1; | ||
715 | for (i = 1; i < blks; i++) | ||
716 | *(where->p + i ) = cpu_to_le32(current_block++); | ||
717 | } | ||
718 | |||
719 | /* | ||
611 | * update the most recently allocated logical & physical block | 720 | * update the most recently allocated logical & physical block |
612 | * in i_block_alloc_info, to help find the proper goal block for the next | 721 | * in i_block_alloc_info, to help find the proper goal block for the next |
613 | * allocation | 722 | * allocation |
614 | */ | 723 | */ |
615 | if (block_i) { | 724 | if (block_i) { |
616 | block_i->last_alloc_logical_block = block; | 725 | block_i->last_alloc_logical_block = block + blks - 1; |
617 | block_i->last_alloc_physical_block = le32_to_cpu(where[num-1].key); | 726 | block_i->last_alloc_physical_block = |
727 | le32_to_cpu(where[num].key) + blks - 1; | ||
618 | } | 728 | } |
619 | 729 | ||
620 | /* We are done with atomic stuff, now do the rest of housekeeping */ | 730 | /* We are done with atomic stuff, now do the rest of housekeeping */ |
@@ -625,7 +735,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
625 | /* had we spliced it onto indirect block? */ | 735 | /* had we spliced it onto indirect block? */ |
626 | if (where->bh) { | 736 | if (where->bh) { |
627 | /* | 737 | /* |
628 | * akpm: If we spliced it onto an indirect block, we haven't | 738 | * If we spliced it onto an indirect block, we haven't |
629 | * altered the inode. Note however that if it is being spliced | 739 | * altered the inode. Note however that if it is being spliced |
630 | * onto an indirect block at the very end of the file (the | 740 | * onto an indirect block at the very end of the file (the |
631 | * file is growing) then we *will* alter the inode to reflect | 741 | * file is growing) then we *will* alter the inode to reflect |
@@ -647,10 +757,13 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode, long block, | |||
647 | return err; | 757 | return err; |
648 | 758 | ||
649 | err_out: | 759 | err_out: |
650 | for (i = 1; i < num; i++) { | 760 | for (i = 1; i <= num; i++) { |
651 | BUFFER_TRACE(where[i].bh, "call journal_forget"); | 761 | BUFFER_TRACE(where[i].bh, "call journal_forget"); |
652 | ext3_journal_forget(handle, where[i].bh); | 762 | ext3_journal_forget(handle, where[i].bh); |
763 | ext3_free_blocks(handle,inode,le32_to_cpu(where[i-1].key),1); | ||
653 | } | 764 | } |
765 | ext3_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks); | ||
766 | |||
654 | return err; | 767 | return err; |
655 | } | 768 | } |
656 | 769 | ||
@@ -666,26 +779,33 @@ err_out: | |||
666 | * allocations is needed - we simply release blocks and do not touch anything | 779 | * allocations is needed - we simply release blocks and do not touch anything |
667 | * reachable from inode. | 780 | * reachable from inode. |
668 | * | 781 | * |
669 | * akpm: `handle' can be NULL if create == 0. | 782 | * `handle' can be NULL if create == 0. |
670 | * | 783 | * |
671 | * The BKL may not be held on entry here. Be sure to take it early. | 784 | * The BKL may not be held on entry here. Be sure to take it early. |
785 | * return > 0, # of blocks mapped or allocated. | ||
786 | * return = 0, if plain lookup failed. | ||
787 | * return < 0, error case. | ||
672 | */ | 788 | */ |
673 | 789 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, | |
674 | int | 790 | sector_t iblock, unsigned long maxblocks, |
675 | ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | 791 | struct buffer_head *bh_result, |
676 | struct buffer_head *bh_result, int create, int extend_disksize) | 792 | int create, int extend_disksize) |
677 | { | 793 | { |
678 | int err = -EIO; | 794 | int err = -EIO; |
679 | int offsets[4]; | 795 | int offsets[4]; |
680 | Indirect chain[4]; | 796 | Indirect chain[4]; |
681 | Indirect *partial; | 797 | Indirect *partial; |
682 | unsigned long goal; | 798 | unsigned long goal; |
683 | int left; | 799 | int indirect_blks; |
684 | int boundary = 0; | 800 | int blocks_to_boundary = 0; |
685 | const int depth = ext3_block_to_path(inode, iblock, offsets, &boundary); | 801 | int depth; |
686 | struct ext3_inode_info *ei = EXT3_I(inode); | 802 | struct ext3_inode_info *ei = EXT3_I(inode); |
803 | int count = 0; | ||
804 | unsigned long first_block = 0; | ||
805 | |||
687 | 806 | ||
688 | J_ASSERT(handle != NULL || create == 0); | 807 | J_ASSERT(handle != NULL || create == 0); |
808 | depth = ext3_block_to_path(inode,iblock,offsets,&blocks_to_boundary); | ||
689 | 809 | ||
690 | if (depth == 0) | 810 | if (depth == 0) |
691 | goto out; | 811 | goto out; |
@@ -694,8 +814,31 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
694 | 814 | ||
695 | /* Simplest case - block found, no allocation needed */ | 815 | /* Simplest case - block found, no allocation needed */ |
696 | if (!partial) { | 816 | if (!partial) { |
817 | first_block = le32_to_cpu(chain[depth - 1].key); | ||
697 | clear_buffer_new(bh_result); | 818 | clear_buffer_new(bh_result); |
698 | goto got_it; | 819 | count++; |
820 | /* map more blocks */ | ||
821 | while (count < maxblocks && count <= blocks_to_boundary) { | ||
822 | if (!verify_chain(chain, partial)) { | ||
823 | /* | ||
824 | * Indirect block might be removed by | ||
825 | * truncate while we were reading it. | ||
826 | * Handling of that case: forget what we've | ||
827 | * got now. Flag the err as EAGAIN, so it | ||
828 | * will reread. | ||
829 | */ | ||
830 | err = -EAGAIN; | ||
831 | count = 0; | ||
832 | break; | ||
833 | } | ||
834 | if (le32_to_cpu(*(chain[depth-1].p + count)) == | ||
835 | (first_block + count)) | ||
836 | count++; | ||
837 | else | ||
838 | break; | ||
839 | } | ||
840 | if (err != -EAGAIN) | ||
841 | goto got_it; | ||
699 | } | 842 | } |
700 | 843 | ||
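The fast path added to ext3_get_blocks_handle() extends a successful lookup: starting from the pointer that maps the requested block, it counts how many of the following pointers map physically consecutive blocks, stopping at the caller's request size or at the end of the indirect block. A small userspace model of that contiguity scan follows; the values and the helper name count_contiguous() are invented, not kernel code.

/* Model of the "map more blocks" scan: given the slot that maps the
 * requested block, count how many following slots continue the same
 * physical run, bounded by what the caller asked for and by the end
 * of the indirect block. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

static unsigned count_contiguous(const uint32_t *slot, unsigned maxblocks,
				 unsigned blocks_to_boundary)
{
	unsigned long first_block = le32toh(slot[0]);
	unsigned count = 1;			/* the block we looked up */

	while (count < maxblocks && count <= blocks_to_boundary) {
		if (le32toh(slot[count]) == first_block + count)
			count++;
		else
			break;
	}
	return count;
}

int main(void)
{
	/* physical block numbers 500,501,502 then a discontinuity */
	uint32_t slots[] = {
		htole32(500), htole32(501), htole32(502), htole32(900),
	};

	printf("mappable in one go: %u blocks\n",
	       count_contiguous(slots, 8, 3));	/* prints 3 */
	return 0;
}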
701 | /* Next simple case - plain lookup or failed read of indirect block */ | 844 | /* Next simple case - plain lookup or failed read of indirect block */ |
@@ -723,6 +866,7 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
723 | } | 866 | } |
724 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); | 867 | partial = ext3_get_branch(inode, depth, offsets, chain, &err); |
725 | if (!partial) { | 868 | if (!partial) { |
869 | count++; | ||
726 | mutex_unlock(&ei->truncate_mutex); | 870 | mutex_unlock(&ei->truncate_mutex); |
727 | if (err) | 871 | if (err) |
728 | goto cleanup; | 872 | goto cleanup; |
@@ -740,12 +884,19 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
740 | 884 | ||
741 | goal = ext3_find_goal(inode, iblock, chain, partial); | 885 | goal = ext3_find_goal(inode, iblock, chain, partial); |
742 | 886 | ||
743 | left = (chain + depth) - partial; | 887 | /* the number of blocks we need to allocate for [d,t]indirect blocks */ |
888 | indirect_blks = (chain + depth) - partial - 1; | ||
744 | 889 | ||
745 | /* | 890 | /* |
891 | * Next look up the indirect map to count the total number of | ||
892 | * direct blocks to allocate for this branch. | ||
893 | */ | ||
894 | count = ext3_blks_to_allocate(partial, indirect_blks, | ||
895 | maxblocks, blocks_to_boundary); | ||
896 | /* | ||
746 | * Block out ext3_truncate while we alter the tree | 897 | * Block out ext3_truncate while we alter the tree |
747 | */ | 898 | */ |
748 | err = ext3_alloc_branch(handle, inode, left, goal, | 899 | err = ext3_alloc_branch(handle, inode, indirect_blks, &count, goal, |
749 | offsets + (partial - chain), partial); | 900 | offsets + (partial - chain), partial); |
750 | 901 | ||
751 | /* | 902 | /* |
@@ -756,8 +907,8 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
756 | * may need to return -EAGAIN upwards in the worst case. --sct | 907 | * may need to return -EAGAIN upwards in the worst case. --sct |
757 | */ | 908 | */ |
758 | if (!err) | 909 | if (!err) |
759 | err = ext3_splice_branch(handle, inode, iblock, chain, | 910 | err = ext3_splice_branch(handle, inode, iblock, |
760 | partial, left); | 911 | partial, indirect_blks, count); |
761 | /* | 912 | /* |
762 | * i_disksize growing is protected by truncate_mutex. Don't forget to | 913 | * i_disksize growing is protected by truncate_mutex. Don't forget to |
763 | * protect it if you're about to implement concurrent | 914 | * protect it if you're about to implement concurrent |
@@ -772,8 +923,9 @@ ext3_get_block_handle(handle_t *handle, struct inode *inode, sector_t iblock, | |||
772 | set_buffer_new(bh_result); | 923 | set_buffer_new(bh_result); |
773 | got_it: | 924 | got_it: |
774 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); | 925 | map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key)); |
775 | if (boundary) | 926 | if (blocks_to_boundary == 0) |
776 | set_buffer_boundary(bh_result); | 927 | set_buffer_boundary(bh_result); |
928 | err = count; | ||
777 | /* Clean up and exit */ | 929 | /* Clean up and exit */ |
778 | partial = chain + depth - 1; /* the whole chain */ | 930 | partial = chain + depth - 1; /* the whole chain */ |
779 | cleanup: | 931 | cleanup: |
@@ -787,34 +939,21 @@ out: | |||
787 | return err; | 939 | return err; |
788 | } | 940 | } |
789 | 941 | ||
790 | static int ext3_get_block(struct inode *inode, sector_t iblock, | ||
791 | struct buffer_head *bh_result, int create) | ||
792 | { | ||
793 | handle_t *handle = NULL; | ||
794 | int ret; | ||
795 | |||
796 | if (create) { | ||
797 | handle = ext3_journal_current_handle(); | ||
798 | J_ASSERT(handle != 0); | ||
799 | } | ||
800 | ret = ext3_get_block_handle(handle, inode, iblock, | ||
801 | bh_result, create, 1); | ||
802 | return ret; | ||
803 | } | ||
804 | |||
805 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) | 942 | #define DIO_CREDITS (EXT3_RESERVE_TRANS_BLOCKS + 32) |
806 | 943 | ||
807 | static int | 944 | static int ext3_get_block(struct inode *inode, sector_t iblock, |
808 | ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, | 945 | struct buffer_head *bh_result, int create) |
809 | unsigned long max_blocks, struct buffer_head *bh_result, | ||
810 | int create) | ||
811 | { | 946 | { |
812 | handle_t *handle = journal_current_handle(); | 947 | handle_t *handle = journal_current_handle(); |
813 | int ret = 0; | 948 | int ret = 0; |
949 | unsigned max_blocks = bh_result->b_size >> inode->i_blkbits; | ||
814 | 950 | ||
815 | if (!handle) | 951 | if (!create) |
816 | goto get_block; /* A read */ | 952 | goto get_block; /* A read */ |
817 | 953 | ||
954 | if (max_blocks == 1) | ||
955 | goto get_block; /* A single block get */ | ||
956 | |||
818 | if (handle->h_transaction->t_state == T_LOCKED) { | 957 | if (handle->h_transaction->t_state == T_LOCKED) { |
819 | /* | 958 | /* |
820 | * Huge direct-io writes can hold off commits for long | 959 | * Huge direct-io writes can hold off commits for long |
@@ -841,18 +980,22 @@ ext3_direct_io_get_blocks(struct inode *inode, sector_t iblock, | |||
841 | } | 980 | } |
842 | 981 | ||
843 | get_block: | 982 | get_block: |
844 | if (ret == 0) | 983 | if (ret == 0) { |
845 | ret = ext3_get_block_handle(handle, inode, iblock, | 984 | ret = ext3_get_blocks_handle(handle, inode, iblock, |
846 | bh_result, create, 0); | 985 | max_blocks, bh_result, create, 0); |
847 | bh_result->b_size = (1 << inode->i_blkbits); | 986 | if (ret > 0) { |
987 | bh_result->b_size = (ret << inode->i_blkbits); | ||
988 | ret = 0; | ||
989 | } | ||
990 | } | ||
848 | return ret; | 991 | return ret; |
849 | } | 992 | } |
850 | 993 | ||
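ext3_get_block() now derives the number of blocks wanted from bh_result->b_size and, when ext3_get_blocks_handle() reports a positive block count, converts that count back into a byte size before returning 0. The conversion is just shifts by i_blkbits; a trivial standalone illustration with made-up numbers (no buffer_head involved):

/* The byte<->block conversions used around ext3_get_blocks_handle():
 * the caller encodes "how many blocks I want" in b_size, the callee
 * answers with a block count that is turned back into a byte size. */
#include <stdio.h>

int main(void)
{
	unsigned blkbits = 12;			/* 4 KiB blocks (i_blkbits) */
	unsigned long b_size = 64 * 1024;	/* caller wants 64 KiB */

	unsigned long max_blocks = b_size >> blkbits;	/* 16 blocks requested */
	unsigned long mapped = 5;	/* pretend only 5 were contiguous */

	/* on success the mapped extent is reported back in bytes */
	unsigned long mapped_bytes = mapped << blkbits;

	printf("asked for %lu blocks, got %lu blocks = %lu bytes\n",
	       max_blocks, mapped, mapped_bytes);
	return 0;
}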
851 | /* | 994 | /* |
852 | * `handle' can be NULL if create is zero | 995 | * `handle' can be NULL if create is zero |
853 | */ | 996 | */ |
854 | struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | 997 | struct buffer_head *ext3_getblk(handle_t *handle, struct inode *inode, |
855 | long block, int create, int * errp) | 998 | long block, int create, int *errp) |
856 | { | 999 | { |
857 | struct buffer_head dummy; | 1000 | struct buffer_head dummy; |
858 | int fatal = 0, err; | 1001 | int fatal = 0, err; |
@@ -862,8 +1005,16 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | |||
862 | dummy.b_state = 0; | 1005 | dummy.b_state = 0; |
863 | dummy.b_blocknr = -1000; | 1006 | dummy.b_blocknr = -1000; |
864 | buffer_trace_init(&dummy.b_history); | 1007 | buffer_trace_init(&dummy.b_history); |
865 | *errp = ext3_get_block_handle(handle, inode, block, &dummy, create, 1); | 1008 | err = ext3_get_blocks_handle(handle, inode, block, 1, |
866 | if (!*errp && buffer_mapped(&dummy)) { | 1009 | &dummy, create, 1); |
1010 | if (err == 1) { | ||
1011 | err = 0; | ||
1012 | } else if (err >= 0) { | ||
1013 | WARN_ON(1); | ||
1014 | err = -EIO; | ||
1015 | } | ||
1016 | *errp = err; | ||
1017 | if (!err && buffer_mapped(&dummy)) { | ||
867 | struct buffer_head *bh; | 1018 | struct buffer_head *bh; |
868 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); | 1019 | bh = sb_getblk(inode->i_sb, dummy.b_blocknr); |
869 | if (!bh) { | 1020 | if (!bh) { |
@@ -874,17 +1025,18 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode, | |||
874 | J_ASSERT(create != 0); | 1025 | J_ASSERT(create != 0); |
875 | J_ASSERT(handle != 0); | 1026 | J_ASSERT(handle != 0); |
876 | 1027 | ||
877 | /* Now that we do not always journal data, we | 1028 | /* |
878 | should keep in mind whether this should | 1029 | * Now that we do not always journal data, we should |
879 | always journal the new buffer as metadata. | 1030 | * keep in mind whether this should always journal the |
880 | For now, regular file writes use | 1031 | * new buffer as metadata. For now, regular file |
881 | ext3_get_block instead, so it's not a | 1032 | * writes use ext3_get_block instead, so it's not a |
882 | problem. */ | 1033 | * problem. |
1034 | */ | ||
883 | lock_buffer(bh); | 1035 | lock_buffer(bh); |
884 | BUFFER_TRACE(bh, "call get_create_access"); | 1036 | BUFFER_TRACE(bh, "call get_create_access"); |
885 | fatal = ext3_journal_get_create_access(handle, bh); | 1037 | fatal = ext3_journal_get_create_access(handle, bh); |
886 | if (!fatal && !buffer_uptodate(bh)) { | 1038 | if (!fatal && !buffer_uptodate(bh)) { |
887 | memset(bh->b_data, 0, inode->i_sb->s_blocksize); | 1039 | memset(bh->b_data,0,inode->i_sb->s_blocksize); |
888 | set_buffer_uptodate(bh); | 1040 | set_buffer_uptodate(bh); |
889 | } | 1041 | } |
890 | unlock_buffer(bh); | 1042 | unlock_buffer(bh); |
@@ -906,7 +1058,7 @@ err: | |||
906 | return NULL; | 1058 | return NULL; |
907 | } | 1059 | } |
908 | 1060 | ||
909 | struct buffer_head *ext3_bread(handle_t *handle, struct inode * inode, | 1061 | struct buffer_head *ext3_bread(handle_t *handle, struct inode *inode, |
910 | int block, int create, int *err) | 1062 | int block, int create, int *err) |
911 | { | 1063 | { |
912 | struct buffer_head * bh; | 1064 | struct buffer_head * bh; |
@@ -982,9 +1134,8 @@ static int walk_page_buffers( handle_t *handle, | |||
982 | * is elevated. We'll still have enough credits for the tiny quotafile | 1134 | * is elevated. We'll still have enough credits for the tiny quotafile |
983 | * write. | 1135 | * write. |
984 | */ | 1136 | */ |
985 | 1137 | static int do_journal_get_write_access(handle_t *handle, | |
986 | static int do_journal_get_write_access(handle_t *handle, | 1138 | struct buffer_head *bh) |
987 | struct buffer_head *bh) | ||
988 | { | 1139 | { |
989 | if (!buffer_mapped(bh) || buffer_freed(bh)) | 1140 | if (!buffer_mapped(bh) || buffer_freed(bh)) |
990 | return 0; | 1141 | return 0; |
@@ -1025,8 +1176,7 @@ out: | |||
1025 | return ret; | 1176 | return ret; |
1026 | } | 1177 | } |
1027 | 1178 | ||
1028 | int | 1179 | int ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) |
1029 | ext3_journal_dirty_data(handle_t *handle, struct buffer_head *bh) | ||
1030 | { | 1180 | { |
1031 | int err = journal_dirty_data(handle, bh); | 1181 | int err = journal_dirty_data(handle, bh); |
1032 | if (err) | 1182 | if (err) |
@@ -1051,7 +1201,6 @@ static int commit_write_fn(handle_t *handle, struct buffer_head *bh) | |||
1051 | * ext3 never places buffers on inode->i_mapping->private_list. metadata | 1201 | * ext3 never places buffers on inode->i_mapping->private_list. metadata |
1052 | * buffers are managed internally. | 1202 | * buffers are managed internally. |
1053 | */ | 1203 | */ |
1054 | |||
1055 | static int ext3_ordered_commit_write(struct file *file, struct page *page, | 1204 | static int ext3_ordered_commit_write(struct file *file, struct page *page, |
1056 | unsigned from, unsigned to) | 1205 | unsigned from, unsigned to) |
1057 | { | 1206 | { |
@@ -1261,7 +1410,7 @@ static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh) | |||
1261 | * we don't need to open a transaction here. | 1410 | * we don't need to open a transaction here. |
1262 | */ | 1411 | */ |
1263 | static int ext3_ordered_writepage(struct page *page, | 1412 | static int ext3_ordered_writepage(struct page *page, |
1264 | struct writeback_control *wbc) | 1413 | struct writeback_control *wbc) |
1265 | { | 1414 | { |
1266 | struct inode *inode = page->mapping->host; | 1415 | struct inode *inode = page->mapping->host; |
1267 | struct buffer_head *page_bufs; | 1416 | struct buffer_head *page_bufs; |
@@ -1430,7 +1579,7 @@ ext3_readpages(struct file *file, struct address_space *mapping, | |||
1430 | return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); | 1579 | return mpage_readpages(mapping, pages, nr_pages, ext3_get_block); |
1431 | } | 1580 | } |
1432 | 1581 | ||
1433 | static int ext3_invalidatepage(struct page *page, unsigned long offset) | 1582 | static void ext3_invalidatepage(struct page *page, unsigned long offset) |
1434 | { | 1583 | { |
1435 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); | 1584 | journal_t *journal = EXT3_JOURNAL(page->mapping->host); |
1436 | 1585 | ||
@@ -1440,7 +1589,7 @@ static int ext3_invalidatepage(struct page *page, unsigned long offset) | |||
1440 | if (offset == 0) | 1589 | if (offset == 0) |
1441 | ClearPageChecked(page); | 1590 | ClearPageChecked(page); |
1442 | 1591 | ||
1443 | return journal_invalidatepage(journal, page, offset); | 1592 | journal_invalidatepage(journal, page, offset); |
1444 | } | 1593 | } |
1445 | 1594 | ||
1446 | static int ext3_releasepage(struct page *page, gfp_t wait) | 1595 | static int ext3_releasepage(struct page *page, gfp_t wait) |
@@ -1492,11 +1641,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb, | |||
1492 | 1641 | ||
1493 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 1642 | ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
1494 | offset, nr_segs, | 1643 | offset, nr_segs, |
1495 | ext3_direct_io_get_blocks, NULL); | 1644 | ext3_get_block, NULL); |
1496 | 1645 | ||
1497 | /* | 1646 | /* |
1498 | * Reacquire the handle: ext3_direct_io_get_block() can restart the | 1647 | * Reacquire the handle: ext3_get_block() can restart the transaction |
1499 | * transaction | ||
1500 | */ | 1648 | */ |
1501 | handle = journal_current_handle(); | 1649 | handle = journal_current_handle(); |
1502 | 1650 | ||
@@ -1752,11 +1900,8 @@ static inline int all_zeroes(__le32 *p, __le32 *q) | |||
1752 | * c) free the subtrees growing from the inode past the @chain[0]. | 1900 | * c) free the subtrees growing from the inode past the @chain[0]. |
1753 | * (no partially truncated stuff there). */ | 1901 | * (no partially truncated stuff there). */ |
1754 | 1902 | ||
1755 | static Indirect *ext3_find_shared(struct inode *inode, | 1903 | static Indirect *ext3_find_shared(struct inode *inode, int depth, |
1756 | int depth, | 1904 | int offsets[4], Indirect chain[4], __le32 *top) |
1757 | int offsets[4], | ||
1758 | Indirect chain[4], | ||
1759 | __le32 *top) | ||
1760 | { | 1905 | { |
1761 | Indirect *partial, *p; | 1906 | Indirect *partial, *p; |
1762 | int k, err; | 1907 | int k, err; |
@@ -1795,8 +1940,7 @@ static Indirect *ext3_find_shared(struct inode *inode, | |||
1795 | } | 1940 | } |
1796 | /* Writer: end */ | 1941 | /* Writer: end */ |
1797 | 1942 | ||
1798 | while(partial > p) | 1943 | while(partial > p) { |
1799 | { | ||
1800 | brelse(partial->bh); | 1944 | brelse(partial->bh); |
1801 | partial--; | 1945 | partial--; |
1802 | } | 1946 | } |
@@ -1812,10 +1956,9 @@ no_top: | |||
1812 | * We release `count' blocks on disk, but (last - first) may be greater | 1956 | * We release `count' blocks on disk, but (last - first) may be greater |
1813 | * than `count' because there can be holes in there. | 1957 | * than `count' because there can be holes in there. |
1814 | */ | 1958 | */ |
1815 | static void | 1959 | static void ext3_clear_blocks(handle_t *handle, struct inode *inode, |
1816 | ext3_clear_blocks(handle_t *handle, struct inode *inode, struct buffer_head *bh, | 1960 | struct buffer_head *bh, unsigned long block_to_free, |
1817 | unsigned long block_to_free, unsigned long count, | 1961 | unsigned long count, __le32 *first, __le32 *last) |
1818 | __le32 *first, __le32 *last) | ||
1819 | { | 1962 | { |
1820 | __le32 *p; | 1963 | __le32 *p; |
1821 | if (try_to_extend_transaction(handle, inode)) { | 1964 | if (try_to_extend_transaction(handle, inode)) { |
@@ -2076,8 +2219,7 @@ static void ext3_free_branches(handle_t *handle, struct inode *inode, | |||
2076 | * that's fine - as long as they are linked from the inode, the post-crash | 2219 | * that's fine - as long as they are linked from the inode, the post-crash |
2077 | * ext3_truncate() run will find them and release them. | 2220 | * ext3_truncate() run will find them and release them. |
2078 | */ | 2221 | */ |
2079 | 2222 | void ext3_truncate(struct inode *inode) | |
2080 | void ext3_truncate(struct inode * inode) | ||
2081 | { | 2223 | { |
2082 | handle_t *handle; | 2224 | handle_t *handle; |
2083 | struct ext3_inode_info *ei = EXT3_I(inode); | 2225 | struct ext3_inode_info *ei = EXT3_I(inode); |
@@ -2201,29 +2343,26 @@ void ext3_truncate(struct inode * inode) | |||
2201 | do_indirects: | 2343 | do_indirects: |
2202 | /* Kill the remaining (whole) subtrees */ | 2344 | /* Kill the remaining (whole) subtrees */ |
2203 | switch (offsets[0]) { | 2345 | switch (offsets[0]) { |
2204 | default: | 2346 | default: |
2205 | nr = i_data[EXT3_IND_BLOCK]; | 2347 | nr = i_data[EXT3_IND_BLOCK]; |
2206 | if (nr) { | 2348 | if (nr) { |
2207 | ext3_free_branches(handle, inode, NULL, | 2349 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 1); |
2208 | &nr, &nr+1, 1); | 2350 | i_data[EXT3_IND_BLOCK] = 0; |
2209 | i_data[EXT3_IND_BLOCK] = 0; | 2351 | } |
2210 | } | 2352 | case EXT3_IND_BLOCK: |
2211 | case EXT3_IND_BLOCK: | 2353 | nr = i_data[EXT3_DIND_BLOCK]; |
2212 | nr = i_data[EXT3_DIND_BLOCK]; | 2354 | if (nr) { |
2213 | if (nr) { | 2355 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 2); |
2214 | ext3_free_branches(handle, inode, NULL, | 2356 | i_data[EXT3_DIND_BLOCK] = 0; |
2215 | &nr, &nr+1, 2); | 2357 | } |
2216 | i_data[EXT3_DIND_BLOCK] = 0; | 2358 | case EXT3_DIND_BLOCK: |
2217 | } | 2359 | nr = i_data[EXT3_TIND_BLOCK]; |
2218 | case EXT3_DIND_BLOCK: | 2360 | if (nr) { |
2219 | nr = i_data[EXT3_TIND_BLOCK]; | 2361 | ext3_free_branches(handle, inode, NULL, &nr, &nr+1, 3); |
2220 | if (nr) { | 2362 | i_data[EXT3_TIND_BLOCK] = 0; |
2221 | ext3_free_branches(handle, inode, NULL, | 2363 | } |
2222 | &nr, &nr+1, 3); | 2364 | case EXT3_TIND_BLOCK: |
2223 | i_data[EXT3_TIND_BLOCK] = 0; | 2365 | ; |
2224 | } | ||
2225 | case EXT3_TIND_BLOCK: | ||
2226 | ; | ||
2227 | } | 2366 | } |
2228 | 2367 | ||
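The reindented do_indirects switch above keeps its original semantics: it relies on deliberate fall-through so that truncation starting at one level also frees every deeper indirect tree. A minimal userspace sketch of the same fall-through pattern, with invented level names standing in for the EXT3_*_BLOCK constants:

/* Fall-through switch in the style of ext3_truncate()'s do_indirects:
 * entering at one level also frees every deeper level. */
#include <stdio.h>

enum level { DIRECT, IND, DIND, TIND };		/* invented stand-ins */

static void kill_from(enum level first)
{
	switch (first) {
	default:			/* truncation point among direct blocks */
		printf("free single-indirect tree\n");
		/* fall through */
	case IND:
		printf("free double-indirect tree\n");
		/* fall through */
	case DIND:
		printf("free triple-indirect tree\n");
		/* fall through */
	case TIND:
		;			/* nothing deeper than triple indirect */
	}
}

int main(void)
{
	kill_from(DIRECT);	/* frees all three indirect trees */
	kill_from(DIND);	/* frees only the triple-indirect tree */
	return 0;
}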
2229 | ext3_discard_reservation(inode); | 2368 | ext3_discard_reservation(inode); |
@@ -2232,8 +2371,10 @@ do_indirects: | |||
2232 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; | 2371 | inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; |
2233 | ext3_mark_inode_dirty(handle, inode); | 2372 | ext3_mark_inode_dirty(handle, inode); |
2234 | 2373 | ||
2235 | /* In a multi-transaction truncate, we only make the final | 2374 | /* |
2236 | * transaction synchronous */ | 2375 | * In a multi-transaction truncate, we only make the final transaction |
2376 | * synchronous | ||
2377 | */ | ||
2237 | if (IS_SYNC(inode)) | 2378 | if (IS_SYNC(inode)) |
2238 | handle->h_sync = 1; | 2379 | handle->h_sync = 1; |
2239 | out_stop: | 2380 | out_stop: |
@@ -2259,20 +2400,16 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, | |||
2259 | struct ext3_group_desc * gdp; | 2400 | struct ext3_group_desc * gdp; |
2260 | 2401 | ||
2261 | 2402 | ||
2262 | if ((ino != EXT3_ROOT_INO && | 2403 | if ((ino != EXT3_ROOT_INO && ino != EXT3_JOURNAL_INO && |
2263 | ino != EXT3_JOURNAL_INO && | 2404 | ino != EXT3_RESIZE_INO && ino < EXT3_FIRST_INO(sb)) || |
2264 | ino != EXT3_RESIZE_INO && | 2405 | ino > le32_to_cpu(EXT3_SB(sb)->s_es->s_inodes_count)) { |
2265 | ino < EXT3_FIRST_INO(sb)) || | 2406 | ext3_error(sb, "ext3_get_inode_block", |
2266 | ino > le32_to_cpu( | ||
2267 | EXT3_SB(sb)->s_es->s_inodes_count)) { | ||
2268 | ext3_error (sb, "ext3_get_inode_block", | ||
2269 | "bad inode number: %lu", ino); | 2407 | "bad inode number: %lu", ino); |
2270 | return 0; | 2408 | return 0; |
2271 | } | 2409 | } |
2272 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); | 2410 | block_group = (ino - 1) / EXT3_INODES_PER_GROUP(sb); |
2273 | if (block_group >= EXT3_SB(sb)->s_groups_count) { | 2411 | if (block_group >= EXT3_SB(sb)->s_groups_count) { |
2274 | ext3_error (sb, "ext3_get_inode_block", | 2412 | ext3_error(sb,"ext3_get_inode_block","group >= groups count"); |
2275 | "group >= groups count"); | ||
2276 | return 0; | 2413 | return 0; |
2277 | } | 2414 | } |
2278 | smp_rmb(); | 2415 | smp_rmb(); |
@@ -2285,7 +2422,7 @@ static unsigned long ext3_get_inode_block(struct super_block *sb, | |||
2285 | return 0; | 2422 | return 0; |
2286 | } | 2423 | } |
2287 | 2424 | ||
2288 | gdp = (struct ext3_group_desc *) bh->b_data; | 2425 | gdp = (struct ext3_group_desc *)bh->b_data; |
2289 | /* | 2426 | /* |
2290 | * Figure out the offset within the block group inode table | 2427 | * Figure out the offset within the block group inode table |
2291 | */ | 2428 | */ |
@@ -2834,7 +2971,7 @@ err_out: | |||
2834 | 2971 | ||
2835 | 2972 | ||
2836 | /* | 2973 | /* |
2837 | * akpm: how many blocks doth make a writepage()? | 2974 | * How many blocks doth make a writepage()? |
2838 | * | 2975 | * |
2839 | * With N blocks per page, it may be: | 2976 | * With N blocks per page, it may be: |
2840 | * N data blocks | 2977 | * N data blocks |
@@ -2924,8 +3061,8 @@ ext3_reserve_inode_write(handle_t *handle, struct inode *inode, | |||
2924 | } | 3061 | } |
2925 | 3062 | ||
2926 | /* | 3063 | /* |
2927 | * akpm: What we do here is to mark the in-core inode as clean | 3064 | * What we do here is to mark the in-core inode as clean with respect to inode |
2928 | * with respect to inode dirtiness (it may still be data-dirty). | 3065 | * dirtiness (it may still be data-dirty). |
2929 | * This means that the in-core inode may be reaped by prune_icache | 3066 | * This means that the in-core inode may be reaped by prune_icache |
2930 | * without having to perform any I/O. This is a very good thing, | 3067 | * without having to perform any I/O. This is a very good thing, |
2931 | * because *any* task may call prune_icache - even ones which | 3068 | * because *any* task may call prune_icache - even ones which |
@@ -2957,7 +3094,7 @@ int ext3_mark_inode_dirty(handle_t *handle, struct inode *inode) | |||
2957 | } | 3094 | } |
2958 | 3095 | ||
2959 | /* | 3096 | /* |
2960 | * akpm: ext3_dirty_inode() is called from __mark_inode_dirty() | 3097 | * ext3_dirty_inode() is called from __mark_inode_dirty() |
2961 | * | 3098 | * |
2962 | * We're really interested in the case where a file is being extended. | 3099 | * We're really interested in the case where a file is being extended. |
2963 | * i_size has been changed by generic_commit_write() and we thus need | 3100 | * i_size has been changed by generic_commit_write() and we thus need |
@@ -2993,7 +3130,7 @@ out: | |||
2993 | return; | 3130 | return; |
2994 | } | 3131 | } |
2995 | 3132 | ||
2996 | #ifdef AKPM | 3133 | #if 0 |
2997 | /* | 3134 | /* |
2998 | * Bind an inode's backing buffer_head into this transaction, to prevent | 3135 | * Bind an inode's backing buffer_head into this transaction, to prevent |
2999 | * it from being flushed to disk early. Unlike | 3136 | * it from being flushed to disk early. Unlike |
@@ -3001,8 +3138,7 @@ out: | |||
3001 | * returns no iloc structure, so the caller needs to repeat the iloc | 3138 | * returns no iloc structure, so the caller needs to repeat the iloc |
3002 | * lookup to mark the inode dirty later. | 3139 | * lookup to mark the inode dirty later. |
3003 | */ | 3140 | */ |
3004 | static inline int | 3141 | static int ext3_pin_inode(handle_t *handle, struct inode *inode) |
3005 | ext3_pin_inode(handle_t *handle, struct inode *inode) | ||
3006 | { | 3142 | { |
3007 | struct ext3_iloc iloc; | 3143 | struct ext3_iloc iloc; |
3008 | 3144 | ||
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 86e443182de4..f8a5266ea1ff 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -1678,12 +1678,6 @@ static int ext3_fill_super (struct super_block *sb, void *data, int silent) | |||
1678 | } | 1678 | } |
1679 | 1679 | ||
1680 | if (test_opt(sb, NOBH)) { | 1680 | if (test_opt(sb, NOBH)) { |
1681 | if (sb->s_blocksize_bits != PAGE_CACHE_SHIFT) { | ||
1682 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option " | ||
1683 | "since filesystem blocksize doesn't match " | ||
1684 | "pagesize\n"); | ||
1685 | clear_opt(sbi->s_mount_opt, NOBH); | ||
1686 | } | ||
1687 | if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { | 1681 | if (!(test_opt(sb, DATA_FLAGS) == EXT3_MOUNT_WRITEBACK_DATA)) { |
1688 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " | 1682 | printk(KERN_WARNING "EXT3-fs: Ignoring nobh option - " |
1689 | "its supported only with writeback mode\n"); | 1683 | "its supported only with writeback mode\n"); |
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 297300fe81c2..404bfc9f7385 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -101,11 +101,11 @@ static int __fat_get_blocks(struct inode *inode, sector_t iblock, | |||
101 | } | 101 | } |
102 | 102 | ||
103 | static int fat_get_blocks(struct inode *inode, sector_t iblock, | 103 | static int fat_get_blocks(struct inode *inode, sector_t iblock, |
104 | unsigned long max_blocks, | ||
105 | struct buffer_head *bh_result, int create) | 104 | struct buffer_head *bh_result, int create) |
106 | { | 105 | { |
107 | struct super_block *sb = inode->i_sb; | 106 | struct super_block *sb = inode->i_sb; |
108 | int err; | 107 | int err; |
108 | unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; | ||
109 | 109 | ||
110 | err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); | 110 | err = __fat_get_blocks(inode, iblock, &max_blocks, bh_result, create); |
111 | if (err) | 111 | if (err) |
diff --git a/fs/fcntl.c b/fs/fcntl.c index 03c789560fb8..2a2479196f96 100644 --- a/fs/fcntl.c +++ b/fs/fcntl.c | |||
@@ -412,7 +412,7 @@ out: | |||
412 | 412 | ||
413 | /* Table to convert sigio signal codes into poll band bitmaps */ | 413 | /* Table to convert sigio signal codes into poll band bitmaps */ |
414 | 414 | ||
415 | static long band_table[NSIGPOLL] = { | 415 | static const long band_table[NSIGPOLL] = { |
416 | POLLIN | POLLRDNORM, /* POLL_IN */ | 416 | POLLIN | POLLRDNORM, /* POLL_IN */ |
417 | POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ | 417 | POLLOUT | POLLWRNORM | POLLWRBAND, /* POLL_OUT */ |
418 | POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ | 418 | POLLIN | POLLRDNORM | POLLMSG, /* POLL_MSG */ |
@@ -531,7 +531,7 @@ int send_sigurg(struct fown_struct *fown) | |||
531 | } | 531 | } |
532 | 532 | ||
533 | static DEFINE_RWLOCK(fasync_lock); | 533 | static DEFINE_RWLOCK(fasync_lock); |
534 | static kmem_cache_t *fasync_cache; | 534 | static kmem_cache_t *fasync_cache __read_mostly; |
535 | 535 | ||
536 | /* | 536 | /* |
537 | * fasync_helper() is used by some character device drivers (mainly mice) | 537 | * fasync_helper() is used by some character device drivers (mainly mice) |
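The fcntl changes above are purely about data placement: band_table becomes const because it is never written, and fasync_cache is tagged __read_mostly so it is grouped with other rarely written globals away from hot, frequently written data (the same annotation is applied in fs/inode.c and fs/inotify.c further down). __read_mostly is a kernel-only annotation; the sketch below merely imitates the mechanism in userspace with a GCC section attribute, and the section name and table values are illustrative assumptions.

/* Userspace imitation of __read_mostly: a GCC section attribute groups
 * rarely written globals together so they do not share cache lines
 * with frequently written data. */
#include <stdio.h>

#define my_read_mostly __attribute__((__section__(".data.read_mostly")))

/* written once at startup, read on every event afterwards */
static unsigned long event_mask my_read_mostly = 0;

/* never written at all, so it can simply be const (like band_table) */
static const long band_bits[] = { 0x1, 0x6, 0x2 };

int main(void)
{
	unsigned i;

	event_mask = 0x7;		/* the single initialisation write */
	for (i = 0; i < sizeof(band_bits) / sizeof(band_bits[0]); i++)
		if (band_bits[i] & event_mask)
			printf("entry %u selected (0x%lx)\n", i, band_bits[i]);
	return 0;
}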
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c index 39fd85b9b916..2c564701724f 100644 --- a/fs/hfs/inode.c +++ b/fs/hfs/inode.c | |||
@@ -98,17 +98,6 @@ static int hfs_releasepage(struct page *page, gfp_t mask) | |||
98 | return res ? try_to_free_buffers(page) : 0; | 98 | return res ? try_to_free_buffers(page) : 0; |
99 | } | 99 | } |
100 | 100 | ||
101 | static int hfs_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, | ||
102 | struct buffer_head *bh_result, int create) | ||
103 | { | ||
104 | int ret; | ||
105 | |||
106 | ret = hfs_get_block(inode, iblock, bh_result, create); | ||
107 | if (!ret) | ||
108 | bh_result->b_size = (1 << inode->i_blkbits); | ||
109 | return ret; | ||
110 | } | ||
111 | |||
112 | static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, | 101 | static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, |
113 | const struct iovec *iov, loff_t offset, unsigned long nr_segs) | 102 | const struct iovec *iov, loff_t offset, unsigned long nr_segs) |
114 | { | 103 | { |
@@ -116,7 +105,7 @@ static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb, | |||
116 | struct inode *inode = file->f_dentry->d_inode->i_mapping->host; | 105 | struct inode *inode = file->f_dentry->d_inode->i_mapping->host; |
117 | 106 | ||
118 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 107 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
119 | offset, nr_segs, hfs_get_blocks, NULL); | 108 | offset, nr_segs, hfs_get_block, NULL); |
120 | } | 109 | } |
121 | 110 | ||
122 | static int hfs_writepages(struct address_space *mapping, | 111 | static int hfs_writepages(struct address_space *mapping, |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 12ed2b7d046b..9fbe4d2aeece 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -93,17 +93,6 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask) | |||
93 | return res ? try_to_free_buffers(page) : 0; | 93 | return res ? try_to_free_buffers(page) : 0; |
94 | } | 94 | } |
95 | 95 | ||
96 | static int hfsplus_get_blocks(struct inode *inode, sector_t iblock, unsigned long max_blocks, | ||
97 | struct buffer_head *bh_result, int create) | ||
98 | { | ||
99 | int ret; | ||
100 | |||
101 | ret = hfsplus_get_block(inode, iblock, bh_result, create); | ||
102 | if (!ret) | ||
103 | bh_result->b_size = (1 << inode->i_blkbits); | ||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, | 96 | static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, |
108 | const struct iovec *iov, loff_t offset, unsigned long nr_segs) | 97 | const struct iovec *iov, loff_t offset, unsigned long nr_segs) |
109 | { | 98 | { |
@@ -111,7 +100,7 @@ static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb, | |||
111 | struct inode *inode = file->f_dentry->d_inode->i_mapping->host; | 100 | struct inode *inode = file->f_dentry->d_inode->i_mapping->host; |
112 | 101 | ||
113 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 102 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
114 | offset, nr_segs, hfsplus_get_blocks, NULL); | 103 | offset, nr_segs, hfsplus_get_block, NULL); |
115 | } | 104 | } |
116 | 105 | ||
117 | static int hfsplus_writepages(struct address_space *mapping, | 106 | static int hfsplus_writepages(struct address_space *mapping, |
diff --git a/fs/inode.c b/fs/inode.c index 85da11044adc..1fddf2803af8 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -56,8 +56,8 @@ | |||
56 | #define I_HASHBITS i_hash_shift | 56 | #define I_HASHBITS i_hash_shift |
57 | #define I_HASHMASK i_hash_mask | 57 | #define I_HASHMASK i_hash_mask |
58 | 58 | ||
59 | static unsigned int i_hash_mask; | 59 | static unsigned int i_hash_mask __read_mostly; |
60 | static unsigned int i_hash_shift; | 60 | static unsigned int i_hash_shift __read_mostly; |
61 | 61 | ||
62 | /* | 62 | /* |
63 | * Each inode can be on two separate lists. One is | 63 | * Each inode can be on two separate lists. One is |
@@ -73,7 +73,7 @@ static unsigned int i_hash_shift; | |||
73 | 73 | ||
74 | LIST_HEAD(inode_in_use); | 74 | LIST_HEAD(inode_in_use); |
75 | LIST_HEAD(inode_unused); | 75 | LIST_HEAD(inode_unused); |
76 | static struct hlist_head *inode_hashtable; | 76 | static struct hlist_head *inode_hashtable __read_mostly; |
77 | 77 | ||
78 | /* | 78 | /* |
79 | * A simple spinlock to protect the list manipulations. | 79 | * A simple spinlock to protect the list manipulations. |
@@ -98,7 +98,7 @@ static DEFINE_MUTEX(iprune_mutex); | |||
98 | */ | 98 | */ |
99 | struct inodes_stat_t inodes_stat; | 99 | struct inodes_stat_t inodes_stat; |
100 | 100 | ||
101 | static kmem_cache_t * inode_cachep; | 101 | static kmem_cache_t * inode_cachep __read_mostly; |
102 | 102 | ||
103 | static struct inode *alloc_inode(struct super_block *sb) | 103 | static struct inode *alloc_inode(struct super_block *sb) |
104 | { | 104 | { |
diff --git a/fs/inotify.c b/fs/inotify.c index a61e93e17853..f48a3dae0712 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
@@ -39,15 +39,15 @@ | |||
39 | 39 | ||
40 | static atomic_t inotify_cookie; | 40 | static atomic_t inotify_cookie; |
41 | 41 | ||
42 | static kmem_cache_t *watch_cachep; | 42 | static kmem_cache_t *watch_cachep __read_mostly; |
43 | static kmem_cache_t *event_cachep; | 43 | static kmem_cache_t *event_cachep __read_mostly; |
44 | 44 | ||
45 | static struct vfsmount *inotify_mnt; | 45 | static struct vfsmount *inotify_mnt __read_mostly; |
46 | 46 | ||
47 | /* these are configurable via /proc/sys/fs/inotify/ */ | 47 | /* these are configurable via /proc/sys/fs/inotify/ */ |
48 | int inotify_max_user_instances; | 48 | int inotify_max_user_instances __read_mostly; |
49 | int inotify_max_user_watches; | 49 | int inotify_max_user_watches __read_mostly; |
50 | int inotify_max_queued_events; | 50 | int inotify_max_queued_events __read_mostly; |
51 | 51 | ||
52 | /* | 52 | /* |
53 | * Lock ordering: | 53 | * Lock ordering: |
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index ada31fa272e3..c609f5034fcd 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c | |||
@@ -1873,16 +1873,15 @@ zap_buffer_unlocked: | |||
1873 | } | 1873 | } |
1874 | 1874 | ||
1875 | /** | 1875 | /** |
1876 | * int journal_invalidatepage() | 1876 | * void journal_invalidatepage() |
1877 | * @journal: journal to use for flush... | 1877 | * @journal: journal to use for flush... |
1878 | * @page: page to flush | 1878 | * @page: page to flush |
1879 | * @offset: length of page to invalidate. | 1879 | * @offset: length of page to invalidate. |
1880 | * | 1880 | * |
1881 | * Reap page buffers containing data after offset in page. | 1881 | * Reap page buffers containing data after offset in page. |
1882 | * | 1882 | * |
1883 | * Return non-zero if the page's buffers were successfully reaped. | ||
1884 | */ | 1883 | */ |
1885 | int journal_invalidatepage(journal_t *journal, | 1884 | void journal_invalidatepage(journal_t *journal, |
1886 | struct page *page, | 1885 | struct page *page, |
1887 | unsigned long offset) | 1886 | unsigned long offset) |
1888 | { | 1887 | { |
@@ -1893,7 +1892,7 @@ int journal_invalidatepage(journal_t *journal, | |||
1893 | if (!PageLocked(page)) | 1892 | if (!PageLocked(page)) |
1894 | BUG(); | 1893 | BUG(); |
1895 | if (!page_has_buffers(page)) | 1894 | if (!page_has_buffers(page)) |
1896 | return 1; | 1895 | return; |
1897 | 1896 | ||
1898 | /* We will potentially be playing with lists other than just the | 1897 | /* We will potentially be playing with lists other than just the |
1899 | * data lists (especially for journaled data mode), so be | 1898 | * data lists (especially for journaled data mode), so be |
@@ -1916,11 +1915,9 @@ int journal_invalidatepage(journal_t *journal, | |||
1916 | } while (bh != head); | 1915 | } while (bh != head); |
1917 | 1916 | ||
1918 | if (!offset) { | 1917 | if (!offset) { |
1919 | if (!may_free || !try_to_free_buffers(page)) | 1918 | if (may_free && try_to_free_buffers(page)) |
1920 | return 0; | 1919 | J_ASSERT(!page_has_buffers(page)); |
1921 | J_ASSERT(!page_has_buffers(page)); | ||
1922 | } | 1920 | } |
1923 | return 1; | ||
1924 | } | 1921 | } |
1925 | 1922 | ||
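journal_invalidatepage() now returns void because no caller acted on its return value; the condition it used to report (buffers reaped when offset is zero) is checked with an assertion inside the function instead. The following is a generic userspace sketch of that refactoring pattern, not jbd code; page_model and the helper names are invented.

/* Pattern used for journal_invalidatepage(): a status return nobody
 * checked is dropped, and the condition it encoded becomes an
 * assertion at the point where it must hold. */
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct page_model {
	void *buffers;		/* NULL once the buffers are reaped */
};

static int try_to_free_buffers(struct page_model *pg)
{
	free(pg->buffers);
	pg->buffers = NULL;
	return 1;		/* freed successfully */
}

/* Before: returned non-zero if the buffers were reaped.
 * After:  returns nothing; the postcondition is asserted instead. */
static void invalidatepage(struct page_model *pg, unsigned long offset)
{
	if (offset == 0) {
		if (try_to_free_buffers(pg))
			assert(pg->buffers == NULL);
	}
}

int main(void)
{
	struct page_model pg = { .buffers = malloc(64) };

	invalidatepage(&pg, 0);		/* caller no longer checks a result */
	printf("buffers reaped: %s\n", pg.buffers == NULL ? "yes" : "no");
	return 0;
}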
1926 | /* | 1923 | /* |
diff --git a/fs/jffs2/compr_zlib.c b/fs/jffs2/compr_zlib.c index 4db8be8e90cc..5c63e0cdcf4c 100644 --- a/fs/jffs2/compr_zlib.c +++ b/fs/jffs2/compr_zlib.c | |||
@@ -33,13 +33,14 @@ | |||
33 | */ | 33 | */ |
34 | #define STREAM_END_SPACE 12 | 34 | #define STREAM_END_SPACE 12 |
35 | 35 | ||
36 | static DECLARE_MUTEX(deflate_sem); | 36 | static DEFINE_MUTEX(deflate_mutex); |
37 | static DECLARE_MUTEX(inflate_sem); | 37 | static DEFINE_MUTEX(inflate_mutex); |
38 | static z_stream inf_strm, def_strm; | 38 | static z_stream inf_strm, def_strm; |
39 | 39 | ||
40 | #ifdef __KERNEL__ /* Linux-only */ | 40 | #ifdef __KERNEL__ /* Linux-only */ |
41 | #include <linux/vmalloc.h> | 41 | #include <linux/vmalloc.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/mutex.h> | ||
43 | 44 | ||
44 | static int __init alloc_workspaces(void) | 45 | static int __init alloc_workspaces(void) |
45 | { | 46 | { |
@@ -79,11 +80,11 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
79 | if (*dstlen <= STREAM_END_SPACE) | 80 | if (*dstlen <= STREAM_END_SPACE) |
80 | return -1; | 81 | return -1; |
81 | 82 | ||
82 | down(&deflate_sem); | 83 | mutex_lock(&deflate_mutex); |
83 | 84 | ||
84 | if (Z_OK != zlib_deflateInit(&def_strm, 3)) { | 85 | if (Z_OK != zlib_deflateInit(&def_strm, 3)) { |
85 | printk(KERN_WARNING "deflateInit failed\n"); | 86 | printk(KERN_WARNING "deflateInit failed\n"); |
86 | up(&deflate_sem); | 87 | mutex_unlock(&deflate_mutex); |
87 | return -1; | 88 | return -1; |
88 | } | 89 | } |
89 | 90 | ||
@@ -104,7 +105,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
104 | if (ret != Z_OK) { | 105 | if (ret != Z_OK) { |
105 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); | 106 | D1(printk(KERN_DEBUG "deflate in loop returned %d\n", ret)); |
106 | zlib_deflateEnd(&def_strm); | 107 | zlib_deflateEnd(&def_strm); |
107 | up(&deflate_sem); | 108 | mutex_unlock(&deflate_mutex); |
108 | return -1; | 109 | return -1; |
109 | } | 110 | } |
110 | } | 111 | } |
@@ -133,7 +134,7 @@ static int jffs2_zlib_compress(unsigned char *data_in, | |||
133 | *sourcelen = def_strm.total_in; | 134 | *sourcelen = def_strm.total_in; |
134 | ret = 0; | 135 | ret = 0; |
135 | out: | 136 | out: |
136 | up(&deflate_sem); | 137 | mutex_unlock(&deflate_mutex); |
137 | return ret; | 138 | return ret; |
138 | } | 139 | } |
139 | 140 | ||
@@ -145,7 +146,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
145 | int ret; | 146 | int ret; |
146 | int wbits = MAX_WBITS; | 147 | int wbits = MAX_WBITS; |
147 | 148 | ||
148 | down(&inflate_sem); | 149 | mutex_lock(&inflate_mutex); |
149 | 150 | ||
150 | inf_strm.next_in = data_in; | 151 | inf_strm.next_in = data_in; |
151 | inf_strm.avail_in = srclen; | 152 | inf_strm.avail_in = srclen; |
@@ -173,7 +174,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
173 | 174 | ||
174 | if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { | 175 | if (Z_OK != zlib_inflateInit2(&inf_strm, wbits)) { |
175 | printk(KERN_WARNING "inflateInit failed\n"); | 176 | printk(KERN_WARNING "inflateInit failed\n"); |
176 | up(&inflate_sem); | 177 | mutex_unlock(&inflate_mutex); |
177 | return 1; | 178 | return 1; |
178 | } | 179 | } |
179 | 180 | ||
@@ -183,7 +184,7 @@ static int jffs2_zlib_decompress(unsigned char *data_in, | |||
183 | printk(KERN_NOTICE "inflate returned %d\n", ret); | 184 | printk(KERN_NOTICE "inflate returned %d\n", ret); |
184 | } | 185 | } |
185 | zlib_inflateEnd(&inf_strm); | 186 | zlib_inflateEnd(&inf_strm); |
186 | up(&inflate_sem); | 187 | mutex_unlock(&inflate_mutex); |
187 | return 0; | 188 | return 0; |
188 | } | 189 | } |
189 | 190 | ||
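Here and in jfs_logmgr.c below, semaphores that were only ever used as binary locks are converted to mutexes: DEFINE_MUTEX plus mutex_lock()/mutex_unlock(). The userspace analogue is a statically initialised pthread mutex; the sketch below guards one shared "workspace" the way deflate_mutex guards the shared z_stream, with all names invented. A mutex states the single-owner intent explicitly, which is what these locks always were.

/* Userspace analogue of DEFINE_MUTEX + mutex_lock/mutex_unlock:
 * one statically initialised mutex serialises all users of a single
 * shared workspace. Build with: cc -pthread example.c */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t workspace_mutex = PTHREAD_MUTEX_INITIALIZER;
static long shared_workspace;		/* stands in for the shared z_stream */

static void *compress_job(void *arg)
{
	long id = (intptr_t)arg;

	pthread_mutex_lock(&workspace_mutex);	/* was down(&deflate_sem) */
	shared_workspace = id;			/* exclusive use of the state */
	printf("job %ld using workspace\n", shared_workspace);
	pthread_mutex_unlock(&workspace_mutex);	/* was up(&deflate_sem) */
	return NULL;
}

int main(void)
{
	pthread_t t[3];
	long i;

	for (i = 0; i < 3; i++)
		pthread_create(&t[i], NULL, compress_job, (void *)(intptr_t)i);
	for (i = 0; i < 3; i++)
		pthread_join(t[i], NULL);
	return 0;
}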
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 51a5fed90cca..04eb78f1252e 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c | |||
@@ -258,7 +258,8 @@ jfs_get_blocks(struct inode *ip, sector_t lblock, unsigned long max_blocks, | |||
258 | static int jfs_get_block(struct inode *ip, sector_t lblock, | 258 | static int jfs_get_block(struct inode *ip, sector_t lblock, |
259 | struct buffer_head *bh_result, int create) | 259 | struct buffer_head *bh_result, int create) |
260 | { | 260 | { |
261 | return jfs_get_blocks(ip, lblock, 1, bh_result, create); | 261 | return jfs_get_blocks(ip, lblock, bh_result->b_size >> ip->i_blkbits, |
262 | bh_result, create); | ||
262 | } | 263 | } |
263 | 264 | ||
264 | static int jfs_writepage(struct page *page, struct writeback_control *wbc) | 265 | static int jfs_writepage(struct page *page, struct writeback_control *wbc) |
@@ -301,7 +302,7 @@ static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb, | |||
301 | struct inode *inode = file->f_mapping->host; | 302 | struct inode *inode = file->f_mapping->host; |
302 | 303 | ||
303 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, | 304 | return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov, |
304 | offset, nr_segs, jfs_get_blocks, NULL); | 305 | offset, nr_segs, jfs_get_block, NULL); |
305 | } | 306 | } |
306 | 307 | ||
307 | struct address_space_operations jfs_aops = { | 308 | struct address_space_operations jfs_aops = { |
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 0b348b13b551..3315f0b1fbc0 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c | |||
@@ -69,6 +69,7 @@ | |||
69 | #include <linux/bio.h> | 69 | #include <linux/bio.h> |
70 | #include <linux/suspend.h> | 70 | #include <linux/suspend.h> |
71 | #include <linux/delay.h> | 71 | #include <linux/delay.h> |
72 | #include <linux/mutex.h> | ||
72 | #include "jfs_incore.h" | 73 | #include "jfs_incore.h" |
73 | #include "jfs_filsys.h" | 74 | #include "jfs_filsys.h" |
74 | #include "jfs_metapage.h" | 75 | #include "jfs_metapage.h" |
@@ -165,7 +166,7 @@ do { \ | |||
165 | */ | 166 | */ |
166 | static LIST_HEAD(jfs_external_logs); | 167 | static LIST_HEAD(jfs_external_logs); |
167 | static struct jfs_log *dummy_log = NULL; | 168 | static struct jfs_log *dummy_log = NULL; |
168 | static DECLARE_MUTEX(jfs_log_sem); | 169 | static DEFINE_MUTEX(jfs_log_mutex); |
169 | 170 | ||
170 | /* | 171 | /* |
171 | * forward references | 172 | * forward references |
@@ -1085,20 +1086,20 @@ int lmLogOpen(struct super_block *sb) | |||
1085 | if (sbi->mntflag & JFS_INLINELOG) | 1086 | if (sbi->mntflag & JFS_INLINELOG) |
1086 | return open_inline_log(sb); | 1087 | return open_inline_log(sb); |
1087 | 1088 | ||
1088 | down(&jfs_log_sem); | 1089 | mutex_lock(&jfs_log_mutex); |
1089 | list_for_each_entry(log, &jfs_external_logs, journal_list) { | 1090 | list_for_each_entry(log, &jfs_external_logs, journal_list) { |
1090 | if (log->bdev->bd_dev == sbi->logdev) { | 1091 | if (log->bdev->bd_dev == sbi->logdev) { |
1091 | if (memcmp(log->uuid, sbi->loguuid, | 1092 | if (memcmp(log->uuid, sbi->loguuid, |
1092 | sizeof(log->uuid))) { | 1093 | sizeof(log->uuid))) { |
1093 | jfs_warn("wrong uuid on JFS journal\n"); | 1094 | jfs_warn("wrong uuid on JFS journal\n"); |
1094 | up(&jfs_log_sem); | 1095 | mutex_unlock(&jfs_log_mutex); |
1095 | return -EINVAL; | 1096 | return -EINVAL; |
1096 | } | 1097 | } |
1097 | /* | 1098 | /* |
1098 | * add file system to log active file system list | 1099 | * add file system to log active file system list |
1099 | */ | 1100 | */ |
1100 | if ((rc = lmLogFileSystem(log, sbi, 1))) { | 1101 | if ((rc = lmLogFileSystem(log, sbi, 1))) { |
1101 | up(&jfs_log_sem); | 1102 | mutex_unlock(&jfs_log_mutex); |
1102 | return rc; | 1103 | return rc; |
1103 | } | 1104 | } |
1104 | goto journal_found; | 1105 | goto journal_found; |
@@ -1106,7 +1107,7 @@ int lmLogOpen(struct super_block *sb) | |||
1106 | } | 1107 | } |
1107 | 1108 | ||
1108 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { | 1109 | if (!(log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL))) { |
1109 | up(&jfs_log_sem); | 1110 | mutex_unlock(&jfs_log_mutex); |
1110 | return -ENOMEM; | 1111 | return -ENOMEM; |
1111 | } | 1112 | } |
1112 | INIT_LIST_HEAD(&log->sb_list); | 1113 | INIT_LIST_HEAD(&log->sb_list); |
@@ -1151,7 +1152,7 @@ journal_found: | |||
1151 | sbi->log = log; | 1152 | sbi->log = log; |
1152 | LOG_UNLOCK(log); | 1153 | LOG_UNLOCK(log); |
1153 | 1154 | ||
1154 | up(&jfs_log_sem); | 1155 | mutex_unlock(&jfs_log_mutex); |
1155 | return 0; | 1156 | return 0; |
1156 | 1157 | ||
1157 | /* | 1158 | /* |
@@ -1168,7 +1169,7 @@ journal_found: | |||
1168 | blkdev_put(bdev); | 1169 | blkdev_put(bdev); |
1169 | 1170 | ||
1170 | free: /* free log descriptor */ | 1171 | free: /* free log descriptor */ |
1171 | up(&jfs_log_sem); | 1172 | mutex_unlock(&jfs_log_mutex); |
1172 | kfree(log); | 1173 | kfree(log); |
1173 | 1174 | ||
1174 | jfs_warn("lmLogOpen: exit(%d)", rc); | 1175 | jfs_warn("lmLogOpen: exit(%d)", rc); |
@@ -1212,11 +1213,11 @@ static int open_dummy_log(struct super_block *sb) | |||
1212 | { | 1213 | { |
1213 | int rc; | 1214 | int rc; |
1214 | 1215 | ||
1215 | down(&jfs_log_sem); | 1216 | mutex_lock(&jfs_log_mutex); |
1216 | if (!dummy_log) { | 1217 | if (!dummy_log) { |
1217 | dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); | 1218 | dummy_log = kzalloc(sizeof(struct jfs_log), GFP_KERNEL); |
1218 | if (!dummy_log) { | 1219 | if (!dummy_log) { |
1219 | up(&jfs_log_sem); | 1220 | mutex_unlock(&jfs_log_mutex); |
1220 | return -ENOMEM; | 1221 | return -ENOMEM; |
1221 | } | 1222 | } |
1222 | INIT_LIST_HEAD(&dummy_log->sb_list); | 1223 | INIT_LIST_HEAD(&dummy_log->sb_list); |
@@ -1229,7 +1230,7 @@ static int open_dummy_log(struct super_block *sb) | |||
1229 | if (rc) { | 1230 | if (rc) { |
1230 | kfree(dummy_log); | 1231 | kfree(dummy_log); |
1231 | dummy_log = NULL; | 1232 | dummy_log = NULL; |
1232 | up(&jfs_log_sem); | 1233 | mutex_unlock(&jfs_log_mutex); |
1233 | return rc; | 1234 | return rc; |
1234 | } | 1235 | } |
1235 | } | 1236 | } |
@@ -1238,7 +1239,7 @@ static int open_dummy_log(struct super_block *sb) | |||
1238 | list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); | 1239 | list_add(&JFS_SBI(sb)->log_list, &dummy_log->sb_list); |
1239 | JFS_SBI(sb)->log = dummy_log; | 1240 | JFS_SBI(sb)->log = dummy_log; |
1240 | LOG_UNLOCK(dummy_log); | 1241 | LOG_UNLOCK(dummy_log); |
1241 | up(&jfs_log_sem); | 1242 | mutex_unlock(&jfs_log_mutex); |
1242 | 1243 | ||
1243 | return 0; | 1244 | return 0; |
1244 | } | 1245 | } |
@@ -1466,7 +1467,7 @@ int lmLogClose(struct super_block *sb) | |||
1466 | 1467 | ||
1467 | jfs_info("lmLogClose: log:0x%p", log); | 1468 | jfs_info("lmLogClose: log:0x%p", log); |
1468 | 1469 | ||
1469 | down(&jfs_log_sem); | 1470 | mutex_lock(&jfs_log_mutex); |
1470 | LOG_LOCK(log); | 1471 | LOG_LOCK(log); |
1471 | list_del(&sbi->log_list); | 1472 | list_del(&sbi->log_list); |
1472 | LOG_UNLOCK(log); | 1473 | LOG_UNLOCK(log); |
@@ -1516,7 +1517,7 @@ int lmLogClose(struct super_block *sb) | |||
1516 | kfree(log); | 1517 | kfree(log); |
1517 | 1518 | ||
1518 | out: | 1519 | out: |
1519 | up(&jfs_log_sem); | 1520 | mutex_unlock(&jfs_log_mutex); |
1520 | jfs_info("lmLogClose: exit(%d)", rc); | 1521 | jfs_info("lmLogClose: exit(%d)", rc); |
1521 | return rc; | 1522 | return rc; |
1522 | } | 1523 | } |
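The jfs_log_sem change above follows the standard semaphore-to-mutex conversion used throughout this series: a binary semaphore that only ever protected a critical section becomes a struct mutex, with down()/up() replaced by mutex_lock()/mutex_unlock(). A minimal before/after sketch with illustrative names (foo_sem/foo_mutex are not from jfs):

    /* before: binary semaphore used as a sleeping lock */
    static DECLARE_MUTEX(foo_sem);          /* <asm/semaphore.h> */

    down(&foo_sem);
    /* ... critical section ... */
    up(&foo_sem);

    /* after: a real mutex, with stricter debug checking */
    static DEFINE_MUTEX(foo_mutex);         /* <linux/mutex.h> */

    mutex_lock(&foo_mutex);
    /* ... critical section ... */
    mutex_unlock(&foo_mutex);

A mutex must be released by the task that acquired it, which is why the lockd_start case further down is converted to a completion instead.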
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c index 5fbaeaadccd3..f28696f235c4 100644 --- a/fs/jfs/jfs_metapage.c +++ b/fs/jfs/jfs_metapage.c | |||
@@ -220,8 +220,8 @@ int __init metapage_init(void) | |||
220 | if (metapage_cache == NULL) | 220 | if (metapage_cache == NULL) |
221 | return -ENOMEM; | 221 | return -ENOMEM; |
222 | 222 | ||
223 | metapage_mempool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab, | 223 | metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES, |
224 | mempool_free_slab, metapage_cache); | 224 | metapage_cache); |
225 | 225 | ||
226 | if (metapage_mempool == NULL) { | 226 | if (metapage_mempool == NULL) { |
227 | kmem_cache_destroy(metapage_cache); | 227 | kmem_cache_destroy(metapage_cache); |
@@ -578,14 +578,13 @@ static int metapage_releasepage(struct page *page, gfp_t gfp_mask) | |||
578 | return 0; | 578 | return 0; |
579 | } | 579 | } |
580 | 580 | ||
581 | static int metapage_invalidatepage(struct page *page, unsigned long offset) | 581 | static void metapage_invalidatepage(struct page *page, unsigned long offset) |
582 | { | 582 | { |
583 | BUG_ON(offset); | 583 | BUG_ON(offset); |
584 | 584 | ||
585 | if (PageWriteback(page)) | 585 | BUG_ON(PageWriteback(page)); |
586 | return 0; | ||
587 | 586 | ||
588 | return metapage_releasepage(page, 0); | 587 | metapage_releasepage(page, 0); |
589 | } | 588 | } |
590 | 589 | ||
591 | struct address_space_operations jfs_metapage_aops = { | 590 | struct address_space_operations jfs_metapage_aops = { |
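Two independent API updates in jfs_metapage.c: mempool_create_slab_pool() is shorthand for the common slab-backed mempool, and ->invalidatepage now returns void, so metapage_invalidatepage() just asserts its preconditions and does the work. The two mempool forms are equivalent (pool is an illustrative variable; the real code assigns metapage_mempool):

    /* old, open-coded slab-backed mempool */
    pool = mempool_create(METAPOOL_MIN_PAGES, mempool_alloc_slab,
                          mempool_free_slab, metapage_cache);

    /* new helper, same alloc/free callbacks underneath */
    pool = mempool_create_slab_pool(METAPOOL_MIN_PAGES, metapage_cache);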
diff --git a/fs/lockd/host.c b/fs/lockd/host.c index 112ebf8b8dfe..729ac427d359 100644 --- a/fs/lockd/host.c +++ b/fs/lockd/host.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/sunrpc/svc.h> | 16 | #include <linux/sunrpc/svc.h> |
17 | #include <linux/lockd/lockd.h> | 17 | #include <linux/lockd/lockd.h> |
18 | #include <linux/lockd/sm_inter.h> | 18 | #include <linux/lockd/sm_inter.h> |
19 | #include <linux/mutex.h> | ||
19 | 20 | ||
20 | 21 | ||
21 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE | 22 | #define NLMDBG_FACILITY NLMDBG_HOSTCACHE |
@@ -30,7 +31,7 @@ | |||
30 | static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; | 31 | static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH]; |
31 | static unsigned long next_gc; | 32 | static unsigned long next_gc; |
32 | static int nrhosts; | 33 | static int nrhosts; |
33 | static DECLARE_MUTEX(nlm_host_sema); | 34 | static DEFINE_MUTEX(nlm_host_mutex); |
34 | 35 | ||
35 | 36 | ||
36 | static void nlm_gc_hosts(void); | 37 | static void nlm_gc_hosts(void); |
@@ -71,7 +72,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, | |||
71 | hash = NLM_ADDRHASH(sin->sin_addr.s_addr); | 72 | hash = NLM_ADDRHASH(sin->sin_addr.s_addr); |
72 | 73 | ||
73 | /* Lock hash table */ | 74 | /* Lock hash table */ |
74 | down(&nlm_host_sema); | 75 | mutex_lock(&nlm_host_mutex); |
75 | 76 | ||
76 | if (time_after_eq(jiffies, next_gc)) | 77 | if (time_after_eq(jiffies, next_gc)) |
77 | nlm_gc_hosts(); | 78 | nlm_gc_hosts(); |
@@ -91,7 +92,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, | |||
91 | nlm_hosts[hash] = host; | 92 | nlm_hosts[hash] = host; |
92 | } | 93 | } |
93 | nlm_get_host(host); | 94 | nlm_get_host(host); |
94 | up(&nlm_host_sema); | 95 | mutex_unlock(&nlm_host_mutex); |
95 | return host; | 96 | return host; |
96 | } | 97 | } |
97 | } | 98 | } |
@@ -130,7 +131,7 @@ nlm_lookup_host(int server, struct sockaddr_in *sin, | |||
130 | next_gc = 0; | 131 | next_gc = 0; |
131 | 132 | ||
132 | nohost: | 133 | nohost: |
133 | up(&nlm_host_sema); | 134 | mutex_unlock(&nlm_host_mutex); |
134 | return host; | 135 | return host; |
135 | } | 136 | } |
136 | 137 | ||
@@ -141,19 +142,19 @@ nlm_find_client(void) | |||
141 | * and return it | 142 | * and return it |
142 | */ | 143 | */ |
143 | int hash; | 144 | int hash; |
144 | down(&nlm_host_sema); | 145 | mutex_lock(&nlm_host_mutex); |
145 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { | 146 | for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) { |
146 | struct nlm_host *host, **hp; | 147 | struct nlm_host *host, **hp; |
147 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { | 148 | for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) { |
148 | if (host->h_server && | 149 | if (host->h_server && |
149 | host->h_killed == 0) { | 150 | host->h_killed == 0) { |
150 | nlm_get_host(host); | 151 | nlm_get_host(host); |
151 | up(&nlm_host_sema); | 152 | mutex_unlock(&nlm_host_mutex); |
152 | return host; | 153 | return host; |
153 | } | 154 | } |
154 | } | 155 | } |
155 | } | 156 | } |
156 | up(&nlm_host_sema); | 157 | mutex_unlock(&nlm_host_mutex); |
157 | return NULL; | 158 | return NULL; |
158 | } | 159 | } |
159 | 160 | ||
@@ -265,7 +266,7 @@ nlm_shutdown_hosts(void) | |||
265 | int i; | 266 | int i; |
266 | 267 | ||
267 | dprintk("lockd: shutting down host module\n"); | 268 | dprintk("lockd: shutting down host module\n"); |
268 | down(&nlm_host_sema); | 269 | mutex_lock(&nlm_host_mutex); |
269 | 270 | ||
270 | /* First, make all hosts eligible for gc */ | 271 | /* First, make all hosts eligible for gc */ |
271 | dprintk("lockd: nuking all hosts...\n"); | 272 | dprintk("lockd: nuking all hosts...\n"); |
@@ -276,7 +277,7 @@ nlm_shutdown_hosts(void) | |||
276 | 277 | ||
277 | /* Then, perform a garbage collection pass */ | 278 | /* Then, perform a garbage collection pass */ |
278 | nlm_gc_hosts(); | 279 | nlm_gc_hosts(); |
279 | up(&nlm_host_sema); | 280 | mutex_unlock(&nlm_host_mutex); |
280 | 281 | ||
281 | /* complain if any hosts are left */ | 282 | /* complain if any hosts are left */ |
282 | if (nrhosts) { | 283 | if (nrhosts) { |
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index 5e85bde6c123..fd56c8872f34 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/smp.h> | 26 | #include <linux/smp.h> |
27 | #include <linux/smp_lock.h> | 27 | #include <linux/smp_lock.h> |
28 | #include <linux/mutex.h> | ||
28 | 29 | ||
29 | #include <linux/sunrpc/types.h> | 30 | #include <linux/sunrpc/types.h> |
30 | #include <linux/sunrpc/stats.h> | 31 | #include <linux/sunrpc/stats.h> |
@@ -43,13 +44,13 @@ static struct svc_program nlmsvc_program; | |||
43 | struct nlmsvc_binding * nlmsvc_ops; | 44 | struct nlmsvc_binding * nlmsvc_ops; |
44 | EXPORT_SYMBOL(nlmsvc_ops); | 45 | EXPORT_SYMBOL(nlmsvc_ops); |
45 | 46 | ||
46 | static DECLARE_MUTEX(nlmsvc_sema); | 47 | static DEFINE_MUTEX(nlmsvc_mutex); |
47 | static unsigned int nlmsvc_users; | 48 | static unsigned int nlmsvc_users; |
48 | static pid_t nlmsvc_pid; | 49 | static pid_t nlmsvc_pid; |
49 | int nlmsvc_grace_period; | 50 | int nlmsvc_grace_period; |
50 | unsigned long nlmsvc_timeout; | 51 | unsigned long nlmsvc_timeout; |
51 | 52 | ||
52 | static DECLARE_MUTEX_LOCKED(lockd_start); | 53 | static DECLARE_COMPLETION(lockd_start_done); |
53 | static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); | 54 | static DECLARE_WAIT_QUEUE_HEAD(lockd_exit); |
54 | 55 | ||
55 | /* | 56 | /* |
@@ -112,7 +113,7 @@ lockd(struct svc_rqst *rqstp) | |||
112 | * Let our maker know we're running. | 113 | * Let our maker know we're running. |
113 | */ | 114 | */ |
114 | nlmsvc_pid = current->pid; | 115 | nlmsvc_pid = current->pid; |
115 | up(&lockd_start); | 116 | complete(&lockd_start_done); |
116 | 117 | ||
117 | daemonize("lockd"); | 118 | daemonize("lockd"); |
118 | 119 | ||
@@ -215,7 +216,7 @@ lockd_up(void) | |||
215 | struct svc_serv * serv; | 216 | struct svc_serv * serv; |
216 | int error = 0; | 217 | int error = 0; |
217 | 218 | ||
218 | down(&nlmsvc_sema); | 219 | mutex_lock(&nlmsvc_mutex); |
219 | /* | 220 | /* |
220 | * Unconditionally increment the user count ... this is | 221 | * Unconditionally increment the user count ... this is |
221 | * the number of clients who _want_ a lockd process. | 222 | * the number of clients who _want_ a lockd process. |
@@ -263,7 +264,7 @@ lockd_up(void) | |||
263 | "lockd_up: create thread failed, error=%d\n", error); | 264 | "lockd_up: create thread failed, error=%d\n", error); |
264 | goto destroy_and_out; | 265 | goto destroy_and_out; |
265 | } | 266 | } |
266 | down(&lockd_start); | 267 | wait_for_completion(&lockd_start_done); |
267 | 268 | ||
268 | /* | 269 | /* |
269 | * Note: svc_serv structures have an initial use count of 1, | 270 | * Note: svc_serv structures have an initial use count of 1, |
@@ -272,7 +273,7 @@ lockd_up(void) | |||
272 | destroy_and_out: | 273 | destroy_and_out: |
273 | svc_destroy(serv); | 274 | svc_destroy(serv); |
274 | out: | 275 | out: |
275 | up(&nlmsvc_sema); | 276 | mutex_unlock(&nlmsvc_mutex); |
276 | return error; | 277 | return error; |
277 | } | 278 | } |
278 | EXPORT_SYMBOL(lockd_up); | 279 | EXPORT_SYMBOL(lockd_up); |
@@ -285,7 +286,7 @@ lockd_down(void) | |||
285 | { | 286 | { |
286 | static int warned; | 287 | static int warned; |
287 | 288 | ||
288 | down(&nlmsvc_sema); | 289 | mutex_lock(&nlmsvc_mutex); |
289 | if (nlmsvc_users) { | 290 | if (nlmsvc_users) { |
290 | if (--nlmsvc_users) | 291 | if (--nlmsvc_users) |
291 | goto out; | 292 | goto out; |
@@ -315,7 +316,7 @@ lockd_down(void) | |||
315 | recalc_sigpending(); | 316 | recalc_sigpending(); |
316 | spin_unlock_irq(&current->sighand->siglock); | 317 | spin_unlock_irq(&current->sighand->siglock); |
317 | out: | 318 | out: |
318 | up(&nlmsvc_sema); | 319 | mutex_unlock(&nlmsvc_mutex); |
319 | } | 320 | } |
320 | EXPORT_SYMBOL(lockd_down); | 321 | EXPORT_SYMBOL(lockd_down); |
321 | 322 | ||
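lockd_start could not become a mutex: it was declared locked and "unlocked" by a different task (the new lockd thread) than the one sleeping on it, i.e. it was a one-shot startup handshake, which is exactly what a completion expresses. A minimal sketch of the pattern with illustrative names (worker/started; the real code starts the thread via svc_create_thread() and signals from lockd()):

    static DECLARE_COMPLETION(started);

    static int worker(void *arg)
    {
            /* publish whatever the parent needs to see (pid, ...) */
            complete(&started);             /* wake the waiter in the parent */
            /* ... main service loop ... */
            return 0;
    }

    /* parent side */
    int pid = kernel_thread(worker, NULL, 0);
    if (pid < 0)
            return pid;
    wait_for_completion(&started);          /* returns once worker has announced itself */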
diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index c7a6e3ae44d6..a570e5c8a930 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/string.h> | 11 | #include <linux/string.h> |
12 | #include <linux/time.h> | 12 | #include <linux/time.h> |
13 | #include <linux/in.h> | 13 | #include <linux/in.h> |
14 | #include <linux/mutex.h> | ||
14 | #include <linux/sunrpc/svc.h> | 15 | #include <linux/sunrpc/svc.h> |
15 | #include <linux/sunrpc/clnt.h> | 16 | #include <linux/sunrpc/clnt.h> |
16 | #include <linux/nfsd/nfsfh.h> | 17 | #include <linux/nfsd/nfsfh.h> |
@@ -28,7 +29,7 @@ | |||
28 | #define FILE_HASH_BITS 5 | 29 | #define FILE_HASH_BITS 5 |
29 | #define FILE_NRHASH (1<<FILE_HASH_BITS) | 30 | #define FILE_NRHASH (1<<FILE_HASH_BITS) |
30 | static struct nlm_file * nlm_files[FILE_NRHASH]; | 31 | static struct nlm_file * nlm_files[FILE_NRHASH]; |
31 | static DECLARE_MUTEX(nlm_file_sema); | 32 | static DEFINE_MUTEX(nlm_file_mutex); |
32 | 33 | ||
33 | #ifdef NFSD_DEBUG | 34 | #ifdef NFSD_DEBUG |
34 | static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) | 35 | static inline void nlm_debug_print_fh(char *msg, struct nfs_fh *f) |
@@ -91,7 +92,7 @@ nlm_lookup_file(struct svc_rqst *rqstp, struct nlm_file **result, | |||
91 | hash = file_hash(f); | 92 | hash = file_hash(f); |
92 | 93 | ||
93 | /* Lock file table */ | 94 | /* Lock file table */ |
94 | down(&nlm_file_sema); | 95 | mutex_lock(&nlm_file_mutex); |
95 | 96 | ||
96 | for (file = nlm_files[hash]; file; file = file->f_next) | 97 | for (file = nlm_files[hash]; file; file = file->f_next) |
97 | if (!nfs_compare_fh(&file->f_handle, f)) | 98 | if (!nfs_compare_fh(&file->f_handle, f)) |
@@ -130,7 +131,7 @@ found: | |||
130 | nfserr = 0; | 131 | nfserr = 0; |
131 | 132 | ||
132 | out_unlock: | 133 | out_unlock: |
133 | up(&nlm_file_sema); | 134 | mutex_unlock(&nlm_file_mutex); |
134 | return nfserr; | 135 | return nfserr; |
135 | 136 | ||
136 | out_free: | 137 | out_free: |
@@ -239,14 +240,14 @@ nlm_traverse_files(struct nlm_host *host, int action) | |||
239 | struct nlm_file *file, **fp; | 240 | struct nlm_file *file, **fp; |
240 | int i; | 241 | int i; |
241 | 242 | ||
242 | down(&nlm_file_sema); | 243 | mutex_lock(&nlm_file_mutex); |
243 | for (i = 0; i < FILE_NRHASH; i++) { | 244 | for (i = 0; i < FILE_NRHASH; i++) { |
244 | fp = nlm_files + i; | 245 | fp = nlm_files + i; |
245 | while ((file = *fp) != NULL) { | 246 | while ((file = *fp) != NULL) { |
246 | /* Traverse locks, blocks and shares of this file | 247 | /* Traverse locks, blocks and shares of this file |
247 | * and update file->f_locks count */ | 248 | * and update file->f_locks count */ |
248 | if (nlm_inspect_file(host, file, action)) { | 249 | if (nlm_inspect_file(host, file, action)) { |
249 | up(&nlm_file_sema); | 250 | mutex_unlock(&nlm_file_mutex); |
250 | return 1; | 251 | return 1; |
251 | } | 252 | } |
252 | 253 | ||
@@ -261,7 +262,7 @@ nlm_traverse_files(struct nlm_host *host, int action) | |||
261 | } | 262 | } |
262 | } | 263 | } |
263 | } | 264 | } |
264 | up(&nlm_file_sema); | 265 | mutex_unlock(&nlm_file_mutex); |
265 | return 0; | 266 | return 0; |
266 | } | 267 | } |
267 | 268 | ||
@@ -281,7 +282,7 @@ nlm_release_file(struct nlm_file *file) | |||
281 | file, file->f_count); | 282 | file, file->f_count); |
282 | 283 | ||
283 | /* Lock file table */ | 284 | /* Lock file table */ |
284 | down(&nlm_file_sema); | 285 | mutex_lock(&nlm_file_mutex); |
285 | 286 | ||
286 | /* If there are no more locks etc, delete the file */ | 287 | /* If there are no more locks etc, delete the file */ |
287 | if(--file->f_count == 0) { | 288 | if(--file->f_count == 0) { |
@@ -289,7 +290,7 @@ nlm_release_file(struct nlm_file *file) | |||
289 | nlm_delete_file(file); | 290 | nlm_delete_file(file); |
290 | } | 291 | } |
291 | 292 | ||
292 | up(&nlm_file_sema); | 293 | mutex_unlock(&nlm_file_mutex); |
293 | } | 294 | } |
294 | 295 | ||
295 | /* | 296 | /* |
diff --git a/fs/locks.c b/fs/locks.c index 56f996e98bbc..4d9e71d43e7e 100644 --- a/fs/locks.c +++ b/fs/locks.c | |||
@@ -142,7 +142,7 @@ int lease_break_time = 45; | |||
142 | static LIST_HEAD(file_lock_list); | 142 | static LIST_HEAD(file_lock_list); |
143 | static LIST_HEAD(blocked_list); | 143 | static LIST_HEAD(blocked_list); |
144 | 144 | ||
145 | static kmem_cache_t *filelock_cache; | 145 | static kmem_cache_t *filelock_cache __read_mostly; |
146 | 146 | ||
147 | /* Allocate an empty lock structure. */ | 147 | /* Allocate an empty lock structure. */ |
148 | static struct file_lock *locks_alloc_lock(void) | 148 | static struct file_lock *locks_alloc_lock(void) |
@@ -533,12 +533,7 @@ static void locks_delete_block(struct file_lock *waiter) | |||
533 | static void locks_insert_block(struct file_lock *blocker, | 533 | static void locks_insert_block(struct file_lock *blocker, |
534 | struct file_lock *waiter) | 534 | struct file_lock *waiter) |
535 | { | 535 | { |
536 | if (!list_empty(&waiter->fl_block)) { | 536 | BUG_ON(!list_empty(&waiter->fl_block)); |
537 | printk(KERN_ERR "locks_insert_block: removing duplicated lock " | ||
538 | "(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid, | ||
539 | waiter->fl_start, waiter->fl_end, waiter->fl_type); | ||
540 | __locks_delete_block(waiter); | ||
541 | } | ||
542 | list_add_tail(&waiter->fl_block, &blocker->fl_block); | 537 | list_add_tail(&waiter->fl_block, &blocker->fl_block); |
543 | waiter->fl_next = blocker; | 538 | waiter->fl_next = blocker; |
544 | if (IS_POSIX(blocker)) | 539 | if (IS_POSIX(blocker)) |
@@ -797,9 +792,7 @@ out: | |||
797 | return error; | 792 | return error; |
798 | } | 793 | } |
799 | 794 | ||
800 | EXPORT_SYMBOL(posix_lock_file); | 795 | static int __posix_lock_file_conf(struct inode *inode, struct file_lock *request, struct file_lock *conflock) |
801 | |||
802 | static int __posix_lock_file(struct inode *inode, struct file_lock *request) | ||
803 | { | 796 | { |
804 | struct file_lock *fl; | 797 | struct file_lock *fl; |
805 | struct file_lock *new_fl, *new_fl2; | 798 | struct file_lock *new_fl, *new_fl2; |
@@ -823,6 +816,8 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) | |||
823 | continue; | 816 | continue; |
824 | if (!posix_locks_conflict(request, fl)) | 817 | if (!posix_locks_conflict(request, fl)) |
825 | continue; | 818 | continue; |
819 | if (conflock) | ||
820 | locks_copy_lock(conflock, fl); | ||
826 | error = -EAGAIN; | 821 | error = -EAGAIN; |
827 | if (!(request->fl_flags & FL_SLEEP)) | 822 | if (!(request->fl_flags & FL_SLEEP)) |
828 | goto out; | 823 | goto out; |
@@ -992,8 +987,24 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request) | |||
992 | */ | 987 | */ |
993 | int posix_lock_file(struct file *filp, struct file_lock *fl) | 988 | int posix_lock_file(struct file *filp, struct file_lock *fl) |
994 | { | 989 | { |
995 | return __posix_lock_file(filp->f_dentry->d_inode, fl); | 990 | return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, NULL); |
991 | } | ||
992 | EXPORT_SYMBOL(posix_lock_file); | ||
993 | |||
994 | /** | ||
995 | * posix_lock_file_conf - Apply a POSIX-style lock to a file | ||
996 | * @filp: The file to apply the lock to | ||
997 | * @fl: The lock to be applied | ||
998 | * @conflock: Place to return a copy of the conflicting lock, if found. | ||
999 | * | ||
1000 | * Except for the conflock parameter, acts just like posix_lock_file. | ||
1001 | */ | ||
1002 | int posix_lock_file_conf(struct file *filp, struct file_lock *fl, | ||
1003 | struct file_lock *conflock) | ||
1004 | { | ||
1005 | return __posix_lock_file_conf(filp->f_dentry->d_inode, fl, conflock); | ||
996 | } | 1006 | } |
1007 | EXPORT_SYMBOL(posix_lock_file_conf); | ||
997 | 1008 | ||
998 | /** | 1009 | /** |
999 | * posix_lock_file_wait - Apply a POSIX-style lock to a file | 1010 | * posix_lock_file_wait - Apply a POSIX-style lock to a file |
@@ -1009,7 +1020,7 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl) | |||
1009 | int error; | 1020 | int error; |
1010 | might_sleep (); | 1021 | might_sleep (); |
1011 | for (;;) { | 1022 | for (;;) { |
1012 | error = __posix_lock_file(filp->f_dentry->d_inode, fl); | 1023 | error = posix_lock_file(filp, fl); |
1013 | if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) | 1024 | if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP)) |
1014 | break; | 1025 | break; |
1015 | error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); | 1026 | error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); |
@@ -1081,7 +1092,7 @@ int locks_mandatory_area(int read_write, struct inode *inode, | |||
1081 | fl.fl_end = offset + count - 1; | 1092 | fl.fl_end = offset + count - 1; |
1082 | 1093 | ||
1083 | for (;;) { | 1094 | for (;;) { |
1084 | error = __posix_lock_file(inode, &fl); | 1095 | error = __posix_lock_file_conf(inode, &fl, NULL); |
1085 | if (error != -EAGAIN) | 1096 | if (error != -EAGAIN) |
1086 | break; | 1097 | break; |
1087 | if (!(fl.fl_flags & FL_SLEEP)) | 1098 | if (!(fl.fl_flags & FL_SLEEP)) |
@@ -1694,7 +1705,7 @@ again: | |||
1694 | error = filp->f_op->lock(filp, cmd, file_lock); | 1705 | error = filp->f_op->lock(filp, cmd, file_lock); |
1695 | else { | 1706 | else { |
1696 | for (;;) { | 1707 | for (;;) { |
1697 | error = __posix_lock_file(inode, file_lock); | 1708 | error = posix_lock_file(filp, file_lock); |
1698 | if ((error != -EAGAIN) || (cmd == F_SETLK)) | 1709 | if ((error != -EAGAIN) || (cmd == F_SETLK)) |
1699 | break; | 1710 | break; |
1700 | error = wait_event_interruptible(file_lock->fl_wait, | 1711 | error = wait_event_interruptible(file_lock->fl_wait, |
@@ -1837,7 +1848,7 @@ again: | |||
1837 | error = filp->f_op->lock(filp, cmd, file_lock); | 1848 | error = filp->f_op->lock(filp, cmd, file_lock); |
1838 | else { | 1849 | else { |
1839 | for (;;) { | 1850 | for (;;) { |
1840 | error = __posix_lock_file(inode, file_lock); | 1851 | error = posix_lock_file(filp, file_lock); |
1841 | if ((error != -EAGAIN) || (cmd == F_SETLK64)) | 1852 | if ((error != -EAGAIN) || (cmd == F_SETLK64)) |
1842 | break; | 1853 | break; |
1843 | error = wait_event_interruptible(file_lock->fl_wait, | 1854 | error = wait_event_interruptible(file_lock->fl_wait, |
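posix_lock_file_conf() is the new entry point that hands back a copy of the conflicting lock in the same call that fails with -EAGAIN, so callers no longer need a separate, racy posix_test_lock() afterwards; the fs/nfsd hunk further down is the first user. A hedged usage sketch (filp is an open struct file, error handling abbreviated, OFFSET_MAX as defined in fs/locks.c):

    struct file_lock fl, conf;
    int err;

    locks_init_lock(&fl);
    locks_init_lock(&conf);         /* leaves fl_ops/fl_lmops NULL for locks_copy_lock() */
    fl.fl_file  = filp;
    fl.fl_owner = current->files;
    fl.fl_pid   = current->tgid;
    fl.fl_flags = FL_POSIX;
    fl.fl_type  = F_WRLCK;
    fl.fl_start = 0;
    fl.fl_end   = OFFSET_MAX;       /* whole file */

    err = posix_lock_file_conf(filp, &fl, &conf);
    if (err == -EAGAIN)
            /* conf now describes the lock that blocked us */
            printk(KERN_DEBUG "blocked by pid %d, range %lld-%lld\n",
                   conf.fl_pid, (long long)conf.fl_start,
                   (long long)conf.fl_end);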
diff --git a/fs/mpage.c b/fs/mpage.c index e431cb3878d6..9bf2eb30e6f4 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -163,9 +163,19 @@ map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block) | |||
163 | } while (page_bh != head); | 163 | } while (page_bh != head); |
164 | } | 164 | } |
165 | 165 | ||
166 | /* | ||
167 | * This is the worker routine which does all the work of mapping the disk | ||
168 | * blocks and constructs largest possible bios, submits them for IO if the | ||
169 | * blocks are not contiguous on the disk. | ||
170 | * | ||
171 | * We pass a buffer_head back and forth and use its buffer_mapped() flag to | ||
172 | * represent the validity of its disk mapping and to decide when to do the next | ||
173 | * get_block() call. | ||
174 | */ | ||
166 | static struct bio * | 175 | static struct bio * |
167 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | 176 | do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, |
168 | sector_t *last_block_in_bio, get_block_t get_block) | 177 | sector_t *last_block_in_bio, struct buffer_head *map_bh, |
178 | unsigned long *first_logical_block, get_block_t get_block) | ||
169 | { | 179 | { |
170 | struct inode *inode = page->mapping->host; | 180 | struct inode *inode = page->mapping->host; |
171 | const unsigned blkbits = inode->i_blkbits; | 181 | const unsigned blkbits = inode->i_blkbits; |
@@ -173,33 +183,72 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
173 | const unsigned blocksize = 1 << blkbits; | 183 | const unsigned blocksize = 1 << blkbits; |
174 | sector_t block_in_file; | 184 | sector_t block_in_file; |
175 | sector_t last_block; | 185 | sector_t last_block; |
186 | sector_t last_block_in_file; | ||
176 | sector_t blocks[MAX_BUF_PER_PAGE]; | 187 | sector_t blocks[MAX_BUF_PER_PAGE]; |
177 | unsigned page_block; | 188 | unsigned page_block; |
178 | unsigned first_hole = blocks_per_page; | 189 | unsigned first_hole = blocks_per_page; |
179 | struct block_device *bdev = NULL; | 190 | struct block_device *bdev = NULL; |
180 | struct buffer_head bh; | ||
181 | int length; | 191 | int length; |
182 | int fully_mapped = 1; | 192 | int fully_mapped = 1; |
193 | unsigned nblocks; | ||
194 | unsigned relative_block; | ||
183 | 195 | ||
184 | if (page_has_buffers(page)) | 196 | if (page_has_buffers(page)) |
185 | goto confused; | 197 | goto confused; |
186 | 198 | ||
187 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); | 199 | block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits); |
188 | last_block = (i_size_read(inode) + blocksize - 1) >> blkbits; | 200 | last_block = block_in_file + nr_pages * blocks_per_page; |
201 | last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits; | ||
202 | if (last_block > last_block_in_file) | ||
203 | last_block = last_block_in_file; | ||
204 | page_block = 0; | ||
205 | |||
206 | /* | ||
207 | * Map blocks using the result from the previous get_blocks call first. | ||
208 | */ | ||
209 | nblocks = map_bh->b_size >> blkbits; | ||
210 | if (buffer_mapped(map_bh) && block_in_file > *first_logical_block && | ||
211 | block_in_file < (*first_logical_block + nblocks)) { | ||
212 | unsigned map_offset = block_in_file - *first_logical_block; | ||
213 | unsigned last = nblocks - map_offset; | ||
214 | |||
215 | for (relative_block = 0; ; relative_block++) { | ||
216 | if (relative_block == last) { | ||
217 | clear_buffer_mapped(map_bh); | ||
218 | break; | ||
219 | } | ||
220 | if (page_block == blocks_per_page) | ||
221 | break; | ||
222 | blocks[page_block] = map_bh->b_blocknr + map_offset + | ||
223 | relative_block; | ||
224 | page_block++; | ||
225 | block_in_file++; | ||
226 | } | ||
227 | bdev = map_bh->b_bdev; | ||
228 | } | ||
229 | |||
230 | /* | ||
231 | * Then do more get_blocks calls until we are done with this page. | ||
232 | */ | ||
233 | map_bh->b_page = page; | ||
234 | while (page_block < blocks_per_page) { | ||
235 | map_bh->b_state = 0; | ||
236 | map_bh->b_size = 0; | ||
189 | 237 | ||
190 | bh.b_page = page; | ||
191 | for (page_block = 0; page_block < blocks_per_page; | ||
192 | page_block++, block_in_file++) { | ||
193 | bh.b_state = 0; | ||
194 | if (block_in_file < last_block) { | 238 | if (block_in_file < last_block) { |
195 | if (get_block(inode, block_in_file, &bh, 0)) | 239 | map_bh->b_size = (last_block-block_in_file) << blkbits; |
240 | if (get_block(inode, block_in_file, map_bh, 0)) | ||
196 | goto confused; | 241 | goto confused; |
242 | *first_logical_block = block_in_file; | ||
197 | } | 243 | } |
198 | 244 | ||
199 | if (!buffer_mapped(&bh)) { | 245 | if (!buffer_mapped(map_bh)) { |
200 | fully_mapped = 0; | 246 | fully_mapped = 0; |
201 | if (first_hole == blocks_per_page) | 247 | if (first_hole == blocks_per_page) |
202 | first_hole = page_block; | 248 | first_hole = page_block; |
249 | page_block++; | ||
250 | block_in_file++; | ||
251 | clear_buffer_mapped(map_bh); | ||
203 | continue; | 252 | continue; |
204 | } | 253 | } |
205 | 254 | ||
@@ -209,8 +258,8 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
209 | * we just collected from get_block into the page's buffers | 258 | * we just collected from get_block into the page's buffers |
210 | * so readpage doesn't have to repeat the get_block call | 259 | * so readpage doesn't have to repeat the get_block call |
211 | */ | 260 | */ |
212 | if (buffer_uptodate(&bh)) { | 261 | if (buffer_uptodate(map_bh)) { |
213 | map_buffer_to_page(page, &bh, page_block); | 262 | map_buffer_to_page(page, map_bh, page_block); |
214 | goto confused; | 263 | goto confused; |
215 | } | 264 | } |
216 | 265 | ||
@@ -218,10 +267,20 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
218 | goto confused; /* hole -> non-hole */ | 267 | goto confused; /* hole -> non-hole */ |
219 | 268 | ||
220 | /* Contiguous blocks? */ | 269 | /* Contiguous blocks? */ |
221 | if (page_block && blocks[page_block-1] != bh.b_blocknr-1) | 270 | if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1) |
222 | goto confused; | 271 | goto confused; |
223 | blocks[page_block] = bh.b_blocknr; | 272 | nblocks = map_bh->b_size >> blkbits; |
224 | bdev = bh.b_bdev; | 273 | for (relative_block = 0; ; relative_block++) { |
274 | if (relative_block == nblocks) { | ||
275 | clear_buffer_mapped(map_bh); | ||
276 | break; | ||
277 | } else if (page_block == blocks_per_page) | ||
278 | break; | ||
279 | blocks[page_block] = map_bh->b_blocknr+relative_block; | ||
280 | page_block++; | ||
281 | block_in_file++; | ||
282 | } | ||
283 | bdev = map_bh->b_bdev; | ||
225 | } | 284 | } |
226 | 285 | ||
227 | if (first_hole != blocks_per_page) { | 286 | if (first_hole != blocks_per_page) { |
@@ -260,7 +319,7 @@ alloc_new: | |||
260 | goto alloc_new; | 319 | goto alloc_new; |
261 | } | 320 | } |
262 | 321 | ||
263 | if (buffer_boundary(&bh) || (first_hole != blocks_per_page)) | 322 | if (buffer_boundary(map_bh) || (first_hole != blocks_per_page)) |
264 | bio = mpage_bio_submit(READ, bio); | 323 | bio = mpage_bio_submit(READ, bio); |
265 | else | 324 | else |
266 | *last_block_in_bio = blocks[blocks_per_page - 1]; | 325 | *last_block_in_bio = blocks[blocks_per_page - 1]; |
@@ -331,7 +390,10 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
331 | unsigned page_idx; | 390 | unsigned page_idx; |
332 | sector_t last_block_in_bio = 0; | 391 | sector_t last_block_in_bio = 0; |
333 | struct pagevec lru_pvec; | 392 | struct pagevec lru_pvec; |
393 | struct buffer_head map_bh; | ||
394 | unsigned long first_logical_block = 0; | ||
334 | 395 | ||
396 | clear_buffer_mapped(&map_bh); | ||
335 | pagevec_init(&lru_pvec, 0); | 397 | pagevec_init(&lru_pvec, 0); |
336 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { | 398 | for (page_idx = 0; page_idx < nr_pages; page_idx++) { |
337 | struct page *page = list_entry(pages->prev, struct page, lru); | 399 | struct page *page = list_entry(pages->prev, struct page, lru); |
@@ -342,7 +404,9 @@ mpage_readpages(struct address_space *mapping, struct list_head *pages, | |||
342 | page->index, GFP_KERNEL)) { | 404 | page->index, GFP_KERNEL)) { |
343 | bio = do_mpage_readpage(bio, page, | 405 | bio = do_mpage_readpage(bio, page, |
344 | nr_pages - page_idx, | 406 | nr_pages - page_idx, |
345 | &last_block_in_bio, get_block); | 407 | &last_block_in_bio, &map_bh, |
408 | &first_logical_block, | ||
409 | get_block); | ||
346 | if (!pagevec_add(&lru_pvec, page)) | 410 | if (!pagevec_add(&lru_pvec, page)) |
347 | __pagevec_lru_add(&lru_pvec); | 411 | __pagevec_lru_add(&lru_pvec); |
348 | } else { | 412 | } else { |
@@ -364,9 +428,12 @@ int mpage_readpage(struct page *page, get_block_t get_block) | |||
364 | { | 428 | { |
365 | struct bio *bio = NULL; | 429 | struct bio *bio = NULL; |
366 | sector_t last_block_in_bio = 0; | 430 | sector_t last_block_in_bio = 0; |
431 | struct buffer_head map_bh; | ||
432 | unsigned long first_logical_block = 0; | ||
367 | 433 | ||
368 | bio = do_mpage_readpage(bio, page, 1, | 434 | clear_buffer_mapped(&map_bh); |
369 | &last_block_in_bio, get_block); | 435 | bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio, |
436 | &map_bh, &first_logical_block, get_block); | ||
370 | if (bio) | 437 | if (bio) |
371 | mpage_bio_submit(READ, bio); | 438 | mpage_bio_submit(READ, bio); |
372 | return 0; | 439 | return 0; |
@@ -472,6 +539,7 @@ __mpage_writepage(struct bio *bio, struct page *page, get_block_t get_block, | |||
472 | for (page_block = 0; page_block < blocks_per_page; ) { | 539 | for (page_block = 0; page_block < blocks_per_page; ) { |
473 | 540 | ||
474 | map_bh.b_state = 0; | 541 | map_bh.b_state = 0; |
542 | map_bh.b_size = 1 << blkbits; | ||
475 | if (get_block(inode, block_in_file, &map_bh, 1)) | 543 | if (get_block(inode, block_in_file, &map_bh, 1)) |
476 | goto confused; | 544 | goto confused; |
477 | if (buffer_new(&map_bh)) | 545 | if (buffer_new(&map_bh)) |
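The reworked do_mpage_readpage() pre-loads map_bh->b_size with the number of bytes it still wants mapped and then consumes as many contiguous blocks as the single get_block() reply covers, remembering the mapping in map_bh/first_logical_block across pages of the same readahead window. A get_block() implementation only has to report how far its mapping extends; a hedged sketch (example_lookup_extent() is hypothetical, standing in for the filesystem's block lookup):

    static int example_get_block(struct inode *inode, sector_t iblock,
                                 struct buffer_head *bh_result, int create)
    {
            unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
            sector_t phys;
            unsigned nr;            /* contiguous blocks actually found */

            nr = example_lookup_extent(inode, iblock, max_blocks, &phys);
            if (!nr)
                    return 0;       /* hole: leave bh_result unmapped */

            map_bh(bh_result, inode->i_sb, phys);       /* sets b_bdev, b_blocknr, mapped */
            bh_result->b_size = nr << inode->i_blkbits; /* how far this mapping extends */
            return 0;
    }

The __mpage_writepage hunk sets map_bh.b_size = 1 << blkbits for the same reason: the callback now reads the requested size out of b_size instead of a max_blocks argument (see also the ocfs2_direct_IO_get_blocks change below).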
diff --git a/fs/namespace.c b/fs/namespace.c index 71e75bcf4d28..e069a4c5e389 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -43,9 +43,9 @@ __cacheline_aligned_in_smp DEFINE_SPINLOCK(vfsmount_lock); | |||
43 | 43 | ||
44 | static int event; | 44 | static int event; |
45 | 45 | ||
46 | static struct list_head *mount_hashtable; | 46 | static struct list_head *mount_hashtable __read_mostly; |
47 | static int hash_mask __read_mostly, hash_bits __read_mostly; | 47 | static int hash_mask __read_mostly, hash_bits __read_mostly; |
48 | static kmem_cache_t *mnt_cache; | 48 | static kmem_cache_t *mnt_cache __read_mostly; |
49 | static struct rw_semaphore namespace_sem; | 49 | static struct rw_semaphore namespace_sem; |
50 | 50 | ||
51 | /* /sys/fs */ | 51 | /* /sys/fs */ |
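__read_mostly only affects section placement: variables that are written once during initialisation but read on hot paths are grouped together so they do not share cache lines with frequently written data. It is added at the definition, e.g. (example_cache is illustrative):

    static kmem_cache_t *example_cache __read_mostly;  /* set once at init, read per operation */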
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index 99d2cfbce863..90c95adc8c1b 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/sunrpc/svc.h> | 14 | #include <linux/sunrpc/svc.h> |
15 | #include <linux/sunrpc/svcsock.h> | 15 | #include <linux/sunrpc/svcsock.h> |
16 | #include <linux/nfs_fs.h> | 16 | #include <linux/nfs_fs.h> |
17 | #include <linux/mutex.h> | ||
17 | 18 | ||
18 | #include <net/inet_sock.h> | 19 | #include <net/inet_sock.h> |
19 | 20 | ||
@@ -31,7 +32,7 @@ struct nfs_callback_data { | |||
31 | }; | 32 | }; |
32 | 33 | ||
33 | static struct nfs_callback_data nfs_callback_info; | 34 | static struct nfs_callback_data nfs_callback_info; |
34 | static DECLARE_MUTEX(nfs_callback_sema); | 35 | static DEFINE_MUTEX(nfs_callback_mutex); |
35 | static struct svc_program nfs4_callback_program; | 36 | static struct svc_program nfs4_callback_program; |
36 | 37 | ||
37 | unsigned int nfs_callback_set_tcpport; | 38 | unsigned int nfs_callback_set_tcpport; |
@@ -95,7 +96,7 @@ int nfs_callback_up(void) | |||
95 | int ret = 0; | 96 | int ret = 0; |
96 | 97 | ||
97 | lock_kernel(); | 98 | lock_kernel(); |
98 | down(&nfs_callback_sema); | 99 | mutex_lock(&nfs_callback_mutex); |
99 | if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) | 100 | if (nfs_callback_info.users++ || nfs_callback_info.pid != 0) |
100 | goto out; | 101 | goto out; |
101 | init_completion(&nfs_callback_info.started); | 102 | init_completion(&nfs_callback_info.started); |
@@ -121,7 +122,7 @@ int nfs_callback_up(void) | |||
121 | nfs_callback_info.serv = serv; | 122 | nfs_callback_info.serv = serv; |
122 | wait_for_completion(&nfs_callback_info.started); | 123 | wait_for_completion(&nfs_callback_info.started); |
123 | out: | 124 | out: |
124 | up(&nfs_callback_sema); | 125 | mutex_unlock(&nfs_callback_mutex); |
125 | unlock_kernel(); | 126 | unlock_kernel(); |
126 | return ret; | 127 | return ret; |
127 | out_destroy: | 128 | out_destroy: |
@@ -139,7 +140,7 @@ int nfs_callback_down(void) | |||
139 | int ret = 0; | 140 | int ret = 0; |
140 | 141 | ||
141 | lock_kernel(); | 142 | lock_kernel(); |
142 | down(&nfs_callback_sema); | 143 | mutex_lock(&nfs_callback_mutex); |
143 | nfs_callback_info.users--; | 144 | nfs_callback_info.users--; |
144 | do { | 145 | do { |
145 | if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0) | 146 | if (nfs_callback_info.users != 0 || nfs_callback_info.pid == 0) |
@@ -147,7 +148,7 @@ int nfs_callback_down(void) | |||
147 | if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0) | 148 | if (kill_proc(nfs_callback_info.pid, SIGKILL, 1) < 0) |
148 | break; | 149 | break; |
149 | } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0); | 150 | } while (wait_for_completion_timeout(&nfs_callback_info.stopped, 5*HZ) == 0); |
150 | up(&nfs_callback_sema); | 151 | mutex_unlock(&nfs_callback_mutex); |
151 | unlock_kernel(); | 152 | unlock_kernel(); |
152 | return ret; | 153 | return ret; |
153 | } | 154 | } |
diff --git a/fs/nfs/file.c b/fs/nfs/file.c index 5263b2864a44..dee49a0cb995 100644 --- a/fs/nfs/file.c +++ b/fs/nfs/file.c | |||
@@ -318,10 +318,9 @@ static int nfs_commit_write(struct file *file, struct page *page, unsigned offse | |||
318 | return status; | 318 | return status; |
319 | } | 319 | } |
320 | 320 | ||
321 | static int nfs_invalidate_page(struct page *page, unsigned long offset) | 321 | static void nfs_invalidate_page(struct page *page, unsigned long offset) |
322 | { | 322 | { |
323 | /* FIXME: we really should cancel any unstarted writes on this page */ | 323 | /* FIXME: we really should cancel any unstarted writes on this page */ |
324 | return 1; | ||
325 | } | 324 | } |
326 | 325 | ||
327 | static int nfs_release_page(struct page *page, gfp_t gfp) | 326 | static int nfs_release_page(struct page *page, gfp_t gfp) |
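As with metapage_invalidatepage() earlier, this tracks the prototype change of the ->invalidatepage address_space operation; nothing useful was done with the return value, so implementations now just perform their cleanup. For reference:

    /* old */ int  (*invalidatepage)(struct page *, unsigned long);
    /* new */ void (*invalidatepage)(struct page *, unsigned long);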
diff --git a/fs/nfs/read.c b/fs/nfs/read.c index 3961524fd4ab..624ca7146b6b 100644 --- a/fs/nfs/read.c +++ b/fs/nfs/read.c | |||
@@ -663,10 +663,8 @@ int nfs_init_readpagecache(void) | |||
663 | if (nfs_rdata_cachep == NULL) | 663 | if (nfs_rdata_cachep == NULL) |
664 | return -ENOMEM; | 664 | return -ENOMEM; |
665 | 665 | ||
666 | nfs_rdata_mempool = mempool_create(MIN_POOL_READ, | 666 | nfs_rdata_mempool = mempool_create_slab_pool(MIN_POOL_READ, |
667 | mempool_alloc_slab, | 667 | nfs_rdata_cachep); |
668 | mempool_free_slab, | ||
669 | nfs_rdata_cachep); | ||
670 | if (nfs_rdata_mempool == NULL) | 668 | if (nfs_rdata_mempool == NULL) |
671 | return -ENOMEM; | 669 | return -ENOMEM; |
672 | 670 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index 3f5225404c97..4cfada2cc09f 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1521,17 +1521,13 @@ int nfs_init_writepagecache(void) | |||
1521 | if (nfs_wdata_cachep == NULL) | 1521 | if (nfs_wdata_cachep == NULL) |
1522 | return -ENOMEM; | 1522 | return -ENOMEM; |
1523 | 1523 | ||
1524 | nfs_wdata_mempool = mempool_create(MIN_POOL_WRITE, | 1524 | nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, |
1525 | mempool_alloc_slab, | 1525 | nfs_wdata_cachep); |
1526 | mempool_free_slab, | ||
1527 | nfs_wdata_cachep); | ||
1528 | if (nfs_wdata_mempool == NULL) | 1526 | if (nfs_wdata_mempool == NULL) |
1529 | return -ENOMEM; | 1527 | return -ENOMEM; |
1530 | 1528 | ||
1531 | nfs_commit_mempool = mempool_create(MIN_POOL_COMMIT, | 1529 | nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, |
1532 | mempool_alloc_slab, | 1530 | nfs_wdata_cachep); |
1533 | mempool_free_slab, | ||
1534 | nfs_wdata_cachep); | ||
1535 | if (nfs_commit_mempool == NULL) | 1531 | if (nfs_commit_mempool == NULL) |
1536 | return -ENOMEM; | 1532 | return -ENOMEM; |
1537 | 1533 | ||
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index f6ab762bea99..47ec112b266c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/nfsd/state.h> | 49 | #include <linux/nfsd/state.h> |
50 | #include <linux/nfsd/xdr4.h> | 50 | #include <linux/nfsd/xdr4.h> |
51 | #include <linux/namei.h> | 51 | #include <linux/namei.h> |
52 | #include <linux/mutex.h> | ||
52 | 53 | ||
53 | #define NFSDDBG_FACILITY NFSDDBG_PROC | 54 | #define NFSDDBG_FACILITY NFSDDBG_PROC |
54 | 55 | ||
@@ -77,11 +78,11 @@ static void nfs4_set_recdir(char *recdir); | |||
77 | 78 | ||
78 | /* Locking: | 79 | /* Locking: |
79 | * | 80 | * |
80 | * client_sema: | 81 | * client_mutex: |
81 | * protects clientid_hashtbl[], clientstr_hashtbl[], | 82 | * protects clientid_hashtbl[], clientstr_hashtbl[], |
82 | * unconfstr_hashtbl[], uncofid_hashtbl[]. | 83 | * unconfstr_hashtbl[], uncofid_hashtbl[]. |
83 | */ | 84 | */ |
84 | static DECLARE_MUTEX(client_sema); | 85 | static DEFINE_MUTEX(client_mutex); |
85 | 86 | ||
86 | static kmem_cache_t *stateowner_slab = NULL; | 87 | static kmem_cache_t *stateowner_slab = NULL; |
87 | static kmem_cache_t *file_slab = NULL; | 88 | static kmem_cache_t *file_slab = NULL; |
@@ -91,13 +92,13 @@ static kmem_cache_t *deleg_slab = NULL; | |||
91 | void | 92 | void |
92 | nfs4_lock_state(void) | 93 | nfs4_lock_state(void) |
93 | { | 94 | { |
94 | down(&client_sema); | 95 | mutex_lock(&client_mutex); |
95 | } | 96 | } |
96 | 97 | ||
97 | void | 98 | void |
98 | nfs4_unlock_state(void) | 99 | nfs4_unlock_state(void) |
99 | { | 100 | { |
100 | up(&client_sema); | 101 | mutex_unlock(&client_mutex); |
101 | } | 102 | } |
102 | 103 | ||
103 | static inline u32 | 104 | static inline u32 |
@@ -2749,37 +2750,31 @@ nfsd4_lock(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_lock | |||
2749 | * Note: locks.c uses the BKL to protect the inode's lock list. | 2750 | * Note: locks.c uses the BKL to protect the inode's lock list. |
2750 | */ | 2751 | */ |
2751 | 2752 | ||
2752 | status = posix_lock_file(filp, &file_lock); | 2753 | /* XXX?: Just to divert the locks_release_private at the start of |
2753 | dprintk("NFSD: nfsd4_lock: posix_lock_file status %d\n",status); | 2754 | * locks_copy_lock: */ |
2755 | conflock.fl_ops = NULL; | ||
2756 | conflock.fl_lmops = NULL; | ||
2757 | status = posix_lock_file_conf(filp, &file_lock, &conflock); | ||
2758 | dprintk("NFSD: nfsd4_lock: posix_lock_file_conf status %d\n",status); | ||
2754 | switch (-status) { | 2759 | switch (-status) { |
2755 | case 0: /* success! */ | 2760 | case 0: /* success! */ |
2756 | update_stateid(&lock_stp->st_stateid); | 2761 | update_stateid(&lock_stp->st_stateid); |
2757 | memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, | 2762 | memcpy(&lock->lk_resp_stateid, &lock_stp->st_stateid, |
2758 | sizeof(stateid_t)); | 2763 | sizeof(stateid_t)); |
2759 | goto out; | 2764 | break; |
2760 | case (EAGAIN): | 2765 | case (EAGAIN): /* conflock holds conflicting lock */ |
2761 | goto conflicting_lock; | 2766 | status = nfserr_denied; |
2767 | dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); | ||
2768 | nfs4_set_lock_denied(&conflock, &lock->lk_denied); | ||
2769 | break; | ||
2762 | case (EDEADLK): | 2770 | case (EDEADLK): |
2763 | status = nfserr_deadlock; | 2771 | status = nfserr_deadlock; |
2764 | dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); | 2772 | break; |
2765 | goto out; | ||
2766 | default: | 2773 | default: |
2767 | status = nfserrno(status); | 2774 | dprintk("NFSD: nfsd4_lock: posix_lock_file_conf() failed! status %d\n",status); |
2768 | dprintk("NFSD: nfsd4_lock: posix_lock_file() failed! status %d\n",status); | 2775 | status = nfserr_resource; |
2769 | goto out; | 2776 | break; |
2770 | } | ||
2771 | |||
2772 | conflicting_lock: | ||
2773 | dprintk("NFSD: nfsd4_lock: conflicting lock found!\n"); | ||
2774 | status = nfserr_denied; | ||
2775 | /* XXX There is a race here. Future patch needed to provide | ||
2776 | * an atomic posix_lock_and_test_file | ||
2777 | */ | ||
2778 | if (!posix_test_lock(filp, &file_lock, &conflock)) { | ||
2779 | status = nfserr_serverfault; | ||
2780 | goto out; | ||
2781 | } | 2777 | } |
2782 | nfs4_set_lock_denied(&conflock, &lock->lk_denied); | ||
2783 | out: | 2778 | out: |
2784 | if (status && lock->lk_is_new && lock_sop) | 2779 | if (status && lock->lk_is_new && lock_sop) |
2785 | release_stateowner(lock_sop); | 2780 | release_stateowner(lock_sop); |
diff --git a/fs/ntfs/logfile.c b/fs/ntfs/logfile.c index 0fd70295cca6..4af2ad1193ec 100644 --- a/fs/ntfs/logfile.c +++ b/fs/ntfs/logfile.c | |||
@@ -515,10 +515,10 @@ BOOL ntfs_check_logfile(struct inode *log_vi, RESTART_PAGE_HEADER **rp) | |||
515 | log_page_size = PAGE_CACHE_SIZE; | 515 | log_page_size = PAGE_CACHE_SIZE; |
516 | log_page_mask = log_page_size - 1; | 516 | log_page_mask = log_page_size - 1; |
517 | /* | 517 | /* |
518 | * Use generic_ffs() instead of ffs() to enable the compiler to | 518 | * Use ntfs_ffs() instead of ffs() to enable the compiler to |
519 | * optimize log_page_size and log_page_bits into constants. | 519 | * optimize log_page_size and log_page_bits into constants. |
520 | */ | 520 | */ |
521 | log_page_bits = generic_ffs(log_page_size) - 1; | 521 | log_page_bits = ntfs_ffs(log_page_size) - 1; |
522 | size &= ~(s64)(log_page_size - 1); | 522 | size &= ~(s64)(log_page_size - 1); |
523 | /* | 523 | /* |
524 | * Ensure the log file is big enough to store at least the two restart | 524 | * Ensure the log file is big enough to store at least the two restart |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 4e72bc7afdf9..2438c00ec0ce 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -2670,7 +2670,7 @@ mft_rec_already_initialized: | |||
2670 | ni->name_len = 4; | 2670 | ni->name_len = 4; |
2671 | 2671 | ||
2672 | ni->itype.index.block_size = 4096; | 2672 | ni->itype.index.block_size = 4096; |
2673 | ni->itype.index.block_size_bits = generic_ffs(4096) - 1; | 2673 | ni->itype.index.block_size_bits = ntfs_ffs(4096) - 1; |
2674 | ni->itype.index.collation_rule = COLLATION_FILE_NAME; | 2674 | ni->itype.index.collation_rule = COLLATION_FILE_NAME; |
2675 | if (vol->cluster_size <= ni->itype.index.block_size) { | 2675 | if (vol->cluster_size <= ni->itype.index.block_size) { |
2676 | ni->itype.index.vcn_size = vol->cluster_size; | 2676 | ni->itype.index.vcn_size = vol->cluster_size; |
diff --git a/fs/ntfs/ntfs.h b/fs/ntfs/ntfs.h index 0624c8ef4d9c..166142960b53 100644 --- a/fs/ntfs/ntfs.h +++ b/fs/ntfs/ntfs.h | |||
@@ -132,4 +132,33 @@ extern int ntfs_ucstonls(const ntfs_volume *vol, const ntfschar *ins, | |||
132 | /* From fs/ntfs/upcase.c */ | 132 | /* From fs/ntfs/upcase.c */ |
133 | extern ntfschar *generate_default_upcase(void); | 133 | extern ntfschar *generate_default_upcase(void); |
134 | 134 | ||
135 | static inline int ntfs_ffs(int x) | ||
136 | { | ||
137 | int r = 1; | ||
138 | |||
139 | if (!x) | ||
140 | return 0; | ||
141 | if (!(x & 0xffff)) { | ||
142 | x >>= 16; | ||
143 | r += 16; | ||
144 | } | ||
145 | if (!(x & 0xff)) { | ||
146 | x >>= 8; | ||
147 | r += 8; | ||
148 | } | ||
149 | if (!(x & 0xf)) { | ||
150 | x >>= 4; | ||
151 | r += 4; | ||
152 | } | ||
153 | if (!(x & 3)) { | ||
154 | x >>= 2; | ||
155 | r += 2; | ||
156 | } | ||
157 | if (!(x & 1)) { | ||
158 | x >>= 1; | ||
159 | r += 1; | ||
160 | } | ||
161 | return r; | ||
162 | } | ||
163 | |||
135 | #endif /* _LINUX_NTFS_H */ | 164 | #endif /* _LINUX_NTFS_H */ |
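ntfs_ffs() is a local replacement for generic_ffs(), which is being removed; for the power-of-two arguments it is given it returns the same 1-based bit index as ffs() while staying an inline the compiler can fold into a constant. A quick check of the two call sites above, assuming 4 KiB pages:

    BUG_ON(ntfs_ffs(4096) != 13);           /* 4096 == 1 << 12, so block_size_bits = 13 - 1 = 12 */
    BUG_ON(ntfs_ffs(PAGE_CACHE_SIZE) - 1 != PAGE_CACHE_SHIFT);  /* log_page_bits == 12 here */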
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index bf931ba1d364..0d858d0b25be 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -540,7 +540,6 @@ bail: | |||
540 | * fs_count, map_bh, dio->rw == WRITE); | 540 | * fs_count, map_bh, dio->rw == WRITE); |
541 | */ | 541 | */ |
542 | static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, | 542 | static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, |
543 | unsigned long max_blocks, | ||
544 | struct buffer_head *bh_result, int create) | 543 | struct buffer_head *bh_result, int create) |
545 | { | 544 | { |
546 | int ret; | 545 | int ret; |
@@ -548,6 +547,7 @@ static int ocfs2_direct_IO_get_blocks(struct inode *inode, sector_t iblock, | |||
548 | u64 p_blkno; | 547 | u64 p_blkno; |
549 | int contig_blocks; | 548 | int contig_blocks; |
550 | unsigned char blocksize_bits; | 549 | unsigned char blocksize_bits; |
550 | unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits; | ||
551 | 551 | ||
552 | if (!inode || !bh_result) { | 552 | if (!inode || !bh_result) { |
553 | mlog(ML_ERROR, "inode or bh_result is null\n"); | 553 | mlog(ML_ERROR, "inode or bh_result is null\n"); |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index ae3440ca083c..6a610ae53583 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -377,7 +377,7 @@ int ocfs2_journal_access(struct ocfs2_journal_handle *handle, | |||
377 | BUG_ON(!bh); | 377 | BUG_ON(!bh); |
378 | BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); | 378 | BUG_ON(!(handle->flags & OCFS2_HANDLE_STARTED)); |
379 | 379 | ||
380 | mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %hu\n", | 380 | mlog_entry("bh->b_blocknr=%llu, type=%d (\"%s\"), bh->b_size = %zu\n", |
381 | (unsigned long long)bh->b_blocknr, type, | 381 | (unsigned long long)bh->b_blocknr, type, |
382 | (type == OCFS2_JOURNAL_ACCESS_CREATE) ? | 382 | (type == OCFS2_JOURNAL_ACCESS_CREATE) ? |
383 | "OCFS2_JOURNAL_ACCESS_CREATE" : | 383 | "OCFS2_JOURNAL_ACCESS_CREATE" : |
@@ -582,7 +582,8 @@ int ocfs2_journal_init(struct ocfs2_journal *journal, int *dirty) | |||
582 | } | 582 | } |
583 | 583 | ||
584 | mlog(0, "inode->i_size = %lld\n", inode->i_size); | 584 | mlog(0, "inode->i_size = %lld\n", inode->i_size); |
585 | mlog(0, "inode->i_blocks = %lu\n", inode->i_blocks); | 585 | mlog(0, "inode->i_blocks = %llu\n", |
586 | (unsigned long long)inode->i_blocks); | ||
586 | mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); | 587 | mlog(0, "inode->ip_clusters = %u\n", OCFS2_I(inode)->ip_clusters); |
587 | 588 | ||
588 | /* call the kernels journal init function now */ | 589 | /* call the kernels journal init function now */ |
@@ -850,8 +851,9 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
850 | 851 | ||
851 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); | 852 | memset(bhs, 0, sizeof(struct buffer_head *) * CONCURRENT_JOURNAL_FILL); |
852 | 853 | ||
853 | mlog(0, "Force reading %lu blocks\n", | 854 | mlog(0, "Force reading %llu blocks\n", |
854 | (inode->i_blocks >> (inode->i_sb->s_blocksize_bits - 9))); | 855 | (unsigned long long)(inode->i_blocks >> |
856 | (inode->i_sb->s_blocksize_bits - 9))); | ||
855 | 857 | ||
856 | v_blkno = 0; | 858 | v_blkno = 0; |
857 | while (v_blkno < | 859 | while (v_blkno < |
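inode->i_blocks may be wider than unsigned long depending on configuration, so the portable idiom is to cast to unsigned long long and print with %llu; likewise bh->b_size is now a size_t, hence %zu in the ocfs2_journal_access hunk. The namei.c hunk below applies the same i_blocks fix. The general shape:

    printk(KERN_DEBUG "i_blocks = %llu, b_size = %zu\n",
           (unsigned long long)inode->i_blocks, bh->b_size);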
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index 274f61d0cda9..0673862c8bdd 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c | |||
@@ -1444,8 +1444,9 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, | |||
1444 | * write i_size + 1 bytes. */ | 1444 | * write i_size + 1 bytes. */ |
1445 | blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; | 1445 | blocks = (bytes_left + sb->s_blocksize - 1) >> sb->s_blocksize_bits; |
1446 | 1446 | ||
1447 | mlog_entry("i_blocks = %lu, i_size = %llu, blocks = %d\n", | 1447 | mlog_entry("i_blocks = %llu, i_size = %llu, blocks = %d\n", |
1448 | inode->i_blocks, i_size_read(inode), blocks); | 1448 | (unsigned long long)inode->i_blocks, |
1449 | i_size_read(inode), blocks); | ||
1449 | 1450 | ||
1450 | /* Sanity check -- make sure we're going to fit. */ | 1451 | /* Sanity check -- make sure we're going to fit. */ |
1451 | if (bytes_left > | 1452 | if (bytes_left > |
diff --git a/fs/partitions/devfs.c b/fs/partitions/devfs.c index 87f50444fd39..3f0a780c9cec 100644 --- a/fs/partitions/devfs.c +++ b/fs/partitions/devfs.c | |||
@@ -6,7 +6,7 @@ | |||
6 | #include <linux/vmalloc.h> | 6 | #include <linux/vmalloc.h> |
7 | #include <linux/genhd.h> | 7 | #include <linux/genhd.h> |
8 | #include <linux/bitops.h> | 8 | #include <linux/bitops.h> |
9 | #include <asm/semaphore.h> | 9 | #include <linux/mutex.h> |
10 | 10 | ||
11 | 11 | ||
12 | struct unique_numspace { | 12 | struct unique_numspace { |
@@ -16,7 +16,7 @@ struct unique_numspace { | |||
16 | struct semaphore mutex; | 16 | struct semaphore mutex; |
17 | }; | 17 | }; |
18 | 18 | ||
19 | static DECLARE_MUTEX(numspace_mutex); | 19 | static DEFINE_MUTEX(numspace_mutex); |
20 | 20 | ||
21 | static int expand_numspace(struct unique_numspace *s) | 21 | static int expand_numspace(struct unique_numspace *s) |
22 | { | 22 | { |
@@ -48,7 +48,7 @@ static int alloc_unique_number(struct unique_numspace *s) | |||
48 | { | 48 | { |
49 | int rval = 0; | 49 | int rval = 0; |
50 | 50 | ||
51 | down(&numspace_mutex); | 51 | mutex_lock(&numspace_mutex); |
52 | if (s->num_free < 1) | 52 | if (s->num_free < 1) |
53 | rval = expand_numspace(s); | 53 | rval = expand_numspace(s); |
54 | if (!rval) { | 54 | if (!rval) { |
@@ -56,7 +56,7 @@ static int alloc_unique_number(struct unique_numspace *s) | |||
56 | --s->num_free; | 56 | --s->num_free; |
57 | __set_bit(rval, s->bits); | 57 | __set_bit(rval, s->bits); |
58 | } | 58 | } |
59 | up(&numspace_mutex); | 59 | mutex_unlock(&numspace_mutex); |
60 | 60 | ||
61 | return rval; | 61 | return rval; |
62 | } | 62 | } |
@@ -66,11 +66,11 @@ static void dealloc_unique_number(struct unique_numspace *s, int number) | |||
66 | int old_val; | 66 | int old_val; |
67 | 67 | ||
68 | if (number >= 0) { | 68 | if (number >= 0) { |
69 | down(&numspace_mutex); | 69 | mutex_lock(&numspace_mutex); |
70 | old_val = __test_and_clear_bit(number, s->bits); | 70 | old_val = __test_and_clear_bit(number, s->bits); |
71 | if (old_val) | 71 | if (old_val) |
72 | ++s->num_free; | 72 | ++s->num_free; |
73 | up(&numspace_mutex); | 73 | mutex_unlock(&numspace_mutex); |
74 | } | 74 | } |
75 | } | 75 | } |
76 | 76 | ||
diff --git a/fs/pipe.c b/fs/pipe.c --- a/fs/pipe.c +++ b/fs/pipe.c | |||
@@ -675,7 +675,7 @@ fail_page: | |||
675 | return NULL; | 675 | return NULL; |
676 | } | 676 | } |
677 | 677 | ||
678 | static struct vfsmount *pipe_mnt; | 678 | static struct vfsmount *pipe_mnt __read_mostly; |
679 | static int pipefs_delete_dentry(struct dentry *dentry) | 679 | static int pipefs_delete_dentry(struct dentry *dentry) |
680 | { | 680 | { |
681 | return 1; | 681 | return 1; |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 7eb1bd7f800c..7a76ad570230 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -330,7 +330,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) | |||
330 | unsigned long min_flt = 0, maj_flt = 0; | 330 | unsigned long min_flt = 0, maj_flt = 0; |
331 | cputime_t cutime, cstime, utime, stime; | 331 | cputime_t cutime, cstime, utime, stime; |
332 | unsigned long rsslim = 0; | 332 | unsigned long rsslim = 0; |
333 | DEFINE_KTIME(it_real_value); | ||
334 | struct task_struct *t; | 333 | struct task_struct *t; |
335 | char tcomm[sizeof(task->comm)]; | 334 | char tcomm[sizeof(task->comm)]; |
336 | 335 | ||
@@ -386,7 +385,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) | |||
386 | utime = cputime_add(utime, task->signal->utime); | 385 | utime = cputime_add(utime, task->signal->utime); |
387 | stime = cputime_add(stime, task->signal->stime); | 386 | stime = cputime_add(stime, task->signal->stime); |
388 | } | 387 | } |
389 | it_real_value = task->signal->real_timer.expires; | ||
390 | } | 388 | } |
391 | ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0; | 389 | ppid = pid_alive(task) ? task->group_leader->real_parent->tgid : 0; |
392 | read_unlock(&tasklist_lock); | 390 | read_unlock(&tasklist_lock); |
@@ -413,7 +411,7 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) | |||
413 | start_time = nsec_to_clock_t(start_time); | 411 | start_time = nsec_to_clock_t(start_time); |
414 | 412 | ||
415 | res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ | 413 | res = sprintf(buffer,"%d (%s) %c %d %d %d %d %d %lu %lu \ |
416 | %lu %lu %lu %lu %lu %ld %ld %ld %ld %d %ld %llu %lu %ld %lu %lu %lu %lu %lu \ | 414 | %lu %lu %lu %lu %lu %ld %ld %ld %ld %d 0 %llu %lu %ld %lu %lu %lu %lu %lu \ |
417 | %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", | 415 | %lu %lu %lu %lu %lu %lu %lu %lu %d %d %lu %lu\n", |
418 | task->pid, | 416 | task->pid, |
419 | tcomm, | 417 | tcomm, |
@@ -435,7 +433,6 @@ static int do_task_stat(struct task_struct *task, char * buffer, int whole) | |||
435 | priority, | 433 | priority, |
436 | nice, | 434 | nice, |
437 | num_threads, | 435 | num_threads, |
438 | (long) ktime_to_clock_t(it_real_value), | ||
439 | start_time, | 436 | start_time, |
440 | vsize, | 437 | vsize, |
441 | mm ? get_mm_rss(mm) : 0, | 438 | mm ? get_mm_rss(mm) : 0, |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index 20e5c4509a43..47b7a20d45eb 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/idr.h> | 19 | #include <linux/idr.h> |
20 | #include <linux/namei.h> | 20 | #include <linux/namei.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
22 | #include <linux/spinlock.h> | ||
22 | #include <asm/uaccess.h> | 23 | #include <asm/uaccess.h> |
23 | 24 | ||
24 | #include "internal.h" | 25 | #include "internal.h" |
@@ -29,6 +30,8 @@ static ssize_t proc_file_write(struct file *file, const char __user *buffer, | |||
29 | size_t count, loff_t *ppos); | 30 | size_t count, loff_t *ppos); |
30 | static loff_t proc_file_lseek(struct file *, loff_t, int); | 31 | static loff_t proc_file_lseek(struct file *, loff_t, int); |
31 | 32 | ||
33 | DEFINE_SPINLOCK(proc_subdir_lock); | ||
34 | |||
32 | int proc_match(int len, const char *name, struct proc_dir_entry *de) | 35 | int proc_match(int len, const char *name, struct proc_dir_entry *de) |
33 | { | 36 | { |
34 | if (de->namelen != len) | 37 | if (de->namelen != len) |
@@ -277,7 +280,9 @@ static int xlate_proc_name(const char *name, | |||
277 | const char *cp = name, *next; | 280 | const char *cp = name, *next; |
278 | struct proc_dir_entry *de; | 281 | struct proc_dir_entry *de; |
279 | int len; | 282 | int len; |
283 | int rtn = 0; | ||
280 | 284 | ||
285 | spin_lock(&proc_subdir_lock); | ||
281 | de = &proc_root; | 286 | de = &proc_root; |
282 | while (1) { | 287 | while (1) { |
283 | next = strchr(cp, '/'); | 288 | next = strchr(cp, '/'); |
@@ -289,13 +294,17 @@ static int xlate_proc_name(const char *name, | |||
289 | if (proc_match(len, cp, de)) | 294 | if (proc_match(len, cp, de)) |
290 | break; | 295 | break; |
291 | } | 296 | } |
292 | if (!de) | 297 | if (!de) { |
293 | return -ENOENT; | 298 | rtn = -ENOENT; |
299 | goto out; | ||
300 | } | ||
294 | cp += len + 1; | 301 | cp += len + 1; |
295 | } | 302 | } |
296 | *residual = cp; | 303 | *residual = cp; |
297 | *ret = de; | 304 | *ret = de; |
298 | return 0; | 305 | out: |
306 | spin_unlock(&proc_subdir_lock); | ||
307 | return rtn; | ||
299 | } | 308 | } |
300 | 309 | ||
301 | static DEFINE_IDR(proc_inum_idr); | 310 | static DEFINE_IDR(proc_inum_idr); |
@@ -380,6 +389,7 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam | |||
380 | int error = -ENOENT; | 389 | int error = -ENOENT; |
381 | 390 | ||
382 | lock_kernel(); | 391 | lock_kernel(); |
392 | spin_lock(&proc_subdir_lock); | ||
383 | de = PDE(dir); | 393 | de = PDE(dir); |
384 | if (de) { | 394 | if (de) { |
385 | for (de = de->subdir; de ; de = de->next) { | 395 | for (de = de->subdir; de ; de = de->next) { |
@@ -388,12 +398,15 @@ struct dentry *proc_lookup(struct inode * dir, struct dentry *dentry, struct nam | |||
388 | if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { | 398 | if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { |
389 | unsigned int ino = de->low_ino; | 399 | unsigned int ino = de->low_ino; |
390 | 400 | ||
401 | spin_unlock(&proc_subdir_lock); | ||
391 | error = -EINVAL; | 402 | error = -EINVAL; |
392 | inode = proc_get_inode(dir->i_sb, ino, de); | 403 | inode = proc_get_inode(dir->i_sb, ino, de); |
404 | spin_lock(&proc_subdir_lock); | ||
393 | break; | 405 | break; |
394 | } | 406 | } |
395 | } | 407 | } |
396 | } | 408 | } |
409 | spin_unlock(&proc_subdir_lock); | ||
397 | unlock_kernel(); | 410 | unlock_kernel(); |
398 | 411 | ||
399 | if (inode) { | 412 | if (inode) { |
@@ -447,11 +460,13 @@ int proc_readdir(struct file * filp, | |||
447 | filp->f_pos++; | 460 | filp->f_pos++; |
448 | /* fall through */ | 461 | /* fall through */ |
449 | default: | 462 | default: |
463 | spin_lock(&proc_subdir_lock); | ||
450 | de = de->subdir; | 464 | de = de->subdir; |
451 | i -= 2; | 465 | i -= 2; |
452 | for (;;) { | 466 | for (;;) { |
453 | if (!de) { | 467 | if (!de) { |
454 | ret = 1; | 468 | ret = 1; |
469 | spin_unlock(&proc_subdir_lock); | ||
455 | goto out; | 470 | goto out; |
456 | } | 471 | } |
457 | if (!i) | 472 | if (!i) |
@@ -461,12 +476,16 @@ int proc_readdir(struct file * filp, | |||
461 | } | 476 | } |
462 | 477 | ||
463 | do { | 478 | do { |
479 | /* filldir passes info to user space */ | ||
480 | spin_unlock(&proc_subdir_lock); | ||
464 | if (filldir(dirent, de->name, de->namelen, filp->f_pos, | 481 | if (filldir(dirent, de->name, de->namelen, filp->f_pos, |
465 | de->low_ino, de->mode >> 12) < 0) | 482 | de->low_ino, de->mode >> 12) < 0) |
466 | goto out; | 483 | goto out; |
484 | spin_lock(&proc_subdir_lock); | ||
467 | filp->f_pos++; | 485 | filp->f_pos++; |
468 | de = de->next; | 486 | de = de->next; |
469 | } while (de); | 487 | } while (de); |
488 | spin_unlock(&proc_subdir_lock); | ||
470 | } | 489 | } |
471 | ret = 1; | 490 | ret = 1; |
472 | out: unlock_kernel(); | 491 | out: unlock_kernel(); |
@@ -500,9 +519,13 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp | |||
500 | if (i == 0) | 519 | if (i == 0) |
501 | return -EAGAIN; | 520 | return -EAGAIN; |
502 | dp->low_ino = i; | 521 | dp->low_ino = i; |
522 | |||
523 | spin_lock(&proc_subdir_lock); | ||
503 | dp->next = dir->subdir; | 524 | dp->next = dir->subdir; |
504 | dp->parent = dir; | 525 | dp->parent = dir; |
505 | dir->subdir = dp; | 526 | dir->subdir = dp; |
527 | spin_unlock(&proc_subdir_lock); | ||
528 | |||
506 | if (S_ISDIR(dp->mode)) { | 529 | if (S_ISDIR(dp->mode)) { |
507 | if (dp->proc_iops == NULL) { | 530 | if (dp->proc_iops == NULL) { |
508 | dp->proc_fops = &proc_dir_operations; | 531 | dp->proc_fops = &proc_dir_operations; |
@@ -694,6 +717,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
694 | if (!parent && xlate_proc_name(name, &parent, &fn) != 0) | 717 | if (!parent && xlate_proc_name(name, &parent, &fn) != 0) |
695 | goto out; | 718 | goto out; |
696 | len = strlen(fn); | 719 | len = strlen(fn); |
720 | |||
721 | spin_lock(&proc_subdir_lock); | ||
697 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | 722 | for (p = &parent->subdir; *p; p=&(*p)->next ) { |
698 | if (!proc_match(len, fn, *p)) | 723 | if (!proc_match(len, fn, *p)) |
699 | continue; | 724 | continue; |
@@ -714,6 +739,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | |||
714 | } | 739 | } |
715 | break; | 740 | break; |
716 | } | 741 | } |
742 | spin_unlock(&proc_subdir_lock); | ||
717 | out: | 743 | out: |
718 | return; | 744 | return; |
719 | } | 745 | } |
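The fs/proc/generic.c hunks above add proc_subdir_lock, a spinlock that serializes every walk and mutation of a proc_dir_entry's singly linked ->subdir list: name translation, lookup, readdir, registration and removal all now run under it. Note that proc_lookup() and proc_readdir() deliberately drop the lock around proc_get_inode() and filldir(), since those may sleep and a spinlock must not be held across a sleeping call; the lock protects only the list linkage, not the lifetime of the entries themselves. A condensed sketch of the pattern (not the exact kernel code):

    /* Hold proc_subdir_lock only over list traversal; release it before
     * any operation that can sleep.  Assumes kernel context where
     * proc_subdir_lock and struct proc_dir_entry are visible. */
    static struct proc_dir_entry *find_subdir(struct proc_dir_entry *dir,
                                              const char *name, int len)
    {
            struct proc_dir_entry *de;

            spin_lock(&proc_subdir_lock);
            for (de = dir->subdir; de; de = de->next)
                    if (de->namelen == len && !memcmp(de->name, name, len))
                            break;
            spin_unlock(&proc_subdir_lock);
            return de;      /* may be NULL; sleeping work happens unlocked */
    }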
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index 9bdd077d6f55..596b4b4f1cc8 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c | |||
@@ -136,9 +136,11 @@ void proc_device_tree_add_node(struct device_node *np, | |||
136 | * properties are quite unimportant for us though, thus we | 136 | * properties are quite unimportant for us though, thus we |
137 | * simply "skip" them here, but we do have to check. | 137 | * simply "skip" them here, but we do have to check. |
138 | */ | 138 | */ |
139 | spin_lock(&proc_subdir_lock); | ||
139 | for (ent = de->subdir; ent != NULL; ent = ent->next) | 140 | for (ent = de->subdir; ent != NULL; ent = ent->next) |
140 | if (!strcmp(ent->name, pp->name)) | 141 | if (!strcmp(ent->name, pp->name)) |
141 | break; | 142 | break; |
143 | spin_unlock(&proc_subdir_lock); | ||
142 | if (ent != NULL) { | 144 | if (ent != NULL) { |
143 | printk(KERN_WARNING "device-tree: property \"%s\" name" | 145 | printk(KERN_WARNING "device-tree: property \"%s\" name" |
144 | " conflicts with node in %s\n", pp->name, | 146 | " conflicts with node in %s\n", pp->name, |
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index d60f6238c66a..9857e50f85e7 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c | |||
@@ -466,7 +466,6 @@ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block, | |||
466 | direct_IO request. */ | 466 | direct_IO request. */ |
467 | static int reiserfs_get_blocks_direct_io(struct inode *inode, | 467 | static int reiserfs_get_blocks_direct_io(struct inode *inode, |
468 | sector_t iblock, | 468 | sector_t iblock, |
469 | unsigned long max_blocks, | ||
470 | struct buffer_head *bh_result, | 469 | struct buffer_head *bh_result, |
471 | int create) | 470 | int create) |
472 | { | 471 | { |
@@ -2793,7 +2792,7 @@ static int invalidatepage_can_drop(struct inode *inode, struct buffer_head *bh) | |||
2793 | } | 2792 | } |
2794 | 2793 | ||
2795 | /* clm -- taken from fs/buffer.c:block_invalidate_page */ | 2794 | /* clm -- taken from fs/buffer.c:block_invalidate_page */ |
2796 | static int reiserfs_invalidatepage(struct page *page, unsigned long offset) | 2795 | static void reiserfs_invalidatepage(struct page *page, unsigned long offset) |
2797 | { | 2796 | { |
2798 | struct buffer_head *head, *bh, *next; | 2797 | struct buffer_head *head, *bh, *next; |
2799 | struct inode *inode = page->mapping->host; | 2798 | struct inode *inode = page->mapping->host; |
@@ -2832,10 +2831,12 @@ static int reiserfs_invalidatepage(struct page *page, unsigned long offset) | |||
2832 | * The get_block cached value has been unconditionally invalidated, | 2831 | * The get_block cached value has been unconditionally invalidated, |
2833 | * so real IO is not possible anymore. | 2832 | * so real IO is not possible anymore. |
2834 | */ | 2833 | */ |
2835 | if (!offset && ret) | 2834 | if (!offset && ret) { |
2836 | ret = try_to_release_page(page, 0); | 2835 | ret = try_to_release_page(page, 0); |
2836 | /* maybe should BUG_ON(!ret); - neilb */ | ||
2837 | } | ||
2837 | out: | 2838 | out: |
2838 | return ret; | 2839 | return; |
2839 | } | 2840 | } |
2840 | 2841 | ||
2841 | static int reiserfs_set_page_dirty(struct page *page) | 2842 | static int reiserfs_set_page_dirty(struct page *page) |
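The reiserfs_invalidatepage() hunk above (and the xfs_vm_invalidatepage() hunk further down) tracks an interface change: the address_space ->invalidatepage() method now returns void, since no caller used the return value, so implementations simply fall off the end instead of returning ret. A hedged sketch of the new shape of such an operation ("myfs_" names are illustrative, not from this patch):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    /* Sketch only: drop buffer state for the truncated part of the page;
     * nothing is returned to the caller any more. */
    static void myfs_invalidatepage(struct page *page, unsigned long offset)
    {
            if (page_has_buffers(page))
                    block_invalidatepage(page, offset);
    }

    static struct address_space_operations myfs_aops = {
            .invalidatepage = myfs_invalidatepage,
            /* ... other methods ... */
    };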
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c index 78b40621b88b..27bd3a1df2ad 100644 --- a/fs/reiserfs/prints.c +++ b/fs/reiserfs/prints.c | |||
@@ -143,7 +143,7 @@ static void sprintf_buffer_head(char *buf, struct buffer_head *bh) | |||
143 | char b[BDEVNAME_SIZE]; | 143 | char b[BDEVNAME_SIZE]; |
144 | 144 | ||
145 | sprintf(buf, | 145 | sprintf(buf, |
146 | "dev %s, size %d, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", | 146 | "dev %s, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)", |
147 | bdevname(bh->b_bdev, b), bh->b_size, | 147 | bdevname(bh->b_bdev, b), bh->b_size, |
148 | (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), | 148 | (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)), |
149 | bh->b_state, bh->b_page, | 149 | bh->b_state, bh->b_page, |
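The prints.c hunk above fixes a format string: with bh->b_size now declared as a size_t (which the switch to %zd implies), the portable length modifier is 'z' rather than plain %d, which only matches int and would warn or misprint on 64-bit targets. A trivial illustration:

    #include <stdio.h>

    int main(void)
    {
            size_t sz = 4096;       /* e.g. a buffer_head's b_size */

            /* %zu/%zd match size_t/ssize_t on 32- and 64-bit alike;
             * %d assumes int and is wrong wherever size_t is wider. */
            printf("size %zu\n", sz);
            return 0;
    }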
diff --git a/fs/super.c b/fs/super.c index 8743e9bbb297..a66f66bb8049 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/writeback.h> /* for the emergency remount stuff */ | 37 | #include <linux/writeback.h> /* for the emergency remount stuff */ |
38 | #include <linux/idr.h> | 38 | #include <linux/idr.h> |
39 | #include <linux/kobject.h> | 39 | #include <linux/kobject.h> |
40 | #include <linux/mutex.h> | ||
40 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
41 | 42 | ||
42 | 43 | ||
@@ -380,9 +381,9 @@ restart: | |||
380 | void sync_filesystems(int wait) | 381 | void sync_filesystems(int wait) |
381 | { | 382 | { |
382 | struct super_block *sb; | 383 | struct super_block *sb; |
383 | static DECLARE_MUTEX(mutex); | 384 | static DEFINE_MUTEX(mutex); |
384 | 385 | ||
385 | down(&mutex); /* Could be down_interruptible */ | 386 | mutex_lock(&mutex); /* Could be mutex_lock_interruptible */ |
386 | spin_lock(&sb_lock); | 387 | spin_lock(&sb_lock); |
387 | list_for_each_entry(sb, &super_blocks, s_list) { | 388 | list_for_each_entry(sb, &super_blocks, s_list) { |
388 | if (!sb->s_op->sync_fs) | 389 | if (!sb->s_op->sync_fs) |
@@ -411,7 +412,7 @@ restart: | |||
411 | goto restart; | 412 | goto restart; |
412 | } | 413 | } |
413 | spin_unlock(&sb_lock); | 414 | spin_unlock(&sb_lock); |
414 | up(&mutex); | 415 | mutex_unlock(&mutex); |
415 | } | 416 | } |
416 | 417 | ||
417 | /** | 418 | /** |
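The fs/super.c hunk is part of the tree-wide conversion of semaphores used purely as sleeping locks to the mutex API: DECLARE_MUTEX()/down()/up(), i.e. a struct semaphore initialized to 1, becomes DEFINE_MUTEX()/mutex_lock()/mutex_unlock(), and gains the stricter debugging the mutex code provides. The two idioms side by side, as a sketch ("my_lock" is illustrative):

    #include <linux/mutex.h>

    /* Old style, as removed above:
     *     static DECLARE_MUTEX(lock);
     *     down(&lock);  ...  up(&lock);
     */

    /* New style: a real mutex used the same way. */
    static DEFINE_MUTEX(my_lock);

    static void serialized_section(void)
    {
            mutex_lock(&my_lock);
            /* ... critical section; may sleep ... */
            mutex_unlock(&my_lock);
    }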
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c index 97fc056130eb..c02f7c5b7462 100644 --- a/fs/xfs/linux-2.6/xfs_aops.c +++ b/fs/xfs/linux-2.6/xfs_aops.c | |||
@@ -1310,20 +1310,21 @@ xfs_get_block( | |||
1310 | struct buffer_head *bh_result, | 1310 | struct buffer_head *bh_result, |
1311 | int create) | 1311 | int create) |
1312 | { | 1312 | { |
1313 | return __xfs_get_block(inode, iblock, 0, bh_result, | 1313 | return __xfs_get_block(inode, iblock, |
1314 | create, 0, BMAPI_WRITE); | 1314 | bh_result->b_size >> inode->i_blkbits, |
1315 | bh_result, create, 0, BMAPI_WRITE); | ||
1315 | } | 1316 | } |
1316 | 1317 | ||
1317 | STATIC int | 1318 | STATIC int |
1318 | xfs_get_blocks_direct( | 1319 | xfs_get_blocks_direct( |
1319 | struct inode *inode, | 1320 | struct inode *inode, |
1320 | sector_t iblock, | 1321 | sector_t iblock, |
1321 | unsigned long max_blocks, | ||
1322 | struct buffer_head *bh_result, | 1322 | struct buffer_head *bh_result, |
1323 | int create) | 1323 | int create) |
1324 | { | 1324 | { |
1325 | return __xfs_get_block(inode, iblock, max_blocks, bh_result, | 1325 | return __xfs_get_block(inode, iblock, |
1326 | create, 1, BMAPI_WRITE|BMAPI_DIRECT); | 1326 | bh_result->b_size >> inode->i_blkbits, |
1327 | bh_result, create, 1, BMAPI_WRITE|BMAPI_DIRECT); | ||
1327 | } | 1328 | } |
1328 | 1329 | ||
1329 | STATIC void | 1330 | STATIC void |
@@ -1442,14 +1443,14 @@ xfs_vm_readpages( | |||
1442 | return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); | 1443 | return mpage_readpages(mapping, pages, nr_pages, xfs_get_block); |
1443 | } | 1444 | } |
1444 | 1445 | ||
1445 | STATIC int | 1446 | STATIC void |
1446 | xfs_vm_invalidatepage( | 1447 | xfs_vm_invalidatepage( |
1447 | struct page *page, | 1448 | struct page *page, |
1448 | unsigned long offset) | 1449 | unsigned long offset) |
1449 | { | 1450 | { |
1450 | xfs_page_trace(XFS_INVALIDPAGE_ENTER, | 1451 | xfs_page_trace(XFS_INVALIDPAGE_ENTER, |
1451 | page->mapping->host, page, offset); | 1452 | page->mapping->host, page, offset); |
1452 | return block_invalidatepage(page, offset); | 1453 | block_invalidatepage(page, offset); |
1453 | } | 1454 | } |
1454 | 1455 | ||
1455 | struct address_space_operations xfs_address_space_operations = { | 1456 | struct address_space_operations xfs_address_space_operations = { |
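The xfs_aops.c hunks track a change in the get_block interface used for direct I/O: instead of passing an explicit max_blocks argument, the caller now pre-loads bh_result->b_size with the size of the mapping it wants, and the filesystem recovers the block count as b_size >> inode->i_blkbits (the same change removes the argument from reiserfs_get_blocks_direct_io earlier in this diff). A sketch of what a get_block implementation sees under the new convention ("myfs_" is illustrative):

    #include <linux/fs.h>
    #include <linux/buffer_head.h>

    static int myfs_get_block(struct inode *inode, sector_t iblock,
                              struct buffer_head *bh_result, int create)
    {
            /* request size arrives in b_size instead of a max_blocks arg */
            unsigned long max_blocks = bh_result->b_size >> inode->i_blkbits;

            /* ... map up to max_blocks contiguous blocks at iblock ... */

            /* report the length actually mapped back through b_size */
            bh_result->b_size = max_blocks << inode->i_blkbits;
            return 0;
    }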
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index 8355faf8ffde..1884300417e3 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -375,9 +375,8 @@ xfs_init_zones(void) | |||
375 | if (!xfs_ioend_zone) | 375 | if (!xfs_ioend_zone) |
376 | goto out_destroy_vnode_zone; | 376 | goto out_destroy_vnode_zone; |
377 | 377 | ||
378 | xfs_ioend_pool = mempool_create(4 * MAX_BUF_PER_PAGE, | 378 | xfs_ioend_pool = mempool_create_slab_pool(4 * MAX_BUF_PER_PAGE, |
379 | mempool_alloc_slab, mempool_free_slab, | 379 | xfs_ioend_zone); |
380 | xfs_ioend_zone); | ||
381 | if (!xfs_ioend_pool) | 380 | if (!xfs_ioend_pool) |
382 | goto out_free_ioend_zone; | 381 | goto out_free_ioend_zone; |
383 | return 0; | 382 | return 0; |
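mempool_create_slab_pool(), used above, is a convenience wrapper for the common case of backing a mempool with a slab cache: in effect it calls mempool_create() with mempool_alloc_slab/mempool_free_slab and the cache as the pool data, so call sites no longer spell out the two callbacks. Roughly (a sketch, not the exact mm/mempool.c definition):

    #include <linux/mempool.h>
    #include <linux/slab.h>

    static inline mempool_t *slab_pool_sketch(int min_nr,
                                              struct kmem_cache *cachep)
    {
            return mempool_create(min_nr, mempool_alloc_slab,
                                  mempool_free_slab, cachep);
    }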
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h index 302201f1a097..3f88715e811e 100644 --- a/include/asm-alpha/bitops.h +++ b/include/asm-alpha/bitops.h | |||
@@ -261,7 +261,7 @@ static inline unsigned long ffz_b(unsigned long x) | |||
261 | 261 | ||
262 | static inline unsigned long ffz(unsigned long word) | 262 | static inline unsigned long ffz(unsigned long word) |
263 | { | 263 | { |
264 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 264 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
265 | /* Whee. EV67 can calculate it directly. */ | 265 | /* Whee. EV67 can calculate it directly. */ |
266 | return __kernel_cttz(~word); | 266 | return __kernel_cttz(~word); |
267 | #else | 267 | #else |
@@ -281,7 +281,7 @@ static inline unsigned long ffz(unsigned long word) | |||
281 | */ | 281 | */ |
282 | static inline unsigned long __ffs(unsigned long word) | 282 | static inline unsigned long __ffs(unsigned long word) |
283 | { | 283 | { |
284 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 284 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
285 | /* Whee. EV67 can calculate it directly. */ | 285 | /* Whee. EV67 can calculate it directly. */ |
286 | return __kernel_cttz(word); | 286 | return __kernel_cttz(word); |
287 | #else | 287 | #else |
@@ -313,20 +313,20 @@ static inline int ffs(int word) | |||
313 | /* | 313 | /* |
314 | * fls: find last bit set. | 314 | * fls: find last bit set. |
315 | */ | 315 | */ |
316 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 316 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
317 | static inline int fls(int word) | 317 | static inline int fls(int word) |
318 | { | 318 | { |
319 | return 64 - __kernel_ctlz(word & 0xffffffff); | 319 | return 64 - __kernel_ctlz(word & 0xffffffff); |
320 | } | 320 | } |
321 | #else | 321 | #else |
322 | #define fls generic_fls | 322 | #include <asm-generic/bitops/fls.h> |
323 | #endif | 323 | #endif |
324 | #define fls64 generic_fls64 | 324 | #include <asm-generic/bitops/fls64.h> |
325 | 325 | ||
326 | /* Compute powers of two for the given integer. */ | 326 | /* Compute powers of two for the given integer. */ |
327 | static inline long floor_log2(unsigned long word) | 327 | static inline long floor_log2(unsigned long word) |
328 | { | 328 | { |
329 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 329 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
330 | return 63 - __kernel_ctlz(word); | 330 | return 63 - __kernel_ctlz(word); |
331 | #else | 331 | #else |
332 | long bit; | 332 | long bit; |
@@ -347,7 +347,7 @@ static inline long ceil_log2(unsigned long word) | |||
347 | * of bits set) of a N-bit word | 347 | * of bits set) of a N-bit word |
348 | */ | 348 | */ |
349 | 349 | ||
350 | #if defined(__alpha_cix__) && defined(__alpha_fix__) | 350 | #if defined(CONFIG_ALPHA_EV6) && defined(CONFIG_ALPHA_EV67) |
351 | /* Whee. EV67 can calculate it directly. */ | 351 | /* Whee. EV67 can calculate it directly. */ |
352 | static inline unsigned long hweight64(unsigned long w) | 352 | static inline unsigned long hweight64(unsigned long w) |
353 | { | 353 | { |
@@ -358,112 +358,12 @@ static inline unsigned long hweight64(unsigned long w) | |||
358 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) | 358 | #define hweight16(x) (unsigned int) hweight64((x) & 0xfffful) |
359 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) | 359 | #define hweight8(x) (unsigned int) hweight64((x) & 0xfful) |
360 | #else | 360 | #else |
361 | static inline unsigned long hweight64(unsigned long w) | 361 | #include <asm-generic/bitops/hweight.h> |
362 | { | ||
363 | unsigned long result; | ||
364 | for (result = 0; w ; w >>= 1) | ||
365 | result += (w & 1); | ||
366 | return result; | ||
367 | } | ||
368 | |||
369 | #define hweight32(x) generic_hweight32(x) | ||
370 | #define hweight16(x) generic_hweight16(x) | ||
371 | #define hweight8(x) generic_hweight8(x) | ||
372 | #endif | 362 | #endif |
373 | 363 | ||
374 | #endif /* __KERNEL__ */ | 364 | #endif /* __KERNEL__ */ |
375 | 365 | ||
376 | /* | 366 | #include <asm-generic/bitops/find.h> |
377 | * Find next zero bit in a bitmap reasonably efficiently.. | ||
378 | */ | ||
379 | static inline unsigned long | ||
380 | find_next_zero_bit(const void *addr, unsigned long size, unsigned long offset) | ||
381 | { | ||
382 | const unsigned long *p = addr; | ||
383 | unsigned long result = offset & ~63UL; | ||
384 | unsigned long tmp; | ||
385 | |||
386 | p += offset >> 6; | ||
387 | if (offset >= size) | ||
388 | return size; | ||
389 | size -= result; | ||
390 | offset &= 63UL; | ||
391 | if (offset) { | ||
392 | tmp = *(p++); | ||
393 | tmp |= ~0UL >> (64-offset); | ||
394 | if (size < 64) | ||
395 | goto found_first; | ||
396 | if (~tmp) | ||
397 | goto found_middle; | ||
398 | size -= 64; | ||
399 | result += 64; | ||
400 | } | ||
401 | while (size & ~63UL) { | ||
402 | if (~(tmp = *(p++))) | ||
403 | goto found_middle; | ||
404 | result += 64; | ||
405 | size -= 64; | ||
406 | } | ||
407 | if (!size) | ||
408 | return result; | ||
409 | tmp = *p; | ||
410 | found_first: | ||
411 | tmp |= ~0UL << size; | ||
412 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
413 | return result + size; /* Nope. */ | ||
414 | found_middle: | ||
415 | return result + ffz(tmp); | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * Find next one bit in a bitmap reasonably efficiently. | ||
420 | */ | ||
421 | static inline unsigned long | ||
422 | find_next_bit(const void * addr, unsigned long size, unsigned long offset) | ||
423 | { | ||
424 | const unsigned long *p = addr; | ||
425 | unsigned long result = offset & ~63UL; | ||
426 | unsigned long tmp; | ||
427 | |||
428 | p += offset >> 6; | ||
429 | if (offset >= size) | ||
430 | return size; | ||
431 | size -= result; | ||
432 | offset &= 63UL; | ||
433 | if (offset) { | ||
434 | tmp = *(p++); | ||
435 | tmp &= ~0UL << offset; | ||
436 | if (size < 64) | ||
437 | goto found_first; | ||
438 | if (tmp) | ||
439 | goto found_middle; | ||
440 | size -= 64; | ||
441 | result += 64; | ||
442 | } | ||
443 | while (size & ~63UL) { | ||
444 | if ((tmp = *(p++))) | ||
445 | goto found_middle; | ||
446 | result += 64; | ||
447 | size -= 64; | ||
448 | } | ||
449 | if (!size) | ||
450 | return result; | ||
451 | tmp = *p; | ||
452 | found_first: | ||
453 | tmp &= ~0UL >> (64 - size); | ||
454 | if (!tmp) | ||
455 | return result + size; | ||
456 | found_middle: | ||
457 | return result + __ffs(tmp); | ||
458 | } | ||
459 | |||
460 | /* | ||
461 | * The optimizer actually does good code for this case. | ||
462 | */ | ||
463 | #define find_first_zero_bit(addr, size) \ | ||
464 | find_next_zero_bit((addr), (size), 0) | ||
465 | #define find_first_bit(addr, size) \ | ||
466 | find_next_bit((addr), (size), 0) | ||
467 | 367 | ||
468 | #ifdef __KERNEL__ | 368 | #ifdef __KERNEL__ |
469 | 369 | ||
@@ -487,21 +387,12 @@ sched_find_first_bit(unsigned long b[3]) | |||
487 | return __ffs(b0) + ofs; | 387 | return __ffs(b0) + ofs; |
488 | } | 388 | } |
489 | 389 | ||
390 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
490 | 391 | ||
491 | #define ext2_set_bit __test_and_set_bit | ||
492 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 392 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
493 | #define ext2_clear_bit __test_and_clear_bit | ||
494 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 393 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
495 | #define ext2_test_bit test_bit | 394 | |
496 | #define ext2_find_first_zero_bit find_first_zero_bit | 395 | #include <asm-generic/bitops/minix.h> |
497 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
498 | |||
499 | /* Bitmap functions for the minix filesystem. */ | ||
500 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) | ||
501 | #define minix_set_bit(nr,addr) __set_bit(nr,addr) | ||
502 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) | ||
503 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
504 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
505 | 396 | ||
506 | #endif /* __KERNEL__ */ | 397 | #endif /* __KERNEL__ */ |
507 | 398 | ||
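The alpha bitops hunks above are part of a series that moves architectures onto the shared helpers under include/asm-generic/bitops/ instead of each one carrying near-identical C fallbacks for hweight*, fls/fls64, the find_*_bit family, the ext2/minix bitmap wrappers and sched_find_first_bit; alpha keeps its EV67 cttz/ctlz fast paths and only the portable fallbacks become includes. The same hunks (and the fpu.h hunk below) also switch the fast-path guards from the compiler-provided __alpha_cix__/__alpha_fix__ macros to CONFIG_ALPHA_EV6/CONFIG_ALPHA_EV67, so the choice follows the kernel configuration rather than whatever flags the compiler was invoked with. For reference, the portable population count such a generic header supplies is the usual mask-and-add reduction, roughly:

    /* Sketch of a portable 32-bit population count in the spirit of the
     * generic hweight helpers (not copied from the kernel sources). */
    static inline unsigned int hweight32_sketch(unsigned int w)
    {
            w = w - ((w >> 1) & 0x55555555);                 /* 2-bit sums */
            w = (w & 0x33333333) + ((w >> 2) & 0x33333333);  /* 4-bit sums */
            w = (w + (w >> 4)) & 0x0f0f0f0f;                 /* byte sums  */
            return (w * 0x01010101) >> 24;                   /* total      */
    }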
diff --git a/include/asm-alpha/fpu.h b/include/asm-alpha/fpu.h index c203fc2fa5cd..ecb17a72acc3 100644 --- a/include/asm-alpha/fpu.h +++ b/include/asm-alpha/fpu.h | |||
@@ -130,7 +130,7 @@ rdfpcr(void) | |||
130 | { | 130 | { |
131 | unsigned long tmp, ret; | 131 | unsigned long tmp, ret; |
132 | 132 | ||
133 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 133 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
134 | __asm__ __volatile__ ( | 134 | __asm__ __volatile__ ( |
135 | "ftoit $f0,%0\n\t" | 135 | "ftoit $f0,%0\n\t" |
136 | "mf_fpcr $f0\n\t" | 136 | "mf_fpcr $f0\n\t" |
@@ -154,7 +154,7 @@ wrfpcr(unsigned long val) | |||
154 | { | 154 | { |
155 | unsigned long tmp; | 155 | unsigned long tmp; |
156 | 156 | ||
157 | #if defined(__alpha_cix__) || defined(__alpha_fix__) | 157 | #if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67) |
158 | __asm__ __volatile__ ( | 158 | __asm__ __volatile__ ( |
159 | "ftoit $f0,%0\n\t" | 159 | "ftoit $f0,%0\n\t" |
160 | "itoft %1,$f0\n\t" | 160 | "itoft %1,$f0\n\t" |
diff --git a/include/asm-arm/bitops.h b/include/asm-arm/bitops.h index d02de721ecc1..0ac54b1a8bad 100644 --- a/include/asm-arm/bitops.h +++ b/include/asm-arm/bitops.h | |||
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | |||
117 | return res & mask; | 117 | return res & mask; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* | 120 | #include <asm-generic/bitops/non-atomic.h> |
121 | * Now the non-atomic variants. We let the compiler handle all | ||
122 | * optimisations for these. These are all _native_ endian. | ||
123 | */ | ||
124 | static inline void __set_bit(int nr, volatile unsigned long *p) | ||
125 | { | ||
126 | p[nr >> 5] |= (1UL << (nr & 31)); | ||
127 | } | ||
128 | |||
129 | static inline void __clear_bit(int nr, volatile unsigned long *p) | ||
130 | { | ||
131 | p[nr >> 5] &= ~(1UL << (nr & 31)); | ||
132 | } | ||
133 | |||
134 | static inline void __change_bit(int nr, volatile unsigned long *p) | ||
135 | { | ||
136 | p[nr >> 5] ^= (1UL << (nr & 31)); | ||
137 | } | ||
138 | |||
139 | static inline int __test_and_set_bit(int nr, volatile unsigned long *p) | ||
140 | { | ||
141 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
142 | |||
143 | p += nr >> 5; | ||
144 | |||
145 | oldval = *p; | ||
146 | *p = oldval | mask; | ||
147 | return oldval & mask; | ||
148 | } | ||
149 | |||
150 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) | ||
151 | { | ||
152 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
153 | |||
154 | p += nr >> 5; | ||
155 | |||
156 | oldval = *p; | ||
157 | *p = oldval & ~mask; | ||
158 | return oldval & mask; | ||
159 | } | ||
160 | |||
161 | static inline int __test_and_change_bit(int nr, volatile unsigned long *p) | ||
162 | { | ||
163 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
164 | |||
165 | p += nr >> 5; | ||
166 | |||
167 | oldval = *p; | ||
168 | *p = oldval ^ mask; | ||
169 | return oldval & mask; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * This routine doesn't need to be atomic. | ||
174 | */ | ||
175 | static inline int __test_bit(int nr, const volatile unsigned long * p) | ||
176 | { | ||
177 | return (p[nr >> 5] >> (nr & 31)) & 1UL; | ||
178 | } | ||
179 | 121 | ||
180 | /* | 122 | /* |
181 | * A note about Endian-ness. | 123 | * A note about Endian-ness. |
@@ -261,7 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
261 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | 203 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) |
262 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | 204 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) |
263 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | 205 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) |
264 | #define test_bit(nr,p) __test_bit(nr,p) | ||
265 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 206 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
266 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 207 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
267 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 208 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
@@ -280,7 +221,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
280 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) | 221 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_BE(test_and_set_bit,nr,p) |
281 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) | 222 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_BE(test_and_clear_bit,nr,p) |
282 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) | 223 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_BE(test_and_change_bit,nr,p) |
283 | #define test_bit(nr,p) __test_bit(nr,p) | ||
284 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) | 224 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_be(p,sz) |
285 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) | 225 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_be(p,sz,off) |
286 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 226 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
@@ -292,57 +232,41 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
292 | 232 | ||
293 | #if __LINUX_ARM_ARCH__ < 5 | 233 | #if __LINUX_ARM_ARCH__ < 5 |
294 | 234 | ||
295 | /* | 235 | #include <asm-generic/bitops/ffz.h> |
296 | * ffz = Find First Zero in word. Undefined if no zero exists, | 236 | #include <asm-generic/bitops/__ffs.h> |
297 | * so code should check against ~0UL first.. | 237 | #include <asm-generic/bitops/fls.h> |
298 | */ | 238 | #include <asm-generic/bitops/ffs.h> |
299 | static inline unsigned long ffz(unsigned long word) | ||
300 | { | ||
301 | int k; | ||
302 | |||
303 | word = ~word; | ||
304 | k = 31; | ||
305 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
306 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
307 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
308 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
309 | if (word & 0x40000000) { k -= 1; } | ||
310 | return k; | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
315 | * so code should check against ~0UL first.. | ||
316 | */ | ||
317 | static inline unsigned long __ffs(unsigned long word) | ||
318 | { | ||
319 | int k; | ||
320 | |||
321 | k = 31; | ||
322 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
323 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
324 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
325 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
326 | if (word & 0x40000000) { k -= 1; } | ||
327 | return k; | ||
328 | } | ||
329 | |||
330 | /* | ||
331 | * fls: find last bit set. | ||
332 | */ | ||
333 | 239 | ||
334 | #define fls(x) generic_fls(x) | 240 | #else |
335 | #define fls64(x) generic_fls64(x) | ||
336 | |||
337 | /* | ||
338 | * ffs: find first bit set. This is defined the same way as | ||
339 | * the libc and compiler builtin ffs routines, therefore | ||
340 | * differs in spirit from the above ffz (man ffs). | ||
341 | */ | ||
342 | 241 | ||
343 | #define ffs(x) generic_ffs(x) | 242 | static inline int constant_fls(int x) |
243 | { | ||
244 | int r = 32; | ||
344 | 245 | ||
345 | #else | 246 | if (!x) |
247 | return 0; | ||
248 | if (!(x & 0xffff0000u)) { | ||
249 | x <<= 16; | ||
250 | r -= 16; | ||
251 | } | ||
252 | if (!(x & 0xff000000u)) { | ||
253 | x <<= 8; | ||
254 | r -= 8; | ||
255 | } | ||
256 | if (!(x & 0xf0000000u)) { | ||
257 | x <<= 4; | ||
258 | r -= 4; | ||
259 | } | ||
260 | if (!(x & 0xc0000000u)) { | ||
261 | x <<= 2; | ||
262 | r -= 2; | ||
263 | } | ||
264 | if (!(x & 0x80000000u)) { | ||
265 | x <<= 1; | ||
266 | r -= 1; | ||
267 | } | ||
268 | return r; | ||
269 | } | ||
346 | 270 | ||
347 | /* | 271 | /* |
348 | * On ARMv5 and above those functions can be implemented around | 272 | * On ARMv5 and above those functions can be implemented around |
@@ -350,39 +274,18 @@ static inline unsigned long __ffs(unsigned long word) | |||
350 | */ | 274 | */ |
351 | 275 | ||
352 | #define fls(x) \ | 276 | #define fls(x) \ |
353 | ( __builtin_constant_p(x) ? generic_fls(x) : \ | 277 | ( __builtin_constant_p(x) ? constant_fls(x) : \ |
354 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) | 278 | ({ int __r; asm("clz\t%0, %1" : "=r"(__r) : "r"(x) : "cc"); 32-__r; }) ) |
355 | #define fls64(x) generic_fls64(x) | ||
356 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) | 279 | #define ffs(x) ({ unsigned long __t = (x); fls(__t & -__t); }) |
357 | #define __ffs(x) (ffs(x) - 1) | 280 | #define __ffs(x) (ffs(x) - 1) |
358 | #define ffz(x) __ffs( ~(x) ) | 281 | #define ffz(x) __ffs( ~(x) ) |
359 | 282 | ||
360 | #endif | 283 | #endif |
361 | 284 | ||
362 | /* | 285 | #include <asm-generic/bitops/fls64.h> |
363 | * Find first bit set in a 168-bit bitmap, where the first | ||
364 | * 128 bits are unlikely to be set. | ||
365 | */ | ||
366 | static inline int sched_find_first_bit(const unsigned long *b) | ||
367 | { | ||
368 | unsigned long v; | ||
369 | unsigned int off; | ||
370 | |||
371 | for (off = 0; v = b[off], off < 4; off++) { | ||
372 | if (unlikely(v)) | ||
373 | break; | ||
374 | } | ||
375 | return __ffs(v) + off * 32; | ||
376 | } | ||
377 | |||
378 | /* | ||
379 | * hweightN: returns the hamming weight (i.e. the number | ||
380 | * of bits set) of a N-bit word | ||
381 | */ | ||
382 | 286 | ||
383 | #define hweight32(x) generic_hweight32(x) | 287 | #include <asm-generic/bitops/sched.h> |
384 | #define hweight16(x) generic_hweight16(x) | 288 | #include <asm-generic/bitops/hweight.h> |
385 | #define hweight8(x) generic_hweight8(x) | ||
386 | 289 | ||
387 | /* | 290 | /* |
388 | * Ext2 is defined to use little-endian byte ordering. | 291 | * Ext2 is defined to use little-endian byte ordering. |
@@ -397,7 +300,7 @@ static inline int sched_find_first_bit(const unsigned long *b) | |||
397 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 300 | #define ext2_clear_bit_atomic(lock,nr,p) \ |
398 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 301 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
399 | #define ext2_test_bit(nr,p) \ | 302 | #define ext2_test_bit(nr,p) \ |
400 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 303 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
401 | #define ext2_find_first_zero_bit(p,sz) \ | 304 | #define ext2_find_first_zero_bit(p,sz) \ |
402 | _find_first_zero_bit_le(p,sz) | 305 | _find_first_zero_bit_le(p,sz) |
403 | #define ext2_find_next_zero_bit(p,sz,off) \ | 306 | #define ext2_find_next_zero_bit(p,sz,off) \ |
@@ -410,7 +313,7 @@ static inline int sched_find_first_bit(const unsigned long *b) | |||
410 | #define minix_set_bit(nr,p) \ | 313 | #define minix_set_bit(nr,p) \ |
411 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 314 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
412 | #define minix_test_bit(nr,p) \ | 315 | #define minix_test_bit(nr,p) \ |
413 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 316 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
414 | #define minix_test_and_set_bit(nr,p) \ | 317 | #define minix_test_and_set_bit(nr,p) \ |
415 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 318 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
416 | #define minix_test_and_clear_bit(nr,p) \ | 319 | #define minix_test_and_clear_bit(nr,p) \ |
diff --git a/include/asm-arm26/bitops.h b/include/asm-arm26/bitops.h index d87f8634e625..19a69573a654 100644 --- a/include/asm-arm26/bitops.h +++ b/include/asm-arm26/bitops.h | |||
@@ -117,65 +117,7 @@ ____atomic_test_and_change_bit(unsigned int bit, volatile unsigned long *p) | |||
117 | return res & mask; | 117 | return res & mask; |
118 | } | 118 | } |
119 | 119 | ||
120 | /* | 120 | #include <asm-generic/bitops/non-atomic.h> |
121 | * Now the non-atomic variants. We let the compiler handle all | ||
122 | * optimisations for these. These are all _native_ endian. | ||
123 | */ | ||
124 | static inline void __set_bit(int nr, volatile unsigned long *p) | ||
125 | { | ||
126 | p[nr >> 5] |= (1UL << (nr & 31)); | ||
127 | } | ||
128 | |||
129 | static inline void __clear_bit(int nr, volatile unsigned long *p) | ||
130 | { | ||
131 | p[nr >> 5] &= ~(1UL << (nr & 31)); | ||
132 | } | ||
133 | |||
134 | static inline void __change_bit(int nr, volatile unsigned long *p) | ||
135 | { | ||
136 | p[nr >> 5] ^= (1UL << (nr & 31)); | ||
137 | } | ||
138 | |||
139 | static inline int __test_and_set_bit(int nr, volatile unsigned long *p) | ||
140 | { | ||
141 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
142 | |||
143 | p += nr >> 5; | ||
144 | |||
145 | oldval = *p; | ||
146 | *p = oldval | mask; | ||
147 | return oldval & mask; | ||
148 | } | ||
149 | |||
150 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *p) | ||
151 | { | ||
152 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
153 | |||
154 | p += nr >> 5; | ||
155 | |||
156 | oldval = *p; | ||
157 | *p = oldval & ~mask; | ||
158 | return oldval & mask; | ||
159 | } | ||
160 | |||
161 | static inline int __test_and_change_bit(int nr, volatile unsigned long *p) | ||
162 | { | ||
163 | unsigned long oldval, mask = 1UL << (nr & 31); | ||
164 | |||
165 | p += nr >> 5; | ||
166 | |||
167 | oldval = *p; | ||
168 | *p = oldval ^ mask; | ||
169 | return oldval & mask; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * This routine doesn't need to be atomic. | ||
174 | */ | ||
175 | static inline int __test_bit(int nr, const volatile unsigned long * p) | ||
176 | { | ||
177 | return (p[nr >> 5] >> (nr & 31)) & 1UL; | ||
178 | } | ||
179 | 121 | ||
180 | /* | 122 | /* |
181 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. | 123 | * Little endian assembly bitops. nr = 0 -> byte 0 bit 0. |
@@ -211,7 +153,6 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | |||
211 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) | 153 | #define test_and_set_bit(nr,p) ATOMIC_BITOP_LE(test_and_set_bit,nr,p) |
212 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) | 154 | #define test_and_clear_bit(nr,p) ATOMIC_BITOP_LE(test_and_clear_bit,nr,p) |
213 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) | 155 | #define test_and_change_bit(nr,p) ATOMIC_BITOP_LE(test_and_change_bit,nr,p) |
214 | #define test_bit(nr,p) __test_bit(nr,p) | ||
215 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) | 156 | #define find_first_zero_bit(p,sz) _find_first_zero_bit_le(p,sz) |
216 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) | 157 | #define find_next_zero_bit(p,sz,off) _find_next_zero_bit_le(p,sz,off) |
217 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 158 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
@@ -219,80 +160,13 @@ extern int _find_next_bit_le(const unsigned long *p, int size, int offset); | |||
219 | 160 | ||
220 | #define WORD_BITOFF_TO_LE(x) ((x)) | 161 | #define WORD_BITOFF_TO_LE(x) ((x)) |
221 | 162 | ||
222 | /* | 163 | #include <asm-generic/bitops/ffz.h> |
223 | * ffz = Find First Zero in word. Undefined if no zero exists, | 164 | #include <asm-generic/bitops/__ffs.h> |
224 | * so code should check against ~0UL first.. | 165 | #include <asm-generic/bitops/fls.h> |
225 | */ | 166 | #include <asm-generic/bitops/fls64.h> |
226 | static inline unsigned long ffz(unsigned long word) | 167 | #include <asm-generic/bitops/ffs.h> |
227 | { | 168 | #include <asm-generic/bitops/sched.h> |
228 | int k; | 169 | #include <asm-generic/bitops/hweight.h> |
229 | |||
230 | word = ~word; | ||
231 | k = 31; | ||
232 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
233 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
234 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
235 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
236 | if (word & 0x40000000) { k -= 1; } | ||
237 | return k; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
242 | * so code should check against ~0UL first.. | ||
243 | */ | ||
244 | static inline unsigned long __ffs(unsigned long word) | ||
245 | { | ||
246 | int k; | ||
247 | |||
248 | k = 31; | ||
249 | if (word & 0x0000ffff) { k -= 16; word <<= 16; } | ||
250 | if (word & 0x00ff0000) { k -= 8; word <<= 8; } | ||
251 | if (word & 0x0f000000) { k -= 4; word <<= 4; } | ||
252 | if (word & 0x30000000) { k -= 2; word <<= 2; } | ||
253 | if (word & 0x40000000) { k -= 1; } | ||
254 | return k; | ||
255 | } | ||
256 | |||
257 | /* | ||
258 | * fls: find last bit set. | ||
259 | */ | ||
260 | |||
261 | #define fls(x) generic_fls(x) | ||
262 | #define fls64(x) generic_fls64(x) | ||
263 | |||
264 | /* | ||
265 | * ffs: find first bit set. This is defined the same way as | ||
266 | * the libc and compiler builtin ffs routines, therefore | ||
267 | * differs in spirit from the above ffz (man ffs). | ||
268 | */ | ||
269 | |||
270 | #define ffs(x) generic_ffs(x) | ||
271 | |||
272 | /* | ||
273 | * Find first bit set in a 168-bit bitmap, where the first | ||
274 | * 128 bits are unlikely to be set. | ||
275 | */ | ||
276 | static inline int sched_find_first_bit(unsigned long *b) | ||
277 | { | ||
278 | unsigned long v; | ||
279 | unsigned int off; | ||
280 | |||
281 | for (off = 0; v = b[off], off < 4; off++) { | ||
282 | if (unlikely(v)) | ||
283 | break; | ||
284 | } | ||
285 | return __ffs(v) + off * 32; | ||
286 | } | ||
287 | |||
288 | /* | ||
289 | * hweightN: returns the hamming weight (i.e. the number | ||
290 | * of bits set) of a N-bit word | ||
291 | */ | ||
292 | |||
293 | #define hweight32(x) generic_hweight32(x) | ||
294 | #define hweight16(x) generic_hweight16(x) | ||
295 | #define hweight8(x) generic_hweight8(x) | ||
296 | 170 | ||
297 | /* | 171 | /* |
298 | * Ext2 is defined to use little-endian byte ordering. | 172 | * Ext2 is defined to use little-endian byte ordering. |
@@ -307,7 +181,7 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
307 | #define ext2_clear_bit_atomic(lock,nr,p) \ | 181 | #define ext2_clear_bit_atomic(lock,nr,p) \ |
308 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 182 | test_and_clear_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
309 | #define ext2_test_bit(nr,p) \ | 183 | #define ext2_test_bit(nr,p) \ |
310 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 184 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
311 | #define ext2_find_first_zero_bit(p,sz) \ | 185 | #define ext2_find_first_zero_bit(p,sz) \ |
312 | _find_first_zero_bit_le(p,sz) | 186 | _find_first_zero_bit_le(p,sz) |
313 | #define ext2_find_next_zero_bit(p,sz,off) \ | 187 | #define ext2_find_next_zero_bit(p,sz,off) \ |
@@ -320,7 +194,7 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
320 | #define minix_set_bit(nr,p) \ | 194 | #define minix_set_bit(nr,p) \ |
321 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 195 | __set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
322 | #define minix_test_bit(nr,p) \ | 196 | #define minix_test_bit(nr,p) \ |
323 | __test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 197 | test_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
324 | #define minix_test_and_set_bit(nr,p) \ | 198 | #define minix_test_and_set_bit(nr,p) \ |
325 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) | 199 | __test_and_set_bit(WORD_BITOFF_TO_LE(nr), (unsigned long *)(p)) |
326 | #define minix_test_and_clear_bit(nr,p) \ | 200 | #define minix_test_and_clear_bit(nr,p) \ |
diff --git a/include/asm-cris/bitops.h b/include/asm-cris/bitops.h index b7fef1572dc0..a569065113d9 100644 --- a/include/asm-cris/bitops.h +++ b/include/asm-cris/bitops.h | |||
@@ -39,8 +39,6 @@ struct __dummy { unsigned long a[100]; }; | |||
39 | 39 | ||
40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) | 40 | #define set_bit(nr, addr) (void)test_and_set_bit(nr, addr) |
41 | 41 | ||
42 | #define __set_bit(nr, addr) (void)__test_and_set_bit(nr, addr) | ||
43 | |||
44 | /* | 42 | /* |
45 | * clear_bit - Clears a bit in memory | 43 | * clear_bit - Clears a bit in memory |
46 | * @nr: Bit to clear | 44 | * @nr: Bit to clear |
@@ -54,8 +52,6 @@ struct __dummy { unsigned long a[100]; }; | |||
54 | 52 | ||
55 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) | 53 | #define clear_bit(nr, addr) (void)test_and_clear_bit(nr, addr) |
56 | 54 | ||
57 | #define __clear_bit(nr, addr) (void)__test_and_clear_bit(nr, addr) | ||
58 | |||
59 | /* | 55 | /* |
60 | * change_bit - Toggle a bit in memory | 56 | * change_bit - Toggle a bit in memory |
61 | * @nr: Bit to change | 57 | * @nr: Bit to change |
@@ -68,18 +64,6 @@ struct __dummy { unsigned long a[100]; }; | |||
68 | 64 | ||
69 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) | 65 | #define change_bit(nr, addr) (void)test_and_change_bit(nr, addr) |
70 | 66 | ||
71 | /* | ||
72 | * __change_bit - Toggle a bit in memory | ||
73 | * @nr: the bit to change | ||
74 | * @addr: the address to start counting from | ||
75 | * | ||
76 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
77 | * If it's called on the same region of memory simultaneously, the effect | ||
78 | * may be that only one operation succeeds. | ||
79 | */ | ||
80 | |||
81 | #define __change_bit(nr, addr) (void)__test_and_change_bit(nr, addr) | ||
82 | |||
83 | /** | 67 | /** |
84 | * test_and_set_bit - Set a bit and return its old value | 68 | * test_and_set_bit - Set a bit and return its old value |
85 | * @nr: Bit to set | 69 | * @nr: Bit to set |
@@ -101,19 +85,6 @@ static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | |||
101 | retval = (mask & *adr) != 0; | 85 | retval = (mask & *adr) != 0; |
102 | *adr |= mask; | 86 | *adr |= mask; |
103 | cris_atomic_restore(addr, flags); | 87 | cris_atomic_restore(addr, flags); |
104 | local_irq_restore(flags); | ||
105 | return retval; | ||
106 | } | ||
107 | |||
108 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
109 | { | ||
110 | unsigned int mask, retval; | ||
111 | unsigned int *adr = (unsigned int *)addr; | ||
112 | |||
113 | adr += nr >> 5; | ||
114 | mask = 1 << (nr & 0x1f); | ||
115 | retval = (mask & *adr) != 0; | ||
116 | *adr |= mask; | ||
117 | return retval; | 88 | return retval; |
118 | } | 89 | } |
119 | 90 | ||
@@ -148,27 +119,6 @@ static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | |||
148 | } | 119 | } |
149 | 120 | ||
150 | /** | 121 | /** |
151 | * __test_and_clear_bit - Clear a bit and return its old value | ||
152 | * @nr: Bit to clear | ||
153 | * @addr: Address to count from | ||
154 | * | ||
155 | * This operation is non-atomic and can be reordered. | ||
156 | * If two examples of this operation race, one can appear to succeed | ||
157 | * but actually fail. You must protect multiple accesses with a lock. | ||
158 | */ | ||
159 | |||
160 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
161 | { | ||
162 | unsigned int mask, retval; | ||
163 | unsigned int *adr = (unsigned int *)addr; | ||
164 | |||
165 | adr += nr >> 5; | ||
166 | mask = 1 << (nr & 0x1f); | ||
167 | retval = (mask & *adr) != 0; | ||
168 | *adr &= ~mask; | ||
169 | return retval; | ||
170 | } | ||
171 | /** | ||
172 | * test_and_change_bit - Change a bit and return its old value | 122 | * test_and_change_bit - Change a bit and return its old value |
173 | * @nr: Bit to change | 123 | * @nr: Bit to change |
174 | * @addr: Address to count from | 124 | * @addr: Address to count from |
@@ -191,42 +141,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | |||
191 | return retval; | 141 | return retval; |
192 | } | 142 | } |
193 | 143 | ||
194 | /* WARNING: non atomic and it can be reordered! */ | 144 | #include <asm-generic/bitops/non-atomic.h> |
195 | |||
196 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
197 | { | ||
198 | unsigned int mask, retval; | ||
199 | unsigned int *adr = (unsigned int *)addr; | ||
200 | |||
201 | adr += nr >> 5; | ||
202 | mask = 1 << (nr & 0x1f); | ||
203 | retval = (mask & *adr) != 0; | ||
204 | *adr ^= mask; | ||
205 | |||
206 | return retval; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * test_bit - Determine whether a bit is set | ||
211 | * @nr: bit number to test | ||
212 | * @addr: Address to start counting from | ||
213 | * | ||
214 | * This routine doesn't need to be atomic. | ||
215 | */ | ||
216 | |||
217 | static inline int test_bit(int nr, const volatile unsigned long *addr) | ||
218 | { | ||
219 | unsigned int mask; | ||
220 | unsigned int *adr = (unsigned int *)addr; | ||
221 | |||
222 | adr += nr >> 5; | ||
223 | mask = 1 << (nr & 0x1f); | ||
224 | return ((mask & *adr) != 0); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Find-bit routines.. | ||
229 | */ | ||
230 | 145 | ||
231 | /* | 146 | /* |
232 | * Since we define it "external", it collides with the built-in | 147 | * Since we define it "external", it collides with the built-in |
@@ -235,152 +150,18 @@ static inline int test_bit(int nr, const volatile unsigned long *addr) | |||
235 | */ | 150 | */ |
236 | #define ffs kernel_ffs | 151 | #define ffs kernel_ffs |
237 | 152 | ||
238 | /* | 153 | #include <asm-generic/bitops/fls.h> |
239 | * fls: find last bit set. | 154 | #include <asm-generic/bitops/fls64.h> |
240 | */ | 155 | #include <asm-generic/bitops/hweight.h> |
241 | 156 | #include <asm-generic/bitops/find.h> | |
242 | #define fls(x) generic_fls(x) | ||
243 | #define fls64(x) generic_fls64(x) | ||
244 | |||
245 | /* | ||
246 | * hweightN - returns the hamming weight of a N-bit word | ||
247 | * @x: the word to weigh | ||
248 | * | ||
249 | * The Hamming Weight of a number is the total number of bits set in it. | ||
250 | */ | ||
251 | 157 | ||
252 | #define hweight32(x) generic_hweight32(x) | 158 | #include <asm-generic/bitops/ext2-non-atomic.h> |
253 | #define hweight16(x) generic_hweight16(x) | ||
254 | #define hweight8(x) generic_hweight8(x) | ||
255 | 159 | ||
256 | /** | ||
257 | * find_next_zero_bit - find the first zero bit in a memory region | ||
258 | * @addr: The address to base the search on | ||
259 | * @offset: The bitnumber to start searching at | ||
260 | * @size: The maximum size to search | ||
261 | */ | ||
262 | static inline int find_next_zero_bit (const unsigned long * addr, int size, int offset) | ||
263 | { | ||
264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
265 | unsigned long result = offset & ~31UL; | ||
266 | unsigned long tmp; | ||
267 | |||
268 | if (offset >= size) | ||
269 | return size; | ||
270 | size -= result; | ||
271 | offset &= 31UL; | ||
272 | if (offset) { | ||
273 | tmp = *(p++); | ||
274 | tmp |= ~0UL >> (32-offset); | ||
275 | if (size < 32) | ||
276 | goto found_first; | ||
277 | if (~tmp) | ||
278 | goto found_middle; | ||
279 | size -= 32; | ||
280 | result += 32; | ||
281 | } | ||
282 | while (size & ~31UL) { | ||
283 | if (~(tmp = *(p++))) | ||
284 | goto found_middle; | ||
285 | result += 32; | ||
286 | size -= 32; | ||
287 | } | ||
288 | if (!size) | ||
289 | return result; | ||
290 | tmp = *p; | ||
291 | |||
292 | found_first: | ||
293 | tmp |= ~0UL << size; | ||
294 | found_middle: | ||
295 | return result + ffz(tmp); | ||
296 | } | ||
297 | |||
298 | /** | ||
299 | * find_next_bit - find the first set bit in a memory region | ||
300 | * @addr: The address to base the search on | ||
301 | * @offset: The bitnumber to start searching at | ||
302 | * @size: The maximum size to search | ||
303 | */ | ||
304 | static __inline__ int find_next_bit(const unsigned long *addr, int size, int offset) | ||
305 | { | ||
306 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
307 | unsigned long result = offset & ~31UL; | ||
308 | unsigned long tmp; | ||
309 | |||
310 | if (offset >= size) | ||
311 | return size; | ||
312 | size -= result; | ||
313 | offset &= 31UL; | ||
314 | if (offset) { | ||
315 | tmp = *(p++); | ||
316 | tmp &= (~0UL << offset); | ||
317 | if (size < 32) | ||
318 | goto found_first; | ||
319 | if (tmp) | ||
320 | goto found_middle; | ||
321 | size -= 32; | ||
322 | result += 32; | ||
323 | } | ||
324 | while (size & ~31UL) { | ||
325 | if ((tmp = *(p++))) | ||
326 | goto found_middle; | ||
327 | result += 32; | ||
328 | size -= 32; | ||
329 | } | ||
330 | if (!size) | ||
331 | return result; | ||
332 | tmp = *p; | ||
333 | |||
334 | found_first: | ||
335 | tmp &= (~0UL >> (32 - size)); | ||
336 | if (tmp == 0UL) /* Are any bits set? */ | ||
337 | return result + size; /* Nope. */ | ||
338 | found_middle: | ||
339 | return result + __ffs(tmp); | ||
340 | } | ||
341 | |||
342 | /** | ||
343 | * find_first_zero_bit - find the first zero bit in a memory region | ||
344 | * @addr: The address to start the search at | ||
345 | * @size: The maximum size to search | ||
346 | * | ||
347 | * Returns the bit-number of the first zero bit, not the number of the byte | ||
348 | * containing a bit. | ||
349 | */ | ||
350 | |||
351 | #define find_first_zero_bit(addr, size) \ | ||
352 | find_next_zero_bit((addr), (size), 0) | ||
353 | #define find_first_bit(addr, size) \ | ||
354 | find_next_bit((addr), (size), 0) | ||
355 | |||
356 | #define ext2_set_bit test_and_set_bit | ||
357 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 160 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
358 | #define ext2_clear_bit test_and_clear_bit | ||
359 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 161 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
360 | #define ext2_test_bit test_bit | ||
361 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
362 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
363 | |||
364 | /* Bitmap functions for the minix filesystem. */ | ||
365 | #define minix_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
366 | #define minix_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
367 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
368 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
369 | 162 | ||
370 | static inline int sched_find_first_bit(const unsigned long *b) | 163 | #include <asm-generic/bitops/minix.h> |
371 | { | 164 | #include <asm-generic/bitops/sched.h> |
372 | if (unlikely(b[0])) | ||
373 | return __ffs(b[0]); | ||
374 | if (unlikely(b[1])) | ||
375 | return __ffs(b[1]) + 32; | ||
376 | if (unlikely(b[2])) | ||
377 | return __ffs(b[2]) + 64; | ||
378 | if (unlikely(b[3])) | ||
379 | return __ffs(b[3]) + 96; | ||
380 | if (b[4]) | ||
381 | return __ffs(b[4]) + 128; | ||
382 | return __ffs(b[5]) + 32 + 128; | ||
383 | } | ||
384 | 165 | ||
385 | #endif /* __KERNEL__ */ | 166 | #endif /* __KERNEL__ */ |
386 | 167 | ||
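The CRIS conversion above likewise replaces its private sched_find_first_bit() — a find-first-set over the priority bitmap of the O(1) scheduler, where the low words are usually empty — with asm-generic/bitops/sched.h, and its open-coded ext2/minix bitmap macros with the shared ext2-non-atomic and minix headers. The operation itself is just a word-at-a-time scan, roughly:

    /* Sketch of the word-by-word scan the generic sched header performs
     * on a 140-bit priority bitmap (five 32-bit words on a 32-bit arch).
     * __builtin_ctzl stands in for __ffs; the caller guarantees that at
     * least one bit is set. */
    static inline int sched_ffs_sketch(const unsigned long b[5])
    {
            int w;

            for (w = 0; w < 4; w++)         /* low 128 bits, usually clear */
                    if (b[w])
                            return __builtin_ctzl(b[w]) + w * 32;
            return __builtin_ctzl(b[4]) + 128;
    }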
diff --git a/include/asm-frv/bitops.h b/include/asm-frv/bitops.h index f686b519878e..6344d06390b9 100644 --- a/include/asm-frv/bitops.h +++ b/include/asm-frv/bitops.h | |||
@@ -22,20 +22,7 @@ | |||
22 | 22 | ||
23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
24 | 24 | ||
25 | /* | 25 | #include <asm-generic/bitops/ffz.h> |
26 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
27 | * so code should check against ~0UL first.. | ||
28 | */ | ||
29 | static inline unsigned long ffz(unsigned long word) | ||
30 | { | ||
31 | unsigned long result = 0; | ||
32 | |||
33 | while (word & 1) { | ||
34 | result++; | ||
35 | word >>= 1; | ||
36 | } | ||
37 | return result; | ||
38 | } | ||
39 | 26 | ||
40 | /* | 27 | /* |
41 | * clear_bit() doesn't provide any barrier for the compiler. | 28 | * clear_bit() doesn't provide any barrier for the compiler. |
@@ -171,51 +158,9 @@ static inline int __test_bit(int nr, const volatile void * addr) | |||
171 | __constant_test_bit((nr),(addr)) : \ | 158 | __constant_test_bit((nr),(addr)) : \ |
172 | __test_bit((nr),(addr))) | 159 | __test_bit((nr),(addr))) |
173 | 160 | ||
174 | extern int find_next_bit(const unsigned long *addr, int size, int offset); | 161 | #include <asm-generic/bitops/ffs.h> |
175 | 162 | #include <asm-generic/bitops/__ffs.h> | |
176 | #define find_first_bit(addr, size) find_next_bit(addr, size, 0) | 163 | #include <asm-generic/bitops/find.h> |
177 | |||
178 | #define find_first_zero_bit(addr, size) \ | ||
179 | find_next_zero_bit((addr), (size), 0) | ||
180 | |||
181 | static inline int find_next_zero_bit(const void *addr, int size, int offset) | ||
182 | { | ||
183 | const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); | ||
184 | unsigned long result = offset & ~31UL; | ||
185 | unsigned long tmp; | ||
186 | |||
187 | if (offset >= size) | ||
188 | return size; | ||
189 | size -= result; | ||
190 | offset &= 31UL; | ||
191 | if (offset) { | ||
192 | tmp = *(p++); | ||
193 | tmp |= ~0UL >> (32-offset); | ||
194 | if (size < 32) | ||
195 | goto found_first; | ||
196 | if (~tmp) | ||
197 | goto found_middle; | ||
198 | size -= 32; | ||
199 | result += 32; | ||
200 | } | ||
201 | while (size & ~31UL) { | ||
202 | if (~(tmp = *(p++))) | ||
203 | goto found_middle; | ||
204 | result += 32; | ||
205 | size -= 32; | ||
206 | } | ||
207 | if (!size) | ||
208 | return result; | ||
209 | tmp = *p; | ||
210 | |||
211 | found_first: | ||
212 | tmp |= ~0UL << size; | ||
213 | found_middle: | ||
214 | return result + ffz(tmp); | ||
215 | } | ||
216 | |||
217 | #define ffs(x) generic_ffs(x) | ||
218 | #define __ffs(x) (ffs(x) - 1) | ||
219 | 164 | ||
220 | /* | 165 | /* |
221 | * fls: find last bit set. | 166 | * fls: find last bit set. |
@@ -228,114 +173,17 @@ found_middle: | |||
228 | \ | 173 | \ |
229 | bit ? 33 - bit : bit; \ | 174 | bit ? 33 - bit : bit; \ |
230 | }) | 175 | }) |
231 | #define fls64(x) generic_fls64(x) | ||
232 | 176 | ||
233 | /* | 177 | #include <asm-generic/bitops/fls64.h> |
234 | * Every architecture must define this function. It's the fastest | 178 | #include <asm-generic/bitops/sched.h> |
235 | * way of searching a 140-bit bitmap where the first 100 bits are | 179 | #include <asm-generic/bitops/hweight.h> |
236 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
237 | * bits is cleared. | ||
238 | */ | ||
239 | static inline int sched_find_first_bit(const unsigned long *b) | ||
240 | { | ||
241 | if (unlikely(b[0])) | ||
242 | return __ffs(b[0]); | ||
243 | if (unlikely(b[1])) | ||
244 | return __ffs(b[1]) + 32; | ||
245 | if (unlikely(b[2])) | ||
246 | return __ffs(b[2]) + 64; | ||
247 | if (b[3]) | ||
248 | return __ffs(b[3]) + 96; | ||
249 | return __ffs(b[4]) + 128; | ||
250 | } | ||
251 | 180 | ||
181 | #include <asm-generic/bitops/ext2-non-atomic.h> | ||
252 | 182 | ||
253 | /* | 183 | #define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit ((nr) ^ 0x18, (addr)) |
254 | * hweightN: returns the hamming weight (i.e. the number | 184 | #define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr) ^ 0x18, (addr)) |
255 | * of bits set) of a N-bit word | ||
256 | */ | ||
257 | |||
258 | #define hweight32(x) generic_hweight32(x) | ||
259 | #define hweight16(x) generic_hweight16(x) | ||
260 | #define hweight8(x) generic_hweight8(x) | ||
261 | |||
262 | #define ext2_set_bit(nr, addr) test_and_set_bit ((nr) ^ 0x18, (addr)) | ||
263 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, (addr)) | ||
264 | |||
265 | #define ext2_set_bit_atomic(lock,nr,addr) ext2_set_bit((nr), addr) | ||
266 | #define ext2_clear_bit_atomic(lock,nr,addr) ext2_clear_bit((nr), addr) | ||
267 | |||
268 | static inline int ext2_test_bit(int nr, const volatile void * addr) | ||
269 | { | ||
270 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
271 | int mask; | ||
272 | |||
273 | ADDR += nr >> 3; | ||
274 | mask = 1 << (nr & 0x07); | ||
275 | return ((mask & *ADDR) != 0); | ||
276 | } | ||
277 | |||
278 | #define ext2_find_first_zero_bit(addr, size) \ | ||
279 | ext2_find_next_zero_bit((addr), (size), 0) | ||
280 | |||
281 | static inline unsigned long ext2_find_next_zero_bit(const void *addr, | ||
282 | unsigned long size, | ||
283 | unsigned long offset) | ||
284 | { | ||
285 | const unsigned long *p = ((const unsigned long *) addr) + (offset >> 5); | ||
286 | unsigned long result = offset & ~31UL; | ||
287 | unsigned long tmp; | ||
288 | |||
289 | if (offset >= size) | ||
290 | return size; | ||
291 | size -= result; | ||
292 | offset &= 31UL; | ||
293 | if(offset) { | ||
294 | /* We hold the little endian value in tmp, but then the | ||
295 | * shift is illegal. So we could keep a big endian value | ||
296 | * in tmp, like this: | ||
297 | * | ||
298 | * tmp = __swab32(*(p++)); | ||
299 | * tmp |= ~0UL >> (32-offset); | ||
300 | * | ||
301 | * but this would decrease preformance, so we change the | ||
302 | * shift: | ||
303 | */ | ||
304 | tmp = *(p++); | ||
305 | tmp |= __swab32(~0UL >> (32-offset)); | ||
306 | if(size < 32) | ||
307 | goto found_first; | ||
308 | if(~tmp) | ||
309 | goto found_middle; | ||
310 | size -= 32; | ||
311 | result += 32; | ||
312 | } | ||
313 | while(size & ~31UL) { | ||
314 | if(~(tmp = *(p++))) | ||
315 | goto found_middle; | ||
316 | result += 32; | ||
317 | size -= 32; | ||
318 | } | ||
319 | if(!size) | ||
320 | return result; | ||
321 | tmp = *p; | ||
322 | |||
323 | found_first: | ||
324 | /* tmp is little endian, so we would have to swab the shift, | ||
325 | * see above. But then we have to swab tmp below for ffz, so | ||
326 | * we might as well do this here. | ||
327 | */ | ||
328 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
329 | found_middle: | ||
330 | return result + ffz(__swab32(tmp)); | ||
331 | } | ||
332 | 185 | ||
333 | /* Bitmap functions for the minix filesystem. */ | 186 | #include <asm-generic/bitops/minix-le.h> |
334 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
335 | #define minix_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
336 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
337 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
338 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
339 | 187 | ||
340 | #endif /* __KERNEL__ */ | 188 | #endif /* __KERNEL__ */ |
341 | 189 | ||
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h index 0e6d9852008c..1f9d99193df8 100644 --- a/include/asm-generic/bitops.h +++ b/include/asm-generic/bitops.h | |||
@@ -5,77 +5,27 @@ | |||
5 | * For the benefit of those who are trying to port Linux to another | 5 | * For the benefit of those who are trying to port Linux to another |
6 | * architecture, here are some C-language equivalents. You should | 6 | * architecture, here are some C-language equivalents. You should |
7 | * recode these in the native assembly language, if at all possible. | 7 | * recode these in the native assembly language, if at all possible. |
8 | * To guarantee atomicity, these routines call cli() and sti() to | ||
9 | * disable interrupts while they operate. (You have to provide inline | ||
10 | * routines to cli() and sti().) | ||
11 | * | ||
12 | * Also note, these routines assume that you have 32 bit longs. | ||
13 | * You will have to change this if you are trying to port Linux to the | ||
14 | * Alpha architecture or to a Cray. :-) | ||
15 | * | 8 | * |
16 | * C language equivalents written by Theodore Ts'o, 9/26/92 | 9 | * C language equivalents written by Theodore Ts'o, 9/26/92 |
17 | */ | 10 | */ |
18 | 11 | ||
19 | extern __inline__ int set_bit(int nr,long * addr) | 12 | #include <asm-generic/bitops/atomic.h> |
20 | { | 13 | #include <asm-generic/bitops/non-atomic.h> |
21 | int mask, retval; | 14 | #include <asm-generic/bitops/__ffs.h> |
22 | 15 | #include <asm-generic/bitops/ffz.h> | |
23 | addr += nr >> 5; | 16 | #include <asm-generic/bitops/fls.h> |
24 | mask = 1 << (nr & 0x1f); | 17 | #include <asm-generic/bitops/fls64.h> |
25 | cli(); | 18 | #include <asm-generic/bitops/find.h> |
26 | retval = (mask & *addr) != 0; | ||
27 | *addr |= mask; | ||
28 | sti(); | ||
29 | return retval; | ||
30 | } | ||
31 | |||
32 | extern __inline__ int clear_bit(int nr, long * addr) | ||
33 | { | ||
34 | int mask, retval; | ||
35 | |||
36 | addr += nr >> 5; | ||
37 | mask = 1 << (nr & 0x1f); | ||
38 | cli(); | ||
39 | retval = (mask & *addr) != 0; | ||
40 | *addr &= ~mask; | ||
41 | sti(); | ||
42 | return retval; | ||
43 | } | ||
44 | |||
45 | extern __inline__ int test_bit(int nr, const unsigned long * addr) | ||
46 | { | ||
47 | int mask; | ||
48 | |||
49 | addr += nr >> 5; | ||
50 | mask = 1 << (nr & 0x1f); | ||
51 | return ((mask & *addr) != 0); | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * fls: find last bit set. | ||
56 | */ | ||
57 | |||
58 | #define fls(x) generic_fls(x) | ||
59 | #define fls64(x) generic_fls64(x) | ||
60 | 19 | ||
61 | #ifdef __KERNEL__ | 20 | #ifdef __KERNEL__ |
62 | 21 | ||
63 | /* | 22 | #include <asm-generic/bitops/sched.h> |
64 | * ffs: find first bit set. This is defined the same way as | 23 | #include <asm-generic/bitops/ffs.h> |
65 | * the libc and compiler builtin ffs routines, therefore | 24 | #include <asm-generic/bitops/hweight.h> |
66 | * differs in spirit from the above ffz (man ffs). | ||
67 | */ | ||
68 | |||
69 | #define ffs(x) generic_ffs(x) | ||
70 | |||
71 | /* | ||
72 | * hweightN: returns the hamming weight (i.e. the number | ||
73 | * of bits set) of a N-bit word | ||
74 | */ | ||
75 | 25 | ||
76 | #define hweight32(x) generic_hweight32(x) | 26 | #include <asm-generic/bitops/ext2-non-atomic.h> |
77 | #define hweight16(x) generic_hweight16(x) | 27 | #include <asm-generic/bitops/ext2-atomic.h> |
78 | #define hweight8(x) generic_hweight8(x) | 28 | #include <asm-generic/bitops/minix.h> |
79 | 29 | ||
80 | #endif /* __KERNEL__ */ | 30 | #endif /* __KERNEL__ */ |
81 | 31 | ||
diff --git a/include/asm-generic/bitops/__ffs.h b/include/asm-generic/bitops/__ffs.h new file mode 100644 index 000000000000..9a3274aecf83 --- /dev/null +++ b/include/asm-generic/bitops/__ffs.h | |||
@@ -0,0 +1,43 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS___FFS_H_ | ||
2 | #define _ASM_GENERIC_BITOPS___FFS_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | /** | ||
7 | * __ffs - find first bit in word. | ||
8 | * @word: The word to search | ||
9 | * | ||
10 | * Undefined if no bit exists, so code should check against 0 first. | ||
11 | */ | ||
12 | static inline unsigned long __ffs(unsigned long word) | ||
13 | { | ||
14 | int num = 0; | ||
15 | |||
16 | #if BITS_PER_LONG == 64 | ||
17 | if ((word & 0xffffffff) == 0) { | ||
18 | num += 32; | ||
19 | word >>= 32; | ||
20 | } | ||
21 | #endif | ||
22 | if ((word & 0xffff) == 0) { | ||
23 | num += 16; | ||
24 | word >>= 16; | ||
25 | } | ||
26 | if ((word & 0xff) == 0) { | ||
27 | num += 8; | ||
28 | word >>= 8; | ||
29 | } | ||
30 | if ((word & 0xf) == 0) { | ||
31 | num += 4; | ||
32 | word >>= 4; | ||
33 | } | ||
34 | if ((word & 0x3) == 0) { | ||
35 | num += 2; | ||
36 | word >>= 2; | ||
37 | } | ||
38 | if ((word & 0x1) == 0) | ||
39 | num += 1; | ||
40 | return num; | ||
41 | } | ||
42 | |||
43 | #endif /* _ASM_GENERIC_BITOPS___FFS_H_ */ | ||
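
A quick hosted check of the cascade above: each test halves the remaining search space, so __ffs() costs a handful of compares instead of a bit-by-bit loop. The sketch below is illustration only and not part of the patch; it rebuilds the same logic for a userspace build and verifies it across every bit position.

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

/* same cascade as the header above, written for a hosted build */
static unsigned long sketch___ffs(unsigned long word)
{
	unsigned long num = 0;

	if (BITS_PER_LONG == 64 && (word & 0xffffffffUL) == 0) {
		num += 32;
		word >>= 16;	/* two half-shifts stay valid on 32-bit longs */
		word >>= 16;
	}
	if ((word & 0xffff) == 0) { num += 16; word >>= 16; }
	if ((word & 0xff) == 0)   { num += 8;  word >>= 8;  }
	if ((word & 0xf) == 0)    { num += 4;  word >>= 4;  }
	if ((word & 0x3) == 0)    { num += 2;  word >>= 2;  }
	if ((word & 0x1) == 0)    num += 1;
	return num;
}

int main(void)
{
	unsigned long bit;

	for (bit = 0; bit < BITS_PER_LONG; bit++) {
		/* higher set bits must not change the answer */
		assert(sketch___ffs(1UL << bit) == bit);
		assert(sketch___ffs(~0UL << bit) == bit);
	}
	return 0;
}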
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h new file mode 100644 index 000000000000..78339319ba02 --- /dev/null +++ b/include/asm-generic/bitops/atomic.h | |||
@@ -0,0 +1,191 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_ATOMIC_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
8 | |||
9 | #ifdef CONFIG_SMP | ||
10 | #include <asm/spinlock.h> | ||
11 | #include <asm/cache.h> /* we use L1_CACHE_BYTES */ | ||
12 | |||
13 | /* Use an array of spinlocks for our atomic_ts. | ||
14 | * Hash function to index into a different SPINLOCK. | ||
15 | * Since "a" is usually an address, use one spinlock per cacheline. | ||
16 | */ | ||
17 | # define ATOMIC_HASH_SIZE 4 | ||
18 | # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) | ||
19 | |||
20 | extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; | ||
21 | |||
22 | /* Can't use raw_spin_lock_irq because of #include problems, so | ||
23 | * this is the substitute */ | ||
24 | #define _atomic_spin_lock_irqsave(l,f) do { \ | ||
25 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
26 | local_irq_save(f); \ | ||
27 | __raw_spin_lock(s); \ | ||
28 | } while(0) | ||
29 | |||
30 | #define _atomic_spin_unlock_irqrestore(l,f) do { \ | ||
31 | raw_spinlock_t *s = ATOMIC_HASH(l); \ | ||
32 | __raw_spin_unlock(s); \ | ||
33 | local_irq_restore(f); \ | ||
34 | } while(0) | ||
35 | |||
36 | |||
37 | #else | ||
38 | # define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0) | ||
39 | # define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0) | ||
40 | #endif | ||
41 | |||
42 | /* | ||
43 | * NMI events can occur at any time, including when interrupts have been | ||
44 | * disabled by *_irqsave(). So you can get NMI events occurring while a | ||
45 | * *_bit function is holding a spin lock. If the NMI handler also wants | ||
46 | * to do bit manipulation (and they do) then you can get a deadlock | ||
47 | * between the original caller of *_bit() and the NMI handler. | ||
48 | * | ||
49 | * by Keith Owens | ||
50 | */ | ||
51 | |||
52 | /** | ||
53 | * set_bit - Atomically set a bit in memory | ||
54 | * @nr: the bit to set | ||
55 | * @addr: the address to start counting from | ||
56 | * | ||
57 | * This function is atomic and may not be reordered. See __set_bit() | ||
58 | * if you do not require the atomic guarantees. | ||
59 | * | ||
60 | * Note: there are no guarantees that this function will not be reordered | ||
61 | * on non-x86 architectures, so if you are writing portable code, | ||
62 | * make sure not to rely on its reordering guarantees. | ||
63 | * | ||
64 | * Note that @nr may be almost arbitrarily large; this function is not | ||
65 | * restricted to acting on a single-word quantity. | ||
66 | */ | ||
67 | static inline void set_bit(int nr, volatile unsigned long *addr) | ||
68 | { | ||
69 | unsigned long mask = BITOP_MASK(nr); | ||
70 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
71 | unsigned long flags; | ||
72 | |||
73 | _atomic_spin_lock_irqsave(p, flags); | ||
74 | *p |= mask; | ||
75 | _atomic_spin_unlock_irqrestore(p, flags); | ||
76 | } | ||
77 | |||
78 | /** | ||
79 | * clear_bit - Clears a bit in memory | ||
80 | * @nr: Bit to clear | ||
81 | * @addr: Address to start counting from | ||
82 | * | ||
83 | * clear_bit() is atomic and may not be reordered. However, it does | ||
84 | * not contain a memory barrier, so if it is used for locking purposes, | ||
85 | * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() | ||
86 | * in order to ensure changes are visible on other processors. | ||
87 | */ | ||
88 | static inline void clear_bit(int nr, volatile unsigned long *addr) | ||
89 | { | ||
90 | unsigned long mask = BITOP_MASK(nr); | ||
91 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
92 | unsigned long flags; | ||
93 | |||
94 | _atomic_spin_lock_irqsave(p, flags); | ||
95 | *p &= ~mask; | ||
96 | _atomic_spin_unlock_irqrestore(p, flags); | ||
97 | } | ||
98 | |||
99 | /** | ||
100 | * change_bit - Toggle a bit in memory | ||
101 | * @nr: Bit to change | ||
102 | * @addr: Address to start counting from | ||
103 | * | ||
104 | * change_bit() is atomic and may not be reordered. It may be | ||
105 | * reordered on architectures other than x86. | ||
106 | * Note that @nr may be almost arbitrarily large; this function is not | ||
107 | * restricted to acting on a single-word quantity. | ||
108 | */ | ||
109 | static inline void change_bit(int nr, volatile unsigned long *addr) | ||
110 | { | ||
111 | unsigned long mask = BITOP_MASK(nr); | ||
112 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
113 | unsigned long flags; | ||
114 | |||
115 | _atomic_spin_lock_irqsave(p, flags); | ||
116 | *p ^= mask; | ||
117 | _atomic_spin_unlock_irqrestore(p, flags); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * test_and_set_bit - Set a bit and return its old value | ||
122 | * @nr: Bit to set | ||
123 | * @addr: Address to count from | ||
124 | * | ||
125 | * This operation is atomic and cannot be reordered. | ||
126 | * It may be reordered on architectures other than x86. | ||
127 | * It also implies a memory barrier. | ||
128 | */ | ||
129 | static inline int test_and_set_bit(int nr, volatile unsigned long *addr) | ||
130 | { | ||
131 | unsigned long mask = BITOP_MASK(nr); | ||
132 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
133 | unsigned long old; | ||
134 | unsigned long flags; | ||
135 | |||
136 | _atomic_spin_lock_irqsave(p, flags); | ||
137 | old = *p; | ||
138 | *p = old | mask; | ||
139 | _atomic_spin_unlock_irqrestore(p, flags); | ||
140 | |||
141 | return (old & mask) != 0; | ||
142 | } | ||
143 | |||
144 | /** | ||
145 | * test_and_clear_bit - Clear a bit and return its old value | ||
146 | * @nr: Bit to clear | ||
147 | * @addr: Address to count from | ||
148 | * | ||
149 | * This operation is atomic and cannot be reordered. | ||
150 | * It can be reordered on architectures other than x86. | ||
151 | * It also implies a memory barrier. | ||
152 | */ | ||
153 | static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
154 | { | ||
155 | unsigned long mask = BITOP_MASK(nr); | ||
156 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
157 | unsigned long old; | ||
158 | unsigned long flags; | ||
159 | |||
160 | _atomic_spin_lock_irqsave(p, flags); | ||
161 | old = *p; | ||
162 | *p = old & ~mask; | ||
163 | _atomic_spin_unlock_irqrestore(p, flags); | ||
164 | |||
165 | return (old & mask) != 0; | ||
166 | } | ||
167 | |||
168 | /** | ||
169 | * test_and_change_bit - Change a bit and return its old value | ||
170 | * @nr: Bit to change | ||
171 | * @addr: Address to count from | ||
172 | * | ||
173 | * This operation is atomic and cannot be reordered. | ||
174 | * It also implies a memory barrier. | ||
175 | */ | ||
176 | static inline int test_and_change_bit(int nr, volatile unsigned long *addr) | ||
177 | { | ||
178 | unsigned long mask = BITOP_MASK(nr); | ||
179 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
180 | unsigned long old; | ||
181 | unsigned long flags; | ||
182 | |||
183 | _atomic_spin_lock_irqsave(p, flags); | ||
184 | old = *p; | ||
185 | *p = old ^ mask; | ||
186 | _atomic_spin_unlock_irqrestore(p, flags); | ||
187 | |||
188 | return (old & mask) != 0; | ||
189 | } | ||
190 | |||
191 | #endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */ | ||
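
The SMP fallback above deserves a concrete picture: ATOMIC_HASH() folds the word's address down to one of ATOMIC_HASH_SIZE spinlocks, one slot per L1 cache line, so bitmaps living in different cache lines rarely contend on the same lock. The standalone sketch below is illustration only, not part of the patch; the L1_CACHE_BYTES value of 32 is an assumption made for the example.

#include <stdio.h>

#define L1_CACHE_BYTES		32	/* assumed for the example */
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH_IDX(a) \
	((((unsigned long)(a)) / L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE - 1))

int main(void)
{
	unsigned long addr;

	/* Walk eight cache lines' worth of word addresses: every word in a
	 * line shares a slot, and successive lines cycle through slots 0-3. */
	for (addr = 0; addr < 8 * L1_CACHE_BYTES; addr += sizeof(unsigned long))
		printf("address 0x%03lx -> lock slot %lu\n",
		       addr, ATOMIC_HASH_IDX(addr));
	return 0;
}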
diff --git a/include/asm-generic/bitops/ext2-atomic.h b/include/asm-generic/bitops/ext2-atomic.h new file mode 100644 index 000000000000..ab1c875efb74 --- /dev/null +++ b/include/asm-generic/bitops/ext2-atomic.h | |||
@@ -0,0 +1,22 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ | ||
3 | |||
4 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
5 | ({ \ | ||
6 | int ret; \ | ||
7 | spin_lock(lock); \ | ||
8 | ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ | ||
9 | spin_unlock(lock); \ | ||
10 | ret; \ | ||
11 | }) | ||
12 | |||
13 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
14 | ({ \ | ||
15 | int ret; \ | ||
16 | spin_lock(lock); \ | ||
17 | ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ | ||
18 | spin_unlock(lock); \ | ||
19 | ret; \ | ||
20 | }) | ||
21 | |||
22 | #endif /* _ASM_GENERIC_BITOPS_EXT2_ATOMIC_H_ */ | ||
diff --git a/include/asm-generic/bitops/ext2-non-atomic.h b/include/asm-generic/bitops/ext2-non-atomic.h new file mode 100644 index 000000000000..1697404afa05 --- /dev/null +++ b/include/asm-generic/bitops/ext2-non-atomic.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ | ||
3 | |||
4 | #include <asm-generic/bitops/le.h> | ||
5 | |||
6 | #define ext2_set_bit(nr,addr) \ | ||
7 | generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
8 | #define ext2_clear_bit(nr,addr) \ | ||
9 | generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
10 | |||
11 | #define ext2_test_bit(nr,addr) \ | ||
12 | generic_test_le_bit((nr),(unsigned long *)(addr)) | ||
13 | #define ext2_find_first_zero_bit(addr, size) \ | ||
14 | generic_find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
15 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
16 | generic_find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
17 | |||
18 | #endif /* _ASM_GENERIC_BITOPS_EXT2_NON_ATOMIC_H_ */ | ||
diff --git a/include/asm-generic/bitops/ffs.h b/include/asm-generic/bitops/ffs.h new file mode 100644 index 000000000000..fbbb43af7dc0 --- /dev/null +++ b/include/asm-generic/bitops/ffs.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FFS_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_FFS_H_ | ||
3 | |||
4 | /** | ||
5 | * ffs - find first bit set | ||
6 | * @x: the word to search | ||
7 | * | ||
8 | * This is defined the same way as | ||
9 | * the libc and compiler builtin ffs routines, therefore | ||
10 | * differs in spirit from the above ffz (man ffs). | ||
11 | */ | ||
12 | static inline int ffs(int x) | ||
13 | { | ||
14 | int r = 1; | ||
15 | |||
16 | if (!x) | ||
17 | return 0; | ||
18 | if (!(x & 0xffff)) { | ||
19 | x >>= 16; | ||
20 | r += 16; | ||
21 | } | ||
22 | if (!(x & 0xff)) { | ||
23 | x >>= 8; | ||
24 | r += 8; | ||
25 | } | ||
26 | if (!(x & 0xf)) { | ||
27 | x >>= 4; | ||
28 | r += 4; | ||
29 | } | ||
30 | if (!(x & 3)) { | ||
31 | x >>= 2; | ||
32 | r += 2; | ||
33 | } | ||
34 | if (!(x & 1)) { | ||
35 | x >>= 1; | ||
36 | r += 1; | ||
37 | } | ||
38 | return r; | ||
39 | } | ||
40 | |||
41 | #endif /* _ASM_GENERIC_BITOPS_FFS_H_ */ | ||
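
Note the convention above: unlike __ffs() and ffz(), ffs() is 1-based and returns 0 for a zero argument, matching libc and the compiler builtin. The hosted sketch below is illustration only; it copies the cascade under a different name and cross-checks it against __builtin_ffs().

#include <assert.h>

/* same cascade as the header above, renamed for a hosted build */
static int sketch_ffs(int x)
{
	int r = 1;

	if (!x)
		return 0;
	if (!(x & 0xffff)) { x >>= 16; r += 16; }
	if (!(x & 0xff))   { x >>= 8;  r += 8;  }
	if (!(x & 0xf))    { x >>= 4;  r += 4;  }
	if (!(x & 3))      { x >>= 2;  r += 2;  }
	if (!(x & 1))      { x >>= 1;  r += 1;  }
	return r;
}

int main(void)
{
	unsigned bit;

	assert(sketch_ffs(0) == 0);	/* no set bit at all */
	assert(sketch_ffs(1) == 1);	/* results are 1-based */

	for (bit = 0; bit < 32; bit++) {
		int v = (int)(1u << bit);

		assert(sketch_ffs(v) == __builtin_ffs(v));
	}
	return 0;
}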
diff --git a/include/asm-generic/bitops/ffz.h b/include/asm-generic/bitops/ffz.h new file mode 100644 index 000000000000..6744bd4cdf46 --- /dev/null +++ b/include/asm-generic/bitops/ffz.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FFZ_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_FFZ_H_ | ||
3 | |||
4 | /* | ||
5 | * ffz - find first zero in word. | ||
6 | * @word: The word to search | ||
7 | * | ||
8 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
9 | */ | ||
10 | #define ffz(x) __ffs(~(x)) | ||
11 | |||
12 | #endif /* _ASM_GENERIC_BITOPS_FFZ_H_ */ | ||
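
Defining ffz() as __ffs(~x) is all it takes: complementing the word turns "first zero" into "first one". A typical use is claiming the first free slot in a word-wide allocation map, as in the hosted sketch below (illustration only; __builtin_ctzl stands in for __ffs here).

#include <assert.h>

#define ffz(x)	((unsigned long)__builtin_ctzl(~(unsigned long)(x)))

int main(void)
{
	unsigned long used = 0x0000002fUL;	/* slots 0-3 and 5 taken */
	unsigned long slot;

	assert(used != ~0UL);			/* ffz() is undefined on ~0UL */
	slot = ffz(used);
	assert(slot == 4);			/* first clear bit */

	used |= 1UL << slot;			/* claim it */
	assert(ffz(used) == 6);
	return 0;
}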
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h new file mode 100644 index 000000000000..72a51e5a12ef --- /dev/null +++ b/include/asm-generic/bitops/find.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_FIND_H_ | ||
3 | |||
4 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | ||
5 | size, unsigned long offset); | ||
6 | |||
7 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned | ||
8 | long size, unsigned long offset); | ||
9 | |||
10 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
11 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
12 | |||
13 | #endif /* _ASM_GENERIC_BITOPS_FIND_H_ */ | ||
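
find.h only declares the search helpers; the out-of-line definitions are presumably supplied on the library side of this series (an assumption here, they are not visible in this hunk). Their contract is simple: return the index of the first matching bit at or after offset, or size if there is none, which gives the usual scan loop shown in this standalone sketch. The naive helper below is a readable stand-in, not the optimized implementation.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

/* naive reference with find_next_bit()'s contract */
static unsigned long naive_find_next_bit(const unsigned long *addr,
					 unsigned long size,
					 unsigned long offset)
{
	for (; offset < size; offset++)
		if (addr[offset / BITS_PER_LONG] &
		    (1UL << (offset % BITS_PER_LONG)))
			break;
	return offset;
}

int main(void)
{
	unsigned long map[BITS_TO_LONGS(200)] = { 0 };
	unsigned long bit;

	map[3 / BITS_PER_LONG]   |= 1UL << (3 % BITS_PER_LONG);
	map[70 / BITS_PER_LONG]  |= 1UL << (70 % BITS_PER_LONG);
	map[199 / BITS_PER_LONG] |= 1UL << (199 % BITS_PER_LONG);

	/* the canonical "for each set bit" loop */
	for (bit = naive_find_next_bit(map, 200, 0);
	     bit < 200;
	     bit = naive_find_next_bit(map, 200, bit + 1))
		printf("bit %lu is set\n", bit);	/* prints 3, 70, 199 */

	assert(naive_find_next_bit(map, 200, 71) == 199);
	return 0;
}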
diff --git a/include/asm-generic/bitops/fls.h b/include/asm-generic/bitops/fls.h new file mode 100644 index 000000000000..850859bc5069 --- /dev/null +++ b/include/asm-generic/bitops/fls.h | |||
@@ -0,0 +1,41 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FLS_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_FLS_H_ | ||
3 | |||
4 | /** | ||
5 | * fls - find last (most-significant) bit set | ||
6 | * @x: the word to search | ||
7 | * | ||
8 | * This is defined the same way as ffs. | ||
9 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
10 | */ | ||
11 | |||
12 | static inline int fls(int x) | ||
13 | { | ||
14 | int r = 32; | ||
15 | |||
16 | if (!x) | ||
17 | return 0; | ||
18 | if (!(x & 0xffff0000u)) { | ||
19 | x <<= 16; | ||
20 | r -= 16; | ||
21 | } | ||
22 | if (!(x & 0xff000000u)) { | ||
23 | x <<= 8; | ||
24 | r -= 8; | ||
25 | } | ||
26 | if (!(x & 0xf0000000u)) { | ||
27 | x <<= 4; | ||
28 | r -= 4; | ||
29 | } | ||
30 | if (!(x & 0xc0000000u)) { | ||
31 | x <<= 2; | ||
32 | r -= 2; | ||
33 | } | ||
34 | if (!(x & 0x80000000u)) { | ||
35 | x <<= 1; | ||
36 | r -= 1; | ||
37 | } | ||
38 | return r; | ||
39 | } | ||
40 | |||
41 | #endif /* _ASM_GENERIC_BITOPS_FLS_H_ */ | ||
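
As with ffs(), the result here is 1-based with fls(0) == 0, so fls() of a non-zero word is the index of its highest set bit plus one. The hosted sketch below is illustration only; it re-types the argument as unsigned to keep the shifts clean and pins down the corner cases listed in the comment above.

#include <assert.h>

/* same shift cascade as the header above, on an unsigned argument */
static int sketch_fls(unsigned int x)
{
	int r = 32;

	if (!x)
		return 0;
	if (!(x & 0xffff0000u)) { x <<= 16; r -= 16; }
	if (!(x & 0xff000000u)) { x <<= 8;  r -= 8;  }
	if (!(x & 0xf0000000u)) { x <<= 4;  r -= 4;  }
	if (!(x & 0xc0000000u)) { x <<= 2;  r -= 2;  }
	if (!(x & 0x80000000u)) { x <<= 1;  r -= 1;  }
	return r;
}

int main(void)
{
	assert(sketch_fls(0) == 0);
	assert(sketch_fls(1) == 1);
	assert(sketch_fls(0x80000000u) == 32);
	assert(sketch_fls(0x0000ff00u) == 16);	/* highest set bit is bit 15 */
	return 0;
}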
diff --git a/include/asm-generic/bitops/fls64.h b/include/asm-generic/bitops/fls64.h new file mode 100644 index 000000000000..1b6b17ce2428 --- /dev/null +++ b/include/asm-generic/bitops/fls64.h | |||
@@ -0,0 +1,14 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FLS64_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_FLS64_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | static inline int fls64(__u64 x) | ||
7 | { | ||
8 | __u32 h = x >> 32; | ||
9 | if (h) | ||
10 | return fls(h) + 32; | ||
11 | return fls(x); | ||
12 | } | ||
13 | |||
14 | #endif /* _ASM_GENERIC_BITOPS_FLS64_H_ */ | ||
diff --git a/include/asm-generic/bitops/hweight.h b/include/asm-generic/bitops/hweight.h new file mode 100644 index 000000000000..fbbc383771da --- /dev/null +++ b/include/asm-generic/bitops/hweight.h | |||
@@ -0,0 +1,11 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_HWEIGHT_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_HWEIGHT_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | extern unsigned int hweight32(unsigned int w); | ||
7 | extern unsigned int hweight16(unsigned int w); | ||
8 | extern unsigned int hweight8(unsigned int w); | ||
9 | extern unsigned long hweight64(__u64 w); | ||
10 | |||
11 | #endif /* _ASM_GENERIC_BITOPS_HWEIGHT_H_ */ | ||
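
Unlike the other headers in this directory, this one only declares the Hamming-weight helpers; the definitions presumably live out of line in generic library code (an assumption here, not visible in this hunk). For orientation, one conventional shape for a 32-bit population count is the parallel-sum sketch below, illustration only.

#include <assert.h>
#include <stdint.h>

/* classic SWAR popcount; one plausible shape for a generic hweight32() */
static unsigned int sketch_hweight32(uint32_t w)
{
	w = w - ((w >> 1) & 0x55555555);		/* 2-bit sums */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit sums */
	w = (w + (w >> 4)) & 0x0f0f0f0f;		/* byte sums */
	return (w * 0x01010101) >> 24;			/* add the four bytes */
}

int main(void)
{
	assert(sketch_hweight32(0) == 0);
	assert(sketch_hweight32(0xffffffffu) == 32);
	assert(sketch_hweight32(0x0000f00fu) == 8);
	assert(sketch_hweight32(1u << 31) == 1);
	return 0;
}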
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h new file mode 100644 index 000000000000..b9c7e5d2d2ad --- /dev/null +++ b/include/asm-generic/bitops/le.h | |||
@@ -0,0 +1,53 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_LE_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_LE_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | #include <asm/byteorder.h> | ||
6 | |||
7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
8 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | ||
9 | |||
10 | #if defined(__LITTLE_ENDIAN) | ||
11 | |||
12 | #define generic_test_le_bit(nr, addr) test_bit(nr, addr) | ||
13 | #define generic___set_le_bit(nr, addr) __set_bit(nr, addr) | ||
14 | #define generic___clear_le_bit(nr, addr) __clear_bit(nr, addr) | ||
15 | |||
16 | #define generic_test_and_set_le_bit(nr, addr) test_and_set_bit(nr, addr) | ||
17 | #define generic_test_and_clear_le_bit(nr, addr) test_and_clear_bit(nr, addr) | ||
18 | |||
19 | #define generic___test_and_set_le_bit(nr, addr) __test_and_set_bit(nr, addr) | ||
20 | #define generic___test_and_clear_le_bit(nr, addr) __test_and_clear_bit(nr, addr) | ||
21 | |||
22 | #define generic_find_next_zero_le_bit(addr, size, offset) find_next_zero_bit(addr, size, offset) | ||
23 | |||
24 | #elif defined(__BIG_ENDIAN) | ||
25 | |||
26 | #define generic_test_le_bit(nr, addr) \ | ||
27 | test_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
28 | #define generic___set_le_bit(nr, addr) \ | ||
29 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
30 | #define generic___clear_le_bit(nr, addr) \ | ||
31 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
32 | |||
33 | #define generic_test_and_set_le_bit(nr, addr) \ | ||
34 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
35 | #define generic_test_and_clear_le_bit(nr, addr) \ | ||
36 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
37 | |||
38 | #define generic___test_and_set_le_bit(nr, addr) \ | ||
39 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
40 | #define generic___test_and_clear_le_bit(nr, addr) \ | ||
41 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | ||
42 | |||
43 | extern unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, | ||
44 | unsigned long size, unsigned long offset); | ||
45 | |||
46 | #else | ||
47 | #error "Please fix <asm/byteorder.h>" | ||
48 | #endif | ||
49 | |||
50 | #define generic_find_first_zero_le_bit(addr, size) \ | ||
51 | generic_find_next_zero_le_bit((addr), (size), 0) | ||
52 | |||
53 | #endif /* _ASM_GENERIC_BITOPS_LE_H_ */ | ||
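
The BITOP_LE_SWIZZLE trick is the heart of the big-endian branch: on a 32-bit big-endian machine the byte holding little-endian bit nr sits at the other end of the word, and XOR-ing nr with 0x18 (0x38 with 64-bit longs) lands on exactly that native bit. The standalone check below is illustration only, written for the 32-bit case, and verifies the identity without touching memory.

#include <assert.h>

/* 32-bit big-endian case: BITOP_LE_SWIZZLE == (31 & ~7) == 0x18 */
#define LE_SWIZZLE_32	0x18

int main(void)
{
	unsigned nr;

	for (nr = 0; nr < 32; nr++) {
		unsigned byte = nr / 8;		/* byte offset in the LE bitmap */
		unsigned bit  = nr % 8;		/* bit within that byte */

		/* In a big-endian 32-bit word, the byte at offset 'byte'
		 * carries native bits (3 - byte) * 8 .. (3 - byte) * 8 + 7. */
		unsigned native = (3 - byte) * 8 + bit;

		assert((nr ^ LE_SWIZZLE_32) == native);
	}
	return 0;
}

This is the same mapping the frv hunk above keeps spelling literally as (nr) ^ 0x18 for its atomic ext2 helpers.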
diff --git a/include/asm-generic/bitops/minix-le.h b/include/asm-generic/bitops/minix-le.h new file mode 100644 index 000000000000..4a981c1bb1ae --- /dev/null +++ b/include/asm-generic/bitops/minix-le.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_LE_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_MINIX_LE_H_ | ||
3 | |||
4 | #include <asm-generic/bitops/le.h> | ||
5 | |||
6 | #define minix_test_and_set_bit(nr,addr) \ | ||
7 | generic___test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
8 | #define minix_set_bit(nr,addr) \ | ||
9 | generic___set_le_bit((nr),(unsigned long *)(addr)) | ||
10 | #define minix_test_and_clear_bit(nr,addr) \ | ||
11 | generic___test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
12 | #define minix_test_bit(nr,addr) \ | ||
13 | generic_test_le_bit((nr),(unsigned long *)(addr)) | ||
14 | #define minix_find_first_zero_bit(addr,size) \ | ||
15 | generic_find_first_zero_le_bit((unsigned long *)(addr),(size)) | ||
16 | |||
17 | #endif /* _ASM_GENERIC_BITOPS_MINIX_LE_H_ */ | ||
diff --git a/include/asm-generic/bitops/minix.h b/include/asm-generic/bitops/minix.h new file mode 100644 index 000000000000..91f42e87aa51 --- /dev/null +++ b/include/asm-generic/bitops/minix.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_MINIX_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_MINIX_H_ | ||
3 | |||
4 | #define minix_test_and_set_bit(nr,addr) \ | ||
5 | __test_and_set_bit((nr),(unsigned long *)(addr)) | ||
6 | #define minix_set_bit(nr,addr) \ | ||
7 | __set_bit((nr),(unsigned long *)(addr)) | ||
8 | #define minix_test_and_clear_bit(nr,addr) \ | ||
9 | __test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
10 | #define minix_test_bit(nr,addr) \ | ||
11 | test_bit((nr),(unsigned long *)(addr)) | ||
12 | #define minix_find_first_zero_bit(addr,size) \ | ||
13 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
14 | |||
15 | #endif /* _ASM_GENERIC_BITOPS_MINIX_H_ */ | ||
diff --git a/include/asm-generic/bitops/non-atomic.h b/include/asm-generic/bitops/non-atomic.h new file mode 100644 index 000000000000..46a825cf2ae1 --- /dev/null +++ b/include/asm-generic/bitops/non-atomic.h | |||
@@ -0,0 +1,111 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ | ||
3 | |||
4 | #include <asm/types.h> | ||
5 | |||
6 | #define BITOP_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | ||
7 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | ||
8 | |||
9 | /** | ||
10 | * __set_bit - Set a bit in memory | ||
11 | * @nr: the bit to set | ||
12 | * @addr: the address to start counting from | ||
13 | * | ||
14 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
15 | * If it's called on the same region of memory simultaneously, the effect | ||
16 | * may be that only one operation succeeds. | ||
17 | */ | ||
18 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
19 | { | ||
20 | unsigned long mask = BITOP_MASK(nr); | ||
21 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
22 | |||
23 | *p |= mask; | ||
24 | } | ||
25 | |||
26 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
27 | { | ||
28 | unsigned long mask = BITOP_MASK(nr); | ||
29 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
30 | |||
31 | *p &= ~mask; | ||
32 | } | ||
33 | |||
34 | /** | ||
35 | * __change_bit - Toggle a bit in memory | ||
36 | * @nr: the bit to change | ||
37 | * @addr: the address to start counting from | ||
38 | * | ||
39 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
40 | * If it's called on the same region of memory simultaneously, the effect | ||
41 | * may be that only one operation succeeds. | ||
42 | */ | ||
43 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
44 | { | ||
45 | unsigned long mask = BITOP_MASK(nr); | ||
46 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
47 | |||
48 | *p ^= mask; | ||
49 | } | ||
50 | |||
51 | /** | ||
52 | * __test_and_set_bit - Set a bit and return its old value | ||
53 | * @nr: Bit to set | ||
54 | * @addr: Address to count from | ||
55 | * | ||
56 | * This operation is non-atomic and can be reordered. | ||
57 | * If two examples of this operation race, one can appear to succeed | ||
58 | * but actually fail. You must protect multiple accesses with a lock. | ||
59 | */ | ||
60 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
61 | { | ||
62 | unsigned long mask = BITOP_MASK(nr); | ||
63 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
64 | unsigned long old = *p; | ||
65 | |||
66 | *p = old | mask; | ||
67 | return (old & mask) != 0; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * __test_and_clear_bit - Clear a bit and return its old value | ||
72 | * @nr: Bit to clear | ||
73 | * @addr: Address to count from | ||
74 | * | ||
75 | * This operation is non-atomic and can be reordered. | ||
76 | * If two examples of this operation race, one can appear to succeed | ||
77 | * but actually fail. You must protect multiple accesses with a lock. | ||
78 | */ | ||
79 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
80 | { | ||
81 | unsigned long mask = BITOP_MASK(nr); | ||
82 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
83 | unsigned long old = *p; | ||
84 | |||
85 | *p = old & ~mask; | ||
86 | return (old & mask) != 0; | ||
87 | } | ||
88 | |||
89 | /* WARNING: non-atomic and it can be reordered! */ | ||
90 | static inline int __test_and_change_bit(int nr, | ||
91 | volatile unsigned long *addr) | ||
92 | { | ||
93 | unsigned long mask = BITOP_MASK(nr); | ||
94 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
95 | unsigned long old = *p; | ||
96 | |||
97 | *p = old ^ mask; | ||
98 | return (old & mask) != 0; | ||
99 | } | ||
100 | |||
101 | /** | ||
102 | * test_bit - Determine whether a bit is set | ||
103 | * @nr: bit number to test | ||
104 | * @addr: Address to start counting from | ||
105 | */ | ||
106 | static inline int test_bit(int nr, const volatile unsigned long *addr) | ||
107 | { | ||
108 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
109 | } | ||
110 | |||
111 | #endif /* _ASM_GENERIC_BITOPS_NON_ATOMIC_H_ */ | ||
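
The return value of the __test_and_* helpers is the whole point: the caller learns in one step whether it was the one to change the bit. The hosted sketch below is illustration only; it mimics __test_and_set_bit() on a plain array to show the claim/already-claimed pattern.

#include <assert.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BITOP_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BITOP_WORD(nr)	((nr) / BITS_PER_LONG)

/* same logic as __test_and_set_bit() above, minus volatile, hosted build */
static int sketch_test_and_set_bit(unsigned nr, unsigned long *addr)
{
	unsigned long mask = BITOP_MASK(nr);
	unsigned long *p = addr + BITOP_WORD(nr);
	unsigned long old = *p;

	*p = old | mask;
	return (old & mask) != 0;
}

int main(void)
{
	unsigned long flags[2] = { 0, 0 };	/* at least 64 bits of map */

	/* the first claim of bit 40 reports it was clear... */
	assert(sketch_test_and_set_bit(40, flags) == 0);
	/* ...the second reports it was already set */
	assert(sketch_test_and_set_bit(40, flags) == 1);
	/* unrelated bits are untouched */
	assert(sketch_test_and_set_bit(3, flags) == 0);
	return 0;
}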
diff --git a/include/asm-generic/bitops/sched.h b/include/asm-generic/bitops/sched.h new file mode 100644 index 000000000000..5ef93a4d009f --- /dev/null +++ b/include/asm-generic/bitops/sched.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_SCHED_H_ | ||
2 | #define _ASM_GENERIC_BITOPS_SCHED_H_ | ||
3 | |||
4 | #include <linux/compiler.h> /* unlikely() */ | ||
5 | #include <asm/types.h> | ||
6 | |||
7 | /* | ||
8 | * Every architecture must define this function. It's the fastest | ||
9 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
10 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
11 | * bits is cleared. | ||
12 | */ | ||
13 | static inline int sched_find_first_bit(const unsigned long *b) | ||
14 | { | ||
15 | #if BITS_PER_LONG == 64 | ||
16 | if (unlikely(b[0])) | ||
17 | return __ffs(b[0]); | ||
18 | if (unlikely(b[1])) | ||
19 | return __ffs(b[1]) + 64; | ||
20 | return __ffs(b[2]) + 128; | ||
21 | #elif BITS_PER_LONG == 32 | ||
22 | if (unlikely(b[0])) | ||
23 | return __ffs(b[0]); | ||
24 | if (unlikely(b[1])) | ||
25 | return __ffs(b[1]) + 32; | ||
26 | if (unlikely(b[2])) | ||
27 | return __ffs(b[2]) + 64; | ||
28 | if (b[3]) | ||
29 | return __ffs(b[3]) + 96; | ||
30 | return __ffs(b[4]) + 128; | ||
31 | #else | ||
32 | #error BITS_PER_LONG not defined | ||
33 | #endif | ||
34 | } | ||
35 | |||
36 | #endif /* _ASM_GENERIC_BITOPS_SCHED_H_ */ | ||
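
sched_find_first_bit() exists for one caller: the O(1) scheduler's 140-slot priority bitmap, where the high-priority bits come first but are usually clear. The hosted sketch below is illustration only (__builtin_ctzl stands in for __ffs) and walks the same word-at-a-time strategy over a 140-bit map.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

#define BITS_PER_LONG	 (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define MAX_PRIO	 140

int main(void)
{
	unsigned long prio[BITS_TO_LONGS(MAX_PRIO)] = { 0 };
	unsigned runnable = 120;	/* first runnable task sits at priority 120 */
	unsigned i;

	prio[runnable / BITS_PER_LONG] |= 1UL << (runnable % BITS_PER_LONG);

	/* same strategy as sched_find_first_bit(): scan word by word,
	 * then take the lowest set bit of the first non-zero word */
	for (i = 0; i < BITS_TO_LONGS(MAX_PRIO); i++) {
		if (prio[i]) {
			unsigned bit = i * BITS_PER_LONG + __builtin_ctzl(prio[i]);

			printf("next priority to run: %u\n", bit);
			assert(bit == runnable);
			break;
		}
	}
	return 0;
}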
diff --git a/include/asm-h8300/bitops.h b/include/asm-h8300/bitops.h index ff7c2b721594..574f57b6c4d1 100644 --- a/include/asm-h8300/bitops.h +++ b/include/asm-h8300/bitops.h | |||
@@ -8,7 +8,6 @@ | |||
8 | 8 | ||
9 | #include <linux/config.h> | 9 | #include <linux/config.h> |
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <asm/byteorder.h> /* swab32 */ | ||
12 | #include <asm/system.h> | 11 | #include <asm/system.h> |
13 | 12 | ||
14 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
@@ -177,10 +176,7 @@ H8300_GEN_TEST_BITOP(test_and_change_bit,"bnot") | |||
177 | #undef H8300_GEN_TEST_BITOP_CONST_INT | 176 | #undef H8300_GEN_TEST_BITOP_CONST_INT |
178 | #undef H8300_GEN_TEST_BITOP | 177 | #undef H8300_GEN_TEST_BITOP |
179 | 178 | ||
180 | #define find_first_zero_bit(addr, size) \ | 179 | #include <asm-generic/bitops/ffs.h> |
181 | find_next_zero_bit((addr), (size), 0) | ||
182 | |||
183 | #define ffs(x) generic_ffs(x) | ||
184 | 180 | ||
185 | static __inline__ unsigned long __ffs(unsigned long word) | 181 | static __inline__ unsigned long __ffs(unsigned long word) |
186 | { | 182 | { |
@@ -196,216 +192,16 @@ static __inline__ unsigned long __ffs(unsigned long word) | |||
196 | return result; | 192 | return result; |
197 | } | 193 | } |
198 | 194 | ||
199 | static __inline__ int find_next_zero_bit (const unsigned long * addr, int size, int offset) | 195 | #include <asm-generic/bitops/find.h> |
200 | { | 196 | #include <asm-generic/bitops/sched.h> |
201 | unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); | 197 | #include <asm-generic/bitops/hweight.h> |
202 | unsigned long result = offset & ~31UL; | 198 | #include <asm-generic/bitops/ext2-non-atomic.h> |
203 | unsigned long tmp; | 199 | #include <asm-generic/bitops/ext2-atomic.h> |
204 | 200 | #include <asm-generic/bitops/minix.h> | |
205 | if (offset >= size) | ||
206 | return size; | ||
207 | size -= result; | ||
208 | offset &= 31UL; | ||
209 | if (offset) { | ||
210 | tmp = *(p++); | ||
211 | tmp |= ~0UL >> (32-offset); | ||
212 | if (size < 32) | ||
213 | goto found_first; | ||
214 | if (~tmp) | ||
215 | goto found_middle; | ||
216 | size -= 32; | ||
217 | result += 32; | ||
218 | } | ||
219 | while (size & ~31UL) { | ||
220 | if (~(tmp = *(p++))) | ||
221 | goto found_middle; | ||
222 | result += 32; | ||
223 | size -= 32; | ||
224 | } | ||
225 | if (!size) | ||
226 | return result; | ||
227 | tmp = *p; | ||
228 | |||
229 | found_first: | ||
230 | tmp |= ~0UL << size; | ||
231 | found_middle: | ||
232 | return result + ffz(tmp); | ||
233 | } | ||
234 | |||
235 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
236 | unsigned long size, unsigned long offset) | ||
237 | { | ||
238 | unsigned long *p = (unsigned long *)(((unsigned long)addr + (offset >> 3)) & ~3); | ||
239 | unsigned int result = offset & ~31UL; | ||
240 | unsigned int tmp; | ||
241 | |||
242 | if (offset >= size) | ||
243 | return size; | ||
244 | size -= result; | ||
245 | offset &= 31UL; | ||
246 | if (offset) { | ||
247 | tmp = *(p++); | ||
248 | tmp &= ~0UL << offset; | ||
249 | if (size < 32) | ||
250 | goto found_first; | ||
251 | if (tmp) | ||
252 | goto found_middle; | ||
253 | size -= 32; | ||
254 | result += 32; | ||
255 | } | ||
256 | while (size >= 32) { | ||
257 | if ((tmp = *p++) != 0) | ||
258 | goto found_middle; | ||
259 | result += 32; | ||
260 | size -= 32; | ||
261 | } | ||
262 | if (!size) | ||
263 | return result; | ||
264 | tmp = *p; | ||
265 | |||
266 | found_first: | ||
267 | tmp &= ~0UL >> (32 - size); | ||
268 | if (tmp == 0UL) | ||
269 | return result + size; | ||
270 | found_middle: | ||
271 | return result + __ffs(tmp); | ||
272 | } | ||
273 | |||
274 | #define find_first_bit(addr, size) find_next_bit(addr, size, 0) | ||
275 | |||
276 | /* | ||
277 | * Every architecture must define this function. It's the fastest | ||
278 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
279 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
280 | * bits is cleared. | ||
281 | */ | ||
282 | static inline int sched_find_first_bit(unsigned long *b) | ||
283 | { | ||
284 | if (unlikely(b[0])) | ||
285 | return __ffs(b[0]); | ||
286 | if (unlikely(b[1])) | ||
287 | return __ffs(b[1]) + 32; | ||
288 | if (unlikely(b[2])) | ||
289 | return __ffs(b[2]) + 64; | ||
290 | if (b[3]) | ||
291 | return __ffs(b[3]) + 96; | ||
292 | return __ffs(b[4]) + 128; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * hweightN: returns the hamming weight (i.e. the number | ||
297 | * of bits set) of a N-bit word | ||
298 | */ | ||
299 | |||
300 | #define hweight32(x) generic_hweight32(x) | ||
301 | #define hweight16(x) generic_hweight16(x) | ||
302 | #define hweight8(x) generic_hweight8(x) | ||
303 | |||
304 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
305 | { | ||
306 | int mask, retval; | ||
307 | unsigned long flags; | ||
308 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
309 | |||
310 | ADDR += nr >> 3; | ||
311 | mask = 1 << (nr & 0x07); | ||
312 | local_irq_save(flags); | ||
313 | retval = (mask & *ADDR) != 0; | ||
314 | *ADDR |= mask; | ||
315 | local_irq_restore(flags); | ||
316 | return retval; | ||
317 | } | ||
318 | #define ext2_set_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) | ||
319 | |||
320 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
321 | { | ||
322 | int mask, retval; | ||
323 | unsigned long flags; | ||
324 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
325 | |||
326 | ADDR += nr >> 3; | ||
327 | mask = 1 << (nr & 0x07); | ||
328 | local_irq_save(flags); | ||
329 | retval = (mask & *ADDR) != 0; | ||
330 | *ADDR &= ~mask; | ||
331 | local_irq_restore(flags); | ||
332 | return retval; | ||
333 | } | ||
334 | #define ext2_clear_bit_atomic(lock, nr, addr) ext2_set_bit(nr, addr) | ||
335 | |||
336 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
337 | { | ||
338 | int mask; | ||
339 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
340 | |||
341 | ADDR += nr >> 3; | ||
342 | mask = 1 << (nr & 0x07); | ||
343 | return ((mask & *ADDR) != 0); | ||
344 | } | ||
345 | |||
346 | #define ext2_find_first_zero_bit(addr, size) \ | ||
347 | ext2_find_next_zero_bit((addr), (size), 0) | ||
348 | |||
349 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
350 | { | ||
351 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
352 | unsigned long result = offset & ~31UL; | ||
353 | unsigned long tmp; | ||
354 | |||
355 | if (offset >= size) | ||
356 | return size; | ||
357 | size -= result; | ||
358 | offset &= 31UL; | ||
359 | if(offset) { | ||
360 | /* We hold the little endian value in tmp, but then the | ||
361 | * shift is illegal. So we could keep a big endian value | ||
362 | * in tmp, like this: | ||
363 | * | ||
364 | * tmp = __swab32(*(p++)); | ||
365 | * tmp |= ~0UL >> (32-offset); | ||
366 | * | ||
367 | * but this would decrease performance, so we change the | ||
368 | * shift: | ||
369 | */ | ||
370 | tmp = *(p++); | ||
371 | tmp |= __swab32(~0UL >> (32-offset)); | ||
372 | if(size < 32) | ||
373 | goto found_first; | ||
374 | if(~tmp) | ||
375 | goto found_middle; | ||
376 | size -= 32; | ||
377 | result += 32; | ||
378 | } | ||
379 | while(size & ~31UL) { | ||
380 | if(~(tmp = *(p++))) | ||
381 | goto found_middle; | ||
382 | result += 32; | ||
383 | size -= 32; | ||
384 | } | ||
385 | if(!size) | ||
386 | return result; | ||
387 | tmp = *p; | ||
388 | |||
389 | found_first: | ||
390 | /* tmp is little endian, so we would have to swab the shift, | ||
391 | * see above. But then we have to swab tmp below for ffz, so | ||
392 | * we might as well do this here. | ||
393 | */ | ||
394 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
395 | found_middle: | ||
396 | return result + ffz(__swab32(tmp)); | ||
397 | } | ||
398 | |||
399 | /* Bitmap functions for the minix filesystem. */ | ||
400 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
401 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
402 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
403 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
404 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
405 | 201 | ||
406 | #endif /* __KERNEL__ */ | 202 | #endif /* __KERNEL__ */ |
407 | 203 | ||
408 | #define fls(x) generic_fls(x) | 204 | #include <asm-generic/bitops/fls.h> |
409 | #define fls64(x) generic_fls64(x) | 205 | #include <asm-generic/bitops/fls64.h> |
410 | 206 | ||
411 | #endif /* _H8300_BITOPS_H */ | 207 | #endif /* _H8300_BITOPS_H */ |
diff --git a/include/asm-h8300/types.h b/include/asm-h8300/types.h index bf91e0d4dde7..da2402b86540 100644 --- a/include/asm-h8300/types.h +++ b/include/asm-h8300/types.h | |||
@@ -58,6 +58,9 @@ typedef u32 dma_addr_t; | |||
58 | #define HAVE_SECTOR_T | 58 | #define HAVE_SECTOR_T |
59 | typedef u64 sector_t; | 59 | typedef u64 sector_t; |
60 | 60 | ||
61 | #define HAVE_BLKCNT_T | ||
62 | typedef u64 blkcnt_t; | ||
63 | |||
61 | #endif /* __KERNEL__ */ | 64 | #endif /* __KERNEL__ */ |
62 | 65 | ||
63 | #endif /* __ASSEMBLY__ */ | 66 | #endif /* __ASSEMBLY__ */ |
diff --git a/include/asm-i386/bitops.h b/include/asm-i386/bitops.h index 7d20b95edb3b..08deaeee6be9 100644 --- a/include/asm-i386/bitops.h +++ b/include/asm-i386/bitops.h | |||
@@ -362,28 +362,9 @@ static inline unsigned long ffz(unsigned long word) | |||
362 | return word; | 362 | return word; |
363 | } | 363 | } |
364 | 364 | ||
365 | #define fls64(x) generic_fls64(x) | ||
366 | |||
367 | #ifdef __KERNEL__ | 365 | #ifdef __KERNEL__ |
368 | 366 | ||
369 | /* | 367 | #include <asm-generic/bitops/sched.h> |
370 | * Every architecture must define this function. It's the fastest | ||
371 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
372 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
373 | * bits is cleared. | ||
374 | */ | ||
375 | static inline int sched_find_first_bit(const unsigned long *b) | ||
376 | { | ||
377 | if (unlikely(b[0])) | ||
378 | return __ffs(b[0]); | ||
379 | if (unlikely(b[1])) | ||
380 | return __ffs(b[1]) + 32; | ||
381 | if (unlikely(b[2])) | ||
382 | return __ffs(b[2]) + 64; | ||
383 | if (b[3]) | ||
384 | return __ffs(b[3]) + 96; | ||
385 | return __ffs(b[4]) + 128; | ||
386 | } | ||
387 | 368 | ||
388 | /** | 369 | /** |
389 | * ffs - find first bit set | 370 | * ffs - find first bit set |
@@ -421,42 +402,22 @@ static inline int fls(int x) | |||
421 | return r+1; | 402 | return r+1; |
422 | } | 403 | } |
423 | 404 | ||
424 | /** | 405 | #include <asm-generic/bitops/hweight.h> |
425 | * hweightN - returns the hamming weight of a N-bit word | ||
426 | * @x: the word to weigh | ||
427 | * | ||
428 | * The Hamming Weight of a number is the total number of bits set in it. | ||
429 | */ | ||
430 | |||
431 | #define hweight32(x) generic_hweight32(x) | ||
432 | #define hweight16(x) generic_hweight16(x) | ||
433 | #define hweight8(x) generic_hweight8(x) | ||
434 | 406 | ||
435 | #endif /* __KERNEL__ */ | 407 | #endif /* __KERNEL__ */ |
436 | 408 | ||
409 | #include <asm-generic/bitops/fls64.h> | ||
410 | |||
437 | #ifdef __KERNEL__ | 411 | #ifdef __KERNEL__ |
438 | 412 | ||
439 | #define ext2_set_bit(nr,addr) \ | 413 | #include <asm-generic/bitops/ext2-non-atomic.h> |
440 | __test_and_set_bit((nr),(unsigned long*)addr) | 414 | |
441 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 415 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
442 | test_and_set_bit((nr),(unsigned long*)addr) | 416 | test_and_set_bit((nr),(unsigned long*)addr) |
443 | #define ext2_clear_bit(nr, addr) \ | ||
444 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
445 | #define ext2_clear_bit_atomic(lock,nr, addr) \ | 417 | #define ext2_clear_bit_atomic(lock,nr, addr) \ |
446 | test_and_clear_bit((nr),(unsigned long*)addr) | 418 | test_and_clear_bit((nr),(unsigned long*)addr) |
447 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | 419 | |
448 | #define ext2_find_first_zero_bit(addr, size) \ | 420 | #include <asm-generic/bitops/minix.h> |
449 | find_first_zero_bit((unsigned long*)addr, size) | ||
450 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
451 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
452 | |||
453 | /* Bitmap functions for the minix filesystem. */ | ||
454 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
455 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
456 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
457 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
458 | #define minix_find_first_zero_bit(addr,size) \ | ||
459 | find_first_zero_bit((void*)addr,size) | ||
460 | 421 | ||
461 | #endif /* __KERNEL__ */ | 422 | #endif /* __KERNEL__ */ |
462 | 423 | ||
diff --git a/include/asm-i386/kprobes.h b/include/asm-i386/kprobes.h index a0d2d74a7dda..57d157c5cf89 100644 --- a/include/asm-i386/kprobes.h +++ b/include/asm-i386/kprobes.h | |||
@@ -34,6 +34,7 @@ struct pt_regs; | |||
34 | 34 | ||
35 | typedef u8 kprobe_opcode_t; | 35 | typedef u8 kprobe_opcode_t; |
36 | #define BREAKPOINT_INSTRUCTION 0xcc | 36 | #define BREAKPOINT_INSTRUCTION 0xcc |
37 | #define RELATIVEJUMP_INSTRUCTION 0xe9 | ||
37 | #define MAX_INSN_SIZE 16 | 38 | #define MAX_INSN_SIZE 16 |
38 | #define MAX_STACK_SIZE 64 | 39 | #define MAX_STACK_SIZE 64 |
39 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ | 40 | #define MIN_STACK_SIZE(ADDR) (((MAX_STACK_SIZE) < \ |
@@ -51,6 +52,11 @@ void kretprobe_trampoline(void); | |||
51 | struct arch_specific_insn { | 52 | struct arch_specific_insn { |
52 | /* copy of the original instruction */ | 53 | /* copy of the original instruction */ |
53 | kprobe_opcode_t *insn; | 54 | kprobe_opcode_t *insn; |
55 | /* | ||
56 | * If this flag is not 0, this kprobe can be boosted when its | ||
57 | * post_handler and break_handler are not set. | ||
58 | */ | ||
59 | int boostable; | ||
54 | }; | 60 | }; |
55 | 61 | ||
56 | struct prev_kprobe { | 62 | struct prev_kprobe { |
diff --git a/include/asm-i386/stat.h b/include/asm-i386/stat.h index b464f8020ec4..67eae78323ba 100644 --- a/include/asm-i386/stat.h +++ b/include/asm-i386/stat.h | |||
@@ -58,8 +58,7 @@ struct stat64 { | |||
58 | long long st_size; | 58 | long long st_size; |
59 | unsigned long st_blksize; | 59 | unsigned long st_blksize; |
60 | 60 | ||
61 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | 61 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
62 | unsigned long __pad4; /* future possible st_blocks high bits */ | ||
63 | 62 | ||
64 | unsigned long st_atime; | 63 | unsigned long st_atime; |
65 | unsigned long st_atime_nsec; | 64 | unsigned long st_atime_nsec; |
diff --git a/include/asm-i386/types.h b/include/asm-i386/types.h index ced00fe8fe61..e50a08bd7ced 100644 --- a/include/asm-i386/types.h +++ b/include/asm-i386/types.h | |||
@@ -63,6 +63,11 @@ typedef u64 sector_t; | |||
63 | #define HAVE_SECTOR_T | 63 | #define HAVE_SECTOR_T |
64 | #endif | 64 | #endif |
65 | 65 | ||
66 | #ifdef CONFIG_LSF | ||
67 | typedef u64 blkcnt_t; | ||
68 | #define HAVE_BLKCNT_T | ||
69 | #endif | ||
70 | |||
66 | #endif /* __ASSEMBLY__ */ | 71 | #endif /* __ASSEMBLY__ */ |
67 | 72 | ||
68 | #endif /* __KERNEL__ */ | 73 | #endif /* __KERNEL__ */ |
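
The stat64 and blkcnt_t hunks above belong together: st_blocks counts 512-byte units, so a 32-bit counter caps the representable allocation at 2 TiB, which appears to be the limit the CONFIG_LSF-gated 64-bit blkcnt_t is meant to lift. A small worked check, illustration only:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 2^32 blocks of 512 bytes each */
	uint64_t max_bytes_32bit = (uint64_t)UINT32_MAX * 512 + 512;

	printf("32-bit st_blocks limit: %llu bytes (= 2 TiB)\n",
	       (unsigned long long)max_bytes_32bit);
	assert(max_bytes_32bit == (1ULL << 41));	/* 2 TiB */
	return 0;
}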
diff --git a/include/asm-ia64/bitops.h b/include/asm-ia64/bitops.h index 36d0fb95ea89..90921e162793 100644 --- a/include/asm-ia64/bitops.h +++ b/include/asm-ia64/bitops.h | |||
@@ -5,8 +5,8 @@ | |||
5 | * Copyright (C) 1998-2003 Hewlett-Packard Co | 5 | * Copyright (C) 1998-2003 Hewlett-Packard Co |
6 | * David Mosberger-Tang <davidm@hpl.hp.com> | 6 | * David Mosberger-Tang <davidm@hpl.hp.com> |
7 | * | 7 | * |
8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 O(1) | 8 | * 02/06/02 find_next_bit() and find_first_bit() added from Erich Focht's ia64 |
9 | * scheduler patch | 9 | * O(1) scheduler patch |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
@@ -25,9 +25,9 @@ | |||
25 | * restricted to acting on a single-word quantity. | 25 | * restricted to acting on a single-word quantity. |
26 | * | 26 | * |
27 | * The address must be (at least) "long" aligned. | 27 | * The address must be (at least) "long" aligned. |
28 | * Note that there are drivers (e.g., eepro100) which use these operations to operate on | 28 | * Note that there are drivers (e.g., eepro100) which use these operations to |
29 | * hw-defined data-structures, so we can't easily change these operations to force a | 29 | * operate on hw-defined data-structures, so we can't easily change these |
30 | * bigger alignment. | 30 | * operations to force a bigger alignment. |
31 | * | 31 | * |
32 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). | 32 | * bit 0 is the LSB of addr; bit 32 is the LSB of (addr+1). |
33 | */ | 33 | */ |
@@ -284,8 +284,8 @@ test_bit (int nr, const volatile void *addr) | |||
284 | * ffz - find the first zero bit in a long word | 284 | * ffz - find the first zero bit in a long word |
285 | * @x: The long word to find the bit in | 285 | * @x: The long word to find the bit in |
286 | * | 286 | * |
287 | * Returns the bit-number (0..63) of the first (least significant) zero bit. Undefined if | 287 | * Returns the bit-number (0..63) of the first (least significant) zero bit. |
288 | * no zero exists, so code should check against ~0UL first... | 288 | * Undefined if no zero exists, so code should check against ~0UL first... |
289 | */ | 289 | */ |
290 | static inline unsigned long | 290 | static inline unsigned long |
291 | ffz (unsigned long x) | 291 | ffz (unsigned long x) |
@@ -345,13 +345,14 @@ fls (int t) | |||
345 | x |= x >> 16; | 345 | x |= x >> 16; |
346 | return ia64_popcnt(x); | 346 | return ia64_popcnt(x); |
347 | } | 347 | } |
348 | #define fls64(x) generic_fls64(x) | 348 | |
349 | #include <asm-generic/bitops/fls64.h> | ||
349 | 350 | ||
350 | /* | 351 | /* |
351 | * ffs: find first bit set. This is defined the same way as the libc and compiler builtin | 352 | * ffs: find first bit set. This is defined the same way as the libc and |
352 | * ffs routines, therefore differs in spirit from the above ffz (man ffs): it operates on | 353 | * compiler builtin ffs routines, therefore differs in spirit from the above |
353 | * "int" values only and the result value is the bit number + 1. ffs(0) is defined to | 354 | * ffz (man ffs): it operates on "int" values only and the result value is the |
354 | * return zero. | 355 | * bit number + 1. ffs(0) is defined to return zero. |
355 | */ | 356 | */ |
356 | #define ffs(x) __builtin_ffs(x) | 357 | #define ffs(x) __builtin_ffs(x) |
357 | 358 | ||
@@ -373,51 +374,17 @@ hweight64 (unsigned long x) | |||
373 | 374 | ||
374 | #endif /* __KERNEL__ */ | 375 | #endif /* __KERNEL__ */ |
375 | 376 | ||
376 | extern int __find_next_zero_bit (const void *addr, unsigned long size, | 377 | #include <asm-generic/bitops/find.h> |
377 | unsigned long offset); | ||
378 | extern int __find_next_bit(const void *addr, unsigned long size, | ||
379 | unsigned long offset); | ||
380 | |||
381 | #define find_next_zero_bit(addr, size, offset) \ | ||
382 | __find_next_zero_bit((addr), (size), (offset)) | ||
383 | #define find_next_bit(addr, size, offset) \ | ||
384 | __find_next_bit((addr), (size), (offset)) | ||
385 | |||
386 | /* | ||
387 | * The optimizer actually does good code for this case.. | ||
388 | */ | ||
389 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | ||
390 | |||
391 | #define find_first_bit(addr, size) find_next_bit((addr), (size), 0) | ||
392 | 378 | ||
393 | #ifdef __KERNEL__ | 379 | #ifdef __KERNEL__ |
394 | 380 | ||
395 | #define __clear_bit(nr, addr) clear_bit(nr, addr) | 381 | #include <asm-generic/bitops/ext2-non-atomic.h> |
396 | 382 | ||
397 | #define ext2_set_bit test_and_set_bit | ||
398 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 383 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
399 | #define ext2_clear_bit test_and_clear_bit | ||
400 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 384 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
401 | #define ext2_test_bit test_bit | ||
402 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
403 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
404 | |||
405 | /* Bitmap functions for the minix filesystem. */ | ||
406 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
407 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
408 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
409 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
410 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
411 | 385 | ||
412 | static inline int | 386 | #include <asm-generic/bitops/minix.h> |
413 | sched_find_first_bit (unsigned long *b) | 387 | #include <asm-generic/bitops/sched.h> |
414 | { | ||
415 | if (unlikely(b[0])) | ||
416 | return __ffs(b[0]); | ||
417 | if (unlikely(b[1])) | ||
418 | return 64 + __ffs(b[1]); | ||
419 | return __ffs(b[2]) + 128; | ||
420 | } | ||
421 | 388 | ||
422 | #endif /* __KERNEL__ */ | 389 | #endif /* __KERNEL__ */ |
423 | 390 | ||
diff --git a/include/asm-ia64/dmi.h b/include/asm-ia64/dmi.h new file mode 100644 index 000000000000..f3efaa229525 --- /dev/null +++ b/include/asm-ia64/dmi.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_DMI_H | ||
2 | #define _ASM_DMI_H 1 | ||
3 | |||
4 | #include <asm/io.h> | ||
5 | |||
6 | #endif | ||
diff --git a/include/asm-ia64/io.h b/include/asm-ia64/io.h index b64fdb985494..c2e3742108bb 100644 --- a/include/asm-ia64/io.h +++ b/include/asm-ia64/io.h | |||
@@ -88,8 +88,8 @@ phys_to_virt (unsigned long address) | |||
88 | } | 88 | } |
89 | 89 | ||
90 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE | 90 | #define ARCH_HAS_VALID_PHYS_ADDR_RANGE |
91 | extern int valid_phys_addr_range (unsigned long addr, size_t *count); /* efi.c */ | 91 | extern int valid_phys_addr_range (unsigned long addr, size_t count); /* efi.c */ |
92 | extern int valid_mmap_phys_addr_range (unsigned long addr, size_t *count); | 92 | extern int valid_mmap_phys_addr_range (unsigned long addr, size_t count); |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * The following two macros are deprecated and scheduled for removal. | 95 | * The following two macros are deprecated and scheduled for removal. |
@@ -416,24 +416,18 @@ __writeq (unsigned long val, volatile void __iomem *addr) | |||
416 | # define outl_p outl | 416 | # define outl_p outl |
417 | #endif | 417 | #endif |
418 | 418 | ||
419 | /* | 419 | extern void __iomem * ioremap(unsigned long offset, unsigned long size); |
420 | * An "address" in IO memory space is not clearly either an integer or a pointer. We will | 420 | extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size); |
421 | * accept both, thus the casts. | ||
422 | * | ||
423 | * On ia-64, we access the physical I/O memory space through the uncached kernel region. | ||
424 | */ | ||
425 | static inline void __iomem * | ||
426 | ioremap (unsigned long offset, unsigned long size) | ||
427 | { | ||
428 | return (void __iomem *) (__IA64_UNCACHED_OFFSET | (offset)); | ||
429 | } | ||
430 | 421 | ||
431 | static inline void | 422 | static inline void |
432 | iounmap (volatile void __iomem *addr) | 423 | iounmap (volatile void __iomem *addr) |
433 | { | 424 | { |
434 | } | 425 | } |
435 | 426 | ||
436 | #define ioremap_nocache(o,s) ioremap(o,s) | 427 | /* Use normal IO mappings for DMI */ |
428 | #define dmi_ioremap ioremap | ||
429 | #define dmi_iounmap(x,l) iounmap(x) | ||
430 | #define dmi_alloc(l) kmalloc(l, GFP_ATOMIC) | ||
437 | 431 | ||
438 | # ifdef __KERNEL__ | 432 | # ifdef __KERNEL__ |
439 | 433 | ||
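The io.h hunk turns ioremap()/ioremap_nocache() into real functions and adds dmi_ioremap()/dmi_iounmap()/dmi_alloc() so that generic DMI table scanning can use normal IO mappings on ia64. The user-space sketch below only illustrates the map, copy, unmap pattern those macros serve; fake_ioremap(), fake_iounmap() and the table address are invented stand-ins, not kernel APIs.

/* Illustrative map -> copy -> unmap pattern behind dmi_ioremap(),
 * dmi_alloc() and dmi_iounmap().  Everything named fake_* is a stand-in
 * for this sketch, not a kernel interface. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char fake_firmware_table[32] = "_DMI_ illustrative table bytes";

static void *fake_ioremap(unsigned long offset, unsigned long size)
{
	(void)offset;			/* a real ioremap() maps physical memory */
	(void)size;
	return fake_firmware_table;	/* here we just hand back a static buffer */
}

static void fake_iounmap(void *addr)
{
	(void)addr;			/* nothing to tear down in the sketch */
}

int main(void)
{
	void *mapped = fake_ioremap(0xf0000, sizeof(fake_firmware_table));
	char *copy = malloc(sizeof(fake_firmware_table));	/* dmi_alloc() analogue */

	if (!copy)
		return 1;
	memcpy(copy, mapped, sizeof(fake_firmware_table));	/* scan from the copy */
	fake_iounmap(mapped);					/* dmi_iounmap() analogue */
	printf("copied table starts with: %.5s\n", copy);
	free(copy);
	return 0;
}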
diff --git a/include/asm-ia64/sn/sn_sal.h b/include/asm-ia64/sn/sn_sal.h index 244449df7411..bf4cc867a698 100644 --- a/include/asm-ia64/sn/sn_sal.h +++ b/include/asm-ia64/sn/sn_sal.h | |||
@@ -159,7 +159,7 @@ | |||
159 | static inline u32 | 159 | static inline u32 |
160 | sn_sal_rev(void) | 160 | sn_sal_rev(void) |
161 | { | 161 | { |
162 | struct ia64_sal_systab *systab = efi.sal_systab; | 162 | struct ia64_sal_systab *systab = __va(efi.sal_systab); |
163 | 163 | ||
164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); | 164 | return (u32)(systab->sal_b_rev_major << 8 | systab->sal_b_rev_minor); |
165 | } | 165 | } |
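The sn_sal.h fix reflects that efi.sal_systab now holds a physical address, so it must be run through __va() before the SAL revision fields can be read. The toy conversion below shows the idea; the offset constant and types are illustrative, not the exact ia64 mapping.

/* Toy physical-to-virtual conversion in the spirit of __va(): the kernel
 * direct mapping is just "physical address plus a fixed offset".
 * FAKE_PAGE_OFFSET is an assumed value chosen for illustration. */
#include <stdio.h>
#include <stdint.h>

#define FAKE_PAGE_OFFSET 0xe000000000000000ULL
#define fake_va(phys)    ((uint64_t)(phys) + FAKE_PAGE_OFFSET)

int main(void)
{
	uint64_t sal_systab_phys = 0x1000;	/* what the EFI code now stores */

	printf("phys 0x%llx -> virt 0x%llx\n",
	       (unsigned long long)sal_systab_phys,
	       (unsigned long long)fake_va(sal_systab_phys));
	return 0;
}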
diff --git a/include/asm-m32r/bitops.h b/include/asm-m32r/bitops.h index abea2fdd8689..902a366101a5 100644 --- a/include/asm-m32r/bitops.h +++ b/include/asm-m32r/bitops.h | |||
@@ -63,25 +63,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
63 | } | 63 | } |
64 | 64 | ||
65 | /** | 65 | /** |
66 | * __set_bit - Set a bit in memory | ||
67 | * @nr: the bit to set | ||
68 | * @addr: the address to start counting from | ||
69 | * | ||
70 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
71 | * If it's called on the same region of memory simultaneously, the effect | ||
72 | * may be that only one operation succeeds. | ||
73 | */ | ||
74 | static __inline__ void __set_bit(int nr, volatile void * addr) | ||
75 | { | ||
76 | __u32 mask; | ||
77 | volatile __u32 *a = addr; | ||
78 | |||
79 | a += (nr >> 5); | ||
80 | mask = (1 << (nr & 0x1F)); | ||
81 | *a |= mask; | ||
82 | } | ||
83 | |||
84 | /** | ||
85 | * clear_bit - Clears a bit in memory | 66 | * clear_bit - Clears a bit in memory |
86 | * @nr: Bit to clear | 67 | * @nr: Bit to clear |
87 | * @addr: Address to start counting from | 68 | * @addr: Address to start counting from |
@@ -118,39 +99,10 @@ static __inline__ void clear_bit(int nr, volatile void * addr) | |||
118 | local_irq_restore(flags); | 99 | local_irq_restore(flags); |
119 | } | 100 | } |
120 | 101 | ||
121 | static __inline__ void __clear_bit(int nr, volatile unsigned long * addr) | ||
122 | { | ||
123 | unsigned long mask; | ||
124 | volatile unsigned long *a = addr; | ||
125 | |||
126 | a += (nr >> 5); | ||
127 | mask = (1 << (nr & 0x1F)); | ||
128 | *a &= ~mask; | ||
129 | } | ||
130 | |||
131 | #define smp_mb__before_clear_bit() barrier() | 102 | #define smp_mb__before_clear_bit() barrier() |
132 | #define smp_mb__after_clear_bit() barrier() | 103 | #define smp_mb__after_clear_bit() barrier() |
133 | 104 | ||
134 | /** | 105 | /** |
135 | * __change_bit - Toggle a bit in memory | ||
136 | * @nr: the bit to set | ||
137 | * @addr: the address to start counting from | ||
138 | * | ||
139 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
140 | * If it's called on the same region of memory simultaneously, the effect | ||
141 | * may be that only one operation succeeds. | ||
142 | */ | ||
143 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
144 | { | ||
145 | __u32 mask; | ||
146 | volatile __u32 *a = addr; | ||
147 | |||
148 | a += (nr >> 5); | ||
149 | mask = (1 << (nr & 0x1F)); | ||
150 | *a ^= mask; | ||
151 | } | ||
152 | |||
153 | /** | ||
154 | * change_bit - Toggle a bit in memory | 106 | * change_bit - Toggle a bit in memory |
155 | * @nr: Bit to clear | 107 | * @nr: Bit to clear |
156 | * @addr: Address to start counting from | 108 | * @addr: Address to start counting from |
@@ -221,28 +173,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
221 | } | 173 | } |
222 | 174 | ||
223 | /** | 175 | /** |
224 | * __test_and_set_bit - Set a bit and return its old value | ||
225 | * @nr: Bit to set | ||
226 | * @addr: Address to count from | ||
227 | * | ||
228 | * This operation is non-atomic and can be reordered. | ||
229 | * If two examples of this operation race, one can appear to succeed | ||
230 | * but actually fail. You must protect multiple accesses with a lock. | ||
231 | */ | ||
232 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
233 | { | ||
234 | __u32 mask, oldbit; | ||
235 | volatile __u32 *a = addr; | ||
236 | |||
237 | a += (nr >> 5); | ||
238 | mask = (1 << (nr & 0x1F)); | ||
239 | oldbit = (*a & mask); | ||
240 | *a |= mask; | ||
241 | |||
242 | return (oldbit != 0); | ||
243 | } | ||
244 | |||
245 | /** | ||
246 | * test_and_clear_bit - Clear a bit and return its old value | 176 | * test_and_clear_bit - Clear a bit and return its old value |
247 | * @nr: Bit to set | 177 | * @nr: Bit to set |
248 | * @addr: Address to count from | 178 | * @addr: Address to count from |
@@ -280,42 +210,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
280 | } | 210 | } |
281 | 211 | ||
282 | /** | 212 | /** |
283 | * __test_and_clear_bit - Clear a bit and return its old value | ||
284 | * @nr: Bit to set | ||
285 | * @addr: Address to count from | ||
286 | * | ||
287 | * This operation is non-atomic and can be reordered. | ||
288 | * If two examples of this operation race, one can appear to succeed | ||
289 | * but actually fail. You must protect multiple accesses with a lock. | ||
290 | */ | ||
291 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
292 | { | ||
293 | __u32 mask, oldbit; | ||
294 | volatile __u32 *a = addr; | ||
295 | |||
296 | a += (nr >> 5); | ||
297 | mask = (1 << (nr & 0x1F)); | ||
298 | oldbit = (*a & mask); | ||
299 | *a &= ~mask; | ||
300 | |||
301 | return (oldbit != 0); | ||
302 | } | ||
303 | |||
304 | /* WARNING: non atomic and it can be reordered! */ | ||
305 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | ||
306 | { | ||
307 | __u32 mask, oldbit; | ||
308 | volatile __u32 *a = addr; | ||
309 | |||
310 | a += (nr >> 5); | ||
311 | mask = (1 << (nr & 0x1F)); | ||
312 | oldbit = (*a & mask); | ||
313 | *a ^= mask; | ||
314 | |||
315 | return (oldbit != 0); | ||
316 | } | ||
317 | |||
318 | /** | ||
319 | * test_and_change_bit - Change a bit and return its old value | 213 | * test_and_change_bit - Change a bit and return its old value |
320 | * @nr: Bit to set | 214 | * @nr: Bit to set |
321 | * @addr: Address to count from | 215 | * @addr: Address to count from |
@@ -350,353 +244,26 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
350 | return (oldbit != 0); | 244 | return (oldbit != 0); |
351 | } | 245 | } |
352 | 246 | ||
353 | /** | 247 | #include <asm-generic/bitops/non-atomic.h> |
354 | * test_bit - Determine whether a bit is set | 248 | #include <asm-generic/bitops/ffz.h> |
355 | * @nr: bit number to test | 249 | #include <asm-generic/bitops/__ffs.h> |
356 | * @addr: Address to start counting from | 250 | #include <asm-generic/bitops/fls.h> |
357 | */ | 251 | #include <asm-generic/bitops/fls64.h> |
358 | static __inline__ int test_bit(int nr, const volatile void * addr) | ||
359 | { | ||
360 | __u32 mask; | ||
361 | const volatile __u32 *a = addr; | ||
362 | |||
363 | a += (nr >> 5); | ||
364 | mask = (1 << (nr & 0x1F)); | ||
365 | |||
366 | return ((*a & mask) != 0); | ||
367 | } | ||
368 | |||
369 | /** | ||
370 | * ffz - find first zero in word. | ||
371 | * @word: The word to search | ||
372 | * | ||
373 | * Undefined if no zero exists, so code should check against ~0UL first. | ||
374 | */ | ||
375 | static __inline__ unsigned long ffz(unsigned long word) | ||
376 | { | ||
377 | int k; | ||
378 | |||
379 | word = ~word; | ||
380 | k = 0; | ||
381 | if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } | ||
382 | if (!(word & 0x000000ff)) { k += 8; word >>= 8; } | ||
383 | if (!(word & 0x0000000f)) { k += 4; word >>= 4; } | ||
384 | if (!(word & 0x00000003)) { k += 2; word >>= 2; } | ||
385 | if (!(word & 0x00000001)) { k += 1; } | ||
386 | |||
387 | return k; | ||
388 | } | ||
389 | |||
390 | /** | ||
391 | * find_first_zero_bit - find the first zero bit in a memory region | ||
392 | * @addr: The address to start the search at | ||
393 | * @size: The maximum size to search | ||
394 | * | ||
395 | * Returns the bit-number of the first zero bit, not the number of the byte | ||
396 | * containing a bit. | ||
397 | */ | ||
398 | |||
399 | #define find_first_zero_bit(addr, size) \ | ||
400 | find_next_zero_bit((addr), (size), 0) | ||
401 | |||
402 | /** | ||
403 | * find_next_zero_bit - find the first zero bit in a memory region | ||
404 | * @addr: The address to base the search on | ||
405 | * @offset: The bitnumber to start searching at | ||
406 | * @size: The maximum size to search | ||
407 | */ | ||
408 | static __inline__ int find_next_zero_bit(const unsigned long *addr, | ||
409 | int size, int offset) | ||
410 | { | ||
411 | const unsigned long *p = addr + (offset >> 5); | ||
412 | unsigned long result = offset & ~31UL; | ||
413 | unsigned long tmp; | ||
414 | |||
415 | if (offset >= size) | ||
416 | return size; | ||
417 | size -= result; | ||
418 | offset &= 31UL; | ||
419 | if (offset) { | ||
420 | tmp = *(p++); | ||
421 | tmp |= ~0UL >> (32-offset); | ||
422 | if (size < 32) | ||
423 | goto found_first; | ||
424 | if (~tmp) | ||
425 | goto found_middle; | ||
426 | size -= 32; | ||
427 | result += 32; | ||
428 | } | ||
429 | while (size & ~31UL) { | ||
430 | if (~(tmp = *(p++))) | ||
431 | goto found_middle; | ||
432 | result += 32; | ||
433 | size -= 32; | ||
434 | } | ||
435 | if (!size) | ||
436 | return result; | ||
437 | tmp = *p; | ||
438 | |||
439 | found_first: | ||
440 | tmp |= ~0UL << size; | ||
441 | found_middle: | ||
442 | return result + ffz(tmp); | ||
443 | } | ||
444 | |||
445 | /** | ||
446 | * __ffs - find first bit in word. | ||
447 | * @word: The word to search | ||
448 | * | ||
449 | * Undefined if no bit exists, so code should check against 0 first. | ||
450 | */ | ||
451 | static __inline__ unsigned long __ffs(unsigned long word) | ||
452 | { | ||
453 | int k = 0; | ||
454 | |||
455 | if (!(word & 0x0000ffff)) { k += 16; word >>= 16; } | ||
456 | if (!(word & 0x000000ff)) { k += 8; word >>= 8; } | ||
457 | if (!(word & 0x0000000f)) { k += 4; word >>= 4; } | ||
458 | if (!(word & 0x00000003)) { k += 2; word >>= 2; } | ||
459 | if (!(word & 0x00000001)) { k += 1;} | ||
460 | |||
461 | return k; | ||
462 | } | ||
463 | |||
464 | /* | ||
465 | * fls: find last bit set. | ||
466 | */ | ||
467 | #define fls(x) generic_fls(x) | ||
468 | #define fls64(x) generic_fls64(x) | ||
469 | 252 | ||
470 | #ifdef __KERNEL__ | 253 | #ifdef __KERNEL__ |
471 | 254 | ||
472 | /* | 255 | #include <asm-generic/bitops/sched.h> |
473 | * Every architecture must define this function. It's the fastest | 256 | #include <asm-generic/bitops/find.h> |
474 | * way of searching a 140-bit bitmap where the first 100 bits are | 257 | #include <asm-generic/bitops/ffs.h> |
475 | * unlikely to be set. It's guaranteed that at least one of the 140 | 258 | #include <asm-generic/bitops/hweight.h> |
476 | * bits is cleared. | ||
477 | */ | ||
478 | static inline int sched_find_first_bit(unsigned long *b) | ||
479 | { | ||
480 | if (unlikely(b[0])) | ||
481 | return __ffs(b[0]); | ||
482 | if (unlikely(b[1])) | ||
483 | return __ffs(b[1]) + 32; | ||
484 | if (unlikely(b[2])) | ||
485 | return __ffs(b[2]) + 64; | ||
486 | if (b[3]) | ||
487 | return __ffs(b[3]) + 96; | ||
488 | return __ffs(b[4]) + 128; | ||
489 | } | ||
490 | |||
491 | /** | ||
492 | * find_next_bit - find the first set bit in a memory region | ||
493 | * @addr: The address to base the search on | ||
494 | * @offset: The bitnumber to start searching at | ||
495 | * @size: The maximum size to search | ||
496 | */ | ||
497 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
498 | unsigned long size, unsigned long offset) | ||
499 | { | ||
500 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
501 | unsigned int result = offset & ~31UL; | ||
502 | unsigned int tmp; | ||
503 | |||
504 | if (offset >= size) | ||
505 | return size; | ||
506 | size -= result; | ||
507 | offset &= 31UL; | ||
508 | if (offset) { | ||
509 | tmp = *p++; | ||
510 | tmp &= ~0UL << offset; | ||
511 | if (size < 32) | ||
512 | goto found_first; | ||
513 | if (tmp) | ||
514 | goto found_middle; | ||
515 | size -= 32; | ||
516 | result += 32; | ||
517 | } | ||
518 | while (size >= 32) { | ||
519 | if ((tmp = *p++) != 0) | ||
520 | goto found_middle; | ||
521 | result += 32; | ||
522 | size -= 32; | ||
523 | } | ||
524 | if (!size) | ||
525 | return result; | ||
526 | tmp = *p; | ||
527 | |||
528 | found_first: | ||
529 | tmp &= ~0UL >> (32 - size); | ||
530 | if (tmp == 0UL) /* Are any bits set? */ | ||
531 | return result + size; /* Nope. */ | ||
532 | found_middle: | ||
533 | return result + __ffs(tmp); | ||
534 | } | ||
535 | |||
536 | /** | ||
537 | * find_first_bit - find the first set bit in a memory region | ||
538 | * @addr: The address to start the search at | ||
539 | * @size: The maximum size to search | ||
540 | * | ||
541 | * Returns the bit-number of the first set bit, not the number of the byte | ||
542 | * containing a bit. | ||
543 | */ | ||
544 | #define find_first_bit(addr, size) \ | ||
545 | find_next_bit((addr), (size), 0) | ||
546 | |||
547 | /** | ||
548 | * ffs - find first bit set | ||
549 | * @x: the word to search | ||
550 | * | ||
551 | * This is defined the same way as | ||
552 | * the libc and compiler builtin ffs routines, therefore | ||
553 | * differs in spirit from the above ffz (man ffs). | ||
554 | */ | ||
555 | #define ffs(x) generic_ffs(x) | ||
556 | |||
557 | /** | ||
558 | * hweightN - returns the hamming weight of a N-bit word | ||
559 | * @x: the word to weigh | ||
560 | * | ||
561 | * The Hamming Weight of a number is the total number of bits set in it. | ||
562 | */ | ||
563 | |||
564 | #define hweight32(x) generic_hweight32(x) | ||
565 | #define hweight16(x) generic_hweight16(x) | ||
566 | #define hweight8(x) generic_hweight8(x) | ||
567 | 259 | ||
568 | #endif /* __KERNEL__ */ | 260 | #endif /* __KERNEL__ */ |
569 | 261 | ||
570 | #ifdef __KERNEL__ | 262 | #ifdef __KERNEL__ |
571 | 263 | ||
572 | /* | 264 | #include <asm-generic/bitops/ext2-non-atomic.h> |
573 | * ext2_XXXX function | 265 | #include <asm-generic/bitops/ext2-atomic.h> |
574 | * orig: include/asm-sh/bitops.h | 266 | #include <asm-generic/bitops/minix.h> |
575 | */ | ||
576 | |||
577 | #ifdef __LITTLE_ENDIAN__ | ||
578 | #define ext2_set_bit test_and_set_bit | ||
579 | #define ext2_clear_bit __test_and_clear_bit | ||
580 | #define ext2_test_bit test_bit | ||
581 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
582 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
583 | #else | ||
584 | static inline int ext2_set_bit(int nr, volatile void * addr) | ||
585 | { | ||
586 | __u8 mask, oldbit; | ||
587 | volatile __u8 *a = addr; | ||
588 | |||
589 | a += (nr >> 3); | ||
590 | mask = (1 << (nr & 0x07)); | ||
591 | oldbit = (*a & mask); | ||
592 | *a |= mask; | ||
593 | |||
594 | return (oldbit != 0); | ||
595 | } | ||
596 | |||
597 | static inline int ext2_clear_bit(int nr, volatile void * addr) | ||
598 | { | ||
599 | __u8 mask, oldbit; | ||
600 | volatile __u8 *a = addr; | ||
601 | |||
602 | a += (nr >> 3); | ||
603 | mask = (1 << (nr & 0x07)); | ||
604 | oldbit = (*a & mask); | ||
605 | *a &= ~mask; | ||
606 | |||
607 | return (oldbit != 0); | ||
608 | } | ||
609 | |||
610 | static inline int ext2_test_bit(int nr, const volatile void * addr) | ||
611 | { | ||
612 | __u32 mask; | ||
613 | const volatile __u8 *a = addr; | ||
614 | |||
615 | a += (nr >> 3); | ||
616 | mask = (1 << (nr & 0x07)); | ||
617 | |||
618 | return ((mask & *a) != 0); | ||
619 | } | ||
620 | |||
621 | #define ext2_find_first_zero_bit(addr, size) \ | ||
622 | ext2_find_next_zero_bit((addr), (size), 0) | ||
623 | |||
624 | static inline unsigned long ext2_find_next_zero_bit(void *addr, | ||
625 | unsigned long size, unsigned long offset) | ||
626 | { | ||
627 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
628 | unsigned long result = offset & ~31UL; | ||
629 | unsigned long tmp; | ||
630 | |||
631 | if (offset >= size) | ||
632 | return size; | ||
633 | size -= result; | ||
634 | offset &= 31UL; | ||
635 | if(offset) { | ||
636 | /* We hold the little endian value in tmp, but then the | ||
637 | * shift is illegal. So we could keep a big endian value | ||
638 | * in tmp, like this: | ||
639 | * | ||
640 | * tmp = __swab32(*(p++)); | ||
641 | * tmp |= ~0UL >> (32-offset); | ||
642 | * | ||
643 | * but this would decrease performance, so we change the | ||
644 | * shift: | ||
645 | */ | ||
646 | tmp = *(p++); | ||
647 | tmp |= __swab32(~0UL >> (32-offset)); | ||
648 | if(size < 32) | ||
649 | goto found_first; | ||
650 | if(~tmp) | ||
651 | goto found_middle; | ||
652 | size -= 32; | ||
653 | result += 32; | ||
654 | } | ||
655 | while(size & ~31UL) { | ||
656 | if(~(tmp = *(p++))) | ||
657 | goto found_middle; | ||
658 | result += 32; | ||
659 | size -= 32; | ||
660 | } | ||
661 | if(!size) | ||
662 | return result; | ||
663 | tmp = *p; | ||
664 | |||
665 | found_first: | ||
666 | /* tmp is little endian, so we would have to swab the shift, | ||
667 | * see above. But then we have to swab tmp below for ffz, so | ||
668 | * we might as well do this here. | ||
669 | */ | ||
670 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
671 | found_middle: | ||
672 | return result + ffz(__swab32(tmp)); | ||
673 | } | ||
674 | #endif | ||
675 | |||
676 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
677 | ({ \ | ||
678 | int ret; \ | ||
679 | spin_lock(lock); \ | ||
680 | ret = ext2_set_bit((nr), (addr)); \ | ||
681 | spin_unlock(lock); \ | ||
682 | ret; \ | ||
683 | }) | ||
684 | |||
685 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
686 | ({ \ | ||
687 | int ret; \ | ||
688 | spin_lock(lock); \ | ||
689 | ret = ext2_clear_bit((nr), (addr)); \ | ||
690 | spin_unlock(lock); \ | ||
691 | ret; \ | ||
692 | }) | ||
693 | |||
694 | /* Bitmap functions for the minix filesystem. */ | ||
695 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,addr) | ||
696 | #define minix_set_bit(nr,addr) __set_bit(nr,addr) | ||
697 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,addr) | ||
698 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
699 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
700 | 267 | ||
701 | #endif /* __KERNEL__ */ | 268 | #endif /* __KERNEL__ */ |
702 | 269 | ||
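The m32r hunks delete the hand-written big-endian ext2 helpers and pull in asm-generic/bitops/ext2-non-atomic.h and ext2-atomic.h instead. The sketch below shows the byte-oriented, little-endian bit numbering those helpers implement, which is exactly what the removed ext2_set_bit() open-coded: bit nr lives in byte nr/8 at position nr%8, independent of host endianness.

/* Standalone sketch of the ext2 on-disk bit numbering: byte-addressed,
 * little-endian within each byte.  Mirrors the removed m32r ext2_set_bit()
 * and what <asm-generic/bitops/ext2-non-atomic.h> now provides. */
#include <stdio.h>

static int ext2_set_bit_sketch(int nr, void *addr)
{
	unsigned char *byte = (unsigned char *)addr + (nr >> 3);
	unsigned char mask = 1u << (nr & 7);
	int oldbit = (*byte & mask) != 0;

	*byte |= mask;
	return oldbit;
}

int main(void)
{
	unsigned char bitmap[8] = { 0 };

	ext2_set_bit_sketch(10, bitmap);	/* byte 1, bit 2 */
	printf("byte 1 = 0x%02x\n", bitmap[1]);	/* prints 0x04 */
	return 0;
}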
diff --git a/include/asm-m68k/bitops.h b/include/asm-m68k/bitops.h index 13f4c0048463..1a61fdb56aaf 100644 --- a/include/asm-m68k/bitops.h +++ b/include/asm-m68k/bitops.h | |||
@@ -310,36 +310,10 @@ static inline int fls(int x) | |||
310 | 310 | ||
311 | return 32 - cnt; | 311 | return 32 - cnt; |
312 | } | 312 | } |
313 | #define fls64(x) generic_fls64(x) | ||
314 | 313 | ||
315 | /* | 314 | #include <asm-generic/bitops/fls64.h> |
316 | * Every architecture must define this function. It's the fastest | 315 | #include <asm-generic/bitops/sched.h> |
317 | * way of searching a 140-bit bitmap where the first 100 bits are | 316 | #include <asm-generic/bitops/hweight.h> |
318 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
319 | * bits is cleared. | ||
320 | */ | ||
321 | static inline int sched_find_first_bit(const unsigned long *b) | ||
322 | { | ||
323 | if (unlikely(b[0])) | ||
324 | return __ffs(b[0]); | ||
325 | if (unlikely(b[1])) | ||
326 | return __ffs(b[1]) + 32; | ||
327 | if (unlikely(b[2])) | ||
328 | return __ffs(b[2]) + 64; | ||
329 | if (b[3]) | ||
330 | return __ffs(b[3]) + 96; | ||
331 | return __ffs(b[4]) + 128; | ||
332 | } | ||
333 | |||
334 | |||
335 | /* | ||
336 | * hweightN: returns the hamming weight (i.e. the number | ||
337 | * of bits set) of a N-bit word | ||
338 | */ | ||
339 | |||
340 | #define hweight32(x) generic_hweight32(x) | ||
341 | #define hweight16(x) generic_hweight16(x) | ||
342 | #define hweight8(x) generic_hweight8(x) | ||
343 | 317 | ||
344 | /* Bitmap functions for the minix filesystem */ | 318 | /* Bitmap functions for the minix filesystem */ |
345 | 319 | ||
@@ -365,9 +339,9 @@ static inline int minix_find_first_zero_bit(const void *vaddr, unsigned size) | |||
365 | return ((p - addr) << 4) + (res ^ 31); | 339 | return ((p - addr) << 4) + (res ^ 31); |
366 | } | 340 | } |
367 | 341 | ||
368 | #define minix_test_and_set_bit(nr, addr) test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) | 342 | #define minix_test_and_set_bit(nr, addr) __test_and_set_bit((nr) ^ 16, (unsigned long *)(addr)) |
369 | #define minix_set_bit(nr,addr) set_bit((nr) ^ 16, (unsigned long *)(addr)) | 343 | #define minix_set_bit(nr,addr) __set_bit((nr) ^ 16, (unsigned long *)(addr)) |
370 | #define minix_test_and_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) | 344 | #define minix_test_and_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 16, (unsigned long *)(addr)) |
371 | 345 | ||
372 | static inline int minix_test_bit(int nr, const void *vaddr) | 346 | static inline int minix_test_bit(int nr, const void *vaddr) |
373 | { | 347 | { |
@@ -377,9 +351,9 @@ static inline int minix_test_bit(int nr, const void *vaddr) | |||
377 | 351 | ||
378 | /* Bitmap functions for the ext2 filesystem. */ | 352 | /* Bitmap functions for the ext2 filesystem. */ |
379 | 353 | ||
380 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 354 | #define ext2_set_bit(nr, addr) __test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
381 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) | 355 | #define ext2_set_bit_atomic(lock, nr, addr) test_and_set_bit((nr) ^ 24, (unsigned long *)(addr)) |
382 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 356 | #define ext2_clear_bit(nr, addr) __test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
383 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) | 357 | #define ext2_clear_bit_atomic(lock, nr, addr) test_and_clear_bit((nr) ^ 24, (unsigned long *)(addr)) |
384 | 358 | ||
385 | static inline int ext2_test_bit(int nr, const void *vaddr) | 359 | static inline int ext2_test_bit(int nr, const void *vaddr) |
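The m68k hunk switches the minix and ext2 bitmap helpers from the atomic test_and_set_bit()/test_and_clear_bit() to their non-atomic double-underscore forms, while the explicitly _atomic ext2 variants keep the locked versions. A user-space sketch of the non-atomic read-modify-write those helpers now perform is below; the word arithmetic is simplified and the m68k bit-number XOR swizzling from the hunk is left out.

/* Sketch of the non-atomic read-modify-write behind __test_and_set_bit():
 * a plain load and store with no locking, so callers must serialise access
 * themselves (which is why the *_atomic ext2 variants keep the locked
 * test_and_set_bit()). */
#include <stdio.h>

#define BITS_PER_LONG_SKETCH (8 * sizeof(unsigned long))

static int nonatomic_test_and_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long *word = addr + nr / BITS_PER_LONG_SKETCH;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG_SKETCH);
	int old = (*word & mask) != 0;	/* plain load */

	*word |= mask;			/* plain store: not safe against racing CPUs */
	return old;
}

int main(void)
{
	unsigned long map[3] = { 0, 0, 0 };

	printf("first call:  %d\n", nonatomic_test_and_set_bit(70, map));	/* 0 */
	printf("second call: %d\n", nonatomic_test_and_set_bit(70, map));	/* 1 */
	return 0;
}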
diff --git a/include/asm-m68k/stat.h b/include/asm-m68k/stat.h index c4c402a45e21..dd38bc2e9f98 100644 --- a/include/asm-m68k/stat.h +++ b/include/asm-m68k/stat.h | |||
@@ -60,8 +60,7 @@ struct stat64 { | |||
60 | long long st_size; | 60 | long long st_size; |
61 | unsigned long st_blksize; | 61 | unsigned long st_blksize; |
62 | 62 | ||
63 | unsigned long __pad4; /* future possible st_blocks high bits */ | 63 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
64 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
65 | 64 | ||
66 | unsigned long st_atime; | 65 | unsigned long st_atime; |
67 | unsigned long st_atime_nsec; | 66 | unsigned long st_atime_nsec; |
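The stat64 change folds the old __pad4 padding into a 64-bit st_blocks. The toy program below illustrates why the wider field matters; the structure is a stand-in, not the real m68k layout.

/* Why widening st_blocks matters: it counts 512-byte units, so a 32-bit
 * field overflows once a file has more than about 2 TiB allocated.  The
 * structure here is a toy, not the real m68k stat64 layout. */
#include <stdio.h>

struct toy_stat64 {
	unsigned long long st_blocks;	/* number of 512-byte blocks allocated */
};

int main(void)
{
	struct toy_stat64 st = { .st_blocks = 5000000000ULL };	/* needs more than 32 bits */
	unsigned long long bytes = st.st_blocks * 512ULL;

	printf("allocated: %llu bytes (~%llu GiB)\n", bytes, bytes >> 30);
	return 0;
}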
diff --git a/include/asm-m68knommu/bitops.h b/include/asm-m68knommu/bitops.h index 25d8a3cfef90..0b68ccd327f7 100644 --- a/include/asm-m68knommu/bitops.h +++ b/include/asm-m68knommu/bitops.h | |||
@@ -12,104 +12,10 @@ | |||
12 | 12 | ||
13 | #ifdef __KERNEL__ | 13 | #ifdef __KERNEL__ |
14 | 14 | ||
15 | /* | 15 | #include <asm-generic/bitops/ffs.h> |
16 | * Generic ffs(). | 16 | #include <asm-generic/bitops/__ffs.h> |
17 | */ | 17 | #include <asm-generic/bitops/sched.h> |
18 | static inline int ffs(int x) | 18 | #include <asm-generic/bitops/ffz.h> |
19 | { | ||
20 | int r = 1; | ||
21 | |||
22 | if (!x) | ||
23 | return 0; | ||
24 | if (!(x & 0xffff)) { | ||
25 | x >>= 16; | ||
26 | r += 16; | ||
27 | } | ||
28 | if (!(x & 0xff)) { | ||
29 | x >>= 8; | ||
30 | r += 8; | ||
31 | } | ||
32 | if (!(x & 0xf)) { | ||
33 | x >>= 4; | ||
34 | r += 4; | ||
35 | } | ||
36 | if (!(x & 3)) { | ||
37 | x >>= 2; | ||
38 | r += 2; | ||
39 | } | ||
40 | if (!(x & 1)) { | ||
41 | x >>= 1; | ||
42 | r += 1; | ||
43 | } | ||
44 | return r; | ||
45 | } | ||
46 | |||
47 | /* | ||
48 | * Generic __ffs(). | ||
49 | */ | ||
50 | static inline int __ffs(int x) | ||
51 | { | ||
52 | int r = 0; | ||
53 | |||
54 | if (!x) | ||
55 | return 0; | ||
56 | if (!(x & 0xffff)) { | ||
57 | x >>= 16; | ||
58 | r += 16; | ||
59 | } | ||
60 | if (!(x & 0xff)) { | ||
61 | x >>= 8; | ||
62 | r += 8; | ||
63 | } | ||
64 | if (!(x & 0xf)) { | ||
65 | x >>= 4; | ||
66 | r += 4; | ||
67 | } | ||
68 | if (!(x & 3)) { | ||
69 | x >>= 2; | ||
70 | r += 2; | ||
71 | } | ||
72 | if (!(x & 1)) { | ||
73 | x >>= 1; | ||
74 | r += 1; | ||
75 | } | ||
76 | return r; | ||
77 | } | ||
78 | |||
79 | /* | ||
80 | * Every architecture must define this function. It's the fastest | ||
81 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
82 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
83 | * bits is cleared. | ||
84 | */ | ||
85 | static inline int sched_find_first_bit(unsigned long *b) | ||
86 | { | ||
87 | if (unlikely(b[0])) | ||
88 | return __ffs(b[0]); | ||
89 | if (unlikely(b[1])) | ||
90 | return __ffs(b[1]) + 32; | ||
91 | if (unlikely(b[2])) | ||
92 | return __ffs(b[2]) + 64; | ||
93 | if (b[3]) | ||
94 | return __ffs(b[3]) + 96; | ||
95 | return __ffs(b[4]) + 128; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * ffz = Find First Zero in word. Undefined if no zero exists, | ||
100 | * so code should check against ~0UL first.. | ||
101 | */ | ||
102 | static __inline__ unsigned long ffz(unsigned long word) | ||
103 | { | ||
104 | unsigned long result = 0; | ||
105 | |||
106 | while(word & 1) { | ||
107 | result++; | ||
108 | word >>= 1; | ||
109 | } | ||
110 | return result; | ||
111 | } | ||
112 | |||
113 | 19 | ||
114 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) | 20 | static __inline__ void set_bit(int nr, volatile unsigned long * addr) |
115 | { | 21 | { |
@@ -254,98 +160,8 @@ static __inline__ int __test_bit(int nr, const volatile unsigned long * addr) | |||
254 | __constant_test_bit((nr),(addr)) : \ | 160 | __constant_test_bit((nr),(addr)) : \ |
255 | __test_bit((nr),(addr))) | 161 | __test_bit((nr),(addr))) |
256 | 162 | ||
257 | #define find_first_zero_bit(addr, size) \ | 163 | #include <asm-generic/bitops/find.h> |
258 | find_next_zero_bit((addr), (size), 0) | 164 | #include <asm-generic/bitops/hweight.h> |
259 | #define find_first_bit(addr, size) \ | ||
260 | find_next_bit((addr), (size), 0) | ||
261 | |||
262 | static __inline__ int find_next_zero_bit (const void * addr, int size, int offset) | ||
263 | { | ||
264 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
265 | unsigned long result = offset & ~31UL; | ||
266 | unsigned long tmp; | ||
267 | |||
268 | if (offset >= size) | ||
269 | return size; | ||
270 | size -= result; | ||
271 | offset &= 31UL; | ||
272 | if (offset) { | ||
273 | tmp = *(p++); | ||
274 | tmp |= ~0UL >> (32-offset); | ||
275 | if (size < 32) | ||
276 | goto found_first; | ||
277 | if (~tmp) | ||
278 | goto found_middle; | ||
279 | size -= 32; | ||
280 | result += 32; | ||
281 | } | ||
282 | while (size & ~31UL) { | ||
283 | if (~(tmp = *(p++))) | ||
284 | goto found_middle; | ||
285 | result += 32; | ||
286 | size -= 32; | ||
287 | } | ||
288 | if (!size) | ||
289 | return result; | ||
290 | tmp = *p; | ||
291 | |||
292 | found_first: | ||
293 | tmp |= ~0UL << size; | ||
294 | found_middle: | ||
295 | return result + ffz(tmp); | ||
296 | } | ||
297 | |||
298 | /* | ||
299 | * Find next one bit in a bitmap reasonably efficiently. | ||
300 | */ | ||
301 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
302 | unsigned long size, unsigned long offset) | ||
303 | { | ||
304 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
305 | unsigned int result = offset & ~31UL; | ||
306 | unsigned int tmp; | ||
307 | |||
308 | if (offset >= size) | ||
309 | return size; | ||
310 | size -= result; | ||
311 | offset &= 31UL; | ||
312 | if (offset) { | ||
313 | tmp = *p++; | ||
314 | tmp &= ~0UL << offset; | ||
315 | if (size < 32) | ||
316 | goto found_first; | ||
317 | if (tmp) | ||
318 | goto found_middle; | ||
319 | size -= 32; | ||
320 | result += 32; | ||
321 | } | ||
322 | while (size >= 32) { | ||
323 | if ((tmp = *p++) != 0) | ||
324 | goto found_middle; | ||
325 | result += 32; | ||
326 | size -= 32; | ||
327 | } | ||
328 | if (!size) | ||
329 | return result; | ||
330 | tmp = *p; | ||
331 | |||
332 | found_first: | ||
333 | tmp &= ~0UL >> (32 - size); | ||
334 | if (tmp == 0UL) /* Are any bits set? */ | ||
335 | return result + size; /* Nope. */ | ||
336 | found_middle: | ||
337 | return result + __ffs(tmp); | ||
338 | } | ||
339 | |||
340 | /* | ||
341 | * hweightN: returns the hamming weight (i.e. the number | ||
342 | * of bits set) of a N-bit word | ||
343 | */ | ||
344 | |||
345 | #define hweight32(x) generic_hweight32(x) | ||
346 | #define hweight16(x) generic_hweight16(x) | ||
347 | #define hweight8(x) generic_hweight8(x) | ||
348 | |||
349 | 165 | ||
350 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | 166 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) |
351 | { | 167 | { |
@@ -475,30 +291,11 @@ found_middle: | |||
475 | return result + ffz(__swab32(tmp)); | 291 | return result + ffz(__swab32(tmp)); |
476 | } | 292 | } |
477 | 293 | ||
478 | /* Bitmap functions for the minix filesystem. */ | 294 | #include <asm-generic/bitops/minix.h> |
479 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
480 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
481 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
482 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
483 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
484 | |||
485 | /** | ||
486 | * hweightN - returns the hamming weight of a N-bit word | ||
487 | * @x: the word to weigh | ||
488 | * | ||
489 | * The Hamming Weight of a number is the total number of bits set in it. | ||
490 | */ | ||
491 | |||
492 | #define hweight32(x) generic_hweight32(x) | ||
493 | #define hweight16(x) generic_hweight16(x) | ||
494 | #define hweight8(x) generic_hweight8(x) | ||
495 | 295 | ||
496 | #endif /* __KERNEL__ */ | 296 | #endif /* __KERNEL__ */ |
497 | 297 | ||
498 | /* | 298 | #include <asm-generic/bitops/fls.h> |
499 | * fls: find last bit set. | 299 | #include <asm-generic/bitops/fls64.h> |
500 | */ | ||
501 | #define fls(x) generic_fls(x) | ||
502 | #define fls64(x) generic_fls64(x) | ||
503 | 300 | ||
504 | #endif /* _M68KNOMMU_BITOPS_H */ | 301 | #endif /* _M68KNOMMU_BITOPS_H */ |
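The m68knommu hunks replace hand-rolled ffs(), __ffs(), ffz() and sched_find_first_bit() with the asm-generic versions. The small program below checks the identities those helpers obey, which the parisc hunk further down used to spell out as `#define ffz(x) __ffs(~x)`; compiler builtins stand in for the kernel helpers.

/* Quick check of the identities the generic bitops headers rely on: for
 * non-zero x, ffs(x) == __ffs(x) + 1 (1-based vs 0-based indexing), and
 * ffz(x) == __ffs(~x). */
#include <stdio.h>

int main(void)
{
	unsigned long x = 0x00f0UL;	/* bits 4..7 set */

	printf("ffs(x)   = %d\n", __builtin_ffsl((long)x));	/* 5 */
	printf("__ffs(x) = %d\n", __builtin_ctzl(x));		/* 4 */
	printf("ffz(x)   = %d\n", __builtin_ctzl(~x));		/* 0: bit 0 of x is clear */
	return 0;
}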
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h index 8e802059fe67..a1728f8c0705 100644 --- a/include/asm-mips/bitops.h +++ b/include/asm-mips/bitops.h | |||
@@ -105,22 +105,6 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr) | |||
105 | } | 105 | } |
106 | 106 | ||
107 | /* | 107 | /* |
108 | * __set_bit - Set a bit in memory | ||
109 | * @nr: the bit to set | ||
110 | * @addr: the address to start counting from | ||
111 | * | ||
112 | * Unlike set_bit(), this function is non-atomic and may be reordered. | ||
113 | * If it's called on the same region of memory simultaneously, the effect | ||
114 | * may be that only one operation succeeds. | ||
115 | */ | ||
116 | static inline void __set_bit(unsigned long nr, volatile unsigned long * addr) | ||
117 | { | ||
118 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
119 | |||
120 | *m |= 1UL << (nr & SZLONG_MASK); | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * clear_bit - Clears a bit in memory | 108 | * clear_bit - Clears a bit in memory |
125 | * @nr: Bit to clear | 109 | * @nr: Bit to clear |
126 | * @addr: Address to start counting from | 110 | * @addr: Address to start counting from |
@@ -169,22 +153,6 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr) | |||
169 | } | 153 | } |
170 | 154 | ||
171 | /* | 155 | /* |
172 | * __clear_bit - Clears a bit in memory | ||
173 | * @nr: Bit to clear | ||
174 | * @addr: Address to start counting from | ||
175 | * | ||
176 | * Unlike clear_bit(), this function is non-atomic and may be reordered. | ||
177 | * If it's called on the same region of memory simultaneously, the effect | ||
178 | * may be that only one operation succeeds. | ||
179 | */ | ||
180 | static inline void __clear_bit(unsigned long nr, volatile unsigned long * addr) | ||
181 | { | ||
182 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
183 | |||
184 | *m &= ~(1UL << (nr & SZLONG_MASK)); | ||
185 | } | ||
186 | |||
187 | /* | ||
188 | * change_bit - Toggle a bit in memory | 156 | * change_bit - Toggle a bit in memory |
189 | * @nr: Bit to change | 157 | * @nr: Bit to change |
190 | * @addr: Address to start counting from | 158 | * @addr: Address to start counting from |
@@ -235,22 +203,6 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
235 | } | 203 | } |
236 | 204 | ||
237 | /* | 205 | /* |
238 | * __change_bit - Toggle a bit in memory | ||
239 | * @nr: the bit to change | ||
240 | * @addr: the address to start counting from | ||
241 | * | ||
242 | * Unlike change_bit(), this function is non-atomic and may be reordered. | ||
243 | * If it's called on the same region of memory simultaneously, the effect | ||
244 | * may be that only one operation succeeds. | ||
245 | */ | ||
246 | static inline void __change_bit(unsigned long nr, volatile unsigned long * addr) | ||
247 | { | ||
248 | unsigned long * m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); | ||
249 | |||
250 | *m ^= 1UL << (nr & SZLONG_MASK); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * test_and_set_bit - Set a bit and return its old value | 206 | * test_and_set_bit - Set a bit and return its old value |
255 | * @nr: Bit to set | 207 | * @nr: Bit to set |
256 | * @addr: Address to count from | 208 | * @addr: Address to count from |
@@ -321,30 +273,6 @@ static inline int test_and_set_bit(unsigned long nr, | |||
321 | } | 273 | } |
322 | 274 | ||
323 | /* | 275 | /* |
324 | * __test_and_set_bit - Set a bit and return its old value | ||
325 | * @nr: Bit to set | ||
326 | * @addr: Address to count from | ||
327 | * | ||
328 | * This operation is non-atomic and can be reordered. | ||
329 | * If two examples of this operation race, one can appear to succeed | ||
330 | * but actually fail. You must protect multiple accesses with a lock. | ||
331 | */ | ||
332 | static inline int __test_and_set_bit(unsigned long nr, | ||
333 | volatile unsigned long *addr) | ||
334 | { | ||
335 | volatile unsigned long *a = addr; | ||
336 | unsigned long mask; | ||
337 | int retval; | ||
338 | |||
339 | a += nr >> SZLONG_LOG; | ||
340 | mask = 1UL << (nr & SZLONG_MASK); | ||
341 | retval = (mask & *a) != 0; | ||
342 | *a |= mask; | ||
343 | |||
344 | return retval; | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * test_and_clear_bit - Clear a bit and return its old value | 276 | * test_and_clear_bit - Clear a bit and return its old value |
349 | * @nr: Bit to clear | 277 | * @nr: Bit to clear |
350 | * @addr: Address to count from | 278 | * @addr: Address to count from |
@@ -417,30 +345,6 @@ static inline int test_and_clear_bit(unsigned long nr, | |||
417 | } | 345 | } |
418 | 346 | ||
419 | /* | 347 | /* |
420 | * __test_and_clear_bit - Clear a bit and return its old value | ||
421 | * @nr: Bit to clear | ||
422 | * @addr: Address to count from | ||
423 | * | ||
424 | * This operation is non-atomic and can be reordered. | ||
425 | * If two examples of this operation race, one can appear to succeed | ||
426 | * but actually fail. You must protect multiple accesses with a lock. | ||
427 | */ | ||
428 | static inline int __test_and_clear_bit(unsigned long nr, | ||
429 | volatile unsigned long * addr) | ||
430 | { | ||
431 | volatile unsigned long *a = addr; | ||
432 | unsigned long mask; | ||
433 | int retval; | ||
434 | |||
435 | a += (nr >> SZLONG_LOG); | ||
436 | mask = 1UL << (nr & SZLONG_MASK); | ||
437 | retval = ((mask & *a) != 0); | ||
438 | *a &= ~mask; | ||
439 | |||
440 | return retval; | ||
441 | } | ||
442 | |||
443 | /* | ||
444 | * test_and_change_bit - Change a bit and return its old value | 348 | * test_and_change_bit - Change a bit and return its old value |
445 | * @nr: Bit to change | 349 | * @nr: Bit to change |
446 | * @addr: Address to count from | 350 | * @addr: Address to count from |
@@ -509,43 +413,11 @@ static inline int test_and_change_bit(unsigned long nr, | |||
509 | } | 413 | } |
510 | } | 414 | } |
511 | 415 | ||
512 | /* | ||
513 | * __test_and_change_bit - Change a bit and return its old value | ||
514 | * @nr: Bit to change | ||
515 | * @addr: Address to count from | ||
516 | * | ||
517 | * This operation is non-atomic and can be reordered. | ||
518 | * If two examples of this operation race, one can appear to succeed | ||
519 | * but actually fail. You must protect multiple accesses with a lock. | ||
520 | */ | ||
521 | static inline int __test_and_change_bit(unsigned long nr, | ||
522 | volatile unsigned long *addr) | ||
523 | { | ||
524 | volatile unsigned long *a = addr; | ||
525 | unsigned long mask; | ||
526 | int retval; | ||
527 | |||
528 | a += (nr >> SZLONG_LOG); | ||
529 | mask = 1UL << (nr & SZLONG_MASK); | ||
530 | retval = ((mask & *a) != 0); | ||
531 | *a ^= mask; | ||
532 | |||
533 | return retval; | ||
534 | } | ||
535 | |||
536 | #undef __bi_flags | 416 | #undef __bi_flags |
537 | #undef __bi_local_irq_save | 417 | #undef __bi_local_irq_save |
538 | #undef __bi_local_irq_restore | 418 | #undef __bi_local_irq_restore |
539 | 419 | ||
540 | /* | 420 | #include <asm-generic/bitops/non-atomic.h> |
541 | * test_bit - Determine whether a bit is set | ||
542 | * @nr: bit number to test | ||
543 | * @addr: Address to start counting from | ||
544 | */ | ||
545 | static inline int test_bit(unsigned long nr, const volatile unsigned long *addr) | ||
546 | { | ||
547 | return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK)); | ||
548 | } | ||
549 | 421 | ||
550 | /* | 422 | /* |
551 | * Return the bit position (0..63) of the most significant 1 bit in a word | 423 | * Return the bit position (0..63) of the most significant 1 bit in a word |
@@ -580,6 +452,8 @@ static inline int __ilog2(unsigned long x) | |||
580 | return 63 - lz; | 452 | return 63 - lz; |
581 | } | 453 | } |
582 | 454 | ||
455 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | ||
456 | |||
583 | /* | 457 | /* |
584 | * __ffs - find first bit in word. | 458 | * __ffs - find first bit in word. |
585 | * @word: The word to search | 459 | * @word: The word to search |
@@ -589,31 +463,7 @@ static inline int __ilog2(unsigned long x) | |||
589 | */ | 463 | */ |
590 | static inline unsigned long __ffs(unsigned long word) | 464 | static inline unsigned long __ffs(unsigned long word) |
591 | { | 465 | { |
592 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | ||
593 | return __ilog2(word & -word); | 466 | return __ilog2(word & -word); |
594 | #else | ||
595 | int b = 0, s; | ||
596 | |||
597 | #ifdef CONFIG_32BIT | ||
598 | s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s; | ||
599 | s = 8; if (word << 24 != 0) s = 0; b += s; word >>= s; | ||
600 | s = 4; if (word << 28 != 0) s = 0; b += s; word >>= s; | ||
601 | s = 2; if (word << 30 != 0) s = 0; b += s; word >>= s; | ||
602 | s = 1; if (word << 31 != 0) s = 0; b += s; | ||
603 | |||
604 | return b; | ||
605 | #endif | ||
606 | #ifdef CONFIG_64BIT | ||
607 | s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s; | ||
608 | s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s; | ||
609 | s = 8; if (word << 56 != 0) s = 0; b += s; word >>= s; | ||
610 | s = 4; if (word << 60 != 0) s = 0; b += s; word >>= s; | ||
611 | s = 2; if (word << 62 != 0) s = 0; b += s; word >>= s; | ||
612 | s = 1; if (word << 63 != 0) s = 0; b += s; | ||
613 | |||
614 | return b; | ||
615 | #endif | ||
616 | #endif | ||
617 | } | 467 | } |
618 | 468 | ||
619 | /* | 469 | /* |
@@ -652,321 +502,38 @@ static inline unsigned long ffz(unsigned long word) | |||
652 | */ | 502 | */ |
653 | static inline unsigned long fls(unsigned long word) | 503 | static inline unsigned long fls(unsigned long word) |
654 | { | 504 | { |
655 | #ifdef CONFIG_32BIT | ||
656 | #ifdef CONFIG_CPU_MIPS32 | 505 | #ifdef CONFIG_CPU_MIPS32 |
657 | __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); | 506 | __asm__ ("clz %0, %1" : "=r" (word) : "r" (word)); |
658 | 507 | ||
659 | return 32 - word; | 508 | return 32 - word; |
660 | #else | ||
661 | { | ||
662 | int r = 32, s; | ||
663 | |||
664 | if (word == 0) | ||
665 | return 0; | ||
666 | |||
667 | s = 16; if ((word & 0xffff0000)) s = 0; r -= s; word <<= s; | ||
668 | s = 8; if ((word & 0xff000000)) s = 0; r -= s; word <<= s; | ||
669 | s = 4; if ((word & 0xf0000000)) s = 0; r -= s; word <<= s; | ||
670 | s = 2; if ((word & 0xc0000000)) s = 0; r -= s; word <<= s; | ||
671 | s = 1; if ((word & 0x80000000)) s = 0; r -= s; | ||
672 | |||
673 | return r; | ||
674 | } | ||
675 | #endif | 509 | #endif |
676 | #endif /* CONFIG_32BIT */ | ||
677 | 510 | ||
678 | #ifdef CONFIG_64BIT | ||
679 | #ifdef CONFIG_CPU_MIPS64 | 511 | #ifdef CONFIG_CPU_MIPS64 |
680 | |||
681 | __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); | 512 | __asm__ ("dclz %0, %1" : "=r" (word) : "r" (word)); |
682 | 513 | ||
683 | return 64 - word; | 514 | return 64 - word; |
684 | #else | ||
685 | { | ||
686 | int r = 64, s; | ||
687 | |||
688 | if (word == 0) | ||
689 | return 0; | ||
690 | |||
691 | s = 32; if ((word & 0xffffffff00000000UL)) s = 0; r -= s; word <<= s; | ||
692 | s = 16; if ((word & 0xffff000000000000UL)) s = 0; r -= s; word <<= s; | ||
693 | s = 8; if ((word & 0xff00000000000000UL)) s = 0; r -= s; word <<= s; | ||
694 | s = 4; if ((word & 0xf000000000000000UL)) s = 0; r -= s; word <<= s; | ||
695 | s = 2; if ((word & 0xc000000000000000UL)) s = 0; r -= s; word <<= s; | ||
696 | s = 1; if ((word & 0x8000000000000000UL)) s = 0; r -= s; | ||
697 | |||
698 | return r; | ||
699 | } | ||
700 | #endif | 515 | #endif |
701 | #endif /* CONFIG_64BIT */ | ||
702 | } | 516 | } |
703 | 517 | ||
704 | #define fls64(x) generic_fls64(x) | 518 | #else |
705 | |||
706 | /* | ||
707 | * find_next_zero_bit - find the first zero bit in a memory region | ||
708 | * @addr: The address to base the search on | ||
709 | * @offset: The bitnumber to start searching at | ||
710 | * @size: The maximum size to search | ||
711 | */ | ||
712 | static inline unsigned long find_next_zero_bit(const unsigned long *addr, | ||
713 | unsigned long size, unsigned long offset) | ||
714 | { | ||
715 | const unsigned long *p = addr + (offset >> SZLONG_LOG); | ||
716 | unsigned long result = offset & ~SZLONG_MASK; | ||
717 | unsigned long tmp; | ||
718 | |||
719 | if (offset >= size) | ||
720 | return size; | ||
721 | size -= result; | ||
722 | offset &= SZLONG_MASK; | ||
723 | if (offset) { | ||
724 | tmp = *(p++); | ||
725 | tmp |= ~0UL >> (_MIPS_SZLONG-offset); | ||
726 | if (size < _MIPS_SZLONG) | ||
727 | goto found_first; | ||
728 | if (~tmp) | ||
729 | goto found_middle; | ||
730 | size -= _MIPS_SZLONG; | ||
731 | result += _MIPS_SZLONG; | ||
732 | } | ||
733 | while (size & ~SZLONG_MASK) { | ||
734 | if (~(tmp = *(p++))) | ||
735 | goto found_middle; | ||
736 | result += _MIPS_SZLONG; | ||
737 | size -= _MIPS_SZLONG; | ||
738 | } | ||
739 | if (!size) | ||
740 | return result; | ||
741 | tmp = *p; | ||
742 | |||
743 | found_first: | ||
744 | tmp |= ~0UL << size; | ||
745 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
746 | return result + size; /* Nope. */ | ||
747 | found_middle: | ||
748 | return result + ffz(tmp); | ||
749 | } | ||
750 | 519 | ||
751 | #define find_first_zero_bit(addr, size) \ | 520 | #include <asm-generic/bitops/__ffs.h> |
752 | find_next_zero_bit((addr), (size), 0) | 521 | #include <asm-generic/bitops/ffs.h> |
522 | #include <asm-generic/bitops/ffz.h> | ||
523 | #include <asm-generic/bitops/fls.h> | ||
753 | 524 | ||
754 | /* | 525 | #endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */ |
755 | * find_next_bit - find the next set bit in a memory region | ||
756 | * @addr: The address to base the search on | ||
757 | * @offset: The bitnumber to start searching at | ||
758 | * @size: The maximum size to search | ||
759 | */ | ||
760 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
761 | unsigned long size, unsigned long offset) | ||
762 | { | ||
763 | const unsigned long *p = addr + (offset >> SZLONG_LOG); | ||
764 | unsigned long result = offset & ~SZLONG_MASK; | ||
765 | unsigned long tmp; | ||
766 | |||
767 | if (offset >= size) | ||
768 | return size; | ||
769 | size -= result; | ||
770 | offset &= SZLONG_MASK; | ||
771 | if (offset) { | ||
772 | tmp = *(p++); | ||
773 | tmp &= ~0UL << offset; | ||
774 | if (size < _MIPS_SZLONG) | ||
775 | goto found_first; | ||
776 | if (tmp) | ||
777 | goto found_middle; | ||
778 | size -= _MIPS_SZLONG; | ||
779 | result += _MIPS_SZLONG; | ||
780 | } | ||
781 | while (size & ~SZLONG_MASK) { | ||
782 | if ((tmp = *(p++))) | ||
783 | goto found_middle; | ||
784 | result += _MIPS_SZLONG; | ||
785 | size -= _MIPS_SZLONG; | ||
786 | } | ||
787 | if (!size) | ||
788 | return result; | ||
789 | tmp = *p; | ||
790 | |||
791 | found_first: | ||
792 | tmp &= ~0UL >> (_MIPS_SZLONG - size); | ||
793 | if (tmp == 0UL) /* Are any bits set? */ | ||
794 | return result + size; /* Nope. */ | ||
795 | found_middle: | ||
796 | return result + __ffs(tmp); | ||
797 | } | ||
798 | 526 | ||
799 | /* | 527 | #include <asm-generic/bitops/fls64.h> |
800 | * find_first_bit - find the first set bit in a memory region | 528 | #include <asm-generic/bitops/find.h> |
801 | * @addr: The address to start the search at | ||
802 | * @size: The maximum size to search | ||
803 | * | ||
804 | * Returns the bit-number of the first set bit, not the number of the byte | ||
805 | * containing a bit. | ||
806 | */ | ||
807 | #define find_first_bit(addr, size) \ | ||
808 | find_next_bit((addr), (size), 0) | ||
809 | 529 | ||
810 | #ifdef __KERNEL__ | 530 | #ifdef __KERNEL__ |
811 | 531 | ||
812 | /* | 532 | #include <asm-generic/bitops/sched.h> |
813 | * Every architecture must define this function. It's the fastest | 533 | #include <asm-generic/bitops/hweight.h> |
814 | * way of searching a 140-bit bitmap where the first 100 bits are | 534 | #include <asm-generic/bitops/ext2-non-atomic.h> |
815 | * unlikely to be set. It's guaranteed that at least one of the 140 | 535 | #include <asm-generic/bitops/ext2-atomic.h> |
816 | * bits is cleared. | 536 | #include <asm-generic/bitops/minix.h> |
817 | */ | ||
818 | static inline int sched_find_first_bit(const unsigned long *b) | ||
819 | { | ||
820 | #ifdef CONFIG_32BIT | ||
821 | if (unlikely(b[0])) | ||
822 | return __ffs(b[0]); | ||
823 | if (unlikely(b[1])) | ||
824 | return __ffs(b[1]) + 32; | ||
825 | if (unlikely(b[2])) | ||
826 | return __ffs(b[2]) + 64; | ||
827 | if (b[3]) | ||
828 | return __ffs(b[3]) + 96; | ||
829 | return __ffs(b[4]) + 128; | ||
830 | #endif | ||
831 | #ifdef CONFIG_64BIT | ||
832 | if (unlikely(b[0])) | ||
833 | return __ffs(b[0]); | ||
834 | if (unlikely(b[1])) | ||
835 | return __ffs(b[1]) + 64; | ||
836 | return __ffs(b[2]) + 128; | ||
837 | #endif | ||
838 | } | ||
839 | |||
840 | /* | ||
841 | * hweightN - returns the hamming weight of a N-bit word | ||
842 | * @x: the word to weigh | ||
843 | * | ||
844 | * The Hamming Weight of a number is the total number of bits set in it. | ||
845 | */ | ||
846 | |||
847 | #define hweight64(x) generic_hweight64(x) | ||
848 | #define hweight32(x) generic_hweight32(x) | ||
849 | #define hweight16(x) generic_hweight16(x) | ||
850 | #define hweight8(x) generic_hweight8(x) | ||
851 | |||
852 | static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr) | ||
853 | { | ||
854 | unsigned char *ADDR = (unsigned char *) addr; | ||
855 | int mask, retval; | ||
856 | |||
857 | ADDR += nr >> 3; | ||
858 | mask = 1 << (nr & 0x07); | ||
859 | retval = (mask & *ADDR) != 0; | ||
860 | *ADDR |= mask; | ||
861 | |||
862 | return retval; | ||
863 | } | ||
864 | |||
865 | static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr) | ||
866 | { | ||
867 | unsigned char *ADDR = (unsigned char *) addr; | ||
868 | int mask, retval; | ||
869 | |||
870 | ADDR += nr >> 3; | ||
871 | mask = 1 << (nr & 0x07); | ||
872 | retval = (mask & *ADDR) != 0; | ||
873 | *ADDR &= ~mask; | ||
874 | |||
875 | return retval; | ||
876 | } | ||
877 | |||
878 | static inline int test_le_bit(unsigned long nr, const unsigned long * addr) | ||
879 | { | ||
880 | const unsigned char *ADDR = (const unsigned char *) addr; | ||
881 | int mask; | ||
882 | |||
883 | ADDR += nr >> 3; | ||
884 | mask = 1 << (nr & 0x07); | ||
885 | |||
886 | return ((mask & *ADDR) != 0); | ||
887 | } | ||
888 | |||
889 | static inline unsigned long find_next_zero_le_bit(unsigned long *addr, | ||
890 | unsigned long size, unsigned long offset) | ||
891 | { | ||
892 | unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG); | ||
893 | unsigned long result = offset & ~SZLONG_MASK; | ||
894 | unsigned long tmp; | ||
895 | |||
896 | if (offset >= size) | ||
897 | return size; | ||
898 | size -= result; | ||
899 | offset &= SZLONG_MASK; | ||
900 | if (offset) { | ||
901 | tmp = cpu_to_lelongp(p++); | ||
902 | tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */ | ||
903 | if (size < _MIPS_SZLONG) | ||
904 | goto found_first; | ||
905 | if (~tmp) | ||
906 | goto found_middle; | ||
907 | size -= _MIPS_SZLONG; | ||
908 | result += _MIPS_SZLONG; | ||
909 | } | ||
910 | while (size & ~SZLONG_MASK) { | ||
911 | if (~(tmp = cpu_to_lelongp(p++))) | ||
912 | goto found_middle; | ||
913 | result += _MIPS_SZLONG; | ||
914 | size -= _MIPS_SZLONG; | ||
915 | } | ||
916 | if (!size) | ||
917 | return result; | ||
918 | tmp = cpu_to_lelongp(p); | ||
919 | |||
920 | found_first: | ||
921 | tmp |= ~0UL << size; | ||
922 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
923 | return result + size; /* Nope. */ | ||
924 | |||
925 | found_middle: | ||
926 | return result + ffz(tmp); | ||
927 | } | ||
928 | |||
929 | #define find_first_zero_le_bit(addr, size) \ | ||
930 | find_next_zero_le_bit((addr), (size), 0) | ||
931 | |||
932 | #define ext2_set_bit(nr,addr) \ | ||
933 | __test_and_set_le_bit((nr),(unsigned long*)addr) | ||
934 | #define ext2_clear_bit(nr, addr) \ | ||
935 | __test_and_clear_le_bit((nr),(unsigned long*)addr) | ||
936 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
937 | ({ \ | ||
938 | int ret; \ | ||
939 | spin_lock(lock); \ | ||
940 | ret = ext2_set_bit((nr), (addr)); \ | ||
941 | spin_unlock(lock); \ | ||
942 | ret; \ | ||
943 | }) | ||
944 | |||
945 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
946 | ({ \ | ||
947 | int ret; \ | ||
948 | spin_lock(lock); \ | ||
949 | ret = ext2_clear_bit((nr), (addr)); \ | ||
950 | spin_unlock(lock); \ | ||
951 | ret; \ | ||
952 | }) | ||
953 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
954 | #define ext2_find_first_zero_bit(addr, size) \ | ||
955 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
956 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
957 | find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
958 | |||
959 | /* | ||
960 | * Bitmap functions for the minix filesystem. | ||
961 | * | ||
962 | * FIXME: These assume that Minix uses the native byte/bitorder. | ||
963 | * This limits the Minix filesystem's value for data exchange very much. | ||
964 | */ | ||
965 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
966 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
967 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
968 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
969 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
970 | 537 | ||
971 | #endif /* __KERNEL__ */ | 538 | #endif /* __KERNEL__ */ |
972 | 539 | ||
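After the MIPS consolidation, __ffs() and fls() stay open-coded only on CPUs that have the clz/dclz instructions; everything else comes from the asm-generic headers. The sketch below shows the count-leading-zeros identity the remaining inline asm relies on, with __builtin_clz() standing in for the instruction; the zero-input guard mirrors fls(0) == 0.

/* The count-leading-zeros identity behind the MIPS fls(): for non-zero
 * 32-bit words, fls(x) == 32 - clz(x). */
#include <stdio.h>

static int fls_sketch(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;	/* fls(0) is defined as 0 */
}

int main(void)
{
	printf("fls(0x00000001) = %d\n", fls_sketch(0x00000001u));	/* 1 */
	printf("fls(0x80000000) = %d\n", fls_sketch(0x80000000u));	/* 32 */
	printf("fls(0)          = %d\n", fls_sketch(0u));		/* 0 */
	return 0;
}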
diff --git a/include/asm-mips/types.h b/include/asm-mips/types.h index 421b3aea14cc..cd2813d8e136 100644 --- a/include/asm-mips/types.h +++ b/include/asm-mips/types.h | |||
@@ -99,6 +99,11 @@ typedef u64 sector_t; | |||
99 | #define HAVE_SECTOR_T | 99 | #define HAVE_SECTOR_T |
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | #ifdef CONFIG_LSF | ||
103 | typedef u64 blkcnt_t; | ||
104 | #define HAVE_BLKCNT_T | ||
105 | #endif | ||
106 | |||
102 | #endif /* __ASSEMBLY__ */ | 107 | #endif /* __ASSEMBLY__ */ |
103 | 108 | ||
104 | #endif /* __KERNEL__ */ | 109 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-parisc/bitops.h b/include/asm-parisc/bitops.h index 15d8c2b51584..900561922c4c 100644 --- a/include/asm-parisc/bitops.h +++ b/include/asm-parisc/bitops.h | |||
@@ -35,13 +35,6 @@ static __inline__ void set_bit(int nr, volatile unsigned long * addr) | |||
35 | _atomic_spin_unlock_irqrestore(addr, flags); | 35 | _atomic_spin_unlock_irqrestore(addr, flags); |
36 | } | 36 | } |
37 | 37 | ||
38 | static __inline__ void __set_bit(unsigned long nr, volatile unsigned long * addr) | ||
39 | { | ||
40 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
41 | |||
42 | *m |= 1UL << CHOP_SHIFTCOUNT(nr); | ||
43 | } | ||
44 | |||
45 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | 38 | static __inline__ void clear_bit(int nr, volatile unsigned long * addr) |
46 | { | 39 | { |
47 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); | 40 | unsigned long mask = ~(1UL << CHOP_SHIFTCOUNT(nr)); |
@@ -53,13 +46,6 @@ static __inline__ void clear_bit(int nr, volatile unsigned long * addr) | |||
53 | _atomic_spin_unlock_irqrestore(addr, flags); | 46 | _atomic_spin_unlock_irqrestore(addr, flags); |
54 | } | 47 | } |
55 | 48 | ||
56 | static __inline__ void __clear_bit(unsigned long nr, volatile unsigned long * addr) | ||
57 | { | ||
58 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
59 | |||
60 | *m &= ~(1UL << CHOP_SHIFTCOUNT(nr)); | ||
61 | } | ||
62 | |||
63 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) | 49 | static __inline__ void change_bit(int nr, volatile unsigned long * addr) |
64 | { | 50 | { |
65 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 51 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
@@ -71,13 +57,6 @@ static __inline__ void change_bit(int nr, volatile unsigned long * addr) | |||
71 | _atomic_spin_unlock_irqrestore(addr, flags); | 57 | _atomic_spin_unlock_irqrestore(addr, flags); |
72 | } | 58 | } |
73 | 59 | ||
74 | static __inline__ void __change_bit(unsigned long nr, volatile unsigned long * addr) | ||
75 | { | ||
76 | unsigned long *m = (unsigned long *) addr + (nr >> SHIFT_PER_LONG); | ||
77 | |||
78 | *m ^= 1UL << CHOP_SHIFTCOUNT(nr); | ||
79 | } | ||
80 | |||
81 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | 60 | static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) |
82 | { | 61 | { |
83 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 62 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
@@ -93,18 +72,6 @@ static __inline__ int test_and_set_bit(int nr, volatile unsigned long * addr) | |||
93 | return (oldbit & mask) ? 1 : 0; | 72 | return (oldbit & mask) ? 1 : 0; |
94 | } | 73 | } |
95 | 74 | ||
96 | static __inline__ int __test_and_set_bit(int nr, volatile unsigned long * address) | ||
97 | { | ||
98 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
99 | unsigned long oldbit; | ||
100 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
101 | |||
102 | oldbit = *addr; | ||
103 | *addr = oldbit | mask; | ||
104 | |||
105 | return (oldbit & mask) ? 1 : 0; | ||
106 | } | ||
107 | |||
108 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | 75 | static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) |
109 | { | 76 | { |
110 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 77 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
@@ -120,18 +87,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile unsigned long * addr) | |||
120 | return (oldbit & mask) ? 1 : 0; | 87 | return (oldbit & mask) ? 1 : 0; |
121 | } | 88 | } |
122 | 89 | ||
123 | static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long * address) | ||
124 | { | ||
125 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
126 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
127 | unsigned long oldbit; | ||
128 | |||
129 | oldbit = *addr; | ||
130 | *addr = oldbit & ~mask; | ||
131 | |||
132 | return (oldbit & mask) ? 1 : 0; | ||
133 | } | ||
134 | |||
135 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | 90 | static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) |
136 | { | 91 | { |
137 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | 92 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); |
@@ -147,25 +102,7 @@ static __inline__ int test_and_change_bit(int nr, volatile unsigned long * addr) | |||
147 | return (oldbit & mask) ? 1 : 0; | 102 | return (oldbit & mask) ? 1 : 0; |
148 | } | 103 | } |
149 | 104 | ||
150 | static __inline__ int __test_and_change_bit(int nr, volatile unsigned long * address) | 105 | #include <asm-generic/bitops/non-atomic.h> |
151 | { | ||
152 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
153 | unsigned long *addr = (unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
154 | unsigned long oldbit; | ||
155 | |||
156 | oldbit = *addr; | ||
157 | *addr = oldbit ^ mask; | ||
158 | |||
159 | return (oldbit & mask) ? 1 : 0; | ||
160 | } | ||
161 | |||
162 | static __inline__ int test_bit(int nr, const volatile unsigned long *address) | ||
163 | { | ||
164 | unsigned long mask = 1UL << CHOP_SHIFTCOUNT(nr); | ||
165 | const unsigned long *addr = (const unsigned long *)address + (nr >> SHIFT_PER_LONG); | ||
166 | |||
167 | return !!(*addr & mask); | ||
168 | } | ||
169 | 106 | ||
170 | #ifdef __KERNEL__ | 107 | #ifdef __KERNEL__ |
171 | 108 | ||
@@ -219,8 +156,7 @@ static __inline__ unsigned long __ffs(unsigned long x) | |||
219 | return ret; | 156 | return ret; |
220 | } | 157 | } |
221 | 158 | ||
222 | /* Undefined if no bit is zero. */ | 159 | #include <asm-generic/bitops/ffz.h> |
223 | #define ffz(x) __ffs(~x) | ||
224 | 160 | ||
225 | /* | 161 | /* |
226 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) | 162 | * ffs: find first bit set. returns 1 to BITS_PER_LONG or 0 (if none set) |
@@ -263,155 +199,22 @@ static __inline__ int fls(int x) | |||
263 | 199 | ||
264 | return ret; | 200 | return ret; |
265 | } | 201 | } |
266 | #define fls64(x) generic_fls64(x) | ||
267 | 202 | ||
268 | /* | 203 | #include <asm-generic/bitops/fls64.h> |
269 | * hweightN: returns the hamming weight (i.e. the number | 204 | #include <asm-generic/bitops/hweight.h> |
270 | * of bits set) of a N-bit word | 205 | #include <asm-generic/bitops/sched.h> |
271 | */ | ||
272 | #define hweight64(x) generic_hweight64(x) | ||
273 | #define hweight32(x) generic_hweight32(x) | ||
274 | #define hweight16(x) generic_hweight16(x) | ||
275 | #define hweight8(x) generic_hweight8(x) | ||
276 | |||
277 | /* | ||
278 | * Every architecture must define this function. It's the fastest | ||
279 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
280 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
281 | * bits is cleared. | ||
282 | */ | ||
283 | static inline int sched_find_first_bit(const unsigned long *b) | ||
284 | { | ||
285 | #ifdef __LP64__ | ||
286 | if (unlikely(b[0])) | ||
287 | return __ffs(b[0]); | ||
288 | if (unlikely(b[1])) | ||
289 | return __ffs(b[1]) + 64; | ||
290 | return __ffs(b[2]) + 128; | ||
291 | #else | ||
292 | if (unlikely(b[0])) | ||
293 | return __ffs(b[0]); | ||
294 | if (unlikely(b[1])) | ||
295 | return __ffs(b[1]) + 32; | ||
296 | if (unlikely(b[2])) | ||
297 | return __ffs(b[2]) + 64; | ||
298 | if (b[3]) | ||
299 | return __ffs(b[3]) + 96; | ||
300 | return __ffs(b[4]) + 128; | ||
301 | #endif | ||
302 | } | ||
303 | 206 | ||
304 | #endif /* __KERNEL__ */ | 207 | #endif /* __KERNEL__ */ |
305 | 208 | ||
306 | /* | 209 | #include <asm-generic/bitops/find.h> |
307 | * This implementation of find_{first,next}_zero_bit was stolen from | ||
308 | * Linus' asm-alpha/bitops.h. | ||
309 | */ | ||
310 | #define find_first_zero_bit(addr, size) \ | ||
311 | find_next_zero_bit((addr), (size), 0) | ||
312 | |||
313 | static __inline__ unsigned long find_next_zero_bit(const void * addr, unsigned long size, unsigned long offset) | ||
314 | { | ||
315 | const unsigned long * p = ((unsigned long *) addr) + (offset >> SHIFT_PER_LONG); | ||
316 | unsigned long result = offset & ~(BITS_PER_LONG-1); | ||
317 | unsigned long tmp; | ||
318 | |||
319 | if (offset >= size) | ||
320 | return size; | ||
321 | size -= result; | ||
322 | offset &= (BITS_PER_LONG-1); | ||
323 | if (offset) { | ||
324 | tmp = *(p++); | ||
325 | tmp |= ~0UL >> (BITS_PER_LONG-offset); | ||
326 | if (size < BITS_PER_LONG) | ||
327 | goto found_first; | ||
328 | if (~tmp) | ||
329 | goto found_middle; | ||
330 | size -= BITS_PER_LONG; | ||
331 | result += BITS_PER_LONG; | ||
332 | } | ||
333 | while (size & ~(BITS_PER_LONG -1)) { | ||
334 | if (~(tmp = *(p++))) | ||
335 | goto found_middle; | ||
336 | result += BITS_PER_LONG; | ||
337 | size -= BITS_PER_LONG; | ||
338 | } | ||
339 | if (!size) | ||
340 | return result; | ||
341 | tmp = *p; | ||
342 | found_first: | ||
343 | tmp |= ~0UL << size; | ||
344 | found_middle: | ||
345 | return result + ffz(tmp); | ||
346 | } | ||
347 | |||
348 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, unsigned long size, unsigned long offset) | ||
349 | { | ||
350 | const unsigned long *p = addr + (offset >> SHIFT_PER_LONG); | ||
351 | unsigned long result = offset & ~(BITS_PER_LONG-1); | ||
352 | unsigned long tmp; | ||
353 | |||
354 | if (offset >= size) | ||
355 | return size; | ||
356 | size -= result; | ||
357 | offset &= (BITS_PER_LONG-1); | ||
358 | if (offset) { | ||
359 | tmp = *(p++); | ||
360 | tmp &= (~0UL << offset); | ||
361 | if (size < BITS_PER_LONG) | ||
362 | goto found_first; | ||
363 | if (tmp) | ||
364 | goto found_middle; | ||
365 | size -= BITS_PER_LONG; | ||
366 | result += BITS_PER_LONG; | ||
367 | } | ||
368 | while (size & ~(BITS_PER_LONG-1)) { | ||
369 | if ((tmp = *(p++))) | ||
370 | goto found_middle; | ||
371 | result += BITS_PER_LONG; | ||
372 | size -= BITS_PER_LONG; | ||
373 | } | ||
374 | if (!size) | ||
375 | return result; | ||
376 | tmp = *p; | ||
377 | |||
378 | found_first: | ||
379 | tmp &= (~0UL >> (BITS_PER_LONG - size)); | ||
380 | if (tmp == 0UL) /* Are any bits set? */ | ||
381 | return result + size; /* Nope. */ | ||
382 | found_middle: | ||
383 | return result + __ffs(tmp); | ||
384 | } | ||
385 | |||
386 | /** | ||
387 | * find_first_bit - find the first set bit in a memory region | ||
388 | * @addr: The address to start the search at | ||
389 | * @size: The maximum size to search | ||
390 | * | ||
391 | * Returns the bit-number of the first set bit, not the number of the byte | ||
392 | * containing a bit. | ||
393 | */ | ||
394 | #define find_first_bit(addr, size) \ | ||
395 | find_next_bit((addr), (size), 0) | ||
396 | |||
397 | #define _EXT2_HAVE_ASM_BITOPS_ | ||
398 | 210 | ||
399 | #ifdef __KERNEL__ | 211 | #ifdef __KERNEL__ |
400 | /* | 212 | |
401 | * test_and_{set,clear}_bit guarantee atomicity without | 213 | #include <asm-generic/bitops/ext2-non-atomic.h> |
402 | * disabling interrupts. | ||
403 | */ | ||
404 | 214 | ||
405 | /* '3' is bits per byte */ | 215 | /* '3' is bits per byte */ |
406 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) | 216 | #define LE_BYTE_ADDR ((sizeof(unsigned long) - 1) << 3) |
407 | 217 | ||
408 | #define ext2_test_bit(nr, addr) \ | ||
409 | test_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
410 | #define ext2_set_bit(nr, addr) \ | ||
411 | __test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
412 | #define ext2_clear_bit(nr, addr) \ | ||
413 | __test_and_clear_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | ||
414 | |||
415 | #define ext2_set_bit_atomic(l,nr,addr) \ | 218 | #define ext2_set_bit_atomic(l,nr,addr) \ |
416 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) | 219 | test_and_set_bit((nr) ^ LE_BYTE_ADDR, (unsigned long *)addr) |
417 | #define ext2_clear_bit_atomic(l,nr,addr) \ | 220 | #define ext2_clear_bit_atomic(l,nr,addr) \ |
@@ -419,77 +222,6 @@ found_middle: | |||
419 | 222 | ||
420 | #endif /* __KERNEL__ */ | 223 | #endif /* __KERNEL__ */ |
421 | 224 | ||
422 | 225 | #include <asm-generic/bitops/minix-le.h> | |
423 | #define ext2_find_first_zero_bit(addr, size) \ | ||
424 | ext2_find_next_zero_bit((addr), (size), 0) | ||
425 | |||
426 | /* include/linux/byteorder does not support "unsigned long" type */ | ||
427 | static inline unsigned long ext2_swabp(unsigned long * x) | ||
428 | { | ||
429 | #ifdef __LP64__ | ||
430 | return (unsigned long) __swab64p((u64 *) x); | ||
431 | #else | ||
432 | return (unsigned long) __swab32p((u32 *) x); | ||
433 | #endif | ||
434 | } | ||
435 | |||
436 | /* include/linux/byteorder doesn't support "unsigned long" type */ | ||
437 | static inline unsigned long ext2_swab(unsigned long y) | ||
438 | { | ||
439 | #ifdef __LP64__ | ||
440 | return (unsigned long) __swab64((u64) y); | ||
441 | #else | ||
442 | return (unsigned long) __swab32((u32) y); | ||
443 | #endif | ||
444 | } | ||
445 | |||
446 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
447 | { | ||
448 | unsigned long *p = (unsigned long *) addr + (offset >> SHIFT_PER_LONG); | ||
449 | unsigned long result = offset & ~(BITS_PER_LONG - 1); | ||
450 | unsigned long tmp; | ||
451 | |||
452 | if (offset >= size) | ||
453 | return size; | ||
454 | size -= result; | ||
455 | offset &= (BITS_PER_LONG - 1UL); | ||
456 | if (offset) { | ||
457 | tmp = ext2_swabp(p++); | ||
458 | tmp |= (~0UL >> (BITS_PER_LONG - offset)); | ||
459 | if (size < BITS_PER_LONG) | ||
460 | goto found_first; | ||
461 | if (~tmp) | ||
462 | goto found_middle; | ||
463 | size -= BITS_PER_LONG; | ||
464 | result += BITS_PER_LONG; | ||
465 | } | ||
466 | |||
467 | while (size & ~(BITS_PER_LONG - 1)) { | ||
468 | if (~(tmp = *(p++))) | ||
469 | goto found_middle_swap; | ||
470 | result += BITS_PER_LONG; | ||
471 | size -= BITS_PER_LONG; | ||
472 | } | ||
473 | if (!size) | ||
474 | return result; | ||
475 | tmp = ext2_swabp(p); | ||
476 | found_first: | ||
477 | tmp |= ~0UL << size; | ||
478 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
479 | return result + size; /* Nope. Skip ffz */ | ||
480 | found_middle: | ||
481 | return result + ffz(tmp); | ||
482 | |||
483 | found_middle_swap: | ||
484 | return result + ffz(ext2_swab(tmp)); | ||
485 | } | ||
486 | |||
487 | |||
488 | /* Bitmap functions for the minix filesystem. */ | ||
489 | #define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr) | ||
490 | #define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr)) | ||
491 | #define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr) | ||
492 | #define minix_test_bit(nr,addr) ext2_test_bit(nr,addr) | ||
493 | #define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size) | ||
494 | 226 | ||
495 | #endif /* _PARISC_BITOPS_H */ | 227 | #endif /* _PARISC_BITOPS_H */ |
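Each bitops conversion in this series, parisc included, drops a hand-rolled copy of the non-atomic bit helpers in favour of <asm-generic/bitops/non-atomic.h>. In spirit, that header provides word-indexed read-modify-write operations along these lines (a simplified, userspace-compilable sketch, not the header verbatim; the real helpers take volatile unsigned long pointers):

#include <limits.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BIT_MASK(nr)    (1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)    ((nr) / BITS_PER_LONG)

/* Non-atomic: the caller must provide exclusion, or tolerate lost updates. */
static inline void __set_bit(unsigned int nr, unsigned long *addr)
{
        addr[BIT_WORD(nr)] |= BIT_MASK(nr);
}

static inline int __test_and_set_bit(unsigned int nr, unsigned long *addr)
{
        unsigned long mask = BIT_MASK(nr);
        unsigned long *p = addr + BIT_WORD(nr);
        unsigned long old = *p;

        *p = old | mask;
        return (old & mask) != 0;
}

__clear_bit(), __change_bit() and the remaining __test_and_*() variants follow the same pattern with &= ~mask and ^= mask.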
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h index bf6941a810b8..d1c2a4405660 100644 --- a/include/asm-powerpc/bitops.h +++ b/include/asm-powerpc/bitops.h | |||
@@ -184,72 +184,7 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr) | |||
184 | : "cc"); | 184 | : "cc"); |
185 | } | 185 | } |
186 | 186 | ||
187 | /* Non-atomic versions */ | 187 | #include <asm-generic/bitops/non-atomic.h> |
188 | static __inline__ int test_bit(unsigned long nr, | ||
189 | __const__ volatile unsigned long *addr) | ||
190 | { | ||
191 | return 1UL & (addr[BITOP_WORD(nr)] >> (nr & (BITS_PER_LONG-1))); | ||
192 | } | ||
193 | |||
194 | static __inline__ void __set_bit(unsigned long nr, | ||
195 | volatile unsigned long *addr) | ||
196 | { | ||
197 | unsigned long mask = BITOP_MASK(nr); | ||
198 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
199 | |||
200 | *p |= mask; | ||
201 | } | ||
202 | |||
203 | static __inline__ void __clear_bit(unsigned long nr, | ||
204 | volatile unsigned long *addr) | ||
205 | { | ||
206 | unsigned long mask = BITOP_MASK(nr); | ||
207 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
208 | |||
209 | *p &= ~mask; | ||
210 | } | ||
211 | |||
212 | static __inline__ void __change_bit(unsigned long nr, | ||
213 | volatile unsigned long *addr) | ||
214 | { | ||
215 | unsigned long mask = BITOP_MASK(nr); | ||
216 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
217 | |||
218 | *p ^= mask; | ||
219 | } | ||
220 | |||
221 | static __inline__ int __test_and_set_bit(unsigned long nr, | ||
222 | volatile unsigned long *addr) | ||
223 | { | ||
224 | unsigned long mask = BITOP_MASK(nr); | ||
225 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
226 | unsigned long old = *p; | ||
227 | |||
228 | *p = old | mask; | ||
229 | return (old & mask) != 0; | ||
230 | } | ||
231 | |||
232 | static __inline__ int __test_and_clear_bit(unsigned long nr, | ||
233 | volatile unsigned long *addr) | ||
234 | { | ||
235 | unsigned long mask = BITOP_MASK(nr); | ||
236 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
237 | unsigned long old = *p; | ||
238 | |||
239 | *p = old & ~mask; | ||
240 | return (old & mask) != 0; | ||
241 | } | ||
242 | |||
243 | static __inline__ int __test_and_change_bit(unsigned long nr, | ||
244 | volatile unsigned long *addr) | ||
245 | { | ||
246 | unsigned long mask = BITOP_MASK(nr); | ||
247 | unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr); | ||
248 | unsigned long old = *p; | ||
249 | |||
250 | *p = old ^ mask; | ||
251 | return (old & mask) != 0; | ||
252 | } | ||
253 | 188 | ||
254 | /* | 189 | /* |
255 | * Return the zero-based bit position (LE, not IBM bit numbering) of | 190 | * Return the zero-based bit position (LE, not IBM bit numbering) of |
@@ -310,16 +245,9 @@ static __inline__ int fls(unsigned int x) | |||
310 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); | 245 | asm ("cntlzw %0,%1" : "=r" (lz) : "r" (x)); |
311 | return 32 - lz; | 246 | return 32 - lz; |
312 | } | 247 | } |
313 | #define fls64(x) generic_fls64(x) | 248 | #include <asm-generic/bitops/fls64.h> |
314 | 249 | ||
315 | /* | 250 | #include <asm-generic/bitops/hweight.h> |
316 | * hweightN: returns the hamming weight (i.e. the number | ||
317 | * of bits set) of a N-bit word | ||
318 | */ | ||
319 | #define hweight64(x) generic_hweight64(x) | ||
320 | #define hweight32(x) generic_hweight32(x) | ||
321 | #define hweight16(x) generic_hweight16(x) | ||
322 | #define hweight8(x) generic_hweight8(x) | ||
323 | 251 | ||
324 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) | 252 | #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0) |
325 | unsigned long find_next_zero_bit(const unsigned long *addr, | 253 | unsigned long find_next_zero_bit(const unsigned long *addr, |
@@ -397,32 +325,7 @@ unsigned long find_next_zero_le_bit(const unsigned long *addr, | |||
397 | #define minix_find_first_zero_bit(addr,size) \ | 325 | #define minix_find_first_zero_bit(addr,size) \ |
398 | find_first_zero_le_bit((unsigned long *)addr, size) | 326 | find_first_zero_le_bit((unsigned long *)addr, size) |
399 | 327 | ||
400 | /* | 328 | #include <asm-generic/bitops/sched.h> |
401 | * Every architecture must define this function. It's the fastest | ||
402 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
403 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
404 | * bits is cleared. | ||
405 | */ | ||
406 | static inline int sched_find_first_bit(const unsigned long *b) | ||
407 | { | ||
408 | #ifdef CONFIG_PPC64 | ||
409 | if (unlikely(b[0])) | ||
410 | return __ffs(b[0]); | ||
411 | if (unlikely(b[1])) | ||
412 | return __ffs(b[1]) + 64; | ||
413 | return __ffs(b[2]) + 128; | ||
414 | #else | ||
415 | if (unlikely(b[0])) | ||
416 | return __ffs(b[0]); | ||
417 | if (unlikely(b[1])) | ||
418 | return __ffs(b[1]) + 32; | ||
419 | if (unlikely(b[2])) | ||
420 | return __ffs(b[2]) + 64; | ||
421 | if (b[3]) | ||
422 | return __ffs(b[3]) + 96; | ||
423 | return __ffs(b[4]) + 128; | ||
424 | #endif | ||
425 | } | ||
426 | 329 | ||
427 | #endif /* __KERNEL__ */ | 330 | #endif /* __KERNEL__ */ |
428 | 331 | ||
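The sched_find_first_bit() copies deleted from parisc and powerpc above (and from the other architectures below) duplicated one algorithm: return the index of the first set bit in a 140-bit priority bitmap, checking the low words first because they are the likeliest to be populated. A simplified, loop-based equivalent of what the shared <asm-generic/bitops/sched.h> stands in for (the in-tree versions unroll the word loop exactly as the deleted copies show; __ffs() is modelled with a compiler builtin here):

#include <limits.h>

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)

static inline unsigned long __ffs(unsigned long word)
{
        return __builtin_ctzl(word);        /* undefined for word == 0 */
}

static inline int sched_find_first_bit(const unsigned long *b)
{
        unsigned int i, words = (140 + BITS_PER_LONG - 1) / BITS_PER_LONG;

        for (i = 0; i < words; i++)
                if (b[i])
                        return __ffs(b[i]) + i * BITS_PER_LONG;
        return 140;                         /* callers guarantee a set bit */
}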
diff --git a/include/asm-powerpc/types.h b/include/asm-powerpc/types.h index ec3c2ee8bf86..baabba96e313 100644 --- a/include/asm-powerpc/types.h +++ b/include/asm-powerpc/types.h | |||
@@ -103,6 +103,11 @@ typedef u64 sector_t; | |||
103 | #define HAVE_SECTOR_T | 103 | #define HAVE_SECTOR_T |
104 | #endif | 104 | #endif |
105 | 105 | ||
106 | #ifdef CONFIG_LSF | ||
107 | typedef u64 blkcnt_t; | ||
108 | #define HAVE_BLKCNT_T | ||
109 | #endif | ||
110 | |||
106 | #endif /* __ASSEMBLY__ */ | 111 | #endif /* __ASSEMBLY__ */ |
107 | 112 | ||
108 | #endif /* __KERNEL__ */ | 113 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-s390/bitops.h b/include/asm-s390/bitops.h index 3628899f48bb..ca092ffb7a95 100644 --- a/include/asm-s390/bitops.h +++ b/include/asm-s390/bitops.h | |||
@@ -828,35 +828,12 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
828 | return find_first_bit(b, 140); | 828 | return find_first_bit(b, 140); |
829 | } | 829 | } |
830 | 830 | ||
831 | /* | 831 | #include <asm-generic/bitops/ffs.h> |
832 | * ffs: find first bit set. This is defined the same way as | ||
833 | * the libc and compiler builtin ffs routines, therefore | ||
834 | * differs in spirit from the above ffz (man ffs). | ||
835 | */ | ||
836 | #define ffs(x) generic_ffs(x) | ||
837 | 832 | ||
838 | /* | 833 | #include <asm-generic/bitops/fls.h> |
839 | * fls: find last bit set. | 834 | #include <asm-generic/bitops/fls64.h> |
840 | */ | ||
841 | #define fls(x) generic_fls(x) | ||
842 | #define fls64(x) generic_fls64(x) | ||
843 | |||
844 | /* | ||
845 | * hweightN: returns the hamming weight (i.e. the number | ||
846 | * of bits set) of a N-bit word | ||
847 | */ | ||
848 | #define hweight64(x) \ | ||
849 | ({ \ | ||
850 | unsigned long __x = (x); \ | ||
851 | unsigned int __w; \ | ||
852 | __w = generic_hweight32((unsigned int) __x); \ | ||
853 | __w += generic_hweight32((unsigned int) (__x>>32)); \ | ||
854 | __w; \ | ||
855 | }) | ||
856 | #define hweight32(x) generic_hweight32(x) | ||
857 | #define hweight16(x) generic_hweight16(x) | ||
858 | #define hweight8(x) generic_hweight8(x) | ||
859 | 835 | ||
836 | #include <asm-generic/bitops/hweight.h> | ||
860 | 837 | ||
861 | #ifdef __KERNEL__ | 838 | #ifdef __KERNEL__ |
862 | 839 | ||
@@ -871,11 +848,11 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
871 | */ | 848 | */ |
872 | 849 | ||
873 | #define ext2_set_bit(nr, addr) \ | 850 | #define ext2_set_bit(nr, addr) \ |
874 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 851 | __test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
875 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 852 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
876 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 853 | test_and_set_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
877 | #define ext2_clear_bit(nr, addr) \ | 854 | #define ext2_clear_bit(nr, addr) \ |
878 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 855 | __test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
879 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 856 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
880 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) | 857 | test_and_clear_bit((nr)^(__BITOPS_WORDSIZE - 8), (unsigned long *)addr) |
881 | #define ext2_test_bit(nr, addr) \ | 858 | #define ext2_test_bit(nr, addr) \ |
@@ -1011,18 +988,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset) | |||
1011 | return offset + ext2_find_first_zero_bit(p, size); | 988 | return offset + ext2_find_first_zero_bit(p, size); |
1012 | } | 989 | } |
1013 | 990 | ||
1014 | /* Bitmap functions for the minix filesystem. */ | 991 | #include <asm-generic/bitops/minix.h> |
1015 | /* FIXME !!! */ | ||
1016 | #define minix_test_and_set_bit(nr,addr) \ | ||
1017 | test_and_set_bit(nr,(unsigned long *)addr) | ||
1018 | #define minix_set_bit(nr,addr) \ | ||
1019 | set_bit(nr,(unsigned long *)addr) | ||
1020 | #define minix_test_and_clear_bit(nr,addr) \ | ||
1021 | test_and_clear_bit(nr,(unsigned long *)addr) | ||
1022 | #define minix_test_bit(nr,addr) \ | ||
1023 | test_bit(nr,(unsigned long *)addr) | ||
1024 | #define minix_find_first_zero_bit(addr,size) \ | ||
1025 | find_first_zero_bit(addr,size) | ||
1026 | 992 | ||
1027 | #endif /* __KERNEL__ */ | 993 | #endif /* __KERNEL__ */ |
1028 | 994 | ||
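The hweight macros removed here, including s390's hweight64 assembled from two 32-bit halves, are all population counts, and <asm-generic/bitops/hweight.h> forwards them to the generic_hweightN() helpers. A sketch of the classic divide-and-conquer popcount those helpers are based on (the _sketch suffixes are illustrative, not kernel identifiers):

#include <stdint.h>

/* Count set bits by summing progressively wider fields in place. */
static inline unsigned int hweight32_sketch(uint32_t w)
{
        uint32_t res = (w & 0x55555555u) + ((w >> 1) & 0x55555555u);
        res = (res & 0x33333333u) + ((res >> 2) & 0x33333333u);
        res = (res & 0x0f0f0f0fu) + ((res >> 4) & 0x0f0f0f0fu);
        res = (res & 0x00ff00ffu) + ((res >> 8) & 0x00ff00ffu);
        return (res & 0x0000ffffu) + ((res >> 16) & 0x0000ffffu);
}

/* 64-bit weight as the sum of two 32-bit halves, as the deleted s390
 * hweight64 macro computed it. */
static inline unsigned int hweight64_sketch(uint64_t w)
{
        return hweight32_sketch((uint32_t)w) +
               hweight32_sketch((uint32_t)(w >> 32));
}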
diff --git a/include/asm-s390/types.h b/include/asm-s390/types.h index d0be3e477013..5738ad63537c 100644 --- a/include/asm-s390/types.h +++ b/include/asm-s390/types.h | |||
@@ -93,6 +93,11 @@ typedef u64 sector_t; | |||
93 | #define HAVE_SECTOR_T | 93 | #define HAVE_SECTOR_T |
94 | #endif | 94 | #endif |
95 | 95 | ||
96 | #ifdef CONFIG_LSF | ||
97 | typedef u64 blkcnt_t; | ||
98 | #define HAVE_BLKCNT_T | ||
99 | #endif | ||
100 | |||
96 | #endif /* ! __s390x__ */ | 101 | #endif /* ! __s390x__ */ |
97 | #endif /* __ASSEMBLY__ */ | 102 | #endif /* __ASSEMBLY__ */ |
98 | #endif /* __KERNEL__ */ | 103 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh/bitops.h b/include/asm-sh/bitops.h index 1c5260860045..e34f82508568 100644 --- a/include/asm-sh/bitops.h +++ b/include/asm-sh/bitops.h | |||
@@ -19,16 +19,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
19 | local_irq_restore(flags); | 19 | local_irq_restore(flags); |
20 | } | 20 | } |
21 | 21 | ||
22 | static __inline__ void __set_bit(int nr, volatile void * addr) | ||
23 | { | ||
24 | int mask; | ||
25 | volatile unsigned int *a = addr; | ||
26 | |||
27 | a += nr >> 5; | ||
28 | mask = 1 << (nr & 0x1f); | ||
29 | *a |= mask; | ||
30 | } | ||
31 | |||
32 | /* | 22 | /* |
33 | * clear_bit() doesn't provide any barrier for the compiler. | 23 | * clear_bit() doesn't provide any barrier for the compiler. |
34 | */ | 24 | */ |
@@ -47,16 +37,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr) | |||
47 | local_irq_restore(flags); | 37 | local_irq_restore(flags); |
48 | } | 38 | } |
49 | 39 | ||
50 | static __inline__ void __clear_bit(int nr, volatile void * addr) | ||
51 | { | ||
52 | int mask; | ||
53 | volatile unsigned int *a = addr; | ||
54 | |||
55 | a += nr >> 5; | ||
56 | mask = 1 << (nr & 0x1f); | ||
57 | *a &= ~mask; | ||
58 | } | ||
59 | |||
60 | static __inline__ void change_bit(int nr, volatile void * addr) | 40 | static __inline__ void change_bit(int nr, volatile void * addr) |
61 | { | 41 | { |
62 | int mask; | 42 | int mask; |
@@ -70,16 +50,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) | |||
70 | local_irq_restore(flags); | 50 | local_irq_restore(flags); |
71 | } | 51 | } |
72 | 52 | ||
73 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
74 | { | ||
75 | int mask; | ||
76 | volatile unsigned int *a = addr; | ||
77 | |||
78 | a += nr >> 5; | ||
79 | mask = 1 << (nr & 0x1f); | ||
80 | *a ^= mask; | ||
81 | } | ||
82 | |||
83 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 53 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
84 | { | 54 | { |
85 | int mask, retval; | 55 | int mask, retval; |
@@ -96,19 +66,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
96 | return retval; | 66 | return retval; |
97 | } | 67 | } |
98 | 68 | ||
99 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
100 | { | ||
101 | int mask, retval; | ||
102 | volatile unsigned int *a = addr; | ||
103 | |||
104 | a += nr >> 5; | ||
105 | mask = 1 << (nr & 0x1f); | ||
106 | retval = (mask & *a) != 0; | ||
107 | *a |= mask; | ||
108 | |||
109 | return retval; | ||
110 | } | ||
111 | |||
112 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 69 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
113 | { | 70 | { |
114 | int mask, retval; | 71 | int mask, retval; |
@@ -125,19 +82,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
125 | return retval; | 82 | return retval; |
126 | } | 83 | } |
127 | 84 | ||
128 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
129 | { | ||
130 | int mask, retval; | ||
131 | volatile unsigned int *a = addr; | ||
132 | |||
133 | a += nr >> 5; | ||
134 | mask = 1 << (nr & 0x1f); | ||
135 | retval = (mask & *a) != 0; | ||
136 | *a &= ~mask; | ||
137 | |||
138 | return retval; | ||
139 | } | ||
140 | |||
141 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 85 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
142 | { | 86 | { |
143 | int mask, retval; | 87 | int mask, retval; |
@@ -154,23 +98,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
154 | return retval; | 98 | return retval; |
155 | } | 99 | } |
156 | 100 | ||
157 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 101 | #include <asm-generic/bitops/non-atomic.h> |
158 | { | ||
159 | int mask, retval; | ||
160 | volatile unsigned int *a = addr; | ||
161 | |||
162 | a += nr >> 5; | ||
163 | mask = 1 << (nr & 0x1f); | ||
164 | retval = (mask & *a) != 0; | ||
165 | *a ^= mask; | ||
166 | |||
167 | return retval; | ||
168 | } | ||
169 | |||
170 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
171 | { | ||
172 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); | ||
173 | } | ||
174 | 102 | ||
175 | static __inline__ unsigned long ffz(unsigned long word) | 103 | static __inline__ unsigned long ffz(unsigned long word) |
176 | { | 104 | { |
@@ -206,271 +134,15 @@ static __inline__ unsigned long __ffs(unsigned long word) | |||
206 | return result; | 134 | return result; |
207 | } | 135 | } |
208 | 136 | ||
209 | /** | 137 | #include <asm-generic/bitops/find.h> |
210 | * find_next_bit - find the next set bit in a memory region | 138 | #include <asm-generic/bitops/ffs.h> |
211 | * @addr: The address to base the search on | 139 | #include <asm-generic/bitops/hweight.h> |
212 | * @offset: The bitnumber to start searching at | 140 | #include <asm-generic/bitops/sched.h> |
213 | * @size: The maximum size to search | 141 | #include <asm-generic/bitops/ext2-non-atomic.h> |
214 | */ | 142 | #include <asm-generic/bitops/ext2-atomic.h> |
215 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | 143 | #include <asm-generic/bitops/minix.h> |
216 | unsigned long size, unsigned long offset) | 144 | #include <asm-generic/bitops/fls.h> |
217 | { | 145 | #include <asm-generic/bitops/fls64.h> |
218 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
219 | unsigned int result = offset & ~31UL; | ||
220 | unsigned int tmp; | ||
221 | |||
222 | if (offset >= size) | ||
223 | return size; | ||
224 | size -= result; | ||
225 | offset &= 31UL; | ||
226 | if (offset) { | ||
227 | tmp = *p++; | ||
228 | tmp &= ~0UL << offset; | ||
229 | if (size < 32) | ||
230 | goto found_first; | ||
231 | if (tmp) | ||
232 | goto found_middle; | ||
233 | size -= 32; | ||
234 | result += 32; | ||
235 | } | ||
236 | while (size >= 32) { | ||
237 | if ((tmp = *p++) != 0) | ||
238 | goto found_middle; | ||
239 | result += 32; | ||
240 | size -= 32; | ||
241 | } | ||
242 | if (!size) | ||
243 | return result; | ||
244 | tmp = *p; | ||
245 | |||
246 | found_first: | ||
247 | tmp &= ~0UL >> (32 - size); | ||
248 | if (tmp == 0UL) /* Are any bits set? */ | ||
249 | return result + size; /* Nope. */ | ||
250 | found_middle: | ||
251 | return result + __ffs(tmp); | ||
252 | } | ||
253 | |||
254 | /** | ||
255 | * find_first_bit - find the first set bit in a memory region | ||
256 | * @addr: The address to start the search at | ||
257 | * @size: The maximum size to search | ||
258 | * | ||
259 | * Returns the bit-number of the first set bit, not the number of the byte | ||
260 | * containing a bit. | ||
261 | */ | ||
262 | #define find_first_bit(addr, size) \ | ||
263 | find_next_bit((addr), (size), 0) | ||
264 | |||
265 | static __inline__ int find_next_zero_bit(const unsigned long *addr, int size, int offset) | ||
266 | { | ||
267 | const unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
268 | unsigned long result = offset & ~31UL; | ||
269 | unsigned long tmp; | ||
270 | |||
271 | if (offset >= size) | ||
272 | return size; | ||
273 | size -= result; | ||
274 | offset &= 31UL; | ||
275 | if (offset) { | ||
276 | tmp = *(p++); | ||
277 | tmp |= ~0UL >> (32-offset); | ||
278 | if (size < 32) | ||
279 | goto found_first; | ||
280 | if (~tmp) | ||
281 | goto found_middle; | ||
282 | size -= 32; | ||
283 | result += 32; | ||
284 | } | ||
285 | while (size & ~31UL) { | ||
286 | if (~(tmp = *(p++))) | ||
287 | goto found_middle; | ||
288 | result += 32; | ||
289 | size -= 32; | ||
290 | } | ||
291 | if (!size) | ||
292 | return result; | ||
293 | tmp = *p; | ||
294 | |||
295 | found_first: | ||
296 | tmp |= ~0UL << size; | ||
297 | found_middle: | ||
298 | return result + ffz(tmp); | ||
299 | } | ||
300 | |||
301 | #define find_first_zero_bit(addr, size) \ | ||
302 | find_next_zero_bit((addr), (size), 0) | ||
303 | |||
304 | /* | ||
305 | * ffs: find first bit set. This is defined the same way as | ||
306 | * the libc and compiler builtin ffs routines, therefore | ||
307 | * differs in spirit from the above ffz (man ffs). | ||
308 | */ | ||
309 | |||
310 | #define ffs(x) generic_ffs(x) | ||
311 | |||
312 | /* | ||
313 | * hweightN: returns the hamming weight (i.e. the number | ||
314 | * of bits set) of a N-bit word | ||
315 | */ | ||
316 | |||
317 | #define hweight32(x) generic_hweight32(x) | ||
318 | #define hweight16(x) generic_hweight16(x) | ||
319 | #define hweight8(x) generic_hweight8(x) | ||
320 | |||
321 | /* | ||
322 | * Every architecture must define this function. It's the fastest | ||
323 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
324 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
325 | * bits is cleared. | ||
326 | */ | ||
327 | |||
328 | static inline int sched_find_first_bit(const unsigned long *b) | ||
329 | { | ||
330 | if (unlikely(b[0])) | ||
331 | return __ffs(b[0]); | ||
332 | if (unlikely(b[1])) | ||
333 | return __ffs(b[1]) + 32; | ||
334 | if (unlikely(b[2])) | ||
335 | return __ffs(b[2]) + 64; | ||
336 | if (b[3]) | ||
337 | return __ffs(b[3]) + 96; | ||
338 | return __ffs(b[4]) + 128; | ||
339 | } | ||
340 | |||
341 | #ifdef __LITTLE_ENDIAN__ | ||
342 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) | ||
343 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) | ||
344 | #define ext2_test_bit(nr, addr) test_bit((nr), (addr)) | ||
345 | #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) | ||
346 | #define ext2_find_next_zero_bit(addr, size, offset) \ | ||
347 | find_next_zero_bit((unsigned long *)(addr), (size), (offset)) | ||
348 | #else | ||
349 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
350 | { | ||
351 | int mask, retval; | ||
352 | unsigned long flags; | ||
353 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
354 | |||
355 | ADDR += nr >> 3; | ||
356 | mask = 1 << (nr & 0x07); | ||
357 | local_irq_save(flags); | ||
358 | retval = (mask & *ADDR) != 0; | ||
359 | *ADDR |= mask; | ||
360 | local_irq_restore(flags); | ||
361 | return retval; | ||
362 | } | ||
363 | |||
364 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
365 | { | ||
366 | int mask, retval; | ||
367 | unsigned long flags; | ||
368 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
369 | |||
370 | ADDR += nr >> 3; | ||
371 | mask = 1 << (nr & 0x07); | ||
372 | local_irq_save(flags); | ||
373 | retval = (mask & *ADDR) != 0; | ||
374 | *ADDR &= ~mask; | ||
375 | local_irq_restore(flags); | ||
376 | return retval; | ||
377 | } | ||
378 | |||
379 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
380 | { | ||
381 | int mask; | ||
382 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
383 | |||
384 | ADDR += nr >> 3; | ||
385 | mask = 1 << (nr & 0x07); | ||
386 | return ((mask & *ADDR) != 0); | ||
387 | } | ||
388 | |||
389 | #define ext2_find_first_zero_bit(addr, size) \ | ||
390 | ext2_find_next_zero_bit((addr), (size), 0) | ||
391 | |||
392 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
393 | { | ||
394 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
395 | unsigned long result = offset & ~31UL; | ||
396 | unsigned long tmp; | ||
397 | |||
398 | if (offset >= size) | ||
399 | return size; | ||
400 | size -= result; | ||
401 | offset &= 31UL; | ||
402 | if(offset) { | ||
403 | /* We hold the little endian value in tmp, but then the | ||
404 | * shift is illegal. So we could keep a big endian value | ||
405 | * in tmp, like this: | ||
406 | * | ||
407 | * tmp = __swab32(*(p++)); | ||
408 | * tmp |= ~0UL >> (32-offset); | ||
409 | * | ||
410 | * but this would decrease performance, so we change the | ||
411 | * shift: | ||
412 | */ | ||
413 | tmp = *(p++); | ||
414 | tmp |= __swab32(~0UL >> (32-offset)); | ||
415 | if(size < 32) | ||
416 | goto found_first; | ||
417 | if(~tmp) | ||
418 | goto found_middle; | ||
419 | size -= 32; | ||
420 | result += 32; | ||
421 | } | ||
422 | while(size & ~31UL) { | ||
423 | if(~(tmp = *(p++))) | ||
424 | goto found_middle; | ||
425 | result += 32; | ||
426 | size -= 32; | ||
427 | } | ||
428 | if(!size) | ||
429 | return result; | ||
430 | tmp = *p; | ||
431 | |||
432 | found_first: | ||
433 | /* tmp is little endian, so we would have to swab the shift, | ||
434 | * see above. But then we have to swab tmp below for ffz, so | ||
435 | * we might as well do this here. | ||
436 | */ | ||
437 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
438 | found_middle: | ||
439 | return result + ffz(__swab32(tmp)); | ||
440 | } | ||
441 | #endif | ||
442 | |||
443 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
444 | ({ \ | ||
445 | int ret; \ | ||
446 | spin_lock(lock); \ | ||
447 | ret = ext2_set_bit((nr), (addr)); \ | ||
448 | spin_unlock(lock); \ | ||
449 | ret; \ | ||
450 | }) | ||
451 | |||
452 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
453 | ({ \ | ||
454 | int ret; \ | ||
455 | spin_lock(lock); \ | ||
456 | ret = ext2_clear_bit((nr), (addr)); \ | ||
457 | spin_unlock(lock); \ | ||
458 | ret; \ | ||
459 | }) | ||
460 | |||
461 | /* Bitmap functions for the minix filesystem. */ | ||
462 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
463 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
464 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
465 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
466 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
467 | |||
468 | /* | ||
469 | * fls: find last bit set. | ||
470 | */ | ||
471 | |||
472 | #define fls(x) generic_fls(x) | ||
473 | #define fls64(x) generic_fls64(x) | ||
474 | 146 | ||
475 | #endif /* __KERNEL__ */ | 147 | #endif /* __KERNEL__ */ |
476 | 148 | ||
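The big-endian branch deleted from asm-sh above byte-swapped whole words so that the little-endian ext2 on-disk bitmaps could be scanned with the native ffz(); the generic ext2 bitops reach the same layout by adjusting the bit number instead (the parisc hunk earlier XORs nr with (sizeof(unsigned long)-1)<<3, i.e. BITS_PER_LONG-8, and s390 does the equivalent). The semantics being preserved, sketched byte-wise (the _sketch names are illustrative, not kernel identifiers):

/* Bit nr of an ext2 bitmap lives in byte nr/8 at position nr%8, regardless
 * of host endianness; that is what both the swab and the XOR tricks encode. */
static inline int ext2_test_bit_sketch(int nr, const void *addr)
{
        const unsigned char *p = (const unsigned char *)addr + (nr >> 3);

        return (*p >> (nr & 7)) & 1;
}

static inline int ext2_set_bit_sketch(int nr, void *addr)   /* non-atomic */
{
        unsigned char *p = (unsigned char *)addr + (nr >> 3);
        unsigned char mask = (unsigned char)(1u << (nr & 7));
        int old = (*p & mask) != 0;

        *p |= mask;
        return old;
}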
diff --git a/include/asm-sh/stat.h b/include/asm-sh/stat.h index 914e3fcbbd37..6c41a60657f1 100644 --- a/include/asm-sh/stat.h +++ b/include/asm-sh/stat.h | |||
@@ -60,13 +60,7 @@ struct stat64 { | |||
60 | long long st_size; | 60 | long long st_size; |
61 | unsigned long st_blksize; | 61 | unsigned long st_blksize; |
62 | 62 | ||
63 | #if defined(__BIG_ENDIAN__) | 63 | unsigned long long st_blocks; /* Number 512-byte blocks allocated. */ |
64 | unsigned long __pad4; /* Future possible st_blocks hi bits */ | ||
65 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
66 | #else /* Must be little */ | ||
67 | unsigned long st_blocks; /* Number 512-byte blocks allocated. */ | ||
68 | unsigned long __pad4; /* Future possible st_blocks hi bits */ | ||
69 | #endif | ||
70 | 64 | ||
71 | unsigned long st_atime; | 65 | unsigned long st_atime; |
72 | unsigned long st_atime_nsec; | 66 | unsigned long st_atime_nsec; |
diff --git a/include/asm-sh/thread_info.h b/include/asm-sh/thread_info.h index 85f0c11b4319..7345350d98c0 100644 --- a/include/asm-sh/thread_info.h +++ b/include/asm-sh/thread_info.h | |||
@@ -18,7 +18,7 @@ | |||
18 | struct thread_info { | 18 | struct thread_info { |
19 | struct task_struct *task; /* main task structure */ | 19 | struct task_struct *task; /* main task structure */ |
20 | struct exec_domain *exec_domain; /* execution domain */ | 20 | struct exec_domain *exec_domain; /* execution domain */ |
21 | __u32 flags; /* low level flags */ | 21 | unsigned long flags; /* low level flags */ |
22 | __u32 cpu; | 22 | __u32 cpu; |
23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ | 23 | int preempt_count; /* 0 => preemptable, <0 => BUG */ |
24 | struct restart_block restart_block; | 24 | struct restart_block restart_block; |
diff --git a/include/asm-sh/types.h b/include/asm-sh/types.h index cb7e183a0a6b..488552f43b2a 100644 --- a/include/asm-sh/types.h +++ b/include/asm-sh/types.h | |||
@@ -58,6 +58,11 @@ typedef u64 sector_t; | |||
58 | #define HAVE_SECTOR_T | 58 | #define HAVE_SECTOR_T |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | #ifdef CONFIG_LSF | ||
62 | typedef u64 blkcnt_t; | ||
63 | #define HAVE_BLKCNT_T | ||
64 | #endif | ||
65 | |||
61 | #endif /* __ASSEMBLY__ */ | 66 | #endif /* __ASSEMBLY__ */ |
62 | 67 | ||
63 | #endif /* __KERNEL__ */ | 68 | #endif /* __KERNEL__ */ |
diff --git a/include/asm-sh64/bitops.h b/include/asm-sh64/bitops.h index ce9c3ad45fe0..f3bdcdb5d046 100644 --- a/include/asm-sh64/bitops.h +++ b/include/asm-sh64/bitops.h | |||
@@ -31,16 +31,6 @@ static __inline__ void set_bit(int nr, volatile void * addr) | |||
31 | local_irq_restore(flags); | 31 | local_irq_restore(flags); |
32 | } | 32 | } |
33 | 33 | ||
34 | static inline void __set_bit(int nr, void *addr) | ||
35 | { | ||
36 | int mask; | ||
37 | unsigned int *a = addr; | ||
38 | |||
39 | a += nr >> 5; | ||
40 | mask = 1 << (nr & 0x1f); | ||
41 | *a |= mask; | ||
42 | } | ||
43 | |||
44 | /* | 34 | /* |
45 | * clear_bit() doesn't provide any barrier for the compiler. | 35 | * clear_bit() doesn't provide any barrier for the compiler. |
46 | */ | 36 | */ |
@@ -58,15 +48,6 @@ static inline void clear_bit(int nr, volatile unsigned long *a) | |||
58 | local_irq_restore(flags); | 48 | local_irq_restore(flags); |
59 | } | 49 | } |
60 | 50 | ||
61 | static inline void __clear_bit(int nr, volatile unsigned long *a) | ||
62 | { | ||
63 | int mask; | ||
64 | |||
65 | a += nr >> 5; | ||
66 | mask = 1 << (nr & 0x1f); | ||
67 | *a &= ~mask; | ||
68 | } | ||
69 | |||
70 | static __inline__ void change_bit(int nr, volatile void * addr) | 51 | static __inline__ void change_bit(int nr, volatile void * addr) |
71 | { | 52 | { |
72 | int mask; | 53 | int mask; |
@@ -80,16 +61,6 @@ static __inline__ void change_bit(int nr, volatile void * addr) | |||
80 | local_irq_restore(flags); | 61 | local_irq_restore(flags); |
81 | } | 62 | } |
82 | 63 | ||
83 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
84 | { | ||
85 | int mask; | ||
86 | volatile unsigned int *a = addr; | ||
87 | |||
88 | a += nr >> 5; | ||
89 | mask = 1 << (nr & 0x1f); | ||
90 | *a ^= mask; | ||
91 | } | ||
92 | |||
93 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | 64 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) |
94 | { | 65 | { |
95 | int mask, retval; | 66 | int mask, retval; |
@@ -106,19 +77,6 @@ static __inline__ int test_and_set_bit(int nr, volatile void * addr) | |||
106 | return retval; | 77 | return retval; |
107 | } | 78 | } |
108 | 79 | ||
109 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
110 | { | ||
111 | int mask, retval; | ||
112 | volatile unsigned int *a = addr; | ||
113 | |||
114 | a += nr >> 5; | ||
115 | mask = 1 << (nr & 0x1f); | ||
116 | retval = (mask & *a) != 0; | ||
117 | *a |= mask; | ||
118 | |||
119 | return retval; | ||
120 | } | ||
121 | |||
122 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | 80 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) |
123 | { | 81 | { |
124 | int mask, retval; | 82 | int mask, retval; |
@@ -135,19 +93,6 @@ static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | |||
135 | return retval; | 93 | return retval; |
136 | } | 94 | } |
137 | 95 | ||
138 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
139 | { | ||
140 | int mask, retval; | ||
141 | volatile unsigned int *a = addr; | ||
142 | |||
143 | a += nr >> 5; | ||
144 | mask = 1 << (nr & 0x1f); | ||
145 | retval = (mask & *a) != 0; | ||
146 | *a &= ~mask; | ||
147 | |||
148 | return retval; | ||
149 | } | ||
150 | |||
151 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | 96 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) |
152 | { | 97 | { |
153 | int mask, retval; | 98 | int mask, retval; |
@@ -164,23 +109,7 @@ static __inline__ int test_and_change_bit(int nr, volatile void * addr) | |||
164 | return retval; | 109 | return retval; |
165 | } | 110 | } |
166 | 111 | ||
167 | static __inline__ int __test_and_change_bit(int nr, volatile void * addr) | 112 | #include <asm-generic/bitops/non-atomic.h> |
168 | { | ||
169 | int mask, retval; | ||
170 | volatile unsigned int *a = addr; | ||
171 | |||
172 | a += nr >> 5; | ||
173 | mask = 1 << (nr & 0x1f); | ||
174 | retval = (mask & *a) != 0; | ||
175 | *a ^= mask; | ||
176 | |||
177 | return retval; | ||
178 | } | ||
179 | |||
180 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
181 | { | ||
182 | return 1UL & (((const volatile unsigned int *) addr)[nr >> 5] >> (nr & 31)); | ||
183 | } | ||
184 | 113 | ||
185 | static __inline__ unsigned long ffz(unsigned long word) | 114 | static __inline__ unsigned long ffz(unsigned long word) |
186 | { | 115 | { |
@@ -204,313 +133,16 @@ static __inline__ unsigned long ffz(unsigned long word) | |||
204 | return result; | 133 | return result; |
205 | } | 134 | } |
206 | 135 | ||
207 | /** | 136 | #include <asm-generic/bitops/__ffs.h> |
208 | * __ffs - find first bit in word | 137 | #include <asm-generic/bitops/find.h> |
209 | * @word: The word to search | 138 | #include <asm-generic/bitops/hweight.h> |
210 | * | 139 | #include <asm-generic/bitops/sched.h> |
211 | * Undefined if no bit exists, so code should check against 0 first. | 140 | #include <asm-generic/bitops/ffs.h> |
212 | */ | 141 | #include <asm-generic/bitops/ext2-non-atomic.h> |
213 | static inline unsigned long __ffs(unsigned long word) | 142 | #include <asm-generic/bitops/ext2-atomic.h> |
214 | { | 143 | #include <asm-generic/bitops/minix.h> |
215 | int r = 0; | 144 | #include <asm-generic/bitops/fls.h> |
216 | 145 | #include <asm-generic/bitops/fls64.h> | |
217 | if (!word) | ||
218 | return 0; | ||
219 | if (!(word & 0xffff)) { | ||
220 | word >>= 16; | ||
221 | r += 16; | ||
222 | } | ||
223 | if (!(word & 0xff)) { | ||
224 | word >>= 8; | ||
225 | r += 8; | ||
226 | } | ||
227 | if (!(word & 0xf)) { | ||
228 | word >>= 4; | ||
229 | r += 4; | ||
230 | } | ||
231 | if (!(word & 3)) { | ||
232 | word >>= 2; | ||
233 | r += 2; | ||
234 | } | ||
235 | if (!(word & 1)) { | ||
236 | word >>= 1; | ||
237 | r += 1; | ||
238 | } | ||
239 | return r; | ||
240 | } | ||
241 | |||
242 | /** | ||
243 | * find_next_bit - find the next set bit in a memory region | ||
244 | * @addr: The address to base the search on | ||
245 | * @offset: The bitnumber to start searching at | ||
246 | * @size: The maximum size to search | ||
247 | */ | ||
248 | static inline unsigned long find_next_bit(const unsigned long *addr, | ||
249 | unsigned long size, unsigned long offset) | ||
250 | { | ||
251 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
252 | unsigned int result = offset & ~31UL; | ||
253 | unsigned int tmp; | ||
254 | |||
255 | if (offset >= size) | ||
256 | return size; | ||
257 | size -= result; | ||
258 | offset &= 31UL; | ||
259 | if (offset) { | ||
260 | tmp = *p++; | ||
261 | tmp &= ~0UL << offset; | ||
262 | if (size < 32) | ||
263 | goto found_first; | ||
264 | if (tmp) | ||
265 | goto found_middle; | ||
266 | size -= 32; | ||
267 | result += 32; | ||
268 | } | ||
269 | while (size >= 32) { | ||
270 | if ((tmp = *p++) != 0) | ||
271 | goto found_middle; | ||
272 | result += 32; | ||
273 | size -= 32; | ||
274 | } | ||
275 | if (!size) | ||
276 | return result; | ||
277 | tmp = *p; | ||
278 | |||
279 | found_first: | ||
280 | tmp &= ~0UL >> (32 - size); | ||
281 | if (tmp == 0UL) /* Are any bits set? */ | ||
282 | return result + size; /* Nope. */ | ||
283 | found_middle: | ||
284 | return result + __ffs(tmp); | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * find_first_bit - find the first set bit in a memory region | ||
289 | * @addr: The address to start the search at | ||
290 | * @size: The maximum size to search | ||
291 | * | ||
292 | * Returns the bit-number of the first set bit, not the number of the byte | ||
293 | * containing a bit. | ||
294 | */ | ||
295 | #define find_first_bit(addr, size) \ | ||
296 | find_next_bit((addr), (size), 0) | ||
297 | |||
298 | |||
299 | static inline int find_next_zero_bit(void *addr, int size, int offset) | ||
300 | { | ||
301 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
302 | unsigned long result = offset & ~31UL; | ||
303 | unsigned long tmp; | ||
304 | |||
305 | if (offset >= size) | ||
306 | return size; | ||
307 | size -= result; | ||
308 | offset &= 31UL; | ||
309 | if (offset) { | ||
310 | tmp = *(p++); | ||
311 | tmp |= ~0UL >> (32-offset); | ||
312 | if (size < 32) | ||
313 | goto found_first; | ||
314 | if (~tmp) | ||
315 | goto found_middle; | ||
316 | size -= 32; | ||
317 | result += 32; | ||
318 | } | ||
319 | while (size & ~31UL) { | ||
320 | if (~(tmp = *(p++))) | ||
321 | goto found_middle; | ||
322 | result += 32; | ||
323 | size -= 32; | ||
324 | } | ||
325 | if (!size) | ||
326 | return result; | ||
327 | tmp = *p; | ||
328 | |||
329 | found_first: | ||
330 | tmp |= ~0UL << size; | ||
331 | found_middle: | ||
332 | return result + ffz(tmp); | ||
333 | } | ||
334 | |||
335 | #define find_first_zero_bit(addr, size) \ | ||
336 | find_next_zero_bit((addr), (size), 0) | ||
337 | |||
338 | /* | ||
339 | * hweightN: returns the hamming weight (i.e. the number | ||
340 | * of bits set) of a N-bit word | ||
341 | */ | ||
342 | |||
343 | #define hweight32(x) generic_hweight32(x) | ||
344 | #define hweight16(x) generic_hweight16(x) | ||
345 | #define hweight8(x) generic_hweight8(x) | ||
346 | |||
347 | /* | ||
348 | * Every architecture must define this function. It's the fastest | ||
349 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
350 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
351 | * bits is cleared. | ||
352 | */ | ||
353 | |||
354 | static inline int sched_find_first_bit(unsigned long *b) | ||
355 | { | ||
356 | if (unlikely(b[0])) | ||
357 | return __ffs(b[0]); | ||
358 | if (unlikely(b[1])) | ||
359 | return __ffs(b[1]) + 32; | ||
360 | if (unlikely(b[2])) | ||
361 | return __ffs(b[2]) + 64; | ||
362 | if (b[3]) | ||
363 | return __ffs(b[3]) + 96; | ||
364 | return __ffs(b[4]) + 128; | ||
365 | } | ||
366 | |||
367 | /* | ||
368 | * ffs: find first bit set. This is defined the same way as | ||
369 | * the libc and compiler builtin ffs routines, therefore | ||
370 | * differs in spirit from the above ffz (man ffs). | ||
371 | */ | ||
372 | |||
373 | #define ffs(x) generic_ffs(x) | ||
374 | |||
375 | /* | ||
376 | * hweightN: returns the hamming weight (i.e. the number | ||
377 | * of bits set) of a N-bit word | ||
378 | */ | ||
379 | |||
380 | #define hweight32(x) generic_hweight32(x) | ||
381 | #define hweight16(x) generic_hweight16(x) | ||
382 | #define hweight8(x) generic_hweight8(x) | ||
383 | |||
384 | #ifdef __LITTLE_ENDIAN__ | ||
385 | #define ext2_set_bit(nr, addr) test_and_set_bit((nr), (addr)) | ||
386 | #define ext2_clear_bit(nr, addr) test_and_clear_bit((nr), (addr)) | ||
387 | #define ext2_test_bit(nr, addr) test_bit((nr), (addr)) | ||
388 | #define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr), (size)) | ||
389 | #define ext2_find_next_zero_bit(addr, size, offset) \ | ||
390 | find_next_zero_bit((addr), (size), (offset)) | ||
391 | #else | ||
392 | static __inline__ int ext2_set_bit(int nr, volatile void * addr) | ||
393 | { | ||
394 | int mask, retval; | ||
395 | unsigned long flags; | ||
396 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
397 | |||
398 | ADDR += nr >> 3; | ||
399 | mask = 1 << (nr & 0x07); | ||
400 | local_irq_save(flags); | ||
401 | retval = (mask & *ADDR) != 0; | ||
402 | *ADDR |= mask; | ||
403 | local_irq_restore(flags); | ||
404 | return retval; | ||
405 | } | ||
406 | |||
407 | static __inline__ int ext2_clear_bit(int nr, volatile void * addr) | ||
408 | { | ||
409 | int mask, retval; | ||
410 | unsigned long flags; | ||
411 | volatile unsigned char *ADDR = (unsigned char *) addr; | ||
412 | |||
413 | ADDR += nr >> 3; | ||
414 | mask = 1 << (nr & 0x07); | ||
415 | local_irq_save(flags); | ||
416 | retval = (mask & *ADDR) != 0; | ||
417 | *ADDR &= ~mask; | ||
418 | local_irq_restore(flags); | ||
419 | return retval; | ||
420 | } | ||
421 | |||
422 | static __inline__ int ext2_test_bit(int nr, const volatile void * addr) | ||
423 | { | ||
424 | int mask; | ||
425 | const volatile unsigned char *ADDR = (const unsigned char *) addr; | ||
426 | |||
427 | ADDR += nr >> 3; | ||
428 | mask = 1 << (nr & 0x07); | ||
429 | return ((mask & *ADDR) != 0); | ||
430 | } | ||
431 | |||
432 | #define ext2_find_first_zero_bit(addr, size) \ | ||
433 | ext2_find_next_zero_bit((addr), (size), 0) | ||
434 | |||
435 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
436 | { | ||
437 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
438 | unsigned long result = offset & ~31UL; | ||
439 | unsigned long tmp; | ||
440 | |||
441 | if (offset >= size) | ||
442 | return size; | ||
443 | size -= result; | ||
444 | offset &= 31UL; | ||
445 | if(offset) { | ||
446 | /* We hold the little endian value in tmp, but then the | ||
447 | * shift is illegal. So we could keep a big endian value | ||
448 | * in tmp, like this: | ||
449 | * | ||
450 | * tmp = __swab32(*(p++)); | ||
451 | * tmp |= ~0UL >> (32-offset); | ||
452 | * | ||
453 | * but this would decrease performance, so we change the | ||
454 | * shift: | ||
455 | */ | ||
456 | tmp = *(p++); | ||
457 | tmp |= __swab32(~0UL >> (32-offset)); | ||
458 | if(size < 32) | ||
459 | goto found_first; | ||
460 | if(~tmp) | ||
461 | goto found_middle; | ||
462 | size -= 32; | ||
463 | result += 32; | ||
464 | } | ||
465 | while(size & ~31UL) { | ||
466 | if(~(tmp = *(p++))) | ||
467 | goto found_middle; | ||
468 | result += 32; | ||
469 | size -= 32; | ||
470 | } | ||
471 | if(!size) | ||
472 | return result; | ||
473 | tmp = *p; | ||
474 | |||
475 | found_first: | ||
476 | /* tmp is little endian, so we would have to swab the shift, | ||
477 | * see above. But then we have to swab tmp below for ffz, so | ||
478 | * we might as well do this here. | ||
479 | */ | ||
480 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
481 | found_middle: | ||
482 | return result + ffz(__swab32(tmp)); | ||
483 | } | ||
484 | #endif | ||
485 | |||
486 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
487 | ({ \ | ||
488 | int ret; \ | ||
489 | spin_lock(lock); \ | ||
490 | ret = ext2_set_bit((nr), (addr)); \ | ||
491 | spin_unlock(lock); \ | ||
492 | ret; \ | ||
493 | }) | ||
494 | |||
495 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
496 | ({ \ | ||
497 | int ret; \ | ||
498 | spin_lock(lock); \ | ||
499 | ret = ext2_clear_bit((nr), (addr)); \ | ||
500 | spin_unlock(lock); \ | ||
501 | ret; \ | ||
502 | }) | ||
503 | |||
504 | /* Bitmap functions for the minix filesystem. */ | ||
505 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
506 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
507 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
508 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
509 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
510 | |||
511 | #define ffs(x) generic_ffs(x) | ||
512 | #define fls(x) generic_fls(x) | ||
513 | #define fls64(x) generic_fls64(x) | ||
514 | 146 | ||
515 | #endif /* __KERNEL__ */ | 147 | #endif /* __KERNEL__ */ |
516 | 148 | ||
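With its private copies gone, sh64 picks up ffs(), fls() and ffz() from the generic headers as well. The identities the deleted code spelled out by hand are small: __ffs() is the zero-based index of the lowest set bit and is undefined for 0, ffs() is its one-based cousin that returns 0 for 0, and ffz() is just __ffs() of the complement (as the parisc hunk's ffz removal showed). A sketch with a compiler builtin standing in for __ffs() (the _sketch names are illustrative):

static inline unsigned long __ffs_sketch(unsigned long word)
{
        return __builtin_ctzl(word);                /* undefined for 0 */
}

static inline int ffs_sketch(int x)
{
        if (!x)
                return 0;
        return (int)__ffs_sketch((unsigned long)(unsigned int)x) + 1;
}

#define ffz_sketch(x)   __ffs_sketch(~(unsigned long)(x))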
diff --git a/include/asm-sparc/bitops.h b/include/asm-sparc/bitops.h index 41722b5e45ef..04aa3318f76a 100644 --- a/include/asm-sparc/bitops.h +++ b/include/asm-sparc/bitops.h | |||
@@ -152,386 +152,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr) | |||
152 | : "memory", "cc"); | 152 | : "memory", "cc"); |
153 | } | 153 | } |
154 | 154 | ||
155 | /* | 155 | #include <asm-generic/bitops/non-atomic.h> |
156 | * non-atomic versions | ||
157 | */ | ||
158 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
159 | { | ||
160 | unsigned long mask = 1UL << (nr & 0x1f); | ||
161 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
162 | |||
163 | *p |= mask; | ||
164 | } | ||
165 | |||
166 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
167 | { | ||
168 | unsigned long mask = 1UL << (nr & 0x1f); | ||
169 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
170 | |||
171 | *p &= ~mask; | ||
172 | } | ||
173 | |||
174 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
175 | { | ||
176 | unsigned long mask = 1UL << (nr & 0x1f); | ||
177 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
178 | |||
179 | *p ^= mask; | ||
180 | } | ||
181 | |||
182 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
183 | { | ||
184 | unsigned long mask = 1UL << (nr & 0x1f); | ||
185 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
186 | unsigned long old = *p; | ||
187 | |||
188 | *p = old | mask; | ||
189 | return (old & mask) != 0; | ||
190 | } | ||
191 | |||
192 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
193 | { | ||
194 | unsigned long mask = 1UL << (nr & 0x1f); | ||
195 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
196 | unsigned long old = *p; | ||
197 | |||
198 | *p = old & ~mask; | ||
199 | return (old & mask) != 0; | ||
200 | } | ||
201 | |||
202 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
203 | { | ||
204 | unsigned long mask = 1UL << (nr & 0x1f); | ||
205 | unsigned long *p = ((unsigned long *)addr) + (nr >> 5); | ||
206 | unsigned long old = *p; | ||
207 | |||
208 | *p = old ^ mask; | ||
209 | return (old & mask) != 0; | ||
210 | } | ||
211 | 156 | ||
212 | #define smp_mb__before_clear_bit() do { } while(0) | 157 | #define smp_mb__before_clear_bit() do { } while(0) |
213 | #define smp_mb__after_clear_bit() do { } while(0) | 158 | #define smp_mb__after_clear_bit() do { } while(0) |
214 | 159 | ||
215 | /* The following routine need not be atomic. */ | 160 | #include <asm-generic/bitops/ffz.h> |
216 | static inline int test_bit(int nr, __const__ volatile unsigned long *addr) | 161 | #include <asm-generic/bitops/__ffs.h> |
217 | { | 162 | #include <asm-generic/bitops/sched.h> |
218 | return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL; | 163 | #include <asm-generic/bitops/ffs.h> |
219 | } | 164 | #include <asm-generic/bitops/fls.h> |
220 | 165 | #include <asm-generic/bitops/fls64.h> | |
221 | /* The easy/cheese version for now. */ | 166 | #include <asm-generic/bitops/hweight.h> |
222 | static inline unsigned long ffz(unsigned long word) | 167 | #include <asm-generic/bitops/find.h> |
223 | { | 168 | #include <asm-generic/bitops/ext2-non-atomic.h> |
224 | unsigned long result = 0; | 169 | #include <asm-generic/bitops/ext2-atomic.h> |
225 | 170 | #include <asm-generic/bitops/minix.h> | |
226 | while(word & 1) { | ||
227 | result++; | ||
228 | word >>= 1; | ||
229 | } | ||
230 | return result; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * __ffs - find first bit in word. | ||
235 | * @word: The word to search | ||
236 | * | ||
237 | * Undefined if no bit exists, so code should check against 0 first. | ||
238 | */ | ||
239 | static inline int __ffs(unsigned long word) | ||
240 | { | ||
241 | int num = 0; | ||
242 | |||
243 | if ((word & 0xffff) == 0) { | ||
244 | num += 16; | ||
245 | word >>= 16; | ||
246 | } | ||
247 | if ((word & 0xff) == 0) { | ||
248 | num += 8; | ||
249 | word >>= 8; | ||
250 | } | ||
251 | if ((word & 0xf) == 0) { | ||
252 | num += 4; | ||
253 | word >>= 4; | ||
254 | } | ||
255 | if ((word & 0x3) == 0) { | ||
256 | num += 2; | ||
257 | word >>= 2; | ||
258 | } | ||
259 | if ((word & 0x1) == 0) | ||
260 | num += 1; | ||
261 | return num; | ||
262 | } | ||
263 | |||
264 | /* | ||
265 | * Every architecture must define this function. It's the fastest | ||
266 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
267 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
268 | * bits is cleared. | ||
269 | */ | ||
270 | static inline int sched_find_first_bit(unsigned long *b) | ||
271 | { | ||
272 | |||
273 | if (unlikely(b[0])) | ||
274 | return __ffs(b[0]); | ||
275 | if (unlikely(b[1])) | ||
276 | return __ffs(b[1]) + 32; | ||
277 | if (unlikely(b[2])) | ||
278 | return __ffs(b[2]) + 64; | ||
279 | if (b[3]) | ||
280 | return __ffs(b[3]) + 96; | ||
281 | return __ffs(b[4]) + 128; | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * ffs: find first bit set. This is defined the same way as | ||
286 | * the libc and compiler builtin ffs routines, therefore | ||
287 | * differs in spirit from the above ffz (man ffs). | ||
288 | */ | ||
289 | static inline int ffs(int x) | ||
290 | { | ||
291 | if (!x) | ||
292 | return 0; | ||
293 | return __ffs((unsigned long)x) + 1; | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * fls: find last (most-significant) bit set. | ||
298 | * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32. | ||
299 | */ | ||
300 | #define fls(x) generic_fls(x) | ||
301 | #define fls64(x) generic_fls64(x) | ||
302 | |||
303 | /* | ||
304 | * hweightN: returns the hamming weight (i.e. the number | ||
305 | * of bits set) of a N-bit word | ||
306 | */ | ||
307 | #define hweight32(x) generic_hweight32(x) | ||
308 | #define hweight16(x) generic_hweight16(x) | ||
309 | #define hweight8(x) generic_hweight8(x) | ||
310 | |||
311 | /* | ||
312 | * find_next_zero_bit() finds the first zero bit in a bit string of length | ||
313 | * 'size' bits, starting the search at bit 'offset'. This is largely based | ||
314 | * on Linus's ALPHA routines, which are pretty portable BTW. | ||
315 | */ | ||
316 | static inline unsigned long find_next_zero_bit(const unsigned long *addr, | ||
317 | unsigned long size, unsigned long offset) | ||
318 | { | ||
319 | const unsigned long *p = addr + (offset >> 5); | ||
320 | unsigned long result = offset & ~31UL; | ||
321 | unsigned long tmp; | ||
322 | |||
323 | if (offset >= size) | ||
324 | return size; | ||
325 | size -= result; | ||
326 | offset &= 31UL; | ||
327 | if (offset) { | ||
328 | tmp = *(p++); | ||
329 | tmp |= ~0UL >> (32-offset); | ||
330 | if (size < 32) | ||
331 | goto found_first; | ||
332 | if (~tmp) | ||
333 | goto found_middle; | ||
334 | size -= 32; | ||
335 | result += 32; | ||
336 | } | ||
337 | while (size & ~31UL) { | ||
338 | if (~(tmp = *(p++))) | ||
339 | goto found_middle; | ||
340 | result += 32; | ||
341 | size -= 32; | ||
342 | } | ||
343 | if (!size) | ||
344 | return result; | ||
345 | tmp = *p; | ||
346 | |||
347 | found_first: | ||
348 | tmp |= ~0UL << size; | ||
349 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
350 | return result + size; /* Nope. */ | ||
351 | found_middle: | ||
352 | return result + ffz(tmp); | ||
353 | } | ||
354 | |||
355 | /* | ||
356 | * Linus sez that gcc can optimize the following correctly, we'll see if this | ||
357 | * holds on the Sparc as it does for the ALPHA. | ||
358 | */ | ||
359 | #define find_first_zero_bit(addr, size) \ | ||
360 | find_next_zero_bit((addr), (size), 0) | ||
361 | |||
362 | /** | ||
363 | * find_next_bit - find the next set bit in a memory region | ||
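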
364 | * @addr: The address to base the search on | ||
365 | * @offset: The bitnumber to start searching at | ||
366 | * @size: The maximum size to search | ||
367 | * | ||
368 | * Scheduler induced bitop, do not use. | ||
369 | */ | ||
370 | static inline int find_next_bit(const unsigned long *addr, int size, int offset) | ||
371 | { | ||
372 | const unsigned long *p = addr + (offset >> 5); | ||
373 | int num = offset & ~0x1f; | ||
374 | unsigned long word; | ||
375 | |||
376 | word = *p++; | ||
377 | word &= ~((1 << (offset & 0x1f)) - 1); | ||
378 | while (num < size) { | ||
379 | if (word != 0) { | ||
380 | return __ffs(word) + num; | ||
381 | } | ||
382 | word = *p++; | ||
383 | num += 0x20; | ||
384 | } | ||
385 | return num; | ||
386 | } | ||
387 | |||
388 | /** | ||
389 | * find_first_bit - find the first set bit in a memory region | ||
390 | * @addr: The address to start the search at | ||
391 | * @size: The maximum size to search | ||
392 | * | ||
393 | * Returns the bit-number of the first set bit, not the number of the byte | ||
394 | * containing a bit. | ||
395 | */ | ||
396 | #define find_first_bit(addr, size) \ | ||
397 | find_next_bit((addr), (size), 0) | ||
398 | |||
399 | /* | ||
400 | */ | ||
401 | static inline int test_le_bit(int nr, __const__ unsigned long * addr) | ||
402 | { | ||
403 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
404 | return (ADDR[nr >> 3] >> (nr & 7)) & 1; | ||
405 | } | ||
406 | |||
407 | /* | ||
408 | * non-atomic versions | ||
409 | */ | ||
410 | static inline void __set_le_bit(int nr, unsigned long *addr) | ||
411 | { | ||
412 | unsigned char *ADDR = (unsigned char *)addr; | ||
413 | |||
414 | ADDR += nr >> 3; | ||
415 | *ADDR |= 1 << (nr & 0x07); | ||
416 | } | ||
417 | |||
418 | static inline void __clear_le_bit(int nr, unsigned long *addr) | ||
419 | { | ||
420 | unsigned char *ADDR = (unsigned char *)addr; | ||
421 | |||
422 | ADDR += nr >> 3; | ||
423 | *ADDR &= ~(1 << (nr & 0x07)); | ||
424 | } | ||
425 | |||
426 | static inline int __test_and_set_le_bit(int nr, unsigned long *addr) | ||
427 | { | ||
428 | int mask, retval; | ||
429 | unsigned char *ADDR = (unsigned char *)addr; | ||
430 | |||
431 | ADDR += nr >> 3; | ||
432 | mask = 1 << (nr & 0x07); | ||
433 | retval = (mask & *ADDR) != 0; | ||
434 | *ADDR |= mask; | ||
435 | return retval; | ||
436 | } | ||
437 | |||
438 | static inline int __test_and_clear_le_bit(int nr, unsigned long *addr) | ||
439 | { | ||
440 | int mask, retval; | ||
441 | unsigned char *ADDR = (unsigned char *)addr; | ||
442 | |||
443 | ADDR += nr >> 3; | ||
444 | mask = 1 << (nr & 0x07); | ||
445 | retval = (mask & *ADDR) != 0; | ||
446 | *ADDR &= ~mask; | ||
447 | return retval; | ||
448 | } | ||
449 | |||
450 | static inline unsigned long find_next_zero_le_bit(const unsigned long *addr, | ||
451 | unsigned long size, unsigned long offset) | ||
452 | { | ||
453 | const unsigned long *p = addr + (offset >> 5); | ||
454 | unsigned long result = offset & ~31UL; | ||
455 | unsigned long tmp; | ||
456 | |||
457 | if (offset >= size) | ||
458 | return size; | ||
459 | size -= result; | ||
460 | offset &= 31UL; | ||
461 | if(offset) { | ||
462 | tmp = *(p++); | ||
463 | tmp |= __swab32(~0UL >> (32-offset)); | ||
464 | if(size < 32) | ||
465 | goto found_first; | ||
466 | if(~tmp) | ||
467 | goto found_middle; | ||
468 | size -= 32; | ||
469 | result += 32; | ||
470 | } | ||
471 | while(size & ~31UL) { | ||
472 | if(~(tmp = *(p++))) | ||
473 | goto found_middle; | ||
474 | result += 32; | ||
475 | size -= 32; | ||
476 | } | ||
477 | if(!size) | ||
478 | return result; | ||
479 | tmp = *p; | ||
480 | |||
481 | found_first: | ||
482 | tmp = __swab32(tmp) | (~0UL << size); | ||
483 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
484 | return result + size; /* Nope. */ | ||
485 | return result + ffz(tmp); | ||
486 | |||
487 | found_middle: | ||
488 | return result + ffz(__swab32(tmp)); | ||
489 | } | ||
490 | |||
491 | #define find_first_zero_le_bit(addr, size) \ | ||
492 | find_next_zero_le_bit((addr), (size), 0) | ||
493 | |||
494 | #define ext2_set_bit(nr,addr) \ | ||
495 | __test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
496 | #define ext2_clear_bit(nr,addr) \ | ||
497 | __test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
498 | |||
499 | #define ext2_set_bit_atomic(lock, nr, addr) \ | ||
500 | ({ \ | ||
501 | int ret; \ | ||
502 | spin_lock(lock); \ | ||
503 | ret = ext2_set_bit((nr), (unsigned long *)(addr)); \ | ||
504 | spin_unlock(lock); \ | ||
505 | ret; \ | ||
506 | }) | ||
507 | |||
508 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | ||
509 | ({ \ | ||
510 | int ret; \ | ||
511 | spin_lock(lock); \ | ||
512 | ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \ | ||
513 | spin_unlock(lock); \ | ||
514 | ret; \ | ||
515 | }) | ||
516 | |||
517 | #define ext2_test_bit(nr,addr) \ | ||
518 | test_le_bit((nr),(unsigned long *)(addr)) | ||
519 | #define ext2_find_first_zero_bit(addr, size) \ | ||
520 | find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
521 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
522 | find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
523 | |||
524 | /* Bitmap functions for the minix filesystem. */ | ||
525 | #define minix_test_and_set_bit(nr,addr) \ | ||
526 | test_and_set_bit((nr),(unsigned long *)(addr)) | ||
527 | #define minix_set_bit(nr,addr) \ | ||
528 | set_bit((nr),(unsigned long *)(addr)) | ||
529 | #define minix_test_and_clear_bit(nr,addr) \ | ||
530 | test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
531 | #define minix_test_bit(nr,addr) \ | ||
532 | test_bit((nr),(unsigned long *)(addr)) | ||
533 | #define minix_find_first_zero_bit(addr,size) \ | ||
534 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
535 | 171 | ||
536 | #endif /* __KERNEL__ */ | 172 | #endif /* __KERNEL__ */ |
537 | 173 | ||
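The removed sparc find_next_zero_le_bit() above deals with ext2's little-endian bit numbering: on big-endian sparc the word is byte-swapped with __swab32() before ffz(), so that "bit 0" always means the least-significant bit of byte 0. A minimal sketch of that idea, assuming plain C99 and hypothetical helper names:

#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)              /* like __swab32() */
{
        return (x >> 24) | ((x >> 8) & 0x0000ff00u) |
               ((x << 8) & 0x00ff0000u) | (x << 24);
}

static unsigned int ffz32(uint32_t word)        /* first zero bit, word != ~0 assumed */
{
        unsigned int n = 0;

        while (word & 1) {
                word >>= 1;
                n++;
        }
        return n;
}

int main(void)
{
        /* bytes of a little-endian bitmap as they sit in memory:
         * LE bits 0..7 set, LE bit 8 set, so the first zero is LE bit 9 */
        unsigned char bytes[4] = { 0xff, 0x01, 0x00, 0x00 };

        /* simulate the word a big-endian CPU would load from those bytes */
        uint32_t be_word = ((uint32_t)bytes[0] << 24) | ((uint32_t)bytes[1] << 16) |
                           ((uint32_t)bytes[2] << 8)  |  (uint32_t)bytes[3];

        /* the loaded word must be swabbed before searching */
        printf("first zero LE bit: %u\n", ffz32(swab32(be_word)));   /* prints 9 */
        return 0;
}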
diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 6efc0162fb09..71944b0f09de 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h | |||
@@ -18,58 +18,7 @@ extern void set_bit(unsigned long nr, volatile unsigned long *addr); | |||
18 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); | 18 | extern void clear_bit(unsigned long nr, volatile unsigned long *addr); |
19 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); | 19 | extern void change_bit(unsigned long nr, volatile unsigned long *addr); |
20 | 20 | ||
21 | /* "non-atomic" versions... */ | 21 | #include <asm-generic/bitops/non-atomic.h> |
22 | |||
23 | static inline void __set_bit(int nr, volatile unsigned long *addr) | ||
24 | { | ||
25 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
26 | |||
27 | *m |= (1UL << (nr & 63)); | ||
28 | } | ||
29 | |||
30 | static inline void __clear_bit(int nr, volatile unsigned long *addr) | ||
31 | { | ||
32 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
33 | |||
34 | *m &= ~(1UL << (nr & 63)); | ||
35 | } | ||
36 | |||
37 | static inline void __change_bit(int nr, volatile unsigned long *addr) | ||
38 | { | ||
39 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
40 | |||
41 | *m ^= (1UL << (nr & 63)); | ||
42 | } | ||
43 | |||
44 | static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) | ||
45 | { | ||
46 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
47 | unsigned long old = *m; | ||
48 | unsigned long mask = (1UL << (nr & 63)); | ||
49 | |||
50 | *m = (old | mask); | ||
51 | return ((old & mask) != 0); | ||
52 | } | ||
53 | |||
54 | static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) | ||
55 | { | ||
56 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
57 | unsigned long old = *m; | ||
58 | unsigned long mask = (1UL << (nr & 63)); | ||
59 | |||
60 | *m = (old & ~mask); | ||
61 | return ((old & mask) != 0); | ||
62 | } | ||
63 | |||
64 | static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | ||
65 | { | ||
66 | unsigned long *m = ((unsigned long *)addr) + (nr >> 6); | ||
67 | unsigned long old = *m; | ||
68 | unsigned long mask = (1UL << (nr & 63)); | ||
69 | |||
70 | *m = (old ^ mask); | ||
71 | return ((old & mask) != 0); | ||
72 | } | ||
73 | 22 | ||
74 | #ifdef CONFIG_SMP | 23 | #ifdef CONFIG_SMP |
75 | #define smp_mb__before_clear_bit() membar_storeload_loadload() | 24 | #define smp_mb__before_clear_bit() membar_storeload_loadload() |
@@ -79,78 +28,15 @@ static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) | |||
79 | #define smp_mb__after_clear_bit() barrier() | 28 | #define smp_mb__after_clear_bit() barrier() |
80 | #endif | 29 | #endif |
81 | 30 | ||
82 | static inline int test_bit(int nr, __const__ volatile unsigned long *addr) | 31 | #include <asm-generic/bitops/ffz.h> |
83 | { | 32 | #include <asm-generic/bitops/__ffs.h> |
84 | return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL; | 33 | #include <asm-generic/bitops/fls.h> |
85 | } | 34 | #include <asm-generic/bitops/fls64.h> |
86 | |||
87 | /* The easy/cheese version for now. */ | ||
88 | static inline unsigned long ffz(unsigned long word) | ||
89 | { | ||
90 | unsigned long result; | ||
91 | |||
92 | result = 0; | ||
93 | while(word & 1) { | ||
94 | result++; | ||
95 | word >>= 1; | ||
96 | } | ||
97 | return result; | ||
98 | } | ||
99 | |||
100 | /** | ||
101 | * __ffs - find first bit in word. | ||
102 | * @word: The word to search | ||
103 | * | ||
104 | * Undefined if no bit exists, so code should check against 0 first. | ||
105 | */ | ||
106 | static inline unsigned long __ffs(unsigned long word) | ||
107 | { | ||
108 | unsigned long result = 0; | ||
109 | |||
110 | while (!(word & 1UL)) { | ||
111 | result++; | ||
112 | word >>= 1; | ||
113 | } | ||
114 | return result; | ||
115 | } | ||
116 | |||
117 | /* | ||
118 | * fls: find last bit set. | ||
119 | */ | ||
120 | |||
121 | #define fls(x) generic_fls(x) | ||
122 | #define fls64(x) generic_fls64(x) | ||
123 | 35 | ||
124 | #ifdef __KERNEL__ | 36 | #ifdef __KERNEL__ |
125 | 37 | ||
126 | /* | 38 | #include <asm-generic/bitops/sched.h> |
127 | * Every architecture must define this function. It's the fastest | 39 | #include <asm-generic/bitops/ffs.h> |
128 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
129 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
130 | * bits is cleared. | ||
131 | */ | ||
132 | static inline int sched_find_first_bit(unsigned long *b) | ||
133 | { | ||
134 | if (unlikely(b[0])) | ||
135 | return __ffs(b[0]); | ||
136 | if (unlikely(((unsigned int)b[1]))) | ||
137 | return __ffs(b[1]) + 64; | ||
138 | if (b[1] >> 32) | ||
139 | return __ffs(b[1] >> 32) + 96; | ||
140 | return __ffs(b[2]) + 128; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * ffs: find first bit set. This is defined the same way as | ||
145 | * the libc and compiler builtin ffs routines, therefore | ||
146 | * differs in spirit from the above ffz (man ffs). | ||
147 | */ | ||
148 | static inline int ffs(int x) | ||
149 | { | ||
150 | if (!x) | ||
151 | return 0; | ||
152 | return __ffs((unsigned long)x) + 1; | ||
153 | } | ||
154 | 40 | ||
155 | /* | 41 | /* |
156 | * hweightN: returns the hamming weight (i.e. the number | 42 | * hweightN: returns the hamming weight (i.e. the number |
@@ -193,102 +79,23 @@ static inline unsigned int hweight8(unsigned int w) | |||
193 | 79 | ||
194 | #else | 80 | #else |
195 | 81 | ||
196 | #define hweight64(x) generic_hweight64(x) | 82 | #include <asm-generic/bitops/hweight.h> |
197 | #define hweight32(x) generic_hweight32(x) | ||
198 | #define hweight16(x) generic_hweight16(x) | ||
199 | #define hweight8(x) generic_hweight8(x) | ||
200 | 83 | ||
201 | #endif | 84 | #endif |
202 | #endif /* __KERNEL__ */ | 85 | #endif /* __KERNEL__ */ |
203 | 86 | ||
204 | /** | 87 | #include <asm-generic/bitops/find.h> |
205 | * find_next_bit - find the next set bit in a memory region | ||
206 | * @addr: The address to base the search on | ||
207 | * @offset: The bitnumber to start searching at | ||
208 | * @size: The maximum size to search | ||
209 | */ | ||
210 | extern unsigned long find_next_bit(const unsigned long *, unsigned long, | ||
211 | unsigned long); | ||
212 | |||
213 | /** | ||
214 | * find_first_bit - find the first set bit in a memory region | ||
215 | * @addr: The address to start the search at | ||
216 | * @size: The maximum size to search | ||
217 | * | ||
218 | * Returns the bit-number of the first set bit, not the number of the byte | ||
219 | * containing a bit. | ||
220 | */ | ||
221 | #define find_first_bit(addr, size) \ | ||
222 | find_next_bit((addr), (size), 0) | ||
223 | |||
224 | /* find_next_zero_bit() finds the first zero bit in a bit string of length | ||
225 | * 'size' bits, starting the search at bit 'offset'. This is largely based | ||
226 | * on Linus's ALPHA routines, which are pretty portable BTW. | ||
227 | */ | ||
228 | |||
229 | extern unsigned long find_next_zero_bit(const unsigned long *, | ||
230 | unsigned long, unsigned long); | ||
231 | |||
232 | #define find_first_zero_bit(addr, size) \ | ||
233 | find_next_zero_bit((addr), (size), 0) | ||
234 | |||
235 | #define test_and_set_le_bit(nr,addr) \ | ||
236 | test_and_set_bit((nr) ^ 0x38, (addr)) | ||
237 | #define test_and_clear_le_bit(nr,addr) \ | ||
238 | test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
239 | |||
240 | static inline int test_le_bit(int nr, __const__ unsigned long * addr) | ||
241 | { | ||
242 | int mask; | ||
243 | __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; | ||
244 | |||
245 | ADDR += nr >> 3; | ||
246 | mask = 1 << (nr & 0x07); | ||
247 | return ((mask & *ADDR) != 0); | ||
248 | } | ||
249 | |||
250 | #define find_first_zero_le_bit(addr, size) \ | ||
251 | find_next_zero_le_bit((addr), (size), 0) | ||
252 | |||
253 | extern unsigned long find_next_zero_le_bit(unsigned long *, unsigned long, unsigned long); | ||
254 | 88 | ||
255 | #ifdef __KERNEL__ | 89 | #ifdef __KERNEL__ |
256 | 90 | ||
257 | #define __set_le_bit(nr, addr) \ | 91 | #include <asm-generic/bitops/ext2-non-atomic.h> |
258 | __set_bit((nr) ^ 0x38, (addr)) | ||
259 | #define __clear_le_bit(nr, addr) \ | ||
260 | __clear_bit((nr) ^ 0x38, (addr)) | ||
261 | #define __test_and_clear_le_bit(nr, addr) \ | ||
262 | __test_and_clear_bit((nr) ^ 0x38, (addr)) | ||
263 | #define __test_and_set_le_bit(nr, addr) \ | ||
264 | __test_and_set_bit((nr) ^ 0x38, (addr)) | ||
265 | 92 | ||
266 | #define ext2_set_bit(nr,addr) \ | ||
267 | __test_and_set_le_bit((nr),(unsigned long *)(addr)) | ||
268 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 93 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
269 | test_and_set_le_bit((nr),(unsigned long *)(addr)) | 94 | test_and_set_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
270 | #define ext2_clear_bit(nr,addr) \ | ||
271 | __test_and_clear_le_bit((nr),(unsigned long *)(addr)) | ||
272 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 95 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
273 | test_and_clear_le_bit((nr),(unsigned long *)(addr)) | 96 | test_and_clear_bit((nr) ^ 0x38,(unsigned long *)(addr)) |
274 | #define ext2_test_bit(nr,addr) \ | ||
275 | test_le_bit((nr),(unsigned long *)(addr)) | ||
276 | #define ext2_find_first_zero_bit(addr, size) \ | ||
277 | find_first_zero_le_bit((unsigned long *)(addr), (size)) | ||
278 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
279 | find_next_zero_le_bit((unsigned long *)(addr), (size), (off)) | ||
280 | 97 | ||
281 | /* Bitmap functions for the minix filesystem. */ | 98 | #include <asm-generic/bitops/minix.h> |
282 | #define minix_test_and_set_bit(nr,addr) \ | ||
283 | test_and_set_bit((nr),(unsigned long *)(addr)) | ||
284 | #define minix_set_bit(nr,addr) \ | ||
285 | set_bit((nr),(unsigned long *)(addr)) | ||
286 | #define minix_test_and_clear_bit(nr,addr) \ | ||
287 | test_and_clear_bit((nr),(unsigned long *)(addr)) | ||
288 | #define minix_test_bit(nr,addr) \ | ||
289 | test_bit((nr),(unsigned long *)(addr)) | ||
290 | #define minix_find_first_zero_bit(addr,size) \ | ||
291 | find_first_zero_bit((unsigned long *)(addr),(size)) | ||
292 | 99 | ||
293 | #endif /* __KERNEL__ */ | 100 | #endif /* __KERNEL__ */ |
294 | 101 | ||
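The ext2_*_bit_atomic() definitions kept above rely on the (nr) ^ 0x38 trick: within a 64-bit big-endian word, XOR-ing the bit number with 0x38 (56) reverses the byte index (byte b becomes 7 - b) while leaving the bit-within-byte offset alone, so the native atomic bitops can serve little-endian-numbered bitmaps without swapping any data. A small sketch that just prints the mapping (hypothetical code, not from the kernel):

#include <stdio.h>

int main(void)
{
        for (int nr = 0; nr < 64; nr += 8) {
                int le_byte = nr >> 3;            /* byte holding LE bit nr    */
                int be_nr   = nr ^ 0x38;          /* bit number handed to the  */
                int be_byte = be_nr >> 3;         /* native big-endian bitop   */

                printf("LE byte %d -> native byte %d\n", le_byte, be_byte);
                /* prints 0->7, 1->6, ..., 7->0; low three bits are untouched */
        }
        return 0;
}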
diff --git a/include/asm-v850/bitops.h b/include/asm-v850/bitops.h index 609b9e87222a..1f6fd5ab4177 100644 --- a/include/asm-v850/bitops.h +++ b/include/asm-v850/bitops.h | |||
@@ -22,25 +22,11 @@ | |||
22 | 22 | ||
23 | #ifdef __KERNEL__ | 23 | #ifdef __KERNEL__ |
24 | 24 | ||
25 | /* | 25 | #include <asm-generic/bitops/ffz.h> |
26 | * The __ functions are not atomic | ||
27 | */ | ||
28 | 26 | ||
29 | /* | 27 | /* |
30 | * ffz = Find First Zero in word. Undefined if no zero exists, | 28 | * The __ functions are not atomic |
31 | * so code should check against ~0UL first. | ||
32 | */ | 29 | */ |
33 | static inline unsigned long ffz (unsigned long word) | ||
34 | { | ||
35 | unsigned long result = 0; | ||
36 | |||
37 | while (word & 1) { | ||
38 | result++; | ||
39 | word >>= 1; | ||
40 | } | ||
41 | return result; | ||
42 | } | ||
43 | |||
44 | 30 | ||
45 | /* In the following constant-bit-op macros, a "g" constraint is used when | 31 | /* In the following constant-bit-op macros, a "g" constraint is used when |
46 | we really need an integer ("i" constraint). This is to avoid | 32 | we really need an integer ("i" constraint). This is to avoid |
@@ -153,203 +139,19 @@ static inline int __test_bit (int nr, const void *addr) | |||
153 | #define smp_mb__before_clear_bit() barrier () | 139 | #define smp_mb__before_clear_bit() barrier () |
154 | #define smp_mb__after_clear_bit() barrier () | 140 | #define smp_mb__after_clear_bit() barrier () |
155 | 141 | ||
142 | #include <asm-generic/bitops/ffs.h> | ||
143 | #include <asm-generic/bitops/fls.h> | ||
144 | #include <asm-generic/bitops/fls64.h> | ||
145 | #include <asm-generic/bitops/__ffs.h> | ||
146 | #include <asm-generic/bitops/find.h> | ||
147 | #include <asm-generic/bitops/sched.h> | ||
148 | #include <asm-generic/bitops/hweight.h> | ||
156 | 149 | ||
157 | #define find_first_zero_bit(addr, size) \ | 150 | #include <asm-generic/bitops/ext2-non-atomic.h> |
158 | find_next_zero_bit ((addr), (size), 0) | ||
159 | |||
160 | static inline int find_next_zero_bit(const void *addr, int size, int offset) | ||
161 | { | ||
162 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
163 | unsigned long result = offset & ~31UL; | ||
164 | unsigned long tmp; | ||
165 | |||
166 | if (offset >= size) | ||
167 | return size; | ||
168 | size -= result; | ||
169 | offset &= 31UL; | ||
170 | if (offset) { | ||
171 | tmp = * (p++); | ||
172 | tmp |= ~0UL >> (32-offset); | ||
173 | if (size < 32) | ||
174 | goto found_first; | ||
175 | if (~tmp) | ||
176 | goto found_middle; | ||
177 | size -= 32; | ||
178 | result += 32; | ||
179 | } | ||
180 | while (size & ~31UL) { | ||
181 | if (~ (tmp = * (p++))) | ||
182 | goto found_middle; | ||
183 | result += 32; | ||
184 | size -= 32; | ||
185 | } | ||
186 | if (!size) | ||
187 | return result; | ||
188 | tmp = *p; | ||
189 | |||
190 | found_first: | ||
191 | tmp |= ~0UL << size; | ||
192 | found_middle: | ||
193 | return result + ffz (tmp); | ||
194 | } | ||
195 | |||
196 | |||
197 | /* This is the same as generic_ffs, but we can't use that because it's | ||
198 | inline and the #include order mucks things up. */ | ||
199 | static inline int generic_ffs_for_find_next_bit(int x) | ||
200 | { | ||
201 | int r = 1; | ||
202 | |||
203 | if (!x) | ||
204 | return 0; | ||
205 | if (!(x & 0xffff)) { | ||
206 | x >>= 16; | ||
207 | r += 16; | ||
208 | } | ||
209 | if (!(x & 0xff)) { | ||
210 | x >>= 8; | ||
211 | r += 8; | ||
212 | } | ||
213 | if (!(x & 0xf)) { | ||
214 | x >>= 4; | ||
215 | r += 4; | ||
216 | } | ||
217 | if (!(x & 3)) { | ||
218 | x >>= 2; | ||
219 | r += 2; | ||
220 | } | ||
221 | if (!(x & 1)) { | ||
222 | x >>= 1; | ||
223 | r += 1; | ||
224 | } | ||
225 | return r; | ||
226 | } | ||
227 | |||
228 | /* | ||
229 | * Find next one bit in a bitmap reasonably efficiently. | ||
230 | */ | ||
231 | static __inline__ unsigned long find_next_bit(const unsigned long *addr, | ||
232 | unsigned long size, unsigned long offset) | ||
233 | { | ||
234 | unsigned int *p = ((unsigned int *) addr) + (offset >> 5); | ||
235 | unsigned int result = offset & ~31UL; | ||
236 | unsigned int tmp; | ||
237 | |||
238 | if (offset >= size) | ||
239 | return size; | ||
240 | size -= result; | ||
241 | offset &= 31UL; | ||
242 | if (offset) { | ||
243 | tmp = *p++; | ||
244 | tmp &= ~0UL << offset; | ||
245 | if (size < 32) | ||
246 | goto found_first; | ||
247 | if (tmp) | ||
248 | goto found_middle; | ||
249 | size -= 32; | ||
250 | result += 32; | ||
251 | } | ||
252 | while (size >= 32) { | ||
253 | if ((tmp = *p++) != 0) | ||
254 | goto found_middle; | ||
255 | result += 32; | ||
256 | size -= 32; | ||
257 | } | ||
258 | if (!size) | ||
259 | return result; | ||
260 | tmp = *p; | ||
261 | |||
262 | found_first: | ||
263 | tmp &= ~0UL >> (32 - size); | ||
264 | if (tmp == 0UL) /* Are any bits set? */ | ||
265 | return result + size; /* Nope. */ | ||
266 | found_middle: | ||
267 | return result + generic_ffs_for_find_next_bit(tmp); | ||
268 | } | ||
269 | |||
270 | /* | ||
271 | * find_first_bit - find the first set bit in a memory region | ||
272 | */ | ||
273 | #define find_first_bit(addr, size) \ | ||
274 | find_next_bit((addr), (size), 0) | ||
275 | |||
276 | |||
277 | #define ffs(x) generic_ffs (x) | ||
278 | #define fls(x) generic_fls (x) | ||
279 | #define fls64(x) generic_fls64(x) | ||
280 | #define __ffs(x) ffs(x) | ||
281 | |||
282 | |||
283 | /* | ||
284 | * This is just `generic_ffs' from <linux/bitops.h>, except that it assumes | ||
285 | * that at least one bit is set, and returns the real index of the bit | ||
286 | * (rather than the bit index + 1, like ffs does). | ||
287 | */ | ||
288 | static inline int sched_ffs(int x) | ||
289 | { | ||
290 | int r = 0; | ||
291 | |||
292 | if (!(x & 0xffff)) { | ||
293 | x >>= 16; | ||
294 | r += 16; | ||
295 | } | ||
296 | if (!(x & 0xff)) { | ||
297 | x >>= 8; | ||
298 | r += 8; | ||
299 | } | ||
300 | if (!(x & 0xf)) { | ||
301 | x >>= 4; | ||
302 | r += 4; | ||
303 | } | ||
304 | if (!(x & 3)) { | ||
305 | x >>= 2; | ||
306 | r += 2; | ||
307 | } | ||
308 | if (!(x & 1)) { | ||
309 | x >>= 1; | ||
310 | r += 1; | ||
311 | } | ||
312 | return r; | ||
313 | } | ||
314 | |||
315 | /* | ||
316 | * Every architecture must define this function. It's the fastest | ||
317 | * way of searching a 140-bit bitmap where the first 100 bits are | ||
318 | * unlikely to be set. It's guaranteed that at least one of the 140 | ||
319 | * bits is set. | ||
320 | */ | ||
321 | static inline int sched_find_first_bit(unsigned long *b) | ||
322 | { | ||
323 | unsigned offs = 0; | ||
324 | while (! *b) { | ||
325 | b++; | ||
326 | offs += 32; | ||
327 | } | ||
328 | return sched_ffs (*b) + offs; | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * hweightN: returns the hamming weight (i.e. the number | ||
333 | * of bits set) of a N-bit word | ||
334 | */ | ||
335 | #define hweight32(x) generic_hweight32 (x) | ||
336 | #define hweight16(x) generic_hweight16 (x) | ||
337 | #define hweight8(x) generic_hweight8 (x) | ||
338 | |||
339 | #define ext2_set_bit test_and_set_bit | ||
340 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) | 151 | #define ext2_set_bit_atomic(l,n,a) test_and_set_bit(n,a) |
341 | #define ext2_clear_bit test_and_clear_bit | ||
342 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) | 152 | #define ext2_clear_bit_atomic(l,n,a) test_and_clear_bit(n,a) |
343 | #define ext2_test_bit test_bit | ||
344 | #define ext2_find_first_zero_bit find_first_zero_bit | ||
345 | #define ext2_find_next_zero_bit find_next_zero_bit | ||
346 | 153 | ||
347 | /* Bitmap functions for the minix filesystem. */ | 154 | #include <asm-generic/bitops/minix.h> |
348 | #define minix_test_and_set_bit test_and_set_bit | ||
349 | #define minix_set_bit set_bit | ||
350 | #define minix_test_and_clear_bit test_and_clear_bit | ||
351 | #define minix_test_bit test_bit | ||
352 | #define minix_find_first_zero_bit find_first_zero_bit | ||
353 | 155 | ||
354 | #endif /* __KERNEL__ */ | 156 | #endif /* __KERNEL__ */ |
355 | 157 | ||
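The removed v850 find_next_bit() above uses the usual word-at-a-time scan: skip whole zero words, then finish in the interesting word with a find-first-set. A condensed, self-contained sketch of that pattern (hypothetical names; bitmap length assumed to be a multiple of 32, no start offset handled):

#include <stdio.h>

#define BITS_PER_WORD 32

static unsigned int ffs32(unsigned int x)       /* 0-based, x != 0 assumed */
{
        unsigned int n = 0;

        while (!(x & 1)) {
                x >>= 1;
                n++;
        }
        return n;
}

static unsigned int find_first_set(const unsigned int *map, unsigned int nbits)
{
        for (unsigned int i = 0; i < nbits / BITS_PER_WORD; i++)
                if (map[i])
                        return i * BITS_PER_WORD + ffs32(map[i]);
        return nbits;                           /* no bit set */
}

int main(void)
{
        unsigned int map[4] = { 0, 0, 0x40, 0 };        /* bit 70 set */

        printf("first set bit: %u\n", find_first_set(map, 128));   /* prints 70 */
        return 0;
}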
diff --git a/include/asm-x86_64/bitops.h b/include/asm-x86_64/bitops.h index eb4df23e1e41..79212128d0f7 100644 --- a/include/asm-x86_64/bitops.h +++ b/include/asm-x86_64/bitops.h | |||
@@ -356,14 +356,7 @@ static __inline__ unsigned long __fls(unsigned long word) | |||
356 | 356 | ||
357 | #ifdef __KERNEL__ | 357 | #ifdef __KERNEL__ |
358 | 358 | ||
359 | static inline int sched_find_first_bit(const unsigned long *b) | 359 | #include <asm-generic/bitops/sched.h> |
360 | { | ||
361 | if (b[0]) | ||
362 | return __ffs(b[0]); | ||
363 | if (b[1]) | ||
364 | return __ffs(b[1]) + 64; | ||
365 | return __ffs(b[2]) + 128; | ||
366 | } | ||
367 | 360 | ||
368 | /** | 361 | /** |
369 | * ffs - find first bit set | 362 | * ffs - find first bit set |
@@ -412,43 +405,20 @@ static __inline__ int fls(int x) | |||
412 | return r+1; | 405 | return r+1; |
413 | } | 406 | } |
414 | 407 | ||
415 | /** | 408 | #include <asm-generic/bitops/hweight.h> |
416 | * hweightN - returns the hamming weight of a N-bit word | ||
417 | * @x: the word to weigh | ||
418 | * | ||
419 | * The Hamming Weight of a number is the total number of bits set in it. | ||
420 | */ | ||
421 | |||
422 | #define hweight64(x) generic_hweight64(x) | ||
423 | #define hweight32(x) generic_hweight32(x) | ||
424 | #define hweight16(x) generic_hweight16(x) | ||
425 | #define hweight8(x) generic_hweight8(x) | ||
426 | 409 | ||
427 | #endif /* __KERNEL__ */ | 410 | #endif /* __KERNEL__ */ |
428 | 411 | ||
429 | #ifdef __KERNEL__ | 412 | #ifdef __KERNEL__ |
430 | 413 | ||
431 | #define ext2_set_bit(nr,addr) \ | 414 | #include <asm-generic/bitops/ext2-non-atomic.h> |
432 | __test_and_set_bit((nr),(unsigned long*)addr) | 415 | |
433 | #define ext2_set_bit_atomic(lock,nr,addr) \ | 416 | #define ext2_set_bit_atomic(lock,nr,addr) \ |
434 | test_and_set_bit((nr),(unsigned long*)addr) | 417 | test_and_set_bit((nr),(unsigned long*)addr) |
435 | #define ext2_clear_bit(nr, addr) \ | ||
436 | __test_and_clear_bit((nr),(unsigned long*)addr) | ||
437 | #define ext2_clear_bit_atomic(lock,nr,addr) \ | 418 | #define ext2_clear_bit_atomic(lock,nr,addr) \ |
438 | test_and_clear_bit((nr),(unsigned long*)addr) | 419 | test_and_clear_bit((nr),(unsigned long*)addr) |
439 | #define ext2_test_bit(nr, addr) test_bit((nr),(unsigned long*)addr) | 420 | |
440 | #define ext2_find_first_zero_bit(addr, size) \ | 421 | #include <asm-generic/bitops/minix.h> |
441 | find_first_zero_bit((unsigned long*)addr, size) | ||
442 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
443 | find_next_zero_bit((unsigned long*)addr, size, off) | ||
444 | |||
445 | /* Bitmap functions for the minix filesystem. */ | ||
446 | #define minix_test_and_set_bit(nr,addr) __test_and_set_bit(nr,(void*)addr) | ||
447 | #define minix_set_bit(nr,addr) __set_bit(nr,(void*)addr) | ||
448 | #define minix_test_and_clear_bit(nr,addr) __test_and_clear_bit(nr,(void*)addr) | ||
449 | #define minix_test_bit(nr,addr) test_bit(nr,(void*)addr) | ||
450 | #define minix_find_first_zero_bit(addr,size) \ | ||
451 | find_first_zero_bit((void*)addr,size) | ||
452 | 422 | ||
453 | #endif /* __KERNEL__ */ | 423 | #endif /* __KERNEL__ */ |
454 | 424 | ||
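Several comments in these headers note that ffs() "differs in spirit" from ffz()/__ffs(): ffs() follows the libc convention (1-based, returning 0 when no bit is set), while __ffs() is 0-based and undefined for a zero argument. A small sketch of the two conventions side by side (hypothetical names, plain loops rather than the bsf-based x86-64 versions kept above):

#include <stdio.h>

static int ffs_like(unsigned int x)             /* 1-based, 0 if x == 0 */
{
        int r = 1;

        if (!x)
                return 0;
        while (!(x & 1)) {
                x >>= 1;
                r++;
        }
        return r;
}

static unsigned int __ffs_like(unsigned int x)  /* 0-based, x != 0 assumed */
{
        unsigned int r = 0;

        while (!(x & 1)) {
                x >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        printf("ffs(0x8) = %d, __ffs(0x8) = %u\n", ffs_like(0x8), __ffs_like(0x8));
        /* prints: ffs(0x8) = 4, __ffs(0x8) = 3 */
        return 0;
}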
diff --git a/include/asm-xtensa/bitops.h b/include/asm-xtensa/bitops.h index 0a2065f1a372..d815649617aa 100644 --- a/include/asm-xtensa/bitops.h +++ b/include/asm-xtensa/bitops.h | |||
@@ -23,156 +23,11 @@ | |||
23 | # error SMP not supported on this architecture | 23 | # error SMP not supported on this architecture |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | static __inline__ void set_bit(int nr, volatile void * addr) | ||
27 | { | ||
28 | unsigned long mask = 1 << (nr & 0x1f); | ||
29 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
30 | unsigned long flags; | ||
31 | |||
32 | local_irq_save(flags); | ||
33 | *a |= mask; | ||
34 | local_irq_restore(flags); | ||
35 | } | ||
36 | |||
37 | static __inline__ void __set_bit(int nr, volatile unsigned long * addr) | ||
38 | { | ||
39 | unsigned long mask = 1 << (nr & 0x1f); | ||
40 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
41 | |||
42 | *a |= mask; | ||
43 | } | ||
44 | |||
45 | static __inline__ void clear_bit(int nr, volatile void * addr) | ||
46 | { | ||
47 | unsigned long mask = 1 << (nr & 0x1f); | ||
48 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
49 | unsigned long flags; | ||
50 | |||
51 | local_irq_save(flags); | ||
52 | *a &= ~mask; | ||
53 | local_irq_restore(flags); | ||
54 | } | ||
55 | |||
56 | static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) | ||
57 | { | ||
58 | unsigned long mask = 1 << (nr & 0x1f); | ||
59 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
60 | |||
61 | *a &= ~mask; | ||
62 | } | ||
63 | |||
64 | /* | ||
65 | * clear_bit() doesn't provide any barrier for the compiler. | ||
66 | */ | ||
67 | |||
68 | #define smp_mb__before_clear_bit() barrier() | 26 | #define smp_mb__before_clear_bit() barrier() |
69 | #define smp_mb__after_clear_bit() barrier() | 27 | #define smp_mb__after_clear_bit() barrier() |
70 | 28 | ||
71 | static __inline__ void change_bit(int nr, volatile void * addr) | 29 | #include <asm-generic/bitops/atomic.h> |
72 | { | 30 | #include <asm-generic/bitops/non-atomic.h> |
73 | unsigned long mask = 1 << (nr & 0x1f); | ||
74 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
75 | unsigned long flags; | ||
76 | |||
77 | local_irq_save(flags); | ||
78 | *a ^= mask; | ||
79 | local_irq_restore(flags); | ||
80 | } | ||
81 | |||
82 | static __inline__ void __change_bit(int nr, volatile void * addr) | ||
83 | { | ||
84 | unsigned long mask = 1 << (nr & 0x1f); | ||
85 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
86 | |||
87 | *a ^= mask; | ||
88 | } | ||
89 | |||
90 | static __inline__ int test_and_set_bit(int nr, volatile void * addr) | ||
91 | { | ||
92 | unsigned long retval; | ||
93 | unsigned long mask = 1 << (nr & 0x1f); | ||
94 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
95 | unsigned long flags; | ||
96 | |||
97 | local_irq_save(flags); | ||
98 | retval = (mask & *a) != 0; | ||
99 | *a |= mask; | ||
100 | local_irq_restore(flags); | ||
101 | |||
102 | return retval; | ||
103 | } | ||
104 | |||
105 | static __inline__ int __test_and_set_bit(int nr, volatile void * addr) | ||
106 | { | ||
107 | unsigned long retval; | ||
108 | unsigned long mask = 1 << (nr & 0x1f); | ||
109 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
110 | |||
111 | retval = (mask & *a) != 0; | ||
112 | *a |= mask; | ||
113 | |||
114 | return retval; | ||
115 | } | ||
116 | |||
117 | static __inline__ int test_and_clear_bit(int nr, volatile void * addr) | ||
118 | { | ||
119 | unsigned long retval; | ||
120 | unsigned long mask = 1 << (nr & 0x1f); | ||
121 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
122 | unsigned long flags; | ||
123 | |||
124 | local_irq_save(flags); | ||
125 | retval = (mask & *a) != 0; | ||
126 | *a &= ~mask; | ||
127 | local_irq_restore(flags); | ||
128 | |||
129 | return retval; | ||
130 | } | ||
131 | |||
132 | static __inline__ int __test_and_clear_bit(int nr, volatile void * addr) | ||
133 | { | ||
134 | unsigned long mask = 1 << (nr & 0x1f); | ||
135 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
136 | unsigned long old = *a; | ||
137 | |||
138 | *a = old & ~mask; | ||
139 | return (old & mask) != 0; | ||
140 | } | ||
141 | |||
142 | static __inline__ int test_and_change_bit(int nr, volatile void * addr) | ||
143 | { | ||
144 | unsigned long retval; | ||
145 | unsigned long mask = 1 << (nr & 0x1f); | ||
146 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
147 | unsigned long flags; | ||
148 | |||
149 | local_irq_save(flags); | ||
150 | |||
151 | retval = (mask & *a) != 0; | ||
152 | *a ^= mask; | ||
153 | local_irq_restore(flags); | ||
154 | |||
155 | return retval; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * non-atomic version; can be reordered | ||
160 | */ | ||
161 | |||
162 | static __inline__ int __test_and_change_bit(int nr, volatile void *addr) | ||
163 | { | ||
164 | unsigned long mask = 1 << (nr & 0x1f); | ||
165 | unsigned long *a = ((unsigned long *)addr) + (nr >> 5); | ||
166 | unsigned long old = *a; | ||
167 | |||
168 | *a = old ^ mask; | ||
169 | return (old & mask) != 0; | ||
170 | } | ||
171 | |||
172 | static __inline__ int test_bit(int nr, const volatile void *addr) | ||
173 | { | ||
174 | return 1UL & (((const volatile unsigned int *)addr)[nr>>5] >> (nr&31)); | ||
175 | } | ||
176 | 31 | ||
177 | #if XCHAL_HAVE_NSA | 32 | #if XCHAL_HAVE_NSA |
178 | 33 | ||
@@ -245,202 +100,23 @@ static __inline__ int fls (unsigned int x) | |||
245 | { | 100 | { |
246 | return __cntlz(x); | 101 | return __cntlz(x); |
247 | } | 102 | } |
248 | #define fls64(x) generic_fls64(x) | 103 | #include <asm-generic/bitops/fls64.h> |
249 | 104 | #include <asm-generic/bitops/find.h> | |
250 | static __inline__ int | 105 | #include <asm-generic/bitops/ext2-non-atomic.h> |
251 | find_next_bit(const unsigned long *addr, int size, int offset) | ||
252 | { | ||
253 | const unsigned long *p = addr + (offset >> 5); | ||
254 | unsigned long result = offset & ~31UL; | ||
255 | unsigned long tmp; | ||
256 | |||
257 | if (offset >= size) | ||
258 | return size; | ||
259 | size -= result; | ||
260 | offset &= 31UL; | ||
261 | if (offset) { | ||
262 | tmp = *p++; | ||
263 | tmp &= ~0UL << offset; | ||
264 | if (size < 32) | ||
265 | goto found_first; | ||
266 | if (tmp) | ||
267 | goto found_middle; | ||
268 | size -= 32; | ||
269 | result += 32; | ||
270 | } | ||
271 | while (size >= 32) { | ||
272 | if ((tmp = *p++) != 0) | ||
273 | goto found_middle; | ||
274 | result += 32; | ||
275 | size -= 32; | ||
276 | } | ||
277 | if (!size) | ||
278 | return result; | ||
279 | tmp = *p; | ||
280 | |||
281 | found_first: | ||
282 | tmp &= ~0UL >> (32 - size); | ||
283 | if (tmp == 0UL) /* Are any bits set? */ | ||
284 | return result + size; /* Nope. */ | ||
285 | found_middle: | ||
286 | return result + __ffs(tmp); | ||
287 | } | ||
288 | |||
289 | /** | ||
290 | * find_first_bit - find the first set bit in a memory region | ||
291 | * @addr: The address to start the search at | ||
292 | * @size: The maximum size to search | ||
293 | * | ||
294 | * Returns the bit-number of the first set bit, not the number of the byte | ||
295 | * containing a bit. | ||
296 | */ | ||
297 | |||
298 | #define find_first_bit(addr, size) \ | ||
299 | find_next_bit((addr), (size), 0) | ||
300 | |||
301 | static __inline__ int | ||
302 | find_next_zero_bit(const unsigned long *addr, int size, int offset) | ||
303 | { | ||
304 | const unsigned long *p = addr + (offset >> 5); | ||
305 | unsigned long result = offset & ~31UL; | ||
306 | unsigned long tmp; | ||
307 | |||
308 | if (offset >= size) | ||
309 | return size; | ||
310 | size -= result; | ||
311 | offset &= 31UL; | ||
312 | if (offset) { | ||
313 | tmp = *p++; | ||
314 | tmp |= ~0UL >> (32-offset); | ||
315 | if (size < 32) | ||
316 | goto found_first; | ||
317 | if (~tmp) | ||
318 | goto found_middle; | ||
319 | size -= 32; | ||
320 | result += 32; | ||
321 | } | ||
322 | while (size & ~31UL) { | ||
323 | if (~(tmp = *p++)) | ||
324 | goto found_middle; | ||
325 | result += 32; | ||
326 | size -= 32; | ||
327 | } | ||
328 | if (!size) | ||
329 | return result; | ||
330 | tmp = *p; | ||
331 | |||
332 | found_first: | ||
333 | tmp |= ~0UL << size; | ||
334 | found_middle: | ||
335 | return result + ffz(tmp); | ||
336 | } | ||
337 | |||
338 | #define find_first_zero_bit(addr, size) \ | ||
339 | find_next_zero_bit((addr), (size), 0) | ||
340 | 106 | ||
341 | #ifdef __XTENSA_EL__ | 107 | #ifdef __XTENSA_EL__ |
342 | # define ext2_set_bit(nr,addr) __test_and_set_bit((nr), (addr)) | ||
343 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) | 108 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr),(addr)) |
344 | # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr), (addr)) | ||
345 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) | 109 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr),(addr)) |
346 | # define ext2_test_bit(nr,addr) test_bit((nr), (addr)) | ||
347 | # define ext2_find_first_zero_bit(addr, size) find_first_zero_bit((addr),(size)) | ||
348 | # define ext2_find_next_zero_bit(addr, size, offset) \ | ||
349 | find_next_zero_bit((addr), (size), (offset)) | ||
350 | #elif defined(__XTENSA_EB__) | 110 | #elif defined(__XTENSA_EB__) |
351 | # define ext2_set_bit(nr,addr) __test_and_set_bit((nr) ^ 0x18, (addr)) | ||
352 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) | 111 | # define ext2_set_bit_atomic(lock,nr,addr) test_and_set_bit((nr) ^ 0x18, (addr)) |
353 | # define ext2_clear_bit(nr,addr) __test_and_clear_bit((nr) ^ 0x18, (addr)) | ||
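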
354 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) | 112 | # define ext2_clear_bit_atomic(lock,nr,addr) test_and_clear_bit((nr)^0x18,(addr)) |
355 | # define ext2_test_bit(nr,addr) test_bit((nr) ^ 0x18, (addr)) | ||
356 | # define ext2_find_first_zero_bit(addr, size) \ | ||
357 | ext2_find_next_zero_bit((addr), (size), 0) | ||
358 | |||
359 | static __inline__ unsigned long ext2_find_next_zero_bit(void *addr, unsigned long size, unsigned long offset) | ||
360 | { | ||
361 | unsigned long *p = ((unsigned long *) addr) + (offset >> 5); | ||
362 | unsigned long result = offset & ~31UL; | ||
363 | unsigned long tmp; | ||
364 | |||
365 | if (offset >= size) | ||
366 | return size; | ||
367 | size -= result; | ||
368 | offset &= 31UL; | ||
369 | if(offset) { | ||
370 | /* We hold the little endian value in tmp, but then the | ||
371 | * shift is illegal. So we could keep a big endian value | ||
372 | * in tmp, like this: | ||
373 | * | ||
374 | * tmp = __swab32(*(p++)); | ||
375 | * tmp |= ~0UL >> (32-offset); | ||
376 | * | ||
377 | * but this would decrease performance, so we change the | ||
378 | * shift: | ||
379 | */ | ||
380 | tmp = *(p++); | ||
381 | tmp |= __swab32(~0UL >> (32-offset)); | ||
382 | if(size < 32) | ||
383 | goto found_first; | ||
384 | if(~tmp) | ||
385 | goto found_middle; | ||
386 | size -= 32; | ||
387 | result += 32; | ||
388 | } | ||
389 | while(size & ~31UL) { | ||
390 | if(~(tmp = *(p++))) | ||
391 | goto found_middle; | ||
392 | result += 32; | ||
393 | size -= 32; | ||
394 | } | ||
395 | if(!size) | ||
396 | return result; | ||
397 | tmp = *p; | ||
398 | |||
399 | found_first: | ||
400 | /* tmp is little endian, so we would have to swab the shift, | ||
401 | * see above. But then we have to swab tmp below for ffz, so | ||
402 | * we might as well do this here. | ||
403 | */ | ||
404 | return result + ffz(__swab32(tmp) | (~0UL << size)); | ||
405 | found_middle: | ||
406 | return result + ffz(__swab32(tmp)); | ||
407 | } | ||
408 | |||
409 | #else | 113 | #else |
410 | # error processor byte order undefined! | 114 | # error processor byte order undefined! |
411 | #endif | 115 | #endif |
412 | 116 | ||
413 | 117 | #include <asm-generic/bitops/hweight.h> | |
414 | #define hweight32(x) generic_hweight32(x) | 118 | #include <asm-generic/bitops/sched.h> |
415 | #define hweight16(x) generic_hweight16(x) | 119 | #include <asm-generic/bitops/minix.h> |
416 | #define hweight8(x) generic_hweight8(x) | ||
417 | |||
418 | /* | ||
419 | * Find the first bit set in a 140-bit bitmap. | ||
420 | * The first 100 bits are unlikely to be set. | ||
421 | */ | ||
422 | |||
423 | static inline int sched_find_first_bit(const unsigned long *b) | ||
424 | { | ||
425 | if (unlikely(b[0])) | ||
426 | return __ffs(b[0]); | ||
427 | if (unlikely(b[1])) | ||
428 | return __ffs(b[1]) + 32; | ||
429 | if (unlikely(b[2])) | ||
430 | return __ffs(b[2]) + 64; | ||
431 | if (b[3]) | ||
432 | return __ffs(b[3]) + 96; | ||
433 | return __ffs(b[4]) + 128; | ||
434 | } | ||
435 | |||
436 | |||
437 | /* Bitmap functions for the minix filesystem. */ | ||
438 | |||
439 | #define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr) | ||
440 | #define minix_set_bit(nr,addr) set_bit(nr,addr) | ||
441 | #define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr) | ||
442 | #define minix_test_bit(nr,addr) test_bit(nr,addr) | ||
443 | #define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size) | ||
444 | 120 | ||
445 | #endif /* __KERNEL__ */ | 121 | #endif /* __KERNEL__ */ |
446 | 122 | ||
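The xtensa fls() kept above is built on a count-leading-zeros helper when the NSA instruction is available. One common, portable way to express the fls() convention used throughout these headers (fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32) is sketched below, with gcc's __builtin_clz() standing in for the hardware instruction; this is illustrative only, not the kernel's implementation:

#include <stdio.h>

static int fls32(unsigned int x)
{
        /* __builtin_clz() is undefined for 0, so handle that case separately */
        return x ? 32 - __builtin_clz(x) : 0;
}

int main(void)
{
        printf("fls(0) = %d, fls(1) = %d, fls(0x80000000) = %d\n",
               fls32(0), fls32(1u), fls32(0x80000000u));
        /* prints: fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32 */
        return 0;
}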
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index f17525a963d1..5d1eabcde5d5 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -3,88 +3,11 @@ | |||
3 | #include <asm/types.h> | 3 | #include <asm/types.h> |
4 | 4 | ||
5 | /* | 5 | /* |
6 | * ffs: find first bit set. This is defined the same way as | ||
7 | * the libc and compiler builtin ffs routines, therefore | ||
8 | * differs in spirit from the above ffz (man ffs). | ||
9 | */ | ||
10 | |||
11 | static inline int generic_ffs(int x) | ||
12 | { | ||
13 | int r = 1; | ||
14 | |||
15 | if (!x) | ||
16 | return 0; | ||
17 | if (!(x & 0xffff)) { | ||
18 | x >>= 16; | ||
19 | r += 16; | ||
20 | } | ||
21 | if (!(x & 0xff)) { | ||
22 | x >>= 8; | ||
23 | r += 8; | ||
24 | } | ||
25 | if (!(x & 0xf)) { | ||
26 | x >>= 4; | ||
27 | r += 4; | ||
28 | } | ||
29 | if (!(x & 3)) { | ||
30 | x >>= 2; | ||
31 | r += 2; | ||
32 | } | ||
33 | if (!(x & 1)) { | ||
34 | x >>= 1; | ||
35 | r += 1; | ||
36 | } | ||
37 | return r; | ||
38 | } | ||
39 | |||
40 | /* | ||
41 | * fls: find last bit set. | ||
42 | */ | ||
43 | |||
44 | static __inline__ int generic_fls(int x) | ||
45 | { | ||
46 | int r = 32; | ||
47 | |||
48 | if (!x) | ||
49 | return 0; | ||
50 | if (!(x & 0xffff0000u)) { | ||
51 | x <<= 16; | ||
52 | r -= 16; | ||
53 | } | ||
54 | if (!(x & 0xff000000u)) { | ||
55 | x <<= 8; | ||
56 | r -= 8; | ||
57 | } | ||
58 | if (!(x & 0xf0000000u)) { | ||
59 | x <<= 4; | ||
60 | r -= 4; | ||
61 | } | ||
62 | if (!(x & 0xc0000000u)) { | ||
63 | x <<= 2; | ||
64 | r -= 2; | ||
65 | } | ||
66 | if (!(x & 0x80000000u)) { | ||
67 | x <<= 1; | ||
68 | r -= 1; | ||
69 | } | ||
70 | return r; | ||
71 | } | ||
72 | |||
73 | /* | ||
74 | * Include this here because some architectures need generic_ffs/fls in | 6 | * Include this here because some architectures need generic_ffs/fls in |
75 | * scope | 7 | * scope |
76 | */ | 8 | */ |
77 | #include <asm/bitops.h> | 9 | #include <asm/bitops.h> |
78 | 10 | ||
79 | |||
80 | static inline int generic_fls64(__u64 x) | ||
81 | { | ||
82 | __u32 h = x >> 32; | ||
83 | if (h) | ||
84 | return fls(h) + 32; | ||
85 | return fls(x); | ||
86 | } | ||
87 | |||
88 | static __inline__ int get_bitmask_order(unsigned int count) | 11 | static __inline__ int get_bitmask_order(unsigned int count) |
89 | { | 12 | { |
90 | int order; | 13 | int order; |
@@ -103,54 +26,9 @@ static __inline__ int get_count_order(unsigned int count) | |||
103 | return order; | 26 | return order; |
104 | } | 27 | } |
105 | 28 | ||
106 | /* | ||
107 | * hweightN: returns the hamming weight (i.e. the number | ||
108 | * of bits set) of a N-bit word | ||
109 | */ | ||
110 | |||
111 | static inline unsigned int generic_hweight32(unsigned int w) | ||
112 | { | ||
113 | unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); | ||
114 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | ||
115 | res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); | ||
116 | res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); | ||
117 | return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); | ||
118 | } | ||
119 | |||
120 | static inline unsigned int generic_hweight16(unsigned int w) | ||
121 | { | ||
122 | unsigned int res = (w & 0x5555) + ((w >> 1) & 0x5555); | ||
123 | res = (res & 0x3333) + ((res >> 2) & 0x3333); | ||
124 | res = (res & 0x0F0F) + ((res >> 4) & 0x0F0F); | ||
125 | return (res & 0x00FF) + ((res >> 8) & 0x00FF); | ||
126 | } | ||
127 | |||
128 | static inline unsigned int generic_hweight8(unsigned int w) | ||
129 | { | ||
130 | unsigned int res = (w & 0x55) + ((w >> 1) & 0x55); | ||
131 | res = (res & 0x33) + ((res >> 2) & 0x33); | ||
132 | return (res & 0x0F) + ((res >> 4) & 0x0F); | ||
133 | } | ||
134 | |||
135 | static inline unsigned long generic_hweight64(__u64 w) | ||
136 | { | ||
137 | #if BITS_PER_LONG < 64 | ||
138 | return generic_hweight32((unsigned int)(w >> 32)) + | ||
139 | generic_hweight32((unsigned int)w); | ||
140 | #else | ||
141 | u64 res; | ||
142 | res = (w & 0x5555555555555555ul) + ((w >> 1) & 0x5555555555555555ul); | ||
143 | res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); | ||
144 | res = (res & 0x0F0F0F0F0F0F0F0Ful) + ((res >> 4) & 0x0F0F0F0F0F0F0F0Ful); | ||
145 | res = (res & 0x00FF00FF00FF00FFul) + ((res >> 8) & 0x00FF00FF00FF00FFul); | ||
146 | res = (res & 0x0000FFFF0000FFFFul) + ((res >> 16) & 0x0000FFFF0000FFFFul); | ||
147 | return (res & 0x00000000FFFFFFFFul) + ((res >> 32) & 0x00000000FFFFFFFFul); | ||
148 | #endif | ||
149 | } | ||
150 | |||
151 | static inline unsigned long hweight_long(unsigned long w) | 29 | static inline unsigned long hweight_long(unsigned long w) |
152 | { | 30 | { |
153 | return sizeof(w) == 4 ? generic_hweight32(w) : generic_hweight64(w); | 31 | return sizeof(w) == 4 ? hweight32(w) : hweight64(w); |
154 | } | 32 | } |
155 | 33 | ||
156 | /* | 34 | /* |
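The generic_hweight32() removed from linux/bitops.h above (its job now done through asm-generic/bitops/hweight.h and the hweight32()/hweight64() names used by hweight_long()) is the classic divide-and-conquer population count: adjacent 1-bit sums are folded into 2-, 4-, 8- and 16-bit sums. A standalone sketch of the same arithmetic, under a hypothetical name:

#include <stdio.h>

static unsigned int hweight32_sketch(unsigned int w)
{
        unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555);

        res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
        res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F);
        res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF);
        return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF);
}

int main(void)
{
        printf("hweight32(0xF0F00001) = %u\n", hweight32_sketch(0xF0F00001u));
        /* 4 bits + 4 bits + 1 bit = 9 */
        return 0;
}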
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 9f159baf153f..fb7e9b7ccbe3 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -46,25 +46,28 @@ struct address_space; | |||
46 | typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); | 46 | typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate); |
47 | 47 | ||
48 | /* | 48 | /* |
49 | * Keep related fields in common cachelines. The most commonly accessed | 49 | * Historically, a buffer_head was used to map a single block |
50 | * field (b_state) goes at the start so the compiler does not generate | 50 | * within a page, and of course as the unit of I/O through the |
51 | * indexed addressing for it. | 51 | * filesystem and block layers. Nowadays the basic I/O unit |
52 | * is the bio, and buffer_heads are used for extracting block | ||
53 | * mappings (via a get_block_t call), for tracking state within | ||
54 | * a page (via a page_mapping) and for wrapping bio submission | ||
55 | * for backward compatibility reasons (e.g. submit_bh). | ||
52 | */ | 56 | */ |
53 | struct buffer_head { | 57 | struct buffer_head { |
54 | /* First cache line: */ | ||
55 | unsigned long b_state; /* buffer state bitmap (see above) */ | 58 | unsigned long b_state; /* buffer state bitmap (see above) */ |
56 | struct buffer_head *b_this_page;/* circular list of page's buffers */ | 59 | struct buffer_head *b_this_page;/* circular list of page's buffers */ |
57 | struct page *b_page; /* the page this bh is mapped to */ | 60 | struct page *b_page; /* the page this bh is mapped to */ |
58 | atomic_t b_count; /* users using this block */ | ||
59 | u32 b_size; /* block size */ | ||
60 | 61 | ||
61 | sector_t b_blocknr; /* block number */ | 62 | sector_t b_blocknr; /* start block number */ |
62 | char *b_data; /* pointer to data block */ | 63 | size_t b_size; /* size of mapping */ |
64 | char *b_data; /* pointer to data within the page */ | ||
63 | 65 | ||
64 | struct block_device *b_bdev; | 66 | struct block_device *b_bdev; |
65 | bh_end_io_t *b_end_io; /* I/O completion */ | 67 | bh_end_io_t *b_end_io; /* I/O completion */ |
66 | void *b_private; /* reserved for b_end_io */ | 68 | void *b_private; /* reserved for b_end_io */ |
67 | struct list_head b_assoc_buffers; /* associated with another mapping */ | 69 | struct list_head b_assoc_buffers; /* associated with another mapping */ |
70 | atomic_t b_count; /* users using this buffer_head */ | ||
68 | }; | 71 | }; |
69 | 72 | ||
70 | /* | 73 | /* |
@@ -189,8 +192,8 @@ extern int buffer_heads_over_limit; | |||
189 | * address_spaces. | 192 | * address_spaces. |
190 | */ | 193 | */ |
191 | int try_to_release_page(struct page * page, gfp_t gfp_mask); | 194 | int try_to_release_page(struct page * page, gfp_t gfp_mask); |
192 | int block_invalidatepage(struct page *page, unsigned long offset); | 195 | void block_invalidatepage(struct page *page, unsigned long offset); |
193 | int do_invalidatepage(struct page *page, unsigned long offset); | 196 | void do_invalidatepage(struct page *page, unsigned long offset); |
194 | int block_write_full_page(struct page *page, get_block_t *get_block, | 197 | int block_write_full_page(struct page *page, get_block_t *get_block, |
195 | struct writeback_control *wbc); | 198 | struct writeback_control *wbc); |
196 | int block_read_full_page(struct page*, get_block_t*); | 199 | int block_read_full_page(struct page*, get_block_t*); |
@@ -200,7 +203,7 @@ int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*, | |||
200 | int generic_cont_expand(struct inode *inode, loff_t size); | 203 | int generic_cont_expand(struct inode *inode, loff_t size); |
201 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | 204 | int generic_cont_expand_simple(struct inode *inode, loff_t size); |
202 | int block_commit_write(struct page *page, unsigned from, unsigned to); | 205 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
203 | int block_sync_page(struct page *); | 206 | void block_sync_page(struct page *); |
204 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 207 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
205 | int generic_commit_write(struct file *, struct page *, unsigned, unsigned); | 208 | int generic_commit_write(struct file *, struct page *, unsigned, unsigned); |
206 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 209 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
@@ -277,6 +280,7 @@ map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block) | |||
277 | set_buffer_mapped(bh); | 280 | set_buffer_mapped(bh); |
278 | bh->b_bdev = sb->s_bdev; | 281 | bh->b_bdev = sb->s_bdev; |
279 | bh->b_blocknr = block; | 282 | bh->b_blocknr = block; |
283 | bh->b_size = sb->s_blocksize; | ||
280 | } | 284 | } |
281 | 285 | ||
282 | /* | 286 | /* |
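The buffer_head rework above turns b_size into a size_t "size of mapping", so a single get_block-style lookup can describe a multi-block extent, and map_bh() now records the block size it was given. The following userspace sketch only illustrates that mapping idea (logical block -> start block + size); every name in it is hypothetical and it is not kernel code:

#include <stdio.h>
#include <stddef.h>

struct block_mapping {
        unsigned long long start_block;  /* plays the role of b_blocknr */
        size_t size;                     /* plays the role of b_size, in bytes */
};

/* pretend lookup: file blocks 0..7 are contiguous on disk starting at block 100 */
static int map_blocks(unsigned long long file_block, size_t max_size,
                      unsigned int blocksize, struct block_mapping *map)
{
        if (file_block >= 8)
                return -1;                       /* hole / past EOF */
        map->start_block = 100 + file_block;
        map->size = (8 - file_block) * blocksize;
        if (map->size > max_size)
                map->size = max_size;            /* never more than requested */
        return 0;
}

int main(void)
{
        struct block_mapping m;

        if (map_blocks(2, 16384, 4096, &m) == 0)
                printf("disk block %llu, %zu bytes mapped\n",
                       m.start_block, m.size);   /* disk block 102, 16384 bytes */
        return 0;
}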
diff --git a/include/linux/compat.h b/include/linux/compat.h index c9ab2a26348c..24d659cdbafe 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -45,6 +45,32 @@ struct compat_tms { | |||
45 | compat_clock_t tms_cstime; | 45 | compat_clock_t tms_cstime; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | struct compat_timex { | ||
49 | compat_uint_t modes; | ||
50 | compat_long_t offset; | ||
51 | compat_long_t freq; | ||
52 | compat_long_t maxerror; | ||
53 | compat_long_t esterror; | ||
54 | compat_int_t status; | ||
55 | compat_long_t constant; | ||
56 | compat_long_t precision; | ||
57 | compat_long_t tolerance; | ||
58 | struct compat_timeval time; | ||
59 | compat_long_t tick; | ||
60 | compat_long_t ppsfreq; | ||
61 | compat_long_t jitter; | ||
62 | compat_int_t shift; | ||
63 | compat_long_t stabil; | ||
64 | compat_long_t jitcnt; | ||
65 | compat_long_t calcnt; | ||
66 | compat_long_t errcnt; | ||
67 | compat_long_t stbcnt; | ||
68 | |||
69 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
70 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
71 | compat_int_t :32; compat_int_t :32; compat_int_t :32; compat_int_t :32; | ||
72 | }; | ||
73 | |||
48 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) | 74 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) |
49 | 75 | ||
50 | typedef struct { | 76 | typedef struct { |
@@ -181,5 +207,7 @@ static inline int compat_timespec_compare(struct compat_timespec *lhs, | |||
181 | return lhs->tv_nsec - rhs->tv_nsec; | 207 | return lhs->tv_nsec - rhs->tv_nsec; |
182 | } | 208 | } |
183 | 209 | ||
210 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | ||
211 | |||
184 | #endif /* CONFIG_COMPAT */ | 212 | #endif /* CONFIG_COMPAT */ |
185 | #endif /* _LINUX_COMPAT_H */ | 213 | #endif /* _LINUX_COMPAT_H */ |
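struct compat_timex and the compat_sys_adjtimex() prototype added above imply a 32-bit-to-native conversion step before the core adjtimex code can run. The sketch below shows the shape of that conversion for a few representative fields only; it is not the implementation from this patch set, and the unfinished fields are listed in a comment rather than copied.

#include <linux/compat.h>
#include <linux/timex.h>
#include <linux/string.h>
#include <asm/uaccess.h>

/* Sketch only: copy a 32-bit userspace timex into the native structure. */
static int example_get_compat_timex(struct timex *txc,
				    struct compat_timex __user *utp)
{
	memset(txc, 0, sizeof(struct timex));

	if (!access_ok(VERIFY_READ, utp, sizeof(*utp)) ||
	    __get_user(txc->modes, &utp->modes) ||
	    __get_user(txc->offset, &utp->offset) ||
	    __get_user(txc->freq, &utp->freq) ||
	    __get_user(txc->maxerror, &utp->maxerror) ||
	    __get_user(txc->esterror, &utp->esterror) ||
	    __get_user(txc->status, &utp->status) ||
	    __get_user(txc->time.tv_sec, &utp->time.tv_sec) ||
	    __get_user(txc->time.tv_usec, &utp->time.tv_usec) ||
	    __get_user(txc->tick, &utp->tick))
		return -EFAULT;

	/* the remaining fields (constant, precision, tolerance, ppsfreq,
	 * jitter, shift, stabil, jitcnt, calcnt, errcnt, stbcnt) would be
	 * copied the same way */
	return 0;
}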
diff --git a/include/linux/efi.h b/include/linux/efi.h index c7c5dd316182..e203613d3aec 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -240,19 +240,21 @@ struct efi_memory_map { | |||
240 | unsigned long desc_size; | 240 | unsigned long desc_size; |
241 | }; | 241 | }; |
242 | 242 | ||
243 | #define EFI_INVALID_TABLE_ADDR (~0UL) | ||
244 | |||
243 | /* | 245 | /* |
244 | * All runtime access to EFI goes through this structure: | 246 | * All runtime access to EFI goes through this structure: |
245 | */ | 247 | */ |
246 | extern struct efi { | 248 | extern struct efi { |
247 | efi_system_table_t *systab; /* EFI system table */ | 249 | efi_system_table_t *systab; /* EFI system table */ |
248 | void *mps; /* MPS table */ | 250 | unsigned long mps; /* MPS table */ |
249 | void *acpi; /* ACPI table (IA64 ext 0.71) */ | 251 | unsigned long acpi; /* ACPI table (IA64 ext 0.71) */ |
250 | void *acpi20; /* ACPI table (ACPI 2.0) */ | 252 | unsigned long acpi20; /* ACPI table (ACPI 2.0) */ |
251 | void *smbios; /* SM BIOS table */ | 253 | unsigned long smbios; /* SM BIOS table */ |
252 | void *sal_systab; /* SAL system table */ | 254 | unsigned long sal_systab; /* SAL system table */ |
253 | void *boot_info; /* boot info table */ | 255 | unsigned long boot_info; /* boot info table */ |
254 | void *hcdp; /* HCDP table */ | 256 | unsigned long hcdp; /* HCDP table */ |
255 | void *uga; /* UGA table */ | 257 | unsigned long uga; /* UGA table */ |
256 | efi_get_time_t *get_time; | 258 | efi_get_time_t *get_time; |
257 | efi_set_time_t *set_time; | 259 | efi_set_time_t *set_time; |
258 | efi_get_wakeup_time_t *get_wakeup_time; | 260 | efi_get_wakeup_time_t *get_wakeup_time; |
@@ -292,6 +294,8 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos | |||
292 | extern u64 efi_get_iobase (void); | 294 | extern u64 efi_get_iobase (void); |
293 | extern u32 efi_mem_type (unsigned long phys_addr); | 295 | extern u32 efi_mem_type (unsigned long phys_addr); |
294 | extern u64 efi_mem_attributes (unsigned long phys_addr); | 296 | extern u64 efi_mem_attributes (unsigned long phys_addr); |
297 | extern int efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, | ||
298 | u64 attr); | ||
295 | extern int __init efi_uart_console_only (void); | 299 | extern int __init efi_uart_console_only (void); |
296 | extern void efi_initialize_iomem_resources(struct resource *code_resource, | 300 | extern void efi_initialize_iomem_resources(struct resource *code_resource, |
297 | struct resource *data_resource); | 301 | struct resource *data_resource); |
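With the EFI configuration table pointers turned into physical addresses, "not present" can no longer be expressed as a NULL pointer, which is what the new EFI_INVALID_TABLE_ADDR sentinel is for. A minimal sketch of the resulting consumer-side check (the function name and message are illustrative only):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/efi.h>

static void __init example_locate_acpi20(void)
{
	if (efi.acpi20 == EFI_INVALID_TABLE_ADDR) {
		printk(KERN_INFO "example: no ACPI 2.0 table published by EFI\n");
		return;
	}
	/* efi.acpi20 is now a physical address rather than a kernel pointer,
	 * so it must be mapped or translated before being dereferenced */
	printk(KERN_INFO "example: ACPI 2.0 table at physical 0x%lx\n",
	       efi.acpi20);
}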
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h index e7239f2f97a1..8bb4f842cded 100644 --- a/include/linux/ext3_fs.h +++ b/include/linux/ext3_fs.h | |||
@@ -36,7 +36,8 @@ struct statfs; | |||
36 | * Define EXT3_RESERVATION to reserve data blocks for expanding files | 36 | * Define EXT3_RESERVATION to reserve data blocks for expanding files |
37 | */ | 37 | */ |
38 | #define EXT3_DEFAULT_RESERVE_BLOCKS 8 | 38 | #define EXT3_DEFAULT_RESERVE_BLOCKS 8 |
39 | #define EXT3_MAX_RESERVE_BLOCKS 1024 | 39 | /*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */ |
40 | #define EXT3_MAX_RESERVE_BLOCKS 1027 | ||
40 | #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 | 41 | #define EXT3_RESERVE_WINDOW_NOT_ALLOCATED 0 |
41 | /* | 42 | /* |
42 | * Always enable hashed directories | 43 | * Always enable hashed directories |
@@ -732,6 +733,8 @@ struct dir_private_info { | |||
732 | extern int ext3_bg_has_super(struct super_block *sb, int group); | 733 | extern int ext3_bg_has_super(struct super_block *sb, int group); |
733 | extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); | 734 | extern unsigned long ext3_bg_num_gdb(struct super_block *sb, int group); |
734 | extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); | 735 | extern int ext3_new_block (handle_t *, struct inode *, unsigned long, int *); |
736 | extern int ext3_new_blocks (handle_t *, struct inode *, unsigned long, | ||
737 | unsigned long *, int *); | ||
735 | extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, | 738 | extern void ext3_free_blocks (handle_t *, struct inode *, unsigned long, |
736 | unsigned long); | 739 | unsigned long); |
737 | extern void ext3_free_blocks_sb (handle_t *, struct super_block *, | 740 | extern void ext3_free_blocks_sb (handle_t *, struct super_block *, |
@@ -775,9 +778,9 @@ extern unsigned long ext3_count_free (struct buffer_head *, unsigned); | |||
775 | int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); | 778 | int ext3_forget(handle_t *, int, struct inode *, struct buffer_head *, int); |
776 | struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); | 779 | struct buffer_head * ext3_getblk (handle_t *, struct inode *, long, int, int *); |
777 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); | 780 | struct buffer_head * ext3_bread (handle_t *, struct inode *, int, int, int *); |
778 | int ext3_get_block_handle(handle_t *handle, struct inode *inode, | 781 | int ext3_get_blocks_handle(handle_t *handle, struct inode *inode, |
779 | sector_t iblock, struct buffer_head *bh_result, int create, | 782 | sector_t iblock, unsigned long maxblocks, struct buffer_head *bh_result, |
780 | int extend_disksize); | 783 | int create, int extend_disksize); |
781 | 784 | ||
782 | extern void ext3_read_inode (struct inode *); | 785 | extern void ext3_read_inode (struct inode *); |
783 | extern int ext3_write_inode (struct inode *, int); | 786 | extern int ext3_write_inode (struct inode *, int); |
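ext3_get_block_handle() becomes ext3_get_blocks_handle() with a maxblocks argument, and ext3_new_blocks() can hand out several blocks per call, so one call can now map or allocate a whole range. A hedged sketch of driving the new interface follows; it assumes a positive return value is the number of blocks actually mapped (the real contract lives in fs/ext3/inode.c), and the journal handle management is omitted.

#include <linux/fs.h>
#include <linux/buffer_head.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>

static int example_map_range(handle_t *handle, struct inode *inode,
			     sector_t iblock, unsigned long maxblocks,
			     struct buffer_head *bh)
{
	int ret;

	ret = ext3_get_blocks_handle(handle, inode, iblock, maxblocks,
				     bh, 1 /* create */, 0 /* extend_disksize */);
	if (ret > 0) {
		/* record how much was really mapped, as in the earlier sketch */
		bh->b_size = ret << inode->i_blkbits;
		ret = 0;
	}
	return ret;
}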
diff --git a/include/linux/fs.h b/include/linux/fs.h index 5adf32b90f36..9d9674946956 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -252,9 +252,6 @@ extern void __init files_init(unsigned long); | |||
252 | struct buffer_head; | 252 | struct buffer_head; |
253 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, | 253 | typedef int (get_block_t)(struct inode *inode, sector_t iblock, |
254 | struct buffer_head *bh_result, int create); | 254 | struct buffer_head *bh_result, int create); |
255 | typedef int (get_blocks_t)(struct inode *inode, sector_t iblock, | ||
256 | unsigned long max_blocks, | ||
257 | struct buffer_head *bh_result, int create); | ||
258 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | 255 | typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, |
259 | ssize_t bytes, void *private); | 256 | ssize_t bytes, void *private); |
260 | 257 | ||
@@ -350,7 +347,7 @@ struct writeback_control; | |||
350 | struct address_space_operations { | 347 | struct address_space_operations { |
351 | int (*writepage)(struct page *page, struct writeback_control *wbc); | 348 | int (*writepage)(struct page *page, struct writeback_control *wbc); |
352 | int (*readpage)(struct file *, struct page *); | 349 | int (*readpage)(struct file *, struct page *); |
353 | int (*sync_page)(struct page *); | 350 | void (*sync_page)(struct page *); |
354 | 351 | ||
355 | /* Write back some dirty pages from this mapping. */ | 352 | /* Write back some dirty pages from this mapping. */ |
356 | int (*writepages)(struct address_space *, struct writeback_control *); | 353 | int (*writepages)(struct address_space *, struct writeback_control *); |
@@ -369,7 +366,7 @@ struct address_space_operations { | |||
369 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); | 366 | int (*commit_write)(struct file *, struct page *, unsigned, unsigned); |
370 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ | 367 | /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ |
371 | sector_t (*bmap)(struct address_space *, sector_t); | 368 | sector_t (*bmap)(struct address_space *, sector_t); |
372 | int (*invalidatepage) (struct page *, unsigned long); | 369 | void (*invalidatepage) (struct page *, unsigned long); |
373 | int (*releasepage) (struct page *, gfp_t); | 370 | int (*releasepage) (struct page *, gfp_t); |
374 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, | 371 | ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, |
375 | loff_t offset, unsigned long nr_segs); | 372 | loff_t offset, unsigned long nr_segs); |
@@ -490,7 +487,7 @@ struct inode { | |||
490 | unsigned int i_blkbits; | 487 | unsigned int i_blkbits; |
491 | unsigned long i_blksize; | 488 | unsigned long i_blksize; |
492 | unsigned long i_version; | 489 | unsigned long i_version; |
493 | unsigned long i_blocks; | 490 | blkcnt_t i_blocks; |
494 | unsigned short i_bytes; | 491 | unsigned short i_bytes; |
495 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ | 492 | spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ |
496 | struct mutex i_mutex; | 493 | struct mutex i_mutex; |
@@ -763,6 +760,7 @@ extern void locks_copy_lock(struct file_lock *, struct file_lock *); | |||
763 | extern void locks_remove_posix(struct file *, fl_owner_t); | 760 | extern void locks_remove_posix(struct file *, fl_owner_t); |
764 | extern void locks_remove_flock(struct file *); | 761 | extern void locks_remove_flock(struct file *); |
765 | extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); | 762 | extern int posix_test_lock(struct file *, struct file_lock *, struct file_lock *); |
763 | extern int posix_lock_file_conf(struct file *, struct file_lock *, struct file_lock *); | ||
766 | extern int posix_lock_file(struct file *, struct file_lock *); | 764 | extern int posix_lock_file(struct file *, struct file_lock *); |
767 | extern int posix_lock_file_wait(struct file *, struct file_lock *); | 765 | extern int posix_lock_file_wait(struct file *, struct file_lock *); |
768 | extern int posix_unblock_lock(struct file *, struct file_lock *); | 766 | extern int posix_unblock_lock(struct file *, struct file_lock *); |
@@ -1644,7 +1642,7 @@ static inline void do_generic_file_read(struct file * filp, loff_t *ppos, | |||
1644 | 1642 | ||
1645 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, | 1643 | ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, |
1646 | struct block_device *bdev, const struct iovec *iov, loff_t offset, | 1644 | struct block_device *bdev, const struct iovec *iov, loff_t offset, |
1647 | unsigned long nr_segs, get_blocks_t get_blocks, dio_iodone_t end_io, | 1645 | unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io, |
1648 | int lock_type); | 1646 | int lock_type); |
1649 | 1647 | ||
1650 | enum { | 1648 | enum { |
@@ -1655,29 +1653,29 @@ enum { | |||
1655 | 1653 | ||
1656 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, | 1654 | static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb, |
1657 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1655 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1658 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1656 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1659 | dio_iodone_t end_io) | 1657 | dio_iodone_t end_io) |
1660 | { | 1658 | { |
1661 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1659 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1662 | nr_segs, get_blocks, end_io, DIO_LOCKING); | 1660 | nr_segs, get_block, end_io, DIO_LOCKING); |
1663 | } | 1661 | } |
1664 | 1662 | ||
1665 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, | 1663 | static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb, |
1666 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1664 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1667 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1665 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1668 | dio_iodone_t end_io) | 1666 | dio_iodone_t end_io) |
1669 | { | 1667 | { |
1670 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1668 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1671 | nr_segs, get_blocks, end_io, DIO_NO_LOCKING); | 1669 | nr_segs, get_block, end_io, DIO_NO_LOCKING); |
1672 | } | 1670 | } |
1673 | 1671 | ||
1674 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, | 1672 | static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb, |
1675 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, | 1673 | struct inode *inode, struct block_device *bdev, const struct iovec *iov, |
1676 | loff_t offset, unsigned long nr_segs, get_blocks_t get_blocks, | 1674 | loff_t offset, unsigned long nr_segs, get_block_t get_block, |
1677 | dio_iodone_t end_io) | 1675 | dio_iodone_t end_io) |
1678 | { | 1676 | { |
1679 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, | 1677 | return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset, |
1680 | nr_segs, get_blocks, end_io, DIO_OWN_LOCKING); | 1678 | nr_segs, get_block, end_io, DIO_OWN_LOCKING); |
1681 | } | 1679 | } |
1682 | 1680 | ||
1683 | extern struct file_operations generic_ro_fops; | 1681 | extern struct file_operations generic_ro_fops; |
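With get_blocks_t gone, the direct-I/O entry points take an ordinary get_block_t and multi-block requests are expressed through bh->b_size rather than a separate max_blocks argument. A filesystem's ->direct_IO method then reduces to something like the sketch below; example_get_block is the hypothetical callback from the buffer_head sketch earlier, not a real in-tree function.

#include <linux/fs.h>
#include <linux/uio.h>

extern int example_get_block(struct inode *inode, sector_t iblock,
			     struct buffer_head *bh_result, int create);

static ssize_t example_direct_IO(int rw, struct kiocb *iocb,
				 const struct iovec *iov, loff_t offset,
				 unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	/* no end_io callback needed for this sketch */
	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev,
				  iov, offset, nr_segs,
				  example_get_block, NULL);
}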
diff --git a/include/linux/gameport.h b/include/linux/gameport.h index 2401dea2b867..9c8e6da2393b 100644 --- a/include/linux/gameport.h +++ b/include/linux/gameport.h | |||
@@ -119,7 +119,7 @@ static inline void gameport_set_name(struct gameport *gameport, const char *name | |||
119 | } | 119 | } |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * Use the following fucntions to manipulate gameport's per-port | 122 | * Use the following functions to manipulate gameport's per-port |
123 | * driver-specific data. | 123 | * driver-specific data. |
124 | */ | 124 | */ |
125 | static inline void *gameport_get_drvdata(struct gameport *gameport) | 125 | static inline void *gameport_get_drvdata(struct gameport *gameport) |
@@ -133,7 +133,7 @@ static inline void gameport_set_drvdata(struct gameport *gameport, void *data) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * Use the following fucntions to pin gameport's driver in process context | 136 | * Use the following functions to pin gameport's driver in process context |
137 | */ | 137 | */ |
138 | static inline int gameport_pin_driver(struct gameport *gameport) | 138 | static inline int gameport_pin_driver(struct gameport *gameport) |
139 | { | 139 | { |
diff --git a/include/linux/gigaset_dev.h b/include/linux/gigaset_dev.h new file mode 100644 index 000000000000..70ad09c8ad1e --- /dev/null +++ b/include/linux/gigaset_dev.h | |||
@@ -0,0 +1,32 @@ | |||
1 | /* | ||
2 | * interface to user space for the gigaset driver | ||
3 | * | ||
4 | * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de> | ||
5 | * | ||
6 | * ===================================================================== | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; either version 2 of | ||
10 | * the License, or (at your option) any later version. | ||
11 | * ===================================================================== | ||
12 | * Version: $Id: gigaset_dev.h,v 1.4.4.4 2005/11/21 22:28:09 hjlipp Exp $ | ||
13 | * ===================================================================== | ||
14 | */ | ||
15 | |||
16 | #ifndef GIGASET_INTERFACE_H | ||
17 | #define GIGASET_INTERFACE_H | ||
18 | |||
19 | #include <linux/ioctl.h> | ||
20 | |||
21 | #define GIGASET_IOCTL 0x47 | ||
22 | |||
23 | #define GIGVER_DRIVER 0 | ||
24 | #define GIGVER_COMPAT 1 | ||
25 | #define GIGVER_FWBASE 2 | ||
26 | |||
27 | #define GIGASET_REDIR _IOWR (GIGASET_IOCTL, 0, int) | ||
28 | #define GIGASET_CONFIG _IOWR (GIGASET_IOCTL, 1, int) | ||
29 | #define GIGASET_BRKCHARS _IOW (GIGASET_IOCTL, 2, unsigned char[6]) //FIXME [6] okay? | ||
30 | #define GIGASET_VERSION _IOWR (GIGASET_IOCTL, 3, unsigned[4]) | ||
31 | |||
32 | #endif | ||
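The new gigaset_dev.h is a userspace-visible ioctl description. A small hedged userspace example follows; the device node name and the convention that the first element of the GIGASET_VERSION array selects which version (GIGVER_DRIVER, GIGVER_COMPAT, GIGVER_FWBASE) is queried are assumptions, not something this header spells out.

/* Userspace sketch: query the gigaset driver version. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/gigaset_dev.h>

int main(void)
{
	unsigned version[4] = { GIGVER_DRIVER };	/* assumed request selector */
	int fd = open("/dev/ttyGU0", O_RDWR);		/* hypothetical node name */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, GIGASET_VERSION, version) < 0) {
		perror("GIGASET_VERSION");
		close(fd);
		return 1;
	}
	printf("driver version %u.%u.%u.%u\n",
	       version[0], version[1], version[2], version[3]);
	close(fd);
	return 0;
}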
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index 6bece9280eb7..892c4ea1b425 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h | |||
@@ -7,6 +7,18 @@ | |||
7 | 7 | ||
8 | #include <asm/cacheflush.h> | 8 | #include <asm/cacheflush.h> |
9 | 9 | ||
10 | #ifndef ARCH_HAS_FLUSH_ANON_PAGE | ||
11 | static inline void flush_anon_page(struct page *page, unsigned long vmaddr) | ||
12 | { | ||
13 | } | ||
14 | #endif | ||
15 | |||
16 | #ifndef ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE | ||
17 | static inline void flush_kernel_dcache_page(struct page *page) | ||
18 | { | ||
19 | } | ||
20 | #endif | ||
21 | |||
10 | #ifdef CONFIG_HIGHMEM | 22 | #ifdef CONFIG_HIGHMEM |
11 | 23 | ||
12 | #include <asm/highmem.h> | 24 | #include <asm/highmem.h> |
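These stubs give architectures that define neither ARCH_HAS_FLUSH_ANON_PAGE nor ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE empty defaults, so generic code can call the hooks unconditionally. A minimal sketch of the calling order in a driver that fills a page through a kernel mapping; the helper name and the memcpy payload are illustrative only.

#include <linux/highmem.h>
#include <linux/string.h>

static void example_fill_user_page(struct page *page,
				   const void *src, size_t len)
{
	void *dst = kmap(page);

	memcpy(dst, src, len);
	flush_kernel_dcache_page(page);	/* no-op on cache-coherent arches */
	kunmap(page);
}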
diff --git a/include/linux/hpet.h b/include/linux/hpet.h index 27238194b212..707f7cb9e795 100644 --- a/include/linux/hpet.h +++ b/include/linux/hpet.h | |||
@@ -3,6 +3,8 @@ | |||
3 | 3 | ||
4 | #include <linux/compiler.h> | 4 | #include <linux/compiler.h> |
5 | 5 | ||
6 | #ifdef __KERNEL__ | ||
7 | |||
6 | /* | 8 | /* |
7 | * Offsets into HPET Registers | 9 | * Offsets into HPET Registers |
8 | */ | 10 | */ |
@@ -85,22 +87,6 @@ struct hpet { | |||
85 | #define Tn_FSB_INT_ADDR_SHIFT (32UL) | 87 | #define Tn_FSB_INT_ADDR_SHIFT (32UL) |
86 | #define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) | 88 | #define Tn_FSB_INT_VAL_MASK (0x00000000ffffffffULL) |
87 | 89 | ||
88 | struct hpet_info { | ||
89 | unsigned long hi_ireqfreq; /* Hz */ | ||
90 | unsigned long hi_flags; /* information */ | ||
91 | unsigned short hi_hpet; | ||
92 | unsigned short hi_timer; | ||
93 | }; | ||
94 | |||
95 | #define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ | ||
96 | |||
97 | #define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ | ||
98 | #define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ | ||
99 | #define HPET_INFO _IOR('h', 0x03, struct hpet_info) | ||
100 | #define HPET_EPI _IO('h', 0x04) /* enable periodic */ | ||
101 | #define HPET_DPI _IO('h', 0x05) /* disable periodic */ | ||
102 | #define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ | ||
103 | |||
104 | /* | 90 | /* |
105 | * exported interfaces | 91 | * exported interfaces |
106 | */ | 92 | */ |
@@ -133,4 +119,22 @@ int hpet_register(struct hpet_task *, int); | |||
133 | int hpet_unregister(struct hpet_task *); | 119 | int hpet_unregister(struct hpet_task *); |
134 | int hpet_control(struct hpet_task *, unsigned int, unsigned long); | 120 | int hpet_control(struct hpet_task *, unsigned int, unsigned long); |
135 | 121 | ||
122 | #endif /* __KERNEL__ */ | ||
123 | |||
124 | struct hpet_info { | ||
125 | unsigned long hi_ireqfreq; /* Hz */ | ||
126 | unsigned long hi_flags; /* information */ | ||
127 | unsigned short hi_hpet; | ||
128 | unsigned short hi_timer; | ||
129 | }; | ||
130 | |||
131 | #define HPET_INFO_PERIODIC 0x0001 /* timer is periodic */ | ||
132 | |||
133 | #define HPET_IE_ON _IO('h', 0x01) /* interrupt on */ | ||
134 | #define HPET_IE_OFF _IO('h', 0x02) /* interrupt off */ | ||
135 | #define HPET_INFO _IOR('h', 0x03, struct hpet_info) | ||
136 | #define HPET_EPI _IO('h', 0x04) /* enable periodic */ | ||
137 | #define HPET_DPI _IO('h', 0x05) /* disable periodic */ | ||
138 | #define HPET_IRQFREQ _IOW('h', 0x6, unsigned long) /* IRQFREQ usec */ | ||
139 | |||
136 | #endif /* !__HPET__ */ | 140 | #endif /* !__HPET__ */ |
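Moving struct hpet_info and the HPET_* ioctls outside #ifdef __KERNEL__ makes them reachable from userspace again. A short hedged userspace example is below; the /dev/hpet node name follows the in-tree hpet character driver, and it assumes the sanitized exported headers include this file.

/* Userspace sketch: report what the HPET driver says about its timer. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/hpet.h>

int main(void)
{
	struct hpet_info info;
	int fd = open("/dev/hpet", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/hpet");
		return 1;
	}
	if (ioctl(fd, HPET_INFO, &info) == 0)
		printf("hpet %hu timer %hu flags 0x%lx ireqfreq %lu\n",
		       info.hi_hpet, info.hi_timer, info.hi_flags,
		       info.hi_ireqfreq);
	else
		perror("HPET_INFO");
	close(fd);
	return 0;
}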
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 6401c31d6add..93830158348e 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -34,15 +34,7 @@ enum hrtimer_restart { | |||
34 | HRTIMER_RESTART, | 34 | HRTIMER_RESTART, |
35 | }; | 35 | }; |
36 | 36 | ||
37 | /* | 37 | #define HRTIMER_INACTIVE ((void *)1UL) |
38 | * Timer states: | ||
39 | */ | ||
40 | enum hrtimer_state { | ||
41 | HRTIMER_INACTIVE, /* Timer is inactive */ | ||
42 | HRTIMER_EXPIRED, /* Timer is expired */ | ||
43 | HRTIMER_RUNNING, /* Timer is running the callback function */ | ||
44 | HRTIMER_PENDING, /* Timer is pending */ | ||
45 | }; | ||
46 | 38 | ||
47 | struct hrtimer_base; | 39 | struct hrtimer_base; |
48 | 40 | ||
@@ -53,9 +45,7 @@ struct hrtimer_base; | |||
53 | * @expires: the absolute expiry time in the hrtimers internal | 45 | * @expires: the absolute expiry time in the hrtimers internal |
54 | * representation. The time is related to the clock on | 46 | * representation. The time is related to the clock on |
55 | * which the timer is based. | 47 | * which the timer is based. |
56 | * @state: state of the timer | ||
57 | * @function: timer expiry callback function | 48 | * @function: timer expiry callback function |
58 | * @data: argument for the callback function | ||
59 | * @base: pointer to the timer base (per cpu and per clock) | 49 | * @base: pointer to the timer base (per cpu and per clock) |
60 | * | 50 | * |
61 | * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() | 51 | * The hrtimer structure must be initialized by init_hrtimer_#CLOCKTYPE() |
@@ -63,23 +53,23 @@ struct hrtimer_base; | |||
63 | struct hrtimer { | 53 | struct hrtimer { |
64 | struct rb_node node; | 54 | struct rb_node node; |
65 | ktime_t expires; | 55 | ktime_t expires; |
66 | enum hrtimer_state state; | 56 | int (*function)(struct hrtimer *); |
67 | int (*function)(void *); | ||
68 | void *data; | ||
69 | struct hrtimer_base *base; | 57 | struct hrtimer_base *base; |
70 | }; | 58 | }; |
71 | 59 | ||
72 | /** | 60 | /** |
73 | * struct hrtimer_base - the timer base for a specific clock | 61 | * struct hrtimer_base - the timer base for a specific clock |
74 | * | 62 | * |
75 | * @index: clock type index for per_cpu support when moving a timer | 63 | * @index: clock type index for per_cpu support when moving a timer |
76 | * to a base on another cpu. | 64 | * to a base on another cpu. |
77 | * @lock: lock protecting the base and associated timers | 65 | * @lock: lock protecting the base and associated timers |
78 | * @active: red black tree root node for the active timers | 66 | * @active: red black tree root node for the active timers |
79 | * @first: pointer to the timer node which expires first | 67 | * @first: pointer to the timer node which expires first |
80 | * @resolution: the resolution of the clock, in nanoseconds | 68 | * @resolution: the resolution of the clock, in nanoseconds |
81 | * @get_time: function to retrieve the current time of the clock | 69 | * @get_time: function to retrieve the current time of the clock |
82 | * @curr_timer: the timer which is executing a callback right now | 70 | * @get_softirq_time: function to retrieve the current time from the softirq |
71 | * @curr_timer: the timer which is executing a callback right now | ||
72 | * @softirq_time: the time when running the hrtimer queue in the softirq | ||
83 | */ | 73 | */ |
84 | struct hrtimer_base { | 74 | struct hrtimer_base { |
85 | clockid_t index; | 75 | clockid_t index; |
@@ -88,7 +78,9 @@ struct hrtimer_base { | |||
88 | struct rb_node *first; | 78 | struct rb_node *first; |
89 | ktime_t resolution; | 79 | ktime_t resolution; |
90 | ktime_t (*get_time)(void); | 80 | ktime_t (*get_time)(void); |
81 | ktime_t (*get_softirq_time)(void); | ||
91 | struct hrtimer *curr_timer; | 82 | struct hrtimer *curr_timer; |
83 | ktime_t softirq_time; | ||
92 | }; | 84 | }; |
93 | 85 | ||
94 | /* | 86 | /* |
@@ -122,11 +114,12 @@ extern ktime_t hrtimer_get_next_event(void); | |||
122 | 114 | ||
123 | static inline int hrtimer_active(const struct hrtimer *timer) | 115 | static inline int hrtimer_active(const struct hrtimer *timer) |
124 | { | 116 | { |
125 | return timer->state == HRTIMER_PENDING; | 117 | return timer->node.rb_parent != HRTIMER_INACTIVE; |
126 | } | 118 | } |
127 | 119 | ||
128 | /* Forward a hrtimer so it expires after now: */ | 120 | /* Forward a hrtimer so it expires after now: */ |
129 | extern unsigned long hrtimer_forward(struct hrtimer *timer, ktime_t interval); | 121 | extern unsigned long |
122 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval); | ||
130 | 123 | ||
131 | /* Precise sleep: */ | 124 | /* Precise sleep: */ |
132 | extern long hrtimer_nanosleep(struct timespec *rqtp, | 125 | extern long hrtimer_nanosleep(struct timespec *rqtp, |
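The callback now receives the struct hrtimer itself instead of a void * (the separate data member is gone, so callers embed the timer and use container_of()), and hrtimer_forward() takes the current time explicitly. A hedged sketch of a periodic callback under the new signature; reading the base time from timer->base->softirq_time matches the field added above, but whether a given caller should use that or the clock's get_time() depends on context.

#include <linux/kernel.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>

struct example_poller {
	struct hrtimer	timer;
	ktime_t		period;
};

static int example_poll_fn(struct hrtimer *timer)
{
	struct example_poller *p = container_of(timer, struct example_poller,
						timer);

	/* ... do the periodic work ... */

	/* forward relative to the time the softirq started processing;
	 * an assumption about which "now" fits this caller */
	hrtimer_forward(timer, timer->base->softirq_time, p->period);
	return HRTIMER_RESTART;
}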
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 5a9d8c599171..dd7d627bf66f 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
@@ -950,9 +950,7 @@ static inline int i2o_pool_alloc(struct i2o_pool *pool, const char *name, | |||
950 | if (!pool->slab) | 950 | if (!pool->slab) |
951 | goto free_name; | 951 | goto free_name; |
952 | 952 | ||
953 | pool->mempool = | 953 | pool->mempool = mempool_create_slab_pool(min_nr, pool->slab); |
954 | mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, | ||
955 | pool->slab); | ||
956 | if (!pool->mempool) | 954 | if (!pool->mempool) |
957 | goto free_slab; | 955 | goto free_slab; |
958 | 956 | ||
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index d6276e60b3bf..0a84b56935c2 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -36,6 +36,7 @@ | |||
36 | 36 | ||
37 | #include <linux/ipmi_msgdefs.h> | 37 | #include <linux/ipmi_msgdefs.h> |
38 | #include <linux/compiler.h> | 38 | #include <linux/compiler.h> |
39 | #include <linux/device.h> | ||
39 | 40 | ||
40 | /* | 41 | /* |
41 | * This file describes an interface to an IPMI driver. You have to | 42 | * This file describes an interface to an IPMI driver. You have to |
@@ -397,7 +398,7 @@ struct ipmi_smi_watcher | |||
397 | the watcher list. So you can add and remove users from the | 398 | the watcher list. So you can add and remove users from the |
398 | IPMI interface, send messages, etc., but you cannot add | 399 | IPMI interface, send messages, etc., but you cannot add |
399 | or remove SMI watchers or SMI interfaces. */ | 400 | or remove SMI watchers or SMI interfaces. */ |
400 | void (*new_smi)(int if_num); | 401 | void (*new_smi)(int if_num, struct device *dev); |
401 | void (*smi_gone)(int if_num); | 402 | void (*smi_gone)(int if_num); |
402 | }; | 403 | }; |
403 | 404 | ||
diff --git a/include/linux/ipmi_msgdefs.h b/include/linux/ipmi_msgdefs.h index 03bc64dc2ec1..22f5e2afda4f 100644 --- a/include/linux/ipmi_msgdefs.h +++ b/include/linux/ipmi_msgdefs.h | |||
@@ -47,6 +47,7 @@ | |||
47 | #define IPMI_NETFN_APP_RESPONSE 0x07 | 47 | #define IPMI_NETFN_APP_RESPONSE 0x07 |
48 | #define IPMI_GET_DEVICE_ID_CMD 0x01 | 48 | #define IPMI_GET_DEVICE_ID_CMD 0x01 |
49 | #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 | 49 | #define IPMI_CLEAR_MSG_FLAGS_CMD 0x30 |
50 | #define IPMI_GET_DEVICE_GUID_CMD 0x08 | ||
50 | #define IPMI_GET_MSG_FLAGS_CMD 0x31 | 51 | #define IPMI_GET_MSG_FLAGS_CMD 0x31 |
51 | #define IPMI_SEND_MSG_CMD 0x34 | 52 | #define IPMI_SEND_MSG_CMD 0x34 |
52 | #define IPMI_GET_MSG_CMD 0x33 | 53 | #define IPMI_GET_MSG_CMD 0x33 |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index e36ee157ad67..53571288a9fc 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -37,6 +37,9 @@ | |||
37 | #include <linux/ipmi_msgdefs.h> | 37 | #include <linux/ipmi_msgdefs.h> |
38 | #include <linux/proc_fs.h> | 38 | #include <linux/proc_fs.h> |
39 | #include <linux/module.h> | 39 | #include <linux/module.h> |
40 | #include <linux/device.h> | ||
41 | #include <linux/platform_device.h> | ||
42 | #include <linux/ipmi_smi.h> | ||
40 | 43 | ||
41 | /* This files describes the interface for IPMI system management interface | 44 | /* This files describes the interface for IPMI system management interface |
42 | drivers to bind into the IPMI message handler. */ | 45 | drivers to bind into the IPMI message handler. */ |
@@ -113,12 +116,52 @@ struct ipmi_smi_handlers | |||
113 | void (*dec_usecount)(void *send_info); | 116 | void (*dec_usecount)(void *send_info); |
114 | }; | 117 | }; |
115 | 118 | ||
119 | struct ipmi_device_id { | ||
120 | unsigned char device_id; | ||
121 | unsigned char device_revision; | ||
122 | unsigned char firmware_revision_1; | ||
123 | unsigned char firmware_revision_2; | ||
124 | unsigned char ipmi_version; | ||
125 | unsigned char additional_device_support; | ||
126 | unsigned int manufacturer_id; | ||
127 | unsigned int product_id; | ||
128 | unsigned char aux_firmware_revision[4]; | ||
129 | unsigned int aux_firmware_revision_set : 1; | ||
130 | }; | ||
131 | |||
132 | #define ipmi_version_major(v) ((v)->ipmi_version & 0xf) | ||
133 | #define ipmi_version_minor(v) ((v)->ipmi_version >> 4) | ||
134 | |||
135 | /* Take a pointer to a raw data buffer and a length and extract device | ||
136 | id information from it. The first byte of data must point to the | ||
137 | byte from the get device id response after the completion code. | ||
138 | The caller is responsible for making sure the length is at least | ||
139 | 11 and the command completed without error. */ | ||
140 | static inline void ipmi_demangle_device_id(unsigned char *data, | ||
141 | unsigned int data_len, | ||
142 | struct ipmi_device_id *id) | ||
143 | { | ||
144 | id->device_id = data[0]; | ||
145 | id->device_revision = data[1]; | ||
146 | id->firmware_revision_1 = data[2]; | ||
147 | id->firmware_revision_2 = data[3]; | ||
148 | id->ipmi_version = data[4]; | ||
149 | id->additional_device_support = data[5]; | ||
150 | id->manufacturer_id = data[6] | (data[7] << 8) | (data[8] << 16); | ||
151 | id->product_id = data[9] | (data[10] << 8); | ||
152 | if (data_len >= 15) { | ||
153 | memcpy(id->aux_firmware_revision, data+11, 4); | ||
154 | id->aux_firmware_revision_set = 1; | ||
155 | } else | ||
156 | id->aux_firmware_revision_set = 0; | ||
157 | } | ||
158 | |||
116 | /* Add a low-level interface to the IPMI driver. Note that if the | 159 | /* Add a low-level interface to the IPMI driver. Note that if the |
117 | interface doesn't know its slave address, it should pass in zero. */ | 160 | interface doesn't know its slave address, it should pass in zero. */ |
118 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 161 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, |
119 | void *send_info, | 162 | void *send_info, |
120 | unsigned char version_major, | 163 | struct ipmi_device_id *device_id, |
121 | unsigned char version_minor, | 164 | struct device *dev, |
122 | unsigned char slave_addr, | 165 | unsigned char slave_addr, |
123 | ipmi_smi_t *intf); | 166 | ipmi_smi_t *intf); |
124 | 167 | ||
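ipmi_register_smi() now takes a filled-in struct ipmi_device_id plus the owning struct device instead of raw version numbers. A hedged sketch of how a low-level interface driver might build that structure from a Get Device ID response and register itself; handlers, send_info and dev are placeholders supplied by the caller, and the length check mirrors the comment above.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/ipmi_smi.h>

/* Sketch: rsp points at the Get Device ID response data *after* the
 * completion code; rsp_len is the number of bytes available there. */
static int example_register(struct ipmi_smi_handlers *handlers,
			    void *send_info, struct device *dev,
			    unsigned char *rsp, unsigned int rsp_len,
			    ipmi_smi_t *intf)
{
	struct ipmi_device_id id;

	if (rsp_len < 11)
		return -EINVAL;

	ipmi_demangle_device_id(rsp, rsp_len, &id);
	printk(KERN_INFO "example: IPMI version %d.%d\n",
	       ipmi_version_major(&id), ipmi_version_minor(&id));

	return ipmi_register_smi(handlers, send_info, &id, dev,
				 0 /* slave address unknown */, intf);
}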
diff --git a/include/linux/jbd.h b/include/linux/jbd.h index 4fc7dffd66ef..6a425e370cb3 100644 --- a/include/linux/jbd.h +++ b/include/linux/jbd.h | |||
@@ -895,7 +895,7 @@ extern int journal_dirty_metadata (handle_t *, struct buffer_head *); | |||
895 | extern void journal_release_buffer (handle_t *, struct buffer_head *); | 895 | extern void journal_release_buffer (handle_t *, struct buffer_head *); |
896 | extern int journal_forget (handle_t *, struct buffer_head *); | 896 | extern int journal_forget (handle_t *, struct buffer_head *); |
897 | extern void journal_sync_buffer (struct buffer_head *); | 897 | extern void journal_sync_buffer (struct buffer_head *); |
898 | extern int journal_invalidatepage(journal_t *, | 898 | extern void journal_invalidatepage(journal_t *, |
899 | struct page *, unsigned long); | 899 | struct page *, unsigned long); |
900 | extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); | 900 | extern int journal_try_to_free_buffers(journal_t *, struct page *, gfp_t); |
901 | extern int journal_stop(handle_t *); | 901 | extern int journal_stop(handle_t *); |
diff --git a/include/linux/ktime.h b/include/linux/ktime.h index f3dec45ef874..62bc57580707 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h | |||
@@ -64,9 +64,6 @@ typedef union { | |||
64 | 64 | ||
65 | #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) | 65 | #if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) |
66 | 66 | ||
67 | /* Define a ktime_t variable and initialize it to zero: */ | ||
68 | #define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } | ||
69 | |||
70 | /** | 67 | /** |
71 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value | 68 | * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value |
72 | * | 69 | * |
@@ -113,9 +110,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
113 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ | 110 | /* Map the ktime_t to timeval conversion to ns_to_timeval function */ |
114 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) | 111 | #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) |
115 | 112 | ||
116 | /* Map the ktime_t to clock_t conversion to the inline in jiffies.h: */ | ||
117 | #define ktime_to_clock_t(kt) nsec_to_clock_t((kt).tv64) | ||
118 | |||
119 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ | 113 | /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ |
120 | #define ktime_to_ns(kt) ((kt).tv64) | 114 | #define ktime_to_ns(kt) ((kt).tv64) |
121 | 115 | ||
@@ -136,9 +130,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) | |||
136 | * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC | 130 | * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC |
137 | */ | 131 | */ |
138 | 132 | ||
139 | /* Define a ktime_t variable and initialize it to zero: */ | ||
140 | #define DEFINE_KTIME(kt) ktime_t kt = { .tv64 = 0 } | ||
141 | |||
142 | /* Set a ktime_t variable to a value in sec/nsec representation: */ | 133 | /* Set a ktime_t variable to a value in sec/nsec representation: */ |
143 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) | 134 | static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) |
144 | { | 135 | { |
@@ -255,17 +246,6 @@ static inline struct timeval ktime_to_timeval(const ktime_t kt) | |||
255 | } | 246 | } |
256 | 247 | ||
257 | /** | 248 | /** |
258 | * ktime_to_clock_t - convert a ktime_t variable to clock_t format | ||
259 | * @kt: the ktime_t variable to convert | ||
260 | * | ||
261 | * Returns a clock_t variable with the converted value | ||
262 | */ | ||
263 | static inline clock_t ktime_to_clock_t(const ktime_t kt) | ||
264 | { | ||
265 | return nsec_to_clock_t( (u64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec); | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds | 249 | * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds |
270 | * @kt: the ktime_t variable to convert | 250 | * @kt: the ktime_t variable to convert |
271 | * | 251 | * |
diff --git a/include/linux/mempool.h b/include/linux/mempool.h index f2427d7394b0..9be484d11283 100644 --- a/include/linux/mempool.h +++ b/include/linux/mempool.h | |||
@@ -6,6 +6,8 @@ | |||
6 | 6 | ||
7 | #include <linux/wait.h> | 7 | #include <linux/wait.h> |
8 | 8 | ||
9 | struct kmem_cache; | ||
10 | |||
9 | typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); | 11 | typedef void * (mempool_alloc_t)(gfp_t gfp_mask, void *pool_data); |
10 | typedef void (mempool_free_t)(void *element, void *pool_data); | 12 | typedef void (mempool_free_t)(void *element, void *pool_data); |
11 | 13 | ||
@@ -37,5 +39,41 @@ extern void mempool_free(void *element, mempool_t *pool); | |||
37 | */ | 39 | */ |
38 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); | 40 | void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data); |
39 | void mempool_free_slab(void *element, void *pool_data); | 41 | void mempool_free_slab(void *element, void *pool_data); |
42 | static inline mempool_t * | ||
43 | mempool_create_slab_pool(int min_nr, struct kmem_cache *kc) | ||
44 | { | ||
45 | return mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, | ||
46 | (void *) kc); | ||
47 | } | ||
48 | |||
49 | /* | ||
50 | * 2 mempool_alloc_t's and a mempool_free_t to kmalloc/kzalloc and kfree | ||
51 | * the amount of memory specified by pool_data | ||
52 | */ | ||
53 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data); | ||
54 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data); | ||
55 | void mempool_kfree(void *element, void *pool_data); | ||
56 | static inline mempool_t *mempool_create_kmalloc_pool(int min_nr, size_t size) | ||
57 | { | ||
58 | return mempool_create(min_nr, mempool_kmalloc, mempool_kfree, | ||
59 | (void *) size); | ||
60 | } | ||
61 | static inline mempool_t *mempool_create_kzalloc_pool(int min_nr, size_t size) | ||
62 | { | ||
63 | return mempool_create(min_nr, mempool_kzalloc, mempool_kfree, | ||
64 | (void *) size); | ||
65 | } | ||
66 | |||
67 | /* | ||
68 | * A mempool_alloc_t and mempool_free_t for a simple page allocator that | ||
69 | * allocates pages of the order specified by pool_data | ||
70 | */ | ||
71 | void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data); | ||
72 | void mempool_free_pages(void *element, void *pool_data); | ||
73 | static inline mempool_t *mempool_create_page_pool(int min_nr, int order) | ||
74 | { | ||
75 | return mempool_create(min_nr, mempool_alloc_pages, mempool_free_pages, | ||
76 | (void *)(long)order); | ||
77 | } | ||
40 | 78 | ||
41 | #endif /* _LINUX_MEMPOOL_H */ | 79 | #endif /* _LINUX_MEMPOOL_H */ |
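The new wrappers bundle the common allocator/free pairs so callers no longer open-code mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, cachep) and friends; the i2o change earlier in this diff is one such conversion. A small hedged sketch using the kmalloc-backed variant (the element size and pool depth are arbitrary):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mempool.h>

#define EXAMPLE_BUF_SIZE	512	/* arbitrary element size */

static mempool_t *example_pool;

static int example_init(void)
{
	/* guarantee that at least 4 buffers can always be allocated */
	example_pool = mempool_create_kmalloc_pool(4, EXAMPLE_BUF_SIZE);
	if (!example_pool)
		return -ENOMEM;
	return 0;
}

static void example_use(void)
{
	void *buf = mempool_alloc(example_pool, GFP_KERNEL);

	if (!buf)
		return;
	/* ... fill and submit the buffer ... */
	mempool_free(buf, example_pool);
}

static void example_exit(void)
{
	mempool_destroy(example_pool);
}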
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index aa6322d45198..cb224cf653b1 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/config.h> | 4 | #include <linux/config.h> |
5 | #include <linux/slab.h> | 5 | #include <linux/slab.h> |
6 | #include <linux/fs.h> | 6 | #include <linux/fs.h> |
7 | #include <linux/spinlock.h> | ||
7 | #include <asm/atomic.h> | 8 | #include <asm/atomic.h> |
8 | 9 | ||
9 | /* | 10 | /* |
@@ -55,7 +56,7 @@ struct proc_dir_entry { | |||
55 | nlink_t nlink; | 56 | nlink_t nlink; |
56 | uid_t uid; | 57 | uid_t uid; |
57 | gid_t gid; | 58 | gid_t gid; |
58 | unsigned long size; | 59 | loff_t size; |
59 | struct inode_operations * proc_iops; | 60 | struct inode_operations * proc_iops; |
60 | struct file_operations * proc_fops; | 61 | struct file_operations * proc_fops; |
61 | get_info_t *get_info; | 62 | get_info_t *get_info; |
@@ -92,6 +93,8 @@ extern struct proc_dir_entry *proc_bus; | |||
92 | extern struct proc_dir_entry *proc_root_driver; | 93 | extern struct proc_dir_entry *proc_root_driver; |
93 | extern struct proc_dir_entry *proc_root_kcore; | 94 | extern struct proc_dir_entry *proc_root_kcore; |
94 | 95 | ||
96 | extern spinlock_t proc_subdir_lock; | ||
97 | |||
95 | extern void proc_root_init(void); | 98 | extern void proc_root_init(void); |
96 | extern void proc_misc_init(void); | 99 | extern void proc_misc_init(void); |
97 | 100 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index e0054c1b9a09..036d14d2bf90 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -402,6 +402,7 @@ struct signal_struct { | |||
402 | 402 | ||
403 | /* ITIMER_REAL timer for the process */ | 403 | /* ITIMER_REAL timer for the process */ |
404 | struct hrtimer real_timer; | 404 | struct hrtimer real_timer; |
405 | struct task_struct *tsk; | ||
405 | ktime_t it_real_incr; | 406 | ktime_t it_real_incr; |
406 | 407 | ||
407 | /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ | 408 | /* ITIMER_PROF and ITIMER_VIRTUAL timers for the process */ |
diff --git a/include/linux/serio.h b/include/linux/serio.h index aa4d6493a034..690aabca8ed0 100644 --- a/include/linux/serio.h +++ b/include/linux/serio.h | |||
@@ -119,7 +119,7 @@ static inline void serio_cleanup(struct serio *serio) | |||
119 | } | 119 | } |
120 | 120 | ||
121 | /* | 121 | /* |
122 | * Use the following fucntions to manipulate serio's per-port | 122 | * Use the following functions to manipulate serio's per-port |
123 | * driver-specific data. | 123 | * driver-specific data. |
124 | */ | 124 | */ |
125 | static inline void *serio_get_drvdata(struct serio *serio) | 125 | static inline void *serio_get_drvdata(struct serio *serio) |
@@ -133,7 +133,7 @@ static inline void serio_set_drvdata(struct serio *serio, void *data) | |||
133 | } | 133 | } |
134 | 134 | ||
135 | /* | 135 | /* |
136 | * Use the following fucntions to protect critical sections in | 136 | * Use the following functions to protect critical sections in |
137 | * driver code from port's interrupt handler | 137 | * driver code from port's interrupt handler |
138 | */ | 138 | */ |
139 | static inline void serio_pause_rx(struct serio *serio) | 139 | static inline void serio_pause_rx(struct serio *serio) |
@@ -147,7 +147,7 @@ static inline void serio_continue_rx(struct serio *serio) | |||
147 | } | 147 | } |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * Use the following fucntions to pin serio's driver in process context | 150 | * Use the following functions to pin serio's driver in process context |
151 | */ | 151 | */ |
152 | static inline int serio_pin_driver(struct serio *serio) | 152 | static inline int serio_pin_driver(struct serio *serio) |
153 | { | 153 | { |
diff --git a/include/linux/smp.h b/include/linux/smp.h index d699a16b0cb2..e2fa3ab4afc5 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -82,7 +82,11 @@ void smp_prepare_boot_cpu(void); | |||
82 | */ | 82 | */ |
83 | #define raw_smp_processor_id() 0 | 83 | #define raw_smp_processor_id() 0 |
84 | #define hard_smp_processor_id() 0 | 84 | #define hard_smp_processor_id() 0 |
85 | #define smp_call_function(func,info,retry,wait) ({ 0; }) | 85 | static inline int up_smp_call_function(void) |
86 | { | ||
87 | return 0; | ||
88 | } | ||
89 | #define smp_call_function(func,info,retry,wait) (up_smp_call_function()) | ||
86 | #define on_each_cpu(func,info,retry,wait) \ | 90 | #define on_each_cpu(func,info,retry,wait) \ |
87 | ({ \ | 91 | ({ \ |
88 | local_irq_disable(); \ | 92 | local_irq_disable(); \ |
diff --git a/include/linux/stat.h b/include/linux/stat.h index 8ff2a122dfef..8669291352db 100644 --- a/include/linux/stat.h +++ b/include/linux/stat.h | |||
@@ -69,7 +69,7 @@ struct kstat { | |||
69 | struct timespec mtime; | 69 | struct timespec mtime; |
70 | struct timespec ctime; | 70 | struct timespec ctime; |
71 | unsigned long blksize; | 71 | unsigned long blksize; |
72 | unsigned long blocks; | 72 | unsigned long long blocks; |
73 | }; | 73 | }; |
74 | 74 | ||
75 | #endif | 75 | #endif |
diff --git a/include/linux/statfs.h b/include/linux/statfs.h index ad83a2bdb821..b34cc829f98d 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h | |||
@@ -8,11 +8,11 @@ | |||
8 | struct kstatfs { | 8 | struct kstatfs { |
9 | long f_type; | 9 | long f_type; |
10 | long f_bsize; | 10 | long f_bsize; |
11 | sector_t f_blocks; | 11 | u64 f_blocks; |
12 | sector_t f_bfree; | 12 | u64 f_bfree; |
13 | sector_t f_bavail; | 13 | u64 f_bavail; |
14 | sector_t f_files; | 14 | u64 f_files; |
15 | sector_t f_ffree; | 15 | u64 f_ffree; |
16 | __kernel_fsid_t f_fsid; | 16 | __kernel_fsid_t f_fsid; |
17 | long f_namelen; | 17 | long f_namelen; |
18 | long f_frsize; | 18 | long f_frsize; |
diff --git a/include/linux/time.h b/include/linux/time.h index bf0e785e2e03..0cd696cee998 100644 --- a/include/linux/time.h +++ b/include/linux/time.h | |||
@@ -73,12 +73,6 @@ extern void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec); | |||
73 | #define timespec_valid(ts) \ | 73 | #define timespec_valid(ts) \ |
74 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) | 74 | (((ts)->tv_sec >= 0) && (((unsigned long) (ts)->tv_nsec) < NSEC_PER_SEC)) |
75 | 75 | ||
76 | /* | ||
77 | * 64-bit nanosec type. Large enough to span 292+ years in nanosecond | ||
78 | * resolution. Ought to be enough for a while. | ||
79 | */ | ||
80 | typedef s64 nsec_t; | ||
81 | |||
82 | extern struct timespec xtime; | 76 | extern struct timespec xtime; |
83 | extern struct timespec wall_to_monotonic; | 77 | extern struct timespec wall_to_monotonic; |
84 | extern seqlock_t xtime_lock; | 78 | extern seqlock_t xtime_lock; |
@@ -114,9 +108,9 @@ extern struct timespec timespec_trunc(struct timespec t, unsigned gran); | |||
114 | * Returns the scalar nanosecond representation of the timespec | 108 | * Returns the scalar nanosecond representation of the timespec |
115 | * parameter. | 109 | * parameter. |
116 | */ | 110 | */ |
117 | static inline nsec_t timespec_to_ns(const struct timespec *ts) | 111 | static inline s64 timespec_to_ns(const struct timespec *ts) |
118 | { | 112 | { |
119 | return ((nsec_t) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; | 113 | return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; |
120 | } | 114 | } |
121 | 115 | ||
122 | /** | 116 | /** |
@@ -126,9 +120,9 @@ static inline nsec_t timespec_to_ns(const struct timespec *ts) | |||
126 | * Returns the scalar nanosecond representation of the timeval | 120 | * Returns the scalar nanosecond representation of the timeval |
127 | * parameter. | 121 | * parameter. |
128 | */ | 122 | */ |
129 | static inline nsec_t timeval_to_ns(const struct timeval *tv) | 123 | static inline s64 timeval_to_ns(const struct timeval *tv) |
130 | { | 124 | { |
131 | return ((nsec_t) tv->tv_sec * NSEC_PER_SEC) + | 125 | return ((s64) tv->tv_sec * NSEC_PER_SEC) + |
132 | tv->tv_usec * NSEC_PER_USEC; | 126 | tv->tv_usec * NSEC_PER_USEC; |
133 | } | 127 | } |
134 | 128 | ||
@@ -138,7 +132,7 @@ static inline nsec_t timeval_to_ns(const struct timeval *tv) | |||
138 | * | 132 | * |
139 | * Returns the timespec representation of the nsec parameter. | 133 | * Returns the timespec representation of the nsec parameter. |
140 | */ | 134 | */ |
141 | extern struct timespec ns_to_timespec(const nsec_t nsec); | 135 | extern struct timespec ns_to_timespec(const s64 nsec); |
142 | 136 | ||
143 | /** | 137 | /** |
144 | * ns_to_timeval - Convert nanoseconds to timeval | 138 | * ns_to_timeval - Convert nanoseconds to timeval |
@@ -146,7 +140,7 @@ extern struct timespec ns_to_timespec(const nsec_t nsec); | |||
146 | * | 140 | * |
147 | * Returns the timeval representation of the nsec parameter. | 141 | * Returns the timeval representation of the nsec parameter. |
148 | */ | 142 | */ |
149 | extern struct timeval ns_to_timeval(const nsec_t nsec); | 143 | extern struct timeval ns_to_timeval(const s64 nsec); |
150 | 144 | ||
151 | #endif /* __KERNEL__ */ | 145 | #endif /* __KERNEL__ */ |
152 | 146 | ||
diff --git a/include/linux/timer.h b/include/linux/timer.h index ee5a09e806e8..b5caabca553c 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -96,6 +96,7 @@ static inline void add_timer(struct timer_list *timer) | |||
96 | 96 | ||
97 | extern void init_timers(void); | 97 | extern void init_timers(void); |
98 | extern void run_local_timers(void); | 98 | extern void run_local_timers(void); |
99 | extern int it_real_fn(void *); | 99 | struct hrtimer; |
100 | extern int it_real_fn(struct hrtimer *); | ||
100 | 101 | ||
101 | #endif | 102 | #endif |
diff --git a/include/linux/timex.h b/include/linux/timex.h index 82dc9ae79d37..03914b7e41b1 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
@@ -307,6 +307,8 @@ time_interpolator_reset(void) | |||
307 | /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ | 307 | /* Returns how long ticks are at present, in ns / 2^(SHIFT_SCALE-10). */ |
308 | extern u64 current_tick_length(void); | 308 | extern u64 current_tick_length(void); |
309 | 309 | ||
310 | extern int do_adjtimex(struct timex *); | ||
311 | |||
310 | #endif /* KERNEL */ | 312 | #endif /* KERNEL */ |
311 | 313 | ||
312 | #endif /* LINUX_TIMEX_H */ | 314 | #endif /* LINUX_TIMEX_H */ |
diff --git a/include/linux/types.h b/include/linux/types.h index 54ae2d59e71b..1046c7ad86d9 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -137,6 +137,10 @@ typedef __s64 int64_t; | |||
137 | typedef unsigned long sector_t; | 137 | typedef unsigned long sector_t; |
138 | #endif | 138 | #endif |
139 | 139 | ||
140 | #ifndef HAVE_BLKCNT_T | ||
141 | typedef unsigned long blkcnt_t; | ||
142 | #endif | ||
143 | |||
140 | /* | 144 | /* |
141 | * The type of an index into the pagecache. Use a #define so asm/types.h | 145 | * The type of an index into the pagecache. Use a #define so asm/types.h |
142 | * can override it. | 146 | * can override it. |
diff --git a/init/initramfs.c b/init/initramfs.c index 77b934cccefe..679d870d991b 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -519,7 +519,7 @@ void __init populate_rootfs(void) | |||
519 | return; | 519 | return; |
520 | } | 520 | } |
521 | printk("it isn't (%s); looks like an initrd\n", err); | 521 | printk("it isn't (%s); looks like an initrd\n", err); |
522 | fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 700); | 522 | fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); |
523 | if (fd >= 0) { | 523 | if (fd >= 0) { |
524 | sys_write(fd, (char *)initrd_start, | 524 | sys_write(fd, (char *)initrd_start, |
525 | initrd_end - initrd_start); | 525 | initrd_end - initrd_start); |
diff --git a/init/main.c b/init/main.c index 006dcd547dc2..64466ea1984c 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -645,24 +645,6 @@ static void run_init_process(char *init_filename) | |||
645 | execve(init_filename, argv_init, envp_init); | 645 | execve(init_filename, argv_init, envp_init); |
646 | } | 646 | } |
647 | 647 | ||
648 | static inline void fixup_cpu_present_map(void) | ||
649 | { | ||
650 | #ifdef CONFIG_SMP | ||
651 | int i; | ||
652 | |||
653 | /* | ||
654 | * If arch is not hotplug ready and did not populate | ||
655 | * cpu_present_map, just make cpu_present_map same as cpu_possible_map | ||
656 | * for other cpu bringup code to function as normal. e.g smp_init() etc. | ||
657 | */ | ||
658 | if (cpus_empty(cpu_present_map)) { | ||
659 | for_each_cpu(i) { | ||
660 | cpu_set(i, cpu_present_map); | ||
661 | } | ||
662 | } | ||
663 | #endif | ||
664 | } | ||
665 | |||
666 | static int init(void * unused) | 648 | static int init(void * unused) |
667 | { | 649 | { |
668 | lock_kernel(); | 650 | lock_kernel(); |
@@ -684,7 +666,6 @@ static int init(void * unused) | |||
684 | 666 | ||
685 | do_pre_smp_initcalls(); | 667 | do_pre_smp_initcalls(); |
686 | 668 | ||
687 | fixup_cpu_present_map(); | ||
688 | smp_init(); | 669 | smp_init(); |
689 | sched_init_smp(); | 670 | sched_init_smp(); |
690 | 671 | ||
diff --git a/ipc/compat.c b/ipc/compat.c index 1fe95f6659dd..a544dfbb082a 100644 --- a/ipc/compat.c +++ b/ipc/compat.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/syscalls.h> | 31 | #include <linux/syscalls.h> |
32 | 32 | ||
33 | #include <asm/semaphore.h> | 33 | #include <linux/mutex.h> |
34 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
35 | 35 | ||
36 | #include "util.h" | 36 | #include "util.h" |
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 85c52fd26bff..a3bb0c8201c7 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c | |||
@@ -25,6 +25,8 @@ | |||
25 | #include <linux/netlink.h> | 25 | #include <linux/netlink.h> |
26 | #include <linux/syscalls.h> | 26 | #include <linux/syscalls.h> |
27 | #include <linux/signal.h> | 27 | #include <linux/signal.h> |
28 | #include <linux/mutex.h> | ||
29 | |||
28 | #include <net/sock.h> | 30 | #include <net/sock.h> |
29 | #include "util.h" | 31 | #include "util.h" |
30 | 32 | ||
@@ -760,7 +762,7 @@ out_unlock: | |||
760 | * The receiver accepts the message and returns without grabbing the queue | 762 | * The receiver accepts the message and returns without grabbing the queue |
761 | * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers | 763 | * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers |
762 | * are necessary. The same algorithm is used for sysv semaphores, see | 764 | * are necessary. The same algorithm is used for sysv semaphores, see |
763 | * ipc/sem.c fore more details. | 765 | * ipc/mutex.c fore more details. |
764 | * | 766 | * |
765 | * The same algorithm is used for senders. | 767 | * The same algorithm is used for senders. |
766 | */ | 768 | */ |
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
29 | #include <linux/audit.h> | 29 | #include <linux/audit.h> |
30 | #include <linux/seq_file.h> | 30 | #include <linux/seq_file.h> |
31 | #include <linux/mutex.h> | ||
32 | |||
31 | #include <asm/current.h> | 33 | #include <asm/current.h> |
32 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
33 | #include "util.h" | 35 | #include "util.h" |
@@ -179,8 +181,8 @@ static void expunge_all(struct msg_queue* msq, int res) | |||
179 | * removes the message queue from message queue ID | 181 | * removes the message queue from message queue ID |
180 | * array, and cleans up all the messages associated with this queue. | 182 | * array, and cleans up all the messages associated with this queue. |
181 | * | 183 | * |
182 | * msg_ids.sem and the spinlock for this message queue is hold | 184 | * msg_ids.mutex and the spinlock for this message queue is hold |
183 | * before freeque() is called. msg_ids.sem remains locked on exit. | 185 | * before freeque() is called. msg_ids.mutex remains locked on exit. |
184 | */ | 186 | */ |
185 | static void freeque (struct msg_queue *msq, int id) | 187 | static void freeque (struct msg_queue *msq, int id) |
186 | { | 188 | { |
@@ -208,7 +210,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) | |||
208 | int id, ret = -EPERM; | 210 | int id, ret = -EPERM; |
209 | struct msg_queue *msq; | 211 | struct msg_queue *msq; |
210 | 212 | ||
211 | down(&msg_ids.sem); | 213 | mutex_lock(&msg_ids.mutex); |
212 | if (key == IPC_PRIVATE) | 214 | if (key == IPC_PRIVATE) |
213 | ret = newque(key, msgflg); | 215 | ret = newque(key, msgflg); |
214 | else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ | 216 | else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ |
@@ -231,7 +233,7 @@ asmlinkage long sys_msgget (key_t key, int msgflg) | |||
231 | } | 233 | } |
232 | msg_unlock(msq); | 234 | msg_unlock(msq); |
233 | } | 235 | } |
234 | up(&msg_ids.sem); | 236 | mutex_unlock(&msg_ids.mutex); |
235 | return ret; | 237 | return ret; |
236 | } | 238 | } |
237 | 239 | ||
@@ -361,7 +363,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
361 | msginfo.msgmnb = msg_ctlmnb; | 363 | msginfo.msgmnb = msg_ctlmnb; |
362 | msginfo.msgssz = MSGSSZ; | 364 | msginfo.msgssz = MSGSSZ; |
363 | msginfo.msgseg = MSGSEG; | 365 | msginfo.msgseg = MSGSEG; |
364 | down(&msg_ids.sem); | 366 | mutex_lock(&msg_ids.mutex); |
365 | if (cmd == MSG_INFO) { | 367 | if (cmd == MSG_INFO) { |
366 | msginfo.msgpool = msg_ids.in_use; | 368 | msginfo.msgpool = msg_ids.in_use; |
367 | msginfo.msgmap = atomic_read(&msg_hdrs); | 369 | msginfo.msgmap = atomic_read(&msg_hdrs); |
@@ -372,7 +374,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
372 | msginfo.msgtql = MSGTQL; | 374 | msginfo.msgtql = MSGTQL; |
373 | } | 375 | } |
374 | max_id = msg_ids.max_id; | 376 | max_id = msg_ids.max_id; |
375 | up(&msg_ids.sem); | 377 | mutex_unlock(&msg_ids.mutex); |
376 | if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) | 378 | if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) |
377 | return -EFAULT; | 379 | return -EFAULT; |
378 | return (max_id < 0) ? 0: max_id; | 380 | return (max_id < 0) ? 0: max_id; |
@@ -435,7 +437,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
435 | return -EINVAL; | 437 | return -EINVAL; |
436 | } | 438 | } |
437 | 439 | ||
438 | down(&msg_ids.sem); | 440 | mutex_lock(&msg_ids.mutex); |
439 | msq = msg_lock(msqid); | 441 | msq = msg_lock(msqid); |
440 | err=-EINVAL; | 442 | err=-EINVAL; |
441 | if (msq == NULL) | 443 | if (msq == NULL) |
@@ -489,7 +491,7 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) | |||
489 | } | 491 | } |
490 | err = 0; | 492 | err = 0; |
491 | out_up: | 493 | out_up: |
492 | up(&msg_ids.sem); | 494 | mutex_unlock(&msg_ids.mutex); |
493 | return err; | 495 | return err; |
494 | out_unlock_up: | 496 | out_unlock_up: |
495 | msg_unlock(msq); | 497 | msg_unlock(msq); |
@@ -75,6 +75,8 @@ | |||
75 | #include <linux/audit.h> | 75 | #include <linux/audit.h> |
76 | #include <linux/capability.h> | 76 | #include <linux/capability.h> |
77 | #include <linux/seq_file.h> | 77 | #include <linux/seq_file.h> |
78 | #include <linux/mutex.h> | ||
79 | |||
78 | #include <asm/uaccess.h> | 80 | #include <asm/uaccess.h> |
79 | #include "util.h" | 81 | #include "util.h" |
80 | 82 | ||
@@ -139,7 +141,7 @@ void __init sem_init (void) | |||
139 | * * if it's IN_WAKEUP, then it must wait until the value changes | 141 | * * if it's IN_WAKEUP, then it must wait until the value changes |
140 | * * if it's not -EINTR, then the operation was completed by | 142 | * * if it's not -EINTR, then the operation was completed by |
141 | * update_queue. semtimedop can return queue.status without | 143 | * update_queue. semtimedop can return queue.status without |
142 | * performing any operation on the semaphore array. | 144 | * performing any operation on the sem array. |
143 | * * otherwise it must acquire the spinlock and check what's up. | 145 | * * otherwise it must acquire the spinlock and check what's up. |
144 | * | 146 | * |
145 | * The two-stage algorithm is necessary to protect against the following | 147 | * The two-stage algorithm is necessary to protect against the following |
@@ -214,7 +216,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) | |||
214 | 216 | ||
215 | if (nsems < 0 || nsems > sc_semmsl) | 217 | if (nsems < 0 || nsems > sc_semmsl) |
216 | return -EINVAL; | 218 | return -EINVAL; |
217 | down(&sem_ids.sem); | 219 | mutex_lock(&sem_ids.mutex); |
218 | 220 | ||
219 | if (key == IPC_PRIVATE) { | 221 | if (key == IPC_PRIVATE) { |
220 | err = newary(key, nsems, semflg); | 222 | err = newary(key, nsems, semflg); |
@@ -241,7 +243,7 @@ asmlinkage long sys_semget (key_t key, int nsems, int semflg) | |||
241 | sem_unlock(sma); | 243 | sem_unlock(sma); |
242 | } | 244 | } |
243 | 245 | ||
244 | up(&sem_ids.sem); | 246 | mutex_unlock(&sem_ids.mutex); |
245 | return err; | 247 | return err; |
246 | } | 248 | } |
247 | 249 | ||
@@ -436,8 +438,8 @@ static int count_semzcnt (struct sem_array * sma, ushort semnum) | |||
436 | return semzcnt; | 438 | return semzcnt; |
437 | } | 439 | } |
438 | 440 | ||
439 | /* Free a semaphore set. freeary() is called with sem_ids.sem down and | 441 | /* Free a semaphore set. freeary() is called with sem_ids.mutex locked and |
440 | * the spinlock for this semaphore set hold. sem_ids.sem remains locked | 442 | * the spinlock for this semaphore set hold. sem_ids.mutex remains locked |
441 | * on exit. | 443 | * on exit. |
442 | */ | 444 | */ |
443 | static void freeary (struct sem_array *sma, int id) | 445 | static void freeary (struct sem_array *sma, int id) |
@@ -524,7 +526,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu | |||
524 | seminfo.semmnu = SEMMNU; | 526 | seminfo.semmnu = SEMMNU; |
525 | seminfo.semmap = SEMMAP; | 527 | seminfo.semmap = SEMMAP; |
526 | seminfo.semume = SEMUME; | 528 | seminfo.semume = SEMUME; |
527 | down(&sem_ids.sem); | 529 | mutex_lock(&sem_ids.mutex); |
528 | if (cmd == SEM_INFO) { | 530 | if (cmd == SEM_INFO) { |
529 | seminfo.semusz = sem_ids.in_use; | 531 | seminfo.semusz = sem_ids.in_use; |
530 | seminfo.semaem = used_sems; | 532 | seminfo.semaem = used_sems; |
@@ -533,7 +535,7 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu | |||
533 | seminfo.semaem = SEMAEM; | 535 | seminfo.semaem = SEMAEM; |
534 | } | 536 | } |
535 | max_id = sem_ids.max_id; | 537 | max_id = sem_ids.max_id; |
536 | up(&sem_ids.sem); | 538 | mutex_unlock(&sem_ids.mutex); |
537 | if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) | 539 | if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) |
538 | return -EFAULT; | 540 | return -EFAULT; |
539 | return (max_id < 0) ? 0: max_id; | 541 | return (max_id < 0) ? 0: max_id; |
@@ -884,9 +886,9 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) | |||
884 | return err; | 886 | return err; |
885 | case IPC_RMID: | 887 | case IPC_RMID: |
886 | case IPC_SET: | 888 | case IPC_SET: |
887 | down(&sem_ids.sem); | 889 | mutex_lock(&sem_ids.mutex); |
888 | err = semctl_down(semid,semnum,cmd,version,arg); | 890 | err = semctl_down(semid,semnum,cmd,version,arg); |
889 | up(&sem_ids.sem); | 891 | mutex_unlock(&sem_ids.mutex); |
890 | return err; | 892 | return err; |
891 | default: | 893 | default: |
892 | return -EINVAL; | 894 | return -EINVAL; |
@@ -1297,9 +1299,9 @@ found: | |||
1297 | /* perform adjustments registered in u */ | 1299 | /* perform adjustments registered in u */ |
1298 | nsems = sma->sem_nsems; | 1300 | nsems = sma->sem_nsems; |
1299 | for (i = 0; i < nsems; i++) { | 1301 | for (i = 0; i < nsems; i++) { |
1300 | struct sem * sem = &sma->sem_base[i]; | 1302 | struct sem * semaphore = &sma->sem_base[i]; |
1301 | if (u->semadj[i]) { | 1303 | if (u->semadj[i]) { |
1302 | sem->semval += u->semadj[i]; | 1304 | semaphore->semval += u->semadj[i]; |
1303 | /* | 1305 | /* |
1304 | * Range checks of the new semaphore value, | 1306 | * Range checks of the new semaphore value, |
1305 | * not defined by sus: | 1307 | * not defined by sus: |
@@ -1313,11 +1315,11 @@ found: | |||
1313 | * | 1315 | * |
1314 | * Manfred <manfred@colorfullife.com> | 1316 | * Manfred <manfred@colorfullife.com> |
1315 | */ | 1317 | */ |
1316 | if (sem->semval < 0) | 1318 | if (semaphore->semval < 0) |
1317 | sem->semval = 0; | 1319 | semaphore->semval = 0; |
1318 | if (sem->semval > SEMVMX) | 1320 | if (semaphore->semval > SEMVMX) |
1319 | sem->semval = SEMVMX; | 1321 | semaphore->semval = SEMVMX; |
1320 | sem->sempid = current->tgid; | 1322 | semaphore->sempid = current->tgid; |
1321 | } | 1323 | } |
1322 | } | 1324 | } |
1323 | sma->sem_otime = get_seconds(); | 1325 | sma->sem_otime = get_seconds(); |
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/capability.h> | 30 | #include <linux/capability.h> |
31 | #include <linux/ptrace.h> | 31 | #include <linux/ptrace.h> |
32 | #include <linux/seq_file.h> | 32 | #include <linux/seq_file.h> |
33 | #include <linux/mutex.h> | ||
33 | 34 | ||
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
35 | 36 | ||
@@ -109,7 +110,7 @@ static void shm_open (struct vm_area_struct *shmd) | |||
109 | * | 110 | * |
110 | * @shp: struct to free | 111 | * @shp: struct to free |
111 | * | 112 | * |
112 | * It has to be called with shp and shm_ids.sem locked, | 113 | * It has to be called with shp and shm_ids.mutex locked, |
113 | * but returns with shp unlocked and freed. | 114 | * but returns with shp unlocked and freed. |
114 | */ | 115 | */ |
115 | static void shm_destroy (struct shmid_kernel *shp) | 116 | static void shm_destroy (struct shmid_kernel *shp) |
@@ -139,7 +140,7 @@ static void shm_close (struct vm_area_struct *shmd) | |||
139 | int id = file->f_dentry->d_inode->i_ino; | 140 | int id = file->f_dentry->d_inode->i_ino; |
140 | struct shmid_kernel *shp; | 141 | struct shmid_kernel *shp; |
141 | 142 | ||
142 | down (&shm_ids.sem); | 143 | mutex_lock(&shm_ids.mutex); |
143 | /* remove from the list of attaches of the shm segment */ | 144 | /* remove from the list of attaches of the shm segment */ |
144 | if(!(shp = shm_lock(id))) | 145 | if(!(shp = shm_lock(id))) |
145 | BUG(); | 146 | BUG(); |
@@ -151,7 +152,7 @@ static void shm_close (struct vm_area_struct *shmd) | |||
151 | shm_destroy (shp); | 152 | shm_destroy (shp); |
152 | else | 153 | else |
153 | shm_unlock(shp); | 154 | shm_unlock(shp); |
154 | up (&shm_ids.sem); | 155 | mutex_unlock(&shm_ids.mutex); |
155 | } | 156 | } |
156 | 157 | ||
157 | static int shm_mmap(struct file * file, struct vm_area_struct * vma) | 158 | static int shm_mmap(struct file * file, struct vm_area_struct * vma) |
@@ -270,7 +271,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) | |||
270 | struct shmid_kernel *shp; | 271 | struct shmid_kernel *shp; |
271 | int err, id = 0; | 272 | int err, id = 0; |
272 | 273 | ||
273 | down(&shm_ids.sem); | 274 | mutex_lock(&shm_ids.mutex); |
274 | if (key == IPC_PRIVATE) { | 275 | if (key == IPC_PRIVATE) { |
275 | err = newseg(key, shmflg, size); | 276 | err = newseg(key, shmflg, size); |
276 | } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { | 277 | } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { |
@@ -296,7 +297,7 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) | |||
296 | } | 297 | } |
297 | shm_unlock(shp); | 298 | shm_unlock(shp); |
298 | } | 299 | } |
299 | up(&shm_ids.sem); | 300 | mutex_unlock(&shm_ids.mutex); |
300 | 301 | ||
301 | return err; | 302 | return err; |
302 | } | 303 | } |
@@ -467,14 +468,14 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
467 | return err; | 468 | return err; |
468 | 469 | ||
469 | memset(&shm_info,0,sizeof(shm_info)); | 470 | memset(&shm_info,0,sizeof(shm_info)); |
470 | down(&shm_ids.sem); | 471 | mutex_lock(&shm_ids.mutex); |
471 | shm_info.used_ids = shm_ids.in_use; | 472 | shm_info.used_ids = shm_ids.in_use; |
472 | shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); | 473 | shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); |
473 | shm_info.shm_tot = shm_tot; | 474 | shm_info.shm_tot = shm_tot; |
474 | shm_info.swap_attempts = 0; | 475 | shm_info.swap_attempts = 0; |
475 | shm_info.swap_successes = 0; | 476 | shm_info.swap_successes = 0; |
476 | err = shm_ids.max_id; | 477 | err = shm_ids.max_id; |
477 | up(&shm_ids.sem); | 478 | mutex_unlock(&shm_ids.mutex); |
478 | if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { | 479 | if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { |
479 | err = -EFAULT; | 480 | err = -EFAULT; |
480 | goto out; | 481 | goto out; |
@@ -583,7 +584,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
583 | * Instead we set a destroyed flag, and then blow | 584 | * Instead we set a destroyed flag, and then blow |
584 | * the name away when the usage hits zero. | 585 | * the name away when the usage hits zero. |
585 | */ | 586 | */ |
586 | down(&shm_ids.sem); | 587 | mutex_lock(&shm_ids.mutex); |
587 | shp = shm_lock(shmid); | 588 | shp = shm_lock(shmid); |
588 | err = -EINVAL; | 589 | err = -EINVAL; |
589 | if (shp == NULL) | 590 | if (shp == NULL) |
@@ -610,7 +611,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
610 | shm_unlock(shp); | 611 | shm_unlock(shp); |
611 | } else | 612 | } else |
612 | shm_destroy (shp); | 613 | shm_destroy (shp); |
613 | up(&shm_ids.sem); | 614 | mutex_unlock(&shm_ids.mutex); |
614 | goto out; | 615 | goto out; |
615 | } | 616 | } |
616 | 617 | ||
@@ -620,12 +621,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
620 | err = -EFAULT; | 621 | err = -EFAULT; |
621 | goto out; | 622 | goto out; |
622 | } | 623 | } |
623 | down(&shm_ids.sem); | 624 | mutex_lock(&shm_ids.mutex); |
624 | shp = shm_lock(shmid); | 625 | shp = shm_lock(shmid); |
625 | err=-EINVAL; | 626 | err=-EINVAL; |
626 | if(shp==NULL) | 627 | if(shp==NULL) |
627 | goto out_up; | 628 | goto out_up; |
628 | if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode, &(shp->shm_perm)))) | 629 | if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, |
630 | setbuf.mode, &(shp->shm_perm)))) | ||
629 | goto out_unlock_up; | 631 | goto out_unlock_up; |
630 | err = shm_checkid(shp,shmid); | 632 | err = shm_checkid(shp,shmid); |
631 | if(err) | 633 | if(err) |
@@ -658,7 +660,7 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
658 | out_unlock_up: | 660 | out_unlock_up: |
659 | shm_unlock(shp); | 661 | shm_unlock(shp); |
660 | out_up: | 662 | out_up: |
661 | up(&shm_ids.sem); | 663 | mutex_unlock(&shm_ids.mutex); |
662 | goto out; | 664 | goto out; |
663 | out_unlock: | 665 | out_unlock: |
664 | shm_unlock(shp); | 666 | shm_unlock(shp); |
@@ -771,7 +773,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) | |||
771 | invalid: | 773 | invalid: |
772 | up_write(¤t->mm->mmap_sem); | 774 | up_write(¤t->mm->mmap_sem); |
773 | 775 | ||
774 | down (&shm_ids.sem); | 776 | mutex_lock(&shm_ids.mutex); |
775 | if(!(shp = shm_lock(shmid))) | 777 | if(!(shp = shm_lock(shmid))) |
776 | BUG(); | 778 | BUG(); |
777 | shp->shm_nattch--; | 779 | shp->shm_nattch--; |
@@ -780,7 +782,7 @@ invalid: | |||
780 | shm_destroy (shp); | 782 | shm_destroy (shp); |
781 | else | 783 | else |
782 | shm_unlock(shp); | 784 | shm_unlock(shp); |
783 | up (&shm_ids.sem); | 785 | mutex_unlock(&shm_ids.mutex); |
784 | 786 | ||
785 | *raddr = (unsigned long) user_addr; | 787 | *raddr = (unsigned long) user_addr; |
786 | err = 0; | 788 | err = 0; |
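
The ipc hunks above replace the ipc_ids semaphore-used-as-a-sleeping-lock with a struct mutex: sema_init()/down()/up() become mutex_init()/mutex_lock()/mutex_unlock(). A minimal sketch of that conversion pattern, using a hypothetical example_ids structure rather than the real ipc code:

    #include <linux/mutex.h>

    struct example_ids {
            int in_use;
            struct mutex mutex;               /* was: struct semaphore sem; */
    };

    static struct example_ids example_ids;

    static void example_init(void)
    {
            mutex_init(&example_ids.mutex);   /* was: sema_init(&example_ids.sem, 1); */
    }

    static int example_get(void)
    {
            int ret;

            mutex_lock(&example_ids.mutex);   /* was: down(&example_ids.sem); */
            ret = example_ids.in_use;
            mutex_unlock(&example_ids.mutex); /* was: up(&example_ids.sem); */
            return ret;
    }
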
diff --git a/ipc/util.c b/ipc/util.c index 862621980b01..23151ef32590 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -68,7 +68,8 @@ __initcall(ipc_init); | |||
68 | void __init ipc_init_ids(struct ipc_ids* ids, int size) | 68 | void __init ipc_init_ids(struct ipc_ids* ids, int size) |
69 | { | 69 | { |
70 | int i; | 70 | int i; |
71 | sema_init(&ids->sem,1); | 71 | |
72 | mutex_init(&ids->mutex); | ||
72 | 73 | ||
73 | if(size > IPCMNI) | 74 | if(size > IPCMNI) |
74 | size = IPCMNI; | 75 | size = IPCMNI; |
@@ -138,7 +139,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, | |||
138 | * @ids: Identifier set | 139 | * @ids: Identifier set |
139 | * @key: The key to find | 140 | * @key: The key to find |
140 | * | 141 | * |
141 | * Requires ipc_ids.sem locked. | 142 | * Requires ipc_ids.mutex locked. |
142 | * Returns the identifier if found or -1 if not. | 143 | * Returns the identifier if found or -1 if not. |
143 | */ | 144 | */ |
144 | 145 | ||
@@ -150,7 +151,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) | |||
150 | 151 | ||
151 | /* | 152 | /* |
152 | * rcu_dereference() is not needed here | 153 | * rcu_dereference() is not needed here |
153 | * since ipc_ids.sem is held | 154 | * since ipc_ids.mutex is held |
154 | */ | 155 | */ |
155 | for (id = 0; id <= max_id; id++) { | 156 | for (id = 0; id <= max_id; id++) { |
156 | p = ids->entries->p[id]; | 157 | p = ids->entries->p[id]; |
@@ -163,7 +164,7 @@ int ipc_findkey(struct ipc_ids* ids, key_t key) | |||
163 | } | 164 | } |
164 | 165 | ||
165 | /* | 166 | /* |
166 | * Requires ipc_ids.sem locked | 167 | * Requires ipc_ids.mutex locked |
167 | */ | 168 | */ |
168 | static int grow_ary(struct ipc_ids* ids, int newsize) | 169 | static int grow_ary(struct ipc_ids* ids, int newsize) |
169 | { | 170 | { |
@@ -210,7 +211,7 @@ static int grow_ary(struct ipc_ids* ids, int newsize) | |||
210 | * is returned. The list is returned in a locked state on success. | 211 | * is returned. The list is returned in a locked state on success. |
211 | * On failure the list is not locked and -1 is returned. | 212 | * On failure the list is not locked and -1 is returned. |
212 | * | 213 | * |
213 | * Called with ipc_ids.sem held. | 214 | * Called with ipc_ids.mutex held. |
214 | */ | 215 | */ |
215 | 216 | ||
216 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) | 217 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) |
@@ -221,7 +222,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) | |||
221 | 222 | ||
222 | /* | 223 | /* |
223 | * rcu_dereference()() is not needed here since | 224 | * rcu_dereference()() is not needed here since |
224 | * ipc_ids.sem is held | 225 | * ipc_ids.mutex is held |
225 | */ | 226 | */ |
226 | for (id = 0; id < size; id++) { | 227 | for (id = 0; id < size; id++) { |
227 | if(ids->entries->p[id] == NULL) | 228 | if(ids->entries->p[id] == NULL) |
@@ -257,7 +258,7 @@ found: | |||
257 | * fed an invalid identifier. The entry is removed and internal | 258 | * fed an invalid identifier. The entry is removed and internal |
258 | * variables recomputed. The object associated with the identifier | 259 | * variables recomputed. The object associated with the identifier |
259 | * is returned. | 260 | * is returned. |
260 | * ipc_ids.sem and the spinlock for this ID is hold before this function | 261 | * ipc_ids.mutex and the spinlock for this ID is hold before this function |
261 | * is called, and remain locked on the exit. | 262 | * is called, and remain locked on the exit. |
262 | */ | 263 | */ |
263 | 264 | ||
@@ -270,7 +271,7 @@ struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) | |||
270 | 271 | ||
271 | /* | 272 | /* |
272 | * do not need a rcu_dereference()() here to force ordering | 273 | * do not need a rcu_dereference()() here to force ordering |
273 | * on Alpha, since the ipc_ids.sem is held. | 274 | * on Alpha, since the ipc_ids.mutex is held. |
274 | */ | 275 | */ |
275 | p = ids->entries->p[lid]; | 276 | p = ids->entries->p[lid]; |
276 | ids->entries->p[lid] = NULL; | 277 | ids->entries->p[lid] = NULL; |
@@ -530,13 +531,13 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) | |||
530 | 531 | ||
531 | /* | 532 | /* |
532 | * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() | 533 | * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() |
533 | * is called with shm_ids.sem locked. Since grow_ary() is also called with | 534 | * is called with shm_ids.mutex locked. Since grow_ary() is also called with |
534 | * shm_ids.sem down(for Shared Memory), there is no need to add read | 535 | * shm_ids.mutex down(for Shared Memory), there is no need to add read |
535 | * barriers here to gurantee the writes in grow_ary() are seen in order | 536 | * barriers here to gurantee the writes in grow_ary() are seen in order |
536 | * here (for Alpha). | 537 | * here (for Alpha). |
537 | * | 538 | * |
538 | * However ipc_get() itself does not necessary require ipc_ids.sem down. So | 539 | * However ipc_get() itself does not necessary require ipc_ids.mutex down. So |
539 | * if in the future ipc_get() is used by other places without ipc_ids.sem | 540 | * if in the future ipc_get() is used by other places without ipc_ids.mutex |
540 | * down, then ipc_get() needs read memery barriers as ipc_lock() does. | 541 | * down, then ipc_get() needs read memery barriers as ipc_lock() does. |
541 | */ | 542 | */ |
542 | struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) | 543 | struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) |
@@ -667,7 +668,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) | |||
667 | * Take the lock - this will be released by the corresponding | 668 | * Take the lock - this will be released by the corresponding |
668 | * call to stop(). | 669 | * call to stop(). |
669 | */ | 670 | */ |
670 | down(&iface->ids->sem); | 671 | mutex_lock(&iface->ids->mutex); |
671 | 672 | ||
672 | /* pos < 0 is invalid */ | 673 | /* pos < 0 is invalid */ |
673 | if (*pos < 0) | 674 | if (*pos < 0) |
@@ -697,7 +698,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it) | |||
697 | ipc_unlock(ipc); | 698 | ipc_unlock(ipc); |
698 | 699 | ||
699 | /* Release the lock we took in start() */ | 700 | /* Release the lock we took in start() */ |
700 | up(&iface->ids->sem); | 701 | mutex_unlock(&iface->ids->mutex); |
701 | } | 702 | } |
702 | 703 | ||
703 | static int sysvipc_proc_show(struct seq_file *s, void *it) | 704 | static int sysvipc_proc_show(struct seq_file *s, void *it) |
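
The sysvipc_proc_start()/sysvipc_proc_stop() hunks keep the ids mutex held across the whole seq_file iteration: it is taken in ->start() and only dropped in ->stop(). A hedged sketch of that pattern with placeholder names (example_mutex and the single-entry iterator are illustrative, not part of the patch):

    #include <linux/seq_file.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(example_mutex);

    static void *example_start(struct seq_file *s, loff_t *pos)
    {
            mutex_lock(&example_mutex);       /* released in example_stop() */
            return (*pos == 0) ? SEQ_START_TOKEN : NULL;
    }

    static void *example_next(struct seq_file *s, void *v, loff_t *pos)
    {
            (*pos)++;
            return NULL;                      /* single-entry example */
    }

    static void example_stop(struct seq_file *s, void *v)
    {
            mutex_unlock(&example_mutex);     /* pairs with example_start() */
    }

    static int example_show(struct seq_file *s, void *v)
    {
            seq_printf(s, "example\n");
            return 0;
    }

    static struct seq_operations example_seq_ops = {
            .start = example_start,
            .next  = example_next,
            .stop  = example_stop,
            .show  = example_show,
    };
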
diff --git a/ipc/util.h b/ipc/util.h index efaff3ee7de7..0181553d31d8 100644 --- a/ipc/util.h +++ b/ipc/util.h | |||
@@ -25,7 +25,7 @@ struct ipc_ids { | |||
25 | int max_id; | 25 | int max_id; |
26 | unsigned short seq; | 26 | unsigned short seq; |
27 | unsigned short seq_max; | 27 | unsigned short seq_max; |
28 | struct semaphore sem; | 28 | struct mutex mutex; |
29 | struct ipc_id_ary nullentry; | 29 | struct ipc_id_ary nullentry; |
30 | struct ipc_id_ary* entries; | 30 | struct ipc_id_ary* entries; |
31 | }; | 31 | }; |
@@ -40,7 +40,7 @@ void __init ipc_init_proc_interface(const char *path, const char *header, | |||
40 | #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) | 40 | #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | /* must be called with ids->sem acquired.*/ | 43 | /* must be called with ids->mutex acquired.*/ |
44 | int ipc_findkey(struct ipc_ids* ids, key_t key); | 44 | int ipc_findkey(struct ipc_ids* ids, key_t key); |
45 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); | 45 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); |
46 | 46 | ||
diff --git a/kernel/compat.c b/kernel/compat.c index 8c9cd88b6785..b9bdd1271f44 100644 --- a/kernel/compat.c +++ b/kernel/compat.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/syscalls.h> | 21 | #include <linux/syscalls.h> |
22 | #include <linux/unistd.h> | 22 | #include <linux/unistd.h> |
23 | #include <linux/security.h> | 23 | #include <linux/security.h> |
24 | #include <linux/timex.h> | ||
24 | 25 | ||
25 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
26 | 27 | ||
@@ -898,3 +899,61 @@ asmlinkage long compat_sys_rt_sigsuspend(compat_sigset_t __user *unewset, compat | |||
898 | return -ERESTARTNOHAND; | 899 | return -ERESTARTNOHAND; |
899 | } | 900 | } |
900 | #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ | 901 | #endif /* __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND */ |
902 | |||
903 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp) | ||
904 | { | ||
905 | struct timex txc; | ||
906 | int ret; | ||
907 | |||
908 | memset(&txc, 0, sizeof(struct timex)); | ||
909 | |||
910 | if (!access_ok(VERIFY_READ, utp, sizeof(struct compat_timex)) || | ||
911 | __get_user(txc.modes, &utp->modes) || | ||
912 | __get_user(txc.offset, &utp->offset) || | ||
913 | __get_user(txc.freq, &utp->freq) || | ||
914 | __get_user(txc.maxerror, &utp->maxerror) || | ||
915 | __get_user(txc.esterror, &utp->esterror) || | ||
916 | __get_user(txc.status, &utp->status) || | ||
917 | __get_user(txc.constant, &utp->constant) || | ||
918 | __get_user(txc.precision, &utp->precision) || | ||
919 | __get_user(txc.tolerance, &utp->tolerance) || | ||
920 | __get_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
921 | __get_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
922 | __get_user(txc.tick, &utp->tick) || | ||
923 | __get_user(txc.ppsfreq, &utp->ppsfreq) || | ||
924 | __get_user(txc.jitter, &utp->jitter) || | ||
925 | __get_user(txc.shift, &utp->shift) || | ||
926 | __get_user(txc.stabil, &utp->stabil) || | ||
927 | __get_user(txc.jitcnt, &utp->jitcnt) || | ||
928 | __get_user(txc.calcnt, &utp->calcnt) || | ||
929 | __get_user(txc.errcnt, &utp->errcnt) || | ||
930 | __get_user(txc.stbcnt, &utp->stbcnt)) | ||
931 | return -EFAULT; | ||
932 | |||
933 | ret = do_adjtimex(&txc); | ||
934 | |||
935 | if (!access_ok(VERIFY_WRITE, utp, sizeof(struct compat_timex)) || | ||
936 | __put_user(txc.modes, &utp->modes) || | ||
937 | __put_user(txc.offset, &utp->offset) || | ||
938 | __put_user(txc.freq, &utp->freq) || | ||
939 | __put_user(txc.maxerror, &utp->maxerror) || | ||
940 | __put_user(txc.esterror, &utp->esterror) || | ||
941 | __put_user(txc.status, &utp->status) || | ||
942 | __put_user(txc.constant, &utp->constant) || | ||
943 | __put_user(txc.precision, &utp->precision) || | ||
944 | __put_user(txc.tolerance, &utp->tolerance) || | ||
945 | __put_user(txc.time.tv_sec, &utp->time.tv_sec) || | ||
946 | __put_user(txc.time.tv_usec, &utp->time.tv_usec) || | ||
947 | __put_user(txc.tick, &utp->tick) || | ||
948 | __put_user(txc.ppsfreq, &utp->ppsfreq) || | ||
949 | __put_user(txc.jitter, &utp->jitter) || | ||
950 | __put_user(txc.shift, &utp->shift) || | ||
951 | __put_user(txc.stabil, &utp->stabil) || | ||
952 | __put_user(txc.jitcnt, &utp->jitcnt) || | ||
953 | __put_user(txc.calcnt, &utp->calcnt) || | ||
954 | __put_user(txc.errcnt, &utp->errcnt) || | ||
955 | __put_user(txc.stbcnt, &utp->stbcnt)) | ||
956 | ret = -EFAULT; | ||
957 | |||
958 | return ret; | ||
959 | } | ||
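
The new compat_sys_adjtimex() follows the standard compat wrapper pattern: check the user pointer once with access_ok(), then move each field with __get_user()/__put_user() so the 32-bit and native layouts are converted explicitly. A condensed sketch of the same pattern for a hypothetical two-field structure (the names and the native call are placeholders):

    #include <linux/types.h>
    #include <linux/string.h>
    #include <asm/uaccess.h>

    struct example_native { long value; long flags; };
    struct example_compat { s32 value; s32 flags; };

    static long example_compat_call(struct example_compat __user *up)
    {
            struct example_native n;
            long ret;

            memset(&n, 0, sizeof(n));
            if (!access_ok(VERIFY_READ, up, sizeof(*up)) ||
                __get_user(n.value, &up->value) ||
                __get_user(n.flags, &up->flags))
                    return -EFAULT;

            ret = 0;    /* ... call the native implementation here ... */

            if (!access_ok(VERIFY_WRITE, up, sizeof(*up)) ||
                __put_user(n.value, &up->value) ||
                __put_user(n.flags, &up->flags))
                    ret = -EFAULT;

            return ret;
    }
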
diff --git a/kernel/fork.c b/kernel/fork.c index d93ab2ba729c..e0a2b449dea6 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -847,7 +847,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts | |||
847 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL); | 847 | hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_REL); |
848 | sig->it_real_incr.tv64 = 0; | 848 | sig->it_real_incr.tv64 = 0; |
849 | sig->real_timer.function = it_real_fn; | 849 | sig->real_timer.function = it_real_fn; |
850 | sig->real_timer.data = tsk; | 850 | sig->tsk = tsk; |
851 | 851 | ||
852 | sig->it_virt_expires = cputime_zero; | 852 | sig->it_virt_expires = cputime_zero; |
853 | sig->it_virt_incr = cputime_zero; | 853 | sig->it_virt_incr = cputime_zero; |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index 14bc9cfa6399..0237a556eb1f 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -123,6 +123,26 @@ void ktime_get_ts(struct timespec *ts) | |||
123 | EXPORT_SYMBOL_GPL(ktime_get_ts); | 123 | EXPORT_SYMBOL_GPL(ktime_get_ts); |
124 | 124 | ||
125 | /* | 125 | /* |
126 | * Get the coarse grained time at the softirq based on xtime and | ||
127 | * wall_to_monotonic. | ||
128 | */ | ||
129 | static void hrtimer_get_softirq_time(struct hrtimer_base *base) | ||
130 | { | ||
131 | ktime_t xtim, tomono; | ||
132 | unsigned long seq; | ||
133 | |||
134 | do { | ||
135 | seq = read_seqbegin(&xtime_lock); | ||
136 | xtim = timespec_to_ktime(xtime); | ||
137 | tomono = timespec_to_ktime(wall_to_monotonic); | ||
138 | |||
139 | } while (read_seqretry(&xtime_lock, seq)); | ||
140 | |||
141 | base[CLOCK_REALTIME].softirq_time = xtim; | ||
142 | base[CLOCK_MONOTONIC].softirq_time = ktime_add(xtim, tomono); | ||
143 | } | ||
144 | |||
145 | /* | ||
126 | * Functions and macros which are different for UP/SMP systems are kept in a | 146 | * Functions and macros which are different for UP/SMP systems are kept in a |
127 | * single place | 147 | * single place |
128 | */ | 148 | */ |
@@ -246,7 +266,7 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec) | |||
246 | /* | 266 | /* |
247 | * Divide a ktime value by a nanosecond value | 267 | * Divide a ktime value by a nanosecond value |
248 | */ | 268 | */ |
249 | static unsigned long ktime_divns(const ktime_t kt, nsec_t div) | 269 | static unsigned long ktime_divns(const ktime_t kt, s64 div) |
250 | { | 270 | { |
251 | u64 dclc, inc, dns; | 271 | u64 dclc, inc, dns; |
252 | int sft = 0; | 272 | int sft = 0; |
@@ -281,18 +301,17 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags) | |||
281 | * hrtimer_forward - forward the timer expiry | 301 | * hrtimer_forward - forward the timer expiry |
282 | * | 302 | * |
283 | * @timer: hrtimer to forward | 303 | * @timer: hrtimer to forward |
304 | * @now: forward past this time | ||
284 | * @interval: the interval to forward | 305 | * @interval: the interval to forward |
285 | * | 306 | * |
286 | * Forward the timer expiry so it will expire in the future. | 307 | * Forward the timer expiry so it will expire in the future. |
287 | * Returns the number of overruns. | 308 | * Returns the number of overruns. |
288 | */ | 309 | */ |
289 | unsigned long | 310 | unsigned long |
290 | hrtimer_forward(struct hrtimer *timer, ktime_t interval) | 311 | hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval) |
291 | { | 312 | { |
292 | unsigned long orun = 1; | 313 | unsigned long orun = 1; |
293 | ktime_t delta, now; | 314 | ktime_t delta; |
294 | |||
295 | now = timer->base->get_time(); | ||
296 | 315 | ||
297 | delta = ktime_sub(now, timer->expires); | 316 | delta = ktime_sub(now, timer->expires); |
298 | 317 | ||
@@ -303,7 +322,7 @@ hrtimer_forward(struct hrtimer *timer, ktime_t interval) | |||
303 | interval.tv64 = timer->base->resolution.tv64; | 322 | interval.tv64 = timer->base->resolution.tv64; |
304 | 323 | ||
305 | if (unlikely(delta.tv64 >= interval.tv64)) { | 324 | if (unlikely(delta.tv64 >= interval.tv64)) { |
306 | nsec_t incr = ktime_to_ns(interval); | 325 | s64 incr = ktime_to_ns(interval); |
307 | 326 | ||
308 | orun = ktime_divns(delta, incr); | 327 | orun = ktime_divns(delta, incr); |
309 | timer->expires = ktime_add_ns(timer->expires, incr * orun); | 328 | timer->expires = ktime_add_ns(timer->expires, incr * orun); |
@@ -355,8 +374,6 @@ static void enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |||
355 | rb_link_node(&timer->node, parent, link); | 374 | rb_link_node(&timer->node, parent, link); |
356 | rb_insert_color(&timer->node, &base->active); | 375 | rb_insert_color(&timer->node, &base->active); |
357 | 376 | ||
358 | timer->state = HRTIMER_PENDING; | ||
359 | |||
360 | if (!base->first || timer->expires.tv64 < | 377 | if (!base->first || timer->expires.tv64 < |
361 | rb_entry(base->first, struct hrtimer, node)->expires.tv64) | 378 | rb_entry(base->first, struct hrtimer, node)->expires.tv64) |
362 | base->first = &timer->node; | 379 | base->first = &timer->node; |
@@ -376,6 +393,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |||
376 | if (base->first == &timer->node) | 393 | if (base->first == &timer->node) |
377 | base->first = rb_next(&timer->node); | 394 | base->first = rb_next(&timer->node); |
378 | rb_erase(&timer->node, &base->active); | 395 | rb_erase(&timer->node, &base->active); |
396 | timer->node.rb_parent = HRTIMER_INACTIVE; | ||
379 | } | 397 | } |
380 | 398 | ||
381 | /* | 399 | /* |
@@ -386,7 +404,6 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_base *base) | |||
386 | { | 404 | { |
387 | if (hrtimer_active(timer)) { | 405 | if (hrtimer_active(timer)) { |
388 | __remove_hrtimer(timer, base); | 406 | __remove_hrtimer(timer, base); |
389 | timer->state = HRTIMER_INACTIVE; | ||
390 | return 1; | 407 | return 1; |
391 | } | 408 | } |
392 | return 0; | 409 | return 0; |
@@ -560,6 +577,7 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id, | |||
560 | clock_id = CLOCK_MONOTONIC; | 577 | clock_id = CLOCK_MONOTONIC; |
561 | 578 | ||
562 | timer->base = &bases[clock_id]; | 579 | timer->base = &bases[clock_id]; |
580 | timer->node.rb_parent = HRTIMER_INACTIVE; | ||
563 | } | 581 | } |
564 | 582 | ||
565 | /** | 583 | /** |
@@ -586,48 +604,35 @@ int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp) | |||
586 | */ | 604 | */ |
587 | static inline void run_hrtimer_queue(struct hrtimer_base *base) | 605 | static inline void run_hrtimer_queue(struct hrtimer_base *base) |
588 | { | 606 | { |
589 | ktime_t now = base->get_time(); | ||
590 | struct rb_node *node; | 607 | struct rb_node *node; |
591 | 608 | ||
609 | if (base->get_softirq_time) | ||
610 | base->softirq_time = base->get_softirq_time(); | ||
611 | |||
592 | spin_lock_irq(&base->lock); | 612 | spin_lock_irq(&base->lock); |
593 | 613 | ||
594 | while ((node = base->first)) { | 614 | while ((node = base->first)) { |
595 | struct hrtimer *timer; | 615 | struct hrtimer *timer; |
596 | int (*fn)(void *); | 616 | int (*fn)(struct hrtimer *); |
597 | int restart; | 617 | int restart; |
598 | void *data; | ||
599 | 618 | ||
600 | timer = rb_entry(node, struct hrtimer, node); | 619 | timer = rb_entry(node, struct hrtimer, node); |
601 | if (now.tv64 <= timer->expires.tv64) | 620 | if (base->softirq_time.tv64 <= timer->expires.tv64) |
602 | break; | 621 | break; |
603 | 622 | ||
604 | fn = timer->function; | 623 | fn = timer->function; |
605 | data = timer->data; | ||
606 | set_curr_timer(base, timer); | 624 | set_curr_timer(base, timer); |
607 | timer->state = HRTIMER_RUNNING; | ||
608 | __remove_hrtimer(timer, base); | 625 | __remove_hrtimer(timer, base); |
609 | spin_unlock_irq(&base->lock); | 626 | spin_unlock_irq(&base->lock); |
610 | 627 | ||
611 | /* | 628 | restart = fn(timer); |
612 | * fn == NULL is special case for the simplest timer | ||
613 | * variant - wake up process and do not restart: | ||
614 | */ | ||
615 | if (!fn) { | ||
616 | wake_up_process(data); | ||
617 | restart = HRTIMER_NORESTART; | ||
618 | } else | ||
619 | restart = fn(data); | ||
620 | 629 | ||
621 | spin_lock_irq(&base->lock); | 630 | spin_lock_irq(&base->lock); |
622 | 631 | ||
623 | /* Another CPU has added back the timer */ | 632 | if (restart != HRTIMER_NORESTART) { |
624 | if (timer->state != HRTIMER_RUNNING) | 633 | BUG_ON(hrtimer_active(timer)); |
625 | continue; | ||
626 | |||
627 | if (restart == HRTIMER_RESTART) | ||
628 | enqueue_hrtimer(timer, base); | 634 | enqueue_hrtimer(timer, base); |
629 | else | 635 | } |
630 | timer->state = HRTIMER_EXPIRED; | ||
631 | } | 636 | } |
632 | set_curr_timer(base, NULL); | 637 | set_curr_timer(base, NULL); |
633 | spin_unlock_irq(&base->lock); | 638 | spin_unlock_irq(&base->lock); |
@@ -641,6 +646,8 @@ void hrtimer_run_queues(void) | |||
641 | struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); | 646 | struct hrtimer_base *base = __get_cpu_var(hrtimer_bases); |
642 | int i; | 647 | int i; |
643 | 648 | ||
649 | hrtimer_get_softirq_time(base); | ||
650 | |||
644 | for (i = 0; i < MAX_HRTIMER_BASES; i++) | 651 | for (i = 0; i < MAX_HRTIMER_BASES; i++) |
645 | run_hrtimer_queue(&base[i]); | 652 | run_hrtimer_queue(&base[i]); |
646 | } | 653 | } |
@@ -649,79 +656,70 @@ void hrtimer_run_queues(void) | |||
649 | * Sleep related functions: | 656 | * Sleep related functions: |
650 | */ | 657 | */ |
651 | 658 | ||
652 | /** | 659 | struct sleep_hrtimer { |
653 | * schedule_hrtimer - sleep until timeout | 660 | struct hrtimer timer; |
654 | * | 661 | struct task_struct *task; |
655 | * @timer: hrtimer variable initialized with the correct clock base | 662 | int expired; |
656 | * @mode: timeout value is abs/rel | 663 | }; |
657 | * | ||
658 | * Make the current task sleep until @timeout is | ||
659 | * elapsed. | ||
660 | * | ||
661 | * You can set the task state as follows - | ||
662 | * | ||
663 | * %TASK_UNINTERRUPTIBLE - at least @timeout is guaranteed to | ||
664 | * pass before the routine returns. The routine will return 0 | ||
665 | * | ||
666 | * %TASK_INTERRUPTIBLE - the routine may return early if a signal is | ||
667 | * delivered to the current task. In this case the remaining time | ||
668 | * will be returned | ||
669 | * | ||
670 | * The current task state is guaranteed to be TASK_RUNNING when this | ||
671 | * routine returns. | ||
672 | */ | ||
673 | static ktime_t __sched | ||
674 | schedule_hrtimer(struct hrtimer *timer, const enum hrtimer_mode mode) | ||
675 | { | ||
676 | /* fn stays NULL, meaning single-shot wakeup: */ | ||
677 | timer->data = current; | ||
678 | 664 | ||
679 | hrtimer_start(timer, timer->expires, mode); | 665 | static int nanosleep_wakeup(struct hrtimer *timer) |
666 | { | ||
667 | struct sleep_hrtimer *t = | ||
668 | container_of(timer, struct sleep_hrtimer, timer); | ||
680 | 669 | ||
681 | schedule(); | 670 | t->expired = 1; |
682 | hrtimer_cancel(timer); | 671 | wake_up_process(t->task); |
683 | 672 | ||
684 | /* Return the remaining time: */ | 673 | return HRTIMER_NORESTART; |
685 | if (timer->state != HRTIMER_EXPIRED) | ||
686 | return ktime_sub(timer->expires, timer->base->get_time()); | ||
687 | else | ||
688 | return (ktime_t) {.tv64 = 0 }; | ||
689 | } | 674 | } |
690 | 675 | ||
691 | static inline ktime_t __sched | 676 | static int __sched do_nanosleep(struct sleep_hrtimer *t, enum hrtimer_mode mode) |
692 | schedule_hrtimer_interruptible(struct hrtimer *timer, | ||
693 | const enum hrtimer_mode mode) | ||
694 | { | 677 | { |
695 | set_current_state(TASK_INTERRUPTIBLE); | 678 | t->timer.function = nanosleep_wakeup; |
679 | t->task = current; | ||
680 | t->expired = 0; | ||
681 | |||
682 | do { | ||
683 | set_current_state(TASK_INTERRUPTIBLE); | ||
684 | hrtimer_start(&t->timer, t->timer.expires, mode); | ||
685 | |||
686 | schedule(); | ||
687 | |||
688 | if (unlikely(!t->expired)) { | ||
689 | hrtimer_cancel(&t->timer); | ||
690 | mode = HRTIMER_ABS; | ||
691 | } | ||
692 | } while (!t->expired && !signal_pending(current)); | ||
696 | 693 | ||
697 | return schedule_hrtimer(timer, mode); | 694 | return t->expired; |
698 | } | 695 | } |
699 | 696 | ||
700 | static long __sched nanosleep_restart(struct restart_block *restart) | 697 | static long __sched nanosleep_restart(struct restart_block *restart) |
701 | { | 698 | { |
699 | struct sleep_hrtimer t; | ||
702 | struct timespec __user *rmtp; | 700 | struct timespec __user *rmtp; |
703 | struct timespec tu; | 701 | struct timespec tu; |
704 | void *rfn_save = restart->fn; | 702 | ktime_t time; |
705 | struct hrtimer timer; | ||
706 | ktime_t rem; | ||
707 | 703 | ||
708 | restart->fn = do_no_restart_syscall; | 704 | restart->fn = do_no_restart_syscall; |
709 | 705 | ||
710 | hrtimer_init(&timer, (clockid_t) restart->arg3, HRTIMER_ABS); | 706 | hrtimer_init(&t.timer, restart->arg3, HRTIMER_ABS); |
711 | 707 | t.timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0; | |
712 | timer.expires.tv64 = ((u64)restart->arg1 << 32) | (u64) restart->arg0; | ||
713 | |||
714 | rem = schedule_hrtimer_interruptible(&timer, HRTIMER_ABS); | ||
715 | 708 | ||
716 | if (rem.tv64 <= 0) | 709 | if (do_nanosleep(&t, HRTIMER_ABS)) |
717 | return 0; | 710 | return 0; |
718 | 711 | ||
719 | rmtp = (struct timespec __user *) restart->arg2; | 712 | rmtp = (struct timespec __user *) restart->arg2; |
720 | tu = ktime_to_timespec(rem); | 713 | if (rmtp) { |
721 | if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) | 714 | time = ktime_sub(t.timer.expires, t.timer.base->get_time()); |
722 | return -EFAULT; | 715 | if (time.tv64 <= 0) |
716 | return 0; | ||
717 | tu = ktime_to_timespec(time); | ||
718 | if (copy_to_user(rmtp, &tu, sizeof(tu))) | ||
719 | return -EFAULT; | ||
720 | } | ||
723 | 721 | ||
724 | restart->fn = rfn_save; | 722 | restart->fn = nanosleep_restart; |
725 | 723 | ||
726 | /* The other values in restart are already filled in */ | 724 | /* The other values in restart are already filled in */ |
727 | return -ERESTART_RESTARTBLOCK; | 725 | return -ERESTART_RESTARTBLOCK; |
@@ -731,33 +729,34 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, | |||
731 | const enum hrtimer_mode mode, const clockid_t clockid) | 729 | const enum hrtimer_mode mode, const clockid_t clockid) |
732 | { | 730 | { |
733 | struct restart_block *restart; | 731 | struct restart_block *restart; |
734 | struct hrtimer timer; | 732 | struct sleep_hrtimer t; |
735 | struct timespec tu; | 733 | struct timespec tu; |
736 | ktime_t rem; | 734 | ktime_t rem; |
737 | 735 | ||
738 | hrtimer_init(&timer, clockid, mode); | 736 | hrtimer_init(&t.timer, clockid, mode); |
739 | 737 | t.timer.expires = timespec_to_ktime(*rqtp); | |
740 | timer.expires = timespec_to_ktime(*rqtp); | 738 | if (do_nanosleep(&t, mode)) |
741 | |||
742 | rem = schedule_hrtimer_interruptible(&timer, mode); | ||
743 | if (rem.tv64 <= 0) | ||
744 | return 0; | 739 | return 0; |
745 | 740 | ||
746 | /* Absolute timers do not update the rmtp value and restart: */ | 741 | /* Absolute timers do not update the rmtp value and restart: */ |
747 | if (mode == HRTIMER_ABS) | 742 | if (mode == HRTIMER_ABS) |
748 | return -ERESTARTNOHAND; | 743 | return -ERESTARTNOHAND; |
749 | 744 | ||
750 | tu = ktime_to_timespec(rem); | 745 | if (rmtp) { |
751 | 746 | rem = ktime_sub(t.timer.expires, t.timer.base->get_time()); | |
752 | if (rmtp && copy_to_user(rmtp, &tu, sizeof(tu))) | 747 | if (rem.tv64 <= 0) |
753 | return -EFAULT; | 748 | return 0; |
749 | tu = ktime_to_timespec(rem); | ||
750 | if (copy_to_user(rmtp, &tu, sizeof(tu))) | ||
751 | return -EFAULT; | ||
752 | } | ||
754 | 753 | ||
755 | restart = ¤t_thread_info()->restart_block; | 754 | restart = ¤t_thread_info()->restart_block; |
756 | restart->fn = nanosleep_restart; | 755 | restart->fn = nanosleep_restart; |
757 | restart->arg0 = timer.expires.tv64 & 0xFFFFFFFF; | 756 | restart->arg0 = t.timer.expires.tv64 & 0xFFFFFFFF; |
758 | restart->arg1 = timer.expires.tv64 >> 32; | 757 | restart->arg1 = t.timer.expires.tv64 >> 32; |
759 | restart->arg2 = (unsigned long) rmtp; | 758 | restart->arg2 = (unsigned long) rmtp; |
760 | restart->arg3 = (unsigned long) timer.base->index; | 759 | restart->arg3 = (unsigned long) t.timer.base->index; |
761 | 760 | ||
762 | return -ERESTART_RESTARTBLOCK; | 761 | return -ERESTART_RESTARTBLOCK; |
763 | } | 762 | } |
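
The hrtimer rework above changes the callback convention: handlers now receive the struct hrtimer itself and recover their enclosing object with container_of(), instead of going through a separate ->data pointer (see the new sleep_hrtimer/nanosleep_wakeup pair). A sketch of a one-shot wakeup timer written against that interface; the example_event structure is illustrative only:

    #include <linux/hrtimer.h>
    #include <linux/sched.h>

    struct example_event {
            struct hrtimer          timer;
            struct task_struct      *task;
            int                     fired;
    };

    static int example_timer_fn(struct hrtimer *timer)
    {
            struct example_event *ev =
                    container_of(timer, struct example_event, timer);

            ev->fired = 1;
            wake_up_process(ev->task);

            return HRTIMER_NORESTART;       /* one-shot: do not re-enqueue */
    }

    static void example_arm(struct example_event *ev, ktime_t expires)
    {
            hrtimer_init(&ev->timer, CLOCK_MONOTONIC, HRTIMER_REL);
            ev->timer.function = example_timer_fn;
            ev->task = current;
            ev->fired = 0;
            hrtimer_start(&ev->timer, expires, HRTIMER_REL);
    }
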
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 6edfcef291e8..ac766ad573e8 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -271,6 +271,7 @@ void free_irq(unsigned int irq, void *dev_id) | |||
271 | struct irqaction **p; | 271 | struct irqaction **p; |
272 | unsigned long flags; | 272 | unsigned long flags; |
273 | 273 | ||
274 | WARN_ON(in_interrupt()); | ||
274 | if (irq >= NR_IRQS) | 275 | if (irq >= NR_IRQS) |
275 | return; | 276 | return; |
276 | 277 | ||
diff --git a/kernel/itimer.c b/kernel/itimer.c index 680e6b70c872..204ed7939e75 100644 --- a/kernel/itimer.c +++ b/kernel/itimer.c | |||
@@ -128,16 +128,16 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value) | |||
128 | /* | 128 | /* |
129 | * The timer is automagically restarted, when interval != 0 | 129 | * The timer is automagically restarted, when interval != 0 |
130 | */ | 130 | */ |
131 | int it_real_fn(void *data) | 131 | int it_real_fn(struct hrtimer *timer) |
132 | { | 132 | { |
133 | struct task_struct *tsk = (struct task_struct *) data; | 133 | struct signal_struct *sig = |
134 | container_of(timer, struct signal_struct, real_timer); | ||
134 | 135 | ||
135 | send_group_sig_info(SIGALRM, SEND_SIG_PRIV, tsk); | 136 | send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk); |
136 | |||
137 | if (tsk->signal->it_real_incr.tv64 != 0) { | ||
138 | hrtimer_forward(&tsk->signal->real_timer, | ||
139 | tsk->signal->it_real_incr); | ||
140 | 137 | ||
138 | if (sig->it_real_incr.tv64 != 0) { | ||
139 | hrtimer_forward(timer, timer->base->softirq_time, | ||
140 | sig->it_real_incr); | ||
141 | return HRTIMER_RESTART; | 141 | return HRTIMER_RESTART; |
142 | } | 142 | } |
143 | return HRTIMER_NORESTART; | 143 | return HRTIMER_NORESTART; |
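
hrtimer_forward() now takes the reference time explicitly, which lets softirq-context handlers such as it_real_fn() forward against base->softirq_time instead of re-reading the clock. A minimal sketch of a periodic callback using the new signature (example_period is a placeholder interval):

    #include <linux/hrtimer.h>

    static ktime_t example_period;

    static int example_periodic_fn(struct hrtimer *timer)
    {
            /* push the expiry past "now", accounting for missed periods */
            hrtimer_forward(timer, timer->base->softirq_time, example_period);
            return HRTIMER_RESTART;         /* re-enqueue with the new expiry */
    }
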
diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 1fb9f753ef60..1156eb0977d0 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c | |||
@@ -323,10 +323,10 @@ struct hlist_head __kprobes *kretprobe_inst_table_head(struct task_struct *tsk) | |||
323 | } | 323 | } |
324 | 324 | ||
325 | /* | 325 | /* |
326 | * This function is called from exit_thread or flush_thread when task tk's | 326 | * This function is called from finish_task_switch when task tk becomes dead, |
327 | * stack is being recycled so that we can recycle any function-return probe | 327 | * so that we can recycle any function-return probe instances associated |
328 | * instances associated with this task. These left over instances represent | 328 | * with this task. These left over instances represent probed functions |
329 | * probed functions that have been called but will never return. | 329 | * that have been called but will never return. |
330 | */ | 330 | */ |
331 | void __kprobes kprobe_flush_task(struct task_struct *tk) | 331 | void __kprobes kprobe_flush_task(struct task_struct *tk) |
332 | { | 332 | { |
@@ -336,7 +336,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk) | |||
336 | unsigned long flags = 0; | 336 | unsigned long flags = 0; |
337 | 337 | ||
338 | spin_lock_irqsave(&kretprobe_lock, flags); | 338 | spin_lock_irqsave(&kretprobe_lock, flags); |
339 | head = kretprobe_inst_table_head(current); | 339 | head = kretprobe_inst_table_head(tk); |
340 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | 340 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { |
341 | if (ri->task == tk) | 341 | if (ri->task == tk) |
342 | recycle_rp_inst(ri); | 342 | recycle_rp_inst(ri); |
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 9944379360b5..ac6dc8744429 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c | |||
@@ -145,7 +145,7 @@ static int common_timer_set(struct k_itimer *, int, | |||
145 | struct itimerspec *, struct itimerspec *); | 145 | struct itimerspec *, struct itimerspec *); |
146 | static int common_timer_del(struct k_itimer *timer); | 146 | static int common_timer_del(struct k_itimer *timer); |
147 | 147 | ||
148 | static int posix_timer_fn(void *data); | 148 | static int posix_timer_fn(struct hrtimer *data); |
149 | 149 | ||
150 | static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags); | 150 | static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags); |
151 | 151 | ||
@@ -251,15 +251,18 @@ __initcall(init_posix_timers); | |||
251 | 251 | ||
252 | static void schedule_next_timer(struct k_itimer *timr) | 252 | static void schedule_next_timer(struct k_itimer *timr) |
253 | { | 253 | { |
254 | struct hrtimer *timer = &timr->it.real.timer; | ||
255 | |||
254 | if (timr->it.real.interval.tv64 == 0) | 256 | if (timr->it.real.interval.tv64 == 0) |
255 | return; | 257 | return; |
256 | 258 | ||
257 | timr->it_overrun += hrtimer_forward(&timr->it.real.timer, | 259 | timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(), |
258 | timr->it.real.interval); | 260 | timr->it.real.interval); |
261 | |||
259 | timr->it_overrun_last = timr->it_overrun; | 262 | timr->it_overrun_last = timr->it_overrun; |
260 | timr->it_overrun = -1; | 263 | timr->it_overrun = -1; |
261 | ++timr->it_requeue_pending; | 264 | ++timr->it_requeue_pending; |
262 | hrtimer_restart(&timr->it.real.timer); | 265 | hrtimer_restart(timer); |
263 | } | 266 | } |
264 | 267 | ||
265 | /* | 268 | /* |
@@ -331,13 +334,14 @@ EXPORT_SYMBOL_GPL(posix_timer_event); | |||
331 | 334 | ||
332 | * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. | 335 | * This code is for CLOCK_REALTIME* and CLOCK_MONOTONIC* timers. |
333 | */ | 336 | */ |
334 | static int posix_timer_fn(void *data) | 337 | static int posix_timer_fn(struct hrtimer *timer) |
335 | { | 338 | { |
336 | struct k_itimer *timr = data; | 339 | struct k_itimer *timr; |
337 | unsigned long flags; | 340 | unsigned long flags; |
338 | int si_private = 0; | 341 | int si_private = 0; |
339 | int ret = HRTIMER_NORESTART; | 342 | int ret = HRTIMER_NORESTART; |
340 | 343 | ||
344 | timr = container_of(timer, struct k_itimer, it.real.timer); | ||
341 | spin_lock_irqsave(&timr->it_lock, flags); | 345 | spin_lock_irqsave(&timr->it_lock, flags); |
342 | 346 | ||
343 | if (timr->it.real.interval.tv64 != 0) | 347 | if (timr->it.real.interval.tv64 != 0) |
@@ -351,7 +355,8 @@ static int posix_timer_fn(void *data) | |||
351 | */ | 355 | */ |
352 | if (timr->it.real.interval.tv64 != 0) { | 356 | if (timr->it.real.interval.tv64 != 0) { |
353 | timr->it_overrun += | 357 | timr->it_overrun += |
354 | hrtimer_forward(&timr->it.real.timer, | 358 | hrtimer_forward(timer, |
359 | timer->base->softirq_time, | ||
355 | timr->it.real.interval); | 360 | timr->it.real.interval); |
356 | ret = HRTIMER_RESTART; | 361 | ret = HRTIMER_RESTART; |
357 | ++timr->it_requeue_pending; | 362 | ++timr->it_requeue_pending; |
@@ -603,38 +608,41 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) | |||
603 | static void | 608 | static void |
604 | common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) | 609 | common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting) |
605 | { | 610 | { |
606 | ktime_t remaining; | 611 | ktime_t now, remaining, iv; |
607 | struct hrtimer *timer = &timr->it.real.timer; | 612 | struct hrtimer *timer = &timr->it.real.timer; |
608 | 613 | ||
609 | memset(cur_setting, 0, sizeof(struct itimerspec)); | 614 | memset(cur_setting, 0, sizeof(struct itimerspec)); |
610 | remaining = hrtimer_get_remaining(timer); | ||
611 | 615 | ||
612 | /* Time left ? or timer pending */ | 616 | iv = timr->it.real.interval; |
613 | if (remaining.tv64 > 0 || hrtimer_active(timer)) | 617 | |
614 | goto calci; | ||
615 | /* interval timer ? */ | 618 | /* interval timer ? */ |
616 | if (timr->it.real.interval.tv64 == 0) | 619 | if (iv.tv64) |
620 | cur_setting->it_interval = ktime_to_timespec(iv); | ||
621 | else if (!hrtimer_active(timer) && | ||
622 | (timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) | ||
617 | return; | 623 | return; |
624 | |||
625 | now = timer->base->get_time(); | ||
626 | |||
618 | /* | 627 | /* |
619 | * When a requeue is pending or this is a SIGEV_NONE timer | 628 | * When a requeue is pending or this is a SIGEV_NONE |
620 | * move the expiry time forward by intervals, so expiry is > | 629 | * timer move the expiry time forward by intervals, so |
621 | * now. | 630 | * expiry is > now. |
622 | */ | 631 | */ |
623 | if (timr->it_requeue_pending & REQUEUE_PENDING || | 632 | if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING || |
624 | (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) { | 633 | (timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE)) |
625 | timr->it_overrun += | 634 | timr->it_overrun += hrtimer_forward(timer, now, iv); |
626 | hrtimer_forward(timer, timr->it.real.interval); | 635 | |
627 | remaining = hrtimer_get_remaining(timer); | 636 | remaining = ktime_sub(timer->expires, now); |
628 | } | ||
629 | calci: | ||
630 | /* interval timer ? */ | ||
631 | if (timr->it.real.interval.tv64 != 0) | ||
632 | cur_setting->it_interval = | ||
633 | ktime_to_timespec(timr->it.real.interval); | ||
634 | /* Return 0 only, when the timer is expired and not pending */ | 637 | /* Return 0 only, when the timer is expired and not pending */ |
635 | if (remaining.tv64 <= 0) | 638 | if (remaining.tv64 <= 0) { |
636 | cur_setting->it_value.tv_nsec = 1; | 639 | /* |
637 | else | 640 | * A single shot SIGEV_NONE timer must return 0, when |
641 | * it is expired ! | ||
642 | */ | ||
643 | if ((timr->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) | ||
644 | cur_setting->it_value.tv_nsec = 1; | ||
645 | } else | ||
638 | cur_setting->it_value = ktime_to_timespec(remaining); | 646 | cur_setting->it_value = ktime_to_timespec(remaining); |
639 | } | 647 | } |
640 | 648 | ||
@@ -717,7 +725,6 @@ common_timer_set(struct k_itimer *timr, int flags, | |||
717 | 725 | ||
718 | mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL; | 726 | mode = flags & TIMER_ABSTIME ? HRTIMER_ABS : HRTIMER_REL; |
719 | hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); | 727 | hrtimer_init(&timr->it.real.timer, timr->it_clock, mode); |
720 | timr->it.real.timer.data = timr; | ||
721 | timr->it.real.timer.function = posix_timer_fn; | 728 | timr->it.real.timer.function = posix_timer_fn; |
722 | 729 | ||
723 | timer->expires = timespec_to_ktime(new_setting->it_value); | 730 | timer->expires = timespec_to_ktime(new_setting->it_value); |
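
The reworked common_timer_get() derives the remaining time from a single clock read and ktime_sub() rather than hrtimer_get_remaining() plus the old goto path. A stripped-down sketch of that calculation (the SIGEV_NONE and requeue handling shown above is omitted):

    #include <linux/hrtimer.h>
    #include <linux/time.h>

    static struct timespec example_remaining(struct hrtimer *timer)
    {
            ktime_t now = timer->base->get_time();
            ktime_t rem = ktime_sub(timer->expires, now);

            if (rem.tv64 <= 0)
                    return (struct timespec) { .tv_sec = 0, .tv_nsec = 0 };

            return ktime_to_timespec(rem);
    }
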
diff --git a/kernel/power/swap.c b/kernel/power/swap.c index 9177f3f73a6c..044b8e0c1025 100644 --- a/kernel/power/swap.c +++ b/kernel/power/swap.c | |||
@@ -454,10 +454,11 @@ static int load_image(struct swap_map_handle *handle, | |||
454 | nr_pages++; | 454 | nr_pages++; |
455 | } | 455 | } |
456 | } while (ret > 0); | 456 | } while (ret > 0); |
457 | if (!error) | 457 | if (!error) { |
458 | printk("\b\b\b\bdone\n"); | 458 | printk("\b\b\b\bdone\n"); |
459 | if (!snapshot_image_loaded(snapshot)) | 459 | if (!snapshot_image_loaded(snapshot)) |
460 | error = -ENODATA; | 460 | error = -ENODATA; |
461 | } | ||
461 | return error; | 462 | return error; |
462 | } | 463 | } |
463 | 464 | ||
diff --git a/kernel/sched.c b/kernel/sched.c index 7ffaabd64f89..78acdefeccca 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <linux/syscalls.h> | 49 | #include <linux/syscalls.h> |
50 | #include <linux/times.h> | 50 | #include <linux/times.h> |
51 | #include <linux/acct.h> | 51 | #include <linux/acct.h> |
52 | #include <linux/kprobes.h> | ||
52 | #include <asm/tlb.h> | 53 | #include <asm/tlb.h> |
53 | 54 | ||
54 | #include <asm/unistd.h> | 55 | #include <asm/unistd.h> |
@@ -1546,8 +1547,14 @@ static inline void finish_task_switch(runqueue_t *rq, task_t *prev) | |||
1546 | finish_lock_switch(rq, prev); | 1547 | finish_lock_switch(rq, prev); |
1547 | if (mm) | 1548 | if (mm) |
1548 | mmdrop(mm); | 1549 | mmdrop(mm); |
1549 | if (unlikely(prev_task_flags & PF_DEAD)) | 1550 | if (unlikely(prev_task_flags & PF_DEAD)) { |
1551 | /* | ||
1552 | * Remove function-return probe instances associated with this | ||
1553 | * task and put them back on the free list. | ||
1554 | */ | ||
1555 | kprobe_flush_task(prev); | ||
1550 | put_task_struct(prev); | 1556 | put_task_struct(prev); |
1557 | } | ||
1551 | } | 1558 | } |
1552 | 1559 | ||
1553 | /** | 1560 | /** |
diff --git a/kernel/time.c b/kernel/time.c index e00a97b77241..ff8e7019c4c4 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
@@ -610,7 +610,7 @@ void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) | |||
610 | * | 610 | * |
611 | * Returns the timespec representation of the nsec parameter. | 611 | * Returns the timespec representation of the nsec parameter. |
612 | */ | 612 | */ |
613 | struct timespec ns_to_timespec(const nsec_t nsec) | 613 | struct timespec ns_to_timespec(const s64 nsec) |
614 | { | 614 | { |
615 | struct timespec ts; | 615 | struct timespec ts; |
616 | 616 | ||
@@ -630,7 +630,7 @@ struct timespec ns_to_timespec(const nsec_t nsec) | |||
630 | * | 630 | * |
631 | * Returns the timeval representation of the nsec parameter. | 631 | * Returns the timeval representation of the nsec parameter. |
632 | */ | 632 | */ |
633 | struct timeval ns_to_timeval(const nsec_t nsec) | 633 | struct timeval ns_to_timeval(const s64 nsec) |
634 | { | 634 | { |
635 | struct timespec ts = ns_to_timespec(nsec); | 635 | struct timespec ts = ns_to_timespec(nsec); |
636 | struct timeval tv; | 636 | struct timeval tv; |
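
With the nsec_t typedef gone, ns_to_timespec()/ns_to_timeval() simply take a plain s64 nanosecond count. An illustrative call, assuming the usual <linux/time.h> declarations:

    #include <linux/time.h>

    static void example_ns_conversions(void)
    {
            s64 ns = 1500000000LL;                   /* 1.5 s in nanoseconds */
            struct timespec ts = ns_to_timespec(ns); /* -> { 1, 500000000 } */
            struct timeval  tv = ns_to_timeval(ns);  /* -> { 1, 500000 }    */

            (void)ts;
            (void)tv;
    }
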
diff --git a/lib/Makefile b/lib/Makefile index f827e3c24ec0..b830c9a15541 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -23,6 +23,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | |||
23 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 23 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
24 | lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o | 24 | lib-$(CONFIG_SEMAPHORE_SLEEPERS) += semaphore-sleepers.o |
25 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | 25 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o |
26 | lib-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | ||
26 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o | 27 | obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o |
27 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o | 28 | obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o |
28 | 29 | ||
diff --git a/lib/bitmap.c b/lib/bitmap.c index 8acab0e176ef..ed2ae3b0cd06 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c | |||
@@ -253,33 +253,18 @@ int __bitmap_subset(const unsigned long *bitmap1, | |||
253 | } | 253 | } |
254 | EXPORT_SYMBOL(__bitmap_subset); | 254 | EXPORT_SYMBOL(__bitmap_subset); |
255 | 255 | ||
256 | #if BITS_PER_LONG == 32 | ||
257 | int __bitmap_weight(const unsigned long *bitmap, int bits) | 256 | int __bitmap_weight(const unsigned long *bitmap, int bits) |
258 | { | 257 | { |
259 | int k, w = 0, lim = bits/BITS_PER_LONG; | 258 | int k, w = 0, lim = bits/BITS_PER_LONG; |
260 | 259 | ||
261 | for (k = 0; k < lim; k++) | 260 | for (k = 0; k < lim; k++) |
262 | w += hweight32(bitmap[k]); | 261 | w += hweight_long(bitmap[k]); |
263 | 262 | ||
264 | if (bits % BITS_PER_LONG) | 263 | if (bits % BITS_PER_LONG) |
265 | w += hweight32(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); | 264 | w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); |
266 | 265 | ||
267 | return w; | 266 | return w; |
268 | } | 267 | } |
269 | #else | ||
270 | int __bitmap_weight(const unsigned long *bitmap, int bits) | ||
271 | { | ||
272 | int k, w = 0, lim = bits/BITS_PER_LONG; | ||
273 | |||
274 | for (k = 0; k < lim; k++) | ||
275 | w += hweight64(bitmap[k]); | ||
276 | |||
277 | if (bits % BITS_PER_LONG) | ||
278 | w += hweight64(bitmap[k] & BITMAP_LAST_WORD_MASK(bits)); | ||
279 | |||
280 | return w; | ||
281 | } | ||
282 | #endif | ||
283 | EXPORT_SYMBOL(__bitmap_weight); | 268 | EXPORT_SYMBOL(__bitmap_weight); |
284 | 269 | ||
285 | /* | 270 | /* |
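The 32-bit/64-bit duplication in __bitmap_weight() collapses because hweight_long() already dispatches on the word size at compile time. Roughly, the helper looks like the sketch below (reconstructed from memory of <linux/bitops.h>; treat the exact form as an assumption and check the header):

#include <linux/bitops.h>

/* Sketch of how hweight_long() picks the right width at compile time;
 * named my_hweight_long() here to make clear it is only an illustration. */
static inline unsigned long my_hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64(w);
}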
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index c05b4b19cf6c..bda0d71a2514 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c | |||
@@ -11,48 +11,171 @@ | |||
11 | 11 | ||
12 | #include <linux/bitops.h> | 12 | #include <linux/bitops.h> |
13 | #include <linux/module.h> | 13 | #include <linux/module.h> |
14 | #include <asm/types.h> | ||
15 | #include <asm/byteorder.h> | ||
14 | 16 | ||
15 | int find_next_bit(const unsigned long *addr, int size, int offset) | 17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
18 | |||
19 | /** | ||
20 | * find_next_bit - find the next set bit in a memory region | ||
21 | * @addr: The address to base the search on | ||
22 | * @offset: The bitnumber to start searching at | ||
23 | * @size: The maximum size to search | ||
24 | */ | ||
25 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, | ||
26 | unsigned long offset) | ||
16 | { | 27 | { |
17 | const unsigned long *base; | 28 | const unsigned long *p = addr + BITOP_WORD(offset); |
18 | const int NBITS = sizeof(*addr) * 8; | 29 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
19 | unsigned long tmp; | 30 | unsigned long tmp; |
20 | 31 | ||
21 | base = addr; | 32 | if (offset >= size) |
33 | return size; | ||
34 | size -= result; | ||
35 | offset %= BITS_PER_LONG; | ||
22 | if (offset) { | 36 | if (offset) { |
23 | int suboffset; | 37 | tmp = *(p++); |
38 | tmp &= (~0UL << offset); | ||
39 | if (size < BITS_PER_LONG) | ||
40 | goto found_first; | ||
41 | if (tmp) | ||
42 | goto found_middle; | ||
43 | size -= BITS_PER_LONG; | ||
44 | result += BITS_PER_LONG; | ||
45 | } | ||
46 | while (size & ~(BITS_PER_LONG-1)) { | ||
47 | if ((tmp = *(p++))) | ||
48 | goto found_middle; | ||
49 | result += BITS_PER_LONG; | ||
50 | size -= BITS_PER_LONG; | ||
51 | } | ||
52 | if (!size) | ||
53 | return result; | ||
54 | tmp = *p; | ||
24 | 55 | ||
25 | addr += offset / NBITS; | 56 | found_first: |
57 | tmp &= (~0UL >> (BITS_PER_LONG - size)); | ||
58 | if (tmp == 0UL) /* Are any bits set? */ | ||
59 | return result + size; /* Nope. */ | ||
60 | found_middle: | ||
61 | return result + __ffs(tmp); | ||
62 | } | ||
26 | 63 | ||
27 | suboffset = offset % NBITS; | 64 | EXPORT_SYMBOL(find_next_bit); |
28 | if (suboffset) { | ||
29 | tmp = *addr; | ||
30 | tmp >>= suboffset; | ||
31 | if (tmp) | ||
32 | goto finish; | ||
33 | } | ||
34 | 65 | ||
35 | addr++; | 66 | /* |
67 | * This implementation of find_{first,next}_zero_bit was stolen from | ||
68 | * Linus' asm-alpha/bitops.h. | ||
69 | */ | ||
70 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, | ||
71 | unsigned long offset) | ||
72 | { | ||
73 | const unsigned long *p = addr + BITOP_WORD(offset); | ||
74 | unsigned long result = offset & ~(BITS_PER_LONG-1); | ||
75 | unsigned long tmp; | ||
76 | |||
77 | if (offset >= size) | ||
78 | return size; | ||
79 | size -= result; | ||
80 | offset %= BITS_PER_LONG; | ||
81 | if (offset) { | ||
82 | tmp = *(p++); | ||
83 | tmp |= ~0UL >> (BITS_PER_LONG - offset); | ||
84 | if (size < BITS_PER_LONG) | ||
85 | goto found_first; | ||
86 | if (~tmp) | ||
87 | goto found_middle; | ||
88 | size -= BITS_PER_LONG; | ||
89 | result += BITS_PER_LONG; | ||
90 | } | ||
91 | while (size & ~(BITS_PER_LONG-1)) { | ||
92 | if (~(tmp = *(p++))) | ||
93 | goto found_middle; | ||
94 | result += BITS_PER_LONG; | ||
95 | size -= BITS_PER_LONG; | ||
36 | } | 96 | } |
97 | if (!size) | ||
98 | return result; | ||
99 | tmp = *p; | ||
100 | |||
101 | found_first: | ||
102 | tmp |= ~0UL << size; | ||
103 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
104 | return result + size; /* Nope. */ | ||
105 | found_middle: | ||
106 | return result + ffz(tmp); | ||
107 | } | ||
108 | |||
109 | EXPORT_SYMBOL(find_next_zero_bit); | ||
37 | 110 | ||
38 | while ((tmp = *addr) == 0) | 111 | #ifdef __BIG_ENDIAN |
39 | addr++; | ||
40 | 112 | ||
41 | offset = (addr - base) * NBITS; | 113 | /* include/linux/byteorder does not support "unsigned long" type */ |
114 | static inline unsigned long ext2_swabp(const unsigned long * x) | ||
115 | { | ||
116 | #if BITS_PER_LONG == 64 | ||
117 | return (unsigned long) __swab64p((u64 *) x); | ||
118 | #elif BITS_PER_LONG == 32 | ||
119 | return (unsigned long) __swab32p((u32 *) x); | ||
120 | #else | ||
121 | #error BITS_PER_LONG not defined | ||
122 | #endif | ||
123 | } | ||
124 | |||
125 | /* include/linux/byteorder doesn't support "unsigned long" type */ | ||
126 | static inline unsigned long ext2_swab(const unsigned long y) | ||
127 | { | ||
128 | #if BITS_PER_LONG == 64 | ||
129 | return (unsigned long) __swab64((u64) y); | ||
130 | #elif BITS_PER_LONG == 32 | ||
131 | return (unsigned long) __swab32((u32) y); | ||
132 | #else | ||
133 | #error BITS_PER_LONG not defined | ||
134 | #endif | ||
135 | } | ||
42 | 136 | ||
43 | finish: | 137 | unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, unsigned |
44 | /* count the remaining bits without using __ffs() since that takes a 32-bit arg */ | 138 | long size, unsigned long offset) |
45 | while (!(tmp & 0xff)) { | 139 | { |
46 | offset += 8; | 140 | const unsigned long *p = addr + BITOP_WORD(offset); |
47 | tmp >>= 8; | 141 | unsigned long result = offset & ~(BITS_PER_LONG - 1); |
142 | unsigned long tmp; | ||
143 | |||
144 | if (offset >= size) | ||
145 | return size; | ||
146 | size -= result; | ||
147 | offset &= (BITS_PER_LONG - 1UL); | ||
148 | if (offset) { | ||
149 | tmp = ext2_swabp(p++); | ||
150 | tmp |= (~0UL >> (BITS_PER_LONG - offset)); | ||
151 | if (size < BITS_PER_LONG) | ||
152 | goto found_first; | ||
153 | if (~tmp) | ||
154 | goto found_middle; | ||
155 | size -= BITS_PER_LONG; | ||
156 | result += BITS_PER_LONG; | ||
48 | } | 157 | } |
49 | 158 | ||
50 | while (!(tmp & 1)) { | 159 | while (size & ~(BITS_PER_LONG - 1)) { |
51 | offset++; | 160 | if (~(tmp = *(p++))) |
52 | tmp >>= 1; | 161 | goto found_middle_swap; |
162 | result += BITS_PER_LONG; | ||
163 | size -= BITS_PER_LONG; | ||
53 | } | 164 | } |
165 | if (!size) | ||
166 | return result; | ||
167 | tmp = ext2_swabp(p); | ||
168 | found_first: | ||
169 | tmp |= ~0UL << size; | ||
170 | if (tmp == ~0UL) /* Are any bits zero? */ | ||
171 | return result + size; /* Nope. Skip ffz */ | ||
172 | found_middle: | ||
173 | return result + ffz(tmp); | ||
54 | 174 | ||
55 | return offset; | 175 | found_middle_swap: |
176 | return result + ffz(ext2_swab(tmp)); | ||
56 | } | 177 | } |
57 | 178 | ||
58 | EXPORT_SYMBOL(find_next_bit); | 179 | EXPORT_SYMBOL(generic_find_next_zero_le_bit); |
180 | |||
181 | #endif /* __BIG_ENDIAN */ | ||
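The rewritten find_next_bit()/find_next_zero_bit() take and return unsigned long, so callers can walk bitmaps wider than INT_MAX bits. A hypothetical caller, sketched only to show the usual iteration pattern with the new signature:

#include <linux/kernel.h>
#include <linux/bitops.h>

/* Hypothetical caller: visit every set bit of bitmap[], nbits wide. */
static void visit_set_bits(const unsigned long *bitmap, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_bit(bitmap, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(bitmap, nbits, bit + 1))
		pr_debug("bit %lu is set\n", bit);
}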
diff --git a/lib/hweight.c b/lib/hweight.c new file mode 100644 index 000000000000..438257671708 --- /dev/null +++ b/lib/hweight.c | |||
@@ -0,0 +1,53 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <asm/types.h> | ||
3 | |||
4 | /** | ||
5 | * hweightN - returns the hamming weight of an N-bit word | ||
6 | * @x: the word to weigh | ||
7 | * | ||
8 | * The Hamming Weight of a number is the total number of bits set in it. | ||
9 | */ | ||
10 | |||
11 | unsigned int hweight32(unsigned int w) | ||
12 | { | ||
13 | unsigned int res = w - ((w >> 1) & 0x55555555); | ||
14 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | ||
15 | res = (res + (res >> 4)) & 0x0F0F0F0F; | ||
16 | res = res + (res >> 8); | ||
17 | return (res + (res >> 16)) & 0x000000FF; | ||
18 | } | ||
19 | EXPORT_SYMBOL(hweight32); | ||
20 | |||
21 | unsigned int hweight16(unsigned int w) | ||
22 | { | ||
23 | unsigned int res = w - ((w >> 1) & 0x5555); | ||
24 | res = (res & 0x3333) + ((res >> 2) & 0x3333); | ||
25 | res = (res + (res >> 4)) & 0x0F0F; | ||
26 | return (res + (res >> 8)) & 0x00FF; | ||
27 | } | ||
28 | EXPORT_SYMBOL(hweight16); | ||
29 | |||
30 | unsigned int hweight8(unsigned int w) | ||
31 | { | ||
32 | unsigned int res = w - ((w >> 1) & 0x55); | ||
33 | res = (res & 0x33) + ((res >> 2) & 0x33); | ||
34 | return (res + (res >> 4)) & 0x0F; | ||
35 | } | ||
36 | EXPORT_SYMBOL(hweight8); | ||
37 | |||
38 | unsigned long hweight64(__u64 w) | ||
39 | { | ||
40 | #if BITS_PER_LONG == 32 | ||
41 | return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); | ||
42 | #elif BITS_PER_LONG == 64 | ||
43 | __u64 res = w - ((w >> 1) & 0x5555555555555555ul); | ||
44 | res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); | ||
45 | res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; | ||
46 | res = res + (res >> 8); | ||
47 | res = res + (res >> 16); | ||
48 | return (res + (res >> 32)) & 0x00000000000000FFul; | ||
49 | #else | ||
50 | #error BITS_PER_LONG not defined | ||
51 | #endif | ||
52 | } | ||
53 | EXPORT_SYMBOL(hweight64); | ||
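lib/hweight.c uses the classic parallel (SWAR) bit count: adjacent 1-bit counts are folded into 2-bit, then 4-bit, 8-bit and 16-bit partial sums. The standalone program below mirrors the 32-bit variant and cross-checks it against a naive shift-and-test loop; it is an illustration, not kernel code:

#include <stdio.h>
#include <stdint.h>

/* Same parallel (SWAR) bit count as hweight32() above: fold adjacent
 * 1-bit counts into 2-, 4-, 8- and 16-bit partial sums. */
static unsigned int popcount32(uint32_t w)
{
	uint32_t res = w - ((w >> 1) & 0x55555555);

	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
	res = (res + (res >> 4)) & 0x0F0F0F0F;
	res = res + (res >> 8);
	return (res + (res >> 16)) & 0x000000FF;
}

/* Naive reference: test one bit per iteration. */
static unsigned int popcount_naive(uint32_t w)
{
	unsigned int n = 0;

	while (w) {
		n += w & 1;
		w >>= 1;
	}
	return n;
}

int main(void)
{
	uint32_t samples[] = { 0x0, 0x1, 0xF0F0F0F0, 0xFFFFFFFF, 0xDEADBEEF };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("0x%08X -> %u (naive %u)\n", samples[i],
		       popcount32(samples[i]), popcount_naive(samples[i]));
	return 0;
}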
diff --git a/mm/highmem.c b/mm/highmem.c index d0ea1eec6a9a..55885f64af40 100644 --- a/mm/highmem.c +++ b/mm/highmem.c | |||
@@ -31,14 +31,9 @@ | |||
31 | 31 | ||
32 | static mempool_t *page_pool, *isa_page_pool; | 32 | static mempool_t *page_pool, *isa_page_pool; |
33 | 33 | ||
34 | static void *page_pool_alloc_isa(gfp_t gfp_mask, void *data) | 34 | static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data) |
35 | { | 35 | { |
36 | return alloc_page(gfp_mask | GFP_DMA); | 36 | return mempool_alloc_pages(gfp_mask | GFP_DMA, data); |
37 | } | ||
38 | |||
39 | static void page_pool_free(void *page, void *data) | ||
40 | { | ||
41 | __free_page(page); | ||
42 | } | 37 | } |
43 | 38 | ||
44 | /* | 39 | /* |
@@ -51,11 +46,6 @@ static void page_pool_free(void *page, void *data) | |||
51 | */ | 46 | */ |
52 | #ifdef CONFIG_HIGHMEM | 47 | #ifdef CONFIG_HIGHMEM |
53 | 48 | ||
54 | static void *page_pool_alloc(gfp_t gfp_mask, void *data) | ||
55 | { | ||
56 | return alloc_page(gfp_mask); | ||
57 | } | ||
58 | |||
59 | static int pkmap_count[LAST_PKMAP]; | 49 | static int pkmap_count[LAST_PKMAP]; |
60 | static unsigned int last_pkmap_nr; | 50 | static unsigned int last_pkmap_nr; |
61 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); | 51 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock); |
@@ -229,7 +219,7 @@ static __init int init_emergency_pool(void) | |||
229 | if (!i.totalhigh) | 219 | if (!i.totalhigh) |
230 | return 0; | 220 | return 0; |
231 | 221 | ||
232 | page_pool = mempool_create(POOL_SIZE, page_pool_alloc, page_pool_free, NULL); | 222 | page_pool = mempool_create_page_pool(POOL_SIZE, 0); |
233 | if (!page_pool) | 223 | if (!page_pool) |
234 | BUG(); | 224 | BUG(); |
235 | printk("highmem bounce pool size: %d pages\n", POOL_SIZE); | 225 | printk("highmem bounce pool size: %d pages\n", POOL_SIZE); |
@@ -272,7 +262,8 @@ int init_emergency_isa_pool(void) | |||
272 | if (isa_page_pool) | 262 | if (isa_page_pool) |
273 | return 0; | 263 | return 0; |
274 | 264 | ||
275 | isa_page_pool = mempool_create(ISA_POOL_SIZE, page_pool_alloc_isa, page_pool_free, NULL); | 265 | isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa, |
266 | mempool_free_pages, (void *) 0); | ||
276 | if (!isa_page_pool) | 267 | if (!isa_page_pool) |
277 | BUG(); | 268 | BUG(); |
278 | 269 | ||
@@ -337,7 +328,7 @@ static void bounce_end_io(struct bio *bio, mempool_t *pool, int err) | |||
337 | bio_put(bio); | 328 | bio_put(bio); |
338 | } | 329 | } |
339 | 330 | ||
340 | static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done,int err) | 331 | static int bounce_end_io_write(struct bio *bio, unsigned int bytes_done, int err) |
341 | { | 332 | { |
342 | if (bio->bi_size) | 333 | if (bio->bi_size) |
343 | return 1; | 334 | return 1; |
@@ -384,7 +375,7 @@ static int bounce_end_io_read_isa(struct bio *bio, unsigned int bytes_done, int | |||
384 | } | 375 | } |
385 | 376 | ||
386 | static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, | 377 | static void __blk_queue_bounce(request_queue_t *q, struct bio **bio_orig, |
387 | mempool_t *pool) | 378 | mempool_t *pool) |
388 | { | 379 | { |
389 | struct page *page; | 380 | struct page *page; |
390 | struct bio *bio = NULL; | 381 | struct bio *bio = NULL; |
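With the highmem bounce pool now built from mempool_create_page_pool(), the private alloc/free callbacks are no longer needed for the common case. A hypothetical sketch of the same pattern elsewhere (all names below are invented for illustration):

#include <linux/mempool.h>
#include <linux/errno.h>

/* Hypothetical sketch: keep eight order-0 pages in reserve so page
 * allocations can still make progress under memory pressure. */
static mempool_t *my_page_pool;

static int my_pool_setup(void)
{
	my_page_pool = mempool_create_page_pool(8, 0);	/* 8 pages, order 0 */
	return my_page_pool ? 0 : -ENOMEM;
}

static void my_pool_teardown(void)
{
	mempool_destroy(my_page_pool);
}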
diff --git a/mm/memory.c b/mm/memory.c index d90ff9d04957..8d8f52569f32 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -1071,6 +1071,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm, | |||
1071 | } | 1071 | } |
1072 | if (pages) { | 1072 | if (pages) { |
1073 | pages[i] = page; | 1073 | pages[i] = page; |
1074 | |||
1075 | flush_anon_page(page, start); | ||
1074 | flush_dcache_page(page); | 1076 | flush_dcache_page(page); |
1075 | } | 1077 | } |
1076 | if (vmas) | 1078 | if (vmas) |
diff --git a/mm/mempool.c b/mm/mempool.c index 9ef13dd68ab7..fe6e05289cc5 100644 --- a/mm/mempool.c +++ b/mm/mempool.c | |||
@@ -289,3 +289,45 @@ void mempool_free_slab(void *element, void *pool_data) | |||
289 | kmem_cache_free(mem, element); | 289 | kmem_cache_free(mem, element); |
290 | } | 290 | } |
291 | EXPORT_SYMBOL(mempool_free_slab); | 291 | EXPORT_SYMBOL(mempool_free_slab); |
292 | |||
293 | /* | ||
294 | * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory | ||
295 | * specified by pool_data | ||
296 | */ | ||
297 | void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data) | ||
298 | { | ||
299 | size_t size = (size_t)(long)pool_data; | ||
300 | return kmalloc(size, gfp_mask); | ||
301 | } | ||
302 | EXPORT_SYMBOL(mempool_kmalloc); | ||
303 | |||
304 | void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data) | ||
305 | { | ||
306 | size_t size = (size_t) pool_data; | ||
307 | return kzalloc(size, gfp_mask); | ||
308 | } | ||
309 | EXPORT_SYMBOL(mempool_kzalloc); | ||
310 | |||
311 | void mempool_kfree(void *element, void *pool_data) | ||
312 | { | ||
313 | kfree(element); | ||
314 | } | ||
315 | EXPORT_SYMBOL(mempool_kfree); | ||
316 | |||
317 | /* | ||
318 | * A simple mempool-backed page allocator that allocates pages | ||
319 | * of the order specified by pool_data. | ||
320 | */ | ||
321 | void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data) | ||
322 | { | ||
323 | int order = (int)(long)pool_data; | ||
324 | return alloc_pages(gfp_mask, order); | ||
325 | } | ||
326 | EXPORT_SYMBOL(mempool_alloc_pages); | ||
327 | |||
328 | void mempool_free_pages(void *element, void *pool_data) | ||
329 | { | ||
330 | int order = (int)(long)pool_data; | ||
331 | __free_pages(element, order); | ||
332 | } | ||
333 | EXPORT_SYMBOL(mempool_free_pages); | ||
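The new mempool_kmalloc()/mempool_kfree() and mempool_alloc_pages()/mempool_free_pages() helpers let callers drop their one-off shims; pool_data carries the kmalloc size or the page order, cast to and from void *. A hypothetical user might look like this sketch (names are invented):

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical sketch: 16 pre-reserved 256-byte buffers; the allocation
 * size rides along in pool_data, exactly as mempool_kmalloc() expects. */
static mempool_t *buf_pool;

static int buf_pool_setup(void)
{
	buf_pool = mempool_create(16, mempool_kmalloc, mempool_kfree,
				  (void *)(unsigned long)256);
	return buf_pool ? 0 : -ENOMEM;
}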
diff --git a/net/ipv4/ipvs/ip_vs_ctl.c b/net/ipv4/ipvs/ip_vs_ctl.c index 7f0288b25fa1..f28ec6882162 100644 --- a/net/ipv4/ipvs/ip_vs_ctl.c +++ b/net/ipv4/ipvs/ip_vs_ctl.c | |||
@@ -34,6 +34,7 @@ | |||
34 | 34 | ||
35 | #include <linux/netfilter.h> | 35 | #include <linux/netfilter.h> |
36 | #include <linux/netfilter_ipv4.h> | 36 | #include <linux/netfilter_ipv4.h> |
37 | #include <linux/mutex.h> | ||
37 | 38 | ||
38 | #include <net/ip.h> | 39 | #include <net/ip.h> |
39 | #include <net/route.h> | 40 | #include <net/route.h> |
@@ -44,7 +45,7 @@ | |||
44 | #include <net/ip_vs.h> | 45 | #include <net/ip_vs.h> |
45 | 46 | ||
46 | /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ | 47 | /* semaphore for IPVS sockopts. And, [gs]etsockopt may sleep. */ |
47 | static DECLARE_MUTEX(__ip_vs_mutex); | 48 | static DEFINE_MUTEX(__ip_vs_mutex); |
48 | 49 | ||
49 | /* lock for service table */ | 50 | /* lock for service table */ |
50 | static DEFINE_RWLOCK(__ip_vs_svc_lock); | 51 | static DEFINE_RWLOCK(__ip_vs_svc_lock); |
@@ -1950,7 +1951,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
1950 | /* increase the module use count */ | 1951 | /* increase the module use count */ |
1951 | ip_vs_use_count_inc(); | 1952 | ip_vs_use_count_inc(); |
1952 | 1953 | ||
1953 | if (down_interruptible(&__ip_vs_mutex)) { | 1954 | if (mutex_lock_interruptible(&__ip_vs_mutex)) { |
1954 | ret = -ERESTARTSYS; | 1955 | ret = -ERESTARTSYS; |
1955 | goto out_dec; | 1956 | goto out_dec; |
1956 | } | 1957 | } |
@@ -2041,7 +2042,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2041 | ip_vs_service_put(svc); | 2042 | ip_vs_service_put(svc); |
2042 | 2043 | ||
2043 | out_unlock: | 2044 | out_unlock: |
2044 | up(&__ip_vs_mutex); | 2045 | mutex_unlock(&__ip_vs_mutex); |
2045 | out_dec: | 2046 | out_dec: |
2046 | /* decrease the module use count */ | 2047 | /* decrease the module use count */ |
2047 | ip_vs_use_count_dec(); | 2048 | ip_vs_use_count_dec(); |
@@ -2211,7 +2212,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2211 | if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) | 2212 | if (copy_from_user(arg, user, get_arglen[GET_CMDID(cmd)]) != 0) |
2212 | return -EFAULT; | 2213 | return -EFAULT; |
2213 | 2214 | ||
2214 | if (down_interruptible(&__ip_vs_mutex)) | 2215 | if (mutex_lock_interruptible(&__ip_vs_mutex)) |
2215 | return -ERESTARTSYS; | 2216 | return -ERESTARTSYS; |
2216 | 2217 | ||
2217 | switch (cmd) { | 2218 | switch (cmd) { |
@@ -2330,7 +2331,7 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2330 | } | 2331 | } |
2331 | 2332 | ||
2332 | out: | 2333 | out: |
2333 | up(&__ip_vs_mutex); | 2334 | mutex_unlock(&__ip_vs_mutex); |
2334 | return ret; | 2335 | return ret; |
2335 | } | 2336 | } |
2336 | 2337 | ||
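The IPVS change is the standard semaphore-to-mutex conversion: DECLARE_MUTEX becomes DEFINE_MUTEX, down_interruptible() becomes mutex_lock_interruptible(), and up() becomes mutex_unlock(). The shape of the converted code, as a hypothetical sketch (function and mutex names are invented):

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(example_mutex);		/* was: static DECLARE_MUTEX(...) */

/* Hypothetical handler showing the converted locking pattern. */
static int example_set_ctl(void)
{
	int ret = 0;

	if (mutex_lock_interruptible(&example_mutex))	/* was: down_interruptible() */
		return -ERESTARTSYS;

	/* ... sockopt work done under the lock ... */

	mutex_unlock(&example_mutex);			/* was: up() */
	return ret;
}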
diff --git a/net/ipv4/netfilter/ipt_hashlimit.c b/net/ipv4/netfilter/ipt_hashlimit.c index dc1521c5aa81..ba5e23505e88 100644 --- a/net/ipv4/netfilter/ipt_hashlimit.c +++ b/net/ipv4/netfilter/ipt_hashlimit.c | |||
@@ -40,6 +40,7 @@ | |||
40 | 40 | ||
41 | /* FIXME: this is just for IP_NF_ASSERRT */ | 41 | /* FIXME: this is just for IP_NF_ASSERRT */ |
42 | #include <linux/netfilter_ipv4/ip_conntrack.h> | 42 | #include <linux/netfilter_ipv4/ip_conntrack.h> |
43 | #include <linux/mutex.h> | ||
43 | 44 | ||
44 | MODULE_LICENSE("GPL"); | 45 | MODULE_LICENSE("GPL"); |
45 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); | 46 | MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); |
@@ -92,7 +93,7 @@ struct ipt_hashlimit_htable { | |||
92 | }; | 93 | }; |
93 | 94 | ||
94 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ | 95 | static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ |
95 | static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */ | 96 | static DEFINE_MUTEX(hlimit_mutex); /* additional checkentry protection */ |
96 | static HLIST_HEAD(hashlimit_htables); | 97 | static HLIST_HEAD(hashlimit_htables); |
97 | static kmem_cache_t *hashlimit_cachep __read_mostly; | 98 | static kmem_cache_t *hashlimit_cachep __read_mostly; |
98 | 99 | ||
@@ -542,13 +543,13 @@ hashlimit_checkentry(const char *tablename, | |||
542 | * call vmalloc, and that can sleep. And we cannot just re-search | 543 | * call vmalloc, and that can sleep. And we cannot just re-search |
543 | * the list of htable's in htable_create(), since then we would | 544 | * the list of htable's in htable_create(), since then we would |
544 | * create duplicate proc files. -HW */ | 545 | * create duplicate proc files. -HW */ |
545 | down(&hlimit_mutex); | 546 | mutex_lock(&hlimit_mutex); |
546 | r->hinfo = htable_find_get(r->name); | 547 | r->hinfo = htable_find_get(r->name); |
547 | if (!r->hinfo && (htable_create(r) != 0)) { | 548 | if (!r->hinfo && (htable_create(r) != 0)) { |
548 | up(&hlimit_mutex); | 549 | mutex_unlock(&hlimit_mutex); |
549 | return 0; | 550 | return 0; |
550 | } | 551 | } |
551 | up(&hlimit_mutex); | 552 | mutex_unlock(&hlimit_mutex); |
552 | 553 | ||
553 | /* Ugly hack: For SMP, we only want to use one set */ | 554 | /* Ugly hack: For SMP, we only want to use one set */ |
554 | r->u.master = r; | 555 | r->u.master = r; |
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 43e72419c868..f329b72578f5 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c | |||
@@ -13,26 +13,27 @@ | |||
13 | #include <linux/socket.h> | 13 | #include <linux/socket.h> |
14 | #include <linux/string.h> | 14 | #include <linux/string.h> |
15 | #include <linux/skbuff.h> | 15 | #include <linux/skbuff.h> |
16 | #include <linux/mutex.h> | ||
16 | #include <net/sock.h> | 17 | #include <net/sock.h> |
17 | #include <net/genetlink.h> | 18 | #include <net/genetlink.h> |
18 | 19 | ||
19 | struct sock *genl_sock = NULL; | 20 | struct sock *genl_sock = NULL; |
20 | 21 | ||
21 | static DECLARE_MUTEX(genl_sem); /* serialization of message processing */ | 22 | static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ |
22 | 23 | ||
23 | static void genl_lock(void) | 24 | static void genl_lock(void) |
24 | { | 25 | { |
25 | down(&genl_sem); | 26 | mutex_lock(&genl_mutex); |
26 | } | 27 | } |
27 | 28 | ||
28 | static int genl_trylock(void) | 29 | static int genl_trylock(void) |
29 | { | 30 | { |
30 | return down_trylock(&genl_sem); | 31 | return !mutex_trylock(&genl_mutex); |
31 | } | 32 | } |
32 | 33 | ||
33 | static void genl_unlock(void) | 34 | static void genl_unlock(void) |
34 | { | 35 | { |
35 | up(&genl_sem); | 36 | mutex_unlock(&genl_mutex); |
36 | 37 | ||
37 | if (genl_sock && genl_sock->sk_receive_queue.qlen) | 38 | if (genl_sock && genl_sock->sk_receive_queue.qlen) |
38 | genl_sock->sk_data_ready(genl_sock, 0); | 39 | genl_sock->sk_data_ready(genl_sock, 0); |
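Note the polarity flip in genl_trylock(): down_trylock() returns 0 when it acquires the semaphore, while mutex_trylock() returns 1 on acquisition, so the wrapper negates the result to keep its existing nonzero-means-busy contract. Illustrated as a sketch (the wrapper name is invented):

#include <linux/mutex.h>

/* Illustrative wrapper with the same contract as genl_trylock():
 * nonzero means the lock could not be taken. */
static int trylock_busy(struct mutex *m)
{
	return !mutex_trylock(m);
}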
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index b9969b91a9f7..5c3eee768504 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -1167,16 +1167,12 @@ rpc_init_mempool(void) | |||
1167 | NULL, NULL); | 1167 | NULL, NULL); |
1168 | if (!rpc_buffer_slabp) | 1168 | if (!rpc_buffer_slabp) |
1169 | goto err_nomem; | 1169 | goto err_nomem; |
1170 | rpc_task_mempool = mempool_create(RPC_TASK_POOLSIZE, | 1170 | rpc_task_mempool = mempool_create_slab_pool(RPC_TASK_POOLSIZE, |
1171 | mempool_alloc_slab, | 1171 | rpc_task_slabp); |
1172 | mempool_free_slab, | ||
1173 | rpc_task_slabp); | ||
1174 | if (!rpc_task_mempool) | 1172 | if (!rpc_task_mempool) |
1175 | goto err_nomem; | 1173 | goto err_nomem; |
1176 | rpc_buffer_mempool = mempool_create(RPC_BUFFER_POOLSIZE, | 1174 | rpc_buffer_mempool = mempool_create_slab_pool(RPC_BUFFER_POOLSIZE, |
1177 | mempool_alloc_slab, | 1175 | rpc_buffer_slabp); |
1178 | mempool_free_slab, | ||
1179 | rpc_buffer_slabp); | ||
1180 | if (!rpc_buffer_mempool) | 1176 | if (!rpc_buffer_mempool) |
1181 | goto err_nomem; | 1177 | goto err_nomem; |
1182 | return 0; | 1178 | return 0; |
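mempool_create_slab_pool(min_nr, cachep) is shorthand for mempool_create(min_nr, mempool_alloc_slab, mempool_free_slab, cachep), which is why the three-argument callback boilerplate disappears here. A hypothetical sketch of a slab-backed pool set up the same way (cache and pool names are invented; the six-argument kmem_cache_create() form shown is assumed for this era):

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/errno.h>

/* Hypothetical sketch: back a small object pool with a dedicated slab
 * cache, using the new shorthand constructor. */
static kmem_cache_t *obj_slab;
static mempool_t *obj_pool;

static int obj_pool_setup(void)
{
	obj_slab = kmem_cache_create("obj_cache", 128, 0, 0, NULL, NULL);
	if (!obj_slab)
		return -ENOMEM;
	obj_pool = mempool_create_slab_pool(4, obj_slab);
	if (!obj_pool) {
		kmem_cache_destroy(obj_slab);
		return -ENOMEM;
	}
	return 0;
}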
diff --git a/sound/oss/cmpci.c b/sound/oss/cmpci.c index 1fbd5137f6d7..de60a059ff5f 100644 --- a/sound/oss/cmpci.c +++ b/sound/oss/cmpci.c | |||
@@ -1713,7 +1713,7 @@ static int mixer_ioctl(struct cm_state *s, unsigned int cmd, unsigned long arg) | |||
1713 | case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */ | 1713 | case SOUND_MIXER_RECSRC: /* Arg contains a bit for each recording source */ |
1714 | if (get_user(val, p)) | 1714 | if (get_user(val, p)) |
1715 | return -EFAULT; | 1715 | return -EFAULT; |
1716 | i = generic_hweight32(val); | 1716 | i = hweight32(val); |
1717 | for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) { | 1717 | for (j = i = 0; i < SOUND_MIXER_NRDEVICES; i++) { |
1718 | if (!(val & (1 << i))) | 1718 | if (!(val & (1 << i))) |
1719 | continue; | 1719 | continue; |
diff --git a/sound/oss/sonicvibes.c b/sound/oss/sonicvibes.c index 69a4b8778b51..4471757b7985 100644 --- a/sound/oss/sonicvibes.c +++ b/sound/oss/sonicvibes.c | |||
@@ -407,24 +407,6 @@ static inline unsigned ld2(unsigned int x) | |||
407 | return r; | 407 | return r; |
408 | } | 408 | } |
409 | 409 | ||
410 | /* | ||
411 | * hweightN: returns the hamming weight (i.e. the number | ||
412 | * of bits set) of a N-bit word | ||
413 | */ | ||
414 | |||
415 | #ifdef hweight32 | ||
416 | #undef hweight32 | ||
417 | #endif | ||
418 | |||
419 | static inline unsigned int hweight32(unsigned int w) | ||
420 | { | ||
421 | unsigned int res = (w & 0x55555555) + ((w >> 1) & 0x55555555); | ||
422 | res = (res & 0x33333333) + ((res >> 2) & 0x33333333); | ||
423 | res = (res & 0x0F0F0F0F) + ((res >> 4) & 0x0F0F0F0F); | ||
424 | res = (res & 0x00FF00FF) + ((res >> 8) & 0x00FF00FF); | ||
425 | return (res & 0x0000FFFF) + ((res >> 16) & 0x0000FFFF); | ||
426 | } | ||
427 | |||
428 | /* --------------------------------------------------------------------- */ | 410 | /* --------------------------------------------------------------------- */ |
429 | 411 | ||
430 | /* | 412 | /* |
diff --git a/sound/oss/vwsnd.c b/sound/oss/vwsnd.c index b372e88e857f..5f140c7586b3 100644 --- a/sound/oss/vwsnd.c +++ b/sound/oss/vwsnd.c | |||
@@ -248,27 +248,6 @@ typedef struct lithium { | |||
248 | } lithium_t; | 248 | } lithium_t; |
249 | 249 | ||
250 | /* | 250 | /* |
251 | * li_create initializes the lithium_t structure and sets up vm mappings | ||
252 | * to access the registers. | ||
253 | * Returns 0 on success, -errno on failure. | ||
254 | */ | ||
255 | |||
256 | static int __init li_create(lithium_t *lith, unsigned long baseaddr) | ||
257 | { | ||
258 | static void li_destroy(lithium_t *); | ||
259 | |||
260 | spin_lock_init(&lith->lock); | ||
261 | lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE); | ||
262 | lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE); | ||
263 | lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE); | ||
264 | if (!lith->page0 || !lith->page1 || !lith->page2) { | ||
265 | li_destroy(lith); | ||
266 | return -ENOMEM; | ||
267 | } | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * li_destroy destroys the lithium_t structure and vm mappings. | 251 | * li_destroy destroys the lithium_t structure and vm mappings. |
273 | */ | 252 | */ |
274 | 253 | ||
@@ -289,6 +268,25 @@ static void li_destroy(lithium_t *lith) | |||
289 | } | 268 | } |
290 | 269 | ||
291 | /* | 270 | /* |
271 | * li_create initializes the lithium_t structure and sets up vm mappings | ||
272 | * to access the registers. | ||
273 | * Returns 0 on success, -errno on failure. | ||
274 | */ | ||
275 | |||
276 | static int __init li_create(lithium_t *lith, unsigned long baseaddr) | ||
277 | { | ||
278 | spin_lock_init(&lith->lock); | ||
279 | lith->page0 = ioremap_nocache(baseaddr + LI_PAGE0_OFFSET, PAGE_SIZE); | ||
280 | lith->page1 = ioremap_nocache(baseaddr + LI_PAGE1_OFFSET, PAGE_SIZE); | ||
281 | lith->page2 = ioremap_nocache(baseaddr + LI_PAGE2_OFFSET, PAGE_SIZE); | ||
282 | if (!lith->page0 || !lith->page1 || !lith->page2) { | ||
283 | li_destroy(lith); | ||
284 | return -ENOMEM; | ||
285 | } | ||
286 | return 0; | ||
287 | } | ||
288 | |||
289 | /* | ||
292 | * basic register accessors - read/write long/byte | 290 | * basic register accessors - read/write long/byte |
293 | */ | 291 | */ |
294 | 292 | ||