201 files changed, 4996 insertions, 1250 deletions
@@ -3241,14 +3241,9 @@ S: 12725 SW Millikan Way, Suite 400 | |||
3241 | S: Beaverton, Oregon 97005 | 3241 | S: Beaverton, Oregon 97005 |
3242 | S: USA | 3242 | S: USA |
3243 | 3243 | ||
3244 | N: Marcelo W. Tosatti | 3244 | N: Marcelo Tosatti |
3245 | E: marcelo.tosatti@cyclades.com | 3245 | E: marcelo@kvack.org |
3246 | D: Miscellaneous kernel hacker | ||
3247 | D: v2.4 kernel maintainer | 3246 | D: v2.4 kernel maintainer |
3248 | D: Current pc300/cyclades maintainer | ||
3249 | S: Cyclades Corporation | ||
3250 | S: Av Cristovao Colombo, 462. Floresta. | ||
3251 | S: Porto Alegre | ||
3252 | S: Brazil | 3247 | S: Brazil |
3253 | 3248 | ||
3254 | N: Stefan Traby | 3249 | N: Stefan Traby |
diff --git a/Documentation/devices.txt b/Documentation/devices.txt index 3c406acd4dfa..b369a8c46a73 100644 --- a/Documentation/devices.txt +++ b/Documentation/devices.txt | |||
@@ -1721,11 +1721,6 @@ Your cooperation is appreciated. | |||
1721 | These devices support the same API as the generic SCSI | 1721 | These devices support the same API as the generic SCSI |
1722 | devices. | 1722 | devices. |
1723 | 1723 | ||
1724 | 97 block Packet writing for CD/DVD devices | ||
1725 | 0 = /dev/pktcdvd0 First packet-writing module | ||
1726 | 1 = /dev/pktcdvd1 Second packet-writing module | ||
1727 | ... | ||
1728 | |||
1729 | 98 char Control and Measurement Device (comedi) | 1724 | 98 char Control and Measurement Device (comedi) |
1730 | 0 = /dev/comedi0 First comedi device | 1725 | 0 = /dev/comedi0 First comedi device |
1731 | 1 = /dev/comedi1 Second comedi device | 1726 | 1 = /dev/comedi1 Second comedi device |
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt index 421bcfff6ad2..43ab119963d5 100644 --- a/Documentation/feature-removal-schedule.txt +++ b/Documentation/feature-removal-schedule.txt | |||
@@ -57,6 +57,15 @@ Who: Jody McIntyre <scjody@steamballoon.com> | |||
57 | 57 | ||
58 | --------------------------- | 58 | --------------------------- |
59 | 59 | ||
60 | What: sbp2: module parameter "force_inquiry_hack" | ||
61 | When: July 2006 | ||
62 | Why: Superseded by parameter "workarounds". Both parameters are meant to be | ||
63 | used ad-hoc and for single devices only, i.e. not in modprobe.conf, | ||
64 | therefore the impact of this feature replacement should be low. | ||
65 | Who: Stefan Richter <stefanr@s5r6.in-berlin.de> | ||
66 | |||
67 | --------------------------- | ||
68 | |||
60 | What: Video4Linux API 1 ioctls and video_decoder.h from Video devices. | 69 | What: Video4Linux API 1 ioctls and video_decoder.h from Video devices. |
61 | When: July 2006 | 70 | When: July 2006 |
62 | Why: V4L1 API was replaced by V4L2 API during migration from 2.4 to 2.6 | 71 | Why: V4L1 API was replaced by V4L2 API during migration from 2.4 to 2.6 |
diff --git a/Documentation/firmware_class/README b/Documentation/firmware_class/README index 43e836c07ae8..e9cc8bb26f7d 100644 --- a/Documentation/firmware_class/README +++ b/Documentation/firmware_class/README | |||
@@ -105,20 +105,3 @@ | |||
105 | on the setup, so I think that the choice on what firmware to make | 105 | on the setup, so I think that the choice on what firmware to make |
106 | persistent should be left to userspace. | 106 | persistent should be left to userspace. |
107 | 107 | ||
108 | - Why register_firmware()+__init can be useful: | ||
109 | - For boot devices needing firmware. | ||
110 | - To make the transition easier: | ||
111 | The firmware can be declared __init and register_firmware() | ||
112 | called on module_init. Then the firmware is warranted to be | ||
113 | there even if "firmware hotplug userspace" is not there yet or | ||
114 | it doesn't yet provide the needed firmware. | ||
115 | Once the firmware is widely available in userspace, it can be | ||
116 | removed from the kernel. Or made optional (CONFIG_.*_FIRMWARE). | ||
117 | |||
118 | In either case, if firmware hotplug support is there, it can move the | ||
119 | firmware out of kernel memory into the real filesystem for later | ||
120 | usage. | ||
121 | |||
122 | Note: If persistence is implemented on top of initramfs, | ||
123 | register_firmware() may not be appropriate. | ||
124 | |||
diff --git a/Documentation/firmware_class/firmware_sample_driver.c b/Documentation/firmware_class/firmware_sample_driver.c index ad3edaba4533..87feccdb5c9f 100644 --- a/Documentation/firmware_class/firmware_sample_driver.c +++ b/Documentation/firmware_class/firmware_sample_driver.c | |||
@@ -5,8 +5,6 @@ | |||
5 | * | 5 | * |
6 | * Sample code on how to use request_firmware() from drivers. | 6 | * Sample code on how to use request_firmware() from drivers. |
7 | * | 7 | * |
8 | * Note that register_firmware() is currently useless. | ||
9 | * | ||
10 | */ | 8 | */ |
11 | 9 | ||
12 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -17,11 +15,6 @@ | |||
17 | 15 | ||
18 | #include "linux/firmware.h" | 16 | #include "linux/firmware.h" |
19 | 17 | ||
20 | #define WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE | ||
21 | #ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE | ||
22 | char __init inkernel_firmware[] = "let's say that this is firmware\n"; | ||
23 | #endif | ||
24 | |||
25 | static struct device ghost_device = { | 18 | static struct device ghost_device = { |
26 | .bus_id = "ghost0", | 19 | .bus_id = "ghost0", |
27 | }; | 20 | }; |
@@ -104,10 +97,6 @@ static void sample_probe_async(void) | |||
104 | 97 | ||
105 | static int sample_init(void) | 98 | static int sample_init(void) |
106 | { | 99 | { |
107 | #ifdef WE_CAN_NEED_FIRMWARE_BEFORE_USERSPACE_IS_AVAILABLE | ||
108 | register_firmware("sample_driver_fw", inkernel_firmware, | ||
109 | sizeof(inkernel_firmware)); | ||
110 | #endif | ||
111 | device_initialize(&ghost_device); | 100 | device_initialize(&ghost_device); |
112 | /* since there is no real hardware insertion I just call the | 101 | /* since there is no real hardware insertion I just call the |
113 | * sample probe functions here */ | 102 | * sample probe functions here */ |
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt index 92f0056d928c..c61d8b876fdb 100644 --- a/Documentation/memory-barriers.txt +++ b/Documentation/memory-barriers.txt | |||
@@ -1031,7 +1031,7 @@ conflict on any particular lock. | |||
1031 | LOCKS VS MEMORY ACCESSES | 1031 | LOCKS VS MEMORY ACCESSES |
1032 | ------------------------ | 1032 | ------------------------ |
1033 | 1033 | ||
1034 | Consider the following: the system has a pair of spinlocks (N) and (Q), and | 1034 | Consider the following: the system has a pair of spinlocks (M) and (Q), and |
1035 | three CPUs; then should the following sequence of events occur: | 1035 | three CPUs; then should the following sequence of events occur: |
1036 | 1036 | ||
1037 | CPU 1 CPU 2 | 1037 | CPU 1 CPU 2 |
@@ -1678,7 +1678,7 @@ CPU's caches by some other cache event: | |||
1678 | smp_wmb(); | 1678 | smp_wmb(); |
1679 | <A:modify v=2> <C:busy> | 1679 | <A:modify v=2> <C:busy> |
1680 | <C:queue v=2> | 1680 | <C:queue v=2> |
1681 | p = &b; q = p; | 1681 | p = &v; q = p; |
1682 | <D:request p> | 1682 | <D:request p> |
1683 | <B:modify p=&v> <D:commit p=&v> | 1683 | <B:modify p=&v> <D:commit p=&v> |
1684 | <D:read p> | 1684 | <D:read p> |
diff --git a/Documentation/spi/pxa2xx b/Documentation/spi/pxa2xx new file mode 100644 index 000000000000..9c45f3df2e18 --- /dev/null +++ b/Documentation/spi/pxa2xx | |||
@@ -0,0 +1,234 @@ | |||
1 | PXA2xx SPI on SSP driver HOWTO | ||
2 | =================================================== | ||
3 | This is a mini howto on the pxa2xx_spi driver. The driver turns a PXA2xx | ||
4 | synchronous serial port into a SPI master controller | ||
5 | (see Documentation/spi/spi_summary). The driver has the following features | ||
6 | |||
7 | - Support for any PXA2xx SSP | ||
8 | - SSP PIO and SSP DMA data transfers. | ||
9 | - External and Internal (SSPFRM) chip selects. | ||
10 | - Per slave device (chip) configuration. | ||
11 | - Full suspend, freeze, resume support. | ||
12 | |||
13 | The driver is built around a "spi_message" fifo serviced by a workqueue and a | ||
14 | tasklet. The workqueue, "pump_messages", drives the message fifo and the tasklet | ||
15 | (pump_transfer) is responsible for queuing SPI transactions and for setting up | ||
16 | and launching the dma/interrupt driven transfers. | ||
17 | |||
18 | Declaring PXA2xx Master Controllers | ||
19 | ----------------------------------- | ||
20 | Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a | ||
21 | "platform device". The master configuration is passed to the driver via a table | ||
22 | found in include/asm-arm/arch-pxa/pxa2xx_spi.h: | ||
23 | |||
24 | struct pxa2xx_spi_master { | ||
25 | enum pxa_ssp_type ssp_type; | ||
26 | u32 clock_enable; | ||
27 | u16 num_chipselect; | ||
28 | u8 enable_dma; | ||
29 | }; | ||
30 | |||
31 | The "pxa2xx_spi_master.ssp_type" field must have a value between 1 and 3 and | ||
32 | informs the driver which features a particular SSP supports. | ||
33 | |||
34 | The "pxa2xx_spi_master.clock_enable" field is used to enable/disable the | ||
35 | corresponding SSP peripheral block in the "Clock Enable Register" (CKEN). See | ||
36 | the "PXA2xx Developer Manual" section "Clocks and Power Management". | ||
37 | |||
38 | The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of | ||
39 | slave devices (chips) attached to this SPI master. | ||
40 | |||
41 | The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should | ||
42 | be used. This causes the driver to acquire two DMA channels: rx_channel and | ||
43 | tx_channel. The rx_channel has a higher DMA service priority than the tx_channel. | ||
44 | See the "PXA2xx Developer Manual" section "DMA Controller". | ||
45 | |||
46 | NSSP MASTER SAMPLE | ||
47 | ------------------ | ||
48 | Below is a sample configuration using the PXA255 NSSP. | ||
49 | |||
50 | static struct resource pxa_spi_nssp_resources[] = { | ||
51 | [0] = { | ||
52 | .start = __PREG(SSCR0_P(2)), /* Start address of NSSP */ | ||
53 | .end = __PREG(SSCR0_P(2)) + 0x2c, /* Range of registers */ | ||
54 | .flags = IORESOURCE_MEM, | ||
55 | }, | ||
56 | [1] = { | ||
57 | .start = IRQ_NSSP, /* NSSP IRQ */ | ||
58 | .end = IRQ_NSSP, | ||
59 | .flags = IORESOURCE_IRQ, | ||
60 | }, | ||
61 | }; | ||
62 | |||
63 | static struct pxa2xx_spi_master pxa_nssp_master_info = { | ||
64 | .ssp_type = PXA25x_NSSP, /* Type of SSP */ | ||
65 | .clock_enable = CKEN9_NSSP, /* NSSP Peripheral clock */ | ||
66 | .num_chipselect = 1, /* Matches the number of chips attached to NSSP */ | ||
67 | .enable_dma = 1, /* Enables NSSP DMA */ | ||
68 | }; | ||
69 | |||
70 | static struct platform_device pxa_spi_nssp = { | ||
71 | .name = "pxa2xx-spi", /* MUST BE THIS VALUE, so the device matches the driver */ | ||
72 | .id = 2, /* Bus number, MUST MATCH SSP number 1..n */ | ||
73 | .resource = pxa_spi_nssp_resources, | ||
74 | .num_resources = ARRAY_SIZE(pxa_spi_nssp_resources), | ||
75 | .dev = { | ||
76 | .platform_data = &pxa_nssp_master_info, /* Passed to driver */ | ||
77 | }, | ||
78 | }; | ||
79 | |||
80 | static struct platform_device *devices[] __initdata = { | ||
81 | &pxa_spi_nssp, | ||
82 | }; | ||
83 | |||
84 | static void __init board_init(void) | ||
85 | { | ||
86 | (void)platform_add_devices(devices, ARRAY_SIZE(devices)); | ||
87 | } | ||
88 | |||
89 | Declaring Slave Devices | ||
90 | ----------------------- | ||
91 | Typically each SPI slave (chip) is defined in the arch/.../mach-*/board-*.c | ||
92 | using the "spi_board_info" structure found in "linux/spi/spi.h". See | ||
93 | "Documentation/spi/spi_summary" for additional information. | ||
94 | |||
95 | Each slave device attached to the PXA must provide slave specific configuration | ||
96 | information via the structure "pxa2xx_spi_chip" found in | ||
97 | "include/asm-arm/arch-pxa/pxa2xx_spi.h". The pxa2xx_spi master controller driver | ||
98 | will use the configuration whenever the driver communicates with the slave | ||
99 | device. | ||
100 | |||
101 | struct pxa2xx_spi_chip { | ||
102 | u8 tx_threshold; | ||
103 | u8 rx_threshold; | ||
104 | u8 dma_burst_size; | ||
105 | u32 timeout_microsecs; | ||
106 | u8 enable_loopback; | ||
107 | void (*cs_control)(u32 command); | ||
108 | }; | ||
109 | |||
110 | The "pxa2xx_spi_chip.tx_threshold" and "pxa2xx_spi_chip.rx_threshold" fields are | ||
111 | used to configure the SSP hardware fifo. These fields are critical to the | ||
112 | performance of the pxa2xx_spi driver and misconfiguration will result in rx | ||
113 | fifo overruns (especially in PIO mode transfers). Good default values are | ||
114 | |||
115 | .tx_threshold = 12, | ||
116 | .rx_threshold = 4, | ||
117 | |||
118 | The "pxa2xx_spi_chip.dma_burst_size" field is used to configure PXA2xx DMA | ||
119 | engine and is related the "spi_device.bits_per_word" field. Read and understand | ||
120 | the PXA2xx "Developer Manual" sections on the DMA controller and SSP Controllers | ||
121 | to determine the correct value. An SSP configured for byte-wide transfers would | ||
122 | use a value of 8. | ||
123 | |||
124 | The "pxa2xx_spi_chip.timeout_microsecs" fields is used to efficiently handle | ||
125 | trailing bytes in the SSP receiver fifo. The correct value for this field is | ||
126 | dependent on the SPI bus speed ("spi_board_info.max_speed_hz") and the specific | ||
127 | slave device. Please note that the PXA2xx SSP 1 does not support trailing byte | ||
128 | timeouts and must busy-wait for any trailing bytes. | ||
129 | |||
130 | The "pxa2xx_spi_chip.enable_loopback" field is used to place the SSP porting | ||
131 | into internal loopback mode. In this mode the SSP controller internally | ||
132 | connects the SSPTX pin to the SSPRX pin. This is useful for initial setup | ||
133 | testing. | ||
134 | |||
135 | The "pxa2xx_spi_chip.cs_control" field is used to point to a board specific | ||
136 | function for asserting/deasserting a slave device chip select. If the field is | ||
137 | NULL, the pxa2xx_spi master controller driver assumes that the SSP port is | ||
138 | configured to use SSPFRM instead. | ||
139 | |||
140 | NSSP SLAVE SAMPLE | ||
141 | ----------------- | ||
142 | The pxa2xx_spi_chip structure is passed to the pxa2xx_spi driver in the | ||
143 | "spi_board_info.controller_data" field. Below is a sample configuration using | ||
144 | the PXA255 NSSP. | ||
145 | |||
146 | /* Chip Select control for the CS8415A SPI slave device */ | ||
147 | static void cs8415a_cs_control(u32 command) | ||
148 | { | ||
149 | if (command & PXA2XX_CS_ASSERT) | ||
150 | GPCR(2) = GPIO_bit(2); | ||
151 | else | ||
152 | GPSR(2) = GPIO_bit(2); | ||
153 | } | ||
154 | |||
155 | /* Chip Select control for the CS8405A SPI slave device */ | ||
156 | static void cs8405a_cs_control(u32 command) | ||
157 | { | ||
158 | if (command & PXA2XX_CS_ASSERT) | ||
159 | GPCR(3) = GPIO_bit(3); | ||
160 | else | ||
161 | GPSR(3) = GPIO_bit(3); | ||
162 | } | ||
163 | |||
164 | static struct pxa2xx_spi_chip cs8415a_chip_info = { | ||
165 | .tx_threshold = 12, /* SSP hardware FIFO threshold */ | ||
166 | .rx_threshold = 4, /* SSP hardware FIFO threshold */ | ||
167 | .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ | ||
168 | .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ | ||
169 | .cs_control = cs8415a_cs_control, /* Use external chip select */ | ||
170 | }; | ||
171 | |||
172 | static struct pxa2xx_spi_chip cs8405a_chip_info = { | ||
173 | .tx_threshold = 12, /* SSP hardware FIFO threshold */ | ||
174 | .rx_threshold = 4, /* SSP hardware FIFO threshold */ | ||
175 | .dma_burst_size = 8, /* Byte wide transfers used so 8 byte bursts */ | ||
176 | .timeout_microsecs = 64, /* Wait at least 64usec to handle trailing */ | ||
177 | .cs_control = cs8405a_cs_control, /* Use external chip select */ | ||
178 | }; | ||
179 | |||
180 | static struct spi_board_info streetracer_spi_board_info[] __initdata = { | ||
181 | { | ||
182 | .modalias = "cs8415a", /* Name of spi_driver for this device */ | ||
183 | .max_speed_hz = 3686400, /* Run SSP as fast as possible */ | ||
184 | .bus_num = 2, /* Framework bus number */ | ||
185 | .chip_select = 0, /* Framework chip select */ | ||
186 | .platform_data = NULL, /* No spi_driver specific config */ | ||
187 | .controller_data = &cs8415a_chip_info, /* Master chip config */ | ||
188 | .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */ | ||
189 | }, | ||
190 | { | ||
191 | .modalias = "cs8405a", /* Name of spi_driver for this device */ | ||
192 | .max_speed_hz = 3686400, /* Run SSP as fast as possible */ | ||
193 | .bus_num = 2, /* Framework bus number */ | ||
194 | .chip_select = 1, /* Framework chip select */ | ||
195 | .controller_data = &cs8405a_chip_info, /* Master chip config */ | ||
196 | .irq = STREETRACER_APCI_IRQ, /* Slave device interrupt */ | ||
197 | }, | ||
198 | }; | ||
199 | |||
200 | static void __init streetracer_init(void) | ||
201 | { | ||
202 | spi_register_board_info(streetracer_spi_board_info, | ||
203 | ARRAY_SIZE(streetracer_spi_board_info)); | ||
204 | } | ||
205 | |||
206 | |||
207 | DMA and PIO I/O Support | ||
208 | ----------------------- | ||
209 | The pxa2xx_spi driver supports both DMA and interrupt driven PIO message | ||
210 | transfers. The driver defaults to PIO mode and DMA transfers must be enabled by | ||
211 | setting the "enable_dma" flag in the "pxa2xx_spi_master" structure and | ||
212 | ensuring that the "pxa2xx_spi_chip.dma_burst_size" field is non-zero. The DMA | ||
213 | mode supports both coherent and stream based DMA mappings. | ||
214 | |||
215 | The following logic is used to determine the type of I/O to be used on | ||
216 | a per "spi_transfer" basis: | ||
217 | |||
218 | if !enable_dma or dma_burst_size == 0 then | ||
219 | always use PIO transfers | ||
220 | |||
221 | if spi_message.is_dma_mapped and rx_dma_buf != 0 and tx_dma_buf != 0 then | ||
222 | use coherent DMA mode | ||
223 | |||
224 | if rx_buf and tx_buf are aligned on 8 byte boundary then | ||
225 | use streaming DMA mode | ||
226 | |||
227 | otherwise | ||
228 | use PIO transfer | ||
229 | |||
230 | THANKS TO | ||
231 | --------- | ||
232 | |||
233 | David Brownell and others for mentoring the development of this driver. | ||
234 | |||
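A protocol driver that would bind to the "cs8415a" slave declared above is an
ordinary spi_driver; the following is only a rough sketch (the probe body and
register bytes are hypothetical, and the registration boilerplate assumes the
standard spi_register_driver() interface described in
Documentation/spi/spi_summary):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/spi/spi.h>

static int cs8415a_probe(struct spi_device *spi)
{
	/* hypothetical control-register write; the real register map differs */
	u8 cmd[2] = { 0x01, 0x80 };

	/* spi->max_speed_hz, chip_select, etc. come from the board info above */
	return spi_write(spi, cmd, sizeof(cmd));
}

static struct spi_driver cs8415a_driver = {
	.driver = {
		.name	= "cs8415a",	/* matches spi_board_info.modalias */
		.owner	= THIS_MODULE,
	},
	.probe	= cs8415a_probe,
};

static int __init cs8415a_init(void)
{
	return spi_register_driver(&cs8415a_driver);
}

static void __exit cs8415a_exit(void)
{
	spi_unregister_driver(&cs8415a_driver);
}

module_init(cs8415a_init);
module_exit(cs8415a_exit);
MODULE_LICENSE("GPL");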
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary index a5ffba33a351..068732d32276 100644 --- a/Documentation/spi/spi-summary +++ b/Documentation/spi/spi-summary | |||
@@ -414,7 +414,33 @@ to get the driver-private data allocated for that device. | |||
414 | The driver will initialize the fields of that spi_master, including the | 414 | The driver will initialize the fields of that spi_master, including the |
415 | bus number (maybe the same as the platform device ID) and three methods | 415 | bus number (maybe the same as the platform device ID) and three methods |
416 | used to interact with the SPI core and SPI protocol drivers. It will | 416 | used to interact with the SPI core and SPI protocol drivers. It will |
417 | also initialize its own internal state. | 417 | also initialize its own internal state. (See below about bus numbering |
418 | and those methods.) | ||
419 | |||
420 | After you initialize the spi_master, then use spi_register_master() to | ||
421 | publish it to the rest of the system. At that time, device nodes for | ||
422 | the controller and any predeclared spi devices will be made available, | ||
423 | and the driver model core will take care of binding them to drivers. | ||
424 | |||
425 | If you need to remove your SPI controller driver, spi_unregister_master() | ||
426 | will reverse the effect of spi_register_master(). | ||
427 | |||
428 | |||
429 | BUS NUMBERING | ||
430 | |||
431 | Bus numbering is important, since that's how Linux identifies a given | ||
432 | SPI bus (shared SCK, MOSI, MISO). Valid bus numbers start at zero. On | ||
433 | SOC systems, the bus numbers should match the numbers defined by the chip | ||
434 | manufacturer. For example, hardware controller SPI2 would be bus number 2, | ||
435 | and spi_board_info for devices connected to it would use that number. | ||
436 | |||
437 | If you don't have such a hardware-assigned bus number, and for some reason | ||
438 | you can't just assign them, then provide a negative bus number. That will | ||
439 | then be replaced by a dynamically assigned number. You'd then need to treat | ||
440 | this as a non-static configuration (see above). | ||
441 | |||
442 | |||
443 | SPI MASTER METHODS | ||
418 | 444 | ||
419 | master->setup(struct spi_device *spi) | 445 | master->setup(struct spi_device *spi) |
420 | This sets up the device clock rate, SPI mode, and word sizes. | 446 | This sets up the device clock rate, SPI mode, and word sizes. |
@@ -431,6 +457,9 @@ also initialize its own internal state. | |||
431 | state it dynamically associates with that device. If you do that, | 457 | state it dynamically associates with that device. If you do that, |
432 | be sure to provide the cleanup() method to free that state. | 458 | be sure to provide the cleanup() method to free that state. |
433 | 459 | ||
460 | |||
461 | SPI MESSAGE QUEUE | ||
462 | |||
434 | The bulk of the driver will be managing the I/O queue fed by transfer(). | 463 | The bulk of the driver will be managing the I/O queue fed by transfer(). |
435 | 464 | ||
436 | That queue could be purely conceptual. For example, a driver used only | 465 | That queue could be purely conceptual. For example, a driver used only |
@@ -440,6 +469,9 @@ But the queue will probably be very real, using message->queue, PIO, | |||
440 | often DMA (especially if the root filesystem is in SPI flash), and | 469 | often DMA (especially if the root filesystem is in SPI flash), and |
441 | execution contexts like IRQ handlers, tasklets, or workqueues (such | 470 | execution contexts like IRQ handlers, tasklets, or workqueues (such |
442 | as keventd). Your driver can be as fancy, or as simple, as you need. | 471 | as keventd). Your driver can be as fancy, or as simple, as you need. |
472 | Such a transfer() method would normally just add the message to a | ||
473 | queue, and then start some asynchronous transfer engine (unless it's | ||
474 | already running). | ||
443 | 475 | ||
444 | 476 | ||
445 | THANKS TO | 477 | THANKS TO |
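The registration sequence and bus numbering described above boil down to
something like the following sketch; it is not taken from an in-tree driver,
and the my_* names, the private struct, and the chipselect count are
placeholders:

#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct my_ctlr {
	int dummy;			/* driver-private state lives here */
};

static int my_setup(struct spi_device *spi)
{
	/* validate/record per-device clock rate, SPI mode, word size */
	return 0;
}

static int my_transfer(struct spi_device *spi, struct spi_message *msg)
{
	/* queue msg and kick the transfer engine; stubbed out in this sketch */
	return -ENOSYS;
}

static int my_spi_probe(struct platform_device *pdev)
{
	struct spi_master *master;
	int status;

	/* allocate the spi_master together with driver-private state */
	master = spi_alloc_master(&pdev->dev, sizeof(struct my_ctlr));
	if (!master)
		return -ENOMEM;

	/* hardware-assigned bus number, e.g. controller SPI2 ==> bus 2 */
	master->bus_num = pdev->id;
	master->num_chipselect = 4;
	master->setup = my_setup;
	master->transfer = my_transfer;

	/* publish it; predeclared spi_board_info children get created now */
	status = spi_register_master(master);
	if (status < 0)
		spi_master_put(master);
	return status;
}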
diff --git a/Documentation/watchdog/watchdog-api.txt b/Documentation/watchdog/watchdog-api.txt index c5beb548cfc4..21ed51173662 100644 --- a/Documentation/watchdog/watchdog-api.txt +++ b/Documentation/watchdog/watchdog-api.txt | |||
@@ -36,6 +36,9 @@ timeout or margin. The simplest way to ping the watchdog is to write | |||
36 | some data to the device. So a very simple watchdog daemon would look | 36 | some data to the device. So a very simple watchdog daemon would look |
37 | like this: | 37 | like this: |
38 | 38 | ||
39 | #include <stdlib.h> | ||
40 | #include <fcntl.h> | ||
41 | |||
39 | int main(int argc, const char *argv[]) { | 42 | int main(int argc, const char *argv[]) { |
40 | int fd=open("/dev/watchdog",O_WRONLY); | 43 | int fd=open("/dev/watchdog",O_WRONLY); |
41 | if (fd==-1) { | 44 | if (fd==-1) { |
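Filled out beyond the fragment shown in the hunk above, such a minimal daemon
amounts to an open() followed by a keep-alive loop, roughly:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>

int main(int argc, const char *argv[]) {
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd == -1) {
		perror("watchdog");
		exit(EXIT_FAILURE);
	}
	while (1) {
		write(fd, "\0", 1);	/* any write pings the watchdog */
		sleep(10);		/* stay well inside the timeout */
	}
}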
diff --git a/MAINTAINERS b/MAINTAINERS index 5e3355871416..bd10b2af2223 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -40,11 +40,20 @@ trivial patch so apply some common sense. | |||
40 | PLEASE document known bugs. If it doesn't work for everything | 40 | PLEASE document known bugs. If it doesn't work for everything |
41 | or does something very odd once a month document it. | 41 | or does something very odd once a month document it. |
42 | 42 | ||
43 | PLEASE remember that submissions must be made under the terms | ||
44 | of the OSDL certificate of contribution | ||
45 | (http://www.osdl.org/newsroom/press_releases/2004/2004_05_24_dco.html) | ||
46 | and should include a Signed-off-by: line. | ||
47 | |||
43 | 6. Make sure you have the right to send any changes you make. If you | 48 | 6. Make sure you have the right to send any changes you make. If you |
44 | do changes at work you may find your employer owns the patch | 49 | do changes at work you may find your employer owns the patch |
45 | not you. | 50 | not you. |
46 | 51 | ||
47 | 7. Happy hacking. | 52 | 7. When sending security related changes or reports to a maintainer |
53 | please Cc: security@kernel.org, especially if the maintainer | ||
54 | does not respond. | ||
55 | |||
56 | 8. Happy hacking. | ||
48 | 57 | ||
49 | ----------------------------------- | 58 | ----------------------------------- |
50 | 59 | ||
@@ -969,7 +978,7 @@ S: Maintained | |||
969 | EXT3 FILE SYSTEM | 978 | EXT3 FILE SYSTEM |
970 | P: Stephen Tweedie, Andrew Morton | 979 | P: Stephen Tweedie, Andrew Morton |
971 | M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com | 980 | M: sct@redhat.com, akpm@osdl.org, adilger@clusterfs.com |
972 | L: ext3-users@redhat.com | 981 | L: ext2-devel@lists.sourceforge.net |
973 | S: Maintained | 982 | S: Maintained |
974 | 983 | ||
975 | F71805F HARDWARE MONITORING DRIVER | 984 | F71805F HARDWARE MONITORING DRIVER |
@@ -1530,12 +1539,28 @@ W: http://jfs.sourceforge.net/ | |||
1530 | T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git | 1539 | T: git kernel.org:/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git |
1531 | S: Supported | 1540 | S: Supported |
1532 | 1541 | ||
1542 | JOURNALLING LAYER FOR BLOCK DEVICES (JBD) | ||
1543 | P: Stephen Tweedie, Andrew Morton | ||
1544 | M: sct@redhat.com, akpm@osdl.org | ||
1545 | L: ext2-devel@lists.sourceforge.net | ||
1546 | S: Maintained | ||
1547 | |||
1533 | KCONFIG | 1548 | KCONFIG |
1534 | P: Roman Zippel | 1549 | P: Roman Zippel |
1535 | M: zippel@linux-m68k.org | 1550 | M: zippel@linux-m68k.org |
1536 | L: kbuild-devel@lists.sourceforge.net | 1551 | L: kbuild-devel@lists.sourceforge.net |
1537 | S: Maintained | 1552 | S: Maintained |
1538 | 1553 | ||
1554 | KDUMP | ||
1555 | P: Vivek Goyal | ||
1556 | M: vgoyal@in.ibm.com | ||
1557 | P: Haren Myneni | ||
1558 | M: hbabu@us.ibm.com | ||
1559 | L: fastboot@lists.osdl.org | ||
1560 | L: linux-kernel@vger.kernel.org | ||
1561 | W: http://lse.sourceforge.net/kdump/ | ||
1562 | S: Maintained | ||
1563 | |||
1539 | KERNEL AUTOMOUNTER (AUTOFS) | 1564 | KERNEL AUTOMOUNTER (AUTOFS) |
1540 | P: H. Peter Anvin | 1565 | P: H. Peter Anvin |
1541 | M: hpa@zytor.com | 1566 | M: hpa@zytor.com |
@@ -1603,6 +1628,11 @@ M: James.Bottomley@HansenPartnership.com | |||
1603 | L: linux-scsi@vger.kernel.org | 1628 | L: linux-scsi@vger.kernel.org |
1604 | S: Maintained | 1629 | S: Maintained |
1605 | 1630 | ||
1631 | LED SUBSYSTEM | ||
1632 | P: Richard Purdie | ||
1633 | M: rpurdie@rpsys.net | ||
1634 | S: Maintained | ||
1635 | |||
1606 | LEGO USB Tower driver | 1636 | LEGO USB Tower driver |
1607 | P: Juergen Stuber | 1637 | P: Juergen Stuber |
1608 | M: starblue@users.sourceforge.net | 1638 | M: starblue@users.sourceforge.net |
@@ -1662,7 +1692,7 @@ S: Maintained | |||
1662 | 1692 | ||
1663 | LINUX FOR POWERPC EMBEDDED PPC8XX | 1693 | LINUX FOR POWERPC EMBEDDED PPC8XX |
1664 | P: Marcelo Tosatti | 1694 | P: Marcelo Tosatti |
1665 | M: marcelo.tosatti@cyclades.com | 1695 | M: marcelo@kvack.org |
1666 | W: http://www.penguinppc.org/ | 1696 | W: http://www.penguinppc.org/ |
1667 | L: linuxppc-embedded@ozlabs.org | 1697 | L: linuxppc-embedded@ozlabs.org |
1668 | S: Maintained | 1698 | S: Maintained |
@@ -2513,6 +2543,12 @@ M: perex@suse.cz | |||
2513 | L: alsa-devel@alsa-project.org | 2543 | L: alsa-devel@alsa-project.org |
2514 | S: Maintained | 2544 | S: Maintained |
2515 | 2545 | ||
2546 | SPI SUBSYSTEM | ||
2547 | P: David Brownell | ||
2548 | M: dbrownell@users.sourceforge.net | ||
2549 | L: spi-devel-general@lists.sourceforge.net | ||
2550 | S: Maintained | ||
2551 | |||
2516 | TPM DEVICE DRIVER | 2552 | TPM DEVICE DRIVER |
2517 | P: Kylene Hall | 2553 | P: Kylene Hall |
2518 | M: kjhall@us.ibm.com | 2554 | M: kjhall@us.ibm.com |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 45fdf4a51a2a..396efba9bacd 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -99,6 +99,8 @@ int main(void) | |||
99 | DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); | 99 | DEFINE(MACHINFO_NAME, offsetof(struct machine_desc, name)); |
100 | DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io)); | 100 | DEFINE(MACHINFO_PHYSIO, offsetof(struct machine_desc, phys_io)); |
101 | DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst)); | 101 | DEFINE(MACHINFO_PGOFFIO, offsetof(struct machine_desc, io_pg_offst)); |
102 | BLANK(); | ||
103 | DEFINE(PROC_INFO_SZ, sizeof(struct proc_info_list)); | ||
102 | DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); | 104 | DEFINE(PROCINFO_INITFUNC, offsetof(struct proc_info_list, __cpu_flush)); |
103 | DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags)); | 105 | DEFINE(PROCINFO_MMUFLAGS, offsetof(struct proc_info_list, __cpu_mmu_flags)); |
104 | return 0; | 106 | return 0; |
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c index 03532769a97f..0a3e9ad297d8 100644 --- a/arch/arm/kernel/dma-isa.c +++ b/arch/arm/kernel/dma-isa.c | |||
@@ -143,12 +143,23 @@ static struct dma_ops isa_dma_ops = { | |||
143 | .residue = isa_get_dma_residue, | 143 | .residue = isa_get_dma_residue, |
144 | }; | 144 | }; |
145 | 145 | ||
146 | static struct resource dma_resources[] = { | 146 | static struct resource dma_resources[] = { { |
147 | { "dma1", 0x0000, 0x000f }, | 147 | .name = "dma1", |
148 | { "dma low page", 0x0080, 0x008f }, | 148 | .start = 0x0000, |
149 | { "dma2", 0x00c0, 0x00df }, | 149 | .end = 0x000f |
150 | { "dma high page", 0x0480, 0x048f } | 150 | }, { |
151 | }; | 151 | .name = "dma low page", |
152 | .start = 0x0080, | ||
153 | .end = 0x008f | ||
154 | }, { | ||
155 | .name = "dma2", | ||
156 | .start = 0x00c0, | ||
157 | .end = 0x00df | ||
158 | }, { | ||
159 | .name = "dma high page", | ||
160 | .start = 0x0480, | ||
161 | .end = 0x048f | ||
162 | } }; | ||
152 | 163 | ||
153 | void __init isa_init_dma(dma_t *dma) | 164 | void __init isa_init_dma(dma_t *dma) |
154 | { | 165 | { |
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c index 1a1539e3a946..7df6e1aaa323 100644 --- a/arch/arm/kernel/process.c +++ b/arch/arm/kernel/process.c | |||
@@ -311,7 +311,7 @@ void free_thread_info(struct thread_info *thread) | |||
311 | struct thread_info_list *th = &get_cpu_var(thread_info_list); | 311 | struct thread_info_list *th = &get_cpu_var(thread_info_list); |
312 | if (th->nr < EXTRA_TASK_STRUCT) { | 312 | if (th->nr < EXTRA_TASK_STRUCT) { |
313 | unsigned long *p = (unsigned long *)thread; | 313 | unsigned long *p = (unsigned long *)thread; |
314 | p[0] = th->head; | 314 | p[0] = (unsigned long)th->head; |
315 | th->head = p; | 315 | th->head = p; |
316 | th->nr += 1; | 316 | th->nr += 1; |
317 | put_cpu_var(thread_info_list); | 317 | put_cpu_var(thread_info_list); |
diff --git a/arch/arm/lib/backtrace.S b/arch/arm/lib/backtrace.S index 3bdc8c6949c5..16153c86c3f8 100644 --- a/arch/arm/lib/backtrace.S +++ b/arch/arm/lib/backtrace.S | |||
@@ -122,7 +122,7 @@ ENTRY(c_backtrace) | |||
122 | #define reg r5 | 122 | #define reg r5 |
123 | #define stack r6 | 123 | #define stack r6 |
124 | 124 | ||
125 | .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, lr} | 125 | .Ldumpstm: stmfd sp!, {instr, reg, stack, r7, r8, lr} |
126 | mov stack, r0 | 126 | mov stack, r0 |
127 | mov instr, r1 | 127 | mov instr, r1 |
128 | mov reg, #9 | 128 | mov reg, #9 |
@@ -145,7 +145,7 @@ ENTRY(c_backtrace) | |||
145 | adrne r0, .Lcr | 145 | adrne r0, .Lcr |
146 | blne printk | 146 | blne printk |
147 | mov r0, stack | 147 | mov r0, stack |
148 | LOADREGS(fd, sp!, {instr, reg, stack, r7, pc}) | 148 | LOADREGS(fd, sp!, {instr, reg, stack, r7, r8, pc}) |
149 | 149 | ||
150 | .Lfp: .asciz " r%d = %08X%c" | 150 | .Lfp: .asciz " r%d = %08X%c" |
151 | .Lcr: .asciz "\n" | 151 | .Lcr: .asciz "\n" |
diff --git a/arch/arm/lib/div64.S b/arch/arm/lib/div64.S index ec9a1cd6176f..58eef6607629 100644 --- a/arch/arm/lib/div64.S +++ b/arch/arm/lib/div64.S | |||
@@ -189,12 +189,12 @@ ENTRY(__do_div64) | |||
189 | moveq pc, lr | 189 | moveq pc, lr |
190 | 190 | ||
191 | @ Division by 0: | 191 | @ Division by 0: |
192 | str lr, [sp, #-4]! | 192 | str lr, [sp, #-8]! |
193 | bl __div0 | 193 | bl __div0 |
194 | 194 | ||
195 | @ as wrong as it could be... | 195 | @ as wrong as it could be... |
196 | mov yl, #0 | 196 | mov yl, #0 |
197 | mov yh, #0 | 197 | mov yh, #0 |
198 | mov xh, #0 | 198 | mov xh, #0 |
199 | ldr pc, [sp], #4 | 199 | ldr pc, [sp], #8 |
200 | 200 | ||
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c index 98356f810007..02e188d98e7d 100644 --- a/arch/arm/mach-pxa/mainstone.c +++ b/arch/arm/mach-pxa/mainstone.c | |||
@@ -95,7 +95,10 @@ static void __init mainstone_init_irq(void) | |||
95 | for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { | 95 | for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) { |
96 | set_irq_chip(irq, &mainstone_irq_chip); | 96 | set_irq_chip(irq, &mainstone_irq_chip); |
97 | set_irq_handler(irq, do_level_IRQ); | 97 | set_irq_handler(irq, do_level_IRQ); |
98 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | 98 | if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14)) |
99 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN); | ||
100 | else | ||
101 | set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); | ||
99 | } | 102 | } |
100 | set_irq_flags(MAINSTONE_IRQ(8), 0); | 103 | set_irq_flags(MAINSTONE_IRQ(8), 0); |
101 | set_irq_flags(MAINSTONE_IRQ(12), 0); | 104 | set_irq_flags(MAINSTONE_IRQ(12), 0); |
diff --git a/arch/arm/mach-realview/realview_eb.c b/arch/arm/mach-realview/realview_eb.c index d4a586e38d5b..693fb1e396e0 100644 --- a/arch/arm/mach-realview/realview_eb.c +++ b/arch/arm/mach-realview/realview_eb.c | |||
@@ -137,8 +137,11 @@ static struct amba_device *amba_devs[] __initdata = { | |||
137 | static void __init gic_init_irq(void) | 137 | static void __init gic_init_irq(void) |
138 | { | 138 | { |
139 | #ifdef CONFIG_REALVIEW_MPCORE | 139 | #ifdef CONFIG_REALVIEW_MPCORE |
140 | unsigned int pldctrl; | ||
140 | writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK)); | 141 | writel(0x0000a05f, __io_address(REALVIEW_SYS_LOCK)); |
141 | writel(0x008003c0, __io_address(REALVIEW_SYS_BASE) + 0xd8); | 142 | pldctrl = readl(__io_address(REALVIEW_SYS_BASE) + 0xd8); |
143 | pldctrl |= 0x00800000; /* New irq mode */ | ||
144 | writel(pldctrl, __io_address(REALVIEW_SYS_BASE) + 0xd8); | ||
142 | writel(0x00000000, __io_address(REALVIEW_SYS_LOCK)); | 145 | writel(0x00000000, __io_address(REALVIEW_SYS_LOCK)); |
143 | #endif | 146 | #endif |
144 | gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE)); | 147 | gic_dist_init(__io_address(REALVIEW_GIC_DIST_BASE)); |
diff --git a/arch/arm/mach-s3c2410/sleep.S b/arch/arm/mach-s3c2410/sleep.S index 832fb86a03b4..73de2eaca22a 100644 --- a/arch/arm/mach-s3c2410/sleep.S +++ b/arch/arm/mach-s3c2410/sleep.S | |||
@@ -59,8 +59,7 @@ ENTRY(s3c2410_cpu_suspend) | |||
59 | mrc p15, 0, r5, c13, c0, 0 @ PID | 59 | mrc p15, 0, r5, c13, c0, 0 @ PID |
60 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID | 60 | mrc p15, 0, r6, c3, c0, 0 @ Domain ID |
61 | mrc p15, 0, r7, c2, c0, 0 @ translation table base address | 61 | mrc p15, 0, r7, c2, c0, 0 @ translation table base address |
62 | mrc p15, 0, r8, c2, c0, 0 @ auxiliary control register | 62 | mrc p15, 0, r8, c1, c0, 0 @ control register |
63 | mrc p15, 0, r9, c1, c0, 0 @ control register | ||
64 | 63 | ||
65 | stmia r0, { r4 - r13 } | 64 | stmia r0, { r4 - r13 } |
66 | 65 | ||
@@ -165,7 +164,6 @@ ENTRY(s3c2410_cpu_resume) | |||
165 | mcr p15, 0, r5, c13, c0, 0 @ PID | 164 | mcr p15, 0, r5, c13, c0, 0 @ PID |
166 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID | 165 | mcr p15, 0, r6, c3, c0, 0 @ Domain ID |
167 | mcr p15, 0, r7, c2, c0, 0 @ translation table base | 166 | mcr p15, 0, r7, c2, c0, 0 @ translation table base |
168 | mcr p15, 0, r8, c1, c1, 0 @ auxilliary control | ||
169 | 167 | ||
170 | #ifdef CONFIG_DEBUG_RESUME | 168 | #ifdef CONFIG_DEBUG_RESUME |
171 | mov r3, #'R' | 169 | mov r3, #'R' |
@@ -173,7 +171,7 @@ ENTRY(s3c2410_cpu_resume) | |||
173 | #endif | 171 | #endif |
174 | 172 | ||
175 | ldr r2, =resume_with_mmu | 173 | ldr r2, =resume_with_mmu |
176 | mcr p15, 0, r9, c1, c0, 0 @ turn on MMU, etc | 174 | mcr p15, 0, r8, c1, c0, 0 @ turn on MMU, etc |
177 | nop @ second-to-last before mmu | 175 | nop @ second-to-last before mmu |
178 | mov pc, r2 @ go back to virtual address | 176 | mov pc, r2 @ go back to virtual address |
179 | 177 | ||
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 25e0ca3e598c..c1f7180c7bed 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c | |||
@@ -141,7 +141,7 @@ __ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, | |||
141 | return NULL; | 141 | return NULL; |
142 | addr = (unsigned long)area->addr; | 142 | addr = (unsigned long)area->addr; |
143 | if (remap_area_pages(addr, pfn, size, flags)) { | 143 | if (remap_area_pages(addr, pfn, size, flags)) { |
144 | vfree((void *)addr); | 144 | vunmap((void *)addr); |
145 | return NULL; | 145 | return NULL; |
146 | } | 146 | } |
147 | return (void __iomem *) (offset + (char *)addr); | 147 | return (void __iomem *) (offset + (char *)addr); |
@@ -173,7 +173,7 @@ EXPORT_SYMBOL(__ioremap); | |||
173 | 173 | ||
174 | void __iounmap(void __iomem *addr) | 174 | void __iounmap(void __iomem *addr) |
175 | { | 175 | { |
176 | vfree((void *) (PAGE_MASK & (unsigned long) addr)); | 176 | vunmap((void *)(PAGE_MASK & (unsigned long)addr)); |
177 | } | 177 | } |
178 | EXPORT_SYMBOL(__iounmap); | 178 | EXPORT_SYMBOL(__iounmap); |
179 | 179 | ||
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig index c6fe99e57a05..8dfa3054f10f 100644 --- a/arch/i386/Kconfig +++ b/arch/i386/Kconfig | |||
@@ -758,10 +758,10 @@ config HOTPLUG_CPU | |||
758 | bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" | 758 | bool "Support for hot-pluggable CPUs (EXPERIMENTAL)" |
759 | depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER | 759 | depends on SMP && HOTPLUG && EXPERIMENTAL && !X86_VOYAGER |
760 | ---help--- | 760 | ---help--- |
761 | Say Y here to experiment with turning CPUs off and on. CPUs | 761 | Say Y here to experiment with turning CPUs off and on, and to |
762 | can be controlled through /sys/devices/system/cpu. | 762 | enable suspend on SMP systems. CPUs can be controlled through |
763 | /sys/devices/system/cpu. | ||
763 | 764 | ||
764 | Say N. | ||
765 | 765 | ||
766 | endmenu | 766 | endmenu |
767 | 767 | ||
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c index 40e5aba3ad3d..daee69579b1c 100644 --- a/arch/i386/kernel/acpi/boot.c +++ b/arch/i386/kernel/acpi/boot.c | |||
@@ -1066,6 +1066,14 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = { | |||
1066 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), | 1066 | DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), |
1067 | }, | 1067 | }, |
1068 | }, | 1068 | }, |
1069 | { | ||
1070 | .callback = disable_acpi_pci, | ||
1071 | .ident = "HP xw9300", | ||
1072 | .matches = { | ||
1073 | DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), | ||
1074 | DMI_MATCH(DMI_PRODUCT_NAME, "HP xw9300 Workstation"), | ||
1075 | }, | ||
1076 | }, | ||
1069 | {} | 1077 | {} |
1070 | }; | 1078 | }; |
1071 | 1079 | ||
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c index 013b85df18c6..3d4b2f3d116a 100644 --- a/arch/i386/kernel/apic.c +++ b/arch/i386/kernel/apic.c | |||
@@ -1341,6 +1341,14 @@ int __init APIC_init_uniprocessor (void) | |||
1341 | 1341 | ||
1342 | connect_bsp_APIC(); | 1342 | connect_bsp_APIC(); |
1343 | 1343 | ||
1344 | /* | ||
1345 | * Hack: In case of kdump, after a crash, kernel might be booting | ||
1346 | * on a cpu with non-zero lapic id. But boot_cpu_physical_apicid | ||
1347 | * might be zero if read from MP tables. Get it from LAPIC. | ||
1348 | */ | ||
1349 | #ifdef CONFIG_CRASH_DUMP | ||
1350 | boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); | ||
1351 | #endif | ||
1344 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); | 1352 | phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid); |
1345 | 1353 | ||
1346 | setup_local_APIC(); | 1354 | setup_local_APIC(); |
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c index d77e89ac0d54..846e1639ef7c 100644 --- a/arch/i386/kernel/setup.c +++ b/arch/i386/kernel/setup.c | |||
@@ -1320,6 +1320,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat | |||
1320 | probe_roms(); | 1320 | probe_roms(); |
1321 | for (i = 0; i < e820.nr_map; i++) { | 1321 | for (i = 0; i < e820.nr_map; i++) { |
1322 | struct resource *res; | 1322 | struct resource *res; |
1323 | if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL) | ||
1324 | continue; | ||
1323 | res = kzalloc(sizeof(struct resource), GFP_ATOMIC); | 1325 | res = kzalloc(sizeof(struct resource), GFP_ATOMIC); |
1324 | switch (e820.map[i].type) { | 1326 | switch (e820.map[i].type) { |
1325 | case E820_RAM: res->name = "System RAM"; break; | 1327 | case E820_RAM: res->name = "System RAM"; break; |
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c index 2d22f5761b1d..0e498369f35e 100644 --- a/arch/i386/kernel/traps.c +++ b/arch/i386/kernel/traps.c | |||
@@ -130,9 +130,8 @@ static inline int print_addr_and_symbol(unsigned long addr, char *log_lvl, | |||
130 | print_symbol("%s", addr); | 130 | print_symbol("%s", addr); |
131 | 131 | ||
132 | printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; | 132 | printed = (printed + 1) % CONFIG_STACK_BACKTRACE_COLS; |
133 | |||
134 | if (printed) | 133 | if (printed) |
135 | printk(" "); | 134 | printk(" "); |
136 | else | 135 | else |
137 | printk("\n"); | 136 | printk("\n"); |
138 | 137 | ||
@@ -212,7 +211,6 @@ static void show_stack_log_lvl(struct task_struct *task, unsigned long *esp, | |||
212 | } | 211 | } |
213 | 212 | ||
214 | stack = esp; | 213 | stack = esp; |
215 | printk(log_lvl); | ||
216 | for(i = 0; i < kstack_depth_to_print; i++) { | 214 | for(i = 0; i < kstack_depth_to_print; i++) { |
217 | if (kstack_end(stack)) | 215 | if (kstack_end(stack)) |
218 | break; | 216 | break; |
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c index ae6534ad8161..3df1371d4520 100644 --- a/arch/i386/mm/init.c +++ b/arch/i386/mm/init.c | |||
@@ -651,7 +651,7 @@ void __init mem_init(void) | |||
651 | * Specifically, in the case of x86, we will always add | 651 | * Specifically, in the case of x86, we will always add |
652 | * memory to the highmem for now. | 652 | * memory to the highmem for now. |
653 | */ | 653 | */ |
654 | #ifdef CONFIG_HOTPLUG_MEMORY | 654 | #ifdef CONFIG_MEMORY_HOTPLUG |
655 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 655 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
656 | int add_memory(u64 start, u64 size) | 656 | int add_memory(u64 start, u64 size) |
657 | { | 657 | { |
diff --git a/arch/i386/oprofile/nmi_int.c b/arch/i386/oprofile/nmi_int.c index 1a2076ce6f6a..ec0fd3cfa774 100644 --- a/arch/i386/oprofile/nmi_int.c +++ b/arch/i386/oprofile/nmi_int.c | |||
@@ -332,10 +332,11 @@ static int __init ppro_init(char ** cpu_type) | |||
332 | { | 332 | { |
333 | __u8 cpu_model = boot_cpu_data.x86_model; | 333 | __u8 cpu_model = boot_cpu_data.x86_model; |
334 | 334 | ||
335 | if (cpu_model > 0xd) | 335 | if (cpu_model == 14) |
336 | *cpu_type = "i386/core"; | ||
337 | else if (cpu_model > 0xd) | ||
336 | return 0; | 338 | return 0; |
337 | 339 | else if (cpu_model == 9) { | |
338 | if (cpu_model == 9) { | ||
339 | *cpu_type = "i386/p6_mobile"; | 340 | *cpu_type = "i386/p6_mobile"; |
340 | } else if (cpu_model > 5) { | 341 | } else if (cpu_model > 5) { |
341 | *cpu_type = "i386/piii"; | 342 | *cpu_type = "i386/piii"; |
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig index f6a8853cd1b4..9ea35398e10d 100644 --- a/arch/ia64/configs/sn2_defconfig +++ b/arch/ia64/configs/sn2_defconfig | |||
@@ -134,7 +134,7 @@ CONFIG_ARCH_FLATMEM_ENABLE=y | |||
134 | CONFIG_ARCH_SPARSEMEM_ENABLE=y | 134 | CONFIG_ARCH_SPARSEMEM_ENABLE=y |
135 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y | 135 | CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y |
136 | CONFIG_NUMA=y | 136 | CONFIG_NUMA=y |
137 | CONFIG_NODES_SHIFT=8 | 137 | CONFIG_NODES_SHIFT=10 |
138 | CONFIG_VIRTUAL_MEM_MAP=y | 138 | CONFIG_VIRTUAL_MEM_MAP=y |
139 | CONFIG_HOLES_IN_ZONE=y | 139 | CONFIG_HOLES_IN_ZONE=y |
140 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y | 140 | CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y |
@@ -1159,7 +1159,7 @@ CONFIG_DETECT_SOFTLOCKUP=y | |||
1159 | # CONFIG_SCHEDSTATS is not set | 1159 | # CONFIG_SCHEDSTATS is not set |
1160 | # CONFIG_DEBUG_SLAB is not set | 1160 | # CONFIG_DEBUG_SLAB is not set |
1161 | CONFIG_DEBUG_PREEMPT=y | 1161 | CONFIG_DEBUG_PREEMPT=y |
1162 | CONFIG_DEBUG_MUTEXES=y | 1162 | # CONFIG_DEBUG_MUTEXES is not set |
1163 | # CONFIG_DEBUG_SPINLOCK is not set | 1163 | # CONFIG_DEBUG_SPINLOCK is not set |
1164 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | 1164 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set |
1165 | # CONFIG_DEBUG_KOBJECT is not set | 1165 | # CONFIG_DEBUG_KOBJECT is not set |
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index 7956eb9058fc..d58c1c5c903a 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c | |||
@@ -416,7 +416,7 @@ iosapic_end_level_irq (unsigned int irq) | |||
416 | ia64_vector vec = irq_to_vector(irq); | 416 | ia64_vector vec = irq_to_vector(irq); |
417 | struct iosapic_rte_info *rte; | 417 | struct iosapic_rte_info *rte; |
418 | 418 | ||
419 | move_irq(irq); | 419 | move_native_irq(irq); |
420 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) | 420 | list_for_each_entry(rte, &iosapic_intr_info[vec].rtes, rte_list) |
421 | iosapic_eoi(rte->addr, vec); | 421 | iosapic_eoi(rte->addr, vec); |
422 | } | 422 | } |
@@ -458,7 +458,7 @@ iosapic_ack_edge_irq (unsigned int irq) | |||
458 | { | 458 | { |
459 | irq_desc_t *idesc = irq_descp(irq); | 459 | irq_desc_t *idesc = irq_descp(irq); |
460 | 460 | ||
461 | move_irq(irq); | 461 | move_native_irq(irq); |
462 | /* | 462 | /* |
463 | * Once we have recorded IRQ_PENDING already, we can mask the | 463 | * Once we have recorded IRQ_PENDING already, we can mask the |
464 | * interrupt for real. This prevents IRQ storms from unhandled | 464 | * interrupt for real. This prevents IRQ storms from unhandled |
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 5ce908ef9c95..9c72ea3f6432 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c | |||
@@ -101,7 +101,6 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir) | |||
101 | 101 | ||
102 | if (irq < NR_IRQS) { | 102 | if (irq < NR_IRQS) { |
103 | irq_affinity[irq] = mask; | 103 | irq_affinity[irq] = mask; |
104 | set_irq_info(irq, mask); | ||
105 | irq_redir[irq] = (char) (redir & 0xff); | 104 | irq_redir[irq] = (char) (redir & 0xff); |
106 | } | 105 | } |
107 | } | 106 | } |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 078fb5533541..2d80653aa2af 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -1636,7 +1636,7 @@ static int __init prom_find_machine_type(void) | |||
1636 | compat, sizeof(compat)-1); | 1636 | compat, sizeof(compat)-1); |
1637 | if (len <= 0) | 1637 | if (len <= 0) |
1638 | return PLATFORM_GENERIC; | 1638 | return PLATFORM_GENERIC; |
1639 | if (strncmp(compat, RELOC("chrp"), 4)) | 1639 | if (strcmp(compat, RELOC("chrp"))) |
1640 | return PLATFORM_GENERIC; | 1640 | return PLATFORM_GENERIC; |
1641 | 1641 | ||
1642 | /* Default to pSeries. We need to know if we are running LPAR */ | 1642 | /* Default to pSeries. We need to know if we are running LPAR */ |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 5eb55ef1c91c..5f79f01c44f2 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -255,7 +255,7 @@ static int __init pSeries_init_panel(void) | |||
255 | { | 255 | { |
256 | /* Manually leave the kernel version on the panel. */ | 256 | /* Manually leave the kernel version on the panel. */ |
257 | ppc_md.progress("Linux ppc64\n", 0); | 257 | ppc_md.progress("Linux ppc64\n", 0); |
258 | ppc_md.progress(system_utsname.version, 0); | 258 | ppc_md.progress(system_utsname.release, 0); |
259 | 259 | ||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S index ef5b9c44b86b..4d53b2739357 100644 --- a/arch/s390/kernel/compat_wrapper.S +++ b/arch/s390/kernel/compat_wrapper.S | |||
@@ -1650,3 +1650,11 @@ sys_tee_wrapper: | |||
1650 | llgfr %r4,%r4 # size_t | 1650 | llgfr %r4,%r4 # size_t |
1651 | llgfr %r5,%r5 # unsigned int | 1651 | llgfr %r5,%r5 # unsigned int |
1652 | jg sys_tee | 1652 | jg sys_tee |
1653 | |||
1654 | .globl compat_sys_vmsplice_wrapper | ||
1655 | compat_sys_vmsplice_wrapper: | ||
1656 | lgfr %r2,%r2 # int | ||
1657 | llgtr %r3,%r3 # compat_iovec * | ||
1658 | llgfr %r4,%r4 # unsigned int | ||
1659 | llgfr %r5,%r5 # unsigned int | ||
1660 | jg compat_sys_vmsplice | ||
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index fc2c0767202b..93be1d56c036 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S | |||
@@ -317,3 +317,4 @@ SYSCALL(sys_get_robust_list,sys_get_robust_list,compat_sys_get_robust_list_wrapp | |||
317 | SYSCALL(sys_splice,sys_splice,sys_splice_wrapper) | 317 | SYSCALL(sys_splice,sys_splice,sys_splice_wrapper) |
318 | SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper) | 318 | SYSCALL(sys_sync_file_range,sys_sync_file_range,sys_sync_file_range_wrapper) |
319 | SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) | 319 | SYSCALL(sys_tee,sys_tee,sys_tee_wrapper) |
320 | SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper) | ||
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index fea043b69b91..ce19ad4e92ec 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -249,18 +249,19 @@ static inline void stop_hz_timer(void) | |||
249 | unsigned long flags; | 249 | unsigned long flags; |
250 | unsigned long seq, next; | 250 | unsigned long seq, next; |
251 | __u64 timer, todval; | 251 | __u64 timer, todval; |
252 | int cpu = smp_processor_id(); | ||
252 | 253 | ||
253 | if (sysctl_hz_timer != 0) | 254 | if (sysctl_hz_timer != 0) |
254 | return; | 255 | return; |
255 | 256 | ||
256 | cpu_set(smp_processor_id(), nohz_cpu_mask); | 257 | cpu_set(cpu, nohz_cpu_mask); |
257 | 258 | ||
258 | /* | 259 | /* |
259 | * Leave the clock comparator set up for the next timer | 260 | * Leave the clock comparator set up for the next timer |
260 | * tick if either rcu or a softirq is pending. | 261 | * tick if either rcu or a softirq is pending. |
261 | */ | 262 | */ |
262 | if (rcu_pending(smp_processor_id()) || local_softirq_pending()) { | 263 | if (rcu_needs_cpu(cpu) || local_softirq_pending()) { |
263 | cpu_clear(smp_processor_id(), nohz_cpu_mask); | 264 | cpu_clear(cpu, nohz_cpu_mask); |
264 | return; | 265 | return; |
265 | } | 266 | } |
266 | 267 | ||
@@ -271,7 +272,7 @@ static inline void stop_hz_timer(void) | |||
271 | next = next_timer_interrupt(); | 272 | next = next_timer_interrupt(); |
272 | do { | 273 | do { |
273 | seq = read_seqbegin_irqsave(&xtime_lock, flags); | 274 | seq = read_seqbegin_irqsave(&xtime_lock, flags); |
274 | timer = (__u64)(next - jiffies) + jiffies_64; | 275 | timer = ((__u64) next) - ((__u64) jiffies) + jiffies_64; |
275 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); | 276 | } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); |
276 | todval = -1ULL; | 277 | todval = -1ULL; |
277 | /* Be careful about overflows. */ | 278 | /* Be careful about overflows. */ |
diff --git a/arch/x86_64/kernel/kprobes.c b/arch/x86_64/kernel/kprobes.c index 1eaa5dae6174..fa1d19ca700a 100644 --- a/arch/x86_64/kernel/kprobes.c +++ b/arch/x86_64/kernel/kprobes.c | |||
@@ -514,13 +514,13 @@ static void __kprobes resume_execution(struct kprobe *p, | |||
514 | *tos = orig_rip + (*tos - copy_rip); | 514 | *tos = orig_rip + (*tos - copy_rip); |
515 | break; | 515 | break; |
516 | case 0xff: | 516 | case 0xff: |
517 | if ((*insn & 0x30) == 0x10) { | 517 | if ((insn[1] & 0x30) == 0x10) { |
518 | /* call absolute, indirect */ | 518 | /* call absolute, indirect */ |
519 | /* Fix return addr; rip is correct. */ | 519 | /* Fix return addr; rip is correct. */ |
520 | next_rip = regs->rip; | 520 | next_rip = regs->rip; |
521 | *tos = orig_rip + (*tos - copy_rip); | 521 | *tos = orig_rip + (*tos - copy_rip); |
522 | } else if (((*insn & 0x31) == 0x20) || /* jmp near, absolute indirect */ | 522 | } else if (((insn[1] & 0x31) == 0x20) || /* jmp near, absolute indirect */ |
523 | ((*insn & 0x31) == 0x21)) { /* jmp far, absolute indirect */ | 523 | ((insn[1] & 0x31) == 0x21)) { /* jmp far, absolute indirect */ |
524 | /* rip is correct. */ | 524 | /* rip is correct. */ |
525 | next_rip = regs->rip; | 525 | next_rip = regs->rip; |
526 | } | 526 | } |
diff --git a/arch/x86_64/kernel/pci-nommu.c b/arch/x86_64/kernel/pci-nommu.c index 44adcc2d5e5b..1f6ecc62061d 100644 --- a/arch/x86_64/kernel/pci-nommu.c +++ b/arch/x86_64/kernel/pci-nommu.c | |||
@@ -12,9 +12,10 @@ static int | |||
12 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) | 12 | check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) |
13 | { | 13 | { |
14 | if (hwdev && bus + size > *hwdev->dma_mask) { | 14 | if (hwdev && bus + size > *hwdev->dma_mask) { |
15 | printk(KERN_ERR | 15 | if (*hwdev->dma_mask >= 0xffffffffULL) |
16 | "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", | 16 | printk(KERN_ERR |
17 | name, (long long)bus, size, (long long)*hwdev->dma_mask); | 17 | "nommu_%s: overflow %Lx+%lu of device mask %Lx\n", |
18 | name, (long long)bus, size, (long long)*hwdev->dma_mask); | ||
18 | return 0; | 19 | return 0; |
19 | } | 20 | } |
20 | return 1; | 21 | return 1; |
diff --git a/arch/x86_64/kernel/traps.c b/arch/x86_64/kernel/traps.c index 6b87268c5c2e..cea335e8746c 100644 --- a/arch/x86_64/kernel/traps.c +++ b/arch/x86_64/kernel/traps.c | |||
@@ -102,6 +102,8 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) | |||
102 | { | 102 | { |
103 | if (regs->eflags & X86_EFLAGS_IF) | 103 | if (regs->eflags & X86_EFLAGS_IF) |
104 | local_irq_disable(); | 104 | local_irq_disable(); |
105 | /* Make sure to not schedule here because we could be running | ||
106 | on an exception stack. */ | ||
105 | preempt_enable_no_resched(); | 107 | preempt_enable_no_resched(); |
106 | } | 108 | } |
107 | 109 | ||
@@ -483,8 +485,6 @@ static void __kprobes do_trap(int trapnr, int signr, char *str, | |||
483 | { | 485 | { |
484 | struct task_struct *tsk = current; | 486 | struct task_struct *tsk = current; |
485 | 487 | ||
486 | conditional_sti(regs); | ||
487 | |||
488 | tsk->thread.error_code = error_code; | 488 | tsk->thread.error_code = error_code; |
489 | tsk->thread.trap_no = trapnr; | 489 | tsk->thread.trap_no = trapnr; |
490 | 490 | ||
@@ -521,6 +521,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | |||
521 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 521 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
522 | == NOTIFY_STOP) \ | 522 | == NOTIFY_STOP) \ |
523 | return; \ | 523 | return; \ |
524 | conditional_sti(regs); \ | ||
524 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ | 525 | do_trap(trapnr, signr, str, regs, error_code, NULL); \ |
525 | } | 526 | } |
526 | 527 | ||
@@ -535,6 +536,7 @@ asmlinkage void do_##name(struct pt_regs * regs, long error_code) \ | |||
535 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ | 536 | if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \ |
536 | == NOTIFY_STOP) \ | 537 | == NOTIFY_STOP) \ |
537 | return; \ | 538 | return; \ |
539 | conditional_sti(regs); \ | ||
538 | do_trap(trapnr, signr, str, regs, error_code, &info); \ | 540 | do_trap(trapnr, signr, str, regs, error_code, &info); \ |
539 | } | 541 | } |
540 | 542 | ||
@@ -548,7 +550,17 @@ DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS) | |||
548 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) | 550 | DO_ERROR(11, SIGBUS, "segment not present", segment_not_present) |
549 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) | 551 | DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0) |
550 | DO_ERROR(18, SIGSEGV, "reserved", reserved) | 552 | DO_ERROR(18, SIGSEGV, "reserved", reserved) |
551 | DO_ERROR(12, SIGBUS, "stack segment", stack_segment) | 553 | |
554 | /* Runs on IST stack */ | ||
555 | asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code) | ||
556 | { | ||
557 | if (notify_die(DIE_TRAP, "stack segment", regs, error_code, | ||
558 | 12, SIGBUS) == NOTIFY_STOP) | ||
559 | return; | ||
560 | preempt_conditional_sti(regs); | ||
561 | do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL); | ||
562 | preempt_conditional_cli(regs); | ||
563 | } | ||
552 | 564 | ||
553 | asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) | 565 | asmlinkage void do_double_fault(struct pt_regs * regs, long error_code) |
554 | { | 566 | { |
@@ -682,8 +694,9 @@ asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code) | |||
682 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { | 694 | if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) { |
683 | return; | 695 | return; |
684 | } | 696 | } |
697 | preempt_conditional_sti(regs); | ||
685 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); | 698 | do_trap(3, SIGTRAP, "int3", regs, error_code, NULL); |
686 | return; | 699 | preempt_conditional_cli(regs); |
687 | } | 700 | } |
688 | 701 | ||
689 | /* Help handler running on IST stack to switch back to user stack | 702 | /* Help handler running on IST stack to switch back to user stack |
diff --git a/arch/x86_64/mm/srat.c b/arch/x86_64/mm/srat.c index 15ae9fcd65a7..e1513532df29 100644 --- a/arch/x86_64/mm/srat.c +++ b/arch/x86_64/mm/srat.c | |||
@@ -34,7 +34,10 @@ static nodemask_t nodes_found __initdata; | |||
34 | static struct bootnode nodes[MAX_NUMNODES] __initdata; | 34 | static struct bootnode nodes[MAX_NUMNODES] __initdata; |
35 | static struct bootnode nodes_add[MAX_NUMNODES] __initdata; | 35 | static struct bootnode nodes_add[MAX_NUMNODES] __initdata; |
36 | static int found_add_area __initdata; | 36 | static int found_add_area __initdata; |
37 | int hotadd_percent __initdata = 10; | 37 | int hotadd_percent __initdata = 0; |
38 | #ifndef RESERVE_HOTADD | ||
39 | #define hotadd_percent 0 /* Ignore all settings */ | ||
40 | #endif | ||
38 | static u8 pxm2node[256] = { [0 ... 255] = 0xff }; | 41 | static u8 pxm2node[256] = { [0 ... 255] = 0xff }; |
39 | 42 | ||
40 | /* Too small nodes confuse the VM badly. Usually they result | 43 | /* Too small nodes confuse the VM badly. Usually they result |
@@ -103,6 +106,7 @@ static __init void bad_srat(void) | |||
103 | int i; | 106 | int i; |
104 | printk(KERN_ERR "SRAT: SRAT not used.\n"); | 107 | printk(KERN_ERR "SRAT: SRAT not used.\n"); |
105 | acpi_numa = -1; | 108 | acpi_numa = -1; |
109 | found_add_area = 0; | ||
106 | for (i = 0; i < MAX_LOCAL_APIC; i++) | 110 | for (i = 0; i < MAX_LOCAL_APIC; i++) |
107 | apicid_to_node[i] = NUMA_NO_NODE; | 111 | apicid_to_node[i] = NUMA_NO_NODE; |
108 | for (i = 0; i < MAX_NUMNODES; i++) | 112 | for (i = 0; i < MAX_NUMNODES; i++) |
@@ -154,7 +158,8 @@ acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) | |||
154 | int pxm, node; | 158 | int pxm, node; |
155 | if (srat_disabled()) | 159 | if (srat_disabled()) |
156 | return; | 160 | return; |
157 | if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { bad_srat(); | 161 | if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) { |
162 | bad_srat(); | ||
158 | return; | 163 | return; |
159 | } | 164 | } |
160 | if (pa->flags.enabled == 0) | 165 | if (pa->flags.enabled == 0) |
@@ -191,15 +196,17 @@ static int hotadd_enough_memory(struct bootnode *nd) | |||
191 | allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE; | 196 | allowed = (end_pfn - e820_hole_size(0, end_pfn)) * PAGE_SIZE; |
192 | allowed = (allowed / 100) * hotadd_percent; | 197 | allowed = (allowed / 100) * hotadd_percent; |
193 | if (allocated + mem > allowed) { | 198 | if (allocated + mem > allowed) { |
199 | unsigned long range; | ||
194 | /* Give them at least part of their hotadd memory upto hotadd_percent | 200 | /* Give them at least part of their hotadd memory upto hotadd_percent |
195 | It would be better to spread the limit out | 201 | It would be better to spread the limit out |
196 | over multiple hotplug areas, but that is too complicated | 202 | over multiple hotplug areas, but that is too complicated |
197 | right now */ | 203 | right now */ |
198 | if (allocated >= allowed) | 204 | if (allocated >= allowed) |
199 | return 0; | 205 | return 0; |
200 | pages = (allowed - allocated + mem) / sizeof(struct page); | 206 | range = allowed - allocated; |
207 | pages = (range / PAGE_SIZE); | ||
201 | mem = pages * sizeof(struct page); | 208 | mem = pages * sizeof(struct page); |
202 | nd->end = nd->start + pages*PAGE_SIZE; | 209 | nd->end = nd->start + range; |
203 | } | 210 | } |
204 | /* Not completely fool proof, but a good sanity check */ | 211 | /* Not completely fool proof, but a good sanity check */ |
205 | addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem); | 212 | addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem); |
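The srat.c hunk above changes how hotadd_enough_memory() clamps an oversized hot-add region: the old code divided a byte count by sizeof(struct page) and then treated the quotient as a page count, while the new code limits the region to the remaining budget in bytes. A minimal userspace sketch of the two formulas, with an assumed PAGE_SIZE and struct page size (illustrative values only), shows how far apart they end up:

```c
#include <stdio.h>

int main(void)
{
	const unsigned long PAGE_SIZE = 4096;
	const unsigned long SIZEOF_STRUCT_PAGE = 56;	/* illustrative only */

	unsigned long allowed   = 1UL << 30;	/* 1 GB hot-add budget */
	unsigned long allocated = 768UL << 20;	/* 768 MB already reserved */
	unsigned long mem       = 512UL << 20;	/* new region wants 512 MB */

	/* Old formula: divides a byte count by sizeof(struct page) and
	 * then treats the quotient as a number of pages. */
	unsigned long old_pages = (allowed - allocated + mem) / SIZEOF_STRUCT_PAGE;
	unsigned long old_span  = old_pages * PAGE_SIZE;

	/* New formula: clamp the region to the remaining budget in bytes. */
	unsigned long range     = allowed - allocated;	/* 256 MB left */
	unsigned long new_pages = range / PAGE_SIZE;
	unsigned long new_span  = range;

	printf("old span %lu MB vs new span %lu MB (pages %lu vs %lu)\n",
	       old_span >> 20, new_span >> 20, old_pages, new_pages);
	return 0;
}
```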
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 472318205236..0c99ae6a3407 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -86,18 +86,9 @@ firmware_timeout_store(struct class *class, const char *buf, size_t count) | |||
86 | static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store); | 86 | static CLASS_ATTR(timeout, 0644, firmware_timeout_show, firmware_timeout_store); |
87 | 87 | ||
88 | static void fw_class_dev_release(struct class_device *class_dev); | 88 | static void fw_class_dev_release(struct class_device *class_dev); |
89 | int firmware_class_uevent(struct class_device *dev, char **envp, | ||
90 | int num_envp, char *buffer, int buffer_size); | ||
91 | 89 | ||
92 | static struct class firmware_class = { | 90 | static int firmware_class_uevent(struct class_device *class_dev, char **envp, |
93 | .name = "firmware", | 91 | int num_envp, char *buffer, int buffer_size) |
94 | .uevent = firmware_class_uevent, | ||
95 | .release = fw_class_dev_release, | ||
96 | }; | ||
97 | |||
98 | int | ||
99 | firmware_class_uevent(struct class_device *class_dev, char **envp, | ||
100 | int num_envp, char *buffer, int buffer_size) | ||
101 | { | 92 | { |
102 | struct firmware_priv *fw_priv = class_get_devdata(class_dev); | 93 | struct firmware_priv *fw_priv = class_get_devdata(class_dev); |
103 | int i = 0, len = 0; | 94 | int i = 0, len = 0; |
@@ -116,6 +107,12 @@ firmware_class_uevent(struct class_device *class_dev, char **envp, | |||
116 | return 0; | 107 | return 0; |
117 | } | 108 | } |
118 | 109 | ||
110 | static struct class firmware_class = { | ||
111 | .name = "firmware", | ||
112 | .uevent = firmware_class_uevent, | ||
113 | .release = fw_class_dev_release, | ||
114 | }; | ||
115 | |||
119 | static ssize_t | 116 | static ssize_t |
120 | firmware_loading_show(struct class_device *class_dev, char *buf) | 117 | firmware_loading_show(struct class_device *class_dev, char *buf) |
121 | { | 118 | { |
@@ -493,25 +490,6 @@ release_firmware(const struct firmware *fw) | |||
493 | } | 490 | } |
494 | } | 491 | } |
495 | 492 | ||
496 | /** | ||
497 | * register_firmware: - provide a firmware image for later usage | ||
498 | * @name: name of firmware image file | ||
499 | * @data: buffer pointer for the firmware image | ||
500 | * @size: size of the data buffer area | ||
501 | * | ||
502 | * Make sure that @data will be available by requesting firmware @name. | ||
503 | * | ||
504 | * Note: This will not be possible until some kind of persistence | ||
505 | * is available. | ||
506 | **/ | ||
507 | void | ||
508 | register_firmware(const char *name, const u8 *data, size_t size) | ||
509 | { | ||
510 | /* This is meaningless without firmware caching, so until we | ||
511 | * decide if firmware caching is reasonable just leave it as a | ||
512 | * noop */ | ||
513 | } | ||
514 | |||
515 | /* Async support */ | 493 | /* Async support */ |
516 | struct firmware_work { | 494 | struct firmware_work { |
517 | struct work_struct work; | 495 | struct work_struct work; |
@@ -630,4 +608,3 @@ module_exit(firmware_class_exit); | |||
630 | EXPORT_SYMBOL(release_firmware); | 608 | EXPORT_SYMBOL(release_firmware); |
631 | EXPORT_SYMBOL(request_firmware); | 609 | EXPORT_SYMBOL(request_firmware); |
632 | EXPORT_SYMBOL(request_firmware_nowait); | 610 | EXPORT_SYMBOL(request_firmware_nowait); |
633 | EXPORT_SYMBOL(register_firmware); | ||
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 402296670d3a..78d928f9d9f1 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -291,7 +291,7 @@ config SX | |||
291 | 291 | ||
292 | config RIO | 292 | config RIO |
293 | tristate "Specialix RIO system support" | 293 | tristate "Specialix RIO system support" |
294 | depends on SERIAL_NONSTANDARD && !64BIT | 294 | depends on SERIAL_NONSTANDARD |
295 | help | 295 | help |
296 | This is a driver for the Specialix RIO, a smart serial card which | 296 | This is a driver for the Specialix RIO, a smart serial card which |
297 | drives an outboard box that can support up to 128 ports. Product | 297 | drives an outboard box that can support up to 128 ports. Product |
diff --git a/drivers/char/rio/host.h b/drivers/char/rio/host.h index 3ec73d1a279a..179cdbea712b 100644 --- a/drivers/char/rio/host.h +++ b/drivers/char/rio/host.h | |||
@@ -33,12 +33,6 @@ | |||
33 | #ifndef __rio_host_h__ | 33 | #ifndef __rio_host_h__ |
34 | #define __rio_host_h__ | 34 | #define __rio_host_h__ |
35 | 35 | ||
36 | #ifdef SCCS_LABELS | ||
37 | #ifndef lint | ||
38 | static char *_host_h_sccs_ = "@(#)host.h 1.2"; | ||
39 | #endif | ||
40 | #endif | ||
41 | |||
42 | /* | 36 | /* |
43 | ** the host structure - one per host card in the system. | 37 | ** the host structure - one per host card in the system. |
44 | */ | 38 | */ |
@@ -77,9 +71,6 @@ struct Host { | |||
77 | #define RC_STARTUP 1 | 71 | #define RC_STARTUP 1 |
78 | #define RC_RUNNING 2 | 72 | #define RC_RUNNING 2 |
79 | #define RC_STUFFED 3 | 73 | #define RC_STUFFED 3 |
80 | #define RC_SOMETHING 4 | ||
81 | #define RC_SOMETHING_NEW 5 | ||
82 | #define RC_SOMETHING_ELSE 6 | ||
83 | #define RC_READY 7 | 74 | #define RC_READY 7 |
84 | #define RUN_STATE 7 | 75 | #define RUN_STATE 7 |
85 | /* | 76 | /* |
diff --git a/drivers/char/rio/rioboot.c b/drivers/char/rio/rioboot.c index acda9326c2ef..290143addd34 100644 --- a/drivers/char/rio/rioboot.c +++ b/drivers/char/rio/rioboot.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
35 | #include <linux/termios.h> | 35 | #include <linux/termios.h> |
36 | #include <linux/serial.h> | 36 | #include <linux/serial.h> |
37 | #include <linux/vmalloc.h> | ||
37 | #include <asm/semaphore.h> | 38 | #include <asm/semaphore.h> |
38 | #include <linux/generic_serial.h> | 39 | #include <linux/generic_serial.h> |
39 | #include <linux/errno.h> | 40 | #include <linux/errno.h> |
diff --git a/drivers/char/rio/rioctrl.c b/drivers/char/rio/rioctrl.c index d31aba62bb7f..75b2557c37ec 100644 --- a/drivers/char/rio/rioctrl.c +++ b/drivers/char/rio/rioctrl.c | |||
@@ -1394,14 +1394,17 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
1394 | return RIO_FAIL; | 1394 | return RIO_FAIL; |
1395 | } | 1395 | } |
1396 | 1396 | ||
1397 | if (((int) ((char) PortP->InUse) == -1) || !(CmdBlkP = RIOGetCmdBlk())) { | 1397 | if ((PortP->InUse == (typeof(PortP->InUse))-1) || |
1398 | rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block for command %d on port %d\n", Cmd, PortP->PortNum); | 1398 | !(CmdBlkP = RIOGetCmdBlk())) { |
1399 | rio_dprintk(RIO_DEBUG_CTRL, "Cannot allocate command block " | ||
1400 | "for command %d on port %d\n", Cmd, PortP->PortNum); | ||
1399 | return RIO_FAIL; | 1401 | return RIO_FAIL; |
1400 | } | 1402 | } |
1401 | 1403 | ||
1402 | rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", CmdBlkP, PortP->InUse); | 1404 | rio_dprintk(RIO_DEBUG_CTRL, "Command blk %p - InUse now %d\n", |
1405 | CmdBlkP, PortP->InUse); | ||
1403 | 1406 | ||
1404 | PktCmdP = (struct PktCmd_M *) &CmdBlkP->Packet.data[0]; | 1407 | PktCmdP = (struct PktCmd_M *)&CmdBlkP->Packet.data[0]; |
1405 | 1408 | ||
1406 | CmdBlkP->Packet.src_unit = 0; | 1409 | CmdBlkP->Packet.src_unit = 0; |
1407 | if (PortP->SecondBlock) | 1410 | if (PortP->SecondBlock) |
@@ -1425,38 +1428,46 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
1425 | 1428 | ||
1426 | switch (Cmd) { | 1429 | switch (Cmd) { |
1427 | case MEMDUMP: | 1430 | case MEMDUMP: |
1428 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p (addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); | 1431 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MEMDUMP command blk %p " |
1432 | "(addr 0x%x)\n", CmdBlkP, (int) SubCmd.Addr); | ||
1429 | PktCmdP->SubCommand = MEMDUMP; | 1433 | PktCmdP->SubCommand = MEMDUMP; |
1430 | PktCmdP->SubAddr = SubCmd.Addr; | 1434 | PktCmdP->SubAddr = SubCmd.Addr; |
1431 | break; | 1435 | break; |
1432 | case FCLOSE: | 1436 | case FCLOSE: |
1433 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", CmdBlkP); | 1437 | rio_dprintk(RIO_DEBUG_CTRL, "Queue FCLOSE command blk %p\n", |
1438 | CmdBlkP); | ||
1434 | break; | 1439 | break; |
1435 | case READ_REGISTER: | 1440 | case READ_REGISTER: |
1436 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) command blk %p\n", (int) SubCmd.Addr, CmdBlkP); | 1441 | rio_dprintk(RIO_DEBUG_CTRL, "Queue READ_REGISTER (0x%x) " |
1442 | "command blk %p\n", (int) SubCmd.Addr, CmdBlkP); | ||
1437 | PktCmdP->SubCommand = READ_REGISTER; | 1443 | PktCmdP->SubCommand = READ_REGISTER; |
1438 | PktCmdP->SubAddr = SubCmd.Addr; | 1444 | PktCmdP->SubAddr = SubCmd.Addr; |
1439 | break; | 1445 | break; |
1440 | case RESUME: | 1446 | case RESUME: |
1441 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", CmdBlkP); | 1447 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RESUME command blk %p\n", |
1448 | CmdBlkP); | ||
1442 | break; | 1449 | break; |
1443 | case RFLUSH: | 1450 | case RFLUSH: |
1444 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", CmdBlkP); | 1451 | rio_dprintk(RIO_DEBUG_CTRL, "Queue RFLUSH command blk %p\n", |
1452 | CmdBlkP); | ||
1445 | CmdBlkP->PostFuncP = RIORFlushEnable; | 1453 | CmdBlkP->PostFuncP = RIORFlushEnable; |
1446 | break; | 1454 | break; |
1447 | case SUSPEND: | 1455 | case SUSPEND: |
1448 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", CmdBlkP); | 1456 | rio_dprintk(RIO_DEBUG_CTRL, "Queue SUSPEND command blk %p\n", |
1457 | CmdBlkP); | ||
1449 | break; | 1458 | break; |
1450 | 1459 | ||
1451 | case MGET: | 1460 | case MGET: |
1452 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", CmdBlkP); | 1461 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MGET command blk %p\n", |
1462 | CmdBlkP); | ||
1453 | break; | 1463 | break; |
1454 | 1464 | ||
1455 | case MSET: | 1465 | case MSET: |
1456 | case MBIC: | 1466 | case MBIC: |
1457 | case MBIS: | 1467 | case MBIS: |
1458 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; | 1468 | CmdBlkP->Packet.data[4] = (char) PortP->ModemLines; |
1459 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command blk %p\n", CmdBlkP); | 1469 | rio_dprintk(RIO_DEBUG_CTRL, "Queue MSET/MBIC/MBIS command " |
1470 | "blk %p\n", CmdBlkP); | ||
1460 | break; | 1471 | break; |
1461 | 1472 | ||
1462 | case WFLUSH: | 1473 | case WFLUSH: |
@@ -1465,12 +1476,14 @@ int RIOPreemptiveCmd(struct rio_info *p, struct Port *PortP, u8 Cmd) | |||
1465 | ** allowed then we should not bother sending any more to the | 1476 | ** allowed then we should not bother sending any more to the |
1466 | ** RTA. | 1477 | ** RTA. |
1467 | */ | 1478 | */ |
1468 | if ((int) ((char) PortP->WflushFlag) == (int) -1) { | 1479 | if (PortP->WflushFlag == (typeof(PortP->WflushFlag))-1) { |
1469 | rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, WflushFlag about to wrap!"); | 1480 | rio_dprintk(RIO_DEBUG_CTRL, "Trashed WFLUSH, " |
1481 | "WflushFlag about to wrap!"); | ||
1470 | RIOFreeCmdBlk(CmdBlkP); | 1482 | RIOFreeCmdBlk(CmdBlkP); |
1471 | return (RIO_FAIL); | 1483 | return (RIO_FAIL); |
1472 | } else { | 1484 | } else { |
1473 | rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command blk %p\n", CmdBlkP); | 1485 | rio_dprintk(RIO_DEBUG_CTRL, "Queue WFLUSH command " |
1486 | "blk %p\n", CmdBlkP); | ||
1474 | CmdBlkP->PostFuncP = RIOWFlushMark; | 1487 | CmdBlkP->PostFuncP = RIOWFlushMark; |
1475 | } | 1488 | } |
1476 | break; | 1489 | break; |
diff --git a/drivers/char/rio/rioioctl.h b/drivers/char/rio/rioioctl.h index 14b83fae75c8..e8af5b30519e 100644 --- a/drivers/char/rio/rioioctl.h +++ b/drivers/char/rio/rioioctl.h | |||
@@ -33,10 +33,6 @@ | |||
33 | #ifndef __rioioctl_h__ | 33 | #ifndef __rioioctl_h__ |
34 | #define __rioioctl_h__ | 34 | #define __rioioctl_h__ |
35 | 35 | ||
36 | #ifdef SCCS_LABELS | ||
37 | static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2"; | ||
38 | #endif | ||
39 | |||
40 | /* | 36 | /* |
41 | ** RIO device driver - user ioctls and associated structures. | 37 | ** RIO device driver - user ioctls and associated structures. |
42 | */ | 38 | */ |
@@ -44,55 +40,13 @@ static char *_rioioctl_h_sccs_ = "@(#)rioioctl.h 1.2"; | |||
44 | struct portStats { | 40 | struct portStats { |
45 | int port; | 41 | int port; |
46 | int gather; | 42 | int gather; |
47 | ulong txchars; | 43 | unsigned long txchars; |
48 | ulong rxchars; | 44 | unsigned long rxchars; |
49 | ulong opens; | 45 | unsigned long opens; |
50 | ulong closes; | 46 | unsigned long closes; |
51 | ulong ioctls; | 47 | unsigned long ioctls; |
52 | }; | 48 | }; |
53 | 49 | ||
54 | |||
55 | #define rIOC ('r'<<8) | ||
56 | #define TCRIOSTATE (rIOC | 1) | ||
57 | #define TCRIOXPON (rIOC | 2) | ||
58 | #define TCRIOXPOFF (rIOC | 3) | ||
59 | #define TCRIOXPCPS (rIOC | 4) | ||
60 | #define TCRIOXPRINT (rIOC | 5) | ||
61 | #define TCRIOIXANYON (rIOC | 6) | ||
62 | #define TCRIOIXANYOFF (rIOC | 7) | ||
63 | #define TCRIOIXONON (rIOC | 8) | ||
64 | #define TCRIOIXONOFF (rIOC | 9) | ||
65 | #define TCRIOMBIS (rIOC | 10) | ||
66 | #define TCRIOMBIC (rIOC | 11) | ||
67 | #define TCRIOTRIAD (rIOC | 12) | ||
68 | #define TCRIOTSTATE (rIOC | 13) | ||
69 | |||
70 | /* | ||
71 | ** 15.10.1998 ARG - ESIL 0761 part fix | ||
72 | ** Add RIO ioctls for manipulating RTS and CTS flow control, (as LynxOS | ||
73 | ** appears to not support hardware flow control). | ||
74 | */ | ||
75 | #define TCRIOCTSFLOWEN (rIOC | 14) /* enable CTS flow control */ | ||
76 | #define TCRIOCTSFLOWDIS (rIOC | 15) /* disable CTS flow control */ | ||
77 | #define TCRIORTSFLOWEN (rIOC | 16) /* enable RTS flow control */ | ||
78 | #define TCRIORTSFLOWDIS (rIOC | 17) /* disable RTS flow control */ | ||
79 | |||
80 | /* | ||
81 | ** 09.12.1998 ARG - ESIL 0776 part fix | ||
82 | ** Definition for 'RIOC' also appears in daemon.h, so we'd better do a | ||
83 | ** #ifndef here first. | ||
84 | ** 'RIO_QUICK_CHECK' also #define'd here as this ioctl is now | ||
85 | ** allowed to be used by customers. | ||
86 | ** | ||
87 | ** 05.02.1999 ARG - | ||
88 | ** This is what I've decied to do with ioctls etc., which are intended to be | ||
89 | ** invoked from users applications : | ||
90 | ** Anything that needs to be defined here will be removed from daemon.h, that | ||
91 | ** way it won't end up having to be defined/maintained in two places. The only | ||
92 | ** consequence of this is that this file should now be #include'd by daemon.h | ||
93 | ** | ||
94 | ** 'stats' ioctls now #define'd here as they are to be used by customers. | ||
95 | */ | ||
96 | #define RIOC ('R'<<8)|('i'<<16)|('o'<<24) | 50 | #define RIOC ('R'<<8)|('i'<<16)|('o'<<24) |
97 | 51 | ||
98 | #define RIO_QUICK_CHECK (RIOC | 105) | 52 | #define RIO_QUICK_CHECK (RIOC | 105) |
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 1efde3b27619..fe00c7dfb649 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig | |||
@@ -22,7 +22,7 @@ config TCG_TPM | |||
22 | 22 | ||
23 | config TCG_TIS | 23 | config TCG_TIS |
24 | tristate "TPM Interface Specification 1.2 Interface" | 24 | tristate "TPM Interface Specification 1.2 Interface" |
25 | depends on TCG_TPM | 25 | depends on TCG_TPM && PNPACPI |
26 | ---help--- | 26 | ---help--- |
27 | If you have a TPM security chip that is compliant with the | 27 | If you have a TPM security chip that is compliant with the |
28 | TCG TIS 1.2 TPM specification say Yes and it will be accessible | 28 | TCG TIS 1.2 TPM specification say Yes and it will be accessible |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 54a4c804e25f..050ced247f68 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -140,7 +140,7 @@ extern int tpm_pm_resume(struct device *); | |||
140 | extern struct dentry ** tpm_bios_log_setup(char *); | 140 | extern struct dentry ** tpm_bios_log_setup(char *); |
141 | extern void tpm_bios_log_teardown(struct dentry **); | 141 | extern void tpm_bios_log_teardown(struct dentry **); |
142 | #else | 142 | #else |
143 | static inline struct dentry* tpm_bios_log_setup(char *name) | 143 | static inline struct dentry ** tpm_bios_log_setup(char *name) |
144 | { | 144 | { |
145 | return NULL; | 145 | return NULL; |
146 | } | 146 | } |
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index b9cae9a238bb..f621168f38ae 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -55,7 +55,7 @@ enum tis_int_flags { | |||
55 | }; | 55 | }; |
56 | 56 | ||
57 | enum tis_defaults { | 57 | enum tis_defaults { |
58 | TIS_MEM_BASE = 0xFED4000, | 58 | TIS_MEM_BASE = 0xFED40000, |
59 | TIS_MEM_LEN = 0x5000, | 59 | TIS_MEM_LEN = 0x5000, |
60 | TIS_SHORT_TIMEOUT = 750, /* ms */ | 60 | TIS_SHORT_TIMEOUT = 750, /* ms */ |
61 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ | 61 | TIS_LONG_TIMEOUT = 2000, /* 2 sec */ |
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c index a13395e2c372..fa2ba9ebe42a 100644 --- a/drivers/char/watchdog/i8xx_tco.c +++ b/drivers/char/watchdog/i8xx_tco.c | |||
@@ -33,11 +33,6 @@ | |||
33 | * 82801E (C-ICH) : document number 273599-001, 273645-002, | 33 | * 82801E (C-ICH) : document number 273599-001, 273645-002, |
34 | * 82801EB (ICH5) : document number 252516-001, 252517-003, | 34 | * 82801EB (ICH5) : document number 252516-001, 252517-003, |
35 | * 82801ER (ICH5R) : document number 252516-001, 252517-003, | 35 | * 82801ER (ICH5R) : document number 252516-001, 252517-003, |
36 | * 82801FB (ICH6) : document number 301473-002, 301474-007, | ||
37 | * 82801FR (ICH6R) : document number 301473-002, 301474-007, | ||
38 | * 82801FBM (ICH6-M) : document number 301473-002, 301474-007, | ||
39 | * 82801FW (ICH6W) : document number 301473-001, 301474-007, | ||
40 | * 82801FRW (ICH6RW) : document number 301473-001, 301474-007 | ||
41 | * | 36 | * |
42 | * 20000710 Nils Faerber | 37 | * 20000710 Nils Faerber |
43 | * Initial Version 0.01 | 38 | * Initial Version 0.01 |
@@ -66,6 +61,10 @@ | |||
66 | * 20050807 Wim Van Sebroeck <wim@iguana.be> | 61 | * 20050807 Wim Van Sebroeck <wim@iguana.be> |
67 | * 0.08 Make sure that the watchdog is only "armed" when started. | 62 | * 0.08 Make sure that the watchdog is only "armed" when started. |
68 | * (Kernel Bug 4251) | 63 | * (Kernel Bug 4251) |
64 | * 20060416 Wim Van Sebroeck <wim@iguana.be> | ||
65 | * 0.09 Remove support for the ICH6, ICH6R, ICH6-M, ICH6W and ICH6RW and | ||
66 | * ICH7 chipsets. (See Kernel Bug 6031 - other code will support these | ||
67 | * chipsets) | ||
69 | */ | 68 | */ |
70 | 69 | ||
71 | /* | 70 | /* |
@@ -90,7 +89,7 @@ | |||
90 | #include "i8xx_tco.h" | 89 | #include "i8xx_tco.h" |
91 | 90 | ||
92 | /* Module and version information */ | 91 | /* Module and version information */ |
93 | #define TCO_VERSION "0.08" | 92 | #define TCO_VERSION "0.09" |
94 | #define TCO_MODULE_NAME "i8xx TCO timer" | 93 | #define TCO_MODULE_NAME "i8xx TCO timer" |
95 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION | 94 | #define TCO_DRIVER_NAME TCO_MODULE_NAME ", v" TCO_VERSION |
96 | #define PFX TCO_MODULE_NAME ": " | 95 | #define PFX TCO_MODULE_NAME ": " |
@@ -391,11 +390,6 @@ static struct pci_device_id i8xx_tco_pci_tbl[] = { | |||
391 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, }, | 390 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12, PCI_ANY_ID, PCI_ANY_ID, }, |
392 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, | 391 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_0, PCI_ANY_ID, PCI_ANY_ID, }, |
393 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, | 392 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, PCI_ANY_ID, PCI_ANY_ID, }, |
394 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_0, PCI_ANY_ID, PCI_ANY_ID, }, | ||
395 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, PCI_ANY_ID, PCI_ANY_ID, }, | ||
396 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, }, | ||
397 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, PCI_ANY_ID, PCI_ANY_ID, }, | ||
398 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, PCI_ANY_ID, PCI_ANY_ID, }, | ||
399 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, | 393 | { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, PCI_ANY_ID, PCI_ANY_ID, }, |
400 | { 0, }, /* End of list */ | 394 | { 0, }, /* End of list */ |
401 | }; | 395 | }; |
diff --git a/drivers/char/watchdog/s3c2410_wdt.c b/drivers/char/watchdog/s3c2410_wdt.c index 9dc54736e4eb..1ea04e9b2b0b 100644 --- a/drivers/char/watchdog/s3c2410_wdt.c +++ b/drivers/char/watchdog/s3c2410_wdt.c | |||
@@ -423,6 +423,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev) | |||
423 | if (tmr_atboot && started == 0) { | 423 | if (tmr_atboot && started == 0) { |
424 | printk(KERN_INFO PFX "Starting Watchdog Timer\n"); | 424 | printk(KERN_INFO PFX "Starting Watchdog Timer\n"); |
425 | s3c2410wdt_start(); | 425 | s3c2410wdt_start(); |
426 | } else if (!tmr_atboot) { | ||
427 | /* if we're not enabling the watchdog, then ensure it is | ||
428 | * disabled if it has been left running from the bootloader | ||
429 | * or other source */ | ||
430 | |||
431 | s3c2410wdt_stop(); | ||
426 | } | 432 | } |
427 | 433 | ||
428 | return 0; | 434 | return 0; |
diff --git a/drivers/char/watchdog/sc1200wdt.c b/drivers/char/watchdog/sc1200wdt.c index 515ce7572049..20b88f9b7be2 100644 --- a/drivers/char/watchdog/sc1200wdt.c +++ b/drivers/char/watchdog/sc1200wdt.c | |||
@@ -377,7 +377,7 @@ static int __init sc1200wdt_init(void) | |||
377 | { | 377 | { |
378 | int ret; | 378 | int ret; |
379 | 379 | ||
380 | printk(banner); | 380 | printk("%s\n", banner); |
381 | 381 | ||
382 | spin_lock_init(&sc1200wdt_lock); | 382 | spin_lock_init(&sc1200wdt_lock); |
383 | sema_init(&open_sem, 1); | 383 | sema_init(&open_sem, 1); |
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c index 4961f1e764a7..602797a44208 100644 --- a/drivers/ide/legacy/ide-cs.c +++ b/drivers/ide/legacy/ide-cs.c | |||
@@ -392,6 +392,7 @@ static struct pcmcia_device_id ide_ids[] = { | |||
392 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), | 392 | PCMCIA_DEVICE_PROD_ID12("FREECOM", "PCCARD-IDE", 0x5714cbf7, 0x48e0ab8e), |
393 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), | 393 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "FLASH", 0xf4f43949, 0x9eb86aae), |
394 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), | 394 | PCMCIA_DEVICE_PROD_ID12("HITACHI", "microdrive", 0xf4f43949, 0xa6d76178), |
395 | PCMCIA_DEVICE_PROD_ID12("IBM", "microdrive", 0xb569a6e5, 0xa6d76178), | ||
395 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), | 396 | PCMCIA_DEVICE_PROD_ID12("IBM", "IBM17JSSFP20", 0xb569a6e5, 0xf2508753), |
396 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), | 397 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "CBIDE2 ", 0x547e66dc, 0x8671043b), |
397 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), | 398 | PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149), |
diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index 78e30f803671..ffca8b63ee79 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c | |||
@@ -553,6 +553,8 @@ pmac_ide_init_hwif_ports(hw_regs_t *hw, | |||
553 | 553 | ||
554 | if (irq != NULL) | 554 | if (irq != NULL) |
555 | *irq = pmac_ide[ix].irq; | 555 | *irq = pmac_ide[ix].irq; |
556 | |||
557 | hw->dev = &pmac_ide[ix].mdev->ofdev.dev; | ||
556 | } | 558 | } |
557 | 559 | ||
558 | #define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x))) | 560 | #define PMAC_IDE_REG(x) ((void __iomem *)(IDE_DATA_REG+(x))) |
diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c index 19222878aae9..11f13778f139 100644 --- a/drivers/ieee1394/ohci1394.c +++ b/drivers/ieee1394/ohci1394.c | |||
@@ -553,7 +553,7 @@ static void ohci_initialize(struct ti_ohci *ohci) | |||
553 | * register content. | 553 | * register content. |
554 | * To actually enable physical responses is the job of our interrupt | 554 | * To actually enable physical responses is the job of our interrupt |
555 | * handler which programs the physical request filter. */ | 555 | * handler which programs the physical request filter. */ |
556 | reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); | 556 | reg_write(ohci, OHCI1394_PhyUpperBound, 0x01000000); |
557 | 557 | ||
558 | DBGMSG("physUpperBoundOffset=%08x", | 558 | DBGMSG("physUpperBoundOffset=%08x", |
559 | reg_read(ohci, OHCI1394_PhyUpperBound)); | 559 | reg_read(ohci, OHCI1394_PhyUpperBound)); |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f4206604db03..8a23fb54c693 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/kernel.h> | 42 | #include <linux/kernel.h> |
43 | #include <linux/list.h> | 43 | #include <linux/list.h> |
44 | #include <linux/string.h> | 44 | #include <linux/string.h> |
45 | #include <linux/stringify.h> | ||
45 | #include <linux/slab.h> | 46 | #include <linux/slab.h> |
46 | #include <linux/interrupt.h> | 47 | #include <linux/interrupt.h> |
47 | #include <linux/fs.h> | 48 | #include <linux/fs.h> |
@@ -117,7 +118,8 @@ MODULE_PARM_DESC(serialize_io, "Serialize I/O coming from scsi drivers (default | |||
117 | */ | 118 | */ |
118 | static int max_sectors = SBP2_MAX_SECTORS; | 119 | static int max_sectors = SBP2_MAX_SECTORS; |
119 | module_param(max_sectors, int, 0444); | 120 | module_param(max_sectors, int, 0444); |
120 | MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = 255)"); | 121 | MODULE_PARM_DESC(max_sectors, "Change max sectors per I/O supported (default = " |
122 | __stringify(SBP2_MAX_SECTORS) ")"); | ||
121 | 123 | ||
122 | /* | 124 | /* |
123 | * Exclusive login to sbp2 device? In most cases, the sbp2 driver should | 125 | * Exclusive login to sbp2 device? In most cases, the sbp2 driver should |
@@ -135,18 +137,45 @@ module_param(exclusive_login, int, 0644); | |||
135 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); | 137 | MODULE_PARM_DESC(exclusive_login, "Exclusive login to sbp2 device (default = 1)"); |
136 | 138 | ||
137 | /* | 139 | /* |
138 | * SCSI inquiry hack for really badly behaved sbp2 devices. Turn this on | 140 | * If any of the following workarounds is required for your device to work, |
139 | * if your sbp2 device is not properly handling the SCSI inquiry command. | 141 | * please submit the kernel messages logged by sbp2 to the linux1394-devel |
140 | * This hack makes the inquiry look more like a typical MS Windows inquiry | 142 | * mailing list. |
141 | * by enforcing 36 byte inquiry and avoiding access to mode_sense page 8. | ||
142 | * | 143 | * |
143 | * If force_inquiry_hack=1 is required for your device to work, | 144 | * - 128kB max transfer |
144 | * please submit the logged sbp2_firmware_revision value of this device to | 145 | * Limit transfer size. Necessary for some old bridges. |
145 | * the linux1394-devel mailing list. | 146 | * |
147 | * - 36 byte inquiry | ||
148 | * When scsi_mod probes the device, let the inquiry command look like that | ||
149 | * from MS Windows. | ||
150 | * | ||
151 | * - skip mode page 8 | ||
152 | * Suppress sending of mode_sense for mode page 8 if the device pretends to | ||
153 | * support the SCSI Primary Block commands instead of Reduced Block Commands. | ||
154 | * | ||
155 | * - fix capacity | ||
156 | * Tell sd_mod to correct the last sector number reported by read_capacity. | ||
157 | * Avoids access beyond actual disk limits on devices with an off-by-one bug. | ||
158 | * Don't use this with devices which don't have this bug. | ||
159 | * | ||
160 | * - override internal blacklist | ||
161 | * Instead of adding to the built-in blacklist, use only the workarounds | ||
162 | * specified in the module load parameter. | ||
163 | * Useful if a blacklist entry interfered with a non-broken device. | ||
146 | */ | 164 | */ |
165 | static int sbp2_default_workarounds; | ||
166 | module_param_named(workarounds, sbp2_default_workarounds, int, 0644); | ||
167 | MODULE_PARM_DESC(workarounds, "Work around device bugs (default = 0" | ||
168 | ", 128kB max transfer = " __stringify(SBP2_WORKAROUND_128K_MAX_TRANS) | ||
169 | ", 36 byte inquiry = " __stringify(SBP2_WORKAROUND_INQUIRY_36) | ||
170 | ", skip mode page 8 = " __stringify(SBP2_WORKAROUND_MODE_SENSE_8) | ||
171 | ", fix capacity = " __stringify(SBP2_WORKAROUND_FIX_CAPACITY) | ||
172 | ", override internal blacklist = " __stringify(SBP2_WORKAROUND_OVERRIDE) | ||
173 | ", or a combination)"); | ||
174 | |||
175 | /* legacy parameter */ | ||
147 | static int force_inquiry_hack; | 176 | static int force_inquiry_hack; |
148 | module_param(force_inquiry_hack, int, 0644); | 177 | module_param(force_inquiry_hack, int, 0644); |
149 | MODULE_PARM_DESC(force_inquiry_hack, "Force SCSI inquiry hack (default = 0)"); | 178 | MODULE_PARM_DESC(force_inquiry_hack, "Deprecated, use 'workarounds'"); |
150 | 179 | ||
151 | /* | 180 | /* |
152 | * Export information about protocols/devices supported by this driver. | 181 | * Export information about protocols/devices supported by this driver. |
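The new "workarounds" parameter described above is a bitmask; the flag names in the MODULE_PARM_DESC are given numeric values in the sbp2.h hunk later in this patch. A small sketch of how a combined value would be formed (the particular combination is only an example, not a recommendation for any real device):

```c
#include <stdio.h>

/* Flag values as defined in the sbp2.h hunk later in this patch. */
#define SBP2_WORKAROUND_128K_MAX_TRANS	0x1
#define SBP2_WORKAROUND_INQUIRY_36	0x2
#define SBP2_WORKAROUND_MODE_SENSE_8	0x4
#define SBP2_WORKAROUND_FIX_CAPACITY	0x8
#define SBP2_WORKAROUND_OVERRIDE	0x100

int main(void)
{
	/* A device needing the 36 byte inquiry plus the mode page 8
	 * workaround would be given workarounds=0x6 at module load time. */
	int w = SBP2_WORKAROUND_INQUIRY_36 | SBP2_WORKAROUND_MODE_SENSE_8;

	printf("workarounds=0x%x\n", w);
	return 0;
}
```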
@@ -266,14 +295,55 @@ static struct hpsb_protocol_driver sbp2_driver = { | |||
266 | }; | 295 | }; |
267 | 296 | ||
268 | /* | 297 | /* |
269 | * List of device firmwares that require the inquiry hack. | 298 | * List of devices with known bugs. |
270 | * Yields a few false positives but did not break other devices so far. | 299 | * |
300 | * The firmware_revision field, masked with 0xffff00, is the best indicator | ||
301 | * for the type of bridge chip of a device. It yields a few false positives | ||
302 | * but this did not break correctly behaving devices so far. | ||
271 | */ | 303 | */ |
272 | static u32 sbp2_broken_inquiry_list[] = { | 304 | static const struct { |
273 | 0x00002800, /* Stefan Richter <stefanr@s5r6.in-berlin.de> */ | 305 | u32 firmware_revision; |
274 | /* DViCO Momobay CX-1 */ | 306 | u32 model_id; |
275 | 0x00000200 /* Andreas Plesch <plesch@fas.harvard.edu> */ | 307 | unsigned workarounds; |
276 | /* QPS Fire DVDBurner */ | 308 | } sbp2_workarounds_table[] = { |
309 | /* TSB42AA9 */ { | ||
310 | .firmware_revision = 0x002800, | ||
311 | .workarounds = SBP2_WORKAROUND_INQUIRY_36 | | ||
312 | SBP2_WORKAROUND_MODE_SENSE_8, | ||
313 | }, | ||
314 | /* Initio bridges, actually only needed for some older ones */ { | ||
315 | .firmware_revision = 0x000200, | ||
316 | .workarounds = SBP2_WORKAROUND_INQUIRY_36, | ||
317 | }, | ||
318 | /* Symbios bridge */ { | ||
319 | .firmware_revision = 0xa0b800, | ||
320 | .workarounds = SBP2_WORKAROUND_128K_MAX_TRANS, | ||
321 | }, | ||
322 | /* | ||
323 | * Note about the following Apple iPod blacklist entries: | ||
324 | * | ||
325 | * There are iPods (2nd gen, 3rd gen) with model_id==0. Since our | ||
326 | * matching logic treats 0 as a wildcard, we cannot match this ID | ||
327 | * without rewriting the matching routine. Fortunately these iPods | ||
328 | * do not feature the read_capacity bug according to one report. | ||
329 | * Read_capacity behaviour as well as model_id could change due to | ||
330 | * Apple-supplied firmware updates though. | ||
331 | */ | ||
332 | /* iPod 4th generation */ { | ||
333 | .firmware_revision = 0x0a2700, | ||
334 | .model_id = 0x000021, | ||
335 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
336 | }, | ||
337 | /* iPod mini */ { | ||
338 | .firmware_revision = 0x0a2700, | ||
339 | .model_id = 0x000023, | ||
340 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
341 | }, | ||
342 | /* iPod Photo */ { | ||
343 | .firmware_revision = 0x0a2700, | ||
344 | .model_id = 0x00007e, | ||
345 | .workarounds = SBP2_WORKAROUND_FIX_CAPACITY, | ||
346 | } | ||
277 | }; | 347 | }; |
278 | 348 | ||
279 | /************************************** | 349 | /************************************** |
@@ -765,11 +835,16 @@ static struct scsi_id_instance_data *sbp2_alloc_device(struct unit_directory *ud | |||
765 | 835 | ||
766 | /* Register the status FIFO address range. We could use the same FIFO | 836 | /* Register the status FIFO address range. We could use the same FIFO |
767 | * for targets at different nodes. However we need different FIFOs per | 837 | * for targets at different nodes. However we need different FIFOs per |
768 | * target in order to support multi-unit devices. */ | 838 | * target in order to support multi-unit devices. |
839 | * The FIFO is located out of the local host controller's physical range | ||
840 | * but, if possible, within the posted write area. Status writes will | ||
841 | * then be performed as unified transactions. This slightly reduces | ||
842 | * bandwidth usage, and some Prolific based devices seem to require it. | ||
843 | */ | ||
769 | scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( | 844 | scsi_id->status_fifo_addr = hpsb_allocate_and_register_addrspace( |
770 | &sbp2_highlevel, ud->ne->host, &sbp2_ops, | 845 | &sbp2_highlevel, ud->ne->host, &sbp2_ops, |
771 | sizeof(struct sbp2_status_block), sizeof(quadlet_t), | 846 | sizeof(struct sbp2_status_block), sizeof(quadlet_t), |
772 | ~0ULL, ~0ULL); | 847 | 0x010000000000ULL, CSR1212_ALL_SPACE_END); |
773 | if (!scsi_id->status_fifo_addr) { | 848 | if (!scsi_id->status_fifo_addr) { |
774 | SBP2_ERR("failed to allocate status FIFO address range"); | 849 | SBP2_ERR("failed to allocate status FIFO address range"); |
775 | goto failed_alloc; | 850 | goto failed_alloc; |
@@ -1450,7 +1525,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
1450 | struct csr1212_dentry *dentry; | 1525 | struct csr1212_dentry *dentry; |
1451 | u64 management_agent_addr; | 1526 | u64 management_agent_addr; |
1452 | u32 command_set_spec_id, command_set, unit_characteristics, | 1527 | u32 command_set_spec_id, command_set, unit_characteristics, |
1453 | firmware_revision, workarounds; | 1528 | firmware_revision; |
1529 | unsigned workarounds; | ||
1454 | int i; | 1530 | int i; |
1455 | 1531 | ||
1456 | SBP2_DEBUG_ENTER(); | 1532 | SBP2_DEBUG_ENTER(); |
@@ -1506,12 +1582,8 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
1506 | case SBP2_FIRMWARE_REVISION_KEY: | 1582 | case SBP2_FIRMWARE_REVISION_KEY: |
1507 | /* Firmware revision */ | 1583 | /* Firmware revision */ |
1508 | firmware_revision = kv->value.immediate; | 1584 | firmware_revision = kv->value.immediate; |
1509 | if (force_inquiry_hack) | 1585 | SBP2_DEBUG("sbp2_firmware_revision = %x", |
1510 | SBP2_INFO("sbp2_firmware_revision = %x", | 1586 | (unsigned int)firmware_revision); |
1511 | (unsigned int)firmware_revision); | ||
1512 | else | ||
1513 | SBP2_DEBUG("sbp2_firmware_revision = %x", | ||
1514 | (unsigned int)firmware_revision); | ||
1515 | break; | 1587 | break; |
1516 | 1588 | ||
1517 | default: | 1589 | default: |
@@ -1519,41 +1591,44 @@ static void sbp2_parse_unit_directory(struct scsi_id_instance_data *scsi_id, | |||
1519 | } | 1591 | } |
1520 | } | 1592 | } |
1521 | 1593 | ||
1522 | /* This is the start of our broken device checking. We try to hack | 1594 | workarounds = sbp2_default_workarounds; |
1523 | * around oddities and known defects. */ | 1595 | if (force_inquiry_hack) { |
1524 | workarounds = 0x0; | 1596 | SBP2_WARN("force_inquiry_hack is deprecated. " |
1597 | "Use parameter 'workarounds' instead."); | ||
1598 | workarounds |= SBP2_WORKAROUND_INQUIRY_36; | ||
1599 | } | ||
1525 | 1600 | ||
1526 | /* If the vendor id is 0xa0b8 (Symbios vendor id), then we have a | 1601 | if (!(workarounds & SBP2_WORKAROUND_OVERRIDE)) |
1527 | * bridge with 128KB max transfer size limitation. For sanity, we | 1602 | for (i = 0; i < ARRAY_SIZE(sbp2_workarounds_table); i++) { |
1528 | * only voice this when the current max_sectors setting | 1603 | if (sbp2_workarounds_table[i].firmware_revision && |
1529 | * exceeds the 128k limit. By default, that is not the case. | 1604 | sbp2_workarounds_table[i].firmware_revision != |
1530 | * | 1605 | (firmware_revision & 0xffff00)) |
1531 | * It would be really nice if we could detect this before the scsi | 1606 | continue; |
1532 | * host gets initialized. That way we can down-force the | 1607 | if (sbp2_workarounds_table[i].model_id && |
1533 | * max_sectors to account for it. That is not currently | 1608 | sbp2_workarounds_table[i].model_id != ud->model_id) |
1534 | * possible. */ | 1609 | continue; |
1535 | if ((firmware_revision & 0xffff00) == | 1610 | workarounds |= sbp2_workarounds_table[i].workarounds; |
1536 | SBP2_128KB_BROKEN_FIRMWARE && | 1611 | break; |
1537 | (max_sectors * 512) > (128*1024)) { | ||
1538 | SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB max transfer size.", | ||
1539 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); | ||
1540 | SBP2_WARN("WARNING: Current max_sectors setting is larger than 128KB (%d sectors)!", | ||
1541 | max_sectors); | ||
1542 | workarounds |= SBP2_BREAKAGE_128K_MAX_TRANSFER; | ||
1543 | } | ||
1544 | |||
1545 | /* Check for a blacklisted set of devices that require us to force | ||
1546 | * a 36 byte host inquiry. This can be overriden as a module param | ||
1547 | * (to force all hosts). */ | ||
1548 | for (i = 0; i < ARRAY_SIZE(sbp2_broken_inquiry_list); i++) { | ||
1549 | if ((firmware_revision & 0xffff00) == | ||
1550 | sbp2_broken_inquiry_list[i]) { | ||
1551 | SBP2_WARN("Node " NODE_BUS_FMT ": Using 36byte inquiry workaround", | ||
1552 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid)); | ||
1553 | workarounds |= SBP2_BREAKAGE_INQUIRY_HACK; | ||
1554 | break; /* No need to continue. */ | ||
1555 | } | 1612 | } |
1556 | } | 1613 | |
1614 | if (workarounds) | ||
1615 | SBP2_INFO("Workarounds for node " NODE_BUS_FMT ": 0x%x " | ||
1616 | "(firmware_revision 0x%06x, vendor_id 0x%06x," | ||
1617 | " model_id 0x%06x)", | ||
1618 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), | ||
1619 | workarounds, firmware_revision, | ||
1620 | ud->vendor_id ? ud->vendor_id : ud->ne->vendor_id, | ||
1621 | ud->model_id); | ||
1622 | |||
1623 | /* We would need one SCSI host template for each target to adjust | ||
1624 | * max_sectors on the fly, therefore warn only. */ | ||
1625 | if (workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && | ||
1626 | (max_sectors * 512) > (128 * 1024)) | ||
1627 | SBP2_WARN("Node " NODE_BUS_FMT ": Bridge only supports 128KB " | ||
1628 | "max transfer size. WARNING: Current max_sectors " | ||
1629 | "setting is larger than 128KB (%d sectors)", | ||
1630 | NODE_BUS_ARGS(ud->ne->host, ud->ne->nodeid), | ||
1631 | max_sectors); | ||
1557 | 1632 | ||
1558 | /* If this is a logical unit directory entry, process the parent | 1633 | /* If this is a logical unit directory entry, process the parent |
1559 | * to get the values. */ | 1634 | * to get the values. */ |
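The matching logic introduced above keys on firmware_revision with its low byte masked off and treats a zero model_id as a wildcard. A reduced userspace model of that lookup, using only two illustrative entries and the numeric flag values from the sbp2.h hunk (not the driver's full table):

```c
#include <stdio.h>

struct workaround_entry {
	unsigned firmware_revision;
	unsigned model_id;	/* 0 = wildcard */
	unsigned workarounds;
};

static const struct workaround_entry table[] = {
	{ 0x002800, 0,        0x2 | 0x4 },	/* TSB42AA9: 36 byte inquiry + skip mode page 8 */
	{ 0x0a2700, 0x000021, 0x8 },		/* iPod 4th generation: fix capacity */
};

static unsigned lookup_workarounds(unsigned firmware_revision, unsigned model_id)
{
	unsigned i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i].firmware_revision &&
		    table[i].firmware_revision != (firmware_revision & 0xffff00))
			continue;
		if (table[i].model_id && table[i].model_id != model_id)
			continue;
		return table[i].workarounds;
	}
	return 0;
}

int main(void)
{
	printf("0x%x\n", lookup_workarounds(0x002882, 0x1234));	/* matches first entry */
	printf("0x%x\n", lookup_workarounds(0x0a2700, 0x000023));	/* no match in this subset */
	return 0;
}
```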
@@ -2447,19 +2522,25 @@ static int sbp2scsi_slave_alloc(struct scsi_device *sdev) | |||
2447 | 2522 | ||
2448 | scsi_id->sdev = sdev; | 2523 | scsi_id->sdev = sdev; |
2449 | 2524 | ||
2450 | if (force_inquiry_hack || | 2525 | if (scsi_id->workarounds & SBP2_WORKAROUND_INQUIRY_36) |
2451 | scsi_id->workarounds & SBP2_BREAKAGE_INQUIRY_HACK) { | ||
2452 | sdev->inquiry_len = 36; | 2526 | sdev->inquiry_len = 36; |
2453 | sdev->skip_ms_page_8 = 1; | ||
2454 | } | ||
2455 | return 0; | 2527 | return 0; |
2456 | } | 2528 | } |
2457 | 2529 | ||
2458 | static int sbp2scsi_slave_configure(struct scsi_device *sdev) | 2530 | static int sbp2scsi_slave_configure(struct scsi_device *sdev) |
2459 | { | 2531 | { |
2532 | struct scsi_id_instance_data *scsi_id = | ||
2533 | (struct scsi_id_instance_data *)sdev->host->hostdata[0]; | ||
2534 | |||
2460 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); | 2535 | blk_queue_dma_alignment(sdev->request_queue, (512 - 1)); |
2461 | sdev->use_10_for_rw = 1; | 2536 | sdev->use_10_for_rw = 1; |
2462 | sdev->use_10_for_ms = 1; | 2537 | sdev->use_10_for_ms = 1; |
2538 | |||
2539 | if (sdev->type == TYPE_DISK && | ||
2540 | scsi_id->workarounds & SBP2_WORKAROUND_MODE_SENSE_8) | ||
2541 | sdev->skip_ms_page_8 = 1; | ||
2542 | if (scsi_id->workarounds & SBP2_WORKAROUND_FIX_CAPACITY) | ||
2543 | sdev->fix_capacity = 1; | ||
2463 | return 0; | 2544 | return 0; |
2464 | } | 2545 | } |
2465 | 2546 | ||
@@ -2603,7 +2684,9 @@ static int sbp2_module_init(void) | |||
2603 | scsi_driver_template.cmd_per_lun = 1; | 2684 | scsi_driver_template.cmd_per_lun = 1; |
2604 | } | 2685 | } |
2605 | 2686 | ||
2606 | /* Set max sectors (module load option). Default is 255 sectors. */ | 2687 | if (sbp2_default_workarounds & SBP2_WORKAROUND_128K_MAX_TRANS && |
2688 | (max_sectors * 512) > (128 * 1024)) | ||
2689 | max_sectors = 128 * 1024 / 512; | ||
2607 | scsi_driver_template.max_sectors = max_sectors; | 2690 | scsi_driver_template.max_sectors = max_sectors; |
2608 | 2691 | ||
2609 | /* Register our high level driver with 1394 stack */ | 2692 | /* Register our high level driver with 1394 stack */ |
diff --git a/drivers/ieee1394/sbp2.h b/drivers/ieee1394/sbp2.h index e2d357a9ea3a..f4ccc9d0fba4 100644 --- a/drivers/ieee1394/sbp2.h +++ b/drivers/ieee1394/sbp2.h | |||
@@ -227,11 +227,6 @@ struct sbp2_status_block { | |||
227 | #define SBP2_SW_VERSION_ENTRY 0x00010483 | 227 | #define SBP2_SW_VERSION_ENTRY 0x00010483 |
228 | 228 | ||
229 | /* | 229 | /* |
230 | * Other misc defines | ||
231 | */ | ||
232 | #define SBP2_128KB_BROKEN_FIRMWARE 0xa0b800 | ||
233 | |||
234 | /* | ||
235 | * SCSI specific stuff | 230 | * SCSI specific stuff |
236 | */ | 231 | */ |
237 | 232 | ||
@@ -239,6 +234,13 @@ struct sbp2_status_block { | |||
239 | #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ | 234 | #define SBP2_MAX_SECTORS 255 /* Max sectors supported */ |
240 | #define SBP2_MAX_CMDS 8 /* This should be safe */ | 235 | #define SBP2_MAX_CMDS 8 /* This should be safe */ |
241 | 236 | ||
237 | /* Flags for detected oddities and brokeness */ | ||
238 | #define SBP2_WORKAROUND_128K_MAX_TRANS 0x1 | ||
239 | #define SBP2_WORKAROUND_INQUIRY_36 0x2 | ||
240 | #define SBP2_WORKAROUND_MODE_SENSE_8 0x4 | ||
241 | #define SBP2_WORKAROUND_FIX_CAPACITY 0x8 | ||
242 | #define SBP2_WORKAROUND_OVERRIDE 0x100 | ||
243 | |||
242 | /* This is the two dma types we use for cmd_dma below */ | 244 | /* This is the two dma types we use for cmd_dma below */ |
243 | enum cmd_dma_types { | 245 | enum cmd_dma_types { |
244 | CMD_DMA_NONE, | 246 | CMD_DMA_NONE, |
@@ -268,10 +270,6 @@ struct sbp2_command_info { | |||
268 | 270 | ||
269 | }; | 271 | }; |
270 | 272 | ||
271 | /* A list of flags for detected oddities and brokeness. */ | ||
272 | #define SBP2_BREAKAGE_128K_MAX_TRANSFER 0x1 | ||
273 | #define SBP2_BREAKAGE_INQUIRY_HACK 0x2 | ||
274 | |||
275 | struct sbp2scsi_host_info; | 273 | struct sbp2scsi_host_info; |
276 | 274 | ||
277 | /* | 275 | /* |
@@ -345,7 +343,7 @@ struct scsi_id_instance_data { | |||
345 | struct Scsi_Host *scsi_host; | 343 | struct Scsi_Host *scsi_host; |
346 | 344 | ||
347 | /* Device specific workarounds/brokeness */ | 345 | /* Device specific workarounds/brokeness */ |
348 | u32 workarounds; | 346 | unsigned workarounds; |
349 | }; | 347 | }; |
350 | 348 | ||
351 | /* Sbp2 host data structure (one per IEEE1394 host) */ | 349 | /* Sbp2 host data structure (one per IEEE1394 host) */ |
diff --git a/drivers/infiniband/core/uverbs_mem.c b/drivers/infiniband/core/uverbs_mem.c index 36a32c315668..efe147dbeb42 100644 --- a/drivers/infiniband/core/uverbs_mem.c +++ b/drivers/infiniband/core/uverbs_mem.c | |||
@@ -211,8 +211,10 @@ void ib_umem_release_on_close(struct ib_device *dev, struct ib_umem *umem) | |||
211 | */ | 211 | */ |
212 | 212 | ||
213 | work = kmalloc(sizeof *work, GFP_KERNEL); | 213 | work = kmalloc(sizeof *work, GFP_KERNEL); |
214 | if (!work) | 214 | if (!work) { |
215 | mmput(mm); | ||
215 | return; | 216 | return; |
217 | } | ||
216 | 218 | ||
217 | INIT_WORK(&work->work, ib_umem_account, work); | 219 | INIT_WORK(&work->work, ib_umem_account, work); |
218 | work->mm = mm; | 220 | work->mm = mm; |
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c index 1985b5dfa481..798e13e14faf 100644 --- a/drivers/infiniband/hw/mthca/mthca_cmd.c +++ b/drivers/infiniband/hw/mthca/mthca_cmd.c | |||
@@ -182,7 +182,7 @@ struct mthca_cmd_context { | |||
182 | u8 status; | 182 | u8 status; |
183 | }; | 183 | }; |
184 | 184 | ||
185 | static int fw_cmd_doorbell = 1; | 185 | static int fw_cmd_doorbell = 0; |
186 | module_param(fw_cmd_doorbell, int, 0644); | 186 | module_param(fw_cmd_doorbell, int, 0644); |
187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " | 187 | MODULE_PARM_DESC(fw_cmd_doorbell, "post FW commands through doorbell page if nonzero " |
188 | "(and supported by FW)"); | 188 | "(and supported by FW)"); |
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 19765f6f8d58..07c13be07a4a 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c | |||
@@ -1727,23 +1727,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1727 | 1727 | ||
1728 | ind = qp->rq.next_ind; | 1728 | ind = qp->rq.next_ind; |
1729 | 1729 | ||
1730 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1730 | for (nreq = 0; wr; wr = wr->next) { |
1731 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1732 | nreq = 0; | ||
1733 | |||
1734 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1735 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1736 | |||
1737 | wmb(); | ||
1738 | |||
1739 | mthca_write64(doorbell, | ||
1740 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1741 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1742 | |||
1743 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1744 | size0 = 0; | ||
1745 | } | ||
1746 | |||
1747 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { | 1731 | if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { |
1748 | mthca_err(dev, "RQ %06x full (%u head, %u tail," | 1732 | mthca_err(dev, "RQ %06x full (%u head, %u tail," |
1749 | " %d max, %d nreq)\n", qp->qpn, | 1733 | " %d max, %d nreq)\n", qp->qpn, |
@@ -1797,6 +1781,23 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, | |||
1797 | ++ind; | 1781 | ++ind; |
1798 | if (unlikely(ind >= qp->rq.max)) | 1782 | if (unlikely(ind >= qp->rq.max)) |
1799 | ind -= qp->rq.max; | 1783 | ind -= qp->rq.max; |
1784 | |||
1785 | ++nreq; | ||
1786 | if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { | ||
1787 | nreq = 0; | ||
1788 | |||
1789 | doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); | ||
1790 | doorbell[1] = cpu_to_be32(qp->qpn << 8); | ||
1791 | |||
1792 | wmb(); | ||
1793 | |||
1794 | mthca_write64(doorbell, | ||
1795 | dev->kar + MTHCA_RECEIVE_DOORBELL, | ||
1796 | MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); | ||
1797 | |||
1798 | qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; | ||
1799 | size0 = 0; | ||
1800 | } | ||
1800 | } | 1801 | } |
1801 | 1802 | ||
1802 | out: | 1803 | out: |
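The reordering above makes the Tavor receive path count a work request only after it has been written, ringing the doorbell once a full batch has been posted rather than at the top of the next iteration. Abstracted from the mthca specifics, this is flush-when-full batching; the names below are placeholders, not the driver's API:

```c
#include <stdio.h>

#define BATCH_MAX 256	/* stands in for MTHCA_TAVOR_MAX_WQES_PER_RECV_DB */

/* Placeholder for ringing the hardware doorbell for n posted requests. */
static void ring_doorbell(int first_index, int n)
{
	printf("doorbell: %d requests starting at %d\n", n, first_index);
}

int main(void)
{
	int total = 600, posted = 0, nreq = 0, batch_start = 0;

	while (posted < total) {
		/* ... build and write the work request here ... */
		posted++;

		/* Count the request only after it is posted, then flush a
		 * full batch - the ordering the mthca change establishes. */
		if (++nreq == BATCH_MAX) {
			ring_doorbell(batch_start, nreq);
			batch_start += nreq;
			nreq = 0;
		}
	}
	if (nreq)	/* leftover partial batch, rung after the loop */
		ring_doorbell(batch_start, nreq);
	return 0;
}
```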
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index c32ce4348e1b..9cbdffa08dc2 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -340,7 +340,10 @@ static void srp_disconnect_target(struct srp_target_port *target) | |||
340 | /* XXX should send SRP_I_LOGOUT request */ | 340 | /* XXX should send SRP_I_LOGOUT request */ |
341 | 341 | ||
342 | init_completion(&target->done); | 342 | init_completion(&target->done); |
343 | ib_send_cm_dreq(target->cm_id, NULL, 0); | 343 | if (ib_send_cm_dreq(target->cm_id, NULL, 0)) { |
344 | printk(KERN_DEBUG PFX "Sending CM DREQ failed\n"); | ||
345 | return; | ||
346 | } | ||
344 | wait_for_completion(&target->done); | 347 | wait_for_completion(&target->done); |
345 | } | 348 | } |
346 | 349 | ||
@@ -351,7 +354,6 @@ static void srp_remove_work(void *target_ptr) | |||
351 | spin_lock_irq(target->scsi_host->host_lock); | 354 | spin_lock_irq(target->scsi_host->host_lock); |
352 | if (target->state != SRP_TARGET_DEAD) { | 355 | if (target->state != SRP_TARGET_DEAD) { |
353 | spin_unlock_irq(target->scsi_host->host_lock); | 356 | spin_unlock_irq(target->scsi_host->host_lock); |
354 | scsi_host_put(target->scsi_host); | ||
355 | return; | 357 | return; |
356 | } | 358 | } |
357 | target->state = SRP_TARGET_REMOVED; | 359 | target->state = SRP_TARGET_REMOVED; |
@@ -365,8 +367,6 @@ static void srp_remove_work(void *target_ptr) | |||
365 | ib_destroy_cm_id(target->cm_id); | 367 | ib_destroy_cm_id(target->cm_id); |
366 | srp_free_target_ib(target); | 368 | srp_free_target_ib(target); |
367 | scsi_host_put(target->scsi_host); | 369 | scsi_host_put(target->scsi_host); |
368 | /* And another put to really free the target port... */ | ||
369 | scsi_host_put(target->scsi_host); | ||
370 | } | 370 | } |
371 | 371 | ||
372 | static int srp_connect_target(struct srp_target_port *target) | 372 | static int srp_connect_target(struct srp_target_port *target) |
@@ -1241,7 +1241,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd) | |||
1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) | 1241 | list_for_each_entry_safe(req, tmp, &target->req_queue, list) |
1242 | if (req->scmnd->device == scmnd->device) { | 1242 | if (req->scmnd->device == scmnd->device) { |
1243 | req->scmnd->result = DID_RESET << 16; | 1243 | req->scmnd->result = DID_RESET << 16; |
1244 | scmnd->scsi_done(scmnd); | 1244 | req->scmnd->scsi_done(req->scmnd); |
1245 | srp_remove_req(target, req); | 1245 | srp_remove_req(target, req); |
1246 | } | 1246 | } |
1247 | 1247 | ||
diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c index 9b493f0becc4..173c899a1fb4 100644 --- a/drivers/isdn/capi/capi.c +++ b/drivers/isdn/capi/capi.c | |||
@@ -1499,7 +1499,6 @@ static int __init capi_init(void) | |||
1499 | printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); | 1499 | printk(KERN_ERR "capi20: unable to get major %d\n", capi_major); |
1500 | return major_ret; | 1500 | return major_ret; |
1501 | } | 1501 | } |
1502 | capi_major = major_ret; | ||
1503 | capi_class = class_create(THIS_MODULE, "capi"); | 1502 | capi_class = class_create(THIS_MODULE, "capi"); |
1504 | if (IS_ERR(capi_class)) { | 1503 | if (IS_ERR(capi_class)) { |
1505 | unregister_chrdev(capi_major, "capi20"); | 1504 | unregister_chrdev(capi_major, "capi20"); |
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c index bfb73fd5077e..d86ab68114b0 100644 --- a/drivers/isdn/gigaset/usb-gigaset.c +++ b/drivers/isdn/gigaset/usb-gigaset.c | |||
@@ -710,8 +710,8 @@ static int gigaset_probe(struct usb_interface *interface, | |||
710 | retval = -ENODEV; //FIXME | 710 | retval = -ENODEV; //FIXME |
711 | 711 | ||
712 | /* See if the device offered us matches what we can accept */ | 712 | /* See if the device offered us matches what we can accept */ |
713 | if ((le16_to_cpu(udev->descriptor.idVendor != USB_M105_VENDOR_ID)) || | 713 | if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) || |
714 | (le16_to_cpu(udev->descriptor.idProduct != USB_M105_PRODUCT_ID))) | 714 | (le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) |
715 | return -ENODEV; | 715 | return -ENODEV; |
716 | 716 | ||
717 | /* this starts to become ascii art... */ | 717 | /* this starts to become ascii art... */ |
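The gigaset fix above moves a misplaced parenthesis: the old code applied le16_to_cpu() to the 0/1 result of the comparison instead of to the descriptor field. A self-contained illustration, using a byte-swapping stand-in to play the role of le16_to_cpu() on a big-endian host (the vendor ID value is made up, not the real USB_M105_VENDOR_ID):

```c
#include <stdint.h>
#include <stdio.h>

/* Stand-in for le16_to_cpu() on a hypothetical big-endian host:
 * little-endian wire values must be byte-swapped before use. */
static uint16_t swap16(uint16_t v)
{
	return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
	const uint16_t VENDOR_ID = 0x0681;	/* illustrative constant */

	/* The descriptor field as a big-endian CPU sees the raw
	 * little-endian bytes 0x81 0x06 before any conversion. */
	uint16_t raw_idVendor = 0x8106;

	/* Broken (old code): the comparison runs on the unconverted value,
	 * and the byte swap is applied to its 0/1 result. */
	uint16_t broken = swap16(raw_idVendor != VENDOR_ID);	/* 0x0100: bogus mismatch */

	/* Fixed (new code): convert first, then compare. */
	int fixed = swap16(raw_idVendor) != VENDOR_ID;		/* 0: IDs match */

	printf("broken=0x%04x fixed=%d\n", broken, fixed);
	return 0;
}
```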
diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c index 3585fb1f3344..2ac90242d263 100644 --- a/drivers/isdn/i4l/isdn_tty.c +++ b/drivers/isdn/i4l/isdn_tty.c | |||
@@ -2880,7 +2880,7 @@ isdn_tty_cmd_ATand(char **p, modem_info * info) | |||
2880 | p[0]++; | 2880 | p[0]++; |
2881 | i = 0; | 2881 | i = 0; |
2882 | while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) && | 2882 | while (*p[0] && (strchr("0123456789,-*[]?;", *p[0])) && |
2883 | (i < ISDN_LMSNLEN)) | 2883 | (i < ISDN_LMSNLEN - 1)) |
2884 | m->lmsn[i++] = *p[0]++; | 2884 | m->lmsn[i++] = *p[0]++; |
2885 | m->lmsn[i] = '\0'; | 2885 | m->lmsn[i] = '\0'; |
2886 | break; | 2886 | break; |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 3f5b64794542..626506234b76 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -4,8 +4,11 @@ menu "LED devices" | |||
4 | config NEW_LEDS | 4 | config NEW_LEDS |
5 | bool "LED Support" | 5 | bool "LED Support" |
6 | help | 6 | help |
7 | Say Y to enable Linux LED support. This is not related to standard | 7 | Say Y to enable Linux LED support. This allows control of supported |
8 | keyboard LEDs which are controlled via the input system. | 8 | LEDs from both userspace and, optionally, kernel events (triggers). |
9 | |||
10 | This is not related to standard keyboard LEDs which are controlled | ||
11 | via the input system. | ||
9 | 12 | ||
10 | config LEDS_CLASS | 13 | config LEDS_CLASS |
11 | tristate "LED Class Support" | 14 | tristate "LED Class Support" |
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index b0b5d05fadd6..c75d0ef1609c 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/sysdev.h> | 19 | #include <linux/sysdev.h> |
20 | #include <linux/timer.h> | 20 | #include <linux/timer.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/ctype.h> | ||
22 | #include <linux/leds.h> | 23 | #include <linux/leds.h> |
23 | #include "leds.h" | 24 | #include "leds.h" |
24 | 25 | ||
@@ -43,9 +44,13 @@ static ssize_t led_brightness_store(struct class_device *dev, | |||
43 | ssize_t ret = -EINVAL; | 44 | ssize_t ret = -EINVAL; |
44 | char *after; | 45 | char *after; |
45 | unsigned long state = simple_strtoul(buf, &after, 10); | 46 | unsigned long state = simple_strtoul(buf, &after, 10); |
47 | size_t count = after - buf; | ||
46 | 48 | ||
47 | if (after - buf > 0) { | 49 | if (*after && isspace(*after)) |
48 | ret = after - buf; | 50 | count++; |
51 | |||
52 | if (count == size) { | ||
53 | ret = count; | ||
49 | led_set_brightness(led_cdev, state); | 54 | led_set_brightness(led_cdev, state); |
50 | } | 55 | } |
51 | 56 | ||
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c index f484b5d6dbf8..fbf141ef46ec 100644 --- a/drivers/leds/ledtrig-timer.c +++ b/drivers/leds/ledtrig-timer.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/sysdev.h> |
22 | #include <linux/timer.h> | 22 | #include <linux/timer.h> |
23 | #include <linux/ctype.h> | ||
23 | #include <linux/leds.h> | 24 | #include <linux/leds.h> |
24 | #include "leds.h" | 25 | #include "leds.h" |
25 | 26 | ||
@@ -69,11 +70,15 @@ static ssize_t led_delay_on_store(struct class_device *dev, const char *buf, | |||
69 | int ret = -EINVAL; | 70 | int ret = -EINVAL; |
70 | char *after; | 71 | char *after; |
71 | unsigned long state = simple_strtoul(buf, &after, 10); | 72 | unsigned long state = simple_strtoul(buf, &after, 10); |
73 | size_t count = after - buf; | ||
72 | 74 | ||
73 | if (after - buf > 0) { | 75 | if (*after && isspace(*after)) |
76 | count++; | ||
77 | |||
78 | if (count == size) { | ||
74 | timer_data->delay_on = state; | 79 | timer_data->delay_on = state; |
75 | mod_timer(&timer_data->timer, jiffies + 1); | 80 | mod_timer(&timer_data->timer, jiffies + 1); |
76 | ret = after - buf; | 81 | ret = count; |
77 | } | 82 | } |
78 | 83 | ||
79 | return ret; | 84 | return ret; |
@@ -97,11 +102,15 @@ static ssize_t led_delay_off_store(struct class_device *dev, const char *buf, | |||
97 | int ret = -EINVAL; | 102 | int ret = -EINVAL; |
98 | char *after; | 103 | char *after; |
99 | unsigned long state = simple_strtoul(buf, &after, 10); | 104 | unsigned long state = simple_strtoul(buf, &after, 10); |
105 | size_t count = after - buf; | ||
106 | |||
107 | if (*after && isspace(*after)) | ||
108 | count++; | ||
100 | 109 | ||
101 | if (after - buf > 0) { | 110 | if (count == size) { |
102 | timer_data->delay_off = state; | 111 | timer_data->delay_off = state; |
103 | mod_timer(&timer_data->timer, jiffies + 1); | 112 | mod_timer(&timer_data->timer, jiffies + 1); |
104 | ret = after - buf; | 113 | ret = count; |
105 | } | 114 | } |
106 | 115 | ||
107 | return ret; | 116 | return ret; |
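
The three sysfs store hunks above (led_brightness_store, led_delay_on_store, led_delay_off_store) share one parsing pattern: accept the write only if the whole buffer was consumed, tolerating a single trailing whitespace character such as the newline that echo appends. The standalone C sketch below models that check outside the kernel, using strtoul() in place of simple_strtoul(); the helper name parse_store_value is ours, not something added by the patch.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Returns the number of bytes accepted, or -1 if the buffer does not
 * contain exactly one number (plus an optional trailing newline). */
static long parse_store_value(const char *buf, size_t size, unsigned long *state)
{
	char *after;
	size_t count;

	*state = strtoul(buf, &after, 10);	/* stand-in for simple_strtoul() */
	count = after - buf;

	/* Tolerate one trailing whitespace character, e.g. '\n' from echo. */
	if (*after && isspace((unsigned char)*after))
		count++;

	return (count == size) ? (long)count : -1;
}

int main(void)
{
	unsigned long v;
	const char *good = "128\n";
	const char *bad = "128 extra\n";

	printf("good: %ld (value %lu)\n", parse_store_value(good, strlen(good), &v), v);
	printf("bad:  %ld\n", parse_store_value(bad, strlen(bad), &v));
	return 0;
}

A bare "128" with no newline is also accepted, since count then equals size without the whitespace adjustment; anything with trailing garbage is rejected with -EINVAL in the kernel versions.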
diff --git a/drivers/md/md.c b/drivers/md/md.c index d7316b829a62..3ca3cfb03a7e 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -2252,7 +2252,7 @@ action_store(mddev_t *mddev, const char *page, size_t len) | |||
2252 | } else { | 2252 | } else { |
2253 | if (cmd_match(page, "check")) | 2253 | if (cmd_match(page, "check")) |
2254 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); | 2254 | set_bit(MD_RECOVERY_CHECK, &mddev->recovery); |
2255 | else if (cmd_match(page, "repair")) | 2255 | else if (!cmd_match(page, "repair")) |
2256 | return -EINVAL; | 2256 | return -EINVAL; |
2257 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); | 2257 | set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); |
2258 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); | 2258 | set_bit(MD_RECOVERY_SYNC, &mddev->recovery); |
diff --git a/drivers/mmc/au1xmmc.c b/drivers/mmc/au1xmmc.c index 914d62b24064..5dc4bee7abeb 100644 --- a/drivers/mmc/au1xmmc.c +++ b/drivers/mmc/au1xmmc.c | |||
@@ -310,7 +310,7 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status) | |||
310 | } | 310 | } |
311 | else | 311 | else |
312 | data->bytes_xfered = | 312 | data->bytes_xfered = |
313 | (data->blocks * (1 << data->blksz_bits)) - | 313 | (data->blocks * data->blksz) - |
314 | host->pio.len; | 314 | host->pio.len; |
315 | } | 315 | } |
316 | 316 | ||
@@ -575,7 +575,7 @@ static int | |||
575 | au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) | 575 | au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) |
576 | { | 576 | { |
577 | 577 | ||
578 | int datalen = data->blocks * (1 << data->blksz_bits); | 578 | int datalen = data->blocks * data->blksz; |
579 | 579 | ||
580 | if (dma != 0) | 580 | if (dma != 0) |
581 | host->flags |= HOST_F_DMA; | 581 | host->flags |= HOST_F_DMA; |
@@ -596,7 +596,7 @@ au1xmmc_prepare_data(struct au1xmmc_host *host, struct mmc_data *data) | |||
596 | if (host->dma.len == 0) | 596 | if (host->dma.len == 0) |
597 | return MMC_ERR_TIMEOUT; | 597 | return MMC_ERR_TIMEOUT; |
598 | 598 | ||
599 | au_writel((1 << data->blksz_bits) - 1, HOST_BLKSIZE(host)); | 599 | au_writel(data->blksz - 1, HOST_BLKSIZE(host)); |
600 | 600 | ||
601 | if (host->flags & HOST_F_DMA) { | 601 | if (host->flags & HOST_F_DMA) { |
602 | int i; | 602 | int i; |
diff --git a/drivers/mmc/imxmmc.c b/drivers/mmc/imxmmc.c index 79358e223f57..a4eb1d0e7a71 100644 --- a/drivers/mmc/imxmmc.c +++ b/drivers/mmc/imxmmc.c | |||
@@ -218,8 +218,10 @@ static int imxmci_busy_wait_for_status(struct imxmci_host *host, | |||
218 | if(!loops) | 218 | if(!loops) |
219 | return 0; | 219 | return 0; |
220 | 220 | ||
221 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | 221 | /* The busy-wait is expected there for clocks below 8 MHz due to SDHC hardware flaws */ |
222 | loops, where, *pstat, stat_mask); | 222 | if(!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock>=8000000)) |
223 | dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", | ||
224 | loops, where, *pstat, stat_mask); | ||
223 | return loops; | 225 | return loops; |
224 | } | 226 | } |
225 | 227 | ||
@@ -333,6 +335,9 @@ static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, | |||
333 | WARN_ON(host->cmd != NULL); | 335 | WARN_ON(host->cmd != NULL); |
334 | host->cmd = cmd; | 336 | host->cmd = cmd; |
335 | 337 | ||
338 | /* Ensure that the clock is stopped, else command programming and start fail */ | ||
339 | imxmci_stop_clock(host); | ||
340 | |||
336 | if (cmd->flags & MMC_RSP_BUSY) | 341 | if (cmd->flags & MMC_RSP_BUSY) |
337 | cmdat |= CMD_DAT_CONT_BUSY; | 342 | cmdat |= CMD_DAT_CONT_BUSY; |
338 | 343 | ||
@@ -553,7 +558,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | |||
553 | int trans_done = 0; | 558 | int trans_done = 0; |
554 | unsigned int stat = *pstat; | 559 | unsigned int stat = *pstat; |
555 | 560 | ||
556 | if(host->actual_bus_width == MMC_BUS_WIDTH_4) | 561 | if(host->actual_bus_width != MMC_BUS_WIDTH_4) |
557 | burst_len = 16; | 562 | burst_len = 16; |
558 | else | 563 | else |
559 | burst_len = 64; | 564 | burst_len = 64; |
@@ -591,8 +596,7 @@ static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) | |||
591 | stat = MMC_STATUS; | 596 | stat = MMC_STATUS; |
592 | 597 | ||
593 | /* Flush extra bytes from FIFO */ | 598 | /* Flush extra bytes from FIFO */ |
594 | while(flush_len >= 2){ | 599 | while(flush_len && !(stat & STATUS_DATA_TRANS_DONE)){ |
595 | flush_len -= 2; | ||
596 | i = MMC_BUFFER_ACCESS; | 600 | i = MMC_BUFFER_ACCESS; |
597 | stat = MMC_STATUS; | 601 | stat = MMC_STATUS; |
598 | stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ | 602 | stat &= ~STATUS_CRC_READ_ERR; /* Stupid but required there */ |
@@ -746,10 +750,6 @@ static void imxmci_tasklet_fnc(unsigned long data) | |||
746 | data_dir_mask = STATUS_DATA_TRANS_DONE; | 750 | data_dir_mask = STATUS_DATA_TRANS_DONE; |
747 | } | 751 | } |
748 | 752 | ||
749 | imxmci_busy_wait_for_status(host, &stat, | ||
750 | data_dir_mask, | ||
751 | 50, "imxmci_tasklet_fnc data"); | ||
752 | |||
753 | if(stat & data_dir_mask) { | 753 | if(stat & data_dir_mask) { |
754 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); | 754 | clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); |
755 | imxmci_data_done(host, stat); | 755 | imxmci_data_done(host, stat); |
@@ -865,7 +865,11 @@ static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) | |||
865 | 865 | ||
866 | imxmci_stop_clock(host); | 866 | imxmci_stop_clock(host); |
867 | MMC_CLK_RATE = (prescaler<<3) | clk; | 867 | MMC_CLK_RATE = (prescaler<<3) | clk; |
868 | imxmci_start_clock(host); | 868 | /* |
869 | * Under my understanding, clock should not be started there, because it would | ||
870 | * initiate SDHC sequencer and send last or random command into card | ||
871 | */ | ||
872 | /*imxmci_start_clock(host);*/ | ||
869 | 873 | ||
870 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); | 874 | dev_dbg(mmc_dev(host->mmc), "MMC_CLK_RATE: 0x%08x\n", MMC_CLK_RATE); |
871 | } else { | 875 | } else { |
diff --git a/drivers/mmc/mmc.c b/drivers/mmc/mmc.c index 1ca2c8b9c9b5..6201f3086a02 100644 --- a/drivers/mmc/mmc.c +++ b/drivers/mmc/mmc.c | |||
@@ -951,6 +951,7 @@ static void mmc_read_scrs(struct mmc_host *host) | |||
951 | data.timeout_ns = card->csd.tacc_ns * 10; | 951 | data.timeout_ns = card->csd.tacc_ns * 10; |
952 | data.timeout_clks = card->csd.tacc_clks * 10; | 952 | data.timeout_clks = card->csd.tacc_clks * 10; |
953 | data.blksz_bits = 3; | 953 | data.blksz_bits = 3; |
954 | data.blksz = 1 << 3; | ||
954 | data.blocks = 1; | 955 | data.blocks = 1; |
955 | data.flags = MMC_DATA_READ; | 956 | data.flags = MMC_DATA_READ; |
956 | data.sg = &sg; | 957 | data.sg = &sg; |
diff --git a/drivers/mmc/mmc_block.c b/drivers/mmc/mmc_block.c index 06bd1f4cb9b1..e39cc05c64c2 100644 --- a/drivers/mmc/mmc_block.c +++ b/drivers/mmc/mmc_block.c | |||
@@ -175,6 +175,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) | |||
175 | brq.data.timeout_ns = card->csd.tacc_ns * 10; | 175 | brq.data.timeout_ns = card->csd.tacc_ns * 10; |
176 | brq.data.timeout_clks = card->csd.tacc_clks * 10; | 176 | brq.data.timeout_clks = card->csd.tacc_clks * 10; |
177 | brq.data.blksz_bits = md->block_bits; | 177 | brq.data.blksz_bits = md->block_bits; |
178 | brq.data.blksz = 1 << md->block_bits; | ||
178 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); | 179 | brq.data.blocks = req->nr_sectors >> (md->block_bits - 9); |
179 | brq.stop.opcode = MMC_STOP_TRANSMISSION; | 180 | brq.stop.opcode = MMC_STOP_TRANSMISSION; |
180 | brq.stop.arg = 0; | 181 | brq.stop.arg = 0; |
diff --git a/drivers/mmc/pxamci.c b/drivers/mmc/pxamci.c index f97b472085cb..b49368fd96b8 100644 --- a/drivers/mmc/pxamci.c +++ b/drivers/mmc/pxamci.c | |||
@@ -119,7 +119,7 @@ static void pxamci_setup_data(struct pxamci_host *host, struct mmc_data *data) | |||
119 | nob = 0xffff; | 119 | nob = 0xffff; |
120 | 120 | ||
121 | writel(nob, host->base + MMC_NOB); | 121 | writel(nob, host->base + MMC_NOB); |
122 | writel(1 << data->blksz_bits, host->base + MMC_BLKLEN); | 122 | writel(data->blksz, host->base + MMC_BLKLEN); |
123 | 123 | ||
124 | clks = (unsigned long long)data->timeout_ns * CLOCKRATE; | 124 | clks = (unsigned long long)data->timeout_ns * CLOCKRATE; |
125 | do_div(clks, 1000000000UL); | 125 | do_div(clks, 1000000000UL); |
@@ -283,7 +283,7 @@ static int pxamci_data_done(struct pxamci_host *host, unsigned int stat) | |||
283 | * data blocks as being in error. | 283 | * data blocks as being in error. |
284 | */ | 284 | */ |
285 | if (data->error == MMC_ERR_NONE) | 285 | if (data->error == MMC_ERR_NONE) |
286 | data->bytes_xfered = data->blocks << data->blksz_bits; | 286 | data->bytes_xfered = data->blocks * data->blksz; |
287 | else | 287 | else |
288 | data->bytes_xfered = 0; | 288 | data->bytes_xfered = 0; |
289 | 289 | ||
diff --git a/drivers/mmc/wbsd.c b/drivers/mmc/wbsd.c index 39b3d97f891e..8167332d4013 100644 --- a/drivers/mmc/wbsd.c +++ b/drivers/mmc/wbsd.c | |||
@@ -662,14 +662,14 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) | |||
662 | unsigned long dmaflags; | 662 | unsigned long dmaflags; |
663 | 663 | ||
664 | DBGF("blksz %04x blks %04x flags %08x\n", | 664 | DBGF("blksz %04x blks %04x flags %08x\n", |
665 | 1 << data->blksz_bits, data->blocks, data->flags); | 665 | data->blksz, data->blocks, data->flags); |
666 | DBGF("tsac %d ms nsac %d clk\n", | 666 | DBGF("tsac %d ms nsac %d clk\n", |
667 | data->timeout_ns / 1000000, data->timeout_clks); | 667 | data->timeout_ns / 1000000, data->timeout_clks); |
668 | 668 | ||
669 | /* | 669 | /* |
670 | * Calculate size. | 670 | * Calculate size. |
671 | */ | 671 | */ |
672 | host->size = data->blocks << data->blksz_bits; | 672 | host->size = data->blocks * data->blksz; |
673 | 673 | ||
674 | /* | 674 | /* |
675 | * Check timeout values for overflow. | 675 | * Check timeout values for overflow. |
@@ -696,12 +696,12 @@ static void wbsd_prepare_data(struct wbsd_host *host, struct mmc_data *data) | |||
696 | * Two bytes are needed for each data line. | 696 | * Two bytes are needed for each data line. |
697 | */ | 697 | */ |
698 | if (host->bus_width == MMC_BUS_WIDTH_1) { | 698 | if (host->bus_width == MMC_BUS_WIDTH_1) { |
699 | blksize = (1 << data->blksz_bits) + 2; | 699 | blksize = data->blksz + 2; |
700 | 700 | ||
701 | wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); | 701 | wbsd_write_index(host, WBSD_IDX_PBSMSB, (blksize >> 4) & 0xF0); |
702 | wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); | 702 | wbsd_write_index(host, WBSD_IDX_PBSLSB, blksize & 0xFF); |
703 | } else if (host->bus_width == MMC_BUS_WIDTH_4) { | 703 | } else if (host->bus_width == MMC_BUS_WIDTH_4) { |
704 | blksize = (1 << data->blksz_bits) + 2 * 4; | 704 | blksize = data->blksz + 2 * 4; |
705 | 705 | ||
706 | wbsd_write_index(host, WBSD_IDX_PBSMSB, | 706 | wbsd_write_index(host, WBSD_IDX_PBSMSB, |
707 | ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); | 707 | ((blksize >> 4) & 0xF0) | WBSD_DATA_WIDTH); |
diff --git a/drivers/net/b44.c b/drivers/net/b44.c index 3d306681919e..d8233e0b7899 100644 --- a/drivers/net/b44.c +++ b/drivers/net/b44.c | |||
@@ -650,9 +650,11 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
650 | 650 | ||
651 | /* Hardware bug work-around, the chip is unable to do PCI DMA | 651 | /* Hardware bug work-around, the chip is unable to do PCI DMA |
652 | to/from anything above 1GB :-( */ | 652 | to/from anything above 1GB :-( */ |
653 | if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | 653 | if (dma_mapping_error(mapping) || |
654 | mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | ||
654 | /* Sigh... */ | 655 | /* Sigh... */ |
655 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | 656 | if (!dma_mapping_error(mapping)) |
657 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
656 | dev_kfree_skb_any(skb); | 658 | dev_kfree_skb_any(skb); |
657 | skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); | 659 | skb = __dev_alloc_skb(RX_PKT_BUF_SZ,GFP_DMA); |
658 | if (skb == NULL) | 660 | if (skb == NULL) |
@@ -660,8 +662,10 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked) | |||
660 | mapping = pci_map_single(bp->pdev, skb->data, | 662 | mapping = pci_map_single(bp->pdev, skb->data, |
661 | RX_PKT_BUF_SZ, | 663 | RX_PKT_BUF_SZ, |
662 | PCI_DMA_FROMDEVICE); | 664 | PCI_DMA_FROMDEVICE); |
663 | if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { | 665 | if (dma_mapping_error(mapping) || |
664 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | 666 | mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) { |
667 | if (!dma_mapping_error(mapping)) | ||
668 | pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); | ||
665 | dev_kfree_skb_any(skb); | 669 | dev_kfree_skb_any(skb); |
666 | return -ENOMEM; | 670 | return -ENOMEM; |
667 | } | 671 | } |
@@ -967,9 +971,10 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
967 | } | 971 | } |
968 | 972 | ||
969 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); | 973 | mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); |
970 | if (mapping + len > B44_DMA_MASK) { | 974 | if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { |
971 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ | 975 | /* Chip can't handle DMA to/from >1GB, use bounce buffer */ |
972 | pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); | 976 | if (!dma_mapping_error(mapping)) |
977 | pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); | ||
973 | 978 | ||
974 | bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, | 979 | bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, |
975 | GFP_ATOMIC|GFP_DMA); | 980 | GFP_ATOMIC|GFP_DMA); |
@@ -978,8 +983,9 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
978 | 983 | ||
979 | mapping = pci_map_single(bp->pdev, bounce_skb->data, | 984 | mapping = pci_map_single(bp->pdev, bounce_skb->data, |
980 | len, PCI_DMA_TODEVICE); | 985 | len, PCI_DMA_TODEVICE); |
981 | if (mapping + len > B44_DMA_MASK) { | 986 | if (dma_mapping_error(mapping) || mapping + len > B44_DMA_MASK) { |
982 | pci_unmap_single(bp->pdev, mapping, | 987 | if (!dma_mapping_error(mapping)) |
988 | pci_unmap_single(bp->pdev, mapping, | ||
983 | len, PCI_DMA_TODEVICE); | 989 | len, PCI_DMA_TODEVICE); |
984 | dev_kfree_skb_any(bounce_skb); | 990 | dev_kfree_skb_any(bounce_skb); |
985 | goto err_out; | 991 | goto err_out; |
@@ -1203,7 +1209,8 @@ static int b44_alloc_consistent(struct b44 *bp) | |||
1203 | DMA_TABLE_BYTES, | 1209 | DMA_TABLE_BYTES, |
1204 | DMA_BIDIRECTIONAL); | 1210 | DMA_BIDIRECTIONAL); |
1205 | 1211 | ||
1206 | if (rx_ring_dma + size > B44_DMA_MASK) { | 1212 | if (dma_mapping_error(rx_ring_dma) || |
1213 | rx_ring_dma + size > B44_DMA_MASK) { | ||
1207 | kfree(rx_ring); | 1214 | kfree(rx_ring); |
1208 | goto out_err; | 1215 | goto out_err; |
1209 | } | 1216 | } |
@@ -1229,7 +1236,8 @@ static int b44_alloc_consistent(struct b44 *bp) | |||
1229 | DMA_TABLE_BYTES, | 1236 | DMA_TABLE_BYTES, |
1230 | DMA_TO_DEVICE); | 1237 | DMA_TO_DEVICE); |
1231 | 1238 | ||
1232 | if (tx_ring_dma + size > B44_DMA_MASK) { | 1239 | if (dma_mapping_error(tx_ring_dma) || |
1240 | tx_ring_dma + size > B44_DMA_MASK) { | ||
1233 | kfree(tx_ring); | 1241 | kfree(tx_ring); |
1234 | goto out_err; | 1242 | goto out_err; |
1235 | } | 1243 | } |
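
All four b44.c hunks above apply the same guard: test dma_mapping_error() in addition to the driver's 1 GB address limit, and call pci_unmap_single() only when a mapping was actually established. A minimal sketch of that shape follows; b44_map_rx() is a hypothetical helper written for illustration, not a function added by the patch, and it assumes the 2.6.16-era one-argument dma_mapping_error().

/* Hypothetical helper illustrating the check pattern from the hunks above. */
static int b44_map_rx(struct b44 *bp, struct sk_buff *skb, dma_addr_t *mapping)
{
	*mapping = pci_map_single(bp->pdev, skb->data,
				  RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	if (dma_mapping_error(*mapping) ||
	    *mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
		/* Unmap only if the mapping actually succeeded. */
		if (!dma_mapping_error(*mapping))
			pci_unmap_single(bp->pdev, *mapping,
					 RX_PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
		return -ENOMEM;	/* caller retries with a GFP_DMA bounce buffer */
	}
	return 0;
}

The TX path and the descriptor-ring allocation use the same two-step test; only the length and DMA direction passed to the mapping calls differ.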
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c index 1ddefd281213..038447fb5c5e 100644 --- a/drivers/net/dl2k.c +++ b/drivers/net/dl2k.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #define DRV_VERSION "v1.17b" | 53 | #define DRV_VERSION "v1.17b" |
54 | #define DRV_RELDATE "2006/03/10" | 54 | #define DRV_RELDATE "2006/03/10" |
55 | #include "dl2k.h" | 55 | #include "dl2k.h" |
56 | #include <linux/dma-mapping.h> | ||
56 | 57 | ||
57 | static char version[] __devinitdata = | 58 | static char version[] __devinitdata = |
58 | KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; | 59 | KERN_INFO DRV_NAME " " DRV_VERSION " " DRV_RELDATE "\n"; |
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index f7235c9bc421..705e1229d89d 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -2891,78 +2891,6 @@ static int nv_open(struct net_device *dev) | |||
2891 | goto out_drain; | 2891 | goto out_drain; |
2892 | } | 2892 | } |
2893 | 2893 | ||
2894 | if (np->msi_flags & NV_MSI_X_CAPABLE) { | ||
2895 | for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { | ||
2896 | np->msi_x_entry[i].entry = i; | ||
2897 | } | ||
2898 | if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { | ||
2899 | np->msi_flags |= NV_MSI_X_ENABLED; | ||
2900 | if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT) { | ||
2901 | /* Request irq for rx handling */ | ||
2902 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, &nv_nic_irq_rx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2903 | printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); | ||
2904 | pci_disable_msix(np->pci_dev); | ||
2905 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2906 | goto out_drain; | ||
2907 | } | ||
2908 | /* Request irq for tx handling */ | ||
2909 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, &nv_nic_irq_tx, SA_SHIRQ, dev->name, dev) != 0) { | ||
2910 | printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); | ||
2911 | pci_disable_msix(np->pci_dev); | ||
2912 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2913 | goto out_drain; | ||
2914 | } | ||
2915 | /* Request irq for link and timer handling */ | ||
2916 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, &nv_nic_irq_other, SA_SHIRQ, dev->name, dev) != 0) { | ||
2917 | printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); | ||
2918 | pci_disable_msix(np->pci_dev); | ||
2919 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2920 | goto out_drain; | ||
2921 | } | ||
2922 | |||
2923 | /* map interrupts to their respective vector */ | ||
2924 | writel(0, base + NvRegMSIXMap0); | ||
2925 | writel(0, base + NvRegMSIXMap1); | ||
2926 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_RX, NVREG_IRQ_RX_ALL); | ||
2927 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_TX, NVREG_IRQ_TX_ALL); | ||
2928 | set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER); | ||
2929 | } else { | ||
2930 | /* Request irq for all interrupts */ | ||
2931 | if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2932 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2933 | pci_disable_msix(np->pci_dev); | ||
2934 | np->msi_flags &= ~NV_MSI_X_ENABLED; | ||
2935 | goto out_drain; | ||
2936 | } | ||
2937 | |||
2938 | /* map interrupts to vector 0 */ | ||
2939 | writel(0, base + NvRegMSIXMap0); | ||
2940 | writel(0, base + NvRegMSIXMap1); | ||
2941 | } | ||
2942 | } | ||
2943 | } | ||
2944 | if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { | ||
2945 | if ((ret = pci_enable_msi(np->pci_dev)) == 0) { | ||
2946 | np->msi_flags |= NV_MSI_ENABLED; | ||
2947 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) { | ||
2948 | printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); | ||
2949 | pci_disable_msi(np->pci_dev); | ||
2950 | np->msi_flags &= ~NV_MSI_ENABLED; | ||
2951 | goto out_drain; | ||
2952 | } | ||
2953 | |||
2954 | /* map interrupts to vector 0 */ | ||
2955 | writel(0, base + NvRegMSIMap0); | ||
2956 | writel(0, base + NvRegMSIMap1); | ||
2957 | /* enable msi vector 0 */ | ||
2958 | writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask); | ||
2959 | } | ||
2960 | } | ||
2961 | if (ret != 0) { | ||
2962 | if (request_irq(np->pci_dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev) != 0) | ||
2963 | goto out_drain; | ||
2964 | } | ||
2965 | |||
2966 | /* ask for interrupts */ | 2894 | /* ask for interrupts */ |
2967 | nv_enable_hw_interrupts(dev, np->irqmask); | 2895 | nv_enable_hw_interrupts(dev, np->irqmask); |
2968 | 2896 | ||
diff --git a/drivers/net/ixp2000/enp2611.c b/drivers/net/ixp2000/enp2611.c index 6f7dce8eba51..b67f586d7392 100644 --- a/drivers/net/ixp2000/enp2611.c +++ b/drivers/net/ixp2000/enp2611.c | |||
@@ -149,6 +149,8 @@ static void enp2611_check_link_status(unsigned long __dummy) | |||
149 | int status; | 149 | int status; |
150 | 150 | ||
151 | dev = nds[i]; | 151 | dev = nds[i]; |
152 | if (dev == NULL) | ||
153 | continue; | ||
152 | 154 | ||
153 | status = pm3386_is_link_up(i); | 155 | status = pm3386_is_link_up(i); |
154 | if (status && !netif_carrier_ok(dev)) { | 156 | if (status && !netif_carrier_ok(dev)) { |
@@ -191,6 +193,7 @@ static void enp2611_set_port_admin_status(int port, int up) | |||
191 | 193 | ||
192 | static int __init enp2611_init_module(void) | 194 | static int __init enp2611_init_module(void) |
193 | { | 195 | { |
196 | int ports; | ||
194 | int i; | 197 | int i; |
195 | 198 | ||
196 | if (!machine_is_enp2611()) | 199 | if (!machine_is_enp2611()) |
@@ -199,7 +202,8 @@ static int __init enp2611_init_module(void) | |||
199 | caleb_reset(); | 202 | caleb_reset(); |
200 | pm3386_reset(); | 203 | pm3386_reset(); |
201 | 204 | ||
202 | for (i = 0; i < 3; i++) { | 205 | ports = pm3386_port_count(); |
206 | for (i = 0; i < ports; i++) { | ||
203 | nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); | 207 | nds[i] = ixpdev_alloc(i, sizeof(struct enp2611_ixpdev_priv)); |
204 | if (nds[i] == NULL) { | 208 | if (nds[i] == NULL) { |
205 | while (--i >= 0) | 209 | while (--i >= 0) |
@@ -215,9 +219,10 @@ static int __init enp2611_init_module(void) | |||
215 | 219 | ||
216 | ixp2400_msf_init(&enp2611_msf_parameters); | 220 | ixp2400_msf_init(&enp2611_msf_parameters); |
217 | 221 | ||
218 | if (ixpdev_init(3, nds, enp2611_set_port_admin_status)) { | 222 | if (ixpdev_init(ports, nds, enp2611_set_port_admin_status)) { |
219 | for (i = 0; i < 3; i++) | 223 | for (i = 0; i < ports; i++) |
220 | free_netdev(nds[i]); | 224 | if (nds[i]) |
225 | free_netdev(nds[i]); | ||
221 | return -EINVAL; | 226 | return -EINVAL; |
222 | } | 227 | } |
223 | 228 | ||
diff --git a/drivers/net/ixp2000/pm3386.c b/drivers/net/ixp2000/pm3386.c index 5c7ab7564053..5224651c9aac 100644 --- a/drivers/net/ixp2000/pm3386.c +++ b/drivers/net/ixp2000/pm3386.c | |||
@@ -86,40 +86,53 @@ static void pm3386_port_reg_write(int port, int _reg, int spacing, u16 value) | |||
86 | pm3386_reg_write(port >> 1, reg, value); | 86 | pm3386_reg_write(port >> 1, reg, value); |
87 | } | 87 | } |
88 | 88 | ||
89 | int pm3386_secondary_present(void) | ||
90 | { | ||
91 | return pm3386_reg_read(1, 0) == 0x3386; | ||
92 | } | ||
89 | 93 | ||
90 | void pm3386_reset(void) | 94 | void pm3386_reset(void) |
91 | { | 95 | { |
92 | u8 mac[3][6]; | 96 | u8 mac[3][6]; |
97 | int secondary; | ||
98 | |||
99 | secondary = pm3386_secondary_present(); | ||
93 | 100 | ||
94 | /* Save programmed MAC addresses. */ | 101 | /* Save programmed MAC addresses. */ |
95 | pm3386_get_mac(0, mac[0]); | 102 | pm3386_get_mac(0, mac[0]); |
96 | pm3386_get_mac(1, mac[1]); | 103 | pm3386_get_mac(1, mac[1]); |
97 | pm3386_get_mac(2, mac[2]); | 104 | if (secondary) |
105 | pm3386_get_mac(2, mac[2]); | ||
98 | 106 | ||
99 | /* Assert analog and digital reset. */ | 107 | /* Assert analog and digital reset. */ |
100 | pm3386_reg_write(0, 0x002, 0x0060); | 108 | pm3386_reg_write(0, 0x002, 0x0060); |
101 | pm3386_reg_write(1, 0x002, 0x0060); | 109 | if (secondary) |
110 | pm3386_reg_write(1, 0x002, 0x0060); | ||
102 | mdelay(1); | 111 | mdelay(1); |
103 | 112 | ||
104 | /* Deassert analog reset. */ | 113 | /* Deassert analog reset. */ |
105 | pm3386_reg_write(0, 0x002, 0x0062); | 114 | pm3386_reg_write(0, 0x002, 0x0062); |
106 | pm3386_reg_write(1, 0x002, 0x0062); | 115 | if (secondary) |
116 | pm3386_reg_write(1, 0x002, 0x0062); | ||
107 | mdelay(10); | 117 | mdelay(10); |
108 | 118 | ||
109 | /* Deassert digital reset. */ | 119 | /* Deassert digital reset. */ |
110 | pm3386_reg_write(0, 0x002, 0x0063); | 120 | pm3386_reg_write(0, 0x002, 0x0063); |
111 | pm3386_reg_write(1, 0x002, 0x0063); | 121 | if (secondary) |
122 | pm3386_reg_write(1, 0x002, 0x0063); | ||
112 | mdelay(10); | 123 | mdelay(10); |
113 | 124 | ||
114 | /* Restore programmed MAC addresses. */ | 125 | /* Restore programmed MAC addresses. */ |
115 | pm3386_set_mac(0, mac[0]); | 126 | pm3386_set_mac(0, mac[0]); |
116 | pm3386_set_mac(1, mac[1]); | 127 | pm3386_set_mac(1, mac[1]); |
117 | pm3386_set_mac(2, mac[2]); | 128 | if (secondary) |
129 | pm3386_set_mac(2, mac[2]); | ||
118 | 130 | ||
119 | /* Disable carrier on all ports. */ | 131 | /* Disable carrier on all ports. */ |
120 | pm3386_set_carrier(0, 0); | 132 | pm3386_set_carrier(0, 0); |
121 | pm3386_set_carrier(1, 0); | 133 | pm3386_set_carrier(1, 0); |
122 | pm3386_set_carrier(2, 0); | 134 | if (secondary) |
135 | pm3386_set_carrier(2, 0); | ||
123 | } | 136 | } |
124 | 137 | ||
125 | static u16 swaph(u16 x) | 138 | static u16 swaph(u16 x) |
@@ -127,6 +140,11 @@ static u16 swaph(u16 x) | |||
127 | return ((x << 8) | (x >> 8)) & 0xffff; | 140 | return ((x << 8) | (x >> 8)) & 0xffff; |
128 | } | 141 | } |
129 | 142 | ||
143 | int pm3386_port_count(void) | ||
144 | { | ||
145 | return 2 + pm3386_secondary_present(); | ||
146 | } | ||
147 | |||
130 | void pm3386_init_port(int port) | 148 | void pm3386_init_port(int port) |
131 | { | 149 | { |
132 | int pm = port >> 1; | 150 | int pm = port >> 1; |
diff --git a/drivers/net/ixp2000/pm3386.h b/drivers/net/ixp2000/pm3386.h index fe92bb056ac4..cc4183dca911 100644 --- a/drivers/net/ixp2000/pm3386.h +++ b/drivers/net/ixp2000/pm3386.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define __PM3386_H | 13 | #define __PM3386_H |
14 | 14 | ||
15 | void pm3386_reset(void); | 15 | void pm3386_reset(void); |
16 | int pm3386_port_count(void); | ||
16 | void pm3386_init_port(int port); | 17 | void pm3386_init_port(int port); |
17 | void pm3386_get_mac(int port, u8 *mac); | 18 | void pm3386_get_mac(int port, u8 *mac); |
18 | void pm3386_set_mac(int port, u8 *mac); | 19 | void pm3386_set_mac(int port, u8 *mac); |
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c index 448a09488529..2ea66aca648b 100644 --- a/drivers/net/pcmcia/axnet_cs.c +++ b/drivers/net/pcmcia/axnet_cs.c | |||
@@ -1691,17 +1691,6 @@ static void do_set_multicast_list(struct net_device *dev) | |||
1691 | memset(ei_local->mcfilter, 0xFF, 8); | 1691 | memset(ei_local->mcfilter, 0xFF, 8); |
1692 | } | 1692 | } |
1693 | 1693 | ||
1694 | /* | ||
1695 | * DP8390 manuals don't specify any magic sequence for altering | ||
1696 | * the multicast regs on an already running card. To be safe, we | ||
1697 | * ensure multicast mode is off prior to loading up the new hash | ||
1698 | * table. If this proves to be not enough, we can always resort | ||
1699 | * to stopping the NIC, loading the table and then restarting. | ||
1700 | */ | ||
1701 | |||
1702 | if (netif_running(dev)) | ||
1703 | outb_p(E8390_RXCONFIG, e8390_base + EN0_RXCR); | ||
1704 | |||
1705 | outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); | 1694 | outb_p(E8390_NODMA + E8390_PAGE1, e8390_base + E8390_CMD); |
1706 | for(i = 0; i < 8; i++) | 1695 | for(i = 0; i < 8; i++) |
1707 | { | 1696 | { |
@@ -1715,6 +1704,8 @@ static void do_set_multicast_list(struct net_device *dev) | |||
1715 | outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); | 1704 | outb_p(E8390_RXCONFIG | 0x48, e8390_base + EN0_RXCR); |
1716 | else | 1705 | else |
1717 | outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); | 1706 | outb_p(E8390_RXCONFIG | 0x40, e8390_base + EN0_RXCR); |
1707 | |||
1708 | outb_p(E8390_NODMA+E8390_PAGE0+E8390_START, e8390_base+E8390_CMD); | ||
1718 | } | 1709 | } |
1719 | 1710 | ||
1720 | /* | 1711 | /* |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index a70c2b0cc104..5ca5a1b546a1 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -78,8 +78,7 @@ static const struct pci_device_id skge_id_table[] = { | |||
78 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, | 78 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_GE) }, |
79 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, | 79 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_YU) }, |
80 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, | 80 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, PCI_DEVICE_ID_DLINK_DGE510T), }, |
81 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, | 81 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, /* DGE-530T */ |
82 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b01) }, | ||
83 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, | 82 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4320) }, |
84 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ | 83 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x5005) }, /* Belkin */ |
85 | { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, | 84 | { PCI_DEVICE(PCI_VENDOR_ID_CNET, PCI_DEVICE_ID_CNET_GIGACARD) }, |
@@ -402,7 +401,7 @@ static int skge_set_ring_param(struct net_device *dev, | |||
402 | int err; | 401 | int err; |
403 | 402 | ||
404 | if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || | 403 | if (p->rx_pending == 0 || p->rx_pending > MAX_RX_RING_SIZE || |
405 | p->tx_pending == 0 || p->tx_pending > MAX_TX_RING_SIZE) | 404 | p->tx_pending < MAX_SKB_FRAGS+1 || p->tx_pending > MAX_TX_RING_SIZE) |
406 | return -EINVAL; | 405 | return -EINVAL; |
407 | 406 | ||
408 | skge->rx_ring.count = p->rx_pending; | 407 | skge->rx_ring.count = p->rx_pending; |
@@ -2717,8 +2716,7 @@ static int skge_poll(struct net_device *dev, int *budget) | |||
2717 | if (control & BMU_OWN) | 2716 | if (control & BMU_OWN) |
2718 | break; | 2717 | break; |
2719 | 2718 | ||
2720 | skb = skge_rx_get(skge, e, control, rd->status, | 2719 | skb = skge_rx_get(skge, e, control, rd->status, rd->csum2); |
2721 | le16_to_cpu(rd->csum2)); | ||
2722 | if (likely(skb)) { | 2720 | if (likely(skb)) { |
2723 | dev->last_rx = jiffies; | 2721 | dev->last_rx = jiffies; |
2724 | netif_receive_skb(skb); | 2722 | netif_receive_skb(skb); |
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index ffd267fab21d..60779ebf2ff6 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "sky2.h" | 51 | #include "sky2.h" |
52 | 52 | ||
53 | #define DRV_NAME "sky2" | 53 | #define DRV_NAME "sky2" |
54 | #define DRV_VERSION "1.3" | 54 | #define DRV_VERSION "1.4" |
55 | #define PFX DRV_NAME " " | 55 | #define PFX DRV_NAME " " |
56 | 56 | ||
57 | /* | 57 | /* |
@@ -105,6 +105,7 @@ MODULE_PARM_DESC(idle_timeout, "Idle timeout workaround for lost interrupts (ms) | |||
105 | static const struct pci_device_id sky2_id_table[] = { | 105 | static const struct pci_device_id sky2_id_table[] = { |
106 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, | 106 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, |
107 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, | 107 | { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, |
108 | { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ | ||
108 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, | 109 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4340) }, |
109 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, | 110 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4341) }, |
110 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, | 111 | { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4342) }, |
@@ -235,6 +236,7 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state) | |||
235 | } | 236 | } |
236 | 237 | ||
237 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { | 238 | if (hw->chip_id == CHIP_ID_YUKON_EC_U) { |
239 | sky2_write16(hw, B0_CTST, Y2_HW_WOL_ON); | ||
238 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); | 240 | sky2_pci_write32(hw, PCI_DEV_REG3, 0); |
239 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); | 241 | reg1 = sky2_pci_read32(hw, PCI_DEV_REG4); |
240 | reg1 &= P_ASPM_CONTROL_MSK; | 242 | reg1 &= P_ASPM_CONTROL_MSK; |
@@ -306,7 +308,7 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port) | |||
306 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover; | 308 | u16 ctrl, ct1000, adv, pg, ledctrl, ledover; |
307 | 309 | ||
308 | if (sky2->autoneg == AUTONEG_ENABLE && | 310 | if (sky2->autoneg == AUTONEG_ENABLE && |
309 | (hw->chip_id != CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { | 311 | !(hw->chip_id == CHIP_ID_YUKON_XL || hw->chip_id == CHIP_ID_YUKON_EC_U)) { |
310 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); | 312 | u16 ectrl = gm_phy_read(hw, port, PHY_MARV_EXT_CTRL); |
311 | 313 | ||
312 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | | 314 | ectrl &= ~(PHY_M_EC_M_DSC_MSK | PHY_M_EC_S_DSC_MSK | |
@@ -1020,7 +1022,25 @@ static int sky2_up(struct net_device *dev) | |||
1020 | struct sky2_hw *hw = sky2->hw; | 1022 | struct sky2_hw *hw = sky2->hw; |
1021 | unsigned port = sky2->port; | 1023 | unsigned port = sky2->port; |
1022 | u32 ramsize, rxspace, imask; | 1024 | u32 ramsize, rxspace, imask; |
1023 | int err = -ENOMEM; | 1025 | int cap, err = -ENOMEM; |
1026 | struct net_device *otherdev = hw->dev[sky2->port^1]; | ||
1027 | |||
1028 | /* | ||
1029 | * On dual-port PCI-X cards, there is a problem where status | ||
1030 | * can be received out of order due to split transactions | ||
1031 | */ | ||
1032 | if (otherdev && netif_running(otherdev) && | ||
1033 | (cap = pci_find_capability(hw->pdev, PCI_CAP_ID_PCIX))) { | ||
1034 | struct sky2_port *osky2 = netdev_priv(otherdev); | ||
1035 | u16 cmd; | ||
1036 | |||
1037 | cmd = sky2_pci_read16(hw, cap + PCI_X_CMD); | ||
1038 | cmd &= ~PCI_X_CMD_MAX_SPLIT; | ||
1039 | sky2_pci_write16(hw, cap + PCI_X_CMD, cmd); | ||
1040 | |||
1041 | sky2->rx_csum = 0; | ||
1042 | osky2->rx_csum = 0; | ||
1043 | } | ||
1024 | 1044 | ||
1025 | if (netif_msg_ifup(sky2)) | 1045 | if (netif_msg_ifup(sky2)) |
1026 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); | 1046 | printk(KERN_INFO PFX "%s: enabling interface\n", dev->name); |
@@ -1899,6 +1919,12 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last) | |||
1899 | } | 1919 | } |
1900 | } | 1920 | } |
1901 | 1921 | ||
1922 | /* Is status ring empty or is there more to do? */ | ||
1923 | static inline int sky2_more_work(const struct sky2_hw *hw) | ||
1924 | { | ||
1925 | return (hw->st_idx != sky2_read16(hw, STAT_PUT_IDX)); | ||
1926 | } | ||
1927 | |||
1902 | /* Process status response ring */ | 1928 | /* Process status response ring */ |
1903 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) | 1929 | static int sky2_status_intr(struct sky2_hw *hw, int to_do) |
1904 | { | 1930 | { |
@@ -2171,19 +2197,19 @@ static int sky2_poll(struct net_device *dev0, int *budget) | |||
2171 | if (status & Y2_IS_CHK_TXA2) | 2197 | if (status & Y2_IS_CHK_TXA2) |
2172 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); | 2198 | sky2_descriptor_error(hw, 1, "transmit", Y2_IS_CHK_TXA2); |
2173 | 2199 | ||
2174 | if (status & Y2_IS_STAT_BMU) | ||
2175 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | ||
2176 | |||
2177 | work_done = sky2_status_intr(hw, work_limit); | 2200 | work_done = sky2_status_intr(hw, work_limit); |
2178 | *budget -= work_done; | 2201 | *budget -= work_done; |
2179 | dev0->quota -= work_done; | 2202 | dev0->quota -= work_done; |
2180 | 2203 | ||
2181 | if (work_done >= work_limit) | 2204 | if (status & Y2_IS_STAT_BMU) |
2205 | sky2_write32(hw, STAT_CTRL, SC_STAT_CLR_IRQ); | ||
2206 | |||
2207 | if (sky2_more_work(hw)) | ||
2182 | return 1; | 2208 | return 1; |
2183 | 2209 | ||
2184 | netif_rx_complete(dev0); | 2210 | netif_rx_complete(dev0); |
2185 | 2211 | ||
2186 | status = sky2_read32(hw, B0_Y2_SP_LISR); | 2212 | sky2_read32(hw, B0_Y2_SP_LISR); |
2187 | return 0; | 2213 | return 0; |
2188 | } | 2214 | } |
2189 | 2215 | ||
@@ -3067,12 +3093,7 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw, | |||
3067 | sky2->duplex = -1; | 3093 | sky2->duplex = -1; |
3068 | sky2->speed = -1; | 3094 | sky2->speed = -1; |
3069 | sky2->advertising = sky2_supported_modes(hw); | 3095 | sky2->advertising = sky2_supported_modes(hw); |
3070 | 3096 | sky2->rx_csum = 1; | |
3071 | /* Receive checksum disabled for Yukon XL | ||
3072 | * because of observed problems with incorrect | ||
3073 | * values when multiple packets are received in one interrupt | ||
3074 | */ | ||
3075 | sky2->rx_csum = (hw->chip_id != CHIP_ID_YUKON_XL); | ||
3076 | 3097 | ||
3077 | spin_lock_init(&sky2->phy_lock); | 3098 | spin_lock_init(&sky2->phy_lock); |
3078 | sky2->tx_pending = TX_DEF_PENDING; | 3099 | sky2->tx_pending = TX_DEF_PENDING; |
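
The sky2_poll() hunk above changes when the driver leaves the poll list: instead of keying off work_done >= work_limit, it clears the status IRQ after processing and stays scheduled while sky2_more_work() still sees queued status entries. The skeleton below sketches that ordering against the 2.6.16 NAPI interface; process_status_ring(), more_work_pending() and reenable_irq() are placeholders of ours, not driver functions.

/* Old-style NAPI poll skeleton (sketch only, placeholder helpers). */
static int example_poll(struct net_device *dev, int *budget)
{
	int work_limit = min(dev->quota, *budget);
	int work_done;

	work_done = process_status_ring(dev, work_limit);	/* placeholder */
	*budget -= work_done;
	dev->quota -= work_done;

	/* Stay on the poll list while the hardware still has work queued. */
	if (more_work_pending(dev))		/* placeholder, cf. sky2_more_work() */
		return 1;

	netif_rx_complete(dev);
	reenable_irq(dev);			/* placeholder; sky2 re-reads B0_Y2_SP_LISR */
	return 0;
}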
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h index 8012994c9b93..8a0bc5525f0a 100644 --- a/drivers/net/sky2.h +++ b/drivers/net/sky2.h | |||
@@ -214,6 +214,8 @@ enum csr_regs { | |||
214 | enum { | 214 | enum { |
215 | Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ | 215 | Y2_VMAIN_AVAIL = 1<<17,/* VMAIN available (YUKON-2 only) */ |
216 | Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ | 216 | Y2_VAUX_AVAIL = 1<<16,/* VAUX available (YUKON-2 only) */ |
217 | Y2_HW_WOL_ON = 1<<15,/* HW WOL On (Yukon-EC Ultra A1 only) */ | ||
218 | Y2_HW_WOL_OFF = 1<<14,/* HW WOL Off (Yukon-EC Ultra A1 only) */ | ||
217 | Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ | 219 | Y2_ASF_ENABLE = 1<<13,/* ASF Unit Enable (YUKON-2 only) */ |
218 | Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ | 220 | Y2_ASF_DISABLE = 1<<12,/* ASF Unit Disable (YUKON-2 only) */ |
219 | Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ | 221 | Y2_CLK_RUN_ENA = 1<<11,/* CLK_RUN Enable (YUKON-2 only) */ |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 2bd9592b75cd..e1b33a25a25f 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -7653,21 +7653,23 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
7653 | cmd->supported |= (SUPPORTED_1000baseT_Half | | 7653 | cmd->supported |= (SUPPORTED_1000baseT_Half | |
7654 | SUPPORTED_1000baseT_Full); | 7654 | SUPPORTED_1000baseT_Full); |
7655 | 7655 | ||
7656 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) | 7656 | if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) { |
7657 | cmd->supported |= (SUPPORTED_100baseT_Half | | 7657 | cmd->supported |= (SUPPORTED_100baseT_Half | |
7658 | SUPPORTED_100baseT_Full | | 7658 | SUPPORTED_100baseT_Full | |
7659 | SUPPORTED_10baseT_Half | | 7659 | SUPPORTED_10baseT_Half | |
7660 | SUPPORTED_10baseT_Full | | 7660 | SUPPORTED_10baseT_Full | |
7661 | SUPPORTED_MII); | 7661 | SUPPORTED_MII); |
7662 | else | 7662 | cmd->port = PORT_TP; |
7663 | } else { | ||
7663 | cmd->supported |= SUPPORTED_FIBRE; | 7664 | cmd->supported |= SUPPORTED_FIBRE; |
7665 | cmd->port = PORT_FIBRE; | ||
7666 | } | ||
7664 | 7667 | ||
7665 | cmd->advertising = tp->link_config.advertising; | 7668 | cmd->advertising = tp->link_config.advertising; |
7666 | if (netif_running(dev)) { | 7669 | if (netif_running(dev)) { |
7667 | cmd->speed = tp->link_config.active_speed; | 7670 | cmd->speed = tp->link_config.active_speed; |
7668 | cmd->duplex = tp->link_config.active_duplex; | 7671 | cmd->duplex = tp->link_config.active_duplex; |
7669 | } | 7672 | } |
7670 | cmd->port = 0; | ||
7671 | cmd->phy_address = PHY_ADDR; | 7673 | cmd->phy_address = PHY_ADDR; |
7672 | cmd->transceiver = 0; | 7674 | cmd->transceiver = 0; |
7673 | cmd->autoneg = tp->link_config.autoneg; | 7675 | cmd->autoneg = tp->link_config.autoneg; |
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c index ba05dedf29d3..136a70c4d5e4 100644 --- a/drivers/net/tulip/winbond-840.c +++ b/drivers/net/tulip/winbond-840.c | |||
@@ -850,7 +850,7 @@ static void init_rxtx_rings(struct net_device *dev) | |||
850 | break; | 850 | break; |
851 | skb->dev = dev; /* Mark as being used by this device. */ | 851 | skb->dev = dev; /* Mark as being used by this device. */ |
852 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, | 852 | np->rx_addr[i] = pci_map_single(np->pci_dev,skb->data, |
853 | skb->len,PCI_DMA_FROMDEVICE); | 853 | np->rx_buf_sz,PCI_DMA_FROMDEVICE); |
854 | 854 | ||
855 | np->rx_ring[i].buffer1 = np->rx_addr[i]; | 855 | np->rx_ring[i].buffer1 = np->rx_addr[i]; |
856 | np->rx_ring[i].status = DescOwn; | 856 | np->rx_ring[i].status = DescOwn; |
@@ -1316,7 +1316,7 @@ static int netdev_rx(struct net_device *dev) | |||
1316 | skb->dev = dev; /* Mark as being used by this device. */ | 1316 | skb->dev = dev; /* Mark as being used by this device. */ |
1317 | np->rx_addr[entry] = pci_map_single(np->pci_dev, | 1317 | np->rx_addr[entry] = pci_map_single(np->pci_dev, |
1318 | skb->data, | 1318 | skb->data, |
1319 | skb->len, PCI_DMA_FROMDEVICE); | 1319 | np->rx_buf_sz, PCI_DMA_FROMDEVICE); |
1320 | np->rx_ring[entry].buffer1 = np->rx_addr[entry]; | 1320 | np->rx_ring[entry].buffer1 = np->rx_addr[entry]; |
1321 | } | 1321 | } |
1322 | wmb(); | 1322 | wmb(); |
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index a6dc53b4250d..fdc21037f6dc 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c | |||
@@ -491,8 +491,6 @@ struct rhine_private { | |||
491 | u8 tx_thresh, rx_thresh; | 491 | u8 tx_thresh, rx_thresh; |
492 | 492 | ||
493 | struct mii_if_info mii_if; | 493 | struct mii_if_info mii_if; |
494 | struct work_struct tx_timeout_task; | ||
495 | struct work_struct check_media_task; | ||
496 | void __iomem *base; | 494 | void __iomem *base; |
497 | }; | 495 | }; |
498 | 496 | ||
@@ -500,8 +498,6 @@ static int mdio_read(struct net_device *dev, int phy_id, int location); | |||
500 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); | 498 | static void mdio_write(struct net_device *dev, int phy_id, int location, int value); |
501 | static int rhine_open(struct net_device *dev); | 499 | static int rhine_open(struct net_device *dev); |
502 | static void rhine_tx_timeout(struct net_device *dev); | 500 | static void rhine_tx_timeout(struct net_device *dev); |
503 | static void rhine_tx_timeout_task(struct net_device *dev); | ||
504 | static void rhine_check_media_task(struct net_device *dev); | ||
505 | static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); | 501 | static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev); |
506 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); | 502 | static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs); |
507 | static void rhine_tx(struct net_device *dev); | 503 | static void rhine_tx(struct net_device *dev); |
@@ -856,12 +852,6 @@ static int __devinit rhine_init_one(struct pci_dev *pdev, | |||
856 | if (rp->quirks & rqRhineI) | 852 | if (rp->quirks & rqRhineI) |
857 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; | 853 | dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; |
858 | 854 | ||
859 | INIT_WORK(&rp->tx_timeout_task, | ||
860 | (void (*)(void *))rhine_tx_timeout_task, dev); | ||
861 | |||
862 | INIT_WORK(&rp->check_media_task, | ||
863 | (void (*)(void *))rhine_check_media_task, dev); | ||
864 | |||
865 | /* dev->name not defined before register_netdev()! */ | 855 | /* dev->name not defined before register_netdev()! */ |
866 | rc = register_netdev(dev); | 856 | rc = register_netdev(dev); |
867 | if (rc) | 857 | if (rc) |
@@ -1108,11 +1098,6 @@ static void rhine_set_carrier(struct mii_if_info *mii) | |||
1108 | netif_carrier_ok(mii->dev)); | 1098 | netif_carrier_ok(mii->dev)); |
1109 | } | 1099 | } |
1110 | 1100 | ||
1111 | static void rhine_check_media_task(struct net_device *dev) | ||
1112 | { | ||
1113 | rhine_check_media(dev, 0); | ||
1114 | } | ||
1115 | |||
1116 | static void init_registers(struct net_device *dev) | 1101 | static void init_registers(struct net_device *dev) |
1117 | { | 1102 | { |
1118 | struct rhine_private *rp = netdev_priv(dev); | 1103 | struct rhine_private *rp = netdev_priv(dev); |
@@ -1166,8 +1151,8 @@ static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks) | |||
1166 | if (quirks & rqRhineI) { | 1151 | if (quirks & rqRhineI) { |
1167 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR | 1152 | iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR |
1168 | 1153 | ||
1169 | /* Do not call from ISR! */ | 1154 | /* Can be called from ISR. Evil. */ |
1170 | msleep(1); | 1155 | mdelay(1); |
1171 | 1156 | ||
1172 | /* 0x80 must be set immediately before turning it off */ | 1157 | /* 0x80 must be set immediately before turning it off */ |
1173 | iowrite8(0x80, ioaddr + MIICmd); | 1158 | iowrite8(0x80, ioaddr + MIICmd); |
@@ -1257,16 +1242,6 @@ static int rhine_open(struct net_device *dev) | |||
1257 | static void rhine_tx_timeout(struct net_device *dev) | 1242 | static void rhine_tx_timeout(struct net_device *dev) |
1258 | { | 1243 | { |
1259 | struct rhine_private *rp = netdev_priv(dev); | 1244 | struct rhine_private *rp = netdev_priv(dev); |
1260 | |||
1261 | /* | ||
1262 | * Move bulk of work outside of interrupt context | ||
1263 | */ | ||
1264 | schedule_work(&rp->tx_timeout_task); | ||
1265 | } | ||
1266 | |||
1267 | static void rhine_tx_timeout_task(struct net_device *dev) | ||
1268 | { | ||
1269 | struct rhine_private *rp = netdev_priv(dev); | ||
1270 | void __iomem *ioaddr = rp->base; | 1245 | void __iomem *ioaddr = rp->base; |
1271 | 1246 | ||
1272 | printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " | 1247 | printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status " |
@@ -1677,7 +1652,7 @@ static void rhine_error(struct net_device *dev, int intr_status) | |||
1677 | spin_lock(&rp->lock); | 1652 | spin_lock(&rp->lock); |
1678 | 1653 | ||
1679 | if (intr_status & IntrLinkChange) | 1654 | if (intr_status & IntrLinkChange) |
1680 | schedule_work(&rp->check_media_task); | 1655 | rhine_check_media(dev, 0); |
1681 | if (intr_status & IntrStatsMax) { | 1656 | if (intr_status & IntrStatsMax) { |
1682 | rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); | 1657 | rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs); |
1683 | rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); | 1658 | rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed); |
@@ -1927,9 +1902,6 @@ static int rhine_close(struct net_device *dev) | |||
1927 | spin_unlock_irq(&rp->lock); | 1902 | spin_unlock_irq(&rp->lock); |
1928 | 1903 | ||
1929 | free_irq(rp->pdev->irq, dev); | 1904 | free_irq(rp->pdev->irq, dev); |
1930 | |||
1931 | flush_scheduled_work(); | ||
1932 | |||
1933 | free_rbufs(dev); | 1905 | free_rbufs(dev); |
1934 | free_tbufs(dev); | 1906 | free_tbufs(dev); |
1935 | free_ring(dev); | 1907 | free_ring(dev); |
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c index e2982a83ae42..7ed18cad29f7 100644 --- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c +++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c | |||
@@ -3271,6 +3271,9 @@ static int bcm43xx_init_board(struct bcm43xx_private *bcm) | |||
3271 | bcm43xx_sysfs_register(bcm); | 3271 | bcm43xx_sysfs_register(bcm); |
3272 | //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though... | 3272 | //FIXME: check for bcm43xx_sysfs_register failure. This function is a bit messy regarding unwinding, though... |
3273 | 3273 | ||
3274 | /*FIXME: This should be handled by softmac instead. */ | ||
3275 | schedule_work(&bcm->softmac->associnfo.work); | ||
3276 | |||
3274 | assert(err == 0); | 3277 | assert(err == 0); |
3275 | out: | 3278 | out: |
3276 | return err; | 3279 | return err; |
@@ -3946,9 +3949,6 @@ static int bcm43xx_resume(struct pci_dev *pdev) | |||
3946 | 3949 | ||
3947 | netif_device_attach(net_dev); | 3950 | netif_device_attach(net_dev); |
3948 | 3951 | ||
3949 | /*FIXME: This should be handled by softmac instead. */ | ||
3950 | schedule_work(&bcm->softmac->associnfo.work); | ||
3951 | |||
3952 | dprintk(KERN_INFO PFX "Device resumed.\n"); | 3952 | dprintk(KERN_INFO PFX "Device resumed.\n"); |
3953 | 3953 | ||
3954 | return 0; | 3954 | return 0; |
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index 6917c6cb0912..c2ecae5ff0c1 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c | |||
@@ -33,13 +33,10 @@ acpi_query_osc ( | |||
33 | acpi_status status; | 33 | acpi_status status; |
34 | struct acpi_object_list input; | 34 | struct acpi_object_list input; |
35 | union acpi_object in_params[4]; | 35 | union acpi_object in_params[4]; |
36 | struct acpi_buffer output; | 36 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
37 | union acpi_object out_obj; | 37 | union acpi_object *out_obj; |
38 | u32 osc_dw0; | 38 | u32 osc_dw0; |
39 | 39 | ||
40 | /* Setting up output buffer */ | ||
41 | output.length = sizeof(out_obj) + 3*sizeof(u32); | ||
42 | output.pointer = &out_obj; | ||
43 | 40 | ||
44 | /* Setting up input parameters */ | 41 | /* Setting up input parameters */ |
45 | input.count = 4; | 42 | input.count = 4; |
@@ -61,12 +58,15 @@ acpi_query_osc ( | |||
61 | "Evaluate _OSC Set fails. Status = 0x%04x\n", status); | 58 | "Evaluate _OSC Set fails. Status = 0x%04x\n", status); |
62 | return status; | 59 | return status; |
63 | } | 60 | } |
64 | if (out_obj.type != ACPI_TYPE_BUFFER) { | 61 | out_obj = output.pointer; |
62 | |||
63 | if (out_obj->type != ACPI_TYPE_BUFFER) { | ||
65 | printk(KERN_DEBUG | 64 | printk(KERN_DEBUG |
66 | "Evaluate _OSC returns wrong type\n"); | 65 | "Evaluate _OSC returns wrong type\n"); |
67 | return AE_TYPE; | 66 | status = AE_TYPE; |
67 | goto query_osc_out; | ||
68 | } | 68 | } |
69 | osc_dw0 = *((u32 *) out_obj.buffer.pointer); | 69 | osc_dw0 = *((u32 *) out_obj->buffer.pointer); |
70 | if (osc_dw0) { | 70 | if (osc_dw0) { |
71 | if (osc_dw0 & OSC_REQUEST_ERROR) | 71 | if (osc_dw0 & OSC_REQUEST_ERROR) |
72 | printk(KERN_DEBUG "_OSC request fails\n"); | 72 | printk(KERN_DEBUG "_OSC request fails\n"); |
@@ -76,15 +76,21 @@ acpi_query_osc ( | |||
76 | printk(KERN_DEBUG "_OSC invalid revision\n"); | 76 | printk(KERN_DEBUG "_OSC invalid revision\n"); |
77 | if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { | 77 | if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { |
78 | /* Update Global Control Set */ | 78 | /* Update Global Control Set */ |
79 | global_ctrlsets = *((u32 *)(out_obj.buffer.pointer+8)); | 79 | global_ctrlsets = *((u32 *)(out_obj->buffer.pointer+8)); |
80 | return AE_OK; | 80 | status = AE_OK; |
81 | goto query_osc_out; | ||
81 | } | 82 | } |
82 | return AE_ERROR; | 83 | status = AE_ERROR; |
84 | goto query_osc_out; | ||
83 | } | 85 | } |
84 | 86 | ||
85 | /* Update Global Control Set */ | 87 | /* Update Global Control Set */ |
86 | global_ctrlsets = *((u32 *)(out_obj.buffer.pointer + 8)); | 88 | global_ctrlsets = *((u32 *)(out_obj->buffer.pointer + 8)); |
87 | return AE_OK; | 89 | status = AE_OK; |
90 | |||
91 | query_osc_out: | ||
92 | kfree(output.pointer); | ||
93 | return status; | ||
88 | } | 94 | } |
89 | 95 | ||
90 | 96 | ||
@@ -96,14 +102,10 @@ acpi_run_osc ( | |||
96 | acpi_status status; | 102 | acpi_status status; |
97 | struct acpi_object_list input; | 103 | struct acpi_object_list input; |
98 | union acpi_object in_params[4]; | 104 | union acpi_object in_params[4]; |
99 | struct acpi_buffer output; | 105 | struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL}; |
100 | union acpi_object out_obj; | 106 | union acpi_object *out_obj; |
101 | u32 osc_dw0; | 107 | u32 osc_dw0; |
102 | 108 | ||
103 | /* Setting up output buffer */ | ||
104 | output.length = sizeof(out_obj) + 3*sizeof(u32); | ||
105 | output.pointer = &out_obj; | ||
106 | |||
107 | /* Setting up input parameters */ | 109 | /* Setting up input parameters */ |
108 | input.count = 4; | 110 | input.count = 4; |
109 | input.pointer = in_params; | 111 | input.pointer = in_params; |
@@ -124,12 +126,14 @@ acpi_run_osc ( | |||
124 | "Evaluate _OSC Set fails. Status = 0x%04x\n", status); | 126 | "Evaluate _OSC Set fails. Status = 0x%04x\n", status); |
125 | return status; | 127 | return status; |
126 | } | 128 | } |
127 | if (out_obj.type != ACPI_TYPE_BUFFER) { | 129 | out_obj = output.pointer; |
130 | if (out_obj->type != ACPI_TYPE_BUFFER) { | ||
128 | printk(KERN_DEBUG | 131 | printk(KERN_DEBUG |
129 | "Evaluate _OSC returns wrong type\n"); | 132 | "Evaluate _OSC returns wrong type\n"); |
130 | return AE_TYPE; | 133 | status = AE_TYPE; |
134 | goto run_osc_out; | ||
131 | } | 135 | } |
132 | osc_dw0 = *((u32 *) out_obj.buffer.pointer); | 136 | osc_dw0 = *((u32 *) out_obj->buffer.pointer); |
133 | if (osc_dw0) { | 137 | if (osc_dw0) { |
134 | if (osc_dw0 & OSC_REQUEST_ERROR) | 138 | if (osc_dw0 & OSC_REQUEST_ERROR) |
135 | printk(KERN_DEBUG "_OSC request fails\n"); | 139 | printk(KERN_DEBUG "_OSC request fails\n"); |
@@ -139,11 +143,17 @@ acpi_run_osc ( | |||
139 | printk(KERN_DEBUG "_OSC invalid revision\n"); | 143 | printk(KERN_DEBUG "_OSC invalid revision\n"); |
140 | if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { | 144 | if (osc_dw0 & OSC_CAPABILITIES_MASK_ERROR) { |
141 | printk(KERN_DEBUG "_OSC FW not grant req. control\n"); | 145 | printk(KERN_DEBUG "_OSC FW not grant req. control\n"); |
142 | return AE_SUPPORT; | 146 | status = AE_SUPPORT; |
147 | goto run_osc_out; | ||
143 | } | 148 | } |
144 | return AE_ERROR; | 149 | status = AE_ERROR; |
150 | goto run_osc_out; | ||
145 | } | 151 | } |
146 | return AE_OK; | 152 | status = AE_OK; |
153 | |||
154 | run_osc_out: | ||
155 | kfree(output.pointer); | ||
156 | return status; | ||
147 | } | 157 | } |
148 | 158 | ||
149 | /** | 159 | /** |
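The pci-acpi.c hunks above switch _OSC evaluation from a fixed output buffer on the stack to one that ACPICA allocates itself (ACPI_ALLOCATE_BUFFER) and that the function frees at a single exit label, so every error path releases the result. A minimal sketch of that pattern, assuming the usual driver headers; the function name is hypothetical and this is not the tree's exact code:

    static acpi_status example_eval_osc(acpi_handle handle,
                                        struct acpi_object_list *input)
    {
        struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *out_obj;
        acpi_status status;

        status = acpi_evaluate_object(handle, "_OSC", input, &output);
        if (ACPI_FAILURE(status))
            return status;              /* nothing was allocated yet */

        out_obj = output.pointer;
        if (out_obj->type != ACPI_TYPE_BUFFER)
            status = AE_TYPE;           /* fall through to the free below */

        kfree(output.pointer);          /* single cleanup point */
        return status;
    }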
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 19e2b174d33c..d378478612fb 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c | |||
@@ -634,6 +634,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vi | |||
634 | * non-x86 architectures (yes Via exists on PPC among other places), | 634 | * non-x86 architectures (yes Via exists on PPC among other places), |
635 | * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get | 635 | * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get |
636 | * interrupts delivered properly. | 636 | * interrupts delivered properly. |
637 | * | ||
638 | * Some of the on-chip devices are actually '586 devices' so they are | ||
639 | * listed here. | ||
637 | */ | 640 | */ |
638 | static void quirk_via_irq(struct pci_dev *dev) | 641 | static void quirk_via_irq(struct pci_dev *dev) |
639 | { | 642 | { |
@@ -648,6 +651,10 @@ static void quirk_via_irq(struct pci_dev *dev) | |||
648 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); | 651 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); |
649 | } | 652 | } |
650 | } | 653 | } |
654 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0, quirk_via_irq); | ||
655 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_1, quirk_via_irq); | ||
656 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irq); | ||
657 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_irq); | ||
651 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq); | 658 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686, quirk_via_irq); |
652 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq); | 659 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_irq); |
653 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq); | 660 | DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irq); |
@@ -895,6 +902,7 @@ static void __init k8t_sound_hostbridge(struct pci_dev *dev) | |||
895 | } | 902 | } |
896 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); | 903 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_hostbridge); |
897 | 904 | ||
905 | #ifndef CONFIG_ACPI_SLEEP | ||
898 | /* | 906 | /* |
899 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge | 907 | * On ASUS P4B boards, the SMBus PCI Device within the ICH2/4 southbridge |
900 | * is not activated. The myth is that Asus said that they do not want the | 908 | * is not activated. The myth is that Asus said that they do not want the |
@@ -906,8 +914,12 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237, k8t_sound_ho | |||
906 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it | 914 | * bridge. Unfortunately, this device has no subvendor/subdevice ID. So it |
907 | * becomes necessary to do this tweak in two steps -- I've chosen the Host | 915 | * becomes necessary to do this tweak in two steps -- I've chosen the Host |
908 | * bridge as trigger. | 916 | * bridge as trigger. |
917 | * | ||
918 | * Actually, leaving it unhidden and not redoing the quirk over suspend2ram | ||
919 | * will cause thermal management to break down, causing the machine to | ||
920 | * overheat. | ||
909 | */ | 921 | */ |
910 | static int __initdata asus_hides_smbus = 0; | 922 | static int __initdata asus_hides_smbus; |
911 | 923 | ||
912 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) | 924 | static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev) |
913 | { | 925 | { |
@@ -1050,6 +1062,8 @@ static void __init asus_hides_smbus_lpc_ich6(struct pci_dev *dev) | |||
1050 | } | 1062 | } |
1051 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); | 1063 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, asus_hides_smbus_lpc_ich6 ); |
1052 | 1064 | ||
1065 | #endif | ||
1066 | |||
1053 | /* | 1067 | /* |
1054 | * SiS 96x south bridge: BIOS typically hides SMBus device... | 1068 | * SiS 96x south bridge: BIOS typically hides SMBus device... |
1055 | */ | 1069 | */ |
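The quirks.c changes above bind the existing VIA IRQ fixup to the 82C586 functions as well, and compile the ASUS "hidden SMBus" quirks out when CONFIG_ACPI_SLEEP is enabled, since un-hiding the device without redoing the quirk across suspend-to-RAM breaks thermal management. For orientation, a PCI fixup is just a function registered against a vendor/device pair; a simplified, illustrative-only version (not the tree's exact quirk_via_irq body):

    static void example_via_irq_quirk(struct pci_dev *dev)
    {
        u8 irq, new_irq = dev->irq & 0xf;       /* keep it in legacy PIC range */

        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
        if (new_irq != irq)
            pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq);
    }
    DECLARE_PCI_FIXUP_ENABLE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_0,
                             example_via_irq_quirk);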
diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c index c53db7ceda5e..738b1ef595a3 100644 --- a/drivers/pcmcia/pcmcia_ioctl.c +++ b/drivers/pcmcia/pcmcia_ioctl.c | |||
@@ -426,7 +426,7 @@ static int ds_open(struct inode *inode, struct file *file) | |||
426 | 426 | ||
427 | if (!warning_printed) { | 427 | if (!warning_printed) { |
428 | printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " | 428 | printk(KERN_INFO "pcmcia: Detected deprecated PCMCIA ioctl " |
429 | "usage.\n"); | 429 | "usage from process: %s.\n", current->comm); |
430 | printk(KERN_INFO "pcmcia: This interface will soon be removed from " | 430 | printk(KERN_INFO "pcmcia: This interface will soon be removed from " |
431 | "the kernel; please expect breakage unless you upgrade " | 431 | "the kernel; please expect breakage unless you upgrade " |
432 | "to new tools.\n"); | 432 | "to new tools.\n"); |
@@ -601,8 +601,12 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
601 | ret = CS_BAD_ARGS; | 601 | ret = CS_BAD_ARGS; |
602 | else { | 602 | else { |
603 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); | 603 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->config.Function); |
604 | ret = pccard_get_configuration_info(s, p_dev, &buf->config); | 604 | if (p_dev == NULL) |
605 | pcmcia_put_dev(p_dev); | 605 | ret = CS_BAD_ARGS; |
606 | else { | ||
607 | ret = pccard_get_configuration_info(s, p_dev, &buf->config); | ||
608 | pcmcia_put_dev(p_dev); | ||
609 | } | ||
606 | } | 610 | } |
607 | break; | 611 | break; |
608 | case DS_GET_FIRST_TUPLE: | 612 | case DS_GET_FIRST_TUPLE: |
@@ -632,8 +636,12 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
632 | ret = CS_BAD_ARGS; | 636 | ret = CS_BAD_ARGS; |
633 | else { | 637 | else { |
634 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); | 638 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->status.Function); |
635 | ret = pccard_get_status(s, p_dev, &buf->status); | 639 | if (p_dev == NULL) |
636 | pcmcia_put_dev(p_dev); | 640 | ret = CS_BAD_ARGS; |
641 | else { | ||
642 | ret = pccard_get_status(s, p_dev, &buf->status); | ||
643 | pcmcia_put_dev(p_dev); | ||
644 | } | ||
637 | } | 645 | } |
638 | break; | 646 | break; |
639 | case DS_VALIDATE_CIS: | 647 | case DS_VALIDATE_CIS: |
@@ -665,9 +673,10 @@ static int ds_ioctl(struct inode * inode, struct file * file, | |||
665 | if (!(buf->conf_reg.Function && | 673 | if (!(buf->conf_reg.Function && |
666 | (buf->conf_reg.Function >= s->functions))) { | 674 | (buf->conf_reg.Function >= s->functions))) { |
667 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); | 675 | struct pcmcia_device *p_dev = get_pcmcia_device(s, buf->conf_reg.Function); |
668 | if (p_dev) | 676 | if (p_dev) { |
669 | ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); | 677 | ret = pcmcia_access_configuration_register(p_dev, &buf->conf_reg); |
670 | pcmcia_put_dev(p_dev); | 678 | pcmcia_put_dev(p_dev); |
679 | } | ||
671 | } | 680 | } |
672 | break; | 681 | break; |
673 | case DS_GET_FIRST_REGION: | 682 | case DS_GET_FIRST_REGION: |
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 16d1ea7b0a18..247ab837f841 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c | |||
@@ -589,7 +589,7 @@ static int pd6729_check_irq(int irq, int flags) | |||
589 | return 0; | 589 | return 0; |
590 | } | 590 | } |
591 | 591 | ||
592 | static u_int __init pd6729_isa_scan(void) | 592 | static u_int __devinit pd6729_isa_scan(void) |
593 | { | 593 | { |
594 | u_int mask0, mask = 0; | 594 | u_int mask0, mask = 0; |
595 | int i; | 595 | int i; |
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 6c9ad92747fd..2011567005f9 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -141,13 +141,13 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, | |||
141 | /* try the driver's ioctl interface */ | 141 | /* try the driver's ioctl interface */ |
142 | if (ops->ioctl) { | 142 | if (ops->ioctl) { |
143 | err = ops->ioctl(class_dev->dev, cmd, arg); | 143 | err = ops->ioctl(class_dev->dev, cmd, arg); |
144 | if (err != -EINVAL) | 144 | if (err != -ENOIOCTLCMD) |
145 | return err; | 145 | return err; |
146 | } | 146 | } |
147 | 147 | ||
148 | /* if the driver does not provide the ioctl interface | 148 | /* if the driver does not provide the ioctl interface |
149 | * or if that particular ioctl was not implemented | 149 | * or if that particular ioctl was not implemented |
150 | * (-EINVAL), we will try to emulate here. | 150 | * (-ENOIOCTLCMD), we will try to emulate here. |
151 | */ | 151 | */ |
152 | 152 | ||
153 | switch (cmd) { | 153 | switch (cmd) { |
@@ -233,7 +233,7 @@ static int rtc_dev_ioctl(struct inode *inode, struct file *file, | |||
233 | break; | 233 | break; |
234 | 234 | ||
235 | default: | 235 | default: |
236 | err = -EINVAL; | 236 | err = -ENOTTY; |
237 | break; | 237 | break; |
238 | } | 238 | } |
239 | 239 | ||
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c index 2bc8aad47219..a997529f8926 100644 --- a/drivers/rtc/rtc-sa1100.c +++ b/drivers/rtc/rtc-sa1100.c | |||
@@ -247,7 +247,7 @@ static int sa1100_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
247 | rtc_freq = arg; | 247 | rtc_freq = arg; |
248 | return 0; | 248 | return 0; |
249 | } | 249 | } |
250 | return -EINVAL; | 250 | return -ENOIOCTLCMD; |
251 | } | 251 | } |
252 | 252 | ||
253 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) | 253 | static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm) |
diff --git a/drivers/rtc/rtc-test.c b/drivers/rtc/rtc-test.c index e1f7e8e86daf..e1fa5fe7901f 100644 --- a/drivers/rtc/rtc-test.c +++ b/drivers/rtc/rtc-test.c | |||
@@ -71,7 +71,7 @@ static int test_rtc_ioctl(struct device *dev, unsigned int cmd, | |||
71 | return 0; | 71 | return 0; |
72 | 72 | ||
73 | default: | 73 | default: |
74 | return -EINVAL; | 74 | return -ENOIOCTLCMD; |
75 | } | 75 | } |
76 | } | 76 | } |
77 | 77 | ||
diff --git a/drivers/rtc/rtc-vr41xx.c b/drivers/rtc/rtc-vr41xx.c index 4d49fd501198..277596c302e3 100644 --- a/drivers/rtc/rtc-vr41xx.c +++ b/drivers/rtc/rtc-vr41xx.c | |||
@@ -270,7 +270,7 @@ static int vr41xx_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long | |||
270 | epoch = arg; | 270 | epoch = arg; |
271 | break; | 271 | break; |
272 | default: | 272 | default: |
273 | return -EINVAL; | 273 | return -ENOIOCTLCMD; |
274 | } | 274 | } |
275 | 275 | ||
276 | return 0; | 276 | return 0; |
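The rtc hunks above converge on one error convention: a driver's ioctl hook returns -ENOIOCTLCMD for commands it does not implement, rtc-dev then tries to emulate those, and anything still unhandled is reported to user space as -ENOTTY instead of -EINVAL. A hedged sketch of a conforming hook (hypothetical driver, real RTC_AIE_ON command):

    static int example_rtc_ioctl(struct device *dev, unsigned int cmd,
                                 unsigned long arg)
    {
        switch (cmd) {
        case RTC_AIE_ON:
            /* enable the alarm interrupt in device registers here */
            return 0;
        default:
            return -ENOIOCTLCMD;        /* let rtc-dev emulate or reject */
        }
    }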
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 5d6b7a57b02f..e65da921a827 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1348,7 +1348,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1348 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) | 1348 | index = (struct ccw1 *) __va((addr_t) irb->scsw.cpa) |
1349 | - channel->ccws; | 1349 | - channel->ccws; |
1350 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || | 1350 | if ((irb->scsw.actl & SCSW_ACTL_SUSPENDED) || |
1351 | (irb->scsw.cstat | SCHN_STAT_PCI)) | 1351 | (irb->scsw.cstat & SCHN_STAT_PCI)) |
1352 | /* Bloody io subsystem tells us lies about cpa... */ | 1352 | /* Bloody io subsystem tells us lies about cpa... */ |
1353 | index = (index - 1) & (LCS_NUM_BUFFS - 1); | 1353 | index = (index - 1) & (LCS_NUM_BUFFS - 1); |
1354 | while (channel->io_idx != index) { | 1354 | while (channel->io_idx != index) { |
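The one-character lcs.c fix is a bitmask slip: ORing the channel status with SCHN_STAT_PCI is non-zero for any status, so the old test matched on every interrupt; testing whether the bit is set needs AND. A standalone illustration of the difference (plain C, not driver code):

    #define SCHN_STAT_PCI 0x80

    static int pci_status_bit_set(unsigned int cstat)
    {
        /* (cstat | SCHN_STAT_PCI) != 0 holds for every cstat value,
         * which is why the old '|' test always fired; '&' is correct. */
        return (cstat & SCHN_STAT_PCI) != 0;
    }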
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index bd147207f25d..823dfa78c0ba 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -864,6 +864,9 @@ static unsigned int ata_id_xfermask(const u16 *id) | |||
864 | /** | 864 | /** |
865 | * ata_port_queue_task - Queue port_task | 865 | * ata_port_queue_task - Queue port_task |
866 | * @ap: The ata_port to queue port_task for | 866 | * @ap: The ata_port to queue port_task for |
867 | * @fn: workqueue function to be scheduled | ||
868 | * @data: data value to pass to workqueue function | ||
869 | * @delay: delay time for workqueue function | ||
867 | * | 870 | * |
868 | * Schedule @fn(@data) for execution after @delay jiffies using | 871 | * Schedule @fn(@data) for execution after @delay jiffies using |
869 | * port_task. There is one port_task per port and it's the | 872 | * port_task. There is one port_task per port and it's the |
@@ -2739,6 +2742,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap, | |||
2739 | * ata_dev_init_params - Issue INIT DEV PARAMS command | 2742 | * ata_dev_init_params - Issue INIT DEV PARAMS command |
2740 | * @ap: Port associated with device @dev | 2743 | * @ap: Port associated with device @dev |
2741 | * @dev: Device to which command will be sent | 2744 | * @dev: Device to which command will be sent |
2745 | * @heads: Number of heads (taskfile parameter) | ||
2746 | * @sectors: Number of sectors (taskfile parameter) | ||
2742 | * | 2747 | * |
2743 | * LOCKING: | 2748 | * LOCKING: |
2744 | * Kernel thread context (may sleep) | 2749 | * Kernel thread context (may sleep) |
@@ -4302,6 +4307,7 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev) | |||
4302 | * ata_device_suspend - prepare a device for suspend | 4307 | * ata_device_suspend - prepare a device for suspend |
4303 | * @ap: port the device is connected to | 4308 | * @ap: port the device is connected to |
4304 | * @dev: the device to suspend | 4309 | * @dev: the device to suspend |
4310 | * @state: target power management state | ||
4305 | * | 4311 | * |
4306 | * Flush the cache on the drive, if appropriate, then issue a | 4312 | * Flush the cache on the drive, if appropriate, then issue a |
4307 | * standbynow command. | 4313 | * standbynow command. |
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c index d5fdcb9a8842..9b8bca1ac1f0 100644 --- a/drivers/scsi/sata_mv.c +++ b/drivers/scsi/sata_mv.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #include <asm/io.h> | 37 | #include <asm/io.h> |
38 | 38 | ||
39 | #define DRV_NAME "sata_mv" | 39 | #define DRV_NAME "sata_mv" |
40 | #define DRV_VERSION "0.6" | 40 | #define DRV_VERSION "0.7" |
41 | 41 | ||
42 | enum { | 42 | enum { |
43 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 43 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -50,6 +50,12 @@ enum { | |||
50 | 50 | ||
51 | MV_PCI_REG_BASE = 0, | 51 | MV_PCI_REG_BASE = 0, |
52 | MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ | 52 | MV_IRQ_COAL_REG_BASE = 0x18000, /* 6xxx part only */ |
53 | MV_IRQ_COAL_CAUSE = (MV_IRQ_COAL_REG_BASE + 0x08), | ||
54 | MV_IRQ_COAL_CAUSE_LO = (MV_IRQ_COAL_REG_BASE + 0x88), | ||
55 | MV_IRQ_COAL_CAUSE_HI = (MV_IRQ_COAL_REG_BASE + 0x8c), | ||
56 | MV_IRQ_COAL_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xcc), | ||
57 | MV_IRQ_COAL_TIME_THRESHOLD = (MV_IRQ_COAL_REG_BASE + 0xd0), | ||
58 | |||
53 | MV_SATAHC0_REG_BASE = 0x20000, | 59 | MV_SATAHC0_REG_BASE = 0x20000, |
54 | MV_FLASH_CTL = 0x1046c, | 60 | MV_FLASH_CTL = 0x1046c, |
55 | MV_GPIO_PORT_CTL = 0x104f0, | 61 | MV_GPIO_PORT_CTL = 0x104f0, |
@@ -302,9 +308,6 @@ struct mv_port_priv { | |||
302 | dma_addr_t crpb_dma; | 308 | dma_addr_t crpb_dma; |
303 | struct mv_sg *sg_tbl; | 309 | struct mv_sg *sg_tbl; |
304 | dma_addr_t sg_tbl_dma; | 310 | dma_addr_t sg_tbl_dma; |
305 | |||
306 | unsigned req_producer; /* cp of req_in_ptr */ | ||
307 | unsigned rsp_consumer; /* cp of rsp_out_ptr */ | ||
308 | u32 pp_flags; | 311 | u32 pp_flags; |
309 | }; | 312 | }; |
310 | 313 | ||
@@ -937,8 +940,6 @@ static int mv_port_start(struct ata_port *ap) | |||
937 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, | 940 | writelfl(pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK, |
938 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 941 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
939 | 942 | ||
940 | pp->req_producer = pp->rsp_consumer = 0; | ||
941 | |||
942 | /* Don't turn on EDMA here...do it before DMA commands only. Else | 943 | /* Don't turn on EDMA here...do it before DMA commands only. Else |
943 | * we'll be unable to send non-data, PIO, etc due to restricted access | 944 | * we'll be unable to send non-data, PIO, etc due to restricted access |
944 | * to shadow regs. | 945 | * to shadow regs. |
@@ -1022,16 +1023,16 @@ static void mv_fill_sg(struct ata_queued_cmd *qc) | |||
1022 | } | 1023 | } |
1023 | } | 1024 | } |
1024 | 1025 | ||
1025 | static inline unsigned mv_inc_q_index(unsigned *index) | 1026 | static inline unsigned mv_inc_q_index(unsigned index) |
1026 | { | 1027 | { |
1027 | *index = (*index + 1) & MV_MAX_Q_DEPTH_MASK; | 1028 | return (index + 1) & MV_MAX_Q_DEPTH_MASK; |
1028 | return *index; | ||
1029 | } | 1029 | } |
1030 | 1030 | ||
1031 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) | 1031 | static inline void mv_crqb_pack_cmd(u16 *cmdw, u8 data, u8 addr, unsigned last) |
1032 | { | 1032 | { |
1033 | *cmdw = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | | 1033 | u16 tmp = data | (addr << CRQB_CMD_ADDR_SHIFT) | CRQB_CMD_CS | |
1034 | (last ? CRQB_CMD_LAST : 0); | 1034 | (last ? CRQB_CMD_LAST : 0); |
1035 | *cmdw = cpu_to_le16(tmp); | ||
1035 | } | 1036 | } |
1036 | 1037 | ||
1037 | /** | 1038 | /** |
@@ -1053,15 +1054,11 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1053 | u16 *cw; | 1054 | u16 *cw; |
1054 | struct ata_taskfile *tf; | 1055 | struct ata_taskfile *tf; |
1055 | u16 flags = 0; | 1056 | u16 flags = 0; |
1057 | unsigned in_index; | ||
1056 | 1058 | ||
1057 | if (ATA_PROT_DMA != qc->tf.protocol) | 1059 | if (ATA_PROT_DMA != qc->tf.protocol) |
1058 | return; | 1060 | return; |
1059 | 1061 | ||
1060 | /* the req producer index should be the same as we remember it */ | ||
1061 | WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | ||
1062 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
1063 | pp->req_producer); | ||
1064 | |||
1065 | /* Fill in command request block | 1062 | /* Fill in command request block |
1066 | */ | 1063 | */ |
1067 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1064 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
@@ -1069,13 +1066,17 @@ static void mv_qc_prep(struct ata_queued_cmd *qc) | |||
1069 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1066 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1070 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1067 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1071 | 1068 | ||
1072 | pp->crqb[pp->req_producer].sg_addr = | 1069 | /* get current queue index from hardware */ |
1070 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | ||
1071 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1072 | |||
1073 | pp->crqb[in_index].sg_addr = | ||
1073 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1074 | cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
1074 | pp->crqb[pp->req_producer].sg_addr_hi = | 1075 | pp->crqb[in_index].sg_addr_hi = |
1075 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | 1076 | cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); |
1076 | pp->crqb[pp->req_producer].ctrl_flags = cpu_to_le16(flags); | 1077 | pp->crqb[in_index].ctrl_flags = cpu_to_le16(flags); |
1077 | 1078 | ||
1078 | cw = &pp->crqb[pp->req_producer].ata_cmd[0]; | 1079 | cw = &pp->crqb[in_index].ata_cmd[0]; |
1079 | tf = &qc->tf; | 1080 | tf = &qc->tf; |
1080 | 1081 | ||
1081 | /* Sadly, the CRQB cannot accommodate all registers--there are | 1082 |
@@ -1144,16 +1145,12 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1144 | struct mv_port_priv *pp = ap->private_data; | 1145 | struct mv_port_priv *pp = ap->private_data; |
1145 | struct mv_crqb_iie *crqb; | 1146 | struct mv_crqb_iie *crqb; |
1146 | struct ata_taskfile *tf; | 1147 | struct ata_taskfile *tf; |
1148 | unsigned in_index; | ||
1147 | u32 flags = 0; | 1149 | u32 flags = 0; |
1148 | 1150 | ||
1149 | if (ATA_PROT_DMA != qc->tf.protocol) | 1151 | if (ATA_PROT_DMA != qc->tf.protocol) |
1150 | return; | 1152 | return; |
1151 | 1153 | ||
1152 | /* the req producer index should be the same as we remember it */ | ||
1153 | WARN_ON(((readl(mv_ap_base(qc->ap) + EDMA_REQ_Q_IN_PTR_OFS) >> | ||
1154 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
1155 | pp->req_producer); | ||
1156 | |||
1157 | /* Fill in Gen IIE command request block | 1154 | /* Fill in Gen IIE command request block |
1158 | */ | 1155 | */ |
1159 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1156 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
@@ -1162,7 +1159,11 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1162 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); | 1159 | WARN_ON(MV_MAX_Q_DEPTH <= qc->tag); |
1163 | flags |= qc->tag << CRQB_TAG_SHIFT; | 1160 | flags |= qc->tag << CRQB_TAG_SHIFT; |
1164 | 1161 | ||
1165 | crqb = (struct mv_crqb_iie *) &pp->crqb[pp->req_producer]; | 1162 | /* get current queue index from hardware */ |
1163 | in_index = (readl(mv_ap_base(ap) + EDMA_REQ_Q_IN_PTR_OFS) | ||
1164 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1165 | |||
1166 | crqb = (struct mv_crqb_iie *) &pp->crqb[in_index]; | ||
1166 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); | 1167 | crqb->addr = cpu_to_le32(pp->sg_tbl_dma & 0xffffffff); |
1167 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); | 1168 | crqb->addr_hi = cpu_to_le32((pp->sg_tbl_dma >> 16) >> 16); |
1168 | crqb->flags = cpu_to_le32(flags); | 1169 | crqb->flags = cpu_to_le32(flags); |
@@ -1210,6 +1211,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1210 | { | 1211 | { |
1211 | void __iomem *port_mmio = mv_ap_base(qc->ap); | 1212 | void __iomem *port_mmio = mv_ap_base(qc->ap); |
1212 | struct mv_port_priv *pp = qc->ap->private_data; | 1213 | struct mv_port_priv *pp = qc->ap->private_data; |
1214 | unsigned in_index; | ||
1213 | u32 in_ptr; | 1215 | u32 in_ptr; |
1214 | 1216 | ||
1215 | if (ATA_PROT_DMA != qc->tf.protocol) { | 1217 | if (ATA_PROT_DMA != qc->tf.protocol) { |
@@ -1221,23 +1223,20 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1221 | return ata_qc_issue_prot(qc); | 1223 | return ata_qc_issue_prot(qc); |
1222 | } | 1224 | } |
1223 | 1225 | ||
1224 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1226 | in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
1227 | in_index = (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1225 | 1228 | ||
1226 | /* the req producer index should be the same as we remember it */ | ||
1227 | WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | ||
1228 | pp->req_producer); | ||
1229 | /* until we do queuing, the queue should be empty at this point */ | 1229 | /* until we do queuing, the queue should be empty at this point */ |
1230 | WARN_ON(((in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1230 | WARN_ON(in_index != ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) |
1231 | ((readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS) >> | 1231 | >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
1232 | EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); | ||
1233 | 1232 | ||
1234 | mv_inc_q_index(&pp->req_producer); /* now incr producer index */ | 1233 | in_index = mv_inc_q_index(in_index); /* now incr producer index */ |
1235 | 1234 | ||
1236 | mv_start_dma(port_mmio, pp); | 1235 | mv_start_dma(port_mmio, pp); |
1237 | 1236 | ||
1238 | /* and write the request in pointer to kick the EDMA to life */ | 1237 | /* and write the request in pointer to kick the EDMA to life */ |
1239 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; | 1238 | in_ptr &= EDMA_REQ_Q_BASE_LO_MASK; |
1240 | in_ptr |= pp->req_producer << EDMA_REQ_Q_PTR_SHIFT; | 1239 | in_ptr |= in_index << EDMA_REQ_Q_PTR_SHIFT; |
1241 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 1240 | writelfl(in_ptr, port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
1242 | 1241 | ||
1243 | return 0; | 1242 | return 0; |
@@ -1260,28 +1259,26 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
1260 | { | 1259 | { |
1261 | void __iomem *port_mmio = mv_ap_base(ap); | 1260 | void __iomem *port_mmio = mv_ap_base(ap); |
1262 | struct mv_port_priv *pp = ap->private_data; | 1261 | struct mv_port_priv *pp = ap->private_data; |
1262 | unsigned out_index; | ||
1263 | u32 out_ptr; | 1263 | u32 out_ptr; |
1264 | u8 ata_status; | 1264 | u8 ata_status; |
1265 | 1265 | ||
1266 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1266 | out_ptr = readl(port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
1267 | out_index = (out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK; | ||
1267 | 1268 | ||
1268 | /* the response consumer index should be the same as we remember it */ | 1269 | ata_status = le16_to_cpu(pp->crpb[out_index].flags) |
1269 | WARN_ON(((out_ptr >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1270 | >> CRPB_FLAG_STATUS_SHIFT; |
1270 | pp->rsp_consumer); | ||
1271 | |||
1272 | ata_status = pp->crpb[pp->rsp_consumer].flags >> CRPB_FLAG_STATUS_SHIFT; | ||
1273 | 1271 | ||
1274 | /* increment our consumer index... */ | 1272 | /* increment our consumer index... */ |
1275 | pp->rsp_consumer = mv_inc_q_index(&pp->rsp_consumer); | 1273 | out_index = mv_inc_q_index(out_index); |
1276 | 1274 | ||
1277 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ | 1275 | /* and, until we do NCQ, there should only be 1 CRPB waiting */ |
1278 | WARN_ON(((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) >> | 1276 | WARN_ON(out_index != ((readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS) |
1279 | EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK) != | 1277 | >> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK)); |
1280 | pp->rsp_consumer); | ||
1281 | 1278 | ||
1282 | /* write out our inc'd consumer index so EDMA knows we're caught up */ | 1279 | /* write out our inc'd consumer index so EDMA knows we're caught up */ |
1283 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; | 1280 | out_ptr &= EDMA_RSP_Q_BASE_LO_MASK; |
1284 | out_ptr |= pp->rsp_consumer << EDMA_RSP_Q_PTR_SHIFT; | 1281 | out_ptr |= out_index << EDMA_RSP_Q_PTR_SHIFT; |
1285 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 1282 | writelfl(out_ptr, port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
1286 | 1283 | ||
1287 | /* Return ATA status register for completed CRPB */ | 1284 | /* Return ATA status register for completed CRPB */ |
@@ -1291,6 +1288,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
1291 | /** | 1288 | /** |
1292 | * mv_err_intr - Handle error interrupts on the port | 1289 | * mv_err_intr - Handle error interrupts on the port |
1293 | * @ap: ATA channel to manipulate | 1290 | * @ap: ATA channel to manipulate |
1291 | * @reset_allowed: bool: 0 == don't trigger from reset here | ||
1294 | * | 1292 | * |
1295 | * In most cases, just clear the interrupt and move on. However, | 1293 | * In most cases, just clear the interrupt and move on. However, |
1296 | * some cases require an eDMA reset, which is done right before | 1294 | * some cases require an eDMA reset, which is done right before |
@@ -1301,7 +1299,7 @@ static u8 mv_get_crpb_status(struct ata_port *ap) | |||
1301 | * LOCKING: | 1299 | * LOCKING: |
1302 | * Inherited from caller. | 1300 | * Inherited from caller. |
1303 | */ | 1301 | */ |
1304 | static void mv_err_intr(struct ata_port *ap) | 1302 | static void mv_err_intr(struct ata_port *ap, int reset_allowed) |
1305 | { | 1303 | { |
1306 | void __iomem *port_mmio = mv_ap_base(ap); | 1304 | void __iomem *port_mmio = mv_ap_base(ap); |
1307 | u32 edma_err_cause, serr = 0; | 1305 | u32 edma_err_cause, serr = 0; |
@@ -1323,9 +1321,8 @@ static void mv_err_intr(struct ata_port *ap) | |||
1323 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); | 1321 | writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS); |
1324 | 1322 | ||
1325 | /* check for fatal here and recover if needed */ | 1323 | /* check for fatal here and recover if needed */ |
1326 | if (EDMA_ERR_FATAL & edma_err_cause) { | 1324 | if (reset_allowed && (EDMA_ERR_FATAL & edma_err_cause)) |
1327 | mv_stop_and_reset(ap); | 1325 | mv_stop_and_reset(ap); |
1328 | } | ||
1329 | } | 1326 | } |
1330 | 1327 | ||
1331 | /** | 1328 | /** |
@@ -1374,12 +1371,12 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
1374 | struct ata_port *ap = host_set->ports[port]; | 1371 | struct ata_port *ap = host_set->ports[port]; |
1375 | struct mv_port_priv *pp = ap->private_data; | 1372 | struct mv_port_priv *pp = ap->private_data; |
1376 | 1373 | ||
1377 | hard_port = port & MV_PORT_MASK; /* range 0-3 */ | 1374 | hard_port = mv_hardport_from_port(port); /* range 0..3 */ |
1378 | handled = 0; /* ensure ata_status is set if handled++ */ | 1375 | handled = 0; /* ensure ata_status is set if handled++ */ |
1379 | 1376 | ||
1380 | /* Note that DEV_IRQ might happen spuriously during EDMA, | 1377 | /* Note that DEV_IRQ might happen spuriously during EDMA, |
1381 | * and should be ignored in such cases. We could mask it, | 1378 | * and should be ignored in such cases. |
1382 | * but it's pretty rare and may not be worth the overhead. | 1379 | * The cause of this is still under investigation. |
1383 | */ | 1380 | */ |
1384 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 1381 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
1385 | /* EDMA: check for response queue interrupt */ | 1382 | /* EDMA: check for response queue interrupt */ |
@@ -1393,6 +1390,11 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
1393 | ata_status = readb((void __iomem *) | 1390 | ata_status = readb((void __iomem *) |
1394 | ap->ioaddr.status_addr); | 1391 | ap->ioaddr.status_addr); |
1395 | handled = 1; | 1392 | handled = 1; |
1393 | /* ignore spurious intr if drive still BUSY */ | ||
1394 | if (ata_status & ATA_BUSY) { | ||
1395 | ata_status = 0; | ||
1396 | handled = 0; | ||
1397 | } | ||
1396 | } | 1398 | } |
1397 | } | 1399 | } |
1398 | 1400 | ||
@@ -1406,7 +1408,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, | |||
1406 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1408 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
1407 | } | 1409 | } |
1408 | if ((PORT0_ERR << shift) & relevant) { | 1410 | if ((PORT0_ERR << shift) & relevant) { |
1409 | mv_err_intr(ap); | 1411 | mv_err_intr(ap, 1); |
1410 | err_mask |= AC_ERR_OTHER; | 1412 | err_mask |= AC_ERR_OTHER; |
1411 | handled = 1; | 1413 | handled = 1; |
1412 | } | 1414 | } |
@@ -1448,6 +1450,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
1448 | struct ata_host_set *host_set = dev_instance; | 1450 | struct ata_host_set *host_set = dev_instance; |
1449 | unsigned int hc, handled = 0, n_hcs; | 1451 | unsigned int hc, handled = 0, n_hcs; |
1450 | void __iomem *mmio = host_set->mmio_base; | 1452 | void __iomem *mmio = host_set->mmio_base; |
1453 | struct mv_host_priv *hpriv; | ||
1451 | u32 irq_stat; | 1454 | u32 irq_stat; |
1452 | 1455 | ||
1453 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); | 1456 | irq_stat = readl(mmio + HC_MAIN_IRQ_CAUSE_OFS); |
@@ -1469,6 +1472,17 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance, | |||
1469 | handled++; | 1472 | handled++; |
1470 | } | 1473 | } |
1471 | } | 1474 | } |
1475 | |||
1476 | hpriv = host_set->private_data; | ||
1477 | if (IS_60XX(hpriv)) { | ||
1478 | /* deal with the interrupt coalescing bits */ | ||
1479 | if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { | ||
1480 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_LO); | ||
1481 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE_HI); | ||
1482 | writelfl(0, mmio + MV_IRQ_COAL_CAUSE); | ||
1483 | } | ||
1484 | } | ||
1485 | |||
1472 | if (PCI_ERR & irq_stat) { | 1486 | if (PCI_ERR & irq_stat) { |
1473 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", | 1487 | printk(KERN_ERR DRV_NAME ": PCI ERROR; PCI IRQ cause=0x%08x\n", |
1474 | readl(mmio + PCI_IRQ_CAUSE_OFS)); | 1488 | readl(mmio + PCI_IRQ_CAUSE_OFS)); |
@@ -1867,7 +1881,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1867 | 1881 | ||
1868 | if (IS_60XX(hpriv)) { | 1882 | if (IS_60XX(hpriv)) { |
1869 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 1883 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); |
1870 | ifctl |= (1 << 12) | (1 << 7); | 1884 | ifctl |= (1 << 7); /* enable gen2i speed */ |
1885 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
1871 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 1886 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); |
1872 | } | 1887 | } |
1873 | 1888 | ||
@@ -2031,11 +2046,14 @@ static void mv_eng_timeout(struct ata_port *ap) | |||
2031 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, | 2046 | ap->host_set->mmio_base, ap, qc, qc->scsicmd, |
2032 | &qc->scsicmd->cmnd); | 2047 | &qc->scsicmd->cmnd); |
2033 | 2048 | ||
2034 | mv_err_intr(ap); | 2049 | mv_err_intr(ap, 0); |
2035 | mv_stop_and_reset(ap); | 2050 | mv_stop_and_reset(ap); |
2036 | 2051 | ||
2037 | qc->err_mask |= AC_ERR_TIMEOUT; | 2052 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); |
2038 | ata_eh_qc_complete(qc); | 2053 | if (qc->flags & ATA_QCFLAG_ACTIVE) { |
2054 | qc->err_mask |= AC_ERR_TIMEOUT; | ||
2055 | ata_eh_qc_complete(qc); | ||
2056 | } | ||
2039 | } | 2057 | } |
2040 | 2058 | ||
2041 | /** | 2059 | /** |
@@ -2229,7 +2247,8 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent, | |||
2229 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2247 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2230 | 2248 | ||
2231 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2249 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); |
2232 | ifctl |= (1 << 12); | 2250 | ifctl |= (1 << 7); /* enable gen2i speed */ |
2251 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | ||
2233 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 2252 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); |
2234 | } | 2253 | } |
2235 | 2254 | ||
@@ -2330,6 +2349,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
2330 | if (rc) { | 2349 | if (rc) { |
2331 | return rc; | 2350 | return rc; |
2332 | } | 2351 | } |
2352 | pci_set_master(pdev); | ||
2333 | 2353 | ||
2334 | rc = pci_request_regions(pdev, DRV_NAME); | 2354 | rc = pci_request_regions(pdev, DRV_NAME); |
2335 | if (rc) { | 2355 | if (rc) { |
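Most of the sata_mv.c update removes the driver's cached req_producer/rsp_consumer copies and derives the EDMA queue indices from the hardware pointer registers each time they are needed, which also retires the WARN_ONs that cross-checked the two. A minimal sketch of the new indexing (register names as in the driver, the helper itself is simplified and illustrative only):

    static unsigned int example_req_in_index(void __iomem *port_mmio)
    {
        u32 in_ptr = readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS);

        /* the hardware pointer register is the single source of truth */
        return (in_ptr >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
    }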
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index d40e7c871c36..56cb49006116 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
@@ -4054,7 +4054,7 @@ static int st_probe(struct device *dev) | |||
4054 | } | 4054 | } |
4055 | 4055 | ||
4056 | sdev_printk(KERN_WARNING, SDp, | 4056 | sdev_printk(KERN_WARNING, SDp, |
4057 | "Attached scsi tape %s", tape_name(tpnt)); | 4057 | "Attached scsi tape %s\n", tape_name(tpnt)); |
4058 | printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", | 4058 | printk(KERN_WARNING "%s: try direct i/o: %s (alignment %d B)\n", |
4059 | tape_name(tpnt), tpnt->try_dio ? "yes" : "no", | 4059 | tape_name(tpnt), tpnt->try_dio ? "yes" : "no", |
4060 | queue_dma_alignment(SDp->request_queue) + 1); | 4060 | queue_dma_alignment(SDp->request_queue) + 1); |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index aeb8153ccf24..17839e753e4c 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -1907,9 +1907,12 @@ uart_set_options(struct uart_port *port, struct console *co, | |||
1907 | static void uart_change_pm(struct uart_state *state, int pm_state) | 1907 | static void uart_change_pm(struct uart_state *state, int pm_state) |
1908 | { | 1908 | { |
1909 | struct uart_port *port = state->port; | 1909 | struct uart_port *port = state->port; |
1910 | if (port->ops->pm) | 1910 | |
1911 | port->ops->pm(port, pm_state, state->pm_state); | 1911 | if (state->pm_state != pm_state) { |
1912 | state->pm_state = pm_state; | 1912 | if (port->ops->pm) |
1913 | port->ops->pm(port, pm_state, state->pm_state); | ||
1914 | state->pm_state = pm_state; | ||
1915 | } | ||
1913 | } | 1916 | } |
1914 | 1917 | ||
1915 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | 1918 | int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) |
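The serial_core.c change turns uart_change_pm() into a no-op when the requested power state already matches the cached one, so repeated suspend/resume transitions stop calling the port's pm hook redundantly. Equivalent logic as a hedged sketch (not the function's exact text):

    static void example_change_pm(struct uart_state *state, int pm_state)
    {
        struct uart_port *port = state->port;

        if (state->pm_state == pm_state)
            return;                     /* already in the requested state */

        if (port->ops->pm)
            port->ops->pm(port, pm_state, state->pm_state);
        state->pm_state = pm_state;
    }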
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 7a75faeb0526..23334c8bc4c7 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -75,11 +75,45 @@ config SPI_BUTTERFLY | |||
75 | inexpensive battery powered microcontroller evaluation board. | 75 | inexpensive battery powered microcontroller evaluation board. |
76 | This same cable can be used to flash new firmware. | 76 | This same cable can be used to flash new firmware. |
77 | 77 | ||
78 | config SPI_MPC83xx | ||
79 | tristate "Freescale MPC83xx SPI controller" | ||
80 | depends on SPI_MASTER && PPC_83xx && EXPERIMENTAL | ||
81 | select SPI_BITBANG | ||
82 | help | ||
83 | This enables using the Freescale MPC83xx SPI controller in master | ||
84 | mode. | ||
85 | |||
86 | Note, this driver uniquely supports the SPI controller on the MPC83xx | ||
87 | family of PowerPC processors. The MPC83xx uses a simple set of shift | ||
88 | registers for data (as opposed to the CPM-based descriptor model). | ||
89 | |||
90 | config SPI_PXA2XX | ||
91 | tristate "PXA2xx SSP SPI master" | ||
92 | depends on SPI_MASTER && ARCH_PXA && EXPERIMENTAL | ||
93 | help | ||
94 | This enables using a PXA2xx SSP port as a SPI master controller. | ||
95 | The driver can be configured to use any SSP port and additional | ||
96 | documentation can be found a Documentation/spi/pxa2xx. | ||
97 | |||
98 | config SPI_S3C24XX_GPIO | ||
99 | tristate "Samsung S3C24XX series SPI by GPIO" | ||
100 | depends on SPI_MASTER && ARCH_S3C2410 && SPI_BITBANG && EXPERIMENTAL | ||
101 | help | ||
102 | SPI driver for Samsung S3C24XX series ARM SoCs using | ||
103 | GPIO lines to provide the SPI bus. This can be used where | ||
104 | the inbuilt hardware cannot provide the transfer mode, or | ||
105 | where the board is using non hardware connected pins. | ||
78 | # | 106 | # |
79 | # Add new SPI master controllers in alphabetical order above this line | 107 | # Add new SPI master controllers in alphabetical order above this line |
80 | # | 108 | # |
81 | 109 | ||
82 | 110 | ||
111 | config SPI_S3C24XX | ||
112 | tristate "Samsung S3C24XX series SPI" | ||
113 | depends on SPI_MASTER && ARCH_S3C2410 && EXPERIMENTAL | ||
114 | help | ||
115 | SPI driver for Samsung S3C24XX series ARM SoCs | ||
116 | |||
83 | # | 117 | # |
84 | # There are lots of SPI device types, with sensors and memory | 118 | # There are lots of SPI device types, with sensors and memory |
85 | # being probably the most widely used ones. | 119 | # being probably the most widely used ones. |
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index c2c87e845abf..8f4cb67997b3 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile | |||
@@ -13,6 +13,10 @@ obj-$(CONFIG_SPI_MASTER) += spi.o | |||
13 | # SPI master controller drivers (bus) | 13 | # SPI master controller drivers (bus) |
14 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o | 14 | obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o |
15 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o | 15 | obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o |
16 | obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o | ||
17 | obj-$(CONFIG_SPI_MPC83xx) += spi_mpc83xx.o | ||
18 | obj-$(CONFIG_SPI_S3C24XX_GPIO) += spi_s3c24xx_gpio.o | ||
19 | obj-$(CONFIG_SPI_S3C24XX) += spi_s3c24xx.o | ||
16 | # ... add above this line ... | 20 | # ... add above this line ... |
17 | 21 | ||
18 | # SPI protocol drivers (device/link on bus) | 22 | # SPI protocol drivers (device/link on bus) |
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c new file mode 100644 index 000000000000..29aec77f98be --- /dev/null +++ b/drivers/spi/pxa2xx_spi.c | |||
@@ -0,0 +1,1486 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/init.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/ioport.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/dma-mapping.h> | ||
27 | #include <linux/spi/spi.h> | ||
28 | #include <linux/workqueue.h> | ||
29 | #include <linux/errno.h> | ||
30 | #include <linux/delay.h> | ||
31 | |||
32 | #include <asm/io.h> | ||
33 | #include <asm/irq.h> | ||
34 | #include <asm/hardware.h> | ||
35 | #include <asm/delay.h> | ||
36 | #include <asm/dma.h> | ||
37 | |||
38 | #include <asm/arch/hardware.h> | ||
39 | #include <asm/arch/pxa-regs.h> | ||
40 | #include <asm/arch/pxa2xx_spi.h> | ||
41 | |||
42 | MODULE_AUTHOR("Stephen Street"); | ||
43 | MODULE_DESCRIPTION("PXA2xx SSP SPI Controller"); | ||
44 | MODULE_LICENSE("GPL"); | ||
45 | |||
46 | #define MAX_BUSES 3 | ||
47 | |||
48 | #define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR) | ||
49 | #define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK) | ||
50 | #define IS_DMA_ALIGNED(x) (((u32)(x)&0x07)==0) | ||
51 | |||
52 | #define DEFINE_SSP_REG(reg, off) \ | ||
53 | static inline u32 read_##reg(void *p) { return __raw_readl(p + (off)); } \ | ||
54 | static inline void write_##reg(u32 v, void *p) { __raw_writel(v, p + (off)); } | ||
55 | |||
56 | DEFINE_SSP_REG(SSCR0, 0x00) | ||
57 | DEFINE_SSP_REG(SSCR1, 0x04) | ||
58 | DEFINE_SSP_REG(SSSR, 0x08) | ||
59 | DEFINE_SSP_REG(SSITR, 0x0c) | ||
60 | DEFINE_SSP_REG(SSDR, 0x10) | ||
61 | DEFINE_SSP_REG(SSTO, 0x28) | ||
62 | DEFINE_SSP_REG(SSPSP, 0x2c) | ||
63 | |||
64 | #define START_STATE ((void*)0) | ||
65 | #define RUNNING_STATE ((void*)1) | ||
66 | #define DONE_STATE ((void*)2) | ||
67 | #define ERROR_STATE ((void*)-1) | ||
68 | |||
69 | #define QUEUE_RUNNING 0 | ||
70 | #define QUEUE_STOPPED 1 | ||
71 | |||
72 | struct driver_data { | ||
73 | /* Driver model hookup */ | ||
74 | struct platform_device *pdev; | ||
75 | |||
76 | /* SPI framework hookup */ | ||
77 | enum pxa_ssp_type ssp_type; | ||
78 | struct spi_master *master; | ||
79 | |||
80 | /* PXA hookup */ | ||
81 | struct pxa2xx_spi_master *master_info; | ||
82 | |||
83 | /* DMA setup stuff */ | ||
84 | int rx_channel; | ||
85 | int tx_channel; | ||
86 | u32 *null_dma_buf; | ||
87 | |||
88 | /* SSP register addresses */ | ||
89 | void *ioaddr; | ||
90 | u32 ssdr_physical; | ||
91 | |||
92 | /* SSP masks*/ | ||
93 | u32 dma_cr1; | ||
94 | u32 int_cr1; | ||
95 | u32 clear_sr; | ||
96 | u32 mask_sr; | ||
97 | |||
98 | /* Driver message queue */ | ||
99 | struct workqueue_struct *workqueue; | ||
100 | struct work_struct pump_messages; | ||
101 | spinlock_t lock; | ||
102 | struct list_head queue; | ||
103 | int busy; | ||
104 | int run; | ||
105 | |||
106 | /* Message Transfer pump */ | ||
107 | struct tasklet_struct pump_transfers; | ||
108 | |||
109 | /* Current message transfer state info */ | ||
110 | struct spi_message* cur_msg; | ||
111 | struct spi_transfer* cur_transfer; | ||
112 | struct chip_data *cur_chip; | ||
113 | size_t len; | ||
114 | void *tx; | ||
115 | void *tx_end; | ||
116 | void *rx; | ||
117 | void *rx_end; | ||
118 | int dma_mapped; | ||
119 | dma_addr_t rx_dma; | ||
120 | dma_addr_t tx_dma; | ||
121 | size_t rx_map_len; | ||
122 | size_t tx_map_len; | ||
123 | u8 n_bytes; | ||
124 | u32 dma_width; | ||
125 | int cs_change; | ||
126 | void (*write)(struct driver_data *drv_data); | ||
127 | void (*read)(struct driver_data *drv_data); | ||
128 | irqreturn_t (*transfer_handler)(struct driver_data *drv_data); | ||
129 | void (*cs_control)(u32 command); | ||
130 | }; | ||
131 | |||
132 | struct chip_data { | ||
133 | u32 cr0; | ||
134 | u32 cr1; | ||
135 | u32 to; | ||
136 | u32 psp; | ||
137 | u32 timeout; | ||
138 | u8 n_bytes; | ||
139 | u32 dma_width; | ||
140 | u32 dma_burst_size; | ||
141 | u32 threshold; | ||
142 | u32 dma_threshold; | ||
143 | u8 enable_dma; | ||
144 | u8 bits_per_word; | ||
145 | u32 speed_hz; | ||
146 | void (*write)(struct driver_data *drv_data); | ||
147 | void (*read)(struct driver_data *drv_data); | ||
148 | void (*cs_control)(u32 command); | ||
149 | }; | ||
150 | |||
151 | static void pump_messages(void *data); | ||
152 | |||
153 | static int flush(struct driver_data *drv_data) | ||
154 | { | ||
155 | unsigned long limit = loops_per_jiffy << 1; | ||
156 | |||
157 | void *reg = drv_data->ioaddr; | ||
158 | |||
159 | do { | ||
160 | while (read_SSSR(reg) & SSSR_RNE) { | ||
161 | read_SSDR(reg); | ||
162 | } | ||
163 | } while ((read_SSSR(reg) & SSSR_BSY) && limit--); | ||
164 | write_SSSR(SSSR_ROR, reg); | ||
165 | |||
166 | return limit; | ||
167 | } | ||
168 | |||
169 | static void restore_state(struct driver_data *drv_data) | ||
170 | { | ||
171 | void *reg = drv_data->ioaddr; | ||
172 | |||
173 | /* Clear status and disable clock */ | ||
174 | write_SSSR(drv_data->clear_sr, reg); | ||
175 | write_SSCR0(drv_data->cur_chip->cr0 & ~SSCR0_SSE, reg); | ||
176 | |||
177 | /* Load the registers */ | ||
178 | write_SSCR1(drv_data->cur_chip->cr1, reg); | ||
179 | write_SSCR0(drv_data->cur_chip->cr0, reg); | ||
180 | if (drv_data->ssp_type != PXA25x_SSP) { | ||
181 | write_SSTO(0, reg); | ||
182 | write_SSPSP(drv_data->cur_chip->psp, reg); | ||
183 | } | ||
184 | } | ||
185 | |||
186 | static void null_cs_control(u32 command) | ||
187 | { | ||
188 | } | ||
189 | |||
190 | static void null_writer(struct driver_data *drv_data) | ||
191 | { | ||
192 | void *reg = drv_data->ioaddr; | ||
193 | u8 n_bytes = drv_data->n_bytes; | ||
194 | |||
195 | while ((read_SSSR(reg) & SSSR_TNF) | ||
196 | && (drv_data->tx < drv_data->tx_end)) { | ||
197 | write_SSDR(0, reg); | ||
198 | drv_data->tx += n_bytes; | ||
199 | } | ||
200 | } | ||
201 | |||
202 | static void null_reader(struct driver_data *drv_data) | ||
203 | { | ||
204 | void *reg = drv_data->ioaddr; | ||
205 | u8 n_bytes = drv_data->n_bytes; | ||
206 | |||
207 | while ((read_SSSR(reg) & SSSR_RNE) | ||
208 | && (drv_data->rx < drv_data->rx_end)) { | ||
209 | read_SSDR(reg); | ||
210 | drv_data->rx += n_bytes; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static void u8_writer(struct driver_data *drv_data) | ||
215 | { | ||
216 | void *reg = drv_data->ioaddr; | ||
217 | |||
218 | while ((read_SSSR(reg) & SSSR_TNF) | ||
219 | && (drv_data->tx < drv_data->tx_end)) { | ||
220 | write_SSDR(*(u8 *)(drv_data->tx), reg); | ||
221 | ++drv_data->tx; | ||
222 | } | ||
223 | } | ||
224 | |||
225 | static void u8_reader(struct driver_data *drv_data) | ||
226 | { | ||
227 | void *reg = drv_data->ioaddr; | ||
228 | |||
229 | while ((read_SSSR(reg) & SSSR_RNE) | ||
230 | && (drv_data->rx < drv_data->rx_end)) { | ||
231 | *(u8 *)(drv_data->rx) = read_SSDR(reg); | ||
232 | ++drv_data->rx; | ||
233 | } | ||
234 | } | ||
235 | |||
236 | static void u16_writer(struct driver_data *drv_data) | ||
237 | { | ||
238 | void *reg = drv_data->ioaddr; | ||
239 | |||
240 | while ((read_SSSR(reg) & SSSR_TNF) | ||
241 | && (drv_data->tx < drv_data->tx_end)) { | ||
242 | write_SSDR(*(u16 *)(drv_data->tx), reg); | ||
243 | drv_data->tx += 2; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | static void u16_reader(struct driver_data *drv_data) | ||
248 | { | ||
249 | void *reg = drv_data->ioaddr; | ||
250 | |||
251 | while ((read_SSSR(reg) & SSSR_RNE) | ||
252 | && (drv_data->rx < drv_data->rx_end)) { | ||
253 | *(u16 *)(drv_data->rx) = read_SSDR(reg); | ||
254 | drv_data->rx += 2; | ||
255 | } | ||
256 | } | ||
257 | static void u32_writer(struct driver_data *drv_data) | ||
258 | { | ||
259 | void *reg = drv_data->ioaddr; | ||
260 | |||
261 | while ((read_SSSR(reg) & SSSR_TNF) | ||
262 | && (drv_data->tx < drv_data->tx_end)) { | ||
263 | write_SSDR(*(u32 *)(drv_data->tx), reg); | ||
264 | drv_data->tx += 4; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | static void u32_reader(struct driver_data *drv_data) | ||
269 | { | ||
270 | void *reg = drv_data->ioaddr; | ||
271 | |||
272 | while ((read_SSSR(reg) & SSSR_RNE) | ||
273 | && (drv_data->rx < drv_data->rx_end)) { | ||
274 | *(u32 *)(drv_data->rx) = read_SSDR(reg); | ||
275 | drv_data->rx += 4; | ||
276 | } | ||
277 | } | ||
278 | |||
279 | static void *next_transfer(struct driver_data *drv_data) | ||
280 | { | ||
281 | struct spi_message *msg = drv_data->cur_msg; | ||
282 | struct spi_transfer *trans = drv_data->cur_transfer; | ||
283 | |||
284 | /* Move to next transfer */ | ||
285 | if (trans->transfer_list.next != &msg->transfers) { | ||
286 | drv_data->cur_transfer = | ||
287 | list_entry(trans->transfer_list.next, | ||
288 | struct spi_transfer, | ||
289 | transfer_list); | ||
290 | return RUNNING_STATE; | ||
291 | } else | ||
292 | return DONE_STATE; | ||
293 | } | ||
294 | |||
295 | static int map_dma_buffers(struct driver_data *drv_data) | ||
296 | { | ||
297 | struct spi_message *msg = drv_data->cur_msg; | ||
298 | struct device *dev = &msg->spi->dev; | ||
299 | |||
300 | if (!drv_data->cur_chip->enable_dma) | ||
301 | return 0; | ||
302 | |||
303 | if (msg->is_dma_mapped) | ||
304 | return drv_data->rx_dma && drv_data->tx_dma; | ||
305 | |||
306 | if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) | ||
307 | return 0; | ||
308 | |||
309 | /* Modify setup if rx buffer is null */ | ||
310 | if (drv_data->rx == NULL) { | ||
311 | *drv_data->null_dma_buf = 0; | ||
312 | drv_data->rx = drv_data->null_dma_buf; | ||
313 | drv_data->rx_map_len = 4; | ||
314 | } else | ||
315 | drv_data->rx_map_len = drv_data->len; | ||
316 | |||
317 | |||
318 | /* Modify setup if tx buffer is null */ | ||
319 | if (drv_data->tx == NULL) { | ||
320 | *drv_data->null_dma_buf = 0; | ||
321 | drv_data->tx = drv_data->null_dma_buf; | ||
322 | drv_data->tx_map_len = 4; | ||
323 | } else | ||
324 | drv_data->tx_map_len = drv_data->len; | ||
325 | |||
326 | /* Stream map the rx buffer */ | ||
327 | drv_data->rx_dma = dma_map_single(dev, drv_data->rx, | ||
328 | drv_data->rx_map_len, | ||
329 | DMA_FROM_DEVICE); | ||
330 | if (dma_mapping_error(drv_data->rx_dma)) | ||
331 | return 0; | ||
332 | |||
333 | /* Stream map the tx buffer */ | ||
334 | drv_data->tx_dma = dma_map_single(dev, drv_data->tx, | ||
335 | drv_data->tx_map_len, | ||
336 | DMA_TO_DEVICE); | ||
337 | |||
338 | if (dma_mapping_error(drv_data->tx_dma)) { | ||
339 | dma_unmap_single(dev, drv_data->rx_dma, | ||
340 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | return 1; | ||
345 | } | ||
346 | |||
347 | static void unmap_dma_buffers(struct driver_data *drv_data) | ||
348 | { | ||
349 | struct device *dev; | ||
350 | |||
351 | if (!drv_data->dma_mapped) | ||
352 | return; | ||
353 | |||
354 | if (!drv_data->cur_msg->is_dma_mapped) { | ||
355 | dev = &drv_data->cur_msg->spi->dev; | ||
356 | dma_unmap_single(dev, drv_data->rx_dma, | ||
357 | drv_data->rx_map_len, DMA_FROM_DEVICE); | ||
358 | dma_unmap_single(dev, drv_data->tx_dma, | ||
359 | drv_data->tx_map_len, DMA_TO_DEVICE); | ||
360 | } | ||
361 | |||
362 | drv_data->dma_mapped = 0; | ||
363 | } | ||
364 | |||
365 | /* caller already set message->status; dma and pio irqs are blocked */ | ||
366 | static void giveback(struct driver_data *drv_data) | ||
367 | { | ||
368 | struct spi_transfer* last_transfer; | ||
369 | unsigned long flags; | ||
370 | struct spi_message *msg; | ||
371 | |||
372 | spin_lock_irqsave(&drv_data->lock, flags); | ||
373 | msg = drv_data->cur_msg; | ||
374 | drv_data->cur_msg = NULL; | ||
375 | drv_data->cur_transfer = NULL; | ||
376 | drv_data->cur_chip = NULL; | ||
377 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
378 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
379 | |||
380 | last_transfer = list_entry(msg->transfers.prev, | ||
381 | struct spi_transfer, | ||
382 | transfer_list); | ||
383 | |||
384 | if (!last_transfer->cs_change) | ||
385 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
386 | |||
387 | msg->state = NULL; | ||
388 | if (msg->complete) | ||
389 | msg->complete(msg->context); | ||
390 | } | ||
391 | |||
392 | static int wait_ssp_rx_stall(void *ioaddr) | ||
393 | { | ||
394 | unsigned long limit = loops_per_jiffy << 1; | ||
395 | |||
396 | while ((read_SSSR(ioaddr) & SSSR_BSY) && limit--) | ||
397 | cpu_relax(); | ||
398 | |||
399 | return limit; | ||
400 | } | ||
401 | |||
402 | static int wait_dma_channel_stop(int channel) | ||
403 | { | ||
404 | unsigned long limit = loops_per_jiffy << 1; | ||
405 | |||
406 | while (!(DCSR(channel) & DCSR_STOPSTATE) && limit--) | ||
407 | cpu_relax(); | ||
408 | |||
409 | return limit; | ||
410 | } | ||
411 | |||
412 | static void dma_handler(int channel, void *data, struct pt_regs *regs) | ||
413 | { | ||
414 | struct driver_data *drv_data = data; | ||
415 | struct spi_message *msg = drv_data->cur_msg; | ||
416 | void *reg = drv_data->ioaddr; | ||
417 | u32 irq_status = DCSR(channel) & DMA_INT_MASK; | ||
418 | u32 trailing_sssr = 0; | ||
419 | |||
420 | if (irq_status & DCSR_BUSERR) { | ||
421 | |||
422 | /* Disable interrupts, clear status and reset DMA */ | ||
423 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
424 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
425 | if (drv_data->ssp_type != PXA25x_SSP) | ||
426 | write_SSTO(0, reg); | ||
427 | write_SSSR(drv_data->clear_sr, reg); | ||
428 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
429 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
430 | |||
431 | if (flush(drv_data) == 0) | ||
432 | dev_err(&drv_data->pdev->dev, | ||
433 | "dma_handler: flush fail\n"); | ||
434 | |||
435 | unmap_dma_buffers(drv_data); | ||
436 | |||
437 | if (channel == drv_data->tx_channel) | ||
438 | dev_err(&drv_data->pdev->dev, | ||
439 | "dma_handler: bad bus address on " | ||
440 | "tx channel %d, source %x target = %x\n", | ||
441 | channel, DSADR(channel), DTADR(channel)); | ||
442 | else | ||
443 | dev_err(&drv_data->pdev->dev, | ||
444 | "dma_handler: bad bus address on " | ||
445 | "rx channel %d, source %x target = %x\n", | ||
446 | channel, DSADR(channel), DTADR(channel)); | ||
447 | |||
448 | msg->state = ERROR_STATE; | ||
449 | tasklet_schedule(&drv_data->pump_transfers); | ||
450 | } | ||
451 | |||
452 | /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */ | ||
453 | if ((drv_data->ssp_type == PXA25x_SSP) | ||
454 | && (channel == drv_data->tx_channel) | ||
455 | && (irq_status & DCSR_ENDINTR)) { | ||
456 | |||
457 | /* Wait for rx to stall */ | ||
458 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
459 | dev_err(&drv_data->pdev->dev, | ||
460 | "dma_handler: ssp rx stall failed\n"); | ||
461 | |||
462 | /* Clear and disable interrupts on SSP and DMA channels */ | ||
463 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
464 | write_SSSR(drv_data->clear_sr, reg); | ||
465 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
466 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
467 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
468 | dev_err(&drv_data->pdev->dev, | ||
469 | "dma_handler: dma rx channel stop failed\n"); | ||
470 | |||
471 | unmap_dma_buffers(drv_data); | ||
472 | |||
473 | /* Calculate number of trailing bytes left in the | ||
474 | * receive fifo and read them */ | ||
475 | trailing_sssr = read_SSSR(reg); | ||
476 | if ((trailing_sssr & 0xf008) != 0xf000) { | ||
477 | drv_data->rx = drv_data->rx_end - | ||
478 | (((trailing_sssr >> 12) & 0x0f) + 1); | ||
479 | drv_data->read(drv_data); | ||
480 | } | ||
481 | msg->actual_length += drv_data->len; | ||
482 | |||
483 | /* Release chip select if requested, transfer delays are | ||
484 | * handled in pump_transfers */ | ||
485 | if (drv_data->cs_change) | ||
486 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
487 | |||
488 | /* Move to next transfer */ | ||
489 | msg->state = next_transfer(drv_data); | ||
490 | |||
491 | /* Schedule transfer tasklet */ | ||
492 | tasklet_schedule(&drv_data->pump_transfers); | ||
493 | } | ||
494 | } | ||
495 | |||
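A note on the trailing-byte logic above (the same decode reappears in dma_transfer() below): the driver derives the number of words left in the receive fifo from SSSR. The masks in the code suggest that bits 15:12 hold the rx fifo level and bit 3 the rx-not-empty flag; the following stand-alone sketch (not part of the driver, assumptions as stated) restates that decode:

	static unsigned int trailing_words(u32 sssr)
	{
		/* 0xf000 with bit 3 clear is treated as "fifo empty" */
		if ((sssr & 0xf008) == 0xf000)
			return 0;
		/* otherwise (level field + 1) words remain to be read */
		return ((sssr >> 12) & 0x0f) + 1;
	}
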
496 | static irqreturn_t dma_transfer(struct driver_data *drv_data) | ||
497 | { | ||
498 | u32 irq_status; | ||
499 | u32 trailing_sssr = 0; | ||
500 | struct spi_message *msg = drv_data->cur_msg; | ||
501 | void *reg = drv_data->ioaddr; | ||
502 | |||
503 | irq_status = read_SSSR(reg) & drv_data->mask_sr; | ||
504 | if (irq_status & SSSR_ROR) { | ||
505 | /* Clear and disable interrupts on SSP and DMA channels */ | ||
506 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
507 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
508 | if (drv_data->ssp_type != PXA25x_SSP) | ||
509 | write_SSTO(0, reg); | ||
510 | write_SSSR(drv_data->clear_sr, reg); | ||
511 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
512 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
513 | unmap_dma_buffers(drv_data); | ||
514 | |||
515 | if (flush(drv_data) == 0) | ||
516 | dev_err(&drv_data->pdev->dev, | ||
517 | "dma_transfer: flush fail\n"); | ||
518 | |||
519 | dev_warn(&drv_data->pdev->dev, "dma_transfer: fifo overrun\n"); | ||
520 | |||
521 | drv_data->cur_msg->state = ERROR_STATE; | ||
522 | tasklet_schedule(&drv_data->pump_transfers); | ||
523 | |||
524 | return IRQ_HANDLED; | ||
525 | } | ||
526 | |||
527 | /* Check for false positive timeout */ | ||
528 | if ((irq_status & SSSR_TINT) && DCSR(drv_data->tx_channel) & DCSR_RUN) { | ||
529 | write_SSSR(SSSR_TINT, reg); | ||
530 | return IRQ_HANDLED; | ||
531 | } | ||
532 | |||
533 | if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) { | ||
534 | |||
535 | /* Clear and disable interrupts on SSP and DMA channels */ | ||
536 | write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg); | ||
537 | if (drv_data->ssp_type != PXA25x_SSP) | ||
538 | write_SSTO(0, reg); | ||
539 | write_SSSR(drv_data->clear_sr, reg); | ||
540 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
541 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
542 | |||
543 | if (wait_dma_channel_stop(drv_data->rx_channel) == 0) | ||
544 | dev_err(&drv_data->pdev->dev, | ||
545 | "dma_transfer: dma rx channel stop failed\n"); | ||
546 | |||
547 | if (wait_ssp_rx_stall(drv_data->ioaddr) == 0) | ||
548 | dev_err(&drv_data->pdev->dev, | ||
549 | "dma_transfer: ssp rx stall failed\n"); | ||
550 | |||
551 | unmap_dma_buffers(drv_data); | ||
552 | |||
553 | /* Calculate number of trailing bytes, read them */ | ||
554 | trailing_sssr = read_SSSR(reg); | ||
555 | if ((trailing_sssr & 0xf008) != 0xf000) { | ||
556 | drv_data->rx = drv_data->rx_end - | ||
557 | (((trailing_sssr >> 12) & 0x0f) + 1); | ||
558 | drv_data->read(drv_data); | ||
559 | } | ||
560 | msg->actual_length += drv_data->len; | ||
561 | |||
562 | /* Release chip select if requested, transfer delays are | ||
563 | * handled in pump_transfers */ | ||
564 | if (drv_data->cs_change) | ||
565 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
566 | |||
567 | /* Move to next transfer */ | ||
568 | msg->state = next_transfer(drv_data); | ||
569 | |||
570 | /* Schedule transfer tasklet */ | ||
571 | tasklet_schedule(&drv_data->pump_transfers); | ||
572 | |||
573 | return IRQ_HANDLED; | ||
574 | } | ||
575 | |||
576 | /* Oops, problem detected */ | ||
577 | return IRQ_NONE; | ||
578 | } | ||
579 | |||
580 | static irqreturn_t interrupt_transfer(struct driver_data *drv_data) | ||
581 | { | ||
582 | struct spi_message *msg = drv_data->cur_msg; | ||
583 | void *reg = drv_data->ioaddr; | ||
584 | unsigned long limit = loops_per_jiffy << 1; | ||
585 | u32 irq_status; | ||
586 | u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ? | ||
587 | drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS; | ||
588 | |||
589 | while ((irq_status = read_SSSR(reg) & irq_mask)) { | ||
590 | |||
591 | if (irq_status & SSSR_ROR) { | ||
592 | |||
593 | /* Clear and disable interrupts */ | ||
594 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
595 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
596 | if (drv_data->ssp_type != PXA25x_SSP) | ||
597 | write_SSTO(0, reg); | ||
598 | write_SSSR(drv_data->clear_sr, reg); | ||
599 | |||
600 | if (flush(drv_data) == 0) | ||
601 | dev_err(&drv_data->pdev->dev, | ||
602 | "interrupt_transfer: flush fail\n"); | ||
603 | |||
604 | /* Stop the SSP */ | ||
605 | |||
606 | dev_warn(&drv_data->pdev->dev, | ||
607 | "interrupt_transfer: fifo overun\n"); | ||
608 | |||
609 | msg->state = ERROR_STATE; | ||
610 | tasklet_schedule(&drv_data->pump_transfers); | ||
611 | |||
612 | return IRQ_HANDLED; | ||
613 | } | ||
614 | |||
615 | /* Look for false positive timeout */ | ||
616 | if ((irq_status & SSSR_TINT) | ||
617 | && (drv_data->rx < drv_data->rx_end)) | ||
618 | write_SSSR(SSSR_TINT, reg); | ||
619 | |||
620 | /* Pump data */ | ||
621 | drv_data->read(drv_data); | ||
622 | drv_data->write(drv_data); | ||
623 | |||
624 | if (drv_data->tx == drv_data->tx_end) { | ||
625 | /* Disable tx interrupt */ | ||
626 | write_SSCR1(read_SSCR1(reg) & ~SSCR1_TIE, reg); | ||
627 | irq_mask = drv_data->mask_sr & ~SSSR_TFS; | ||
628 | |||
629 | /* PXA25x_SSP has no timeout, read trailing bytes */ | ||
630 | if (drv_data->ssp_type == PXA25x_SSP) { | ||
631 | while ((read_SSSR(reg) & SSSR_BSY) && limit--) | ||
632 | drv_data->read(drv_data); | ||
633 | |||
634 | if (limit == 0) | ||
635 | dev_err(&drv_data->pdev->dev, | ||
636 | "interrupt_transfer: " | ||
637 | "trailing byte read failed\n"); | ||
638 | } | ||
639 | } | ||
640 | |||
641 | if ((irq_status & SSSR_TINT) | ||
642 | || (drv_data->rx == drv_data->rx_end)) { | ||
643 | |||
644 | /* Clear timeout */ | ||
645 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
646 | if (drv_data->ssp_type != PXA25x_SSP) | ||
647 | write_SSTO(0, reg); | ||
648 | write_SSSR(drv_data->clear_sr, reg); | ||
649 | |||
650 | /* Update total bytes transferred */ | ||
651 | msg->actual_length += drv_data->len; | ||
652 | |||
653 | /* Release chip select if requested, transfer delays are | ||
654 | * handled in pump_transfers */ | ||
655 | if (drv_data->cs_change) | ||
656 | drv_data->cs_control(PXA2XX_CS_DEASSERT); | ||
657 | |||
658 | /* Move to next transfer */ | ||
659 | msg->state = next_transfer(drv_data); | ||
660 | |||
661 | /* Schedule transfer tasklet */ | ||
662 | tasklet_schedule(&drv_data->pump_transfers); | ||
663 | } | ||
664 | } | ||
665 | |||
666 | /* We did something */ | ||
667 | return IRQ_HANDLED; | ||
668 | } | ||
669 | |||
670 | static irqreturn_t ssp_int(int irq, void *dev_id, struct pt_regs *regs) | ||
671 | { | ||
672 | struct driver_data *drv_data = (struct driver_data *)dev_id; | ||
673 | void *reg = drv_data->ioaddr; | ||
674 | |||
675 | if (!drv_data->cur_msg) { | ||
676 | |||
677 | write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg); | ||
678 | write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg); | ||
679 | if (drv_data->ssp_type != PXA25x_SSP) | ||
680 | write_SSTO(0, reg); | ||
681 | write_SSSR(drv_data->clear_sr, reg); | ||
682 | |||
683 | dev_err(&drv_data->pdev->dev, "bad message state " | ||
684 | "in interrupt handler"); | ||
685 | |||
686 | /* Never fail */ | ||
687 | return IRQ_HANDLED; | ||
688 | } | ||
689 | |||
690 | return drv_data->transfer_handler(drv_data); | ||
691 | } | ||
692 | |||
693 | static void pump_transfers(unsigned long data) | ||
694 | { | ||
695 | struct driver_data *drv_data = (struct driver_data *)data; | ||
696 | struct spi_message *message = NULL; | ||
697 | struct spi_transfer *transfer = NULL; | ||
698 | struct spi_transfer *previous = NULL; | ||
699 | struct chip_data *chip = NULL; | ||
700 | void *reg = drv_data->ioaddr; | ||
701 | u32 clk_div = 0; | ||
702 | u8 bits = 0; | ||
703 | u32 speed = 0; | ||
704 | u32 cr0; | ||
705 | |||
706 | /* Get current state information */ | ||
707 | message = drv_data->cur_msg; | ||
708 | transfer = drv_data->cur_transfer; | ||
709 | chip = drv_data->cur_chip; | ||
710 | |||
711 | /* Handle abort */ | ||
712 | if (message->state == ERROR_STATE) { | ||
713 | message->status = -EIO; | ||
714 | giveback(drv_data); | ||
715 | return; | ||
716 | } | ||
717 | |||
718 | /* Handle end of message */ | ||
719 | if (message->state == DONE_STATE) { | ||
720 | message->status = 0; | ||
721 | giveback(drv_data); | ||
722 | return; | ||
723 | } | ||
724 | |||
725 | /* Delay if requested at end of transfer */ | ||
726 | if (message->state == RUNNING_STATE) { | ||
727 | previous = list_entry(transfer->transfer_list.prev, | ||
728 | struct spi_transfer, | ||
729 | transfer_list); | ||
730 | if (previous->delay_usecs) | ||
731 | udelay(previous->delay_usecs); | ||
732 | } | ||
733 | |||
734 | /* Setup the transfer state based on the type of transfer */ | ||
735 | if (flush(drv_data) == 0) { | ||
736 | dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n"); | ||
737 | message->status = -EIO; | ||
738 | giveback(drv_data); | ||
739 | return; | ||
740 | } | ||
741 | drv_data->n_bytes = chip->n_bytes; | ||
742 | drv_data->dma_width = chip->dma_width; | ||
743 | drv_data->cs_control = chip->cs_control; | ||
744 | drv_data->tx = (void *)transfer->tx_buf; | ||
745 | drv_data->tx_end = drv_data->tx + transfer->len; | ||
746 | drv_data->rx = transfer->rx_buf; | ||
747 | drv_data->rx_end = drv_data->rx + transfer->len; | ||
748 | drv_data->rx_dma = transfer->rx_dma; | ||
749 | drv_data->tx_dma = transfer->tx_dma; | ||
750 | drv_data->len = transfer->len; | ||
751 | drv_data->write = drv_data->tx ? chip->write : null_writer; | ||
752 | drv_data->read = drv_data->rx ? chip->read : null_reader; | ||
753 | drv_data->cs_change = transfer->cs_change; | ||
754 | |||
755 | /* Change speed and bits per word on a per-transfer basis */ | ||
756 | if (transfer->speed_hz || transfer->bits_per_word) { | ||
757 | |||
758 | /* Disable clock */ | ||
759 | write_SSCR0(chip->cr0 & ~SSCR0_SSE, reg); | ||
760 | cr0 = chip->cr0; | ||
761 | bits = chip->bits_per_word; | ||
762 | speed = chip->speed_hz; | ||
763 | |||
764 | if (transfer->speed_hz) | ||
765 | speed = transfer->speed_hz; | ||
766 | |||
767 | if (transfer->bits_per_word) | ||
768 | bits = transfer->bits_per_word; | ||
769 | |||
770 | if (reg == SSP1_VIRT) | ||
771 | clk_div = SSP1_SerClkDiv(speed); | ||
772 | else if (reg == SSP2_VIRT) | ||
773 | clk_div = SSP2_SerClkDiv(speed); | ||
774 | else if (reg == SSP3_VIRT) | ||
775 | clk_div = SSP3_SerClkDiv(speed); | ||
776 | |||
777 | if (bits <= 8) { | ||
778 | drv_data->n_bytes = 1; | ||
779 | drv_data->dma_width = DCMD_WIDTH1; | ||
780 | drv_data->read = drv_data->read != null_reader ? | ||
781 | u8_reader : null_reader; | ||
782 | drv_data->write = drv_data->write != null_writer ? | ||
783 | u8_writer : null_writer; | ||
784 | } else if (bits <= 16) { | ||
785 | drv_data->n_bytes = 2; | ||
786 | drv_data->dma_width = DCMD_WIDTH2; | ||
787 | drv_data->read = drv_data->read != null_reader ? | ||
788 | u16_reader : null_reader; | ||
789 | drv_data->write = drv_data->write != null_writer ? | ||
790 | u16_writer : null_writer; | ||
791 | } else if (bits <= 32) { | ||
792 | drv_data->n_bytes = 4; | ||
793 | drv_data->dma_width = DCMD_WIDTH4; | ||
794 | drv_data->read = drv_data->read != null_reader ? | ||
795 | u32_reader : null_reader; | ||
796 | drv_data->write = drv_data->write != null_writer ? | ||
797 | u32_writer : null_writer; | ||
798 | } | ||
799 | |||
800 | cr0 = clk_div | ||
801 | | SSCR0_Motorola | ||
802 | | SSCR0_DataSize(bits > 16 ? bits - 16 : bits) | ||
803 | | SSCR0_SSE | ||
804 | | (bits > 16 ? SSCR0_EDSS : 0); | ||
805 | |||
806 | /* Start it back up */ | ||
807 | write_SSCR0(cr0, reg); | ||
808 | } | ||
809 | |||
810 | message->state = RUNNING_STATE; | ||
811 | |||
812 | /* Try to map dma buffer and do a dma transfer if successful */ | ||
813 | if ((drv_data->dma_mapped = map_dma_buffers(drv_data))) { | ||
814 | |||
815 | /* Ensure we have the correct interrupt handler */ | ||
816 | drv_data->transfer_handler = dma_transfer; | ||
817 | |||
818 | /* Setup rx DMA Channel */ | ||
819 | DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL; | ||
820 | DSADR(drv_data->rx_channel) = drv_data->ssdr_physical; | ||
821 | DTADR(drv_data->rx_channel) = drv_data->rx_dma; | ||
822 | if (drv_data->rx == drv_data->null_dma_buf) | ||
823 | /* No target address increment */ | ||
824 | DCMD(drv_data->rx_channel) = DCMD_FLOWSRC | ||
825 | | drv_data->dma_width | ||
826 | | chip->dma_burst_size | ||
827 | | drv_data->len; | ||
828 | else | ||
829 | DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR | ||
830 | | DCMD_FLOWSRC | ||
831 | | drv_data->dma_width | ||
832 | | chip->dma_burst_size | ||
833 | | drv_data->len; | ||
834 | |||
835 | /* Setup tx DMA Channel */ | ||
836 | DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL; | ||
837 | DSADR(drv_data->tx_channel) = drv_data->tx_dma; | ||
838 | DTADR(drv_data->tx_channel) = drv_data->ssdr_physical; | ||
839 | if (drv_data->tx == drv_data->null_dma_buf) | ||
840 | /* No source address increment */ | ||
841 | DCMD(drv_data->tx_channel) = DCMD_FLOWTRG | ||
842 | | drv_data->dma_width | ||
843 | | chip->dma_burst_size | ||
844 | | drv_data->len; | ||
845 | else | ||
846 | DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR | ||
847 | | DCMD_FLOWTRG | ||
848 | | drv_data->dma_width | ||
849 | | chip->dma_burst_size | ||
850 | | drv_data->len; | ||
851 | |||
852 | /* Enable dma end irqs on SSP to detect end of transfer */ | ||
853 | if (drv_data->ssp_type == PXA25x_SSP) | ||
854 | DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN; | ||
855 | |||
856 | /* FIXME: need to handle cs polarity */ | ||
857 | drv_data->cs_control(PXA2XX_CS_ASSERT); | ||
858 | |||
859 | /* Go baby, go */ | ||
860 | write_SSSR(drv_data->clear_sr, reg); | ||
861 | DCSR(drv_data->rx_channel) |= DCSR_RUN; | ||
862 | DCSR(drv_data->tx_channel) |= DCSR_RUN; | ||
863 | if (drv_data->ssp_type != PXA25x_SSP) | ||
864 | write_SSTO(chip->timeout, reg); | ||
865 | write_SSCR1(chip->cr1 | ||
866 | | chip->dma_threshold | ||
867 | | drv_data->dma_cr1, | ||
868 | reg); | ||
869 | } else { | ||
870 | /* Ensure we have the correct interrupt handler */ | ||
871 | drv_data->transfer_handler = interrupt_transfer; | ||
872 | |||
873 | /* FIXME: need to handle cs polarity */ | ||
874 | drv_data->cs_control(PXA2XX_CS_ASSERT); | ||
875 | |||
876 | /* Go baby, go */ | ||
877 | write_SSSR(drv_data->clear_sr, reg); | ||
878 | if (drv_data->ssp_type != PXA25x_SSP) | ||
879 | write_SSTO(chip->timeout, reg); | ||
880 | write_SSCR1(chip->cr1 | ||
881 | | chip->threshold | ||
882 | | drv_data->int_cr1, | ||
883 | reg); | ||
884 | } | ||
885 | } | ||
886 | |||
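For context, pump_transfers() honors the optional per-transfer speed_hz and bits_per_word overrides supplied through the generic SPI core. A minimal protocol-driver sketch that exercises the speed override is shown below; spi_message_init()/spi_message_add_tail() and spi_sync() are the generic core helpers, while the function name, buffer, and 1 MHz figure are illustrative assumptions only.

	/* Illustrative only: send one command at a reduced clock rate,
	 * which pump_transfers() picks up from the spi_transfer fields.
	 */
	static int example_send_cmd(struct spi_device *spi, const u8 *cmd, size_t len)
	{
		struct spi_transfer t = {
			.tx_buf		= cmd,
			.len		= len,
			.speed_hz	= 1000000,	/* slower than spi->max_speed_hz */
		};
		struct spi_message m;

		spi_message_init(&m);
		spi_message_add_tail(&t, &m);
		return spi_sync(spi, &m);
	}
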
887 | static void pump_messages(void *data) | ||
888 | { | ||
889 | struct driver_data *drv_data = data; | ||
890 | unsigned long flags; | ||
891 | |||
892 | /* Lock queue and check for queue work */ | ||
893 | spin_lock_irqsave(&drv_data->lock, flags); | ||
894 | if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) { | ||
895 | drv_data->busy = 0; | ||
896 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
897 | return; | ||
898 | } | ||
899 | |||
900 | /* Make sure we are not already running a message */ | ||
901 | if (drv_data->cur_msg) { | ||
902 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
903 | return; | ||
904 | } | ||
905 | |||
906 | /* Extract head of queue */ | ||
907 | drv_data->cur_msg = list_entry(drv_data->queue.next, | ||
908 | struct spi_message, queue); | ||
909 | list_del_init(&drv_data->cur_msg->queue); | ||
910 | |||
911 | /* Initial message state */ | ||
912 | drv_data->cur_msg->state = START_STATE; | ||
913 | drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next, | ||
914 | struct spi_transfer, | ||
915 | transfer_list); | ||
916 | |||
917 | /* Setup the SSP using the per chip configuration */ | ||
918 | drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi); | ||
919 | restore_state(drv_data); | ||
920 | |||
921 | /* Mark as busy and launch transfers */ | ||
922 | tasklet_schedule(&drv_data->pump_transfers); | ||
923 | |||
924 | drv_data->busy = 1; | ||
925 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
926 | } | ||
927 | |||
928 | static int transfer(struct spi_device *spi, struct spi_message *msg) | ||
929 | { | ||
930 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
931 | unsigned long flags; | ||
932 | |||
933 | spin_lock_irqsave(&drv_data->lock, flags); | ||
934 | |||
935 | if (drv_data->run == QUEUE_STOPPED) { | ||
936 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
937 | return -ESHUTDOWN; | ||
938 | } | ||
939 | |||
940 | msg->actual_length = 0; | ||
941 | msg->status = -EINPROGRESS; | ||
942 | msg->state = START_STATE; | ||
943 | |||
944 | list_add_tail(&msg->queue, &drv_data->queue); | ||
945 | |||
946 | if (drv_data->run == QUEUE_RUNNING && !drv_data->busy) | ||
947 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
948 | |||
949 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
950 | |||
951 | return 0; | ||
952 | } | ||
953 | |||
954 | static int setup(struct spi_device *spi) | ||
955 | { | ||
956 | struct pxa2xx_spi_chip *chip_info = NULL; | ||
957 | struct chip_data *chip; | ||
958 | struct driver_data *drv_data = spi_master_get_devdata(spi->master); | ||
959 | unsigned int clk_div; | ||
960 | |||
961 | if (!spi->bits_per_word) | ||
962 | spi->bits_per_word = 8; | ||
963 | |||
964 | if (drv_data->ssp_type != PXA25x_SSP | ||
965 | && (spi->bits_per_word < 4 || spi->bits_per_word > 32)) | ||
966 | return -EINVAL; | ||
967 | else if (spi->bits_per_word < 4 || spi->bits_per_word > 16) | ||
968 | return -EINVAL; | ||
969 | |||
970 | /* Only alloc (or use chip_info) on first setup */ | ||
971 | chip = spi_get_ctldata(spi); | ||
972 | if (chip == NULL) { | ||
973 | chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); | ||
974 | if (!chip) | ||
975 | return -ENOMEM; | ||
976 | |||
977 | chip->cs_control = null_cs_control; | ||
978 | chip->enable_dma = 0; | ||
979 | chip->timeout = SSP_TIMEOUT(1000); | ||
980 | chip->threshold = SSCR1_RxTresh(1) | SSCR1_TxTresh(1); | ||
981 | chip->dma_burst_size = drv_data->master_info->enable_dma ? | ||
982 | DCMD_BURST8 : 0; | ||
983 | |||
984 | chip_info = spi->controller_data; | ||
985 | } | ||
986 | |||
987 | /* chip_info isn't always needed */ | ||
988 | if (chip_info) { | ||
989 | if (chip_info->cs_control) | ||
990 | chip->cs_control = chip_info->cs_control; | ||
991 | |||
992 | chip->timeout = SSP_TIMEOUT(chip_info->timeout_microsecs); | ||
993 | |||
994 | chip->threshold = SSCR1_RxTresh(chip_info->rx_threshold) | ||
995 | | SSCR1_TxTresh(chip_info->tx_threshold); | ||
996 | |||
997 | chip->enable_dma = chip_info->dma_burst_size != 0 | ||
998 | && drv_data->master_info->enable_dma; | ||
999 | chip->dma_threshold = 0; | ||
1000 | |||
1001 | if (chip->enable_dma) { | ||
1002 | if (chip_info->dma_burst_size <= 8) { | ||
1003 | chip->dma_threshold = SSCR1_RxTresh(8) | ||
1004 | | SSCR1_TxTresh(8); | ||
1005 | chip->dma_burst_size = DCMD_BURST8; | ||
1006 | } else if (chip_info->dma_burst_size <= 16) { | ||
1007 | chip->dma_threshold = SSCR1_RxTresh(16) | ||
1008 | | SSCR1_TxTresh(16); | ||
1009 | chip->dma_burst_size = DCMD_BURST16; | ||
1010 | } else { | ||
1011 | chip->dma_threshold = SSCR1_RxTresh(32) | ||
1012 | | SSCR1_TxTresh(32); | ||
1013 | chip->dma_burst_size = DCMD_BURST32; | ||
1014 | } | ||
1015 | } | ||
1016 | |||
1017 | |||
1018 | if (chip_info->enable_loopback) | ||
1019 | chip->cr1 = SSCR1_LBM; | ||
1020 | } | ||
1021 | |||
1022 | if (drv_data->ioaddr == SSP1_VIRT) | ||
1023 | clk_div = SSP1_SerClkDiv(spi->max_speed_hz); | ||
1024 | else if (drv_data->ioaddr == SSP2_VIRT) | ||
1025 | clk_div = SSP2_SerClkDiv(spi->max_speed_hz); | ||
1026 | else if (drv_data->ioaddr == SSP3_VIRT) | ||
1027 | clk_div = SSP3_SerClkDiv(spi->max_speed_hz); | ||
1028 | else | ||
1029 | return -ENODEV; | ||
1030 | chip->speed_hz = spi->max_speed_hz; | ||
1031 | |||
1032 | chip->cr0 = clk_div | ||
1033 | | SSCR0_Motorola | ||
1034 | | SSCR0_DataSize(spi->bits_per_word > 16 ? | ||
1035 | spi->bits_per_word - 16 : spi->bits_per_word) | ||
1036 | | SSCR0_SSE | ||
1037 | | (spi->bits_per_word > 16 ? SSCR0_EDSS : 0); | ||
1038 | chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) << 4) | ||
1039 | | (((spi->mode & SPI_CPOL) != 0) << 3); | ||
1040 | |||
1041 | /* NOTE: PXA25x_SSP _could_ use external clocking ... */ | ||
1042 | if (drv_data->ssp_type != PXA25x_SSP) | ||
1043 | dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", | ||
1044 | spi->bits_per_word, | ||
1045 | (CLOCK_SPEED_HZ) | ||
1046 | / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), | ||
1047 | spi->mode & 0x3); | ||
1048 | else | ||
1049 | dev_dbg(&spi->dev, "%d bits/word, %d Hz, mode %d\n", | ||
1050 | spi->bits_per_word, | ||
1051 | (CLOCK_SPEED_HZ/2) | ||
1052 | / (1 + ((chip->cr0 & SSCR0_SCR) >> 8)), | ||
1053 | spi->mode & 0x3); | ||
1054 | |||
1055 | if (spi->bits_per_word <= 8) { | ||
1056 | chip->n_bytes = 1; | ||
1057 | chip->dma_width = DCMD_WIDTH1; | ||
1058 | chip->read = u8_reader; | ||
1059 | chip->write = u8_writer; | ||
1060 | } else if (spi->bits_per_word <= 16) { | ||
1061 | chip->n_bytes = 2; | ||
1062 | chip->dma_width = DCMD_WIDTH2; | ||
1063 | chip->read = u16_reader; | ||
1064 | chip->write = u16_writer; | ||
1065 | } else if (spi->bits_per_word <= 32) { | ||
1066 | chip->cr0 |= SSCR0_EDSS; | ||
1067 | chip->n_bytes = 4; | ||
1068 | chip->dma_width = DCMD_WIDTH4; | ||
1069 | chip->read = u32_reader; | ||
1070 | chip->write = u32_writer; | ||
1071 | } else { | ||
1072 | dev_err(&spi->dev, "invalid wordsize\n"); | ||
1073 | kfree(chip); | ||
1074 | return -ENODEV; | ||
1075 | } | ||
1076 | chip->bits_per_word = spi->bits_per_word; | ||
1077 | |||
1078 | spi_set_ctldata(spi, chip); | ||
1079 | |||
1080 | return 0; | ||
1081 | } | ||
1082 | |||
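setup() takes its per-device tuning from the pxa2xx_spi_chip structure passed as controller_data in the board's spi_board_info. A hedged board-file sketch follows; only the field names come from the code above, while the device name, chip-select handling, and numeric values are assumptions for illustration.

	/* Illustrative board-file fragment; values are examples only */
	static void example_cs_control(u32 command)
	{
		if (command == PXA2XX_CS_ASSERT) {
			/* drive the chip-select GPIO active here */
		} else {
			/* drive it inactive here */
		}
	}

	static struct pxa2xx_spi_chip example_chip_info = {
		.tx_threshold		= 8,
		.rx_threshold		= 8,
		.dma_burst_size		= 8,	/* 0 would disable DMA for this chip */
		.timeout_microsecs	= 64,
		.cs_control		= example_cs_control,
	};

	static struct spi_board_info example_board_info[] __initdata = {
		{
			.modalias	= "example-spi-device",	/* hypothetical */
			.max_speed_hz	= 1000000,
			.bus_num	= 1,
			.chip_select	= 0,
			.controller_data = &example_chip_info,
		},
	};

A board init function would then hand example_board_info to spi_register_board_info() so the core creates the device on bus 1, chip select 0.
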
1083 | static void cleanup(const struct spi_device *spi) | ||
1084 | { | ||
1085 | struct chip_data *chip = spi_get_ctldata((struct spi_device *)spi); | ||
1086 | |||
1087 | kfree(chip); | ||
1088 | } | ||
1089 | |||
1090 | static int init_queue(struct driver_data *drv_data) | ||
1091 | { | ||
1092 | INIT_LIST_HEAD(&drv_data->queue); | ||
1093 | spin_lock_init(&drv_data->lock); | ||
1094 | |||
1095 | drv_data->run = QUEUE_STOPPED; | ||
1096 | drv_data->busy = 0; | ||
1097 | |||
1098 | tasklet_init(&drv_data->pump_transfers, | ||
1099 | pump_transfers, (unsigned long)drv_data); | ||
1100 | |||
1101 | INIT_WORK(&drv_data->pump_messages, pump_messages, drv_data); | ||
1102 | drv_data->workqueue = create_singlethread_workqueue( | ||
1103 | drv_data->master->cdev.dev->bus_id); | ||
1104 | if (drv_data->workqueue == NULL) | ||
1105 | return -EBUSY; | ||
1106 | |||
1107 | return 0; | ||
1108 | } | ||
1109 | |||
1110 | static int start_queue(struct driver_data *drv_data) | ||
1111 | { | ||
1112 | unsigned long flags; | ||
1113 | |||
1114 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1115 | |||
1116 | if (drv_data->run == QUEUE_RUNNING || drv_data->busy) { | ||
1117 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1118 | return -EBUSY; | ||
1119 | } | ||
1120 | |||
1121 | drv_data->run = QUEUE_RUNNING; | ||
1122 | drv_data->cur_msg = NULL; | ||
1123 | drv_data->cur_transfer = NULL; | ||
1124 | drv_data->cur_chip = NULL; | ||
1125 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1126 | |||
1127 | queue_work(drv_data->workqueue, &drv_data->pump_messages); | ||
1128 | |||
1129 | return 0; | ||
1130 | } | ||
1131 | |||
1132 | static int stop_queue(struct driver_data *drv_data) | ||
1133 | { | ||
1134 | unsigned long flags; | ||
1135 | unsigned limit = 500; | ||
1136 | int status = 0; | ||
1137 | |||
1138 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1139 | |||
1140 | /* This is a bit lame, but is optimized for the common execution path. | ||
1141 | * A wait_queue on the drv_data->busy could be used, but then the common | ||
1142 | * execution path (pump_messages) would be required to call wake_up or | ||
1143 | * friends on every SPI message. Do this instead */ | ||
1144 | drv_data->run = QUEUE_STOPPED; | ||
1145 | while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) { | ||
1146 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1147 | msleep(10); | ||
1148 | spin_lock_irqsave(&drv_data->lock, flags); | ||
1149 | } | ||
1150 | |||
1151 | if (!list_empty(&drv_data->queue) || drv_data->busy) | ||
1152 | status = -EBUSY; | ||
1153 | |||
1154 | spin_unlock_irqrestore(&drv_data->lock, flags); | ||
1155 | |||
1156 | return status; | ||
1157 | } | ||
1158 | |||
1159 | static int destroy_queue(struct driver_data *drv_data) | ||
1160 | { | ||
1161 | int status; | ||
1162 | |||
1163 | status = stop_queue(drv_data); | ||
1164 | if (status != 0) | ||
1165 | return status; | ||
1166 | |||
1167 | destroy_workqueue(drv_data->workqueue); | ||
1168 | |||
1169 | return 0; | ||
1170 | } | ||
1171 | |||
1172 | static int pxa2xx_spi_probe(struct platform_device *pdev) | ||
1173 | { | ||
1174 | struct device *dev = &pdev->dev; | ||
1175 | struct pxa2xx_spi_master *platform_info; | ||
1176 | struct spi_master *master; | ||
1177 | struct driver_data *drv_data = NULL; | ||
1178 | struct resource *memory_resource; | ||
1179 | int irq; | ||
1180 | int status = 0; | ||
1181 | |||
1182 | platform_info = dev->platform_data; | ||
1183 | |||
1184 | if (platform_info->ssp_type == SSP_UNDEFINED) { | ||
1185 | dev_err(&pdev->dev, "undefined SSP\n"); | ||
1186 | return -ENODEV; | ||
1187 | } | ||
1188 | |||
1189 | /* Allocate master with space for drv_data and null dma buffer */ | ||
1190 | master = spi_alloc_master(dev, sizeof(struct driver_data) + 16); | ||
1191 | if (!master) { | ||
1192 | dev_err(&pdev->dev, "can not alloc spi_master\n"); | ||
1193 | return -ENOMEM; | ||
1194 | } | ||
1195 | drv_data = spi_master_get_devdata(master); | ||
1196 | drv_data->master = master; | ||
1197 | drv_data->master_info = platform_info; | ||
1198 | drv_data->pdev = pdev; | ||
1199 | |||
1200 | master->bus_num = pdev->id; | ||
1201 | master->num_chipselect = platform_info->num_chipselect; | ||
1202 | master->cleanup = cleanup; | ||
1203 | master->setup = setup; | ||
1204 | master->transfer = transfer; | ||
1205 | |||
1206 | drv_data->ssp_type = platform_info->ssp_type; | ||
1207 | drv_data->null_dma_buf = (u32 *)ALIGN((u32)drv_data + | ||
1208 | sizeof(struct driver_data), 8); | ||
1209 | |||
1210 | /* Setup register addresses */ | ||
1211 | memory_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1212 | if (!memory_resource) { | ||
1213 | dev_err(&pdev->dev, "memory resources not defined\n"); | ||
1214 | status = -ENODEV; | ||
1215 | goto out_error_master_alloc; | ||
1216 | } | ||
1217 | |||
1218 | drv_data->ioaddr = (void *)io_p2v((unsigned long)(memory_resource->start)); | ||
1219 | drv_data->ssdr_physical = memory_resource->start + 0x00000010; | ||
1220 | if (platform_info->ssp_type == PXA25x_SSP) { | ||
1221 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE; | ||
1222 | drv_data->dma_cr1 = 0; | ||
1223 | drv_data->clear_sr = SSSR_ROR; | ||
1224 | drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
1225 | } else { | ||
1226 | drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE; | ||
1227 | drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE; | ||
1228 | drv_data->clear_sr = SSSR_ROR | SSSR_TINT; | ||
1229 | drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR; | ||
1230 | } | ||
1231 | |||
1232 | /* Attach to IRQ */ | ||
1233 | irq = platform_get_irq(pdev, 0); | ||
1234 | if (irq < 0) { | ||
1235 | dev_err(&pdev->dev, "irq resource not defined\n"); | ||
1236 | status = -ENODEV; | ||
1237 | goto out_error_master_alloc; | ||
1238 | } | ||
1239 | |||
1240 | status = request_irq(irq, ssp_int, 0, dev->bus_id, drv_data); | ||
1241 | if (status < 0) { | ||
1242 | dev_err(&pdev->dev, "can not get IRQ\n"); | ||
1243 | goto out_error_master_alloc; | ||
1244 | } | ||
1245 | |||
1246 | /* Setup DMA if requested */ | ||
1247 | drv_data->tx_channel = -1; | ||
1248 | drv_data->rx_channel = -1; | ||
1249 | if (platform_info->enable_dma) { | ||
1250 | |||
1251 | /* Get two DMA channels (rx and tx) */ | ||
1252 | drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx", | ||
1253 | DMA_PRIO_HIGH, | ||
1254 | dma_handler, | ||
1255 | drv_data); | ||
1256 | if (drv_data->rx_channel < 0) { | ||
1257 | dev_err(dev, "problem (%d) requesting rx channel\n", | ||
1258 | drv_data->rx_channel); | ||
1259 | status = -ENODEV; | ||
1260 | goto out_error_irq_alloc; | ||
1261 | } | ||
1262 | drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx", | ||
1263 | DMA_PRIO_MEDIUM, | ||
1264 | dma_handler, | ||
1265 | drv_data); | ||
1266 | if (drv_data->tx_channel < 0) { | ||
1267 | dev_err(dev, "problem (%d) requesting tx channel\n", | ||
1268 | drv_data->tx_channel); | ||
1269 | status = -ENODEV; | ||
1270 | goto out_error_dma_alloc; | ||
1271 | } | ||
1272 | |||
1273 | if (drv_data->ioaddr == SSP1_VIRT) { | ||
1274 | DRCMRRXSSDR = DRCMR_MAPVLD | ||
1275 | | drv_data->rx_channel; | ||
1276 | DRCMRTXSSDR = DRCMR_MAPVLD | ||
1277 | | drv_data->tx_channel; | ||
1278 | } else if (drv_data->ioaddr == SSP2_VIRT) { | ||
1279 | DRCMRRXSS2DR = DRCMR_MAPVLD | ||
1280 | | drv_data->rx_channel; | ||
1281 | DRCMRTXSS2DR = DRCMR_MAPVLD | ||
1282 | | drv_data->tx_channel; | ||
1283 | } else if (drv_data->ioaddr == SSP3_VIRT) { | ||
1284 | DRCMRRXSS3DR = DRCMR_MAPVLD | ||
1285 | | drv_data->rx_channel; | ||
1286 | DRCMRTXSS3DR = DRCMR_MAPVLD | ||
1287 | | drv_data->tx_channel; | ||
1288 | } else { | ||
1289 | dev_err(dev, "bad SSP type\n"); | ||
1290 | goto out_error_dma_alloc; | ||
1291 | } | ||
1292 | } | ||
1293 | |||
1294 | /* Enable SOC clock */ | ||
1295 | pxa_set_cken(platform_info->clock_enable, 1); | ||
1296 | |||
1297 | /* Load default SSP configuration */ | ||
1298 | write_SSCR0(0, drv_data->ioaddr); | ||
1299 | write_SSCR1(SSCR1_RxTresh(4) | SSCR1_TxTresh(12), drv_data->ioaddr); | ||
1300 | write_SSCR0(SSCR0_SerClkDiv(2) | ||
1301 | | SSCR0_Motorola | ||
1302 | | SSCR0_DataSize(8), | ||
1303 | drv_data->ioaddr); | ||
1304 | if (drv_data->ssp_type != PXA25x_SSP) | ||
1305 | write_SSTO(0, drv_data->ioaddr); | ||
1306 | write_SSPSP(0, drv_data->ioaddr); | ||
1307 | |||
1308 | /* Initialize and start the queue */ | ||
1309 | status = init_queue(drv_data); | ||
1310 | if (status != 0) { | ||
1311 | dev_err(&pdev->dev, "problem initializing queue\n"); | ||
1312 | goto out_error_clock_enabled; | ||
1313 | } | ||
1314 | status = start_queue(drv_data); | ||
1315 | if (status != 0) { | ||
1316 | dev_err(&pdev->dev, "problem starting queue\n"); | ||
1317 | goto out_error_clock_enabled; | ||
1318 | } | ||
1319 | |||
1320 | /* Register with the SPI framework */ | ||
1321 | platform_set_drvdata(pdev, drv_data); | ||
1322 | status = spi_register_master(master); | ||
1323 | if (status != 0) { | ||
1324 | dev_err(&pdev->dev, "problem registering spi master\n"); | ||
1325 | goto out_error_queue_alloc; | ||
1326 | } | ||
1327 | |||
1328 | return status; | ||
1329 | |||
1330 | out_error_queue_alloc: | ||
1331 | destroy_queue(drv_data); | ||
1332 | |||
1333 | out_error_clock_enabled: | ||
1334 | pxa_set_cken(platform_info->clock_enable, 0); | ||
1335 | |||
1336 | out_error_dma_alloc: | ||
1337 | if (drv_data->tx_channel != -1) | ||
1338 | pxa_free_dma(drv_data->tx_channel); | ||
1339 | if (drv_data->rx_channel != -1) | ||
1340 | pxa_free_dma(drv_data->rx_channel); | ||
1341 | |||
1342 | out_error_irq_alloc: | ||
1343 | free_irq(irq, drv_data); | ||
1344 | |||
1345 | out_error_master_alloc: | ||
1346 | spi_master_put(master); | ||
1347 | return status; | ||
1348 | } | ||
1349 | |||
1350 | static int pxa2xx_spi_remove(struct platform_device *pdev) | ||
1351 | { | ||
1352 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
1353 | int irq; | ||
1354 | int status = 0; | ||
1355 | |||
1356 | if (!drv_data) | ||
1357 | return 0; | ||
1358 | |||
1359 | /* Remove the queue */ | ||
1360 | status = destroy_queue(drv_data); | ||
1361 | if (status != 0) | ||
1362 | return status; | ||
1363 | |||
1364 | /* Disable the SSP at the peripheral and SOC level */ | ||
1365 | write_SSCR0(0, drv_data->ioaddr); | ||
1366 | pxa_set_cken(drv_data->master_info->clock_enable, 0); | ||
1367 | |||
1368 | /* Release DMA */ | ||
1369 | if (drv_data->master_info->enable_dma) { | ||
1370 | if (drv_data->ioaddr == SSP1_VIRT) { | ||
1371 | DRCMRRXSSDR = 0; | ||
1372 | DRCMRTXSSDR = 0; | ||
1373 | } else if (drv_data->ioaddr == SSP2_VIRT) { | ||
1374 | DRCMRRXSS2DR = 0; | ||
1375 | DRCMRTXSS2DR = 0; | ||
1376 | } else if (drv_data->ioaddr == SSP3_VIRT) { | ||
1377 | DRCMRRXSS3DR = 0; | ||
1378 | DRCMRTXSS3DR = 0; | ||
1379 | } | ||
1380 | pxa_free_dma(drv_data->tx_channel); | ||
1381 | pxa_free_dma(drv_data->rx_channel); | ||
1382 | } | ||
1383 | |||
1384 | /* Release IRQ */ | ||
1385 | irq = platform_get_irq(pdev, 0); | ||
1386 | if (irq >= 0) | ||
1387 | free_irq(irq, drv_data); | ||
1388 | |||
1389 | /* Disconnect from the SPI framework */ | ||
1390 | spi_unregister_master(drv_data->master); | ||
1391 | |||
1392 | /* Prevent double remove */ | ||
1393 | platform_set_drvdata(pdev, NULL); | ||
1394 | |||
1395 | return 0; | ||
1396 | } | ||
1397 | |||
1398 | static void pxa2xx_spi_shutdown(struct platform_device *pdev) | ||
1399 | { | ||
1400 | int status = 0; | ||
1401 | |||
1402 | if ((status = pxa2xx_spi_remove(pdev)) != 0) | ||
1403 | dev_err(&pdev->dev, "shutdown failed with %d\n", status); | ||
1404 | } | ||
1405 | |||
1406 | #ifdef CONFIG_PM | ||
1407 | static int suspend_devices(struct device *dev, void *pm_message) | ||
1408 | { | ||
1409 | pm_message_t *state = pm_message; | ||
1410 | |||
1411 | if (dev->power.power_state.event != state->event) { | ||
1412 | dev_warn(dev, "pm state does not match request\n"); | ||
1413 | return -1; | ||
1414 | } | ||
1415 | |||
1416 | return 0; | ||
1417 | } | ||
1418 | |||
1419 | static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state) | ||
1420 | { | ||
1421 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
1422 | int status = 0; | ||
1423 | |||
1424 | /* Check all children for current power state */ | ||
1425 | if (device_for_each_child(&pdev->dev, &state, suspend_devices) != 0) { | ||
1426 | dev_warn(&pdev->dev, "suspend aborted\n"); | ||
1427 | return -1; | ||
1428 | } | ||
1429 | |||
1430 | status = stop_queue(drv_data); | ||
1431 | if (status != 0) | ||
1432 | return status; | ||
1433 | write_SSCR0(0, drv_data->ioaddr); | ||
1434 | pxa_set_cken(drv_data->master_info->clock_enable, 0); | ||
1435 | |||
1436 | return 0; | ||
1437 | } | ||
1438 | |||
1439 | static int pxa2xx_spi_resume(struct platform_device *pdev) | ||
1440 | { | ||
1441 | struct driver_data *drv_data = platform_get_drvdata(pdev); | ||
1442 | int status = 0; | ||
1443 | |||
1444 | /* Enable the SSP clock */ | ||
1445 | pxa_set_cken(drv_data->master_info->clock_enable, 1); | ||
1446 | |||
1447 | /* Start the queue running */ | ||
1448 | status = start_queue(drv_data); | ||
1449 | if (status != 0) { | ||
1450 | dev_err(&pdev->dev, "problem starting queue (%d)\n", status); | ||
1451 | return status; | ||
1452 | } | ||
1453 | |||
1454 | return 0; | ||
1455 | } | ||
1456 | #else | ||
1457 | #define pxa2xx_spi_suspend NULL | ||
1458 | #define pxa2xx_spi_resume NULL | ||
1459 | #endif /* CONFIG_PM */ | ||
1460 | |||
1461 | static struct platform_driver driver = { | ||
1462 | .driver = { | ||
1463 | .name = "pxa2xx-spi", | ||
1464 | .bus = &platform_bus_type, | ||
1465 | .owner = THIS_MODULE, | ||
1466 | }, | ||
1467 | .probe = pxa2xx_spi_probe, | ||
1468 | .remove = __devexit_p(pxa2xx_spi_remove), | ||
1469 | .shutdown = pxa2xx_spi_shutdown, | ||
1470 | .suspend = pxa2xx_spi_suspend, | ||
1471 | .resume = pxa2xx_spi_resume, | ||
1472 | }; | ||
1473 | |||
1474 | static int __init pxa2xx_spi_init(void) | ||
1475 | { | ||
1476 | platform_driver_register(&driver); | ||
1477 | |||
1478 | return 0; | ||
1479 | } | ||
1480 | module_init(pxa2xx_spi_init); | ||
1481 | |||
1482 | static void __exit pxa2xx_spi_exit(void) | ||
1483 | { | ||
1484 | platform_driver_unregister(&driver); | ||
1485 | } | ||
1486 | module_exit(pxa2xx_spi_exit); | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 94f5e8ed83a7..1cea4a6799fe 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -338,18 +338,18 @@ static struct class spi_master_class = { | |||
338 | * spi_alloc_master - allocate SPI master controller | 338 | * spi_alloc_master - allocate SPI master controller |
339 | * @dev: the controller, possibly using the platform_bus | 339 | * @dev: the controller, possibly using the platform_bus |
340 | * @size: how much driver-private data to preallocate; the pointer to this | 340 | * @size: how much driver-private data to preallocate; the pointer to this |
341 | * memory is in the class_data field of the returned class_device, | 341 | * memory is in the class_data field of the returned class_device, |
342 | * accessible with spi_master_get_devdata(). | 342 | * accessible with spi_master_get_devdata(). |
343 | * | 343 | * |
344 | * This call is used only by SPI master controller drivers, which are the | 344 | * This call is used only by SPI master controller drivers, which are the |
345 | * only ones directly touching chip registers. It's how they allocate | 345 | * only ones directly touching chip registers. It's how they allocate |
346 | * an spi_master structure, prior to calling spi_add_master(). | 346 | * an spi_master structure, prior to calling spi_register_master(). |
347 | * | 347 | * |
348 | * This must be called from context that can sleep. It returns the SPI | 348 | * This must be called from context that can sleep. It returns the SPI |
349 | * master structure on success, else NULL. | 349 | * master structure on success, else NULL. |
350 | * | 350 | * |
351 | * The caller is responsible for assigning the bus number and initializing | 351 | * The caller is responsible for assigning the bus number and initializing |
352 | * the master's methods before calling spi_add_master(); and (after errors | 352 | * the master's methods before calling spi_register_master(); and (after errors |
353 | * adding the device) calling spi_master_put() to prevent a memory leak. | 353 | * adding the device) calling spi_master_put() to prevent a memory leak. |
354 | */ | 354 | */ |
355 | struct spi_master * __init_or_module | 355 | struct spi_master * __init_or_module |
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(spi_alloc_master); | |||
395 | int __init_or_module | 395 | int __init_or_module |
396 | spi_register_master(struct spi_master *master) | 396 | spi_register_master(struct spi_master *master) |
397 | { | 397 | { |
398 | static atomic_t dyn_bus_id = ATOMIC_INIT(0); | 398 | static atomic_t dyn_bus_id = ATOMIC_INIT((1<<16) - 1); |
399 | struct device *dev = master->cdev.dev; | 399 | struct device *dev = master->cdev.dev; |
400 | int status = -ENODEV; | 400 | int status = -ENODEV; |
401 | int dynamic = 0; | 401 | int dynamic = 0; |
@@ -404,7 +404,7 @@ spi_register_master(struct spi_master *master) | |||
404 | return -ENODEV; | 404 | return -ENODEV; |
405 | 405 | ||
406 | /* convention: dynamically assigned bus IDs count down from the max */ | 406 | /* convention: dynamically assigned bus IDs count down from the max */ |
407 | if (master->bus_num == 0) { | 407 | if (master->bus_num < 0) { |
408 | master->bus_num = atomic_dec_return(&dyn_bus_id); | 408 | master->bus_num = atomic_dec_return(&dyn_bus_id); |
409 | dynamic = 1; | 409 | dynamic = 1; |
410 | } | 410 | } |
@@ -522,7 +522,8 @@ int spi_sync(struct spi_device *spi, struct spi_message *message) | |||
522 | } | 522 | } |
523 | EXPORT_SYMBOL_GPL(spi_sync); | 523 | EXPORT_SYMBOL_GPL(spi_sync); |
524 | 524 | ||
525 | #define SPI_BUFSIZ (SMP_CACHE_BYTES) | 525 | /* portable code must never pass more than 32 bytes */ |
526 | #define SPI_BUFSIZ max(32,SMP_CACHE_BYTES) | ||
526 | 527 | ||
527 | static u8 *buf; | 528 | static u8 *buf; |
528 | 529 | ||
diff --git a/drivers/spi/spi_bitbang.c b/drivers/spi/spi_bitbang.c index f037e5593269..dd2f950b21a7 100644 --- a/drivers/spi/spi_bitbang.c +++ b/drivers/spi/spi_bitbang.c | |||
@@ -138,6 +138,45 @@ static unsigned bitbang_txrx_32( | |||
138 | return t->len - count; | 138 | return t->len - count; |
139 | } | 139 | } |
140 | 140 | ||
141 | int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | ||
142 | { | ||
143 | struct spi_bitbang_cs *cs = spi->controller_state; | ||
144 | u8 bits_per_word; | ||
145 | u32 hz; | ||
146 | |||
147 | if (t) { | ||
148 | bits_per_word = t->bits_per_word; | ||
149 | hz = t->speed_hz; | ||
150 | } else { | ||
151 | bits_per_word = 0; | ||
152 | hz = 0; | ||
153 | } | ||
154 | |||
155 | /* spi_transfer level calls that work per-word */ | ||
156 | if (!bits_per_word) | ||
157 | bits_per_word = spi->bits_per_word; | ||
158 | if (bits_per_word <= 8) | ||
159 | cs->txrx_bufs = bitbang_txrx_8; | ||
160 | else if (bits_per_word <= 16) | ||
161 | cs->txrx_bufs = bitbang_txrx_16; | ||
162 | else if (bits_per_word <= 32) | ||
163 | cs->txrx_bufs = bitbang_txrx_32; | ||
164 | else | ||
165 | return -EINVAL; | ||
166 | |||
167 | /* nsecs = (clock period)/2 */ | ||
168 | if (!hz) | ||
169 | hz = spi->max_speed_hz; | ||
170 | if (hz) { | ||
171 | cs->nsecs = (1000000000/2) / hz; | ||
172 | if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000)) | ||
173 | return -EINVAL; | ||
174 | } | ||
175 | |||
176 | return 0; | ||
177 | } | ||
178 | EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer); | ||
179 | |||
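The nsecs value computed above is half the bit clock period and is later used to time the software delays. A worked example with an illustrative rate: for hz = 1000000 the period is 1000 ns, so cs->nsecs = (1000000000 / 2) / 1000000 = 500; the (MAX_UDELAY_MS * 1000 * 1000) check then rejects clock rates so slow that a single half-period could not be produced by one udelay() call.
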
141 | /** | 180 | /** |
142 | * spi_bitbang_setup - default setup for per-word I/O loops | 181 | * spi_bitbang_setup - default setup for per-word I/O loops |
143 | */ | 182 | */ |
@@ -145,8 +184,16 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
145 | { | 184 | { |
146 | struct spi_bitbang_cs *cs = spi->controller_state; | 185 | struct spi_bitbang_cs *cs = spi->controller_state; |
147 | struct spi_bitbang *bitbang; | 186 | struct spi_bitbang *bitbang; |
187 | int retval; | ||
148 | 188 | ||
149 | if (!spi->max_speed_hz) | 189 | bitbang = spi_master_get_devdata(spi->master); |
190 | |||
191 | /* REVISIT: some systems will want to support devices using lsb-first | ||
192 | * bit encodings on the wire. In pure software that would be trivial, | ||
193 | * just bitbang_txrx_le_cphaX() routines shifting the other way, and | ||
194 | * some hardware controllers also have this support. | ||
195 | */ | ||
196 | if ((spi->mode & SPI_LSB_FIRST) != 0) | ||
150 | return -EINVAL; | 197 | return -EINVAL; |
151 | 198 | ||
152 | if (!cs) { | 199 | if (!cs) { |
@@ -155,32 +202,20 @@ int spi_bitbang_setup(struct spi_device *spi) | |||
155 | return -ENOMEM; | 202 | return -ENOMEM; |
156 | spi->controller_state = cs; | 203 | spi->controller_state = cs; |
157 | } | 204 | } |
158 | bitbang = spi_master_get_devdata(spi->master); | ||
159 | 205 | ||
160 | if (!spi->bits_per_word) | 206 | if (!spi->bits_per_word) |
161 | spi->bits_per_word = 8; | 207 | spi->bits_per_word = 8; |
162 | 208 | ||
163 | /* spi_transfer level calls that work per-word */ | ||
164 | if (spi->bits_per_word <= 8) | ||
165 | cs->txrx_bufs = bitbang_txrx_8; | ||
166 | else if (spi->bits_per_word <= 16) | ||
167 | cs->txrx_bufs = bitbang_txrx_16; | ||
168 | else if (spi->bits_per_word <= 32) | ||
169 | cs->txrx_bufs = bitbang_txrx_32; | ||
170 | else | ||
171 | return -EINVAL; | ||
172 | |||
173 | /* per-word shift register access, in hardware or bitbanging */ | 209 | /* per-word shift register access, in hardware or bitbanging */ |
174 | cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; | 210 | cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)]; |
175 | if (!cs->txrx_word) | 211 | if (!cs->txrx_word) |
176 | return -EINVAL; | 212 | return -EINVAL; |
177 | 213 | ||
178 | /* nsecs = (clock period)/2 */ | 214 | retval = spi_bitbang_setup_transfer(spi, NULL); |
179 | cs->nsecs = (1000000000/2) / (spi->max_speed_hz); | 215 | if (retval < 0) |
180 | if (cs->nsecs > MAX_UDELAY_MS * 1000) | 216 | return retval; |
181 | return -EINVAL; | ||
182 | 217 | ||
183 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", | 218 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", |
184 | __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), | 219 | __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), |
185 | spi->bits_per_word, 2 * cs->nsecs); | 220 | spi->bits_per_word, 2 * cs->nsecs); |
186 | 221 | ||
@@ -246,6 +281,8 @@ static void bitbang_work(void *_bitbang) | |||
246 | unsigned tmp; | 281 | unsigned tmp; |
247 | unsigned cs_change; | 282 | unsigned cs_change; |
248 | int status; | 283 | int status; |
284 | int (*setup_transfer)(struct spi_device *, | ||
285 | struct spi_transfer *); | ||
249 | 286 | ||
250 | m = container_of(bitbang->queue.next, struct spi_message, | 287 | m = container_of(bitbang->queue.next, struct spi_message, |
251 | queue); | 288 | queue); |
@@ -262,6 +299,7 @@ static void bitbang_work(void *_bitbang) | |||
262 | tmp = 0; | 299 | tmp = 0; |
263 | cs_change = 1; | 300 | cs_change = 1; |
264 | status = 0; | 301 | status = 0; |
302 | setup_transfer = NULL; | ||
265 | 303 | ||
266 | list_for_each_entry (t, &m->transfers, transfer_list) { | 304 | list_for_each_entry (t, &m->transfers, transfer_list) { |
267 | if (bitbang->shutdown) { | 305 | if (bitbang->shutdown) { |
@@ -269,6 +307,20 @@ static void bitbang_work(void *_bitbang) | |||
269 | break; | 307 | break; |
270 | } | 308 | } |
271 | 309 | ||
310 | /* override or restore speed and wordsize */ | ||
311 | if (t->speed_hz || t->bits_per_word) { | ||
312 | setup_transfer = bitbang->setup_transfer; | ||
313 | if (!setup_transfer) { | ||
314 | status = -ENOPROTOOPT; | ||
315 | break; | ||
316 | } | ||
317 | } | ||
318 | if (setup_transfer) { | ||
319 | status = setup_transfer(spi, t); | ||
320 | if (status < 0) | ||
321 | break; | ||
322 | } | ||
323 | |||
272 | /* set up default clock polarity, and activate chip; | 324 | /* set up default clock polarity, and activate chip; |
273 | * this implicitly updates clock and spi modes as | 325 | * this implicitly updates clock and spi modes as |
274 | * previously recorded for this device via setup(). | 326 | * previously recorded for this device via setup(). |
@@ -325,6 +377,10 @@ static void bitbang_work(void *_bitbang) | |||
325 | m->status = status; | 377 | m->status = status; |
326 | m->complete(m->context); | 378 | m->complete(m->context); |
327 | 379 | ||
380 | /* restore speed and wordsize */ | ||
381 | if (setup_transfer) | ||
382 | setup_transfer(spi, NULL); | ||
383 | |||
328 | /* normally deactivate chipselect ... unless no error and | 384 | /* normally deactivate chipselect ... unless no error and |
329 | * cs_change has hinted that the next message will probably | 385 | * cs_change has hinted that the next message will probably |
330 | * be for this chip too. | 386 | * be for this chip too. |
@@ -348,6 +404,7 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) | |||
348 | { | 404 | { |
349 | struct spi_bitbang *bitbang; | 405 | struct spi_bitbang *bitbang; |
350 | unsigned long flags; | 406 | unsigned long flags; |
407 | int status = 0; | ||
351 | 408 | ||
352 | m->actual_length = 0; | 409 | m->actual_length = 0; |
353 | m->status = -EINPROGRESS; | 410 | m->status = -EINPROGRESS; |
@@ -357,11 +414,15 @@ int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m) | |||
357 | return -ESHUTDOWN; | 414 | return -ESHUTDOWN; |
358 | 415 | ||
359 | spin_lock_irqsave(&bitbang->lock, flags); | 416 | spin_lock_irqsave(&bitbang->lock, flags); |
360 | list_add_tail(&m->queue, &bitbang->queue); | 417 | if (!spi->max_speed_hz) |
361 | queue_work(bitbang->workqueue, &bitbang->work); | 418 | status = -ENETDOWN; |
419 | else { | ||
420 | list_add_tail(&m->queue, &bitbang->queue); | ||
421 | queue_work(bitbang->workqueue, &bitbang->work); | ||
422 | } | ||
362 | spin_unlock_irqrestore(&bitbang->lock, flags); | 423 | spin_unlock_irqrestore(&bitbang->lock, flags); |
363 | 424 | ||
364 | return 0; | 425 | return status; |
365 | } | 426 | } |
366 | EXPORT_SYMBOL_GPL(spi_bitbang_transfer); | 427 | EXPORT_SYMBOL_GPL(spi_bitbang_transfer); |
367 | 428 | ||
@@ -406,6 +467,9 @@ int spi_bitbang_start(struct spi_bitbang *bitbang) | |||
406 | bitbang->use_dma = 0; | 467 | bitbang->use_dma = 0; |
407 | bitbang->txrx_bufs = spi_bitbang_bufs; | 468 | bitbang->txrx_bufs = spi_bitbang_bufs; |
408 | if (!bitbang->master->setup) { | 469 | if (!bitbang->master->setup) { |
470 | if (!bitbang->setup_transfer) | ||
471 | bitbang->setup_transfer = | ||
472 | spi_bitbang_setup_transfer; | ||
409 | bitbang->master->setup = spi_bitbang_setup; | 473 | bitbang->master->setup = spi_bitbang_setup; |
410 | bitbang->master->cleanup = spi_bitbang_cleanup; | 474 | bitbang->master->cleanup = spi_bitbang_cleanup; |
411 | } | 475 | } |
diff --git a/drivers/spi/spi_butterfly.c b/drivers/spi/spi_butterfly.c index ff9e5faa4dc9..a006a1ee27ac 100644 --- a/drivers/spi/spi_butterfly.c +++ b/drivers/spi/spi_butterfly.c | |||
@@ -321,6 +321,7 @@ static void butterfly_attach(struct parport *p) | |||
321 | * (firmware resets at45, acts as spi slave) or neither (we ignore | 321 | * (firmware resets at45, acts as spi slave) or neither (we ignore |
322 | * both, AVR uses AT45). Here we expect firmware for the first option. | 322 | * both, AVR uses AT45). Here we expect firmware for the first option. |
323 | */ | 323 | */ |
324 | |||
324 | pp->info[0].max_speed_hz = 15 * 1000 * 1000; | 325 | pp->info[0].max_speed_hz = 15 * 1000 * 1000; |
325 | strcpy(pp->info[0].modalias, "mtd_dataflash"); | 326 | strcpy(pp->info[0].modalias, "mtd_dataflash"); |
326 | pp->info[0].platform_data = &flash; | 327 | pp->info[0].platform_data = &flash; |
diff --git a/drivers/spi/spi_mpc83xx.c b/drivers/spi/spi_mpc83xx.c new file mode 100644 index 000000000000..5d92a7e5cb41 --- /dev/null +++ b/drivers/spi/spi_mpc83xx.c | |||
@@ -0,0 +1,483 @@ | |||
1 | /* | ||
2 | * MPC83xx SPI controller driver. | ||
3 | * | ||
4 | * Maintainer: Kumar Gala | ||
5 | * | ||
6 | * Copyright (C) 2006 Polycom, Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/types.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/completion.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/irq.h> | ||
21 | #include <linux/device.h> | ||
22 | #include <linux/spi/spi.h> | ||
23 | #include <linux/spi/spi_bitbang.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/fsl_devices.h> | ||
26 | |||
27 | #include <asm/irq.h> | ||
28 | #include <asm/io.h> | ||
29 | |||
30 | /* SPI Controller registers */ | ||
31 | struct mpc83xx_spi_reg { | ||
32 | u8 res1[0x20]; | ||
33 | __be32 mode; | ||
34 | __be32 event; | ||
35 | __be32 mask; | ||
36 | __be32 command; | ||
37 | __be32 transmit; | ||
38 | __be32 receive; | ||
39 | }; | ||
40 | |||
41 | /* SPI Controller mode register definitions */ | ||
42 | #define SPMODE_CI_INACTIVEHIGH (1 << 29) | ||
43 | #define SPMODE_CP_BEGIN_EDGECLK (1 << 28) | ||
44 | #define SPMODE_DIV16 (1 << 27) | ||
45 | #define SPMODE_REV (1 << 26) | ||
46 | #define SPMODE_MS (1 << 25) | ||
47 | #define SPMODE_ENABLE (1 << 24) | ||
48 | #define SPMODE_LEN(x) ((x) << 20) | ||
49 | #define SPMODE_PM(x) ((x) << 16) | ||
50 | |||
51 | /* | ||
52 | * Default for SPI Mode: | ||
53 | * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk) | ||
54 | */ | ||
55 | #define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \ | ||
56 | SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf)) | ||
57 | |||
58 | /* SPIE register values */ | ||
59 | #define SPIE_NE 0x00000200 /* Not empty */ | ||
60 | #define SPIE_NF 0x00000100 /* Not full */ | ||
61 | |||
62 | /* SPIM register values */ | ||
63 | #define SPIM_NE 0x00000200 /* Not empty */ | ||
64 | #define SPIM_NF 0x00000100 /* Not full */ | ||
65 | |||
66 | /* SPI Controller driver's private data. */ | ||
67 | struct mpc83xx_spi { | ||
68 | /* bitbang has to be first */ | ||
69 | struct spi_bitbang bitbang; | ||
70 | struct completion done; | ||
71 | |||
72 | struct mpc83xx_spi_reg __iomem *base; | ||
73 | |||
74 | /* rx & tx bufs from the spi_transfer */ | ||
75 | const void *tx; | ||
76 | void *rx; | ||
77 | |||
78 | /* functions to deal with different sized buffers */ | ||
79 | void (*get_rx) (u32 rx_data, struct mpc83xx_spi *); | ||
80 | u32(*get_tx) (struct mpc83xx_spi *); | ||
81 | |||
82 | unsigned int count; | ||
83 | u32 irq; | ||
84 | |||
85 | unsigned nsecs; /* (clock cycle time)/2 */ | ||
86 | |||
87 | u32 sysclk; | ||
88 | void (*activate_cs) (u8 cs, u8 polarity); | ||
89 | void (*deactivate_cs) (u8 cs, u8 polarity); | ||
90 | }; | ||
91 | |||
92 | static inline void mpc83xx_spi_write_reg(__be32 __iomem * reg, u32 val) | ||
93 | { | ||
94 | out_be32(reg, val); | ||
95 | } | ||
96 | |||
97 | static inline u32 mpc83xx_spi_read_reg(__be32 __iomem * reg) | ||
98 | { | ||
99 | return in_be32(reg); | ||
100 | } | ||
101 | |||
102 | #define MPC83XX_SPI_RX_BUF(type) \ | ||
103 | void mpc83xx_spi_rx_buf_##type(u32 data, struct mpc83xx_spi *mpc83xx_spi) \ | ||
104 | { \ | ||
105 | type * rx = mpc83xx_spi->rx; \ | ||
106 | *rx++ = (type)data; \ | ||
107 | mpc83xx_spi->rx = rx; \ | ||
108 | } | ||
109 | |||
110 | #define MPC83XX_SPI_TX_BUF(type) \ | ||
111 | u32 mpc83xx_spi_tx_buf_##type(struct mpc83xx_spi *mpc83xx_spi) \ | ||
112 | { \ | ||
113 | u32 data; \ | ||
114 | const type * tx = mpc83xx_spi->tx; \ | ||
115 | data = *tx++; \ | ||
116 | mpc83xx_spi->tx = tx; \ | ||
117 | return data; \ | ||
118 | } | ||
119 | |||
120 | MPC83XX_SPI_RX_BUF(u8) | ||
121 | MPC83XX_SPI_RX_BUF(u16) | ||
122 | MPC83XX_SPI_RX_BUF(u32) | ||
123 | MPC83XX_SPI_TX_BUF(u8) | ||
124 | MPC83XX_SPI_TX_BUF(u16) | ||
125 | MPC83XX_SPI_TX_BUF(u32) | ||
126 | |||
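For readability, the six MPC83XX_SPI_*_BUF() lines above instantiate one rx/tx accessor per word size; for example, MPC83XX_SPI_RX_BUF(u8) expands to roughly the following:

	void mpc83xx_spi_rx_buf_u8(u32 data, struct mpc83xx_spi *mpc83xx_spi)
	{
		u8 *rx = mpc83xx_spi->rx;	/* advance the rx cursor by one u8 */
		*rx++ = (u8)data;
		mpc83xx_spi->rx = rx;
	}
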
127 | static void mpc83xx_spi_chipselect(struct spi_device *spi, int value) | ||
128 | { | ||
129 | struct mpc83xx_spi *mpc83xx_spi; | ||
130 | u8 pol = spi->mode & SPI_CS_HIGH ? 1 : 0; | ||
131 | |||
132 | mpc83xx_spi = spi_master_get_devdata(spi->master); | ||
133 | |||
134 | if (value == BITBANG_CS_INACTIVE) { | ||
135 | if (mpc83xx_spi->deactivate_cs) | ||
136 | mpc83xx_spi->deactivate_cs(spi->chip_select, pol); | ||
137 | } | ||
138 | |||
139 | if (value == BITBANG_CS_ACTIVE) { | ||
140 | u32 regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); | ||
141 | u32 len = spi->bits_per_word; | ||
142 | if (len == 32) | ||
143 | len = 0; | ||
144 | else | ||
145 | len = len - 1; | ||
146 | |||
147 | /* mask out bits we are going to set */ | ||
148 | regval &= ~0x38ff0000; | ||
149 | |||
150 | if (spi->mode & SPI_CPHA) | ||
151 | regval |= SPMODE_CP_BEGIN_EDGECLK; | ||
152 | if (spi->mode & SPI_CPOL) | ||
153 | regval |= SPMODE_CI_INACTIVEHIGH; | ||
154 | |||
155 | regval |= SPMODE_LEN(len); | ||
156 | |||
157 | if ((mpc83xx_spi->sysclk / spi->max_speed_hz) >= 64) { | ||
158 | u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 64); | ||
159 | regval |= SPMODE_PM(pm) | SPMODE_DIV16; | ||
160 | } else { | ||
161 | u8 pm = mpc83xx_spi->sysclk / (spi->max_speed_hz * 4); | ||
162 | regval |= SPMODE_PM(pm); | ||
163 | } | ||
164 | |||
165 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); | ||
166 | if (mpc83xx_spi->activate_cs) | ||
167 | mpc83xx_spi->activate_cs(spi->chip_select, pol); | ||
168 | } | ||
169 | } | ||
170 | |||
171 | static | ||
172 | int mpc83xx_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) | ||
173 | { | ||
174 | struct mpc83xx_spi *mpc83xx_spi; | ||
175 | u32 regval; | ||
176 | u8 bits_per_word; | ||
177 | u32 hz; | ||
178 | |||
179 | mpc83xx_spi = spi_master_get_devdata(spi->master); | ||
180 | |||
181 | if (t) { | ||
182 | bits_per_word = t->bits_per_word; | ||
183 | hz = t->speed_hz; | ||
184 | } else { | ||
185 | bits_per_word = 0; | ||
186 | hz = 0; | ||
187 | } | ||
188 | |||
189 | /* spi_transfer level calls that work per-word */ | ||
190 | if (!bits_per_word) | ||
191 | bits_per_word = spi->bits_per_word; | ||
192 | |||
193 | /* Make sure it's a bit width we support [4..16, 32] */ | ||
194 | if ((bits_per_word < 4) | ||
195 | || ((bits_per_word > 16) && (bits_per_word != 32))) | ||
196 | return -EINVAL; | ||
197 | |||
198 | if (bits_per_word <= 8) { | ||
199 | mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; | ||
200 | mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; | ||
201 | } else if (bits_per_word <= 16) { | ||
202 | mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u16; | ||
203 | mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u16; | ||
204 | } else if (bits_per_word <= 32) { | ||
205 | mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u32; | ||
206 | mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u32; | ||
207 | } else | ||
208 | return -EINVAL; | ||
209 | |||
210 | /* nsecs = (clock period)/2 */ | ||
211 | if (!hz) | ||
212 | hz = spi->max_speed_hz; | ||
213 | mpc83xx_spi->nsecs = (1000000000 / 2) / hz; | ||
214 | if (mpc83xx_spi->nsecs > MAX_UDELAY_MS * 1000 * 1000) | ||
215 | return -EINVAL; | ||
216 | |||
217 | if (bits_per_word == 32) | ||
218 | bits_per_word = 0; | ||
219 | else | ||
220 | bits_per_word = bits_per_word - 1; | ||
221 | |||
222 | regval = mpc83xx_spi_read_reg(&mpc83xx_spi->base->mode); | ||
223 | |||
224 | /* Mask out the word length bits */ | ||
225 | regval &= 0xff0fffff; | ||
226 | regval |= SPMODE_LEN(bits_per_word); | ||
227 | |||
228 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | static int mpc83xx_spi_setup(struct spi_device *spi) | ||
234 | { | ||
235 | struct spi_bitbang *bitbang; | ||
236 | struct mpc83xx_spi *mpc83xx_spi; | ||
237 | int retval; | ||
238 | |||
239 | if (!spi->max_speed_hz) | ||
240 | return -EINVAL; | ||
241 | |||
242 | bitbang = spi_master_get_devdata(spi->master); | ||
243 | mpc83xx_spi = spi_master_get_devdata(spi->master); | ||
244 | |||
245 | if (!spi->bits_per_word) | ||
246 | spi->bits_per_word = 8; | ||
247 | |||
248 | retval = mpc83xx_spi_setup_transfer(spi, NULL); | ||
249 | if (retval < 0) | ||
250 | return retval; | ||
251 | |||
252 | dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec\n", | ||
253 | __FUNCTION__, spi->mode & (SPI_CPOL | SPI_CPHA), | ||
254 | spi->bits_per_word, 2 * mpc83xx_spi->nsecs); | ||
255 | |||
256 | /* NOTE we _need_ to call chipselect() early, ideally with adapter | ||
257 | * setup, unless the hardware defaults cooperate to avoid confusion | ||
258 | * between normal (active low) and inverted chipselects. | ||
259 | */ | ||
260 | |||
261 | /* deselect chip (low or high) */ | ||
262 | spin_lock(&bitbang->lock); | ||
263 | if (!bitbang->busy) { | ||
264 | bitbang->chipselect(spi, BITBANG_CS_INACTIVE); | ||
265 | ndelay(mpc83xx_spi->nsecs); | ||
266 | } | ||
267 | spin_unlock(&bitbang->lock); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static int mpc83xx_spi_bufs(struct spi_device *spi, struct spi_transfer *t) | ||
273 | { | ||
274 | struct mpc83xx_spi *mpc83xx_spi; | ||
275 | u32 word; | ||
276 | |||
277 | mpc83xx_spi = spi_master_get_devdata(spi->master); | ||
278 | |||
279 | mpc83xx_spi->tx = t->tx_buf; | ||
280 | mpc83xx_spi->rx = t->rx_buf; | ||
281 | mpc83xx_spi->count = t->len; | ||
282 | INIT_COMPLETION(mpc83xx_spi->done); | ||
283 | |||
284 | /* enable rx ints */ | ||
285 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, SPIM_NE); | ||
286 | |||
287 | /* transmit word */ | ||
288 | word = mpc83xx_spi->get_tx(mpc83xx_spi); | ||
289 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, word); | ||
290 | |||
291 | wait_for_completion(&mpc83xx_spi->done); | ||
292 | |||
293 | /* disable rx ints */ | ||
294 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); | ||
295 | |||
296 | return t->len - mpc83xx_spi->count; | ||
297 | } | ||
298 | |||
299 | irqreturn_t mpc83xx_spi_irq(s32 irq, void *context_data, | ||
300 | struct pt_regs * ptregs) | ||
301 | { | ||
302 | struct mpc83xx_spi *mpc83xx_spi = context_data; | ||
303 | u32 event; | ||
304 | irqreturn_t ret = IRQ_NONE; | ||
305 | |||
306 | /* Get interrupt events (tx/rx) */ | ||
307 | event = mpc83xx_spi_read_reg(&mpc83xx_spi->base->event); | ||
308 | |||
309 | /* We need to handle RX first */ | ||
310 | if (event & SPIE_NE) { | ||
311 | u32 rx_data = mpc83xx_spi_read_reg(&mpc83xx_spi->base->receive); | ||
312 | |||
313 | if (mpc83xx_spi->rx) | ||
314 | mpc83xx_spi->get_rx(rx_data, mpc83xx_spi); | ||
315 | |||
316 | ret = IRQ_HANDLED; | ||
317 | } | ||
318 | |||
319 | if ((event & SPIE_NF) == 0) | ||
320 | /* spin until TX is done */ | ||
321 | while (((event = | ||
322 | mpc83xx_spi_read_reg(&mpc83xx_spi->base->event)) & | ||
323 | SPIE_NF) == 0) | ||
324 | cpu_relax(); | ||
325 | |||
326 | mpc83xx_spi->count -= 1; | ||
327 | if (mpc83xx_spi->count) { | ||
328 | if (mpc83xx_spi->tx) { | ||
329 | u32 word = mpc83xx_spi->get_tx(mpc83xx_spi); | ||
330 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->transmit, | ||
331 | word); | ||
332 | } | ||
333 | } else { | ||
334 | complete(&mpc83xx_spi->done); | ||
335 | } | ||
336 | |||
337 | /* Clear the events */ | ||
338 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, event); | ||
339 | |||
340 | return ret; | ||
341 | } | ||
342 | |||
343 | static int __init mpc83xx_spi_probe(struct platform_device *dev) | ||
344 | { | ||
345 | struct spi_master *master; | ||
346 | struct mpc83xx_spi *mpc83xx_spi; | ||
347 | struct fsl_spi_platform_data *pdata; | ||
348 | struct resource *r; | ||
349 | u32 regval; | ||
350 | int ret = 0; | ||
351 | |||
352 | /* Get resources (memory, IRQ) associated with the device */ | ||
353 | master = spi_alloc_master(&dev->dev, sizeof(struct mpc83xx_spi)); | ||
354 | |||
355 | if (master == NULL) { | ||
356 | ret = -ENOMEM; | ||
357 | goto err; | ||
358 | } | ||
359 | |||
360 | platform_set_drvdata(dev, master); | ||
361 | pdata = dev->dev.platform_data; | ||
362 | |||
363 | if (pdata == NULL) { | ||
364 | ret = -ENODEV; | ||
365 | goto free_master; | ||
366 | } | ||
367 | |||
368 | r = platform_get_resource(dev, IORESOURCE_MEM, 0); | ||
369 | if (r == NULL) { | ||
370 | ret = -ENODEV; | ||
371 | goto free_master; | ||
372 | } | ||
373 | |||
374 | mpc83xx_spi = spi_master_get_devdata(master); | ||
375 | mpc83xx_spi->bitbang.master = spi_master_get(master); | ||
376 | mpc83xx_spi->bitbang.chipselect = mpc83xx_spi_chipselect; | ||
377 | mpc83xx_spi->bitbang.setup_transfer = mpc83xx_spi_setup_transfer; | ||
378 | mpc83xx_spi->bitbang.txrx_bufs = mpc83xx_spi_bufs; | ||
379 | mpc83xx_spi->sysclk = pdata->sysclk; | ||
380 | mpc83xx_spi->activate_cs = pdata->activate_cs; | ||
381 | mpc83xx_spi->deactivate_cs = pdata->deactivate_cs; | ||
382 | mpc83xx_spi->get_rx = mpc83xx_spi_rx_buf_u8; | ||
383 | mpc83xx_spi->get_tx = mpc83xx_spi_tx_buf_u8; | ||
384 | |||
385 | mpc83xx_spi->bitbang.master->setup = mpc83xx_spi_setup; | ||
386 | init_completion(&mpc83xx_spi->done); | ||
387 | |||
388 | mpc83xx_spi->base = ioremap(r->start, r->end - r->start + 1); | ||
389 | if (mpc83xx_spi->base == NULL) { | ||
390 | ret = -ENOMEM; | ||
391 | goto put_master; | ||
392 | } | ||
393 | |||
394 | mpc83xx_spi->irq = platform_get_irq(dev, 0); | ||
395 | |||
396 | if (mpc83xx_spi->irq < 0) { | ||
397 | ret = -ENXIO; | ||
398 | goto unmap_io; | ||
399 | } | ||
400 | |||
401 | /* Register for SPI Interrupt */ | ||
402 | ret = request_irq(mpc83xx_spi->irq, mpc83xx_spi_irq, | ||
403 | 0, "mpc83xx_spi", mpc83xx_spi); | ||
404 | |||
405 | if (ret != 0) | ||
406 | goto unmap_io; | ||
407 | |||
408 | master->bus_num = pdata->bus_num; | ||
409 | master->num_chipselect = pdata->max_chipselect; | ||
410 | |||
411 | /* SPI controller initializations */ | ||
412 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, 0); | ||
413 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mask, 0); | ||
414 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->command, 0); | ||
415 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->event, 0xffffffff); | ||
416 | |||
417 | /* Enable SPI interface */ | ||
418 | regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE; | ||
419 | mpc83xx_spi_write_reg(&mpc83xx_spi->base->mode, regval); | ||
420 | |||
421 | ret = spi_bitbang_start(&mpc83xx_spi->bitbang); | ||
422 | |||
423 | if (ret != 0) | ||
424 | goto free_irq; | ||
425 | |||
426 | printk(KERN_INFO | ||
427 | "%s: MPC83xx SPI Controller driver at 0x%p (irq = %d)\n", | ||
428 | dev->dev.bus_id, mpc83xx_spi->base, mpc83xx_spi->irq); | ||
429 | |||
430 | return ret; | ||
431 | |||
432 | free_irq: | ||
433 | free_irq(mpc83xx_spi->irq, mpc83xx_spi); | ||
434 | unmap_io: | ||
435 | iounmap(mpc83xx_spi->base); | ||
436 | put_master: | ||
437 | spi_master_put(master); | ||
438 | free_master: | ||
439 | kfree(master); | ||
440 | err: | ||
441 | return ret; | ||
442 | } | ||
443 | |||
444 | static int __devexit mpc83xx_spi_remove(struct platform_device *dev) | ||
445 | { | ||
446 | struct mpc83xx_spi *mpc83xx_spi; | ||
447 | struct spi_master *master; | ||
448 | |||
449 | master = platform_get_drvdata(dev); | ||
450 | mpc83xx_spi = spi_master_get_devdata(master); | ||
451 | |||
452 | spi_bitbang_stop(&mpc83xx_spi->bitbang); | ||
453 | free_irq(mpc83xx_spi->irq, mpc83xx_spi); | ||
454 | iounmap(mpc83xx_spi->base); | ||
455 | spi_master_put(mpc83xx_spi->bitbang.master); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static struct platform_driver mpc83xx_spi_driver = { | ||
461 | .probe = mpc83xx_spi_probe, | ||
462 | .remove = __devexit_p(mpc83xx_spi_remove), | ||
463 | .driver = { | ||
464 | .name = "mpc83xx_spi", | ||
465 | }, | ||
466 | }; | ||
467 | |||
468 | static int __init mpc83xx_spi_init(void) | ||
469 | { | ||
470 | return platform_driver_register(&mpc83xx_spi_driver); | ||
471 | } | ||
472 | |||
473 | static void __exit mpc83xx_spi_exit(void) | ||
474 | { | ||
475 | platform_driver_unregister(&mpc83xx_spi_driver); | ||
476 | } | ||
477 | |||
478 | module_init(mpc83xx_spi_init); | ||
479 | module_exit(mpc83xx_spi_exit); | ||
480 | |||
481 | MODULE_AUTHOR("Kumar Gala"); | ||
482 | MODULE_DESCRIPTION("Simple MPC83xx SPI Driver"); | ||
483 | MODULE_LICENSE("GPL"); | ||
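The probe() above takes everything board specific from its fsl_spi_platform_data: sysclk, bus_num, max_chipselect, initial_spmode and the optional activate_cs/deactivate_cs hooks. A minimal board-side sketch follows; it is not part of this patch, and the header path, clock value and chip-select bodies are assumptions for a hypothetical board.

#include <linux/fsl_devices.h>	/* assumed home of struct fsl_spi_platform_data */

/* hypothetical chip-select hooks; a real board would toggle a GPIO here */
static void board_spi_activate_cs(u8 cs, u8 polarity) { }
static void board_spi_deactivate_cs(u8 cs, u8 polarity) { }

static struct fsl_spi_platform_data board_spi_pdata = {
	.initial_spmode	= 0,		/* ORed into SPMODE by probe() */
	.sysclk		= 133333333,	/* example bus clock in Hz, board specific */
	.bus_num	= 0,
	.max_chipselect	= 1,
	.activate_cs	= board_spi_activate_cs,
	.deactivate_cs	= board_spi_deactivate_cs,
};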
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c new file mode 100644 index 000000000000..9de4b5a04d70 --- /dev/null +++ b/drivers/spi/spi_s3c24xx.c | |||
@@ -0,0 +1,453 @@ | |||
1 | /* linux/drivers/spi/spi_s3c24xx.c | ||
2 | * | ||
3 | * Copyright (c) 2006 Ben Dooks | ||
4 | * Copyright (c) 2006 Simtec Electronics | ||
5 | * Ben Dooks <ben@simtec.co.uk> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | |||
14 | //#define DEBUG | ||
15 | |||
16 | #include <linux/config.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/workqueue.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/delay.h> | ||
22 | #include <linux/errno.h> | ||
23 | #include <linux/err.h> | ||
24 | #include <linux/clk.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | |||
27 | #include <linux/spi/spi.h> | ||
28 | #include <linux/spi/spi_bitbang.h> | ||
29 | |||
30 | #include <asm/io.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/hardware.h> | ||
33 | |||
34 | #include <asm/arch/regs-gpio.h> | ||
35 | #include <asm/arch/regs-spi.h> | ||
36 | #include <asm/arch/spi.h> | ||
37 | |||
38 | struct s3c24xx_spi { | ||
39 | /* bitbang has to be first */ | ||
40 | struct spi_bitbang bitbang; | ||
41 | struct completion done; | ||
42 | |||
43 | void __iomem *regs; | ||
44 | int irq; | ||
45 | int len; | ||
46 | int count; | ||
47 | |||
48 | /* data buffers */ | ||
49 | const unsigned char *tx; | ||
50 | unsigned char *rx; | ||
51 | |||
52 | struct clk *clk; | ||
53 | struct resource *ioarea; | ||
54 | struct spi_master *master; | ||
55 | struct spi_device *curdev; | ||
56 | struct device *dev; | ||
57 | struct s3c2410_spi_info *pdata; | ||
58 | }; | ||
59 | |||
60 | #define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) | ||
61 | #define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) | ||
62 | |||
63 | static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) | ||
64 | { | ||
65 | return spi_master_get_devdata(sdev->master); | ||
66 | } | ||
67 | |||
68 | static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) | ||
69 | { | ||
70 | struct s3c24xx_spi *hw = to_hw(spi); | ||
71 | unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; | ||
72 | unsigned int spcon; | ||
73 | |||
74 | switch (value) { | ||
75 | case BITBANG_CS_INACTIVE: | ||
76 | if (hw->pdata->set_cs) | ||
77 | hw->pdata->set_cs(hw->pdata, value, cspol); | ||
78 | else | ||
79 | s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol ^ 1); | ||
80 | break; | ||
81 | |||
82 | case BITBANG_CS_ACTIVE: | ||
83 | spcon = readb(hw->regs + S3C2410_SPCON); | ||
84 | |||
85 | if (spi->mode & SPI_CPHA) | ||
86 | spcon |= S3C2410_SPCON_CPHA_FMTB; | ||
87 | else | ||
88 | spcon &= ~S3C2410_SPCON_CPHA_FMTB; | ||
89 | |||
90 | if (spi->mode & SPI_CPOL) | ||
91 | spcon |= S3C2410_SPCON_CPOL_HIGH; | ||
92 | else | ||
93 | spcon &= ~S3C2410_SPCON_CPOL_HIGH; | ||
94 | |||
95 | spcon |= S3C2410_SPCON_ENSCK; | ||
96 | |||
97 | /* write new configuration */ | ||
98 | |||
99 | writeb(spcon, hw->regs + S3C2410_SPCON); | ||
100 | |||
101 | if (hw->pdata->set_cs) | ||
102 | hw->pdata->set_cs(hw->pdata, value, cspol); | ||
103 | else | ||
104 | s3c2410_gpio_setpin(hw->pdata->pin_cs, cspol); | ||
105 | |||
106 | break; | ||
107 | |||
108 | } | ||
109 | } | ||
110 | |||
111 | static int s3c24xx_spi_setupxfer(struct spi_device *spi, | ||
112 | struct spi_transfer *t) | ||
113 | { | ||
114 | struct s3c24xx_spi *hw = to_hw(spi); | ||
115 | unsigned int bpw; | ||
116 | unsigned int hz; | ||
117 | int div; | ||
118 | |||
119 | bpw = t ? t->bits_per_word : spi->bits_per_word; | ||
120 | hz = t ? t->speed_hz : spi->max_speed_hz; | ||
121 | |||
122 | if (bpw != 8) { | ||
123 | dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); | ||
124 | return -EINVAL; | ||
125 | } | ||
126 | |||
127 | div = clk_get_rate(hw->clk) / hz; | ||
128 | |||
129 | /* is clk = pclk / (2 * (pre+1)), or is it | ||
130 | * clk = (pclk * 2) / ( pre + 1) */ | ||
131 | |||
132 | div = (div / 2) - 1; | ||
133 | |||
134 | if (div < 0) | ||
135 | div = 1; | ||
136 | |||
137 | if (div > 255) | ||
138 | div = 255; | ||
139 | |||
140 | dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", div, hz); | ||
141 | writeb(div, hw->regs + S3C2410_SPPRE); | ||
142 | |||
143 | spin_lock(&hw->bitbang.lock); | ||
144 | if (!hw->bitbang.busy) { | ||
145 | hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); | ||
146 | /* need to ndelay for 0.5 clocktick ? */ | ||
147 | } | ||
148 | spin_unlock(&hw->bitbang.lock); | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | static int s3c24xx_spi_setup(struct spi_device *spi) | ||
154 | { | ||
155 | int ret; | ||
156 | |||
157 | if (!spi->bits_per_word) | ||
158 | spi->bits_per_word = 8; | ||
159 | |||
160 | if ((spi->mode & SPI_LSB_FIRST) != 0) | ||
161 | return -EINVAL; | ||
162 | |||
163 | ret = s3c24xx_spi_setupxfer(spi, NULL); | ||
164 | if (ret < 0) { | ||
165 | dev_err(&spi->dev, "setupxfer returned %d\n", ret); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", | ||
170 | __FUNCTION__, spi->mode, spi->bits_per_word, | ||
171 | spi->max_speed_hz); | ||
172 | |||
173 | return 0; | ||
174 | } | ||
175 | |||
176 | static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) | ||
177 | { | ||
178 | return hw->tx ? hw->tx[count] : 0xff; | ||
179 | } | ||
180 | |||
181 | static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) | ||
182 | { | ||
183 | struct s3c24xx_spi *hw = to_hw(spi); | ||
184 | |||
185 | dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", | ||
186 | t->tx_buf, t->rx_buf, t->len); | ||
187 | |||
188 | hw->tx = t->tx_buf; | ||
189 | hw->rx = t->rx_buf; | ||
190 | hw->len = t->len; | ||
191 | hw->count = 0; | ||
192 | |||
193 | /* send the first byte */ | ||
194 | writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); | ||
195 | wait_for_completion(&hw->done); | ||
196 | |||
197 | return hw->count; | ||
198 | } | ||
199 | |||
200 | static irqreturn_t s3c24xx_spi_irq(int irq, void *dev, struct pt_regs *regs) | ||
201 | { | ||
202 | struct s3c24xx_spi *hw = dev; | ||
203 | unsigned int spsta = readb(hw->regs + S3C2410_SPSTA); | ||
204 | unsigned int count = hw->count; | ||
205 | |||
206 | if (spsta & S3C2410_SPSTA_DCOL) { | ||
207 | dev_dbg(hw->dev, "data-collision\n"); | ||
208 | complete(&hw->done); | ||
209 | goto irq_done; | ||
210 | } | ||
211 | |||
212 | if (!(spsta & S3C2410_SPSTA_READY)) { | ||
213 | dev_dbg(hw->dev, "spi not ready for tx?\n"); | ||
214 | complete(&hw->done); | ||
215 | goto irq_done; | ||
216 | } | ||
217 | |||
218 | hw->count++; | ||
219 | |||
220 | if (hw->rx) | ||
221 | hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT); | ||
222 | |||
223 | count++; | ||
224 | |||
225 | if (count < hw->len) | ||
226 | writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); | ||
227 | else | ||
228 | complete(&hw->done); | ||
229 | |||
230 | irq_done: | ||
231 | return IRQ_HANDLED; | ||
232 | } | ||
233 | |||
234 | static int s3c24xx_spi_probe(struct platform_device *pdev) | ||
235 | { | ||
236 | struct s3c24xx_spi *hw; | ||
237 | struct spi_master *master; | ||
238 | struct spi_board_info *bi; | ||
239 | struct resource *res; | ||
240 | int err = 0; | ||
241 | int i; | ||
242 | |||
243 | master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi)); | ||
244 | if (master == NULL) { | ||
245 | dev_err(&pdev->dev, "No memory for spi_master\n"); | ||
246 | err = -ENOMEM; | ||
247 | goto err_nomem; | ||
248 | } | ||
249 | |||
250 | hw = spi_master_get_devdata(master); | ||
251 | memset(hw, 0, sizeof(struct s3c24xx_spi)); | ||
252 | |||
253 | hw->master = spi_master_get(master); | ||
254 | hw->pdata = pdev->dev.platform_data; | ||
255 | hw->dev = &pdev->dev; | ||
256 | |||
257 | if (hw->pdata == NULL) { | ||
258 | dev_err(&pdev->dev, "No platform data supplied\n"); | ||
259 | err = -ENOENT; | ||
260 | goto err_no_pdata; | ||
261 | } | ||
262 | |||
263 | platform_set_drvdata(pdev, hw); | ||
264 | init_completion(&hw->done); | ||
265 | |||
266 | /* setup the state for the bitbang driver */ | ||
267 | |||
268 | hw->bitbang.master = hw->master; | ||
269 | hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; | ||
270 | hw->bitbang.chipselect = s3c24xx_spi_chipsel; | ||
271 | hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; | ||
272 | hw->bitbang.master->setup = s3c24xx_spi_setup; | ||
273 | |||
274 | dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); | ||
275 | |||
276 | /* find and map our resources */ | ||
277 | |||
278 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
279 | if (res == NULL) { | ||
280 | dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); | ||
281 | err = -ENOENT; | ||
282 | goto err_no_iores; | ||
283 | } | ||
284 | |||
285 | hw->ioarea = request_mem_region(res->start, (res->end - res->start)+1, | ||
286 | pdev->name); | ||
287 | |||
288 | if (hw->ioarea == NULL) { | ||
289 | dev_err(&pdev->dev, "Cannot reserve region\n"); | ||
290 | err = -ENXIO; | ||
291 | goto err_no_iores; | ||
292 | } | ||
293 | |||
294 | hw->regs = ioremap(res->start, (res->end - res->start)+1); | ||
295 | if (hw->regs == NULL) { | ||
296 | dev_err(&pdev->dev, "Cannot map IO\n"); | ||
297 | err = -ENXIO; | ||
298 | goto err_no_iomap; | ||
299 | } | ||
300 | |||
301 | hw->irq = platform_get_irq(pdev, 0); | ||
302 | if (hw->irq < 0) { | ||
303 | dev_err(&pdev->dev, "No IRQ specified\n"); | ||
304 | err = -ENOENT; | ||
305 | goto err_no_irq; | ||
306 | } | ||
307 | |||
308 | err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw); | ||
309 | if (err) { | ||
310 | dev_err(&pdev->dev, "Cannot claim IRQ\n"); | ||
311 | goto err_no_irq; | ||
312 | } | ||
313 | |||
314 | hw->clk = clk_get(&pdev->dev, "spi"); | ||
315 | if (IS_ERR(hw->clk)) { | ||
316 | dev_err(&pdev->dev, "No clock for device\n"); | ||
317 | err = PTR_ERR(hw->clk); | ||
318 | goto err_no_clk; | ||
319 | } | ||
320 | |||
321 | /* for the moment, permanently enable the clock */ | ||
322 | |||
323 | clk_enable(hw->clk); | ||
324 | |||
325 | /* program defaults into the registers */ | ||
326 | |||
327 | writeb(0xff, hw->regs + S3C2410_SPPRE); | ||
328 | writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); | ||
329 | writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); | ||
330 | |||
331 | /* setup any gpio we can */ | ||
332 | |||
333 | if (!hw->pdata->set_cs) { | ||
334 | s3c2410_gpio_setpin(hw->pdata->pin_cs, 1); | ||
335 | s3c2410_gpio_cfgpin(hw->pdata->pin_cs, S3C2410_GPIO_OUTPUT); | ||
336 | } | ||
337 | |||
338 | /* register our spi controller */ | ||
339 | |||
340 | err = spi_bitbang_start(&hw->bitbang); | ||
341 | if (err) { | ||
342 | dev_err(&pdev->dev, "Failed to register SPI master\n"); | ||
343 | goto err_register; | ||
344 | } | ||
345 | |||
346 | dev_dbg(hw->dev, "shutdown=%d\n", hw->bitbang.shutdown); | ||
347 | |||
348 | /* register all the devices associated */ | ||
349 | |||
350 | bi = &hw->pdata->board_info[0]; | ||
351 | for (i = 0; i < hw->pdata->board_size; i++, bi++) { | ||
352 | dev_info(hw->dev, "registering %s\n", bi->modalias); | ||
353 | |||
354 | bi->controller_data = hw; | ||
355 | spi_new_device(master, bi); | ||
356 | } | ||
357 | |||
358 | return 0; | ||
359 | |||
360 | err_register: | ||
361 | clk_disable(hw->clk); | ||
362 | clk_put(hw->clk); | ||
363 | |||
364 | err_no_clk: | ||
365 | free_irq(hw->irq, hw); | ||
366 | |||
367 | err_no_irq: | ||
368 | iounmap(hw->regs); | ||
369 | |||
370 | err_no_iomap: | ||
371 | release_resource(hw->ioarea); | ||
372 | kfree(hw->ioarea); | ||
373 | |||
374 | err_no_iores: | ||
375 | err_no_pdata: | ||
376 | spi_master_put(hw->master); | ||
377 | |||
378 | err_nomem: | ||
379 | return err; | ||
380 | } | ||
381 | |||
382 | static int s3c24xx_spi_remove(struct platform_device *dev) | ||
383 | { | ||
384 | struct s3c24xx_spi *hw = platform_get_drvdata(dev); | ||
385 | |||
386 | platform_set_drvdata(dev, NULL); | ||
387 | |||
388 | spi_unregister_master(hw->master); | ||
389 | |||
390 | clk_disable(hw->clk); | ||
391 | clk_put(hw->clk); | ||
392 | |||
393 | free_irq(hw->irq, hw); | ||
394 | iounmap(hw->regs); | ||
395 | |||
396 | release_resource(hw->ioarea); | ||
397 | kfree(hw->ioarea); | ||
398 | |||
399 | spi_master_put(hw->master); | ||
400 | return 0; | ||
401 | } | ||
402 | |||
403 | |||
404 | #ifdef CONFIG_PM | ||
405 | |||
406 | static int s3c24xx_spi_suspend(struct platform_device *pdev, pm_message_t msg) | ||
407 | { | ||
408 | struct s3c24xx_spi *hw = platform_get_drvdata(pdev); | ||
409 | |||
410 | clk_disable(hw->clk); | ||
411 | return 0; | ||
412 | } | ||
413 | |||
414 | static int s3c24xx_spi_resume(struct platform_device *pdev) | ||
415 | { | ||
416 | struct s3c24xx_spi *hw = platform_get_drvdata(pdev); | ||
417 | |||
418 | clk_enable(hw->clk); | ||
419 | return 0; | ||
420 | } | ||
421 | |||
422 | #else | ||
423 | #define s3c24xx_spi_suspend NULL | ||
424 | #define s3c24xx_spi_resume NULL | ||
425 | #endif | ||
426 | |||
427 | static struct platform_driver s3c24xx_spidrv = { | ||
428 | .probe = s3c24xx_spi_probe, | ||
429 | .remove = s3c24xx_spi_remove, | ||
430 | .suspend = s3c24xx_spi_suspend, | ||
431 | .resume = s3c24xx_spi_resume, | ||
432 | .driver = { | ||
433 | .name = "s3c2410-spi", | ||
434 | .owner = THIS_MODULE, | ||
435 | }, | ||
436 | }; | ||
437 | |||
438 | static int __init s3c24xx_spi_init(void) | ||
439 | { | ||
440 | return platform_driver_register(&s3c24xx_spidrv); | ||
441 | } | ||
442 | |||
443 | static void __exit s3c24xx_spi_exit(void) | ||
444 | { | ||
445 | platform_driver_unregister(&s3c24xx_spidrv); | ||
446 | } | ||
447 | |||
448 | module_init(s3c24xx_spi_init); | ||
449 | module_exit(s3c24xx_spi_exit); | ||
450 | |||
451 | MODULE_DESCRIPTION("S3C24XX SPI Driver"); | ||
452 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); | ||
453 | MODULE_LICENSE("GPL"); | ||
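The s3c24xx probe() above dereferences only a few fields of its struct s3c2410_spi_info: pin_cs, an optional set_cs() hook, and the board_info[]/board_size list it feeds to spi_new_device(); the clock is looked up as "spi" on the "s3c2410-spi" platform device. A sketch of the board side follows; the slave modalias, speed and chip-select pin are assumptions, not part of the patch.

#include <linux/spi/spi.h>
#include <asm/arch/spi.h>	/* struct s3c2410_spi_info, as included by the driver above */

static struct spi_board_info board_spi_slaves[] = {
	{
		.modalias	= "example-spi-chip",	/* hypothetical slave driver */
		.max_speed_hz	= 1000000,
		.bus_num	= 0,
		.chip_select	= 0,
	},
};

static struct s3c2410_spi_info board_spi_info = {
	.pin_cs		= S3C2410_GPB5,		/* example chip-select pin */
	.board_info	= board_spi_slaves,
	.board_size	= ARRAY_SIZE(board_spi_slaves),
	/* .set_cs left NULL: the driver then drives pin_cs itself
	 * through s3c2410_gpio_setpin() */
};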
diff --git a/drivers/spi/spi_s3c24xx_gpio.c b/drivers/spi/spi_s3c24xx_gpio.c new file mode 100644 index 000000000000..aacdceb8f44b --- /dev/null +++ b/drivers/spi/spi_s3c24xx_gpio.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* linux/drivers/spi/spi_s3c24xx_gpio.c | ||
2 | * | ||
3 | * Copyright (c) 2006 Ben Dooks | ||
4 | * Copyright (c) 2006 Simtec Electronics | ||
5 | * | ||
6 | * S3C24XX GPIO based SPI driver | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/config.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/spinlock.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | #include <linux/spi/spi.h> | ||
22 | #include <linux/spi/spi_bitbang.h> | ||
23 | |||
24 | #include <asm/arch/regs-gpio.h> | ||
25 | #include <asm/arch/spi-gpio.h> | ||
26 | #include <asm/arch/hardware.h> | ||
27 | |||
28 | struct s3c2410_spigpio { | ||
29 | struct spi_bitbang bitbang; | ||
30 | |||
31 | struct s3c2410_spigpio_info *info; | ||
32 | struct platform_device *dev; | ||
33 | }; | ||
34 | |||
35 | static inline struct s3c2410_spigpio *spidev_to_sg(struct spi_device *spi) | ||
36 | { | ||
37 | return spi->controller_data; | ||
38 | } | ||
39 | |||
40 | static inline void setsck(struct spi_device *dev, int on) | ||
41 | { | ||
42 | struct s3c2410_spigpio *sg = spidev_to_sg(dev); | ||
43 | s3c2410_gpio_setpin(sg->info->pin_clk, on ? 1 : 0); | ||
44 | } | ||
45 | |||
46 | static inline void setmosi(struct spi_device *dev, int on) | ||
47 | { | ||
48 | struct s3c2410_spigpio *sg = spidev_to_sg(dev); | ||
49 | s3c2410_gpio_setpin(sg->info->pin_mosi, on ? 1 : 0); | ||
50 | } | ||
51 | |||
52 | static inline u32 getmiso(struct spi_device *dev) | ||
53 | { | ||
54 | struct s3c2410_spigpio *sg = spidev_to_sg(dev); | ||
55 | return s3c2410_gpio_getpin(sg->info->pin_miso) ? 1 : 0; | ||
56 | } | ||
57 | |||
58 | #define spidelay(x) ndelay(x) | ||
59 | |||
60 | #define EXPAND_BITBANG_TXRX | ||
61 | #include <linux/spi/spi_bitbang.h> | ||
62 | |||
63 | |||
64 | static u32 s3c2410_spigpio_txrx_mode0(struct spi_device *spi, | ||
65 | unsigned nsecs, u32 word, u8 bits) | ||
66 | { | ||
67 | return bitbang_txrx_be_cpha0(spi, nsecs, 0, word, bits); | ||
68 | } | ||
69 | |||
70 | static u32 s3c2410_spigpio_txrx_mode1(struct spi_device *spi, | ||
71 | unsigned nsecs, u32 word, u8 bits) | ||
72 | { | ||
73 | return bitbang_txrx_be_cpha1(spi, nsecs, 0, word, bits); | ||
74 | } | ||
75 | |||
76 | static void s3c2410_spigpio_chipselect(struct spi_device *dev, int value) | ||
77 | { | ||
78 | struct s3c2410_spigpio *sg = spidev_to_sg(dev); | ||
79 | |||
80 | if (sg->info && sg->info->chip_select) | ||
81 | (sg->info->chip_select)(sg->info, value); | ||
82 | } | ||
83 | |||
84 | static int s3c2410_spigpio_probe(struct platform_device *dev) | ||
85 | { | ||
86 | struct spi_master *master; | ||
87 | struct s3c2410_spigpio *sp; | ||
88 | int ret; | ||
89 | int i; | ||
90 | |||
91 | master = spi_alloc_master(&dev->dev, sizeof(struct s3c2410_spigpio)); | ||
92 | if (master == NULL) { | ||
93 | dev_err(&dev->dev, "failed to allocate spi master\n"); | ||
94 | ret = -ENOMEM; | ||
95 | goto err; | ||
96 | } | ||
97 | |||
98 | sp = spi_master_get_devdata(master); | ||
99 | |||
100 | platform_set_drvdata(dev, sp); | ||
101 | |||
102 | /* copy in the platform data */ | ||
103 | sp->info = dev->dev.platform_data; | ||
104 | |||
105 | /* setup spi bitbang adaptor */ | ||
106 | sp->bitbang.master = spi_master_get(master); | ||
107 | sp->bitbang.chipselect = s3c2410_spigpio_chipselect; | ||
108 | |||
109 | sp->bitbang.txrx_word[SPI_MODE_0] = s3c2410_spigpio_txrx_mode0; | ||
110 | sp->bitbang.txrx_word[SPI_MODE_1] = s3c2410_spigpio_txrx_mode1; | ||
111 | |||
112 | /* set state of spi pins */ | ||
113 | s3c2410_gpio_setpin(sp->info->pin_clk, 0); | ||
114 | s3c2410_gpio_setpin(sp->info->pin_mosi, 0); | ||
115 | |||
116 | s3c2410_gpio_cfgpin(sp->info->pin_clk, S3C2410_GPIO_OUTPUT); | ||
117 | s3c2410_gpio_cfgpin(sp->info->pin_mosi, S3C2410_GPIO_OUTPUT); | ||
118 | s3c2410_gpio_cfgpin(sp->info->pin_miso, S3C2410_GPIO_INPUT); | ||
119 | |||
120 | ret = spi_bitbang_start(&sp->bitbang); | ||
121 | if (ret) | ||
122 | goto err_no_bitbang; | ||
123 | |||
124 | /* register the chips to go with the board */ | ||
125 | |||
126 | for (i = 0; i < sp->info->board_size; i++) { | ||
127 | dev_info(&dev->dev, "registering %p: %s\n", | ||
128 | &sp->info->board_info[i], | ||
129 | sp->info->board_info[i].modalias); | ||
130 | |||
131 | sp->info->board_info[i].controller_data = sp; | ||
132 | spi_new_device(master, sp->info->board_info + i); | ||
133 | } | ||
134 | |||
135 | return 0; | ||
136 | |||
137 | err_no_bitbang: | ||
138 | spi_master_put(sp->bitbang.master); | ||
139 | err: | ||
140 | return ret; | ||
141 | |||
142 | } | ||
143 | |||
144 | static int s3c2410_spigpio_remove(struct platform_device *dev) | ||
145 | { | ||
146 | struct s3c2410_spigpio *sp = platform_get_drvdata(dev); | ||
147 | |||
148 | spi_bitbang_stop(&sp->bitbang); | ||
149 | spi_master_put(sp->bitbang.master); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* all gpio should be held over suspend/resume, so we should | ||
155 | * not need to deal with this | ||
156 | */ | ||
157 | |||
158 | #define s3c2410_spigpio_suspend NULL | ||
159 | #define s3c2410_spigpio_resume NULL | ||
160 | |||
161 | |||
162 | static struct platform_driver s3c2410_spigpio_drv = { | ||
163 | .probe = s3c2410_spigpio_probe, | ||
164 | .remove = s3c2410_spigpio_remove, | ||
165 | .suspend = s3c2410_spigpio_suspend, | ||
166 | .resume = s3c2410_spigpio_resume, | ||
167 | .driver = { | ||
168 | .name = "s3c24xx-spi-gpio", | ||
169 | .owner = THIS_MODULE, | ||
170 | }, | ||
171 | }; | ||
172 | |||
173 | static int __init s3c2410_spigpio_init(void) | ||
174 | { | ||
175 | return platform_driver_register(&s3c2410_spigpio_drv); | ||
176 | } | ||
177 | |||
178 | static void __exit s3c2410_spigpio_exit(void) | ||
179 | { | ||
180 | platform_driver_unregister(&s3c2410_spigpio_drv); | ||
181 | } | ||
182 | |||
183 | module_init(s3c2410_spigpio_init); | ||
184 | module_exit(s3c2410_spigpio_exit); | ||
185 | |||
186 | MODULE_DESCRIPTION("S3C24XX GPIO based SPI Driver"); | ||
187 | MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); | ||
188 | MODULE_LICENSE("GPL"); | ||
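The GPIO bitbang variant above expects a struct s3c2410_spigpio_info naming the clock, MOSI and MISO pins plus an optional chip_select() callback, and registers the same kind of board_info[]/board_size slave list. A sketch under the same caveats (hypothetical pins and callback body):

#include <asm/arch/spi-gpio.h>	/* struct s3c2410_spigpio_info, as included by the driver above */

static void board_spigpio_cs(struct s3c2410_spigpio_info *info, int value)
{
	/* assert or release whatever GPIO the board wires up as chip select */
}

static struct s3c2410_spigpio_info board_spigpio_info = {
	.pin_clk	= S3C2410_GPG7,		/* example pins, board specific */
	.pin_mosi	= S3C2410_GPG6,
	.pin_miso	= S3C2410_GPG5,
	.chip_select	= board_spigpio_cs,
	/* .board_info / .board_size list the attached slaves, exactly as
	 * the probe() loop above expects */
};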
diff --git a/drivers/usb/input/hiddev.c b/drivers/usb/input/hiddev.c index 6dd666696178..c4670e1d4654 100644 --- a/drivers/usb/input/hiddev.c +++ b/drivers/usb/input/hiddev.c | |||
@@ -317,6 +317,7 @@ static ssize_t hiddev_read(struct file * file, char __user * buffer, size_t coun | |||
317 | } | 317 | } |
318 | 318 | ||
319 | schedule(); | 319 | schedule(); |
320 | set_current_state(TASK_INTERRUPTIBLE); | ||
320 | } | 321 | } |
321 | 322 | ||
322 | set_current_state(TASK_RUNNING); | 323 | set_current_state(TASK_RUNNING); |
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c index 334b1db1bd7c..27597c576eff 100644 --- a/drivers/video/backlight/backlight.c +++ b/drivers/video/backlight/backlight.c | |||
@@ -29,12 +29,15 @@ static ssize_t backlight_show_power(struct class_device *cdev, char *buf) | |||
29 | 29 | ||
30 | static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) | 30 | static ssize_t backlight_store_power(struct class_device *cdev, const char *buf, size_t count) |
31 | { | 31 | { |
32 | int rc = -ENXIO, power; | 32 | int rc = -ENXIO; |
33 | char *endp; | 33 | char *endp; |
34 | struct backlight_device *bd = to_backlight_device(cdev); | 34 | struct backlight_device *bd = to_backlight_device(cdev); |
35 | int power = simple_strtoul(buf, &endp, 0); | ||
36 | size_t size = endp - buf; | ||
35 | 37 | ||
36 | power = simple_strtoul(buf, &endp, 0); | 38 | if (*endp && isspace(*endp)) |
37 | if (*endp && !isspace(*endp)) | 39 | size++; |
40 | if (size != count) | ||
38 | return -EINVAL; | 41 | return -EINVAL; |
39 | 42 | ||
40 | down(&bd->sem); | 43 | down(&bd->sem); |
@@ -65,12 +68,15 @@ static ssize_t backlight_show_brightness(struct class_device *cdev, char *buf) | |||
65 | 68 | ||
66 | static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) | 69 | static ssize_t backlight_store_brightness(struct class_device *cdev, const char *buf, size_t count) |
67 | { | 70 | { |
68 | int rc = -ENXIO, brightness; | 71 | int rc = -ENXIO; |
69 | char *endp; | 72 | char *endp; |
70 | struct backlight_device *bd = to_backlight_device(cdev); | 73 | struct backlight_device *bd = to_backlight_device(cdev); |
74 | int brightness = simple_strtoul(buf, &endp, 0); | ||
75 | size_t size = endp - buf; | ||
71 | 76 | ||
72 | brightness = simple_strtoul(buf, &endp, 0); | 77 | if (*endp && isspace(*endp)) |
73 | if (*endp && !isspace(*endp)) | 78 | size++; |
79 | if (size != count) | ||
74 | return -EINVAL; | 80 | return -EINVAL; |
75 | 81 | ||
76 | down(&bd->sem); | 82 | down(&bd->sem); |
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c index 86908a60c630..bc8ab005a3fb 100644 --- a/drivers/video/backlight/lcd.c +++ b/drivers/video/backlight/lcd.c | |||
@@ -31,12 +31,15 @@ static ssize_t lcd_show_power(struct class_device *cdev, char *buf) | |||
31 | 31 | ||
32 | static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) | 32 | static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_t count) |
33 | { | 33 | { |
34 | int rc, power; | 34 | int rc = -ENXIO; |
35 | char *endp; | 35 | char *endp; |
36 | struct lcd_device *ld = to_lcd_device(cdev); | 36 | struct lcd_device *ld = to_lcd_device(cdev); |
37 | int power = simple_strtoul(buf, &endp, 0); | ||
38 | size_t size = endp - buf; | ||
37 | 39 | ||
38 | power = simple_strtoul(buf, &endp, 0); | 40 | if (*endp && isspace(*endp)) |
39 | if (*endp && !isspace(*endp)) | 41 | size++; |
42 | if (size != count) | ||
40 | return -EINVAL; | 43 | return -EINVAL; |
41 | 44 | ||
42 | down(&ld->sem); | 45 | down(&ld->sem); |
@@ -44,8 +47,7 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_ | |||
44 | pr_debug("lcd: set power to %d\n", power); | 47 | pr_debug("lcd: set power to %d\n", power); |
45 | ld->props->set_power(ld, power); | 48 | ld->props->set_power(ld, power); |
46 | rc = count; | 49 | rc = count; |
47 | } else | 50 | } |
48 | rc = -ENXIO; | ||
49 | up(&ld->sem); | 51 | up(&ld->sem); |
50 | 52 | ||
51 | return rc; | 53 | return rc; |
@@ -53,14 +55,12 @@ static ssize_t lcd_store_power(struct class_device *cdev, const char *buf, size_ | |||
53 | 55 | ||
54 | static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) | 56 | static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) |
55 | { | 57 | { |
56 | int rc; | 58 | int rc = -ENXIO; |
57 | struct lcd_device *ld = to_lcd_device(cdev); | 59 | struct lcd_device *ld = to_lcd_device(cdev); |
58 | 60 | ||
59 | down(&ld->sem); | 61 | down(&ld->sem); |
60 | if (likely(ld->props && ld->props->get_contrast)) | 62 | if (likely(ld->props && ld->props->get_contrast)) |
61 | rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); | 63 | rc = sprintf(buf, "%d\n", ld->props->get_contrast(ld)); |
62 | else | ||
63 | rc = -ENXIO; | ||
64 | up(&ld->sem); | 64 | up(&ld->sem); |
65 | 65 | ||
66 | return rc; | 66 | return rc; |
@@ -68,12 +68,15 @@ static ssize_t lcd_show_contrast(struct class_device *cdev, char *buf) | |||
68 | 68 | ||
69 | static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) | 69 | static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, size_t count) |
70 | { | 70 | { |
71 | int rc, contrast; | 71 | int rc = -ENXIO; |
72 | char *endp; | 72 | char *endp; |
73 | struct lcd_device *ld = to_lcd_device(cdev); | 73 | struct lcd_device *ld = to_lcd_device(cdev); |
74 | int contrast = simple_strtoul(buf, &endp, 0); | ||
75 | size_t size = endp - buf; | ||
74 | 76 | ||
75 | contrast = simple_strtoul(buf, &endp, 0); | 77 | if (*endp && isspace(*endp)) |
76 | if (*endp && !isspace(*endp)) | 78 | size++; |
79 | if (size != count) | ||
77 | return -EINVAL; | 80 | return -EINVAL; |
78 | 81 | ||
79 | down(&ld->sem); | 82 | down(&ld->sem); |
@@ -81,8 +84,7 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si | |||
81 | pr_debug("lcd: set contrast to %d\n", contrast); | 84 | pr_debug("lcd: set contrast to %d\n", contrast); |
82 | ld->props->set_contrast(ld, contrast); | 85 | ld->props->set_contrast(ld, contrast); |
83 | rc = count; | 86 | rc = count; |
84 | } else | 87 | } |
85 | rc = -ENXIO; | ||
86 | up(&ld->sem); | 88 | up(&ld->sem); |
87 | 89 | ||
88 | return rc; | 90 | return rc; |
@@ -90,14 +92,12 @@ static ssize_t lcd_store_contrast(struct class_device *cdev, const char *buf, si | |||
90 | 92 | ||
91 | static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) | 93 | static ssize_t lcd_show_max_contrast(struct class_device *cdev, char *buf) |
92 | { | 94 | { |
93 | int rc; | 95 | int rc = -ENXIO; |
94 | struct lcd_device *ld = to_lcd_device(cdev); | 96 | struct lcd_device *ld = to_lcd_device(cdev); |
95 | 97 | ||
96 | down(&ld->sem); | 98 | down(&ld->sem); |
97 | if (likely(ld->props)) | 99 | if (likely(ld->props)) |
98 | rc = sprintf(buf, "%d\n", ld->props->max_contrast); | 100 | rc = sprintf(buf, "%d\n", ld->props->max_contrast); |
99 | else | ||
100 | rc = -ENXIO; | ||
101 | up(&ld->sem); | 101 | up(&ld->sem); |
102 | 102 | ||
103 | return rc; | 103 | return rc; |
diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c index 788297e9d59e..44aa2ffff973 100644 --- a/drivers/video/i810/i810_main.c +++ b/drivers/video/i810/i810_main.c | |||
@@ -76,8 +76,8 @@ | |||
76 | * | 76 | * |
77 | * Experiment with v_offset to find out which works best for you. | 77 | * Experiment with v_offset to find out which works best for you. |
78 | */ | 78 | */ |
79 | static u32 v_offset_default __initdata; /* For 32 MiB Aper size, 8 should be the default */ | 79 | static u32 v_offset_default __devinitdata; /* For 32 MiB Aper size, 8 should be the default */ |
80 | static u32 voffset __initdata = 0; | 80 | static u32 voffset __devinitdata; |
81 | 81 | ||
82 | static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor); | 82 | static int i810fb_cursor(struct fb_info *info, struct fb_cursor *cursor); |
83 | static int __devinit i810fb_init_pci (struct pci_dev *dev, | 83 | static int __devinit i810fb_init_pci (struct pci_dev *dev, |
diff --git a/drivers/video/matrox/g450_pll.c b/drivers/video/matrox/g450_pll.c index 8073a73f6f35..440272ad10e7 100644 --- a/drivers/video/matrox/g450_pll.c +++ b/drivers/video/matrox/g450_pll.c | |||
@@ -316,14 +316,24 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll, | |||
316 | case M_PIXEL_PLL_B: | 316 | case M_PIXEL_PLL_B: |
317 | case M_PIXEL_PLL_C: | 317 | case M_PIXEL_PLL_C: |
318 | { | 318 | { |
319 | u_int8_t tmp; | 319 | u_int8_t tmp, xpwrctrl; |
320 | unsigned long flags; | 320 | unsigned long flags; |
321 | 321 | ||
322 | matroxfb_DAC_lock_irqsave(flags); | 322 | matroxfb_DAC_lock_irqsave(flags); |
323 | |||
324 | xpwrctrl = matroxfb_DAC_in(PMINFO M1064_XPWRCTRL); | ||
325 | matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl & ~M1064_XPWRCTRL_PANELPDN); | ||
326 | mga_outb(M_SEQ_INDEX, M_SEQ1); | ||
327 | mga_outb(M_SEQ_DATA, mga_inb(M_SEQ_DATA) | M_SEQ1_SCROFF); | ||
323 | tmp = matroxfb_DAC_in(PMINFO M1064_XPIXCLKCTRL); | 328 | tmp = matroxfb_DAC_in(PMINFO M1064_XPIXCLKCTRL); |
329 | tmp |= M1064_XPIXCLKCTRL_DIS; | ||
324 | if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { | 330 | if (!(tmp & M1064_XPIXCLKCTRL_PLL_UP)) { |
325 | matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp | M1064_XPIXCLKCTRL_PLL_UP); | 331 | tmp |= M1064_XPIXCLKCTRL_PLL_UP; |
326 | } | 332 | } |
333 | matroxfb_DAC_out(PMINFO M1064_XPIXCLKCTRL, tmp); | ||
334 | matroxfb_DAC_out(PMINFO M1064_XDVICLKCTRL, 0); | ||
335 | matroxfb_DAC_out(PMINFO M1064_XPWRCTRL, xpwrctrl); | ||
336 | |||
327 | matroxfb_DAC_unlock_irqrestore(flags); | 337 | matroxfb_DAC_unlock_irqrestore(flags); |
328 | } | 338 | } |
329 | { | 339 | { |
@@ -418,6 +428,15 @@ static int __g450_setclk(WPMINFO unsigned int fout, unsigned int pll, | |||
418 | frequency to higher - with <= lowest wins, while | 428 | frequency to higher - with <= lowest wins, while |
419 | with < highest one wins */ | 429 | with < highest one wins */ |
420 | if (delta <= deltaarray[idx-1]) { | 430 | if (delta <= deltaarray[idx-1]) { |
431 | /* all else being equal except VCO, | ||
432 | * choose VCO not near (within 1/16th or so) VCOmin | ||
433 | * (freqs near VCOmin aren't as stable) | ||
434 | */ | ||
435 | if (delta == deltaarray[idx-1] | ||
436 | && vco != g450_mnp2vco(PMINFO mnparray[idx-1]) | ||
437 | && vco < (pi->vcomin * 17 / 16)) { | ||
438 | break; | ||
439 | } | ||
421 | mnparray[idx] = mnparray[idx-1]; | 440 | mnparray[idx] = mnparray[idx-1]; |
422 | deltaarray[idx] = deltaarray[idx-1]; | 441 | deltaarray[idx] = deltaarray[idx-1]; |
423 | } else { | 442 | } else { |
diff --git a/drivers/video/matrox/matroxfb_DAC1064.h b/drivers/video/matrox/matroxfb_DAC1064.h index 2e7238aa2432..56513a5d220b 100644 --- a/drivers/video/matrox/matroxfb_DAC1064.h +++ b/drivers/video/matrox/matroxfb_DAC1064.h | |||
@@ -40,6 +40,7 @@ void DAC1064_global_restore(WPMINFO2); | |||
40 | #define M1064_XCURCOL1RED 0x0C | 40 | #define M1064_XCURCOL1RED 0x0C |
41 | #define M1064_XCURCOL1GREEN 0x0D | 41 | #define M1064_XCURCOL1GREEN 0x0D |
42 | #define M1064_XCURCOL1BLUE 0x0E | 42 | #define M1064_XCURCOL1BLUE 0x0E |
43 | #define M1064_XDVICLKCTRL 0x0F | ||
43 | #define M1064_XCURCOL2RED 0x10 | 44 | #define M1064_XCURCOL2RED 0x10 |
44 | #define M1064_XCURCOL2GREEN 0x11 | 45 | #define M1064_XCURCOL2GREEN 0x11 |
45 | #define M1064_XCURCOL2BLUE 0x12 | 46 | #define M1064_XCURCOL2BLUE 0x12 |
@@ -144,6 +145,7 @@ void DAC1064_global_restore(WPMINFO2); | |||
144 | #define M1064_XVIDPLLN 0x8F | 145 | #define M1064_XVIDPLLN 0x8F |
145 | 146 | ||
146 | #define M1064_XPWRCTRL 0xA0 | 147 | #define M1064_XPWRCTRL 0xA0 |
148 | #define M1064_XPWRCTRL_PANELPDN 0x04 | ||
147 | 149 | ||
148 | #define M1064_XPANMODE 0xA2 | 150 | #define M1064_XPANMODE 0xA2 |
149 | 151 | ||
diff --git a/drivers/video/matrox/matroxfb_base.h b/drivers/video/matrox/matroxfb_base.h index 3a3e1804c56a..b71737178d0d 100644 --- a/drivers/video/matrox/matroxfb_base.h +++ b/drivers/video/matrox/matroxfb_base.h | |||
@@ -672,6 +672,8 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv); | |||
672 | 672 | ||
673 | #define M_SEQ_INDEX 0x1FC4 | 673 | #define M_SEQ_INDEX 0x1FC4 |
674 | #define M_SEQ_DATA 0x1FC5 | 674 | #define M_SEQ_DATA 0x1FC5 |
675 | #define M_SEQ1 0x01 | ||
676 | #define M_SEQ1_SCROFF 0x20 | ||
675 | 677 | ||
676 | #define M_MISC_REG_READ 0x1FCC | 678 | #define M_MISC_REG_READ 0x1FCC |
677 | 679 | ||
diff --git a/fs/9p/fcall.c b/fs/9p/fcall.c index 71742ba150c4..6f2617820a4e 100644 --- a/fs/9p/fcall.c +++ b/fs/9p/fcall.c | |||
@@ -98,23 +98,20 @@ v9fs_t_attach(struct v9fs_session_info *v9ses, char *uname, char *aname, | |||
98 | static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc, | 98 | static void v9fs_t_clunk_cb(void *a, struct v9fs_fcall *tc, |
99 | struct v9fs_fcall *rc, int err) | 99 | struct v9fs_fcall *rc, int err) |
100 | { | 100 | { |
101 | int fid; | 101 | int fid, id; |
102 | struct v9fs_session_info *v9ses; | 102 | struct v9fs_session_info *v9ses; |
103 | 103 | ||
104 | if (err) | 104 | id = 0; |
105 | return; | ||
106 | |||
107 | fid = tc->params.tclunk.fid; | 105 | fid = tc->params.tclunk.fid; |
108 | kfree(tc); | 106 | if (rc) |
109 | 107 | id = rc->id; | |
110 | if (!rc) | ||
111 | return; | ||
112 | |||
113 | v9ses = a; | ||
114 | if (rc->id == RCLUNK) | ||
115 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
116 | 108 | ||
109 | kfree(tc); | ||
117 | kfree(rc); | 110 | kfree(rc); |
111 | if (id == RCLUNK) { | ||
112 | v9ses = a; | ||
113 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
114 | } | ||
118 | } | 115 | } |
119 | 116 | ||
120 | /** | 117 | /** |
diff --git a/fs/9p/mux.c b/fs/9p/mux.c index 3e5b124a7212..f4407eb276c7 100644 --- a/fs/9p/mux.c +++ b/fs/9p/mux.c | |||
@@ -50,15 +50,23 @@ enum { | |||
50 | Wpending = 8, /* can write */ | 50 | Wpending = 8, /* can write */ |
51 | }; | 51 | }; |
52 | 52 | ||
53 | enum { | ||
54 | None, | ||
55 | Flushing, | ||
56 | Flushed, | ||
57 | }; | ||
58 | |||
53 | struct v9fs_mux_poll_task; | 59 | struct v9fs_mux_poll_task; |
54 | 60 | ||
55 | struct v9fs_req { | 61 | struct v9fs_req { |
62 | spinlock_t lock; | ||
56 | int tag; | 63 | int tag; |
57 | struct v9fs_fcall *tcall; | 64 | struct v9fs_fcall *tcall; |
58 | struct v9fs_fcall *rcall; | 65 | struct v9fs_fcall *rcall; |
59 | int err; | 66 | int err; |
60 | v9fs_mux_req_callback cb; | 67 | v9fs_mux_req_callback cb; |
61 | void *cba; | 68 | void *cba; |
69 | int flush; | ||
62 | struct list_head req_list; | 70 | struct list_head req_list; |
63 | }; | 71 | }; |
64 | 72 | ||
@@ -96,8 +104,8 @@ struct v9fs_mux_poll_task { | |||
96 | 104 | ||
97 | struct v9fs_mux_rpc { | 105 | struct v9fs_mux_rpc { |
98 | struct v9fs_mux_data *m; | 106 | struct v9fs_mux_data *m; |
99 | struct v9fs_req *req; | ||
100 | int err; | 107 | int err; |
108 | struct v9fs_fcall *tcall; | ||
101 | struct v9fs_fcall *rcall; | 109 | struct v9fs_fcall *rcall; |
102 | wait_queue_head_t wqueue; | 110 | wait_queue_head_t wqueue; |
103 | }; | 111 | }; |
@@ -524,10 +532,9 @@ again: | |||
524 | 532 | ||
525 | static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) | 533 | static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) |
526 | { | 534 | { |
527 | int ecode, tag; | 535 | int ecode; |
528 | struct v9fs_str *ename; | 536 | struct v9fs_str *ename; |
529 | 537 | ||
530 | tag = req->tag; | ||
531 | if (!req->err && req->rcall->id == RERROR) { | 538 | if (!req->err && req->rcall->id == RERROR) { |
532 | ecode = req->rcall->params.rerror.errno; | 539 | ecode = req->rcall->params.rerror.errno; |
533 | ename = &req->rcall->params.rerror.error; | 540 | ename = &req->rcall->params.rerror.error; |
@@ -553,23 +560,6 @@ static void process_request(struct v9fs_mux_data *m, struct v9fs_req *req) | |||
553 | if (!req->err) | 560 | if (!req->err) |
554 | req->err = -EIO; | 561 | req->err = -EIO; |
555 | } | 562 | } |
556 | |||
557 | if (req->err == ERREQFLUSH) | ||
558 | return; | ||
559 | |||
560 | if (req->cb) { | ||
561 | dprintk(DEBUG_MUX, "calling callback tcall %p rcall %p\n", | ||
562 | req->tcall, req->rcall); | ||
563 | |||
564 | (*req->cb) (req->cba, req->tcall, req->rcall, req->err); | ||
565 | req->cb = NULL; | ||
566 | } else | ||
567 | kfree(req->rcall); | ||
568 | |||
569 | v9fs_mux_put_tag(m, tag); | ||
570 | |||
571 | wake_up(&m->equeue); | ||
572 | kfree(req); | ||
573 | } | 563 | } |
574 | 564 | ||
575 | /** | 565 | /** |
@@ -669,17 +659,26 @@ static void v9fs_read_work(void *a) | |||
669 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { | 659 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { |
670 | if (rreq->tag == rcall->tag) { | 660 | if (rreq->tag == rcall->tag) { |
671 | req = rreq; | 661 | req = rreq; |
672 | req->rcall = rcall; | 662 | if (req->flush != Flushing) |
673 | list_del(&req->req_list); | 663 | list_del(&req->req_list); |
674 | spin_unlock(&m->lock); | ||
675 | process_request(m, req); | ||
676 | break; | 664 | break; |
677 | } | 665 | } |
678 | |||
679 | } | 666 | } |
667 | spin_unlock(&m->lock); | ||
680 | 668 | ||
681 | if (!req) { | 669 | if (req) { |
682 | spin_unlock(&m->lock); | 670 | req->rcall = rcall; |
671 | process_request(m, req); | ||
672 | |||
673 | if (req->flush != Flushing) { | ||
674 | if (req->cb) | ||
675 | (*req->cb) (req, req->cba); | ||
676 | else | ||
677 | kfree(req->rcall); | ||
678 | |||
679 | wake_up(&m->equeue); | ||
680 | } | ||
681 | } else { | ||
683 | if (err >= 0 && rcall->id != RFLUSH) | 682 | if (err >= 0 && rcall->id != RFLUSH) |
684 | dprintk(DEBUG_ERROR, | 683 | dprintk(DEBUG_ERROR, |
685 | "unexpected response mux %p id %d tag %d\n", | 684 | "unexpected response mux %p id %d tag %d\n", |
@@ -746,7 +745,6 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m, | |||
746 | return ERR_PTR(-ENOMEM); | 745 | return ERR_PTR(-ENOMEM); |
747 | 746 | ||
748 | v9fs_set_tag(tc, n); | 747 | v9fs_set_tag(tc, n); |
749 | |||
750 | if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) { | 748 | if ((v9fs_debug_level&DEBUG_FCALL) == DEBUG_FCALL) { |
751 | char buf[150]; | 749 | char buf[150]; |
752 | 750 | ||
@@ -754,12 +752,14 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m, | |||
754 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); | 752 | printk(KERN_NOTICE "<<< %p %s\n", m, buf); |
755 | } | 753 | } |
756 | 754 | ||
755 | spin_lock_init(&req->lock); | ||
757 | req->tag = n; | 756 | req->tag = n; |
758 | req->tcall = tc; | 757 | req->tcall = tc; |
759 | req->rcall = NULL; | 758 | req->rcall = NULL; |
760 | req->err = 0; | 759 | req->err = 0; |
761 | req->cb = cb; | 760 | req->cb = cb; |
762 | req->cba = cba; | 761 | req->cba = cba; |
762 | req->flush = None; | ||
763 | 763 | ||
764 | spin_lock(&m->lock); | 764 | spin_lock(&m->lock); |
765 | list_add_tail(&req->req_list, &m->unsent_req_list); | 765 | list_add_tail(&req->req_list, &m->unsent_req_list); |
@@ -776,72 +776,108 @@ static struct v9fs_req *v9fs_send_request(struct v9fs_mux_data *m, | |||
776 | return req; | 776 | return req; |
777 | } | 777 | } |
778 | 778 | ||
779 | static void v9fs_mux_flush_cb(void *a, struct v9fs_fcall *tc, | 779 | static void v9fs_mux_free_request(struct v9fs_mux_data *m, struct v9fs_req *req) |
780 | struct v9fs_fcall *rc, int err) | 780 | { |
781 | v9fs_mux_put_tag(m, req->tag); | ||
782 | kfree(req); | ||
783 | } | ||
784 | |||
785 | static void v9fs_mux_flush_cb(struct v9fs_req *freq, void *a) | ||
781 | { | 786 | { |
782 | v9fs_mux_req_callback cb; | 787 | v9fs_mux_req_callback cb; |
783 | int tag; | 788 | int tag; |
784 | struct v9fs_mux_data *m; | 789 | struct v9fs_mux_data *m; |
785 | struct v9fs_req *req, *rptr; | 790 | struct v9fs_req *req, *rreq, *rptr; |
786 | 791 | ||
787 | m = a; | 792 | m = a; |
788 | dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, tc, | 793 | dprintk(DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, |
789 | rc, err, tc->params.tflush.oldtag); | 794 | freq->tcall, freq->rcall, freq->err, |
795 | freq->tcall->params.tflush.oldtag); | ||
790 | 796 | ||
791 | spin_lock(&m->lock); | 797 | spin_lock(&m->lock); |
792 | cb = NULL; | 798 | cb = NULL; |
793 | tag = tc->params.tflush.oldtag; | 799 | tag = freq->tcall->params.tflush.oldtag; |
794 | list_for_each_entry_safe(req, rptr, &m->req_list, req_list) { | 800 | req = NULL; |
795 | if (req->tag == tag) { | 801 | list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { |
802 | if (rreq->tag == tag) { | ||
803 | req = rreq; | ||
796 | list_del(&req->req_list); | 804 | list_del(&req->req_list); |
797 | if (req->cb) { | ||
798 | cb = req->cb; | ||
799 | req->cb = NULL; | ||
800 | spin_unlock(&m->lock); | ||
801 | (*cb) (req->cba, req->tcall, req->rcall, | ||
802 | req->err); | ||
803 | } | ||
804 | kfree(req); | ||
805 | wake_up(&m->equeue); | ||
806 | break; | 805 | break; |
807 | } | 806 | } |
808 | } | 807 | } |
808 | spin_unlock(&m->lock); | ||
809 | 809 | ||
810 | if (!cb) | 810 | if (req) { |
811 | spin_unlock(&m->lock); | 811 | spin_lock(&req->lock); |
812 | req->flush = Flushed; | ||
813 | spin_unlock(&req->lock); | ||
814 | |||
815 | if (req->cb) | ||
816 | (*req->cb) (req, req->cba); | ||
817 | else | ||
818 | kfree(req->rcall); | ||
819 | |||
820 | wake_up(&m->equeue); | ||
821 | } | ||
812 | 822 | ||
813 | v9fs_mux_put_tag(m, tag); | 823 | kfree(freq->tcall); |
814 | kfree(tc); | 824 | kfree(freq->rcall); |
815 | kfree(rc); | 825 | v9fs_mux_free_request(m, freq); |
816 | } | 826 | } |
817 | 827 | ||
818 | static void | 828 | static int |
819 | v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req) | 829 | v9fs_mux_flush_request(struct v9fs_mux_data *m, struct v9fs_req *req) |
820 | { | 830 | { |
821 | struct v9fs_fcall *fc; | 831 | struct v9fs_fcall *fc; |
832 | struct v9fs_req *rreq, *rptr; | ||
822 | 833 | ||
823 | dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); | 834 | dprintk(DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); |
824 | 835 | ||
836 | /* if a response was received for a request, do nothing */ | ||
837 | spin_lock(&req->lock); | ||
838 | if (req->rcall || req->err) { | ||
839 | spin_unlock(&req->lock); | ||
840 | dprintk(DEBUG_MUX, "mux %p req %p response already received\n", m, req); | ||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | req->flush = Flushing; | ||
845 | spin_unlock(&req->lock); | ||
846 | |||
847 | spin_lock(&m->lock); | ||
848 | /* if the request is not sent yet, just remove it from the list */ | ||
849 | list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { | ||
850 | if (rreq->tag == req->tag) { | ||
851 | dprintk(DEBUG_MUX, "mux %p req %p request is not sent yet\n", m, req); | ||
852 | list_del(&rreq->req_list); | ||
853 | req->flush = Flushed; | ||
854 | spin_unlock(&m->lock); | ||
855 | if (req->cb) | ||
856 | (*req->cb) (req, req->cba); | ||
857 | return 0; | ||
858 | } | ||
859 | } | ||
860 | spin_unlock(&m->lock); | ||
861 | |||
862 | clear_thread_flag(TIF_SIGPENDING); | ||
825 | fc = v9fs_create_tflush(req->tag); | 863 | fc = v9fs_create_tflush(req->tag); |
826 | v9fs_send_request(m, fc, v9fs_mux_flush_cb, m); | 864 | v9fs_send_request(m, fc, v9fs_mux_flush_cb, m); |
865 | return 1; | ||
827 | } | 866 | } |
828 | 867 | ||
829 | static void | 868 | static void |
830 | v9fs_mux_rpc_cb(void *a, struct v9fs_fcall *tc, struct v9fs_fcall *rc, int err) | 869 | v9fs_mux_rpc_cb(struct v9fs_req *req, void *a) |
831 | { | 870 | { |
832 | struct v9fs_mux_rpc *r; | 871 | struct v9fs_mux_rpc *r; |
833 | 872 | ||
834 | if (err == ERREQFLUSH) { | 873 | dprintk(DEBUG_MUX, "req %p r %p\n", req, a); |
835 | kfree(rc); | ||
836 | dprintk(DEBUG_MUX, "err req flush\n"); | ||
837 | return; | ||
838 | } | ||
839 | |||
840 | r = a; | 874 | r = a; |
841 | dprintk(DEBUG_MUX, "mux %p req %p tc %p rc %p err %d\n", r->m, r->req, | 875 | r->rcall = req->rcall; |
842 | tc, rc, err); | 876 | r->err = req->err; |
843 | r->rcall = rc; | 877 | |
844 | r->err = err; | 878 | if (req->flush!=None && !req->err) |
879 | r->err = -ERESTARTSYS; | ||
880 | |||
845 | wake_up(&r->wqueue); | 881 | wake_up(&r->wqueue); |
846 | } | 882 | } |
847 | 883 | ||
@@ -856,12 +892,13 @@ int | |||
856 | v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, | 892 | v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, |
857 | struct v9fs_fcall **rc) | 893 | struct v9fs_fcall **rc) |
858 | { | 894 | { |
859 | int err; | 895 | int err, sigpending; |
860 | unsigned long flags; | 896 | unsigned long flags; |
861 | struct v9fs_req *req; | 897 | struct v9fs_req *req; |
862 | struct v9fs_mux_rpc r; | 898 | struct v9fs_mux_rpc r; |
863 | 899 | ||
864 | r.err = 0; | 900 | r.err = 0; |
901 | r.tcall = tc; | ||
865 | r.rcall = NULL; | 902 | r.rcall = NULL; |
866 | r.m = m; | 903 | r.m = m; |
867 | init_waitqueue_head(&r.wqueue); | 904 | init_waitqueue_head(&r.wqueue); |
@@ -869,48 +906,50 @@ v9fs_mux_rpc(struct v9fs_mux_data *m, struct v9fs_fcall *tc, | |||
869 | if (rc) | 906 | if (rc) |
870 | *rc = NULL; | 907 | *rc = NULL; |
871 | 908 | ||
909 | sigpending = 0; | ||
910 | if (signal_pending(current)) { | ||
911 | sigpending = 1; | ||
912 | clear_thread_flag(TIF_SIGPENDING); | ||
913 | } | ||
914 | |||
872 | req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r); | 915 | req = v9fs_send_request(m, tc, v9fs_mux_rpc_cb, &r); |
873 | if (IS_ERR(req)) { | 916 | if (IS_ERR(req)) { |
874 | err = PTR_ERR(req); | 917 | err = PTR_ERR(req); |
875 | dprintk(DEBUG_MUX, "error %d\n", err); | 918 | dprintk(DEBUG_MUX, "error %d\n", err); |
876 | return PTR_ERR(req); | 919 | return err; |
877 | } | 920 | } |
878 | 921 | ||
879 | r.req = req; | ||
880 | dprintk(DEBUG_MUX, "mux %p tc %p tag %d rpc %p req %p\n", m, tc, | ||
881 | req->tag, &r, req); | ||
882 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); | 922 | err = wait_event_interruptible(r.wqueue, r.rcall != NULL || r.err < 0); |
883 | if (r.err < 0) | 923 | if (r.err < 0) |
884 | err = r.err; | 924 | err = r.err; |
885 | 925 | ||
886 | if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) { | 926 | if (err == -ERESTARTSYS && m->trans->status == Connected && m->err == 0) { |
887 | spin_lock(&m->lock); | 927 | if (v9fs_mux_flush_request(m, req)) { |
888 | req->tcall = NULL; | 928 | /* wait until we get response of the flush message */ |
889 | req->err = ERREQFLUSH; | 929 | do { |
890 | spin_unlock(&m->lock); | 930 | clear_thread_flag(TIF_SIGPENDING); |
931 | err = wait_event_interruptible(r.wqueue, | ||
932 | r.rcall || r.err); | ||
933 | } while (!r.rcall && !r.err && err==-ERESTARTSYS && | ||
934 | m->trans->status==Connected && !m->err); | ||
935 | } | ||
936 | sigpending = 1; | ||
937 | } | ||
891 | 938 | ||
892 | clear_thread_flag(TIF_SIGPENDING); | 939 | if (sigpending) { |
893 | v9fs_mux_flush_request(m, req); | ||
894 | spin_lock_irqsave(¤t->sighand->siglock, flags); | 940 | spin_lock_irqsave(¤t->sighand->siglock, flags); |
895 | recalc_sigpending(); | 941 | recalc_sigpending(); |
896 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); | 942 | spin_unlock_irqrestore(¤t->sighand->siglock, flags); |
897 | } | 943 | } |
898 | 944 | ||
899 | if (!err) { | 945 | if (rc) |
900 | if (r.rcall) | 946 | *rc = r.rcall; |
901 | dprintk(DEBUG_MUX, "got response id %d tag %d\n", | 947 | else |
902 | r.rcall->id, r.rcall->tag); | ||
903 | |||
904 | if (rc) | ||
905 | *rc = r.rcall; | ||
906 | else | ||
907 | kfree(r.rcall); | ||
908 | } else { | ||
909 | kfree(r.rcall); | 948 | kfree(r.rcall); |
910 | dprintk(DEBUG_MUX, "got error %d\n", err); | 949 | |
911 | if (err > 0) | 950 | v9fs_mux_free_request(m, req); |
912 | err = -EIO; | 951 | if (err > 0) |
913 | } | 952 | err = -EIO; |
914 | 953 | ||
915 | return err; | 954 | return err; |
916 | } | 955 | } |
@@ -951,12 +990,15 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err) | |||
951 | struct v9fs_req *req, *rtmp; | 990 | struct v9fs_req *req, *rtmp; |
952 | LIST_HEAD(cancel_list); | 991 | LIST_HEAD(cancel_list); |
953 | 992 | ||
954 | dprintk(DEBUG_MUX, "mux %p err %d\n", m, err); | 993 | dprintk(DEBUG_ERROR, "mux %p err %d\n", m, err); |
955 | m->err = err; | 994 | m->err = err; |
956 | spin_lock(&m->lock); | 995 | spin_lock(&m->lock); |
957 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { | 996 | list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { |
958 | list_move(&req->req_list, &cancel_list); | 997 | list_move(&req->req_list, &cancel_list); |
959 | } | 998 | } |
999 | list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { | ||
1000 | list_move(&req->req_list, &cancel_list); | ||
1001 | } | ||
960 | spin_unlock(&m->lock); | 1002 | spin_unlock(&m->lock); |
961 | 1003 | ||
962 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { | 1004 | list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { |
@@ -965,11 +1007,9 @@ void v9fs_mux_cancel(struct v9fs_mux_data *m, int err) | |||
965 | req->err = err; | 1007 | req->err = err; |
966 | 1008 | ||
967 | if (req->cb) | 1009 | if (req->cb) |
968 | (*req->cb) (req->cba, req->tcall, req->rcall, req->err); | 1010 | (*req->cb) (req, req->cba); |
969 | else | 1011 | else |
970 | kfree(req->rcall); | 1012 | kfree(req->rcall); |
971 | |||
972 | kfree(req); | ||
973 | } | 1013 | } |
974 | 1014 | ||
975 | wake_up(&m->equeue); | 1015 | wake_up(&m->equeue); |
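The mux.c rework above replaces the old per-request flush handling with an explicit flush state (None, Flushing, Flushed): a request whose reply has already arrived needs no flush, a request still sitting on the unsent list can simply be unlinked, and only a request that is already on the wire gets a Tflush. The following stand-alone sketch (invented names, ordinary user-space C, not the kernel code) models just that decision; the return value mirrors the new v9fs_mux_flush_request(), where non-zero means the caller still has to wait for the flush reply.

#include <stdio.h>

/*
 * Illustrative model only, not the kernel code above.  It sketches the
 * decision the reworked v9fs_mux_flush_request() makes: a request whose
 * reply already arrived needs no flush, an unsent request can simply be
 * unlinked, and only a request that is on the wire needs a Tflush.
 */
enum flush_state { FLUSH_NONE, FLUSH_FLUSHING, FLUSH_FLUSHED };

struct fake_req {
    int have_reply;      /* models req->rcall || req->err */
    int still_unsent;    /* models membership of m->unsent_req_list */
    enum flush_state flush;
};

/* Returns 1 when the caller must wait for the Tflush reply, 0 otherwise. */
static int flush_request(struct fake_req *req)
{
    if (req->have_reply)
        return 0;                    /* response already received */

    req->flush = FLUSH_FLUSHING;
    if (req->still_unsent) {
        req->flush = FLUSH_FLUSHED;  /* never hit the wire */
        return 0;
    }
    /* here the real code builds and sends a Tflush message */
    return 1;
}

int main(void)
{
    struct fake_req sent = { 0, 0, FLUSH_NONE };
    struct fake_req unsent = { 0, 1, FLUSH_NONE };

    printf("sent request needs Tflush: %d\n", flush_request(&sent));
    printf("unsent request needs Tflush: %d\n", flush_request(&unsent));
    return 0;
}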
diff --git a/fs/9p/mux.h b/fs/9p/mux.h index e90bfd32ea42..fb10c50186a1 100644 --- a/fs/9p/mux.h +++ b/fs/9p/mux.h | |||
@@ -24,6 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | struct v9fs_mux_data; | 26 | struct v9fs_mux_data; |
27 | struct v9fs_req; | ||
27 | 28 | ||
28 | /** | 29 | /** |
29 | * v9fs_mux_req_callback - callback function that is called when the | 30 | * v9fs_mux_req_callback - callback function that is called when the |
@@ -36,8 +37,7 @@ struct v9fs_mux_data; | |||
36 | * @rc - response call | 37 | * @err - error code (non-zero if error occurred) |
37 | * @err - error code (non-zero if error occurred) | 38 | */ |
38 | */ | 39 | */ |
39 | typedef void (*v9fs_mux_req_callback)(void *a, struct v9fs_fcall *tc, | 40 | typedef void (*v9fs_mux_req_callback)(struct v9fs_req *req, void *a); |
40 | struct v9fs_fcall *rc, int err); | ||
41 | 41 | ||
42 | int v9fs_mux_global_init(void); | 42 | int v9fs_mux_global_init(void); |
43 | void v9fs_mux_global_exit(void); | 43 | void v9fs_mux_global_exit(void); |
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c index 083dcfcd158e..1a8e46084f0e 100644 --- a/fs/9p/vfs_file.c +++ b/fs/9p/vfs_file.c | |||
@@ -72,11 +72,17 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
72 | return -ENOSPC; | 72 | return -ENOSPC; |
73 | } | 73 | } |
74 | 74 | ||
75 | err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, NULL); | 75 | err = v9fs_t_walk(v9ses, vfid->fid, fid, NULL, &fcall); |
76 | if (err < 0) { | 76 | if (err < 0) { |
77 | dprintk(DEBUG_ERROR, "rewalk didn't work\n"); | 77 | dprintk(DEBUG_ERROR, "rewalk didn't work\n"); |
78 | goto put_fid; | 78 | if (fcall && fcall->id == RWALK) |
79 | goto clunk_fid; | ||
80 | else { | ||
81 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
82 | goto free_fcall; | ||
83 | } | ||
79 | } | 84 | } |
85 | kfree(fcall); | ||
80 | 86 | ||
81 | /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ | 87 | /* TODO: do special things for O_EXCL, O_NOFOLLOW, O_SYNC */ |
82 | /* translate open mode appropriately */ | 88 | /* translate open mode appropriately */ |
@@ -109,8 +115,7 @@ int v9fs_file_open(struct inode *inode, struct file *file) | |||
109 | clunk_fid: | 115 | clunk_fid: |
110 | v9fs_t_clunk(v9ses, fid); | 116 | v9fs_t_clunk(v9ses, fid); |
111 | 117 | ||
112 | put_fid: | 118 | free_fcall: |
113 | v9fs_put_idpool(fid, &v9ses->fidpool); | ||
114 | kfree(fcall); | 119 | kfree(fcall); |
115 | 120 | ||
116 | return err; | 121 | return err; |
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 133db366d306..2cb87ba4b1c1 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
@@ -270,7 +270,10 @@ v9fs_create(struct v9fs_session_info *v9ses, u32 pfid, char *name, u32 perm, | |||
270 | err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall); | 270 | err = v9fs_t_walk(v9ses, pfid, fid, NULL, &fcall); |
271 | if (err < 0) { | 271 | if (err < 0) { |
272 | PRINT_FCALL_ERROR("clone error", fcall); | 272 | PRINT_FCALL_ERROR("clone error", fcall); |
273 | goto put_fid; | 273 | if (fcall && fcall->id == RWALK) |
274 | goto clunk_fid; | ||
275 | else | ||
276 | goto put_fid; | ||
274 | } | 277 | } |
275 | kfree(fcall); | 278 | kfree(fcall); |
276 | 279 | ||
@@ -322,6 +325,9 @@ v9fs_clone_walk(struct v9fs_session_info *v9ses, u32 fid, struct dentry *dentry) | |||
322 | &fcall); | 325 | &fcall); |
323 | 326 | ||
324 | if (err < 0) { | 327 | if (err < 0) { |
328 | if (fcall && fcall->id == RWALK) | ||
329 | goto clunk_fid; | ||
330 | |||
325 | PRINT_FCALL_ERROR("walk error", fcall); | 331 | PRINT_FCALL_ERROR("walk error", fcall); |
326 | v9fs_put_idpool(nfid, &v9ses->fidpool); | 332 | v9fs_put_idpool(nfid, &v9ses->fidpool); |
327 | goto error; | 333 | goto error; |
@@ -640,19 +646,26 @@ static struct dentry *v9fs_vfs_lookup(struct inode *dir, struct dentry *dentry, | |||
640 | } | 646 | } |
641 | 647 | ||
642 | result = v9fs_t_walk(v9ses, dirfidnum, newfid, | 648 | result = v9fs_t_walk(v9ses, dirfidnum, newfid, |
643 | (char *)dentry->d_name.name, NULL); | 649 | (char *)dentry->d_name.name, &fcall); |
650 | |||
644 | if (result < 0) { | 651 | if (result < 0) { |
645 | v9fs_put_idpool(newfid, &v9ses->fidpool); | 652 | if (fcall && fcall->id == RWALK) |
653 | v9fs_t_clunk(v9ses, newfid); | ||
654 | else | ||
655 | v9fs_put_idpool(newfid, &v9ses->fidpool); | ||
656 | |||
646 | if (result == -ENOENT) { | 657 | if (result == -ENOENT) { |
647 | d_add(dentry, NULL); | 658 | d_add(dentry, NULL); |
648 | dprintk(DEBUG_VFS, | 659 | dprintk(DEBUG_VFS, |
649 | "Return negative dentry %p count %d\n", | 660 | "Return negative dentry %p count %d\n", |
650 | dentry, atomic_read(&dentry->d_count)); | 661 | dentry, atomic_read(&dentry->d_count)); |
662 | kfree(fcall); | ||
651 | return NULL; | 663 | return NULL; |
652 | } | 664 | } |
653 | dprintk(DEBUG_ERROR, "walk error:%d\n", result); | 665 | dprintk(DEBUG_ERROR, "walk error:%d\n", result); |
654 | goto FreeFcall; | 666 | goto FreeFcall; |
655 | } | 667 | } |
668 | kfree(fcall); | ||
656 | 669 | ||
657 | result = v9fs_t_stat(v9ses, newfid, &fcall); | 670 | result = v9fs_t_stat(v9ses, newfid, &fcall); |
658 | if (result < 0) { | 671 | if (result < 0) { |
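The walk-failure hunks in vfs_file.c and vfs_inode.c above all apply the same rule, so a compact illustration may help. The sketch below uses made-up names and plain C; it only models the cleanup decision, not the 9P client itself: when the server already answered the failed Twalk with an Rwalk, the new fid exists on the server and has to be clunked, otherwise only the locally reserved id is returned to the pool.

#include <stdio.h>

/*
 * Illustrative sketch of the error-path rule the hunks above apply
 * (names are invented).  The reply id decides whether the fid has
 * server-side state that must be clunked, or whether returning the
 * local id to the pool is enough.
 */
enum reply_id { REPLY_NONE, REPLY_RWALK, REPLY_RERROR };

static void cleanup_failed_walk(enum reply_id reply, int fid)
{
    if (reply == REPLY_RWALK)
        printf("clunk fid %d (server-side state exists)\n", fid);
    else
        printf("return fid %d to the local pool\n", fid);
}

int main(void)
{
    cleanup_failed_walk(REPLY_RWALK, 7);
    cleanup_failed_walk(REPLY_RERROR, 8);
    return 0;
}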
diff --git a/fs/Makefile b/fs/Makefile index 83bf478e786b..078d3d1191a5 100644 --- a/fs/Makefile +++ b/fs/Makefile | |||
@@ -45,6 +45,7 @@ obj-$(CONFIG_DNOTIFY) += dnotify.o | |||
45 | obj-$(CONFIG_PROC_FS) += proc/ | 45 | obj-$(CONFIG_PROC_FS) += proc/ |
46 | obj-y += partitions/ | 46 | obj-y += partitions/ |
47 | obj-$(CONFIG_SYSFS) += sysfs/ | 47 | obj-$(CONFIG_SYSFS) += sysfs/ |
48 | obj-$(CONFIG_CONFIGFS_FS) += configfs/ | ||
48 | obj-y += devpts/ | 49 | obj-y += devpts/ |
49 | 50 | ||
50 | obj-$(CONFIG_PROFILING) += dcookies.o | 51 | obj-$(CONFIG_PROFILING) += dcookies.o |
@@ -100,5 +101,4 @@ obj-$(CONFIG_BEFS_FS) += befs/ | |||
100 | obj-$(CONFIG_HOSTFS) += hostfs/ | 101 | obj-$(CONFIG_HOSTFS) += hostfs/ |
101 | obj-$(CONFIG_HPPFS) += hppfs/ | 102 | obj-$(CONFIG_HPPFS) += hppfs/ |
102 | obj-$(CONFIG_DEBUG_FS) += debugfs/ | 103 | obj-$(CONFIG_DEBUG_FS) += debugfs/ |
103 | obj-$(CONFIG_CONFIGFS_FS) += configfs/ | ||
104 | obj-$(CONFIG_OCFS2_FS) += ocfs2/ | 104 | obj-$(CONFIG_OCFS2_FS) += ocfs2/ |
diff --git a/fs/autofs4/autofs_i.h b/fs/autofs4/autofs_i.h index 57c4903614e5..d6603d02304c 100644 --- a/fs/autofs4/autofs_i.h +++ b/fs/autofs4/autofs_i.h | |||
@@ -74,8 +74,8 @@ struct autofs_wait_queue { | |||
74 | struct autofs_wait_queue *next; | 74 | struct autofs_wait_queue *next; |
75 | autofs_wqt_t wait_queue_token; | 75 | autofs_wqt_t wait_queue_token; |
76 | /* We use the following to see what we are waiting for */ | 76 | /* We use the following to see what we are waiting for */ |
77 | int hash; | 77 | unsigned int hash; |
78 | int len; | 78 | unsigned int len; |
79 | char *name; | 79 | char *name; |
80 | u32 dev; | 80 | u32 dev; |
81 | u64 ino; | 81 | u64 ino; |
@@ -85,7 +85,6 @@ struct autofs_wait_queue { | |||
85 | pid_t tgid; | 85 | pid_t tgid; |
86 | /* This is for status reporting upon return */ | 86 | /* This is for status reporting upon return */ |
87 | int status; | 87 | int status; |
88 | atomic_t notify; | ||
89 | atomic_t wait_ctr; | 88 | atomic_t wait_ctr; |
90 | }; | 89 | }; |
91 | 90 | ||
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 84e030c8ddd0..5100f984783f 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -327,6 +327,7 @@ static int try_to_fill_dentry(struct dentry *dentry, int flags) | |||
327 | static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) | 327 | static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) |
328 | { | 328 | { |
329 | struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); | 329 | struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb); |
330 | struct autofs_info *ino = autofs4_dentry_ino(dentry); | ||
330 | int oz_mode = autofs4_oz_mode(sbi); | 331 | int oz_mode = autofs4_oz_mode(sbi); |
331 | unsigned int lookup_type; | 332 | unsigned int lookup_type; |
332 | int status; | 333 | int status; |
@@ -340,13 +341,8 @@ static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) | |||
340 | if (oz_mode || !lookup_type) | 341 | if (oz_mode || !lookup_type) |
341 | goto done; | 342 | goto done; |
342 | 343 | ||
343 | /* | 344 | /* If an expire request is pending wait for it. */ |
344 | * If a request is pending wait for it. | 345 | if (ino && (ino->flags & AUTOFS_INF_EXPIRING)) { |
345 | * If it's a mount then it won't be expired till at least | ||
346 | * a little later and if it's an expire then we might need | ||
347 | * to mount it again. | ||
348 | */ | ||
349 | if (autofs4_ispending(dentry)) { | ||
350 | DPRINTK("waiting for active request %p name=%.*s", | 346 | DPRINTK("waiting for active request %p name=%.*s", |
351 | dentry, dentry->d_name.len, dentry->d_name.name); | 347 | dentry, dentry->d_name.len, dentry->d_name.name); |
352 | 348 | ||
diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c index 142ab6aa2aa1..ce103e7b0bc3 100644 --- a/fs/autofs4/waitq.c +++ b/fs/autofs4/waitq.c | |||
@@ -189,14 +189,30 @@ static int autofs4_getpath(struct autofs_sb_info *sbi, | |||
189 | return len; | 189 | return len; |
190 | } | 190 | } |
191 | 191 | ||
192 | static struct autofs_wait_queue * | ||
193 | autofs4_find_wait(struct autofs_sb_info *sbi, | ||
194 | char *name, unsigned int hash, unsigned int len) | ||
195 | { | ||
196 | struct autofs_wait_queue *wq; | ||
197 | |||
198 | for (wq = sbi->queues; wq; wq = wq->next) { | ||
199 | if (wq->hash == hash && | ||
200 | wq->len == len && | ||
201 | wq->name && !memcmp(wq->name, name, len)) | ||
202 | break; | ||
203 | } | ||
204 | return wq; | ||
205 | } | ||
206 | |||
192 | int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | 207 | int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, |
193 | enum autofs_notify notify) | 208 | enum autofs_notify notify) |
194 | { | 209 | { |
210 | struct autofs_info *ino; | ||
195 | struct autofs_wait_queue *wq; | 211 | struct autofs_wait_queue *wq; |
196 | char *name; | 212 | char *name; |
197 | unsigned int len = 0; | 213 | unsigned int len = 0; |
198 | unsigned int hash = 0; | 214 | unsigned int hash = 0; |
199 | int status; | 215 | int status, type; |
200 | 216 | ||
201 | /* In catatonic mode, we don't wait for nobody */ | 217 | /* In catatonic mode, we don't wait for nobody */ |
202 | if (sbi->catatonic) | 218 | if (sbi->catatonic) |
@@ -223,21 +239,41 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
223 | return -EINTR; | 239 | return -EINTR; |
224 | } | 240 | } |
225 | 241 | ||
226 | for (wq = sbi->queues ; wq ; wq = wq->next) { | 242 | wq = autofs4_find_wait(sbi, name, hash, len); |
227 | if (wq->hash == dentry->d_name.hash && | 243 | ino = autofs4_dentry_ino(dentry); |
228 | wq->len == len && | 244 | if (!wq && ino && notify == NFY_NONE) { |
229 | wq->name && !memcmp(wq->name, name, len)) | 245 | /* |
230 | break; | 246 | * Either we've beaten the pending expire to post its |
231 | } | 247 | * wait or it finished while we waited on the mutex. |
248 | * So we need to wait till either the wait appears | ||
249 | * or the expire finishes. | ||
250 | */ | ||
251 | |||
252 | while (ino->flags & AUTOFS_INF_EXPIRING) { | ||
253 | mutex_unlock(&sbi->wq_mutex); | ||
254 | schedule_timeout_interruptible(HZ/10); | ||
255 | if (mutex_lock_interruptible(&sbi->wq_mutex)) { | ||
256 | kfree(name); | ||
257 | return -EINTR; | ||
258 | } | ||
259 | wq = autofs4_find_wait(sbi, name, hash, len); | ||
260 | if (wq) | ||
261 | break; | ||
262 | } | ||
232 | 263 | ||
233 | if (!wq) { | 264 | /* |
234 | /* Can't wait for an expire if there's no mount */ | 265 | * Not ideal but the status has already gone. Of the two |
235 | if (notify == NFY_NONE && !d_mountpoint(dentry)) { | 266 | * cases where we wait on NFY_NONE neither depend on the |
267 | * return status of the wait. | ||
268 | */ | ||
269 | if (!wq) { | ||
236 | kfree(name); | 270 | kfree(name); |
237 | mutex_unlock(&sbi->wq_mutex); | 271 | mutex_unlock(&sbi->wq_mutex); |
238 | return -ENOENT; | 272 | return 0; |
239 | } | 273 | } |
274 | } | ||
240 | 275 | ||
276 | if (!wq) { | ||
241 | /* Create a new wait queue */ | 277 | /* Create a new wait queue */ |
242 | wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); | 278 | wq = kmalloc(sizeof(struct autofs_wait_queue),GFP_KERNEL); |
243 | if (!wq) { | 279 | if (!wq) { |
@@ -263,20 +299,7 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
263 | wq->tgid = current->tgid; | 299 | wq->tgid = current->tgid; |
264 | wq->status = -EINTR; /* Status return if interrupted */ | 300 | wq->status = -EINTR; /* Status return if interrupted */ |
265 | atomic_set(&wq->wait_ctr, 2); | 301 | atomic_set(&wq->wait_ctr, 2); |
266 | atomic_set(&wq->notify, 1); | ||
267 | mutex_unlock(&sbi->wq_mutex); | ||
268 | } else { | ||
269 | atomic_inc(&wq->wait_ctr); | ||
270 | mutex_unlock(&sbi->wq_mutex); | 302 | mutex_unlock(&sbi->wq_mutex); |
271 | kfree(name); | ||
272 | DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", | ||
273 | (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); | ||
274 | } | ||
275 | |||
276 | if (notify != NFY_NONE && atomic_read(&wq->notify)) { | ||
277 | int type; | ||
278 | |||
279 | atomic_dec(&wq->notify); | ||
280 | 303 | ||
281 | if (sbi->version < 5) { | 304 | if (sbi->version < 5) { |
282 | if (notify == NFY_MOUNT) | 305 | if (notify == NFY_MOUNT) |
@@ -299,6 +322,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry, | |||
299 | 322 | ||
300 | /* autofs4_notify_daemon() may block */ | 323 | /* autofs4_notify_daemon() may block */ |
301 | autofs4_notify_daemon(sbi, wq, type); | 324 | autofs4_notify_daemon(sbi, wq, type); |
325 | } else { | ||
326 | atomic_inc(&wq->wait_ctr); | ||
327 | mutex_unlock(&sbi->wq_mutex); | ||
328 | kfree(name); | ||
329 | DPRINTK("existing wait id = 0x%08lx, name = %.*s, nfy=%d", | ||
330 | (unsigned long) wq->wait_queue_token, wq->len, wq->name, notify); | ||
302 | } | 331 | } |
303 | 332 | ||
304 | /* wq->name is NULL if and only if the lock is already released */ | 333 | /* wq->name is NULL if and only if the lock is already released */ |
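The waitq.c rework above factors the queue scan into autofs4_find_wait() and adds an NFY_NONE path that merely polls until the expire either posts its wait or finishes. The stand-alone model below (invented names, no real locking or timers) sketches that control flow; in the kernel the loop drops wq_mutex and sleeps for roughly HZ/10 between checks.

#include <stdio.h>

/*
 * Toy model of the new NFY_NONE path: poll until either the daemon's
 * wait entry shows up or the EXPIRING flag is cleared, in which case
 * there is nothing left to wait for and the caller just returns.
 */
struct state {
    int expiring;        /* models AUTOFS_INF_EXPIRING */
    int wait_posted;     /* models autofs4_find_wait() succeeding */
    int ticks;           /* how long until the expire completes */
};

static int find_wait(struct state *s)
{
    return s->wait_posted;
}

static int wait_for_expire(struct state *s)
{
    int wq = find_wait(s);

    while (!wq && s->expiring) {
        /* the real code drops wq_mutex and sleeps here */
        if (--s->ticks <= 0)
            s->expiring = 0;     /* expire finished on its own */
        wq = find_wait(s);
    }
    return wq;   /* 0: nothing to wait on, caller returns success */
}

int main(void)
{
    struct state s = { 1, 0, 3 };

    printf("found wait entry: %d\n", wait_for_expire(&s));
    return 0;
}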
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 69f44dcdb0b4..b1c902e319c1 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -428,7 +428,6 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
428 | loff_t fpos; | 428 | loff_t fpos; |
429 | unsigned long start_code, end_code; | 429 | unsigned long start_code, end_code; |
430 | int ret; | 430 | int ret; |
431 | int exec_fileno; | ||
432 | 431 | ||
433 | hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */ | 432 | hdr = ((struct flat_hdr *) bprm->buf); /* exec-header */ |
434 | inode = bprm->file->f_dentry->d_inode; | 433 | inode = bprm->file->f_dentry->d_inode; |
@@ -502,21 +501,12 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
502 | goto err; | 501 | goto err; |
503 | } | 502 | } |
504 | 503 | ||
505 | /* check file descriptor */ | ||
506 | exec_fileno = get_unused_fd(); | ||
507 | if (exec_fileno < 0) { | ||
508 | ret = -EMFILE; | ||
509 | goto err; | ||
510 | } | ||
511 | get_file(bprm->file); | ||
512 | fd_install(exec_fileno, bprm->file); | ||
513 | |||
514 | /* Flush all traces of the currently running executable */ | 504 | /* Flush all traces of the currently running executable */ |
515 | if (id == 0) { | 505 | if (id == 0) { |
516 | result = flush_old_exec(bprm); | 506 | result = flush_old_exec(bprm); |
517 | if (result) { | 507 | if (result) { |
518 | ret = result; | 508 | ret = result; |
519 | goto err_close; | 509 | goto err; |
520 | } | 510 | } |
521 | 511 | ||
522 | /* OK, This is the point of no return */ | 512 | /* OK, This is the point of no return */ |
@@ -548,7 +538,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
548 | textpos = (unsigned long) -ENOMEM; | 538 | textpos = (unsigned long) -ENOMEM; |
549 | printk("Unable to mmap process text, errno %d\n", (int)-textpos); | 539 | printk("Unable to mmap process text, errno %d\n", (int)-textpos); |
550 | ret = textpos; | 540 | ret = textpos; |
551 | goto err_close; | 541 | goto err; |
552 | } | 542 | } |
553 | 543 | ||
554 | down_write(¤t->mm->mmap_sem); | 544 | down_write(¤t->mm->mmap_sem); |
@@ -564,7 +554,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
564 | (int)-datapos); | 554 | (int)-datapos); |
565 | do_munmap(current->mm, textpos, text_len); | 555 | do_munmap(current->mm, textpos, text_len); |
566 | ret = realdatastart; | 556 | ret = realdatastart; |
567 | goto err_close; | 557 | goto err; |
568 | } | 558 | } |
569 | datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long); | 559 | datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long); |
570 | 560 | ||
@@ -587,7 +577,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
587 | do_munmap(current->mm, textpos, text_len); | 577 | do_munmap(current->mm, textpos, text_len); |
588 | do_munmap(current->mm, realdatastart, data_len + extra); | 578 | do_munmap(current->mm, realdatastart, data_len + extra); |
589 | ret = result; | 579 | ret = result; |
590 | goto err_close; | 580 | goto err; |
591 | } | 581 | } |
592 | 582 | ||
593 | reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len)); | 583 | reloc = (unsigned long *) (datapos+(ntohl(hdr->reloc_start)-text_len)); |
@@ -606,7 +596,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
606 | printk("Unable to allocate RAM for process text/data, errno %d\n", | 596 | printk("Unable to allocate RAM for process text/data, errno %d\n", |
607 | (int)-textpos); | 597 | (int)-textpos); |
608 | ret = textpos; | 598 | ret = textpos; |
609 | goto err_close; | 599 | goto err; |
610 | } | 600 | } |
611 | 601 | ||
612 | realdatastart = textpos + ntohl(hdr->data_start); | 602 | realdatastart = textpos + ntohl(hdr->data_start); |
@@ -652,7 +642,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
652 | do_munmap(current->mm, textpos, text_len + data_len + extra + | 642 | do_munmap(current->mm, textpos, text_len + data_len + extra + |
653 | MAX_SHARED_LIBS * sizeof(unsigned long)); | 643 | MAX_SHARED_LIBS * sizeof(unsigned long)); |
654 | ret = result; | 644 | ret = result; |
655 | goto err_close; | 645 | goto err; |
656 | } | 646 | } |
657 | } | 647 | } |
658 | 648 | ||
@@ -717,7 +707,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
717 | addr = calc_reloc(*rp, libinfo, id, 0); | 707 | addr = calc_reloc(*rp, libinfo, id, 0); |
718 | if (addr == RELOC_FAILED) { | 708 | if (addr == RELOC_FAILED) { |
719 | ret = -ENOEXEC; | 709 | ret = -ENOEXEC; |
720 | goto err_close; | 710 | goto err; |
721 | } | 711 | } |
722 | *rp = addr; | 712 | *rp = addr; |
723 | } | 713 | } |
@@ -747,7 +737,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
747 | rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1); | 737 | rp = (unsigned long *) calc_reloc(addr, libinfo, id, 1); |
748 | if (rp == (unsigned long *)RELOC_FAILED) { | 738 | if (rp == (unsigned long *)RELOC_FAILED) { |
749 | ret = -ENOEXEC; | 739 | ret = -ENOEXEC; |
750 | goto err_close; | 740 | goto err; |
751 | } | 741 | } |
752 | 742 | ||
753 | /* Get the pointer's value. */ | 743 | /* Get the pointer's value. */ |
@@ -762,7 +752,7 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
762 | addr = calc_reloc(addr, libinfo, id, 0); | 752 | addr = calc_reloc(addr, libinfo, id, 0); |
763 | if (addr == RELOC_FAILED) { | 753 | if (addr == RELOC_FAILED) { |
764 | ret = -ENOEXEC; | 754 | ret = -ENOEXEC; |
765 | goto err_close; | 755 | goto err; |
766 | } | 756 | } |
767 | 757 | ||
768 | /* Write back the relocated pointer. */ | 758 | /* Write back the relocated pointer. */ |
@@ -783,8 +773,6 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
783 | stack_len); | 773 | stack_len); |
784 | 774 | ||
785 | return 0; | 775 | return 0; |
786 | err_close: | ||
787 | sys_close(exec_fileno); | ||
788 | err: | 776 | err: |
789 | return ret; | 777 | return ret; |
790 | } | 778 | } |
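The binfmt_flat change above removes the spare file descriptor, and with it the err_close label, so every failure now unwinds through the single err label. As a tiny, purely hypothetical illustration of the underlying idiom (one cleanup label per resource still held at that point), consider:

#include <stdio.h>
#include <stdlib.h>

/*
 * Toy example, not the loader itself: once the extra resource is gone,
 * the extra cleanup label can go with it, leaving one label per
 * resource that is actually held.
 */
static int do_work(int fail_step)
{
    char *buf = malloc(64);
    if (!buf)
        return -1;

    if (fail_step == 1)
        goto err_free;           /* only buf is held here */

    /* ... more work that only depends on buf ... */
    free(buf);
    return 0;

err_free:
    free(buf);
    return -1;
}

int main(void)
{
    printf("ok: %d, failure: %d\n", do_work(0), do_work(1));
    return 0;
}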
diff --git a/fs/compat.c b/fs/compat.c index 970888aad843..b1f64786a613 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -1913,7 +1913,7 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, | |||
1913 | } | 1913 | } |
1914 | 1914 | ||
1915 | if (sigmask) { | 1915 | if (sigmask) { |
1916 | if (sigsetsize |= sizeof(compat_sigset_t)) | 1916 | if (sigsetsize != sizeof(compat_sigset_t)) |
1917 | return -EINVAL; | 1917 | return -EINVAL; |
1918 | if (copy_from_user(&ss32, sigmask, sizeof(ss32))) | 1918 | if (copy_from_user(&ss32, sigmask, sizeof(ss32))) |
1919 | return -EFAULT; | 1919 | return -EFAULT; |
@@ -2030,109 +2030,115 @@ union compat_nfsctl_res { | |||
2030 | struct knfsd_fh cr32_getfs; | 2030 | struct knfsd_fh cr32_getfs; |
2031 | }; | 2031 | }; |
2032 | 2032 | ||
2033 | static int compat_nfs_svc_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) | 2033 | static int compat_nfs_svc_trans(struct nfsctl_arg *karg, |
2034 | struct compat_nfsctl_arg __user *arg) | ||
2034 | { | 2035 | { |
2035 | int err; | 2036 | if (!access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)) || |
2036 | 2037 | get_user(karg->ca_version, &arg->ca32_version) || | |
2037 | err = access_ok(VERIFY_READ, &arg->ca32_svc, sizeof(arg->ca32_svc)); | 2038 | __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port) || |
2038 | err |= get_user(karg->ca_version, &arg->ca32_version); | 2039 | __get_user(karg->ca_svc.svc_nthreads, |
2039 | err |= __get_user(karg->ca_svc.svc_port, &arg->ca32_svc.svc32_port); | 2040 | &arg->ca32_svc.svc32_nthreads)) |
2040 | err |= __get_user(karg->ca_svc.svc_nthreads, &arg->ca32_svc.svc32_nthreads); | 2041 | return -EFAULT; |
2041 | return (err) ? -EFAULT : 0; | 2042 | return 0; |
2042 | } | 2043 | } |
2043 | 2044 | ||
2044 | static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) | 2045 | static int compat_nfs_clnt_trans(struct nfsctl_arg *karg, |
2045 | { | 2046 | struct compat_nfsctl_arg __user *arg) |
2046 | int err; | 2047 | { |
2047 | 2048 | if (!access_ok(VERIFY_READ, &arg->ca32_client, | |
2048 | err = access_ok(VERIFY_READ, &arg->ca32_client, sizeof(arg->ca32_client)); | 2049 | sizeof(arg->ca32_client)) || |
2049 | err |= get_user(karg->ca_version, &arg->ca32_version); | 2050 | get_user(karg->ca_version, &arg->ca32_version) || |
2050 | err |= __copy_from_user(&karg->ca_client.cl_ident[0], | 2051 | __copy_from_user(&karg->ca_client.cl_ident[0], |
2051 | &arg->ca32_client.cl32_ident[0], | 2052 | &arg->ca32_client.cl32_ident[0], |
2052 | NFSCLNT_IDMAX); | 2053 | NFSCLNT_IDMAX) || |
2053 | err |= __get_user(karg->ca_client.cl_naddr, &arg->ca32_client.cl32_naddr); | 2054 | __get_user(karg->ca_client.cl_naddr, |
2054 | err |= __copy_from_user(&karg->ca_client.cl_addrlist[0], | 2055 | &arg->ca32_client.cl32_naddr) || |
2055 | &arg->ca32_client.cl32_addrlist[0], | 2056 | __copy_from_user(&karg->ca_client.cl_addrlist[0], |
2056 | (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)); | 2057 | &arg->ca32_client.cl32_addrlist[0], |
2057 | err |= __get_user(karg->ca_client.cl_fhkeytype, | 2058 | (sizeof(struct in_addr) * NFSCLNT_ADDRMAX)) || |
2058 | &arg->ca32_client.cl32_fhkeytype); | 2059 | __get_user(karg->ca_client.cl_fhkeytype, |
2059 | err |= __get_user(karg->ca_client.cl_fhkeylen, | 2060 | &arg->ca32_client.cl32_fhkeytype) || |
2060 | &arg->ca32_client.cl32_fhkeylen); | 2061 | __get_user(karg->ca_client.cl_fhkeylen, |
2061 | err |= __copy_from_user(&karg->ca_client.cl_fhkey[0], | 2062 | &arg->ca32_client.cl32_fhkeylen) || |
2062 | &arg->ca32_client.cl32_fhkey[0], | 2063 | __copy_from_user(&karg->ca_client.cl_fhkey[0], |
2063 | NFSCLNT_KEYMAX); | 2064 | &arg->ca32_client.cl32_fhkey[0], |
2065 | NFSCLNT_KEYMAX)) | ||
2066 | return -EFAULT; | ||
2064 | 2067 | ||
2065 | return (err) ? -EFAULT : 0; | 2068 | return 0; |
2066 | } | 2069 | } |
2067 | 2070 | ||
2068 | static int compat_nfs_exp_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) | 2071 | static int compat_nfs_exp_trans(struct nfsctl_arg *karg, |
2069 | { | 2072 | struct compat_nfsctl_arg __user *arg) |
2070 | int err; | 2073 | { |
2071 | 2074 | if (!access_ok(VERIFY_READ, &arg->ca32_export, | |
2072 | err = access_ok(VERIFY_READ, &arg->ca32_export, sizeof(arg->ca32_export)); | 2075 | sizeof(arg->ca32_export)) || |
2073 | err |= get_user(karg->ca_version, &arg->ca32_version); | 2076 | get_user(karg->ca_version, &arg->ca32_version) || |
2074 | err |= __copy_from_user(&karg->ca_export.ex_client[0], | 2077 | __copy_from_user(&karg->ca_export.ex_client[0], |
2075 | &arg->ca32_export.ex32_client[0], | 2078 | &arg->ca32_export.ex32_client[0], |
2076 | NFSCLNT_IDMAX); | 2079 | NFSCLNT_IDMAX) || |
2077 | err |= __copy_from_user(&karg->ca_export.ex_path[0], | 2080 | __copy_from_user(&karg->ca_export.ex_path[0], |
2078 | &arg->ca32_export.ex32_path[0], | 2081 | &arg->ca32_export.ex32_path[0], |
2079 | NFS_MAXPATHLEN); | 2082 | NFS_MAXPATHLEN) || |
2080 | err |= __get_user(karg->ca_export.ex_dev, | 2083 | __get_user(karg->ca_export.ex_dev, |
2081 | &arg->ca32_export.ex32_dev); | 2084 | &arg->ca32_export.ex32_dev) || |
2082 | err |= __get_user(karg->ca_export.ex_ino, | 2085 | __get_user(karg->ca_export.ex_ino, |
2083 | &arg->ca32_export.ex32_ino); | 2086 | &arg->ca32_export.ex32_ino) || |
2084 | err |= __get_user(karg->ca_export.ex_flags, | 2087 | __get_user(karg->ca_export.ex_flags, |
2085 | &arg->ca32_export.ex32_flags); | 2088 | &arg->ca32_export.ex32_flags) || |
2086 | err |= __get_user(karg->ca_export.ex_anon_uid, | 2089 | __get_user(karg->ca_export.ex_anon_uid, |
2087 | &arg->ca32_export.ex32_anon_uid); | 2090 | &arg->ca32_export.ex32_anon_uid) || |
2088 | err |= __get_user(karg->ca_export.ex_anon_gid, | 2091 | __get_user(karg->ca_export.ex_anon_gid, |
2089 | &arg->ca32_export.ex32_anon_gid); | 2092 | &arg->ca32_export.ex32_anon_gid)) |
2093 | return -EFAULT; | ||
2090 | SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid); | 2094 | SET_UID(karg->ca_export.ex_anon_uid, karg->ca_export.ex_anon_uid); |
2091 | SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid); | 2095 | SET_GID(karg->ca_export.ex_anon_gid, karg->ca_export.ex_anon_gid); |
2092 | 2096 | ||
2093 | return (err) ? -EFAULT : 0; | 2097 | return 0; |
2094 | } | 2098 | } |
2095 | 2099 | ||
2096 | static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) | 2100 | static int compat_nfs_getfd_trans(struct nfsctl_arg *karg, |
2097 | { | 2101 | struct compat_nfsctl_arg __user *arg) |
2098 | int err; | 2102 | { |
2099 | 2103 | if (!access_ok(VERIFY_READ, &arg->ca32_getfd, | |
2100 | err = access_ok(VERIFY_READ, &arg->ca32_getfd, sizeof(arg->ca32_getfd)); | 2104 | sizeof(arg->ca32_getfd)) || |
2101 | err |= get_user(karg->ca_version, &arg->ca32_version); | 2105 | get_user(karg->ca_version, &arg->ca32_version) || |
2102 | err |= __copy_from_user(&karg->ca_getfd.gd_addr, | 2106 | __copy_from_user(&karg->ca_getfd.gd_addr, |
2103 | &arg->ca32_getfd.gd32_addr, | 2107 | &arg->ca32_getfd.gd32_addr, |
2104 | (sizeof(struct sockaddr))); | 2108 | (sizeof(struct sockaddr))) || |
2105 | err |= __copy_from_user(&karg->ca_getfd.gd_path, | 2109 | __copy_from_user(&karg->ca_getfd.gd_path, |
2106 | &arg->ca32_getfd.gd32_path, | 2110 | &arg->ca32_getfd.gd32_path, |
2107 | (NFS_MAXPATHLEN+1)); | 2111 | (NFS_MAXPATHLEN+1)) || |
2108 | err |= __get_user(karg->ca_getfd.gd_version, | 2112 | __get_user(karg->ca_getfd.gd_version, |
2109 | &arg->ca32_getfd.gd32_version); | 2113 | &arg->ca32_getfd.gd32_version)) |
2114 | return -EFAULT; | ||
2110 | 2115 | ||
2111 | return (err) ? -EFAULT : 0; | 2116 | return 0; |
2112 | } | 2117 | } |
2113 | 2118 | ||
2114 | static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, struct compat_nfsctl_arg __user *arg) | 2119 | static int compat_nfs_getfs_trans(struct nfsctl_arg *karg, |
2120 | struct compat_nfsctl_arg __user *arg) | ||
2115 | { | 2121 | { |
2116 | int err; | 2122 | if (!access_ok(VERIFY_READ,&arg->ca32_getfs,sizeof(arg->ca32_getfs)) || |
2117 | 2123 | get_user(karg->ca_version, &arg->ca32_version) || | |
2118 | err = access_ok(VERIFY_READ, &arg->ca32_getfs, sizeof(arg->ca32_getfs)); | 2124 | __copy_from_user(&karg->ca_getfs.gd_addr, |
2119 | err |= get_user(karg->ca_version, &arg->ca32_version); | 2125 | &arg->ca32_getfs.gd32_addr, |
2120 | err |= __copy_from_user(&karg->ca_getfs.gd_addr, | 2126 | (sizeof(struct sockaddr))) || |
2121 | &arg->ca32_getfs.gd32_addr, | 2127 | __copy_from_user(&karg->ca_getfs.gd_path, |
2122 | (sizeof(struct sockaddr))); | 2128 | &arg->ca32_getfs.gd32_path, |
2123 | err |= __copy_from_user(&karg->ca_getfs.gd_path, | 2129 | (NFS_MAXPATHLEN+1)) || |
2124 | &arg->ca32_getfs.gd32_path, | 2130 | __get_user(karg->ca_getfs.gd_maxlen, |
2125 | (NFS_MAXPATHLEN+1)); | 2131 | &arg->ca32_getfs.gd32_maxlen)) |
2126 | err |= __get_user(karg->ca_getfs.gd_maxlen, | 2132 | return -EFAULT; |
2127 | &arg->ca32_getfs.gd32_maxlen); | ||
2128 | 2133 | ||
2129 | return (err) ? -EFAULT : 0; | 2134 | return 0; |
2130 | } | 2135 | } |
2131 | 2136 | ||
2132 | /* This really doesn't need translations, we are only passing | 2137 | /* This really doesn't need translations, we are only passing |
2133 | * back a union which contains opaque nfs file handle data. | 2138 | * back a union which contains opaque nfs file handle data. |
2134 | */ | 2139 | */ |
2135 | static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsctl_res __user *res) | 2140 | static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, |
2141 | union compat_nfsctl_res __user *res) | ||
2136 | { | 2142 | { |
2137 | int err; | 2143 | int err; |
2138 | 2144 | ||
@@ -2141,8 +2147,9 @@ static int compat_nfs_getfh_res_trans(union nfsctl_res *kres, union compat_nfsct | |||
2141 | return (err) ? -EFAULT : 0; | 2147 | return (err) ? -EFAULT : 0; |
2142 | } | 2148 | } |
2143 | 2149 | ||
2144 | asmlinkage long compat_sys_nfsservctl(int cmd, struct compat_nfsctl_arg __user *arg, | 2150 | asmlinkage long compat_sys_nfsservctl(int cmd, |
2145 | union compat_nfsctl_res __user *res) | 2151 | struct compat_nfsctl_arg __user *arg, |
2152 | union compat_nfsctl_res __user *res) | ||
2146 | { | 2153 | { |
2147 | struct nfsctl_arg *karg; | 2154 | struct nfsctl_arg *karg; |
2148 | union nfsctl_res *kres; | 2155 | union nfsctl_res *kres; |
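The compat nfsctl helpers above switch from accumulating results with |= to short-circuit || chains. access_ok() reports success with a non-zero value, while the __get_user()/__copy_from_user() family return 0 on success, so OR-ing both into one err variable mixes two conventions and the old chain could report -EFAULT even when every copy succeeded. A small user-space model (stub functions, not the real uaccess API) shows the difference; with the rewritten form the first failing check ends the chain and -EFAULT is returned exactly when something actually failed.

#include <stdio.h>

/*
 * Stub model of the two error-checking styles.  stub_access_ok()
 * returns non-zero on success (like access_ok), stub_get_user()
 * returns 0 on success and -EFAULT on failure (like get_user).
 */
#define EFAULT 14

static int stub_access_ok(int ok) { return ok; }               /* 1 = readable */
static int stub_get_user(int ok)  { return ok ? 0 : -EFAULT; }

static int old_style(int readable, int copy_ok)
{
    int err;

    err = stub_access_ok(readable);      /* 1 on success! */
    err |= stub_get_user(copy_ok);
    return err ? -EFAULT : 0;            /* fails even when all is well */
}

static int new_style(int readable, int copy_ok)
{
    if (!stub_access_ok(readable) || stub_get_user(copy_ok))
        return -EFAULT;
    return 0;
}

int main(void)
{
    printf("old style, everything fine: %d\n", old_style(1, 1));
    printf("new style, everything fine: %d\n", new_style(1, 1));
    printf("new style, copy fails:      %d\n", new_style(1, 0));
    return 0;
}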
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 5638c8f9362f..5f952187fc53 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
@@ -505,13 +505,15 @@ static int populate_groups(struct config_group *group) | |||
505 | int i; | 505 | int i; |
506 | 506 | ||
507 | if (group->default_groups) { | 507 | if (group->default_groups) { |
508 | /* FYI, we're faking mkdir here | 508 | /* |
509 | * FYI, we're faking mkdir here | ||
509 | * I'm not sure we need this semaphore, as we're called | 510 | * I'm not sure we need this semaphore, as we're called |
510 | * from our parent's mkdir. That holds our parent's | 511 | * from our parent's mkdir. That holds our parent's |
511 | * i_mutex, so afaik lookup cannot continue through our | 512 | * i_mutex, so afaik lookup cannot continue through our |
512 | * parent to find us, let alone mess with our tree. | 513 | * parent to find us, let alone mess with our tree. |
513 | * That said, taking our i_mutex is closer to mkdir | 514 | * That said, taking our i_mutex is closer to mkdir |
514 | * emulation, and shouldn't hurt. */ | 515 | * emulation, and shouldn't hurt. |
516 | */ | ||
515 | mutex_lock(&dentry->d_inode->i_mutex); | 517 | mutex_lock(&dentry->d_inode->i_mutex); |
516 | 518 | ||
517 | for (i = 0; group->default_groups[i]; i++) { | 519 | for (i = 0; group->default_groups[i]; i++) { |
@@ -546,20 +548,34 @@ static void unlink_obj(struct config_item *item) | |||
546 | 548 | ||
547 | item->ci_group = NULL; | 549 | item->ci_group = NULL; |
548 | item->ci_parent = NULL; | 550 | item->ci_parent = NULL; |
551 | |||
552 | /* Drop the reference for ci_entry */ | ||
549 | config_item_put(item); | 553 | config_item_put(item); |
550 | 554 | ||
555 | /* Drop the reference for ci_parent */ | ||
551 | config_group_put(group); | 556 | config_group_put(group); |
552 | } | 557 | } |
553 | } | 558 | } |
554 | 559 | ||
555 | static void link_obj(struct config_item *parent_item, struct config_item *item) | 560 | static void link_obj(struct config_item *parent_item, struct config_item *item) |
556 | { | 561 | { |
557 | /* Parent seems redundant with group, but it makes certain | 562 | /* |
558 | * traversals much nicer. */ | 563 | * Parent seems redundant with group, but it makes certain |
564 | * traversals much nicer. | ||
565 | */ | ||
559 | item->ci_parent = parent_item; | 566 | item->ci_parent = parent_item; |
567 | |||
568 | /* | ||
569 | * We hold a reference on the parent for the child's ci_parent | ||
570 | * link. | ||
571 | */ | ||
560 | item->ci_group = config_group_get(to_config_group(parent_item)); | 572 | item->ci_group = config_group_get(to_config_group(parent_item)); |
561 | list_add_tail(&item->ci_entry, &item->ci_group->cg_children); | 573 | list_add_tail(&item->ci_entry, &item->ci_group->cg_children); |
562 | 574 | ||
575 | /* | ||
576 | * We hold a reference on the child for ci_entry on the parent's | ||
577 | * cg_children | ||
578 | */ | ||
563 | config_item_get(item); | 579 | config_item_get(item); |
564 | } | 580 | } |
565 | 581 | ||
@@ -684,6 +700,10 @@ static void client_drop_item(struct config_item *parent_item, | |||
684 | type = parent_item->ci_type; | 700 | type = parent_item->ci_type; |
685 | BUG_ON(!type); | 701 | BUG_ON(!type); |
686 | 702 | ||
703 | /* | ||
704 | * If ->drop_item() exists, it is responsible for the | ||
705 | * config_item_put(). | ||
706 | */ | ||
687 | if (type->ct_group_ops && type->ct_group_ops->drop_item) | 707 | if (type->ct_group_ops && type->ct_group_ops->drop_item) |
688 | type->ct_group_ops->drop_item(to_config_group(parent_item), | 708 | type->ct_group_ops->drop_item(to_config_group(parent_item), |
689 | item); | 709 | item); |
@@ -694,23 +714,28 @@ static void client_drop_item(struct config_item *parent_item, | |||
694 | 714 | ||
695 | static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 715 | static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) |
696 | { | 716 | { |
697 | int ret; | 717 | int ret, module_got = 0; |
698 | struct config_group *group; | 718 | struct config_group *group; |
699 | struct config_item *item; | 719 | struct config_item *item; |
700 | struct config_item *parent_item; | 720 | struct config_item *parent_item; |
701 | struct configfs_subsystem *subsys; | 721 | struct configfs_subsystem *subsys; |
702 | struct configfs_dirent *sd; | 722 | struct configfs_dirent *sd; |
703 | struct config_item_type *type; | 723 | struct config_item_type *type; |
704 | struct module *owner; | 724 | struct module *owner = NULL; |
705 | char *name; | 725 | char *name; |
706 | 726 | ||
707 | if (dentry->d_parent == configfs_sb->s_root) | 727 | if (dentry->d_parent == configfs_sb->s_root) { |
708 | return -EPERM; | 728 | ret = -EPERM; |
729 | goto out; | ||
730 | } | ||
709 | 731 | ||
710 | sd = dentry->d_parent->d_fsdata; | 732 | sd = dentry->d_parent->d_fsdata; |
711 | if (!(sd->s_type & CONFIGFS_USET_DIR)) | 733 | if (!(sd->s_type & CONFIGFS_USET_DIR)) { |
712 | return -EPERM; | 734 | ret = -EPERM; |
735 | goto out; | ||
736 | } | ||
713 | 737 | ||
738 | /* Get a working ref for the duration of this function */ | ||
714 | parent_item = configfs_get_config_item(dentry->d_parent); | 739 | parent_item = configfs_get_config_item(dentry->d_parent); |
715 | type = parent_item->ci_type; | 740 | type = parent_item->ci_type; |
716 | subsys = to_config_group(parent_item)->cg_subsys; | 741 | subsys = to_config_group(parent_item)->cg_subsys; |
@@ -719,15 +744,16 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
719 | if (!type || !type->ct_group_ops || | 744 | if (!type || !type->ct_group_ops || |
720 | (!type->ct_group_ops->make_group && | 745 | (!type->ct_group_ops->make_group && |
721 | !type->ct_group_ops->make_item)) { | 746 | !type->ct_group_ops->make_item)) { |
722 | config_item_put(parent_item); | 747 | ret = -EPERM; /* Lack-of-mkdir returns -EPERM */ |
723 | return -EPERM; /* What lack-of-mkdir returns */ | 748 | goto out_put; |
724 | } | 749 | } |
725 | 750 | ||
726 | name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL); | 751 | name = kmalloc(dentry->d_name.len + 1, GFP_KERNEL); |
727 | if (!name) { | 752 | if (!name) { |
728 | config_item_put(parent_item); | 753 | ret = -ENOMEM; |
729 | return -ENOMEM; | 754 | goto out_put; |
730 | } | 755 | } |
756 | |||
731 | snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name); | 757 | snprintf(name, dentry->d_name.len + 1, "%s", dentry->d_name.name); |
732 | 758 | ||
733 | down(&subsys->su_sem); | 759 | down(&subsys->su_sem); |
@@ -748,40 +774,67 @@ static int configfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
748 | 774 | ||
749 | kfree(name); | 775 | kfree(name); |
750 | if (!item) { | 776 | if (!item) { |
751 | config_item_put(parent_item); | 777 | /* |
752 | return -ENOMEM; | 778 | * If item == NULL, then link_obj() was never called. |
779 | * There are no extra references to clean up. | ||
780 | */ | ||
781 | ret = -ENOMEM; | ||
782 | goto out_put; | ||
753 | } | 783 | } |
754 | 784 | ||
755 | ret = -EINVAL; | 785 | /* |
786 | * link_obj() has been called (via link_group() for groups). | ||
787 | * From here on out, errors must clean that up. | ||
788 | */ | ||
789 | |||
756 | type = item->ci_type; | 790 | type = item->ci_type; |
757 | if (type) { | 791 | if (!type) { |
758 | owner = type->ct_owner; | 792 | ret = -EINVAL; |
759 | if (try_module_get(owner)) { | 793 | goto out_unlink; |
760 | if (group) { | 794 | } |
761 | ret = configfs_attach_group(parent_item, | ||
762 | item, | ||
763 | dentry); | ||
764 | } else { | ||
765 | ret = configfs_attach_item(parent_item, | ||
766 | item, | ||
767 | dentry); | ||
768 | } | ||
769 | 795 | ||
770 | if (ret) { | 796 | owner = type->ct_owner; |
771 | down(&subsys->su_sem); | 797 | if (!try_module_get(owner)) { |
772 | if (group) | 798 | ret = -EINVAL; |
773 | unlink_group(group); | 799 | goto out_unlink; |
774 | else | 800 | } |
775 | unlink_obj(item); | ||
776 | client_drop_item(parent_item, item); | ||
777 | up(&subsys->su_sem); | ||
778 | 801 | ||
779 | config_item_put(parent_item); | 802 | /* |
780 | module_put(owner); | 803 | * I hate doing it this way, but if there is |
781 | } | 804 | * an error, module_put() probably should |
782 | } | 805 | * happen after any cleanup. |
806 | */ | ||
807 | module_got = 1; | ||
808 | |||
809 | if (group) | ||
810 | ret = configfs_attach_group(parent_item, item, dentry); | ||
811 | else | ||
812 | ret = configfs_attach_item(parent_item, item, dentry); | ||
813 | |||
814 | out_unlink: | ||
815 | if (ret) { | ||
816 | /* Tear down everything we built up */ | ||
817 | down(&subsys->su_sem); | ||
818 | if (group) | ||
819 | unlink_group(group); | ||
820 | else | ||
821 | unlink_obj(item); | ||
822 | client_drop_item(parent_item, item); | ||
823 | up(&subsys->su_sem); | ||
824 | |||
825 | if (module_got) | ||
826 | module_put(owner); | ||
783 | } | 827 | } |
784 | 828 | ||
829 | out_put: | ||
830 | /* | ||
831 | * link_obj()/link_group() took a reference from child->parent, | ||
832 | * so the parent is safely pinned. We can drop our working | ||
833 | * reference. | ||
834 | */ | ||
835 | config_item_put(parent_item); | ||
836 | |||
837 | out: | ||
785 | return ret; | 838 | return ret; |
786 | } | 839 | } |
787 | 840 | ||
@@ -801,6 +854,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
801 | if (sd->s_type & CONFIGFS_USET_DEFAULT) | 854 | if (sd->s_type & CONFIGFS_USET_DEFAULT) |
802 | return -EPERM; | 855 | return -EPERM; |
803 | 856 | ||
857 | /* Get a working ref until we have the child */ | ||
804 | parent_item = configfs_get_config_item(dentry->d_parent); | 858 | parent_item = configfs_get_config_item(dentry->d_parent); |
805 | subsys = to_config_group(parent_item)->cg_subsys; | 859 | subsys = to_config_group(parent_item)->cg_subsys; |
806 | BUG_ON(!subsys); | 860 | BUG_ON(!subsys); |
@@ -817,6 +871,7 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
817 | return ret; | 871 | return ret; |
818 | } | 872 | } |
819 | 873 | ||
874 | /* Get a working ref for the duration of this function */ | ||
820 | item = configfs_get_config_item(dentry); | 875 | item = configfs_get_config_item(dentry); |
821 | 876 | ||
822 | /* Drop reference from above, item already holds one. */ | 877 | /* Drop reference from above, item already holds one. */ |
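The configfs_mkdir()/configfs_rmdir() changes above are mostly about reference discipline: a working reference on the parent for the duration of the call, a module reference that is dropped only if it was actually taken, and goto labels that tear down no more than what was built. A skeleton of that flow, with invented helpers standing in for the configfs calls, might look like this:

#include <stdio.h>

/*
 * Skeleton with made-up helpers: unwind only what was set up, drop the
 * module reference only if it was taken, and always drop the working
 * parent reference on the way out.
 */
static int attach_ok;            /* flip to 0 to exercise the error path */

static void get_parent(void)   { printf("get parent ref\n"); }
static void put_parent(void)   { printf("put parent ref\n"); }
static int  get_module(void)   { printf("get module ref\n"); return 1; }
static void put_module(void)   { printf("put module ref\n"); }
static void unlink_child(void) { printf("unlink child\n"); }

static int mkdir_like(void)
{
    int ret = 0, module_got = 0;

    get_parent();                        /* working ref for this call */

    if (!get_module()) {
        ret = -1;
        goto out_unlink;
    }
    module_got = 1;

    ret = attach_ok ? 0 : -1;            /* models configfs_attach_*() */

out_unlink:
    if (ret) {
        unlink_child();                  /* tear down what link_obj() built */
        if (module_got)
            put_module();
    }
    put_parent();                        /* drop the working reference */
    return ret;
}

int main(void)
{
    attach_ok = 0;
    printf("failure path -> %d\n", mkdir_like());
    return 0;
}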
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c index b06b54f1bbbb..4c39009350f3 100644 --- a/fs/exportfs/expfs.c +++ b/fs/exportfs/expfs.c | |||
@@ -102,7 +102,7 @@ find_exported_dentry(struct super_block *sb, void *obj, void *parent, | |||
102 | if (acceptable(context, result)) | 102 | if (acceptable(context, result)) |
103 | return result; | 103 | return result; |
104 | if (S_ISDIR(result->d_inode->i_mode)) { | 104 | if (S_ISDIR(result->d_inode->i_mode)) { |
105 | /* there is no other dentry, so fail */ | 105 | err = -EACCES; |
106 | goto err_result; | 106 | goto err_result; |
107 | } | 107 | } |
108 | 108 | ||
diff --git a/fs/inotify.c b/fs/inotify.c index 1f50302849c5..732ec4bd5774 100644 --- a/fs/inotify.c +++ b/fs/inotify.c | |||
@@ -848,7 +848,11 @@ static int inotify_release(struct inode *ignored, struct file *file) | |||
848 | inode = watch->inode; | 848 | inode = watch->inode; |
849 | mutex_lock(&inode->inotify_mutex); | 849 | mutex_lock(&inode->inotify_mutex); |
850 | mutex_lock(&dev->mutex); | 850 | mutex_lock(&dev->mutex); |
851 | remove_watch_no_event(watch, dev); | 851 | |
852 | /* make sure we didn't race with another list removal */ | ||
853 | if (likely(idr_find(&dev->idr, watch->wd))) | ||
854 | remove_watch_no_event(watch, dev); | ||
855 | |||
852 | mutex_unlock(&dev->mutex); | 856 | mutex_unlock(&dev->mutex); |
853 | mutex_unlock(&inode->inotify_mutex); | 857 | mutex_unlock(&inode->inotify_mutex); |
854 | put_inotify_watch(watch); | 858 | put_inotify_watch(watch); |
@@ -890,8 +894,7 @@ static int inotify_ignore(struct inotify_device *dev, s32 wd) | |||
890 | mutex_lock(&dev->mutex); | 894 | mutex_lock(&dev->mutex); |
891 | 895 | ||
892 | /* make sure that we did not race */ | 896 | /* make sure that we did not race */ |
893 | watch = idr_find(&dev->idr, wd); | 897 | if (likely(idr_find(&dev->idr, wd) == watch)) |
894 | if (likely(watch)) | ||
895 | remove_watch(watch, dev); | 898 | remove_watch(watch, dev); |
896 | 899 | ||
897 | mutex_unlock(&dev->mutex); | 900 | mutex_unlock(&dev->mutex); |
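Both inotify hunks above re-check the idr under dev->mutex before removing a watch, so two removal paths racing on the same descriptor cannot both tear it down. A minimal model, using a plain array in place of the idr and invented names:

#include <stdio.h>
#include <stddef.h>

/* Toy lookup table standing in for the idr. */
#define MAX_WD 4
static void *idr[MAX_WD];

static void *idr_find_stub(int wd)
{
    return (wd >= 0 && wd < MAX_WD) ? idr[wd] : NULL;
}

static int remove_watch_stub(void *watch, int wd)
{
    /* make sure we did not race with another removal */
    if (idr_find_stub(wd) != watch)
        return 0;
    idr[wd] = NULL;
    return 1;
}

int main(void)
{
    int token = 42;

    idr[1] = &token;
    printf("first removal: %d\n", remove_watch_stub(&token, 1));
    printf("second removal (lost race): %d\n", remove_watch_stub(&token, 1));
    return 0;
}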
diff --git a/fs/jffs2/nodelist.c b/fs/jffs2/nodelist.c index d4d0c41490cd..1d46677afd17 100644 --- a/fs/jffs2/nodelist.c +++ b/fs/jffs2/nodelist.c | |||
@@ -438,7 +438,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
438 | if (c->mtd->point) { | 438 | if (c->mtd->point) { |
439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); | 439 | err = c->mtd->point(c->mtd, ofs, len, &retlen, &buffer); |
440 | if (!err && retlen < tn->csize) { | 440 | if (!err && retlen < tn->csize) { |
441 | JFFS2_WARNING("MTD point returned len too short: %u instead of %u.\n", retlen, tn->csize); | 441 | JFFS2_WARNING("MTD point returned len too short: %zu " |
442 | "instead of %u.\n", retlen, tn->csize); | ||
442 | c->mtd->unpoint(c->mtd, buffer, ofs, len); | 443 | c->mtd->unpoint(c->mtd, buffer, ofs, len); |
443 | } else if (err) | 444 | } else if (err) |
444 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); | 445 | JFFS2_WARNING("MTD point failed: error code %d.\n", err); |
@@ -461,7 +462,8 @@ static int check_node_data(struct jffs2_sb_info *c, struct jffs2_tmp_dnode_info | |||
461 | } | 462 | } |
462 | 463 | ||
463 | if (retlen != len) { | 464 | if (retlen != len) { |
464 | JFFS2_ERROR("short read at %#08x: %d instead of %d.\n", ofs, retlen, len); | 465 | JFFS2_ERROR("short read at %#08x: %zd instead of %d.\n", |
466 | ofs, retlen, len); | ||
465 | err = -EIO; | 467 | err = -EIO; |
466 | goto free_out; | 468 | goto free_out; |
467 | } | 469 | } |
diff --git a/fs/namespace.c b/fs/namespace.c index 2c5f1f80bdc2..bf478addb852 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -899,13 +899,11 @@ static int do_change_type(struct nameidata *nd, int flag) | |||
899 | /* | 899 | /* |
900 | * do loopback mount. | 900 | * do loopback mount. |
901 | */ | 901 | */ |
902 | static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags, int mnt_flags) | 902 | static int do_loopback(struct nameidata *nd, char *old_name, int recurse) |
903 | { | 903 | { |
904 | struct nameidata old_nd; | 904 | struct nameidata old_nd; |
905 | struct vfsmount *mnt = NULL; | 905 | struct vfsmount *mnt = NULL; |
906 | int recurse = flags & MS_REC; | ||
907 | int err = mount_is_safe(nd); | 906 | int err = mount_is_safe(nd); |
908 | |||
909 | if (err) | 907 | if (err) |
910 | return err; | 908 | return err; |
911 | if (!old_name || !*old_name) | 909 | if (!old_name || !*old_name) |
@@ -939,7 +937,6 @@ static int do_loopback(struct nameidata *nd, char *old_name, unsigned long flags | |||
939 | spin_unlock(&vfsmount_lock); | 937 | spin_unlock(&vfsmount_lock); |
940 | release_mounts(&umount_list); | 938 | release_mounts(&umount_list); |
941 | } | 939 | } |
942 | mnt->mnt_flags = mnt_flags; | ||
943 | 940 | ||
944 | out: | 941 | out: |
945 | up_write(&namespace_sem); | 942 | up_write(&namespace_sem); |
@@ -1353,7 +1350,7 @@ long do_mount(char *dev_name, char *dir_name, char *type_page, | |||
1353 | retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, | 1350 | retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags, |
1354 | data_page); | 1351 | data_page); |
1355 | else if (flags & MS_BIND) | 1352 | else if (flags & MS_BIND) |
1356 | retval = do_loopback(&nd, dev_name, flags, mnt_flags); | 1353 | retval = do_loopback(&nd, dev_name, flags & MS_REC); |
1357 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | 1354 | else if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) |
1358 | retval = do_change_type(&nd, flags); | 1355 | retval = do_change_type(&nd, flags); |
1359 | else if (flags & MS_MOVE) | 1356 | else if (flags & MS_MOVE) |
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c index 6aa92d0e6876..1d65f13f458c 100644 --- a/fs/nfsd/vfs.c +++ b/fs/nfsd/vfs.c | |||
@@ -1922,11 +1922,10 @@ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl) | |||
1922 | value = kmalloc(size, GFP_KERNEL); | 1922 | value = kmalloc(size, GFP_KERNEL); |
1923 | if (!value) | 1923 | if (!value) |
1924 | return -ENOMEM; | 1924 | return -ENOMEM; |
1925 | size = posix_acl_to_xattr(acl, value, size); | 1925 | error = posix_acl_to_xattr(acl, value, size); |
1926 | if (size < 0) { | 1926 | if (error < 0) |
1927 | error = size; | ||
1928 | goto getout; | 1927 | goto getout; |
1929 | } | 1928 | size = error; |
1930 | } else | 1929 | } else |
1931 | size = 0; | 1930 | size = 0; |
1932 | 1931 | ||
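The nfsd hunk above fixes a pattern that is easy to get wrong: posix_acl_to_xattr() returns either a negative errno or the encoded length, so the result has to be tested before it is reused as a size. A toy encoder with the same convention, purely for illustration:

#include <stdio.h>
#include <string.h>

/* Invented encoder: negative errno on failure, positive length on success. */
static int encode(const char *src, char *dst, size_t room)
{
    size_t need = strlen(src) + 1;

    if (need > room)
        return -34;                /* stands in for a real errno value */
    memcpy(dst, src, need);
    return (int)need;
}

int main(void)
{
    char buf[8];
    int error, size;

    error = encode("hello", buf, sizeof(buf));
    if (error < 0)
        return 1;
    size = error;                  /* only now is it safe to treat as a size */
    printf("encoded %d bytes\n", size);
    return 0;
}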
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 0d858d0b25be..47152bf9a7f2 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c | |||
@@ -276,13 +276,29 @@ static int ocfs2_writepage(struct page *page, struct writeback_control *wbc) | |||
276 | return ret; | 276 | return ret; |
277 | } | 277 | } |
278 | 278 | ||
279 | /* This can also be called from ocfs2_write_zero_page() which has done | ||
280 | * it's own cluster locking. */ | ||
281 | int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, | ||
282 | unsigned from, unsigned to) | ||
283 | { | ||
284 | int ret; | ||
285 | |||
286 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
287 | |||
288 | ret = block_prepare_write(page, from, to, ocfs2_get_block); | ||
289 | |||
290 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
291 | |||
292 | return ret; | ||
293 | } | ||
294 | |||
279 | /* | 295 | /* |
280 | * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called | 296 | * ocfs2_prepare_write() can be an outer-most ocfs2 call when it is called |
281 | * from loopback. It must be able to perform its own locking around | 297 | * from loopback. It must be able to perform its own locking around |
282 | * ocfs2_get_block(). | 298 | * ocfs2_get_block(). |
283 | */ | 299 | */ |
284 | int ocfs2_prepare_write(struct file *file, struct page *page, | 300 | static int ocfs2_prepare_write(struct file *file, struct page *page, |
285 | unsigned from, unsigned to) | 301 | unsigned from, unsigned to) |
286 | { | 302 | { |
287 | struct inode *inode = page->mapping->host; | 303 | struct inode *inode = page->mapping->host; |
288 | int ret; | 304 | int ret; |
@@ -295,11 +311,7 @@ int ocfs2_prepare_write(struct file *file, struct page *page, | |||
295 | goto out; | 311 | goto out; |
296 | } | 312 | } |
297 | 313 | ||
298 | down_read(&OCFS2_I(inode)->ip_alloc_sem); | 314 | ret = ocfs2_prepare_write_nolock(inode, page, from, to); |
299 | |||
300 | ret = block_prepare_write(page, from, to, ocfs2_get_block); | ||
301 | |||
302 | up_read(&OCFS2_I(inode)->ip_alloc_sem); | ||
303 | 315 | ||
304 | ocfs2_meta_unlock(inode, 0); | 316 | ocfs2_meta_unlock(inode, 0); |
305 | out: | 317 | out: |
@@ -625,11 +637,31 @@ static ssize_t ocfs2_direct_IO(int rw, | |||
625 | int ret; | 637 | int ret; |
626 | 638 | ||
627 | mlog_entry_void(); | 639 | mlog_entry_void(); |
640 | |||
641 | /* | ||
642 | * We get PR data locks even for O_DIRECT. This allows | ||
643 | * concurrent O_DIRECT I/O but doesn't let O_DIRECT with | ||
644 | * extending and buffered zeroing writes race. If they did | ||
645 | * race then the buffered zeroing could be written back after | ||
646 | * the O_DIRECT I/O. It's one thing to tell people not to mix | ||
647 | * buffered and O_DIRECT writes, but expecting them to | ||
648 | * understand that file extension is also an implicit buffered | ||
649 | * write is too much. By getting the PR we force writeback of | ||
650 | * the buffered zeroing before proceeding. | ||
651 | */ | ||
652 | ret = ocfs2_data_lock(inode, 0); | ||
653 | if (ret < 0) { | ||
654 | mlog_errno(ret); | ||
655 | goto out; | ||
656 | } | ||
657 | ocfs2_data_unlock(inode, 0); | ||
658 | |||
628 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, | 659 | ret = blockdev_direct_IO_no_locking(rw, iocb, inode, |
629 | inode->i_sb->s_bdev, iov, offset, | 660 | inode->i_sb->s_bdev, iov, offset, |
630 | nr_segs, | 661 | nr_segs, |
631 | ocfs2_direct_IO_get_blocks, | 662 | ocfs2_direct_IO_get_blocks, |
632 | ocfs2_dio_end_io); | 663 | ocfs2_dio_end_io); |
664 | out: | ||
633 | mlog_exit(ret); | 665 | mlog_exit(ret); |
634 | return ret; | 666 | return ret; |
635 | } | 667 | } |
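The aops.c change above splits ocfs2_prepare_write() into a thin locking wrapper and an ocfs2_prepare_write_nolock() helper, so ocfs2_write_zero_page(), which already does its own cluster locking, can call the helper directly. The pattern in isolation, modelled with a toy pthread rwlock and invented names:

#include <stdio.h>
#include <pthread.h>

/*
 * Sketch of the *_nolock split: the helper does the work (and still
 * takes the lighter local lock), while the original entry point becomes
 * a wrapper that takes and drops the heavier lock around it.
 */
static pthread_rwlock_t alloc_sem = PTHREAD_RWLOCK_INITIALIZER;

static int prepare_write_nolock(int page)
{
    int ret;

    pthread_rwlock_rdlock(&alloc_sem);
    ret = page >= 0 ? 0 : -1;      /* stands in for block_prepare_write() */
    pthread_rwlock_unlock(&alloc_sem);
    return ret;
}

static int prepare_write(int page)
{
    int ret;

    /* the real wrapper takes the cluster meta lock here before delegating */
    ret = prepare_write_nolock(page);
    /* ...and drops it again afterwards */
    return ret;
}

int main(void)
{
    printf("locked entry point: %d\n", prepare_write(1));
    printf("already-locked caller: %d\n", prepare_write_nolock(2));
    return 0;
}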
diff --git a/fs/ocfs2/aops.h b/fs/ocfs2/aops.h index d40456d509a0..e88c3f0b8fa9 100644 --- a/fs/ocfs2/aops.h +++ b/fs/ocfs2/aops.h | |||
@@ -22,8 +22,8 @@ | |||
22 | #ifndef OCFS2_AOPS_H | 22 | #ifndef OCFS2_AOPS_H |
23 | #define OCFS2_AOPS_H | 23 | #define OCFS2_AOPS_H |
24 | 24 | ||
25 | int ocfs2_prepare_write(struct file *file, struct page *page, | 25 | int ocfs2_prepare_write_nolock(struct inode *inode, struct page *page, |
26 | unsigned from, unsigned to); | 26 | unsigned from, unsigned to); |
27 | 27 | ||
28 | struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, | 28 | struct ocfs2_journal_handle *ocfs2_start_walk_page_trans(struct inode *inode, |
29 | struct page *page, | 29 | struct page *page, |
diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 4601fc256f11..1a5c69071df6 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c | |||
@@ -569,7 +569,7 @@ static int ocfs2_extent_map_insert(struct inode *inode, | |||
569 | 569 | ||
570 | ret = -ENOMEM; | 570 | ret = -ENOMEM; |
571 | ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, | 571 | ctxt.new_ent = kmem_cache_alloc(ocfs2_em_ent_cachep, |
572 | GFP_KERNEL); | 572 | GFP_NOFS); |
573 | if (!ctxt.new_ent) { | 573 | if (!ctxt.new_ent) { |
574 | mlog_errno(ret); | 574 | mlog_errno(ret); |
575 | return ret; | 575 | return ret; |
@@ -583,14 +583,14 @@ static int ocfs2_extent_map_insert(struct inode *inode, | |||
583 | if (ctxt.need_left && !ctxt.left_ent) { | 583 | if (ctxt.need_left && !ctxt.left_ent) { |
584 | ctxt.left_ent = | 584 | ctxt.left_ent = |
585 | kmem_cache_alloc(ocfs2_em_ent_cachep, | 585 | kmem_cache_alloc(ocfs2_em_ent_cachep, |
586 | GFP_KERNEL); | 586 | GFP_NOFS); |
587 | if (!ctxt.left_ent) | 587 | if (!ctxt.left_ent) |
588 | break; | 588 | break; |
589 | } | 589 | } |
590 | if (ctxt.need_right && !ctxt.right_ent) { | 590 | if (ctxt.need_right && !ctxt.right_ent) { |
591 | ctxt.right_ent = | 591 | ctxt.right_ent = |
592 | kmem_cache_alloc(ocfs2_em_ent_cachep, | 592 | kmem_cache_alloc(ocfs2_em_ent_cachep, |
593 | GFP_KERNEL); | 593 | GFP_NOFS); |
594 | if (!ctxt.right_ent) | 594 | if (!ctxt.right_ent) |
595 | break; | 595 | break; |
596 | } | 596 | } |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 581eb451a41a..a9559c874530 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -613,7 +613,8 @@ leave: | |||
613 | 613 | ||
614 | /* Some parts of this taken from generic_cont_expand, which turned out | 614 | /* Some parts of this taken from generic_cont_expand, which turned out |
615 | * to be too fragile to do exactly what we need without us having to | 615 | * to be too fragile to do exactly what we need without us having to |
616 | * worry about recursive locking in ->commit_write(). */ | 616 | * worry about recursive locking in ->prepare_write() and |
617 | * ->commit_write(). */ | ||
617 | static int ocfs2_write_zero_page(struct inode *inode, | 618 | static int ocfs2_write_zero_page(struct inode *inode, |
618 | u64 size) | 619 | u64 size) |
619 | { | 620 | { |
@@ -641,7 +642,7 @@ static int ocfs2_write_zero_page(struct inode *inode, | |||
641 | goto out; | 642 | goto out; |
642 | } | 643 | } |
643 | 644 | ||
644 | ret = ocfs2_prepare_write(NULL, page, offset, offset); | 645 | ret = ocfs2_prepare_write_nolock(inode, page, offset, offset); |
645 | if (ret < 0) { | 646 | if (ret < 0) { |
646 | mlog_errno(ret); | 647 | mlog_errno(ret); |
647 | goto out_unlock; | 648 | goto out_unlock; |
@@ -695,13 +696,26 @@ out: | |||
695 | return ret; | 696 | return ret; |
696 | } | 697 | } |
697 | 698 | ||
699 | /* | ||
700 | * A tail_to_skip value > 0 indicates that we're being called from | ||
701 | * ocfs2_file_aio_write(). This has the following implications: | ||
702 | * | ||
703 | * - we don't want to update i_size | ||
704 | * - di_bh will be NULL, which is fine because it's only used in the | ||
705 | * case where we want to update i_size. | ||
706 | * - ocfs2_zero_extend() will then only be filling the hole created | ||
707 | * between i_size and the start of the write. | ||
708 | */ | ||
698 | static int ocfs2_extend_file(struct inode *inode, | 709 | static int ocfs2_extend_file(struct inode *inode, |
699 | struct buffer_head *di_bh, | 710 | struct buffer_head *di_bh, |
700 | u64 new_i_size) | 711 | u64 new_i_size, |
712 | size_t tail_to_skip) | ||
701 | { | 713 | { |
702 | int ret = 0; | 714 | int ret = 0; |
703 | u32 clusters_to_add; | 715 | u32 clusters_to_add; |
704 | 716 | ||
717 | BUG_ON(!tail_to_skip && !di_bh); | ||
718 | |||
705 | /* setattr sometimes calls us like this. */ | 719 | /* setattr sometimes calls us like this. */ |
706 | if (new_i_size == 0) | 720 | if (new_i_size == 0) |
707 | goto out; | 721 | goto out; |
@@ -714,27 +728,44 @@ static int ocfs2_extend_file(struct inode *inode, | |||
714 | OCFS2_I(inode)->ip_clusters; | 728 | OCFS2_I(inode)->ip_clusters; |
715 | 729 | ||
716 | if (clusters_to_add) { | 730 | if (clusters_to_add) { |
717 | ret = ocfs2_extend_allocation(inode, clusters_to_add); | 731 | /* |
732 | * protect the pages that ocfs2_zero_extend is going to | ||
733 | * be pulling into the page cache. We do this before the | ||
734 | * metadata extend so that we don't get into the situation | ||
735 | * where we've extended the metadata but can't get the data | ||
736 | * lock to zero. | ||
737 | */ | ||
738 | ret = ocfs2_data_lock(inode, 1); | ||
718 | if (ret < 0) { | 739 | if (ret < 0) { |
719 | mlog_errno(ret); | 740 | mlog_errno(ret); |
720 | goto out; | 741 | goto out; |
721 | } | 742 | } |
722 | 743 | ||
723 | ret = ocfs2_zero_extend(inode, new_i_size); | 744 | ret = ocfs2_extend_allocation(inode, clusters_to_add); |
724 | if (ret < 0) { | 745 | if (ret < 0) { |
725 | mlog_errno(ret); | 746 | mlog_errno(ret); |
726 | goto out; | 747 | goto out_unlock; |
727 | } | 748 | } |
728 | } | ||
729 | 749 | ||
730 | /* No allocation required, we just use this helper to | 750 | ret = ocfs2_zero_extend(inode, (u64)new_i_size - tail_to_skip); |
731 | * do a trivial update of i_size. */ | 751 | if (ret < 0) { |
732 | ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); | 752 | mlog_errno(ret); |
733 | if (ret < 0) { | 753 | goto out_unlock; |
734 | mlog_errno(ret); | 754 | } |
735 | goto out; | 755 | } |
756 | |||
757 | if (!tail_to_skip) { | ||
758 | /* We're being called from ocfs2_setattr() which wants | ||
759 | * us to update i_size */ | ||
760 | ret = ocfs2_simple_size_update(inode, di_bh, new_i_size); | ||
761 | if (ret < 0) | ||
762 | mlog_errno(ret); | ||
736 | } | 763 | } |
737 | 764 | ||
765 | out_unlock: | ||
766 | if (clusters_to_add) /* this is the only case in which we lock */ | ||
767 | ocfs2_data_unlock(inode, 1); | ||
768 | |||
738 | out: | 769 | out: |
739 | return ret; | 770 | return ret; |
740 | } | 771 | } |
@@ -793,7 +824,7 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr) | |||
793 | if (i_size_read(inode) > attr->ia_size) | 824 | if (i_size_read(inode) > attr->ia_size) |
794 | status = ocfs2_truncate_file(inode, bh, attr->ia_size); | 825 | status = ocfs2_truncate_file(inode, bh, attr->ia_size); |
795 | else | 826 | else |
796 | status = ocfs2_extend_file(inode, bh, attr->ia_size); | 827 | status = ocfs2_extend_file(inode, bh, attr->ia_size, 0); |
797 | if (status < 0) { | 828 | if (status < 0) { |
798 | if (status != -ENOSPC) | 829 | if (status != -ENOSPC) |
799 | mlog_errno(status); | 830 | mlog_errno(status); |
@@ -1049,21 +1080,12 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
1049 | if (!clusters) | 1080 | if (!clusters) |
1050 | break; | 1081 | break; |
1051 | 1082 | ||
1052 | ret = ocfs2_extend_allocation(inode, clusters); | 1083 | ret = ocfs2_extend_file(inode, NULL, newsize, count); |
1053 | if (ret < 0) { | 1084 | if (ret < 0) { |
1054 | if (ret != -ENOSPC) | 1085 | if (ret != -ENOSPC) |
1055 | mlog_errno(ret); | 1086 | mlog_errno(ret); |
1056 | goto out; | 1087 | goto out; |
1057 | } | 1088 | } |
1058 | |||
1059 | /* Fill any holes which would've been created by this | ||
1060 | * write. If we're O_APPEND, this will wind up | ||
1061 | * (correctly) being a noop. */ | ||
1062 | ret = ocfs2_zero_extend(inode, (u64) newsize - count); | ||
1063 | if (ret < 0) { | ||
1064 | mlog_errno(ret); | ||
1065 | goto out; | ||
1066 | } | ||
1067 | break; | 1089 | break; |
1068 | } | 1090 | } |
1069 | 1091 | ||
@@ -1146,6 +1168,22 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb, | |||
1146 | ocfs2_iocb_set_rw_locked(iocb); | 1168 | ocfs2_iocb_set_rw_locked(iocb); |
1147 | } | 1169 | } |
1148 | 1170 | ||
1171 | /* | ||
1172 | * We're fine letting folks race truncates and extending | ||
1173 | * writes with read across the cluster, just like they can | ||
1174 | * locally. Hence no rw_lock during read. | ||
1175 | * | ||
1176 | * Take and drop the meta data lock to update inode fields | ||
1177 | * like i_size. This gives the checks down in | ||
1178 | * generic_file_aio_read() a chance of actually working. | ||
1179 | */ | ||
1180 | ret = ocfs2_meta_lock(inode, NULL, NULL, 0); | ||
1181 | if (ret < 0) { | ||
1182 | mlog_errno(ret); | ||
1183 | goto bail; | ||
1184 | } | ||
1185 | ocfs2_meta_unlock(inode, 0); | ||
1186 | |||
1149 | ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos); | 1187 | ret = generic_file_aio_read(iocb, buf, count, iocb->ki_pos); |
1150 | if (ret == -EINVAL) | 1188 | if (ret == -EINVAL) |
1151 | mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n"); | 1189 | mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n"); |
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c index 6a610ae53583..eebc3cfa6be8 100644 --- a/fs/ocfs2/journal.c +++ b/fs/ocfs2/journal.c | |||
@@ -117,7 +117,7 @@ struct ocfs2_journal_handle *ocfs2_alloc_handle(struct ocfs2_super *osb) | |||
117 | { | 117 | { |
118 | struct ocfs2_journal_handle *retval = NULL; | 118 | struct ocfs2_journal_handle *retval = NULL; |
119 | 119 | ||
120 | retval = kcalloc(1, sizeof(*retval), GFP_KERNEL); | 120 | retval = kcalloc(1, sizeof(*retval), GFP_NOFS); |
121 | if (!retval) { | 121 | if (!retval) { |
122 | mlog(ML_ERROR, "Failed to allocate memory for journal " | 122 | mlog(ML_ERROR, "Failed to allocate memory for journal " |
123 | "handle!\n"); | 123 | "handle!\n"); |
@@ -870,9 +870,11 @@ static int ocfs2_force_read_journal(struct inode *inode) | |||
870 | if (p_blocks > CONCURRENT_JOURNAL_FILL) | 870 | if (p_blocks > CONCURRENT_JOURNAL_FILL) |
871 | p_blocks = CONCURRENT_JOURNAL_FILL; | 871 | p_blocks = CONCURRENT_JOURNAL_FILL; |
872 | 872 | ||
873 | /* We are reading journal data which should not | ||
874 | * be put in the uptodate cache */ | ||
873 | status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb), | 875 | status = ocfs2_read_blocks(OCFS2_SB(inode->i_sb), |
874 | p_blkno, p_blocks, bhs, 0, | 876 | p_blkno, p_blocks, bhs, 0, |
875 | inode); | 877 | NULL); |
876 | if (status < 0) { | 878 | if (status < 0) { |
877 | mlog_errno(status); | 879 | mlog_errno(status); |
878 | goto bail; | 880 | goto bail; |
@@ -982,7 +984,7 @@ static void ocfs2_queue_recovery_completion(struct ocfs2_journal *journal, | |||
982 | { | 984 | { |
983 | struct ocfs2_la_recovery_item *item; | 985 | struct ocfs2_la_recovery_item *item; |
984 | 986 | ||
985 | item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_KERNEL); | 987 | item = kmalloc(sizeof(struct ocfs2_la_recovery_item), GFP_NOFS); |
986 | if (!item) { | 988 | if (!item) { |
987 | /* Though we wish to avoid it, we are in fact safe in | 989 | /* Though we wish to avoid it, we are in fact safe in |
988 | * skipping local alloc cleanup as fsck.ocfs2 is more | 990 | * skipping local alloc cleanup as fsck.ocfs2 is more |
diff --git a/fs/ocfs2/uptodate.c b/fs/ocfs2/uptodate.c index 04a684dfdd96..b8a00a793326 100644 --- a/fs/ocfs2/uptodate.c +++ b/fs/ocfs2/uptodate.c | |||
@@ -337,7 +337,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
337 | (unsigned long long)oi->ip_blkno, | 337 | (unsigned long long)oi->ip_blkno, |
338 | (unsigned long long)block, expand_tree); | 338 | (unsigned long long)block, expand_tree); |
339 | 339 | ||
340 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_KERNEL); | 340 | new = kmem_cache_alloc(ocfs2_uptodate_cachep, GFP_NOFS); |
341 | if (!new) { | 341 | if (!new) { |
342 | mlog_errno(-ENOMEM); | 342 | mlog_errno(-ENOMEM); |
343 | return; | 343 | return; |
@@ -349,7 +349,7 @@ static void __ocfs2_set_buffer_uptodate(struct ocfs2_inode_info *oi, | |||
349 | * has no way of tracking that. */ | 349 | * has no way of tracking that. */ |
350 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { | 350 | for(i = 0; i < OCFS2_INODE_MAX_CACHE_ARRAY; i++) { |
351 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, | 351 | tree[i] = kmem_cache_alloc(ocfs2_uptodate_cachep, |
352 | GFP_KERNEL); | 352 | GFP_NOFS); |
353 | if (!tree[i]) { | 353 | if (!tree[i]) { |
354 | mlog_errno(-ENOMEM); | 354 | mlog_errno(-ENOMEM); |
355 | goto out_free; | 355 | goto out_free; |
diff --git a/fs/ocfs2/vote.c b/fs/ocfs2/vote.c index 53049a204197..ee42765a8553 100644 --- a/fs/ocfs2/vote.c +++ b/fs/ocfs2/vote.c | |||
@@ -586,7 +586,7 @@ static struct ocfs2_net_wait_ctxt *ocfs2_new_net_wait_ctxt(unsigned int response | |||
586 | { | 586 | { |
587 | struct ocfs2_net_wait_ctxt *w; | 587 | struct ocfs2_net_wait_ctxt *w; |
588 | 588 | ||
589 | w = kcalloc(1, sizeof(*w), GFP_KERNEL); | 589 | w = kcalloc(1, sizeof(*w), GFP_NOFS); |
590 | if (!w) { | 590 | if (!w) { |
591 | mlog_errno(-ENOMEM); | 591 | mlog_errno(-ENOMEM); |
592 | goto bail; | 592 | goto bail; |
@@ -749,7 +749,7 @@ static struct ocfs2_vote_msg * ocfs2_new_vote_request(struct ocfs2_super *osb, | |||
749 | 749 | ||
750 | BUG_ON(!ocfs2_is_valid_vote_request(type)); | 750 | BUG_ON(!ocfs2_is_valid_vote_request(type)); |
751 | 751 | ||
752 | request = kcalloc(1, sizeof(*request), GFP_KERNEL); | 752 | request = kcalloc(1, sizeof(*request), GFP_NOFS); |
753 | if (!request) { | 753 | if (!request) { |
754 | mlog_errno(-ENOMEM); | 754 | mlog_errno(-ENOMEM); |
755 | } else { | 755 | } else { |
@@ -1129,7 +1129,7 @@ static int ocfs2_handle_vote_message(struct o2net_msg *msg, | |||
1129 | struct ocfs2_super *osb = data; | 1129 | struct ocfs2_super *osb = data; |
1130 | struct ocfs2_vote_work *work; | 1130 | struct ocfs2_vote_work *work; |
1131 | 1131 | ||
1132 | work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_KERNEL); | 1132 | work = kmalloc(sizeof(struct ocfs2_vote_work), GFP_NOFS); |
1133 | if (!work) { | 1133 | if (!work) { |
1134 | status = -ENOMEM; | 1134 | status = -ENOMEM; |
1135 | mlog_errno(status); | 1135 | mlog_errno(status); |
@@ -1124,7 +1124,6 @@ asmlinkage long sys_openat(int dfd, const char __user *filename, int flags, | |||
1124 | prevent_tail_call(ret); | 1124 | prevent_tail_call(ret); |
1125 | return ret; | 1125 | return ret; |
1126 | } | 1126 | } |
1127 | EXPORT_SYMBOL_GPL(sys_openat); | ||
1128 | 1127 | ||
1129 | #ifndef __alpha__ | 1128 | #ifndef __alpha__ |
1130 | 1129 | ||
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 45ae7dd3c650..7ef1f094de91 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
@@ -533,6 +533,7 @@ void del_gendisk(struct gendisk *disk) | |||
533 | 533 | ||
534 | devfs_remove_disk(disk); | 534 | devfs_remove_disk(disk); |
535 | 535 | ||
536 | kobject_uevent(&disk->kobj, KOBJ_REMOVE); | ||
536 | if (disk->holder_dir) | 537 | if (disk->holder_dir) |
537 | kobject_unregister(disk->holder_dir); | 538 | kobject_unregister(disk->holder_dir); |
538 | if (disk->slave_dir) | 539 | if (disk->slave_dir) |
@@ -545,7 +546,7 @@ void del_gendisk(struct gendisk *disk) | |||
545 | kfree(disk_name); | 546 | kfree(disk_name); |
546 | } | 547 | } |
547 | put_device(disk->driverfs_dev); | 548 | put_device(disk->driverfs_dev); |
549 | disk->driverfs_dev = NULL; | ||
548 | } | 550 | } |
549 | kobject_uevent(&disk->kobj, KOBJ_REMOVE); | ||
550 | kobject_del(&disk->kobj); | 551 | kobject_del(&disk->kobj); |
551 | } | 552 | } |
diff --git a/fs/smbfs/dir.c b/fs/smbfs/dir.c index 34c7a11d91f0..70d9c5a37f5a 100644 --- a/fs/smbfs/dir.c +++ b/fs/smbfs/dir.c | |||
@@ -434,6 +434,11 @@ smb_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) | |||
434 | if (dentry->d_name.len > SMB_MAXNAMELEN) | 434 | if (dentry->d_name.len > SMB_MAXNAMELEN) |
435 | goto out; | 435 | goto out; |
436 | 436 | ||
437 | /* Do not allow lookup of names with backslashes in */ | ||
438 | error = -EINVAL; | ||
439 | if (memchr(dentry->d_name.name, '\\', dentry->d_name.len)) | ||
440 | goto out; | ||
441 | |||
437 | lock_kernel(); | 442 | lock_kernel(); |
438 | error = smb_proc_getattr(dentry, &finfo); | 443 | error = smb_proc_getattr(dentry, &finfo); |
439 | #ifdef SMBFS_PARANOIA | 444 | #ifdef SMBFS_PARANOIA |
diff --git a/fs/smbfs/request.c b/fs/smbfs/request.c index c71c375863cc..c71dd2760d32 100644 --- a/fs/smbfs/request.c +++ b/fs/smbfs/request.c | |||
@@ -339,9 +339,11 @@ int smb_add_request(struct smb_request *req) | |||
339 | /* | 339 | /* |
340 | * On timeout or on interrupt we want to try and remove the | 340 | * On timeout or on interrupt we want to try and remove the |
341 | * request from the recvq/xmitq. | 341 | * request from the recvq/xmitq. |
342 | * First check if the request is still part of a queue. (May | ||
343 | * have been removed by some error condition) | ||
342 | */ | 344 | */ |
343 | smb_lock_server(server); | 345 | smb_lock_server(server); |
344 | if (!(req->rq_flags & SMB_REQ_RECEIVED)) { | 346 | if (!list_empty(&req->rq_queue)) { |
345 | list_del_init(&req->rq_queue); | 347 | list_del_init(&req->rq_queue); |
346 | smb_rput(req); | 348 | smb_rput(req); |
347 | } | 349 | } |
diff --git a/include/asm-arm/arch-pxa/pxa2xx_spi.h b/include/asm-arm/arch-pxa/pxa2xx_spi.h new file mode 100644 index 000000000000..915590c391c8 --- /dev/null +++ b/include/asm-arm/arch-pxa/pxa2xx_spi.h | |||
@@ -0,0 +1,71 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
17 | */ | ||
18 | |||
19 | #ifndef PXA2XX_SPI_H_ | ||
20 | #define PXA2XX_SPI_H_ | ||
21 | |||
22 | #define PXA2XX_CS_ASSERT (0x01) | ||
23 | #define PXA2XX_CS_DEASSERT (0x02) | ||
24 | |||
25 | #if defined(CONFIG_PXA25x) | ||
26 | #define CLOCK_SPEED_HZ 3686400 | ||
27 | #define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/2/(x+1))<<8)&0x0000ff00) | ||
28 | #define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) | ||
29 | #define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) | ||
30 | #define SSP_TIMEOUT_SCALE (2712) | ||
31 | #elif defined(CONFIG_PXA27x) | ||
32 | #define CLOCK_SPEED_HZ 13000000 | ||
33 | #define SSP1_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) | ||
34 | #define SSP2_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) | ||
35 | #define SSP3_SerClkDiv(x) (((CLOCK_SPEED_HZ/(x+1))<<8)&0x000fff00) | ||
36 | #define SSP_TIMEOUT_SCALE (769) | ||
37 | #endif | ||
38 | |||
39 | #define SSP_TIMEOUT(x) ((x*10000)/SSP_TIMEOUT_SCALE) | ||
40 | #define SSP1_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(1))))) | ||
41 | #define SSP2_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(2))))) | ||
42 | #define SSP3_VIRT ((void *)(io_p2v(__PREG(SSCR0_P(3))))) | ||
43 | |||
44 | enum pxa_ssp_type { | ||
45 | SSP_UNDEFINED = 0, | ||
46 | PXA25x_SSP, /* pxa 210, 250, 255, 26x */ | ||
47 | PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ | ||
48 | PXA27x_SSP, | ||
49 | }; | ||
50 | |||
51 | /* device.platform_data for SSP controller devices */ | ||
52 | struct pxa2xx_spi_master { | ||
53 | enum pxa_ssp_type ssp_type; | ||
54 | u32 clock_enable; | ||
55 | u16 num_chipselect; | ||
56 | u8 enable_dma; | ||
57 | }; | ||
58 | |||
59 | /* spi_board_info.controller_data for SPI slave devices, | ||
60 | * copied to spi_device.controller_data ... mostly for dma tuning | ||
61 | */ | ||
62 | struct pxa2xx_spi_chip { | ||
63 | u8 tx_threshold; | ||
64 | u8 rx_threshold; | ||
65 | u8 dma_burst_size; | ||
66 | u32 timeout_microsecs; | ||
67 | u8 enable_loopback; | ||
68 | void (*cs_control)(u32 command); | ||
69 | }; | ||
70 | |||
71 | #endif /*PXA2XX_SPI_H_*/ | ||
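
The two structures above are consumed by board code rather than by drivers. A hedged sketch of how a board file might wire them up; the include path, device names, FIFO thresholds, clock-enable value and chipselect handling are assumptions, not values taken from this patch:

	#include <linux/spi/spi.h>
	#include <asm/arch/pxa2xx_spi.h>	/* path assumed for a PXA build */

	/* Hypothetical chipselect hook: called with PXA2XX_CS_ASSERT /
	 * PXA2XX_CS_DEASSERT around each transfer. */
	static void example_cs_control(u32 command)
	{
		if (command == PXA2XX_CS_ASSERT)
			; /* drive the board's CS GPIO to its active level */
		else if (command == PXA2XX_CS_DEASSERT)
			; /* release it again */
	}

	/* platform_data for the SSP controller device */
	static struct pxa2xx_spi_master example_ssp_master_info = {
		.ssp_type	= PXA25x_SSP,
		.clock_enable	= 0,	/* board-specific CKEN bit, assumed */
		.num_chipselect	= 1,
		.enable_dma	= 0,
	};

	/* per-chip tuning, handed over via spi_board_info.controller_data */
	static struct pxa2xx_spi_chip example_chip_info = {
		.tx_threshold		= 8,
		.rx_threshold		= 8,
		.dma_burst_size		= 8,
		.timeout_microsecs	= 64,
		.cs_control		= example_cs_control,
	};

	static struct spi_board_info example_board_info[] __initdata = {
		{
			.modalias	 = "example-spi-slave",	/* made-up name */
			.max_speed_hz	 = 1000000,
			.bus_num	 = 1,
			.chip_select	 = 0,
			.controller_data = &example_chip_info,
		},
	};
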
diff --git a/include/asm-arm/arch-s3c2410/spi-gpio.h b/include/asm-arm/arch-s3c2410/spi-gpio.h new file mode 100644 index 000000000000..258c00bca270 --- /dev/null +++ b/include/asm-arm/arch-s3c2410/spi-gpio.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* linux/include/asm-arm/arch-s3c2410/spi-gpio.h | ||
2 | * | ||
3 | * Copyright (c) 2006 Simtec Electronics | ||
4 | * Ben Dooks <ben@simtec.co.uk> | ||
5 | * | ||
6 | * S3C2410 - SPI Controller platform_device info | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __ASM_ARCH_SPIGPIO_H | ||
14 | #define __ASM_ARCH_SPIGPIO_H __FILE__ | ||
15 | |||
16 | struct s3c2410_spigpio_info; | ||
17 | struct spi_board_info; | ||
18 | |||
19 | struct s3c2410_spigpio_info { | ||
20 | unsigned long pin_clk; | ||
21 | unsigned long pin_mosi; | ||
22 | unsigned long pin_miso; | ||
23 | |||
24 | unsigned long board_size; | ||
25 | struct spi_board_info *board_info; | ||
26 | |||
27 | void (*chip_select)(struct s3c2410_spigpio_info *spi, int cs); | ||
28 | }; | ||
29 | |||
30 | |||
31 | #endif /* __ASM_ARCH_SPIGPIO_H */ | ||
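
A hedged illustration of the descriptor above for a GPIO-bitbanged bus; the pin values, the slave table and the chip_select behaviour are placeholders, not taken from this patch:

	#include <linux/spi/spi.h>
	#include <asm/arch/spi-gpio.h>	/* path assumed for an S3C24xx build */

	static struct spi_board_info example_gpio_bus_devices[] __initdata = {
		{
			.modalias	= "example-spi-slave",	/* made-up name */
			.max_speed_hz	= 500000,
			.bus_num	= 2,
			.chip_select	= 0,
		},
	};

	static void example_gpio_chip_select(struct s3c2410_spigpio_info *spi,
					     int cs)
	{
		/* drive the board's dedicated chipselect GPIO; assumed: cs
		 * conveys active vs. inactive for the line */
	}

	static struct s3c2410_spigpio_info example_spigpio_info = {
		.pin_clk	= 0,	/* e.g. an S3C2410 GPE pin, assumed */
		.pin_mosi	= 0,
		.pin_miso	= 0,
		.board_size	= ARRAY_SIZE(example_gpio_bus_devices),
		.board_info	= example_gpio_bus_devices,
		.chip_select	= example_gpio_chip_select,
	};
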
diff --git a/include/asm-arm/arch-s3c2410/spi.h b/include/asm-arm/arch-s3c2410/spi.h new file mode 100644 index 000000000000..4029a1a1ab40 --- /dev/null +++ b/include/asm-arm/arch-s3c2410/spi.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* linux/include/asm-arm/arch-s3c2410/spi.h | ||
2 | * | ||
3 | * Copyright (c) 2006 Simtec Electronics | ||
4 | * Ben Dooks <ben@simtec.co.uk> | ||
5 | * | ||
6 | * S3C2410 - SPI Controller platform_device info | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | ||
12 | |||
13 | #ifndef __ASM_ARCH_SPI_H | ||
14 | #define __ASM_ARCH_SPI_H __FILE__ | ||
15 | |||
16 | struct s3c2410_spi_info; | ||
17 | struct spi_board_info; | ||
18 | |||
19 | struct s3c2410_spi_info { | ||
20 | unsigned long pin_cs; /* simple gpio cs */ | ||
21 | |||
22 | unsigned long board_size; | ||
23 | struct spi_board_info *board_info; | ||
24 | |||
25 | void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); | ||
26 | }; | ||
27 | |||
28 | |||
29 | #endif /* __ASM_ARCH_SPI_H */ | ||
diff --git a/include/asm-arm/procinfo.h b/include/asm-arm/procinfo.h index a9c75b2c314f..842526055225 100644 --- a/include/asm-arm/procinfo.h +++ b/include/asm-arm/procinfo.h | |||
@@ -45,8 +45,6 @@ extern unsigned int elf_hwcap; | |||
45 | 45 | ||
46 | #endif /* __ASSEMBLY__ */ | 46 | #endif /* __ASSEMBLY__ */ |
47 | 47 | ||
48 | #define PROC_INFO_SZ 48 | ||
49 | |||
50 | #define HWCAP_SWP 1 | 48 | #define HWCAP_SWP 1 |
51 | #define HWCAP_HALF 2 | 49 | #define HWCAP_HALF 2 |
52 | #define HWCAP_THUMB 4 | 50 | #define HWCAP_THUMB 4 |
diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h index 43ad4e55878c..406ca97a8ab2 100644 --- a/include/asm-arm/spinlock.h +++ b/include/asm-arm/spinlock.h | |||
@@ -142,6 +142,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) | |||
142 | : "cc"); | 142 | : "cc"); |
143 | } | 143 | } |
144 | 144 | ||
145 | /* write_can_lock - would write_trylock() succeed? */ | ||
146 | #define __raw_write_can_lock(x) ((x)->lock == 0x80000000) | ||
147 | |||
145 | /* | 148 | /* |
146 | * Read locks are a bit more hairy: | 149 | * Read locks are a bit more hairy: |
147 | * - Exclusively load the lock value. | 150 | * - Exclusively load the lock value. |
@@ -198,4 +201,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) | |||
198 | 201 | ||
199 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) | 202 | #define __raw_read_trylock(lock) generic__raw_read_trylock(lock) |
200 | 203 | ||
204 | /* read_can_lock - would read_trylock() succeed? */ | ||
205 | #define __raw_read_can_lock(x) ((x)->lock < 0x80000000) | ||
206 | |||
201 | #endif /* __ASM_SPINLOCK_H */ | 207 | #endif /* __ASM_SPINLOCK_H */ |
diff --git a/include/asm-s390/unistd.h b/include/asm-s390/unistd.h index 657d582e8149..41c2792ff6b0 100644 --- a/include/asm-s390/unistd.h +++ b/include/asm-s390/unistd.h | |||
@@ -296,8 +296,14 @@ | |||
296 | #define __NR_pselect6 301 | 296 | #define __NR_pselect6 301 |
297 | #define __NR_ppoll 302 | 297 | #define __NR_ppoll 302 |
298 | #define __NR_unshare 303 | 298 | #define __NR_unshare 303 |
299 | #define __NR_set_robust_list 304 | ||
300 | #define __NR_get_robust_list 305 | ||
301 | #define __NR_splice 306 | ||
302 | #define __NR_sync_file_range 307 | ||
303 | #define __NR_tee 308 | ||
304 | #define __NR_vmsplice 309 | ||
299 | 305 | ||
300 | #define NR_syscalls 304 | 306 | #define NR_syscalls 310 |
301 | 307 | ||
302 | /* | 308 | /* |
303 | * There are some system calls that are not present on 64 bit, some | 309 | * There are some system calls that are not present on 64 bit, some |
diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 2d716080be4a..33d8f2087b6e 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h | |||
@@ -19,5 +19,4 @@ int request_firmware_nowait( | |||
19 | void (*cont)(const struct firmware *fw, void *context)); | 19 | void (*cont)(const struct firmware *fw, void *context)); |
20 | 20 | ||
21 | void release_firmware(const struct firmware *fw); | 21 | void release_firmware(const struct firmware *fw); |
22 | void register_firmware(const char *name, const u8 *data, size_t size); | ||
23 | #endif | 22 | #endif |
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index a3a0e078f79d..16fbe59edeb1 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -110,5 +110,16 @@ struct fsl_usb2_platform_data { | |||
110 | #define FSL_USB2_PORT0_ENABLED 0x00000001 | 110 | #define FSL_USB2_PORT0_ENABLED 0x00000001 |
111 | #define FSL_USB2_PORT1_ENABLED 0x00000002 | 111 | #define FSL_USB2_PORT1_ENABLED 0x00000002 |
112 | 112 | ||
113 | struct fsl_spi_platform_data { | ||
114 | u32 initial_spmode; /* initial SPMODE value */ | ||
115 | u16 bus_num; | ||
116 | |||
117 | /* board specific information */ | ||
118 | u16 max_chipselect; | ||
119 | void (*activate_cs)(u8 cs, u8 polarity); | ||
120 | void (*deactivate_cs)(u8 cs, u8 polarity); | ||
121 | u32 sysclk; | ||
122 | }; | ||
123 | |||
113 | #endif /* _FSL_DEVICE_H_ */ | 124 | #endif /* _FSL_DEVICE_H_ */ |
114 | #endif /* __KERNEL__ */ | 125 | #endif /* __KERNEL__ */ |
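
A short, hedged sketch of how platform code might fill the new fsl_spi_platform_data; the chipselect callbacks, the chipselect count and the 66 MHz sysclk are assumptions about a particular board:

	#include <linux/fsl_devices.h>

	static void example_activate_cs(u8 cs, u8 polarity)
	{
		/* drive board chipselect 'cs' to the level given by 'polarity' */
	}

	static void example_deactivate_cs(u8 cs, u8 polarity)
	{
		/* return chipselect 'cs' to its inactive level */
	}

	static struct fsl_spi_platform_data example_fsl_spi_pdata = {
		.initial_spmode	= 0,		/* SPMODE reset value, assumed */
		.bus_num	= 0,
		.max_chipselect	= 4,		/* board assumption */
		.activate_cs	= example_activate_cs,
		.deactivate_cs	= example_deactivate_cs,
		.sysclk		= 66000000,	/* assumed 66 MHz system clock */
	};
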
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index e1bd0842f6a1..f4fc576ed4c4 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -124,6 +124,7 @@ extern int get_option(char **str, int *pint); | |||
124 | extern char *get_options(const char *str, int nints, int *ints); | 124 | extern char *get_options(const char *str, int nints, int *ints); |
125 | extern unsigned long long memparse(char *ptr, char **retptr); | 125 | extern unsigned long long memparse(char *ptr, char **retptr); |
126 | 126 | ||
127 | extern int core_kernel_text(unsigned long addr); | ||
127 | extern int __kernel_text_address(unsigned long addr); | 128 | extern int __kernel_text_address(unsigned long addr); |
128 | extern int kernel_text_address(unsigned long addr); | 129 | extern int kernel_text_address(unsigned long addr); |
129 | extern int session_of_pgrp(int pgrp); | 130 | extern int session_of_pgrp(int pgrp); |
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h index bdc556d88498..03a14a30c46a 100644 --- a/include/linux/mmc/mmc.h +++ b/include/linux/mmc/mmc.h | |||
@@ -69,6 +69,7 @@ struct mmc_data { | |||
69 | unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ | 69 | unsigned int timeout_ns; /* data timeout (in ns, max 80ms) */ |
70 | unsigned int timeout_clks; /* data timeout (in clocks) */ | 70 | unsigned int timeout_clks; /* data timeout (in clocks) */ |
71 | unsigned int blksz_bits; /* data block size */ | 71 | unsigned int blksz_bits; /* data block size */ |
72 | unsigned int blksz; /* data block size */ | ||
72 | unsigned int blocks; /* number of blocks */ | 73 | unsigned int blocks; /* number of blocks */ |
73 | unsigned int error; /* data error */ | 74 | unsigned int error; /* data error */ |
74 | unsigned int flags; | 75 | unsigned int flags; |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index b5c21122c299..36740354d4db 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #else | 22 | #else |
23 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER | 23 | #define MAX_ORDER CONFIG_FORCE_MAX_ZONEORDER |
24 | #endif | 24 | #endif |
25 | #define MAX_ORDER_NR_PAGES (1 << (MAX_ORDER - 1)) | ||
25 | 26 | ||
26 | struct free_area { | 27 | struct free_area { |
27 | struct list_head free_list; | 28 | struct list_head free_list; |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 5673008b61e1..970284f571a6 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -132,6 +132,7 @@ static inline void rcu_bh_qsctr_inc(int cpu) | |||
132 | } | 132 | } |
133 | 133 | ||
134 | extern int rcu_pending(int cpu); | 134 | extern int rcu_pending(int cpu); |
135 | extern int rcu_needs_cpu(int cpu); | ||
135 | 136 | ||
136 | /** | 137 | /** |
137 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 138 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 3af03b19c983..2d985d59c7b8 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -150,6 +150,7 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | |||
150 | 150 | ||
151 | extern void kfree(const void *); | 151 | extern void kfree(const void *); |
152 | extern unsigned int ksize(const void *); | 152 | extern unsigned int ksize(const void *); |
153 | extern int slab_is_available(void); | ||
153 | 154 | ||
154 | #ifdef CONFIG_NUMA | 155 | #ifdef CONFIG_NUMA |
155 | extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); | 156 | extern void *kmem_cache_alloc_node(kmem_cache_t *, gfp_t flags, int node); |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b05f1463a267..e928c0dcc297 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -31,18 +31,23 @@ extern struct bus_type spi_bus_type; | |||
31 | * @master: SPI controller used with the device. | 31 | * @master: SPI controller used with the device. |
32 | * @max_speed_hz: Maximum clock rate to be used with this chip | 32 | * @max_speed_hz: Maximum clock rate to be used with this chip |
33 | * (on this board); may be changed by the device's driver. | 33 | * (on this board); may be changed by the device's driver. |
34 | * The spi_transfer.speed_hz can override this for each transfer. | ||
34 | * @chip-select: Chipselect, distinguishing chips handled by "master". | 35 | * @chip-select: Chipselect, distinguishing chips handled by "master". |
35 | * @mode: The spi mode defines how data is clocked out and in. | 36 | * @mode: The spi mode defines how data is clocked out and in. |
36 | * This may be changed by the device's driver. | 37 | * This may be changed by the device's driver. |
38 | * The "active low" default for chipselect mode can be overridden, | ||
39 | * as can the "MSB first" default for each word in a transfer. | ||
37 | * @bits_per_word: Data transfers involve one or more words; word sizes | 40 | * @bits_per_word: Data transfers involve one or more words; word sizes |
38 | * like eight or 12 bits are common. In-memory wordsizes are | 41 | * like eight or 12 bits are common. In-memory wordsizes are |
39 | * powers of two bytes (e.g. 20 bit samples use 32 bits). | 42 | * powers of two bytes (e.g. 20 bit samples use 32 bits). |
40 | * This may be changed by the device's driver. | 43 | * This may be changed by the device's driver, or left at the |
44 | * default (0) indicating protocol words are eight bit bytes. | ||
45 | * The spi_transfer.bits_per_word can override this for each transfer. | ||
41 | * @irq: Negative, or the number passed to request_irq() to receive | 46 | * @irq: Negative, or the number passed to request_irq() to receive |
42 | * interrupts from this device. | 47 | * interrupts from this device. |
43 | * @controller_state: Controller's runtime state | 48 | * @controller_state: Controller's runtime state |
44 | * @controller_data: Board-specific definitions for controller, such as | 49 | * @controller_data: Board-specific definitions for controller, such as |
45 | * FIFO initialization parameters; from board_info.controller_data | 50 | * FIFO initialization parameters; from board_info.controller_data |
46 | * | 51 | * |
47 | * An spi_device is used to interchange data between an SPI slave | 52 | * An spi_device is used to interchange data between an SPI slave |
48 | * (usually a discrete chip) and CPU memory. | 53 | * (usually a discrete chip) and CPU memory. |
@@ -65,6 +70,7 @@ struct spi_device { | |||
65 | #define SPI_MODE_2 (SPI_CPOL|0) | 70 | #define SPI_MODE_2 (SPI_CPOL|0) |
66 | #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) | 71 | #define SPI_MODE_3 (SPI_CPOL|SPI_CPHA) |
67 | #define SPI_CS_HIGH 0x04 /* chipselect active high? */ | 72 | #define SPI_CS_HIGH 0x04 /* chipselect active high? */ |
73 | #define SPI_LSB_FIRST 0x08 /* per-word bits-on-wire */ | ||
68 | u8 bits_per_word; | 74 | u8 bits_per_word; |
69 | int irq; | 75 | int irq; |
70 | void *controller_state; | 76 | void *controller_state; |
@@ -73,7 +79,6 @@ struct spi_device { | |||
73 | 79 | ||
74 | // likely need more hooks for more protocol options affecting how | 80 | // likely need more hooks for more protocol options affecting how |
75 | // the controller talks to each chip, like: | 81 | // the controller talks to each chip, like: |
76 | // - bit order (default is wordwise msb-first) | ||
77 | // - memory packing (12 bit samples into low bits, others zeroed) | 82 | // - memory packing (12 bit samples into low bits, others zeroed) |
78 | // - priority | 83 | // - priority |
79 | // - drop chipselect after each word | 84 | // - drop chipselect after each word |
@@ -143,13 +148,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
143 | * struct spi_master - interface to SPI master controller | 148 | * struct spi_master - interface to SPI master controller |
144 | * @cdev: class interface to this driver | 149 | * @cdev: class interface to this driver |
145 | * @bus_num: board-specific (and often SOC-specific) identifier for a | 150 | * @bus_num: board-specific (and often SOC-specific) identifier for a |
146 | * given SPI controller. | 151 | * given SPI controller. |
147 | * @num_chipselect: chipselects are used to distinguish individual | 152 | * @num_chipselect: chipselects are used to distinguish individual |
148 | * SPI slaves, and are numbered from zero to num_chipselects. | 153 | * SPI slaves, and are numbered from zero to num_chipselects. |
149 | * each slave has a chipselect signal, but it's common that not | 154 | * each slave has a chipselect signal, but it's common that not |
150 | * every chipselect is connected to a slave. | 155 | * every chipselect is connected to a slave. |
151 | * @setup: updates the device mode and clocking records used by a | 156 | * @setup: updates the device mode and clocking records used by a |
152 | * device's SPI controller; protocol code may call this. | 157 | * device's SPI controller; protocol code may call this. |
153 | * @transfer: adds a message to the controller's transfer queue. | 158 | * @transfer: adds a message to the controller's transfer queue. |
154 | * @cleanup: frees controller-specific state | 159 | * @cleanup: frees controller-specific state |
155 | * | 160 | * |
@@ -167,13 +172,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
167 | struct spi_master { | 172 | struct spi_master { |
168 | struct class_device cdev; | 173 | struct class_device cdev; |
169 | 174 | ||
170 | /* other than zero (== assign one dynamically), bus_num is fully | 175 | /* other than negative (== assign one dynamically), bus_num is fully |
171 | * board-specific. usually that simplifies to being SOC-specific. | 176 | * board-specific. usually that simplifies to being SOC-specific. |
172 | * example: one SOC has three SPI controllers, numbered 1..3, | 177 | * example: one SOC has three SPI controllers, numbered 0..2, |
173 | * and one board's schematics might show it using SPI-2. software | 178 | * and one board's schematics might show it using SPI-2. software |
174 | * would normally use bus_num=2 for that controller. | 179 | * would normally use bus_num=2 for that controller. |
175 | */ | 180 | */ |
176 | u16 bus_num; | 181 | s16 bus_num; |
177 | 182 | ||
178 | /* chipselects will be integral to many controllers; some others | 183 | /* chipselects will be integral to many controllers; some others |
179 | * might use board-specific GPIOs. | 184 | * might use board-specific GPIOs. |
@@ -268,10 +273,14 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum); | |||
268 | * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped | 273 | * @tx_dma: DMA address of tx_buf, if spi_message.is_dma_mapped |
269 | * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped | 274 | * @rx_dma: DMA address of rx_buf, if spi_message.is_dma_mapped |
270 | * @len: size of rx and tx buffers (in bytes) | 275 | * @len: size of rx and tx buffers (in bytes) |
276 | * @speed_hz: Select a speed other than the device default for this | ||
277 | * transfer. If 0 the default (from spi_device) is used. | ||
278 | * @bits_per_word: select a bits_per_word other than the device default | ||
279 | * for this transfer. If 0 the default (from spi_device) is used. | ||
271 | * @cs_change: affects chipselect after this transfer completes | 280 | * @cs_change: affects chipselect after this transfer completes |
272 | * @delay_usecs: microseconds to delay after this transfer before | 281 | * @delay_usecs: microseconds to delay after this transfer before |
273 | * (optionally) changing the chipselect status, then starting | 282 | * (optionally) changing the chipselect status, then starting |
274 | * the next transfer or completing this spi_message. | 283 | * the next transfer or completing this spi_message. |
275 | * @transfer_list: transfers are sequenced through spi_message.transfers | 284 | * @transfer_list: transfers are sequenced through spi_message.transfers |
276 | * | 285 | * |
277 | * SPI transfers always write the same number of bytes as they read. | 286 | * SPI transfers always write the same number of bytes as they read. |
@@ -322,7 +331,9 @@ struct spi_transfer { | |||
322 | dma_addr_t rx_dma; | 331 | dma_addr_t rx_dma; |
323 | 332 | ||
324 | unsigned cs_change:1; | 333 | unsigned cs_change:1; |
334 | u8 bits_per_word; | ||
325 | u16 delay_usecs; | 335 | u16 delay_usecs; |
336 | u32 speed_hz; | ||
326 | 337 | ||
327 | struct list_head transfer_list; | 338 | struct list_head transfer_list; |
328 | }; | 339 | }; |
@@ -356,7 +367,7 @@ struct spi_transfer { | |||
356 | * and its transfers, ignore them until its completion callback. | 367 | * and its transfers, ignore them until its completion callback. |
357 | */ | 368 | */ |
358 | struct spi_message { | 369 | struct spi_message { |
359 | struct list_head transfers; | 370 | struct list_head transfers; |
360 | 371 | ||
361 | struct spi_device *spi; | 372 | struct spi_device *spi; |
362 | 373 | ||
@@ -374,7 +385,7 @@ struct spi_message { | |||
374 | */ | 385 | */ |
375 | 386 | ||
376 | /* completion is reported through a callback */ | 387 | /* completion is reported through a callback */ |
377 | void (*complete)(void *context); | 388 | void (*complete)(void *context); |
378 | void *context; | 389 | void *context; |
379 | unsigned actual_length; | 390 | unsigned actual_length; |
380 | int status; | 391 | int status; |
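
The per-transfer @speed_hz and @bits_per_word overrides documented above are easiest to see from a protocol driver's point of view. A minimal sketch, assuming the list-based spi_message_init()/spi_message_add_tail() helpers shown in this hunk; the buffers, lengths and clock values are illustrative only:

	#include <linux/spi/spi.h>
	#include <linux/string.h>

	/* Send a command at the device's default settings, then a payload as
	 * 16-bit words at a slower, transfer-specific clock.  Zero fields
	 * fall back to the spi_device defaults, as the documentation above
	 * states. */
	static int example_two_rate_write(struct spi_device *spi,
					  const void *cmd, size_t cmd_len,
					  const void *data, size_t data_len)
	{
		struct spi_transfer t[2];
		struct spi_message m;

		memset(t, 0, sizeof(t));
		spi_message_init(&m);

		t[0].tx_buf = cmd;
		t[0].len = cmd_len;		/* device defaults apply */
		spi_message_add_tail(&t[0], &m);

		t[1].tx_buf = data;
		t[1].len = data_len;
		t[1].bits_per_word = 16;	/* per-transfer override */
		t[1].speed_hz = 500000;		/* per-transfer override */
		spi_message_add_tail(&t[1], &m);

		return spi_sync(spi, &m);
	}
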
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index c961fe9bf3eb..16ce178f54d7 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h | |||
@@ -30,6 +30,12 @@ struct spi_bitbang { | |||
30 | 30 | ||
31 | struct spi_master *master; | 31 | struct spi_master *master; |
32 | 32 | ||
33 | /* setup_transfer() changes clock and/or wordsize to match settings | ||
34 | * for this transfer; zeroes restore defaults from spi_device. | ||
35 | */ | ||
36 | int (*setup_transfer)(struct spi_device *spi, | ||
37 | struct spi_transfer *t); | ||
38 | |||
33 | void (*chipselect)(struct spi_device *spi, int is_on); | 39 | void (*chipselect)(struct spi_device *spi, int is_on); |
34 | #define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ | 40 | #define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ |
35 | #define BITBANG_CS_INACTIVE 0 | 41 | #define BITBANG_CS_INACTIVE 0 |
@@ -51,6 +57,8 @@ struct spi_bitbang { | |||
51 | extern int spi_bitbang_setup(struct spi_device *spi); | 57 | extern int spi_bitbang_setup(struct spi_device *spi); |
52 | extern void spi_bitbang_cleanup(const struct spi_device *spi); | 58 | extern void spi_bitbang_cleanup(const struct spi_device *spi); |
53 | extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m); | 59 | extern int spi_bitbang_transfer(struct spi_device *spi, struct spi_message *m); |
60 | extern int spi_bitbang_setup_transfer(struct spi_device *spi, | ||
61 | struct spi_transfer *t); | ||
54 | 62 | ||
55 | /* start or stop queue processing */ | 63 | /* start or stop queue processing */ |
56 | extern int spi_bitbang_start(struct spi_bitbang *spi); | 64 | extern int spi_bitbang_start(struct spi_bitbang *spi); |
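
For a controller driver built on spi_bitbang, the new hook and exported helper above compose as sketched below. The example_* names are hypothetical; spi_bitbang_setup_transfer() is taken, per the comment above, to apply t->speed_hz / t->bits_per_word or restore the spi_device defaults when they are zero:

	#include <linux/spi/spi.h>
	#include <linux/spi/spi_bitbang.h>

	/* Per-transfer hook: let the generic helper recompute word size and
	 * clock, then (hypothetically) poke controller registers to match. */
	static int example_setup_transfer(struct spi_device *spi,
					  struct spi_transfer *t)
	{
		int ret = spi_bitbang_setup_transfer(spi, t);
		if (ret)
			return ret;
		/* program the assumed controller's clock divider here */
		return 0;
	}

	static void example_wire_up(struct spi_bitbang *bitbang)
	{
		bitbang->setup_transfer = example_setup_transfer;
	}
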
diff --git a/include/linux/swap.h b/include/linux/swap.h index 5b1fdf1cff4f..f03c24719302 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -296,7 +296,7 @@ static inline void disable_swap_token(void) | |||
296 | #define read_swap_cache_async(swp,vma,addr) NULL | 296 | #define read_swap_cache_async(swp,vma,addr) NULL |
297 | #define lookup_swap_cache(swp) NULL | 297 | #define lookup_swap_cache(swp) NULL |
298 | #define valid_swaphandles(swp, off) 0 | 298 | #define valid_swaphandles(swp, off) 0 |
299 | #define can_share_swap_page(p) 0 | 299 | #define can_share_swap_page(p) (page_mapcount(p) == 1) |
300 | #define move_to_swap_cache(p, swp) 1 | 300 | #define move_to_swap_cache(p, swp) 1 |
301 | #define move_from_swap_cache(p, i, m) 1 | 301 | #define move_from_swap_cache(p, i, m) 1 |
302 | #define __delete_from_swap_cache(p) /*NOTHING*/ | 302 | #define __delete_from_swap_cache(p) /*NOTHING*/ |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index b0666d66293f..4901ee446879 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -211,6 +211,7 @@ struct neigh_table | |||
211 | #define NEIGH_UPDATE_F_ADMIN 0x80000000 | 211 | #define NEIGH_UPDATE_F_ADMIN 0x80000000 |
212 | 212 | ||
213 | extern void neigh_table_init(struct neigh_table *tbl); | 213 | extern void neigh_table_init(struct neigh_table *tbl); |
214 | extern void neigh_table_init_no_netlink(struct neigh_table *tbl); | ||
214 | extern int neigh_table_clear(struct neigh_table *tbl); | 215 | extern int neigh_table_clear(struct neigh_table *tbl); |
215 | extern struct neighbour * neigh_lookup(struct neigh_table *tbl, | 216 | extern struct neighbour * neigh_lookup(struct neigh_table *tbl, |
216 | const void *pkey, | 217 | const void *pkey, |
diff --git a/include/net/sctp/command.h b/include/net/sctp/command.h index 34a1a09e5aef..807d6f1ef4b5 100644 --- a/include/net/sctp/command.h +++ b/include/net/sctp/command.h | |||
@@ -99,6 +99,7 @@ typedef enum { | |||
99 | SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */ | 99 | SCTP_CMD_DEL_NON_PRIMARY, /* Removes non-primary peer transports. */ |
100 | SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */ | 100 | SCTP_CMD_T3_RTX_TIMERS_STOP, /* Stops T3-rtx pending timers */ |
101 | SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */ | 101 | SCTP_CMD_FORCE_PRIM_RETRAN, /* Forces retrans. over primary path. */ |
102 | SCTP_CMD_SET_SK_ERR, /* Set sk_err */ | ||
102 | SCTP_CMD_LAST | 103 | SCTP_CMD_LAST |
103 | } sctp_verb_t; | 104 | } sctp_verb_t; |
104 | 105 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index e673b2c984e9..aa6033ca7cd8 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -461,12 +461,12 @@ static inline int sctp_frag_point(const struct sctp_sock *sp, int pmtu) | |||
461 | * there is room for a param header too. | 461 | * there is room for a param header too. |
462 | */ | 462 | */ |
463 | #define sctp_walk_params(pos, chunk, member)\ | 463 | #define sctp_walk_params(pos, chunk, member)\ |
464 | _sctp_walk_params((pos), (chunk), WORD_ROUND(ntohs((chunk)->chunk_hdr.length)), member) | 464 | _sctp_walk_params((pos), (chunk), ntohs((chunk)->chunk_hdr.length), member) |
465 | 465 | ||
466 | #define _sctp_walk_params(pos, chunk, end, member)\ | 466 | #define _sctp_walk_params(pos, chunk, end, member)\ |
467 | for (pos.v = chunk->member;\ | 467 | for (pos.v = chunk->member;\ |
468 | pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\ | 468 | pos.v <= (void *)chunk + end - sizeof(sctp_paramhdr_t) &&\ |
469 | pos.v <= (void *)chunk + end - WORD_ROUND(ntohs(pos.p->length)) &&\ | 469 | pos.v <= (void *)chunk + end - ntohs(pos.p->length) &&\ |
470 | ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ | 470 | ntohs(pos.p->length) >= sizeof(sctp_paramhdr_t);\ |
471 | pos.v += WORD_ROUND(ntohs(pos.p->length))) | 471 | pos.v += WORD_ROUND(ntohs(pos.p->length))) |
472 | 472 | ||
@@ -477,7 +477,7 @@ _sctp_walk_errors((err), (chunk_hdr), ntohs((chunk_hdr)->length)) | |||
477 | for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ | 477 | for (err = (sctp_errhdr_t *)((void *)chunk_hdr + \ |
478 | sizeof(sctp_chunkhdr_t));\ | 478 | sizeof(sctp_chunkhdr_t));\ |
479 | (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\ | 479 | (void *)err <= (void *)chunk_hdr + end - sizeof(sctp_errhdr_t) &&\ |
480 | (void *)err <= (void *)chunk_hdr + end - WORD_ROUND(ntohs(err->length)) &&\ | 480 | (void *)err <= (void *)chunk_hdr + end - ntohs(err->length) &&\ |
481 | ntohs(err->length) >= sizeof(sctp_errhdr_t); \ | 481 | ntohs(err->length) >= sizeof(sctp_errhdr_t); \ |
482 | err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) | 482 | err = (sctp_errhdr_t *)((void *)err + WORD_ROUND(ntohs(err->length)))) |
483 | 483 | ||
diff --git a/init/do_mounts.c b/init/do_mounts.c index adb7cad3e6ee..f4b7b9d278cd 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c | |||
@@ -310,6 +310,11 @@ retry: | |||
310 | 310 | ||
311 | panic("VFS: Unable to mount root fs on %s", b); | 311 | panic("VFS: Unable to mount root fs on %s", b); |
312 | } | 312 | } |
313 | |||
314 | printk("No filesystem could mount root, tried: "); | ||
315 | for (p = fs_names; *p; p += strlen(p)+1) | ||
316 | printk(" %s", p); | ||
317 | printk("\n"); | ||
313 | panic("VFS: Unable to mount root fs on %s", __bdevname(ROOT_DEV, b)); | 318 | panic("VFS: Unable to mount root fs on %s", __bdevname(ROOT_DEV, b)); |
314 | out: | 319 | out: |
315 | putname(fs_names); | 320 | putname(fs_names); |
diff --git a/init/initramfs.c b/init/initramfs.c index 679d870d991b..f81cfa40a719 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -26,10 +26,12 @@ static void __init free(void *where) | |||
26 | 26 | ||
27 | /* link hash */ | 27 | /* link hash */ |
28 | 28 | ||
29 | #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) | ||
30 | |||
29 | static __initdata struct hash { | 31 | static __initdata struct hash { |
30 | int ino, minor, major; | 32 | int ino, minor, major; |
31 | struct hash *next; | 33 | struct hash *next; |
32 | char *name; | 34 | char name[N_ALIGN(PATH_MAX)]; |
33 | } *head[32]; | 35 | } *head[32]; |
34 | 36 | ||
35 | static inline int hash(int major, int minor, int ino) | 37 | static inline int hash(int major, int minor, int ino) |
@@ -57,7 +59,7 @@ static char __init *find_link(int major, int minor, int ino, char *name) | |||
57 | q->ino = ino; | 59 | q->ino = ino; |
58 | q->minor = minor; | 60 | q->minor = minor; |
59 | q->major = major; | 61 | q->major = major; |
60 | q->name = name; | 62 | strcpy(q->name, name); |
61 | q->next = NULL; | 63 | q->next = NULL; |
62 | *p = q; | 64 | *p = q; |
63 | return NULL; | 65 | return NULL; |
@@ -133,8 +135,6 @@ static inline void eat(unsigned n) | |||
133 | count -= n; | 135 | count -= n; |
134 | } | 136 | } |
135 | 137 | ||
136 | #define N_ALIGN(len) ((((len) + 1) & ~3) + 2) | ||
137 | |||
138 | static __initdata char *collected; | 138 | static __initdata char *collected; |
139 | static __initdata int remains; | 139 | static __initdata int remains; |
140 | static __initdata char *collect; | 140 | static __initdata char *collect; |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 72248d1b9e3f..ab81fdd4572b 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -2231,19 +2231,25 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | |||
2231 | * So only GFP_KERNEL allocations, if all nodes in the cpuset are | 2231 | * So only GFP_KERNEL allocations, if all nodes in the cpuset are |
2232 | * short of memory, might require taking the callback_mutex mutex. | 2232 | * short of memory, might require taking the callback_mutex mutex. |
2233 | * | 2233 | * |
2234 | * The first loop over the zonelist in mm/page_alloc.c:__alloc_pages() | 2234 | * The first call here from mm/page_alloc:get_page_from_freelist() |
2235 | * calls here with __GFP_HARDWALL always set in gfp_mask, enforcing | 2235 | * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, so |
2236 | * hardwall cpusets - no allocation on a node outside the cpuset is | 2236 | * no allocation on a node outside the cpuset is allowed (unless in |
2237 | * allowed (unless in interrupt, of course). | 2237 | * interrupt, of course). |
2238 | * | 2238 | * |
2239 | * The second loop doesn't even call here for GFP_ATOMIC requests | 2239 | * The second pass through get_page_from_freelist() doesn't even call |
2240 | * (if the __alloc_pages() local variable 'wait' is set). That check | 2240 | * here for GFP_ATOMIC calls. For those calls, the __alloc_pages() |
2241 | * and the checks below have the combined affect in the second loop of | 2241 | * variable 'wait' is not set, and the bit ALLOC_CPUSET is not set |
2242 | * the __alloc_pages() routine that: | 2242 | * in alloc_flags. That logic and the checks below have the combined |
2243 | * affect that: | ||
2243 | * in_interrupt - any node ok (current task context irrelevant) | 2244 | * in_interrupt - any node ok (current task context irrelevant) |
2244 | * GFP_ATOMIC - any node ok | 2245 | * GFP_ATOMIC - any node ok |
2245 | * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok | 2246 | * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok |
2246 | * GFP_USER - only nodes in current tasks mems allowed ok. | 2247 | * GFP_USER - only nodes in current tasks mems allowed ok. |
2248 | * | ||
2249 | * Rule: | ||
2250 | * Don't call cpuset_zone_allowed() if you can't sleep, unless you | ||
2251 | * pass in the __GFP_HARDWALL flag set in gfp_mask, which disables | ||
2252 | * the code that might scan up ancestor cpusets and sleep. | ||
2247 | **/ | 2253 | **/ |
2248 | 2254 | ||
2249 | int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | 2255 | int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) |
@@ -2255,6 +2261,7 @@ int __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) | |||
2255 | if (in_interrupt()) | 2261 | if (in_interrupt()) |
2256 | return 1; | 2262 | return 1; |
2257 | node = z->zone_pgdat->node_id; | 2263 | node = z->zone_pgdat->node_id; |
2264 | might_sleep_if(!(gfp_mask & __GFP_HARDWALL)); | ||
2258 | if (node_isset(node, current->mems_allowed)) | 2265 | if (node_isset(node, current->mems_allowed)) |
2259 | return 1; | 2266 | return 1; |
2260 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ | 2267 | if (gfp_mask & __GFP_HARDWALL) /* If hardwall request, stop here */ |
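
The rule added to the comment above mirrors what the page allocator hunk later in this patch does for kswapd wakeups. A schematic, hedged example of an atomic-context caller honouring it (the helper and its zone-list walk are hypothetical):

	#include <linux/cpuset.h>
	#include <linux/gfp.h>
	#include <linux/mmzone.h>

	/* Walk a NULL-terminated zone list and return the first zone this
	 * task's cpuset permits.  OR-ing in __GFP_HARDWALL keeps
	 * cpuset_zone_allowed() from scanning ancestor cpusets, which may
	 * sleep -- exactly the rule stated in the comment above. */
	static struct zone *example_first_allowed_zone(struct zone **zones,
						       gfp_t gfp_mask)
	{
		struct zone **z;

		for (z = zones; *z; z++)
			if (cpuset_zone_allowed(*z, gfp_mask | __GFP_HARDWALL))
				return *z;
		return NULL;
	}
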
diff --git a/kernel/extable.c b/kernel/extable.c index 7501b531ceed..7fe262855317 100644 --- a/kernel/extable.c +++ b/kernel/extable.c | |||
@@ -40,7 +40,7 @@ const struct exception_table_entry *search_exception_tables(unsigned long addr) | |||
40 | return e; | 40 | return e; |
41 | } | 41 | } |
42 | 42 | ||
43 | static int core_kernel_text(unsigned long addr) | 43 | int core_kernel_text(unsigned long addr) |
44 | { | 44 | { |
45 | if (addr >= (unsigned long)_stext && | 45 | if (addr >= (unsigned long)_stext && |
46 | addr <= (unsigned long)_etext) | 46 | addr <= (unsigned long)_etext) |
diff --git a/kernel/module.c b/kernel/module.c index d24deb0dbbc9..bbe04862e1b0 100644 --- a/kernel/module.c +++ b/kernel/module.c | |||
@@ -705,14 +705,14 @@ EXPORT_SYMBOL(__symbol_put); | |||
705 | 705 | ||
706 | void symbol_put_addr(void *addr) | 706 | void symbol_put_addr(void *addr) |
707 | { | 707 | { |
708 | unsigned long flags; | 708 | struct module *modaddr; |
709 | 709 | ||
710 | spin_lock_irqsave(&modlist_lock, flags); | 710 | if (core_kernel_text((unsigned long)addr)) |
711 | if (!kernel_text_address((unsigned long)addr)) | 711 | return; |
712 | BUG(); | ||
713 | 712 | ||
714 | module_put(module_text_address((unsigned long)addr)); | 713 | if (!(modaddr = module_text_address((unsigned long)addr))) |
715 | spin_unlock_irqrestore(&modlist_lock, flags); | 714 | BUG(); |
715 | module_put(modaddr); | ||
716 | } | 716 | } |
717 | EXPORT_SYMBOL_GPL(symbol_put_addr); | 717 | EXPORT_SYMBOL_GPL(symbol_put_addr); |
718 | 718 | ||
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 6d32ff26f948..2058f88c7bbb 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -479,12 +479,31 @@ static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) | |||
479 | return 0; | 479 | return 0; |
480 | } | 480 | } |
481 | 481 | ||
482 | /* | ||
483 | * Check to see if there is any immediate RCU-related work to be done | ||
484 | * by the current CPU, returning 1 if so. This function is part of the | ||
485 | * RCU implementation; it is -not- an exported member of the RCU API. | ||
486 | */ | ||
482 | int rcu_pending(int cpu) | 487 | int rcu_pending(int cpu) |
483 | { | 488 | { |
484 | return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || | 489 | return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) || |
485 | __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); | 490 | __rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu)); |
486 | } | 491 | } |
487 | 492 | ||
493 | /* | ||
494 | * Check to see if any future RCU-related work will need to be done | ||
495 | * by the current CPU, even if none need be done immediately, returning | ||
496 | * 1 if so. This function is part of the RCU implementation; it is -not- | ||
497 | * an exported member of the RCU API. | ||
498 | */ | ||
499 | int rcu_needs_cpu(int cpu) | ||
500 | { | ||
501 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
502 | struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); | ||
503 | |||
504 | return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); | ||
505 | } | ||
506 | |||
488 | void rcu_check_callbacks(int cpu, int user) | 507 | void rcu_check_callbacks(int cpu, int user) |
489 | { | 508 | { |
490 | if (user || | 509 | if (user || |
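
rcu_needs_cpu() is aimed at callers deciding whether a CPU may skip ticks for a while. A one-line hedged sketch; the helper name is made up, and note that rcu_needs_cpu() already folds in rcu_pending():

	/* Return nonzero if the CPU has no queued or pending RCU work and can
	 * therefore be allowed a long tickless sleep.  Hypothetical helper. */
	static int example_cpu_is_rcu_idle(int cpu)
	{
		return !rcu_needs_cpu(cpu);
	}
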
diff --git a/kernel/timer.c b/kernel/timer.c index 67eaf0f54096..9e49deed468c 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -541,6 +541,22 @@ found: | |||
541 | } | 541 | } |
542 | spin_unlock(&base->lock); | 542 | spin_unlock(&base->lock); |
543 | 543 | ||
544 | /* | ||
545 | * It can happen that other CPUs service timer IRQs and increment | ||
546 | * jiffies, but we have not yet got a local timer tick to process | ||
547 | * the timer wheels. In that case, the expiry time can be before | ||
548 | * jiffies, but since the high-resolution timer here is relative to | ||
549 | * jiffies, the default expression when high-resolution timers are | ||
550 | * not active, | ||
551 | * | ||
552 | * time_before(MAX_JIFFY_OFFSET + jiffies, expires) | ||
553 | * | ||
554 | * would falsely evaluate to true. If that is the case, just | ||
555 | * return jiffies so that we can immediately fire the local timer | ||
556 | */ | ||
557 | if (time_before(expires, jiffies)) | ||
558 | return jiffies; | ||
559 | |||
544 | if (time_before(hr_expires, expires)) | 560 | if (time_before(hr_expires, expires)) |
545 | return hr_expires; | 561 | return hr_expires; |
546 | 562 | ||
diff --git a/lib/kobject.c b/lib/kobject.c index b46350c27837..687ab418d292 100644 --- a/lib/kobject.c +++ b/lib/kobject.c | |||
@@ -198,14 +198,14 @@ int kobject_add(struct kobject * kobj) | |||
198 | 198 | ||
199 | /* be noisy on error issues */ | 199 | /* be noisy on error issues */ |
200 | if (error == -EEXIST) | 200 | if (error == -EEXIST) |
201 | printk("kobject_add failed for %s with -EEXIST, " | 201 | pr_debug("kobject_add failed for %s with -EEXIST, " |
202 | "don't try to register things with the " | 202 | "don't try to register things with the " |
203 | "same name in the same directory.\n", | 203 | "same name in the same directory.\n", |
204 | kobject_name(kobj)); | 204 | kobject_name(kobj)); |
205 | else | 205 | else |
206 | printk("kobject_add failed for %s (%d)\n", | 206 | pr_debug("kobject_add failed for %s (%d)\n", |
207 | kobject_name(kobj), error); | 207 | kobject_name(kobj), error); |
208 | dump_stack(); | 208 | /* dump_stack(); */ |
209 | } | 209 | } |
210 | 210 | ||
211 | return error; | 211 | return error; |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index ea77c999047e..253a450c400d 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <linux/mempolicy.h> | 39 | #include <linux/mempolicy.h> |
40 | 40 | ||
41 | #include <asm/tlbflush.h> | 41 | #include <asm/tlbflush.h> |
42 | #include <asm/div64.h> | ||
42 | #include "internal.h" | 43 | #include "internal.h" |
43 | 44 | ||
44 | /* | 45 | /* |
@@ -950,7 +951,7 @@ restart: | |||
950 | goto got_pg; | 951 | goto got_pg; |
951 | 952 | ||
952 | do { | 953 | do { |
953 | if (cpuset_zone_allowed(*z, gfp_mask)) | 954 | if (cpuset_zone_allowed(*z, gfp_mask|__GFP_HARDWALL)) |
954 | wakeup_kswapd(*z, order); | 955 | wakeup_kswapd(*z, order); |
955 | } while (*(++z)); | 956 | } while (*(++z)); |
956 | 957 | ||
@@ -969,7 +970,8 @@ restart: | |||
969 | alloc_flags |= ALLOC_HARDER; | 970 | alloc_flags |= ALLOC_HARDER; |
970 | if (gfp_mask & __GFP_HIGH) | 971 | if (gfp_mask & __GFP_HIGH) |
971 | alloc_flags |= ALLOC_HIGH; | 972 | alloc_flags |= ALLOC_HIGH; |
972 | alloc_flags |= ALLOC_CPUSET; | 973 | if (wait) |
974 | alloc_flags |= ALLOC_CPUSET; | ||
973 | 975 | ||
974 | /* | 976 | /* |
975 | * Go through the zonelist again. Let __GFP_HIGH and allocations | 977 | * Go through the zonelist again. Let __GFP_HIGH and allocations |
@@ -2123,14 +2125,22 @@ static void __init alloc_node_mem_map(struct pglist_data *pgdat) | |||
2123 | #ifdef CONFIG_FLAT_NODE_MEM_MAP | 2125 | #ifdef CONFIG_FLAT_NODE_MEM_MAP |
2124 | /* ia64 gets its own node_mem_map, before this, without bootmem */ | 2126 | /* ia64 gets its own node_mem_map, before this, without bootmem */ |
2125 | if (!pgdat->node_mem_map) { | 2127 | if (!pgdat->node_mem_map) { |
2126 | unsigned long size; | 2128 | unsigned long size, start, end; |
2127 | struct page *map; | 2129 | struct page *map; |
2128 | 2130 | ||
2129 | size = (pgdat->node_spanned_pages + 1) * sizeof(struct page); | 2131 | /* |
2132 | * The zone's endpoints aren't required to be MAX_ORDER | ||
2133 | * aligned but the node_mem_map endpoints must be in order | ||
2134 | * for the buddy allocator to function correctly. | ||
2135 | */ | ||
2136 | start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1); | ||
2137 | end = pgdat->node_start_pfn + pgdat->node_spanned_pages; | ||
2138 | end = ALIGN(end, MAX_ORDER_NR_PAGES); | ||
2139 | size = (end - start) * sizeof(struct page); | ||
2130 | map = alloc_remap(pgdat->node_id, size); | 2140 | map = alloc_remap(pgdat->node_id, size); |
2131 | if (!map) | 2141 | if (!map) |
2132 | map = alloc_bootmem_node(pgdat, size); | 2142 | map = alloc_bootmem_node(pgdat, size); |
2133 | pgdat->node_mem_map = map; | 2143 | pgdat->node_mem_map = map + (pgdat->node_start_pfn - start); |
2134 | } | 2144 | } |
2135 | #ifdef CONFIG_FLATMEM | 2145 | #ifdef CONFIG_FLATMEM |
2136 | /* | 2146 | /* |
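The alignment arithmetic in this hunk rounds the start pfn down and the end pfn up to MAX_ORDER boundaries so that every buddy-order block has backing struct pages, then offsets node_mem_map back to the real start pfn. The same pfn rounding in isolation (MAX_ORDER_NR_PAGES is picked arbitrarily here as 1024, i.e. a power of two):

    #include <stdio.h>

    #define MAX_ORDER_NR_PAGES 1024UL                      /* example value, power of two */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))     /* round up to a multiple of a */

    int main(void)
    {
        unsigned long node_start_pfn = 1000500;
        unsigned long node_spanned_pages = 300000;

        unsigned long start = node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);             /* round down */
        unsigned long end = ALIGN(node_start_pfn + node_spanned_pages, MAX_ORDER_NR_PAGES);

        printf("map covers pfns [%lu, %lu), node starts at offset %lu\n",
               start, end, node_start_pfn - start);
        return 0;
    }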
@@ -2566,9 +2576,11 @@ void setup_per_zone_pages_min(void) | |||
2566 | } | 2576 | } |
2567 | 2577 | ||
2568 | for_each_zone(zone) { | 2578 | for_each_zone(zone) { |
2569 | unsigned long tmp; | 2579 | u64 tmp; |
2580 | |||
2570 | spin_lock_irqsave(&zone->lru_lock, flags); | 2581 | spin_lock_irqsave(&zone->lru_lock, flags); |
2571 | tmp = (pages_min * zone->present_pages) / lowmem_pages; | 2582 | tmp = (u64)pages_min * zone->present_pages; |
2583 | do_div(tmp, lowmem_pages); | ||
2572 | if (is_highmem(zone)) { | 2584 | if (is_highmem(zone)) { |
2573 | /* | 2585 | /* |
2574 | * __GFP_HIGH and PF_MEMALLOC allocations usually don't | 2586 | * __GFP_HIGH and PF_MEMALLOC allocations usually don't |
@@ -2595,8 +2607,8 @@ void setup_per_zone_pages_min(void) | |||
2595 | zone->pages_min = tmp; | 2607 | zone->pages_min = tmp; |
2596 | } | 2608 | } |
2597 | 2609 | ||
2598 | zone->pages_low = zone->pages_min + tmp / 4; | 2610 | zone->pages_low = zone->pages_min + (tmp >> 2); |
2599 | zone->pages_high = zone->pages_min + tmp / 2; | 2611 | zone->pages_high = zone->pages_min + (tmp >> 1); |
2600 | spin_unlock_irqrestore(&zone->lru_lock, flags); | 2612 | spin_unlock_irqrestore(&zone->lru_lock, flags); |
2601 | } | 2613 | } |
2602 | 2614 | ||
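Widening tmp to u64 and dividing with do_div() avoids overflowing the pages_min * present_pages product on 32-bit machines with large zones. A userspace equivalent of the fix (plain 64-bit division stands in for the kernel's do_div helper):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t pages_min = 65536, present_pages = 262144, lowmem_pages = 1048576;

        /* 32-bit product wraps: 65536 * 262144 = 2^34 truncated to 32 bits. */
        uint32_t bad = (uint32_t)(pages_min * present_pages) / lowmem_pages;

        /* Widen before multiplying, as the patch does with (u64)pages_min * ... */
        uint64_t tmp = (uint64_t)pages_min * present_pages;
        tmp /= lowmem_pages;                 /* do_div(tmp, lowmem_pages) in the kernel */

        printf("32-bit result: %u, 64-bit result: %llu\n", bad, (unsigned long long)tmp);
        return 0;
    }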
diff --git a/mm/slab.c b/mm/slab.c --- a/mm/slab.c +++ b/mm/slab.c | |||
@@ -700,6 +700,14 @@ static enum { | |||
700 | FULL | 700 | FULL |
701 | } g_cpucache_up; | 701 | } g_cpucache_up; |
702 | 702 | ||
703 | /* | ||
704 | * used by boot code to determine if it can use slab based allocator | ||
705 | */ | ||
706 | int slab_is_available(void) | ||
707 | { | ||
708 | return g_cpucache_up == FULL; | ||
709 | } | ||
710 | |||
703 | static DEFINE_PER_CPU(struct work_struct, reap_work); | 711 | static DEFINE_PER_CPU(struct work_struct, reap_work); |
704 | 712 | ||
705 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, | 713 | static void free_block(struct kmem_cache *cachep, void **objpp, int len, |
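slab_is_available() gives early boot code a cheap way to pick between the bootmem allocator and kmalloc(), which is exactly how the mm/sparse.c hunk below uses it in place of the system_state check. A hedged sketch of that selection pattern (both allocator calls are stubbed; the real functions obviously live in the kernel):

    #include <stdbool.h>
    #include <stdlib.h>

    static bool slab_up;                       /* stands in for g_cpucache_up == FULL */

    static void *kmalloc_stub(size_t n)        { return malloc(n); }
    static void *alloc_bootmem_stub(size_t n)  { return malloc(n); }

    /* Same shape as the sparse_index_alloc() change: ask the slab layer,
     * not system_state, whether it is ready to serve allocations. */
    static void *early_alloc(size_t n)
    {
        return slab_up ? kmalloc_stub(n) : alloc_bootmem_stub(n);
    }

    int main(void)
    {
        void *a = early_alloc(128);            /* bootmem path */
        slab_up = true;
        void *b = early_alloc(128);            /* slab path */
        free(a); free(b);
        return 0;
    }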
@@ -2192,11 +2200,14 @@ static void drain_cpu_caches(struct kmem_cache *cachep) | |||
2192 | check_irq_on(); | 2200 | check_irq_on(); |
2193 | for_each_online_node(node) { | 2201 | for_each_online_node(node) { |
2194 | l3 = cachep->nodelists[node]; | 2202 | l3 = cachep->nodelists[node]; |
2195 | if (l3) { | 2203 | if (l3 && l3->alien) |
2204 | drain_alien_cache(cachep, l3->alien); | ||
2205 | } | ||
2206 | |||
2207 | for_each_online_node(node) { | ||
2208 | l3 = cachep->nodelists[node]; | ||
2209 | if (l3) | ||
2196 | drain_array(cachep, l3, l3->shared, 1, node); | 2210 | drain_array(cachep, l3, l3->shared, 1, node); |
2197 | if (l3->alien) | ||
2198 | drain_alien_cache(cachep, l3->alien); | ||
2199 | } | ||
2200 | } | 2211 | } |
2201 | } | 2212 | } |
2202 | 2213 | ||
diff --git a/mm/sparse.c b/mm/sparse.c index d7c32de99ee8..100040c0dfb6 100644 --- a/mm/sparse.c +++ b/mm/sparse.c | |||
@@ -32,7 +32,7 @@ static struct mem_section *sparse_index_alloc(int nid) | |||
32 | unsigned long array_size = SECTIONS_PER_ROOT * | 32 | unsigned long array_size = SECTIONS_PER_ROOT * |
33 | sizeof(struct mem_section); | 33 | sizeof(struct mem_section); |
34 | 34 | ||
35 | if (system_state == SYSTEM_RUNNING) | 35 | if (slab_is_available()) |
36 | section = kmalloc_node(array_size, GFP_KERNEL, nid); | 36 | section = kmalloc_node(array_size, GFP_KERNEL, nid); |
37 | else | 37 | else |
38 | section = alloc_bootmem_node(NODE_DATA(nid), array_size); | 38 | section = alloc_bootmem_node(NODE_DATA(nid), array_size); |
@@ -87,11 +87,8 @@ int __section_nr(struct mem_section* ms) | |||
87 | unsigned long root_nr; | 87 | unsigned long root_nr; |
88 | struct mem_section* root; | 88 | struct mem_section* root; |
89 | 89 | ||
90 | for (root_nr = 0; | 90 | for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) { |
91 | root_nr < NR_MEM_SECTIONS; | 91 | root = __nr_to_section(root_nr * SECTIONS_PER_ROOT); |
92 | root_nr += SECTIONS_PER_ROOT) { | ||
93 | root = __nr_to_section(root_nr); | ||
94 | |||
95 | if (!root) | 92 | if (!root) |
96 | continue; | 93 | continue; |
97 | 94 | ||
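The corrected loop walks root indices 0..NR_SECTION_ROOTS-1 and converts each to its first section number, instead of striding through section numbers up to NR_MEM_SECTIONS. The index relationship it relies on, shown in isolation (the constants here are illustrative, not the kernel's):

    #include <stdio.h>

    #define SECTIONS_PER_ROOT 16UL          /* illustrative */
    #define NR_MEM_SECTIONS   256UL
    #define NR_SECTION_ROOTS  (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)

    int main(void)
    {
        unsigned long root_nr;

        /* Each root covers SECTIONS_PER_ROOT consecutive sections, so the
         * first section number in root N is N * SECTIONS_PER_ROOT. */
        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++)
            printf("root %2lu -> sections [%lu, %lu)\n", root_nr,
                   root_nr * SECTIONS_PER_ROOT,
                   (root_nr + 1) * SECTIONS_PER_ROOT);
        return 0;
    }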
diff --git a/net/802/tr.c b/net/802/tr.c index afd8385c0c9c..e9dc803f2fe0 100644 --- a/net/802/tr.c +++ b/net/802/tr.c | |||
@@ -643,6 +643,5 @@ static int __init rif_init(void) | |||
643 | 643 | ||
644 | module_init(rif_init); | 644 | module_init(rif_init); |
645 | 645 | ||
646 | EXPORT_SYMBOL(tr_source_route); | ||
647 | EXPORT_SYMBOL(tr_type_trans); | 646 | EXPORT_SYMBOL(tr_type_trans); |
648 | EXPORT_SYMBOL(alloc_trdev); | 647 | EXPORT_SYMBOL(alloc_trdev); |
diff --git a/net/atm/clip.c b/net/atm/clip.c index 1a786bfaa416..72d852982664 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c | |||
@@ -963,7 +963,7 @@ static struct file_operations arp_seq_fops = { | |||
963 | static int __init atm_clip_init(void) | 963 | static int __init atm_clip_init(void) |
964 | { | 964 | { |
965 | struct proc_dir_entry *p; | 965 | struct proc_dir_entry *p; |
966 | neigh_table_init(&clip_tbl); | 966 | neigh_table_init_no_netlink(&clip_tbl); |
967 | 967 | ||
968 | clip_tbl_hook = &clip_tbl; | 968 | clip_tbl_hook = &clip_tbl; |
969 | register_atm_ioctl(&clip_ioctl_ops); | 969 | register_atm_ioctl(&clip_ioctl_ops); |
diff --git a/net/bridge/netfilter/ebt_log.c b/net/bridge/netfilter/ebt_log.c index d159c92cca84..466ed3440b74 100644 --- a/net/bridge/netfilter/ebt_log.c +++ b/net/bridge/netfilter/ebt_log.c | |||
@@ -168,7 +168,7 @@ static void ebt_log(const struct sk_buff *skb, unsigned int hooknr, | |||
168 | 168 | ||
169 | if (info->bitmask & EBT_LOG_NFLOG) | 169 | if (info->bitmask & EBT_LOG_NFLOG) |
170 | nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, | 170 | nf_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, |
171 | info->prefix); | 171 | "%s", info->prefix); |
172 | else | 172 | else |
173 | ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, | 173 | ebt_log_packet(PF_BRIDGE, hooknr, skb, in, out, &li, |
174 | info->prefix); | 174 | info->prefix); |
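Passing the user-configurable prefix through a "%s" format, as this hunk and the ipt_LOG/ip6t_LOG hunks below do, stops any '%' sequences in the prefix from being interpreted as format directives. The userspace analogue of the bug:

    #include <stdio.h>

    int main(void)
    {
        const char *prefix = "DROP %x %n";   /* attacker-influenced string */

        /* Wrong: the string itself becomes the format, so %x/%n are acted on.
         * printf(prefix);
         */

        /* Right: the string is only ever treated as data. */
        printf("%s\n", prefix);
        return 0;
    }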
diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 4cf878efdb49..50a8c73caf97 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c | |||
@@ -1326,8 +1326,7 @@ void neigh_parms_destroy(struct neigh_parms *parms) | |||
1326 | kfree(parms); | 1326 | kfree(parms); |
1327 | } | 1327 | } |
1328 | 1328 | ||
1329 | 1329 | void neigh_table_init_no_netlink(struct neigh_table *tbl) | |
1330 | void neigh_table_init(struct neigh_table *tbl) | ||
1331 | { | 1330 | { |
1332 | unsigned long now = jiffies; | 1331 | unsigned long now = jiffies; |
1333 | unsigned long phsize; | 1332 | unsigned long phsize; |
@@ -1383,10 +1382,27 @@ void neigh_table_init(struct neigh_table *tbl) | |||
1383 | 1382 | ||
1384 | tbl->last_flush = now; | 1383 | tbl->last_flush = now; |
1385 | tbl->last_rand = now + tbl->parms.reachable_time * 20; | 1384 | tbl->last_rand = now + tbl->parms.reachable_time * 20; |
1385 | } | ||
1386 | |||
1387 | void neigh_table_init(struct neigh_table *tbl) | ||
1388 | { | ||
1389 | struct neigh_table *tmp; | ||
1390 | |||
1391 | neigh_table_init_no_netlink(tbl); | ||
1386 | write_lock(&neigh_tbl_lock); | 1392 | write_lock(&neigh_tbl_lock); |
1393 | for (tmp = neigh_tables; tmp; tmp = tmp->next) { | ||
1394 | if (tmp->family == tbl->family) | ||
1395 | break; | ||
1396 | } | ||
1387 | tbl->next = neigh_tables; | 1397 | tbl->next = neigh_tables; |
1388 | neigh_tables = tbl; | 1398 | neigh_tables = tbl; |
1389 | write_unlock(&neigh_tbl_lock); | 1399 | write_unlock(&neigh_tbl_lock); |
1400 | |||
1401 | if (unlikely(tmp)) { | ||
1402 | printk(KERN_ERR "NEIGH: Registering multiple tables for " | ||
1403 | "family %d\n", tbl->family); | ||
1404 | dump_stack(); | ||
1405 | } | ||
1390 | } | 1406 | } |
1391 | 1407 | ||
1392 | int neigh_table_clear(struct neigh_table *tbl) | 1408 | int neigh_table_clear(struct neigh_table *tbl) |
@@ -2657,6 +2673,7 @@ EXPORT_SYMBOL(neigh_rand_reach_time); | |||
2657 | EXPORT_SYMBOL(neigh_resolve_output); | 2673 | EXPORT_SYMBOL(neigh_resolve_output); |
2658 | EXPORT_SYMBOL(neigh_table_clear); | 2674 | EXPORT_SYMBOL(neigh_table_clear); |
2659 | EXPORT_SYMBOL(neigh_table_init); | 2675 | EXPORT_SYMBOL(neigh_table_init); |
2676 | EXPORT_SYMBOL(neigh_table_init_no_netlink); | ||
2660 | EXPORT_SYMBOL(neigh_update); | 2677 | EXPORT_SYMBOL(neigh_update); |
2661 | EXPORT_SYMBOL(neigh_update_hhs); | 2678 | EXPORT_SYMBOL(neigh_update_hhs); |
2662 | EXPORT_SYMBOL(pneigh_enqueue); | 2679 | EXPORT_SYMBOL(pneigh_enqueue); |
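Splitting out neigh_table_init_no_netlink() lets clip set up a table without linking it into neigh_tables, while the full neigh_table_init() now warns if two tables claim the same address family. The duplicate check itself is just a list walk before prepending; a reduced sketch (toy node type, no locking):

    #include <stdio.h>
    #include <stdbool.h>

    struct tbl { int family; struct tbl *next; };

    static struct tbl *tables;

    /* Returns false if another table of the same family was already registered. */
    static bool register_table(struct tbl *t)
    {
        struct tbl *tmp;

        for (tmp = tables; tmp; tmp = tmp->next)
            if (tmp->family == t->family)
                break;
        t->next = tables;
        tables = t;                    /* registered either way, as in the patch */
        return tmp == NULL;
    }

    int main(void)
    {
        struct tbl a = { .family = 2 }, b = { .family = 2 };
        register_table(&a);
        if (!register_table(&b))
            fprintf(stderr, "NEIGH: duplicate table for family %d\n", b.family);
        return 0;
    }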
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c index c2d92f99a2b8..d0d19192026d 100644 --- a/net/ipv4/netfilter/arp_tables.c +++ b/net/ipv4/netfilter/arp_tables.c | |||
@@ -948,7 +948,7 @@ static int do_add_counters(void __user *user, unsigned int len) | |||
948 | 948 | ||
949 | write_lock_bh(&t->lock); | 949 | write_lock_bh(&t->lock); |
950 | private = t->private; | 950 | private = t->private; |
951 | if (private->number != paddc->num_counters) { | 951 | if (private->number != tmp.num_counters) { |
952 | ret = -EINVAL; | 952 | ret = -EINVAL; |
953 | goto unlock_up_free; | 953 | goto unlock_up_free; |
954 | } | 954 | } |
diff --git a/net/ipv4/netfilter/ip_nat_proto_gre.c b/net/ipv4/netfilter/ip_nat_proto_gre.c index 6c4899d8046a..96ceabaec402 100644 --- a/net/ipv4/netfilter/ip_nat_proto_gre.c +++ b/net/ipv4/netfilter/ip_nat_proto_gre.c | |||
@@ -49,15 +49,15 @@ gre_in_range(const struct ip_conntrack_tuple *tuple, | |||
49 | const union ip_conntrack_manip_proto *min, | 49 | const union ip_conntrack_manip_proto *min, |
50 | const union ip_conntrack_manip_proto *max) | 50 | const union ip_conntrack_manip_proto *max) |
51 | { | 51 | { |
52 | u_int32_t key; | 52 | __be16 key; |
53 | 53 | ||
54 | if (maniptype == IP_NAT_MANIP_SRC) | 54 | if (maniptype == IP_NAT_MANIP_SRC) |
55 | key = tuple->src.u.gre.key; | 55 | key = tuple->src.u.gre.key; |
56 | else | 56 | else |
57 | key = tuple->dst.u.gre.key; | 57 | key = tuple->dst.u.gre.key; |
58 | 58 | ||
59 | return ntohl(key) >= ntohl(min->gre.key) | 59 | return ntohs(key) >= ntohs(min->gre.key) |
60 | && ntohl(key) <= ntohl(max->gre.key); | 60 | && ntohs(key) <= ntohs(max->gre.key); |
61 | } | 61 | } |
62 | 62 | ||
63 | /* generate unique tuple ... */ | 63 | /* generate unique tuple ... */ |
@@ -81,14 +81,14 @@ gre_unique_tuple(struct ip_conntrack_tuple *tuple, | |||
81 | min = 1; | 81 | min = 1; |
82 | range_size = 0xffff; | 82 | range_size = 0xffff; |
83 | } else { | 83 | } else { |
84 | min = ntohl(range->min.gre.key); | 84 | min = ntohs(range->min.gre.key); |
85 | range_size = ntohl(range->max.gre.key) - min + 1; | 85 | range_size = ntohs(range->max.gre.key) - min + 1; |
86 | } | 86 | } |
87 | 87 | ||
88 | DEBUGP("min = %u, range_size = %u\n", min, range_size); | 88 | DEBUGP("min = %u, range_size = %u\n", min, range_size); |
89 | 89 | ||
90 | for (i = 0; i < range_size; i++, key++) { | 90 | for (i = 0; i < range_size; i++, key++) { |
91 | *keyptr = htonl(min + key % range_size); | 91 | *keyptr = htons(min + key % range_size); |
92 | if (!ip_nat_used_tuple(tuple, conntrack)) | 92 | if (!ip_nat_used_tuple(tuple, conntrack)) |
93 | return 1; | 93 | return 1; |
94 | } | 94 | } |
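The GRE key being NATed here is a 16-bit quantity, so the 32-bit ntohl()/htonl() conversions mangled it; switching to ntohs()/htons() keeps the width right. On a little-endian host the difference looks like this (a self-contained demonstration, not NAT code):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    int main(void)
    {
        uint16_t key_host = 0x1234;

        uint16_t right = htons(key_host);            /* 16-bit swap: value 0x3412 on little-endian */
        uint16_t wrong = (uint16_t)htonl(key_host);  /* 32-bit swap, then truncated to 16 bits */

        printf("htons: 0x%04x, back to host 0x%04x\n", (unsigned)right, (unsigned)ntohs(right));
        printf("htonl truncated to 16 bits: 0x%04x\n", (unsigned)wrong);
        return 0;
    }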
diff --git a/net/ipv4/netfilter/ipt_LOG.c b/net/ipv4/netfilter/ipt_LOG.c index 39fd4c2a2386..b98f7b08b084 100644 --- a/net/ipv4/netfilter/ipt_LOG.c +++ b/net/ipv4/netfilter/ipt_LOG.c | |||
@@ -428,7 +428,7 @@ ipt_log_target(struct sk_buff **pskb, | |||
428 | 428 | ||
429 | if (loginfo->logflags & IPT_LOG_NFLOG) | 429 | if (loginfo->logflags & IPT_LOG_NFLOG) |
430 | nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | 430 | nf_log_packet(PF_INET, hooknum, *pskb, in, out, &li, |
431 | loginfo->prefix); | 431 | "%s", loginfo->prefix); |
432 | else | 432 | else |
433 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, | 433 | ipt_log_packet(PF_INET, hooknum, *pskb, in, out, &li, |
434 | loginfo->prefix); | 434 | loginfo->prefix); |
diff --git a/net/ipv4/netfilter/ipt_recent.c b/net/ipv4/netfilter/ipt_recent.c index 143843285702..b847ee409efb 100644 --- a/net/ipv4/netfilter/ipt_recent.c +++ b/net/ipv4/netfilter/ipt_recent.c | |||
@@ -821,6 +821,7 @@ checkentry(const char *tablename, | |||
821 | /* Create our proc 'status' entry. */ | 821 | /* Create our proc 'status' entry. */ |
822 | curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent); | 822 | curr_table->status_proc = create_proc_entry(curr_table->name, ip_list_perms, proc_net_ipt_recent); |
823 | if (!curr_table->status_proc) { | 823 | if (!curr_table->status_proc) { |
824 | vfree(hold); | ||
824 | printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n"); | 825 | printk(KERN_INFO RECENT_NAME ": checkentry: unable to allocate for /proc entry.\n"); |
825 | /* Destroy the created table */ | 826 | /* Destroy the created table */ |
826 | spin_lock_bh(&recent_lock); | 827 | spin_lock_bh(&recent_lock); |
@@ -845,7 +846,6 @@ checkentry(const char *tablename, | |||
845 | spin_unlock_bh(&recent_lock); | 846 | spin_unlock_bh(&recent_lock); |
846 | vfree(curr_table->time_info); | 847 | vfree(curr_table->time_info); |
847 | vfree(curr_table->hash_table); | 848 | vfree(curr_table->hash_table); |
848 | vfree(hold); | ||
849 | vfree(curr_table->table); | 849 | vfree(curr_table->table); |
850 | vfree(curr_table); | 850 | vfree(curr_table); |
851 | return 0; | 851 | return 0; |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 9f0cca4c4fae..4a538bc1683d 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1662,6 +1662,8 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp) | |||
1662 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { | 1662 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) { |
1663 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; | 1663 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
1664 | tp->lost_out += tcp_skb_pcount(skb); | 1664 | tp->lost_out += tcp_skb_pcount(skb); |
1665 | if (IsReno(tp)) | ||
1666 | tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1); | ||
1665 | 1667 | ||
1666 | /* clear xmit_retrans hint */ | 1668 | /* clear xmit_retrans hint */ |
1667 | if (tp->retransmit_skb_hint && | 1669 | if (tp->retransmit_skb_hint && |
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index 0a673038344f..2e72f89a7019 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c | |||
@@ -1103,7 +1103,7 @@ do_add_counters(void __user *user, unsigned int len) | |||
1103 | 1103 | ||
1104 | write_lock_bh(&t->lock); | 1104 | write_lock_bh(&t->lock); |
1105 | private = t->private; | 1105 | private = t->private; |
1106 | if (private->number != paddc->num_counters) { | 1106 | if (private->number != tmp.num_counters) { |
1107 | ret = -EINVAL; | 1107 | ret = -EINVAL; |
1108 | goto unlock_up_free; | 1108 | goto unlock_up_free; |
1109 | } | 1109 | } |
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index a96c0de14b00..73c6300109d6 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -439,7 +439,7 @@ ip6t_log_target(struct sk_buff **pskb, | |||
439 | 439 | ||
440 | if (loginfo->logflags & IP6T_LOG_NFLOG) | 440 | if (loginfo->logflags & IP6T_LOG_NFLOG) |
441 | nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | 441 | nf_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, |
442 | loginfo->prefix); | 442 | "%s", loginfo->prefix); |
443 | else | 443 | else |
444 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, | 444 | ip6t_log_packet(PF_INET6, hooknum, *pskb, in, out, &li, |
445 | loginfo->prefix); | 445 | loginfo->prefix); |
diff --git a/net/ipv6/netfilter/ip6t_eui64.c b/net/ipv6/netfilter/ip6t_eui64.c index 94dbdb8b458d..4f6b84c8f4ab 100644 --- a/net/ipv6/netfilter/ip6t_eui64.c +++ b/net/ipv6/netfilter/ip6t_eui64.c | |||
@@ -40,7 +40,7 @@ match(const struct sk_buff *skb, | |||
40 | 40 | ||
41 | memset(eui64, 0, sizeof(eui64)); | 41 | memset(eui64, 0, sizeof(eui64)); |
42 | 42 | ||
43 | if (eth_hdr(skb)->h_proto == ntohs(ETH_P_IPV6)) { | 43 | if (eth_hdr(skb)->h_proto == htons(ETH_P_IPV6)) { |
44 | if (skb->nh.ipv6h->version == 0x6) { | 44 | if (skb->nh.ipv6h->version == 0x6) { |
45 | memcpy(eui64, eth_hdr(skb)->h_source, 3); | 45 | memcpy(eui64, eth_hdr(skb)->h_source, 3); |
46 | memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); | 46 | memcpy(eui64 + 5, eth_hdr(skb)->h_source + 3, 3); |
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c index 2dbf134d5266..811d998725bc 100644 --- a/net/ipx/af_ipx.c +++ b/net/ipx/af_ipx.c | |||
@@ -944,9 +944,9 @@ out: | |||
944 | return rc; | 944 | return rc; |
945 | } | 945 | } |
946 | 946 | ||
947 | static int ipx_map_frame_type(unsigned char type) | 947 | static __be16 ipx_map_frame_type(unsigned char type) |
948 | { | 948 | { |
949 | int rc = 0; | 949 | __be16 rc = 0; |
950 | 950 | ||
951 | switch (type) { | 951 | switch (type) { |
952 | case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; | 952 | case IPX_FRAME_ETHERII: rc = htons(ETH_P_IPX); break; |
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c index 67774448efd9..a394c6fe19a2 100644 --- a/net/ipx/ipx_route.c +++ b/net/ipx/ipx_route.c | |||
@@ -119,7 +119,7 @@ out: | |||
119 | return rc; | 119 | return rc; |
120 | } | 120 | } |
121 | 121 | ||
122 | static int ipxrtr_delete(long net) | 122 | static int ipxrtr_delete(__u32 net) |
123 | { | 123 | { |
124 | struct ipx_route *r, *tmp; | 124 | struct ipx_route *r, *tmp; |
125 | int rc; | 125 | int rc; |
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c index c60273cad778..61cdda4e5d3b 100644 --- a/net/netfilter/nfnetlink_log.c +++ b/net/netfilter/nfnetlink_log.c | |||
@@ -321,7 +321,7 @@ static int | |||
321 | nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) | 321 | nfulnl_set_flags(struct nfulnl_instance *inst, u_int16_t flags) |
322 | { | 322 | { |
323 | spin_lock_bh(&inst->lock); | 323 | spin_lock_bh(&inst->lock); |
324 | inst->flags = ntohs(flags); | 324 | inst->flags = flags; |
325 | spin_unlock_bh(&inst->lock); | 325 | spin_unlock_bh(&inst->lock); |
326 | 326 | ||
327 | return 0; | 327 | return 0; |
@@ -902,7 +902,7 @@ nfulnl_recv_config(struct sock *ctnl, struct sk_buff *skb, | |||
902 | if (nfula[NFULA_CFG_FLAGS-1]) { | 902 | if (nfula[NFULA_CFG_FLAGS-1]) { |
903 | u_int16_t flags = | 903 | u_int16_t flags = |
904 | *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); | 904 | *(u_int16_t *)NFA_DATA(nfula[NFULA_CFG_FLAGS-1]); |
905 | nfulnl_set_flags(inst, ntohl(flags)); | 905 | nfulnl_set_flags(inst, ntohs(flags)); |
906 | } | 906 | } |
907 | 907 | ||
908 | out_put: | 908 | out_put: |
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 31eb83717c26..138ea92ed268 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -193,8 +193,10 @@ static void dev_watchdog(unsigned long arg) | |||
193 | netif_running(dev) && | 193 | netif_running(dev) && |
194 | netif_carrier_ok(dev)) { | 194 | netif_carrier_ok(dev)) { |
195 | if (netif_queue_stopped(dev) && | 195 | if (netif_queue_stopped(dev) && |
196 | (jiffies - dev->trans_start) > dev->watchdog_timeo) { | 196 | time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) { |
197 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", dev->name); | 197 | |
198 | printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n", | ||
199 | dev->name); | ||
198 | dev->tx_timeout(dev); | 200 | dev->tx_timeout(dev); |
199 | } | 201 | } |
200 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) | 202 | if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo)) |
diff --git a/net/sctp/input.c b/net/sctp/input.c index d117ebc75cf8..1662f9cc869e 100644 --- a/net/sctp/input.c +++ b/net/sctp/input.c | |||
@@ -73,6 +73,8 @@ static struct sctp_association *__sctp_lookup_association( | |||
73 | const union sctp_addr *peer, | 73 | const union sctp_addr *peer, |
74 | struct sctp_transport **pt); | 74 | struct sctp_transport **pt); |
75 | 75 | ||
76 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb); | ||
77 | |||
76 | 78 | ||
77 | /* Calculate the SCTP checksum of an SCTP packet. */ | 79 | /* Calculate the SCTP checksum of an SCTP packet. */ |
78 | static inline int sctp_rcv_checksum(struct sk_buff *skb) | 80 | static inline int sctp_rcv_checksum(struct sk_buff *skb) |
@@ -186,7 +188,6 @@ int sctp_rcv(struct sk_buff *skb) | |||
186 | */ | 188 | */ |
187 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) | 189 | if (sk->sk_bound_dev_if && (sk->sk_bound_dev_if != af->skb_iif(skb))) |
188 | { | 190 | { |
189 | sock_put(sk); | ||
190 | if (asoc) { | 191 | if (asoc) { |
191 | sctp_association_put(asoc); | 192 | sctp_association_put(asoc); |
192 | asoc = NULL; | 193 | asoc = NULL; |
@@ -197,7 +198,6 @@ int sctp_rcv(struct sk_buff *skb) | |||
197 | sk = sctp_get_ctl_sock(); | 198 | sk = sctp_get_ctl_sock(); |
198 | ep = sctp_sk(sk)->ep; | 199 | ep = sctp_sk(sk)->ep; |
199 | sctp_endpoint_hold(ep); | 200 | sctp_endpoint_hold(ep); |
200 | sock_hold(sk); | ||
201 | rcvr = &ep->base; | 201 | rcvr = &ep->base; |
202 | } | 202 | } |
203 | 203 | ||
@@ -253,25 +253,18 @@ int sctp_rcv(struct sk_buff *skb) | |||
253 | */ | 253 | */ |
254 | sctp_bh_lock_sock(sk); | 254 | sctp_bh_lock_sock(sk); |
255 | 255 | ||
256 | /* It is possible that the association could have moved to a different | ||
257 | * socket if it is peeled off. If so, update the sk. | ||
258 | */ | ||
259 | if (sk != rcvr->sk) { | ||
260 | sctp_bh_lock_sock(rcvr->sk); | ||
261 | sctp_bh_unlock_sock(sk); | ||
262 | sk = rcvr->sk; | ||
263 | } | ||
264 | |||
265 | if (sock_owned_by_user(sk)) | 256 | if (sock_owned_by_user(sk)) |
266 | sk_add_backlog(sk, skb); | 257 | sctp_add_backlog(sk, skb); |
267 | else | 258 | else |
268 | sctp_backlog_rcv(sk, skb); | 259 | sctp_inq_push(&chunk->rcvr->inqueue, chunk); |
269 | 260 | ||
270 | /* Release the sock and the sock ref we took in the lookup calls. | ||
271 | * The asoc/ep ref will be released in sctp_backlog_rcv. | ||
272 | */ | ||
273 | sctp_bh_unlock_sock(sk); | 261 | sctp_bh_unlock_sock(sk); |
274 | sock_put(sk); | 262 | |
263 | /* Release the asoc/ep ref we took in the lookup calls. */ | ||
264 | if (asoc) | ||
265 | sctp_association_put(asoc); | ||
266 | else | ||
267 | sctp_endpoint_put(ep); | ||
275 | 268 | ||
276 | return 0; | 269 | return 0; |
277 | 270 | ||
@@ -280,8 +273,7 @@ discard_it: | |||
280 | return 0; | 273 | return 0; |
281 | 274 | ||
282 | discard_release: | 275 | discard_release: |
283 | /* Release any structures we may be holding. */ | 276 | /* Release the asoc/ep ref we took in the lookup calls. */ |
284 | sock_put(sk); | ||
285 | if (asoc) | 277 | if (asoc) |
286 | sctp_association_put(asoc); | 278 | sctp_association_put(asoc); |
287 | else | 279 | else |
@@ -290,56 +282,87 @@ discard_release: | |||
290 | goto discard_it; | 282 | goto discard_it; |
291 | } | 283 | } |
292 | 284 | ||
293 | /* Handle second half of inbound skb processing. If the sock was busy, | 285 | /* Process the backlog queue of the socket. Every skb on |
294 | * we may have need to delay processing until later when the sock is | 286 | * the backlog holds a ref on an association or endpoint. |
295 | * released (on the backlog). If not busy, we call this routine | 287 | * We hold this ref throughout the state machine to make |
296 | * directly from the bottom half. | 288 | * sure that the structure we need is still around. |
297 | */ | 289 | */ |
298 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | 290 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) |
299 | { | 291 | { |
300 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; | 292 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
301 | struct sctp_inq *inqueue = NULL; | 293 | struct sctp_inq *inqueue = &chunk->rcvr->inqueue; |
302 | struct sctp_ep_common *rcvr = NULL; | 294 | struct sctp_ep_common *rcvr = NULL; |
295 | int backloged = 0; | ||
303 | 296 | ||
304 | rcvr = chunk->rcvr; | 297 | rcvr = chunk->rcvr; |
305 | 298 | ||
306 | BUG_TRAP(rcvr->sk == sk); | 299 | /* If the rcvr is dead then the association or endpoint |
307 | 300 | * has been deleted and we can safely drop the chunk | |
308 | if (rcvr->dead) { | 301 | * and refs that we are holding. |
309 | sctp_chunk_free(chunk); | 302 | */ |
310 | } else { | 303 | if (rcvr->dead) { |
311 | inqueue = &chunk->rcvr->inqueue; | 304 | sctp_chunk_free(chunk); |
312 | sctp_inq_push(inqueue, chunk); | 305 | goto done; |
313 | } | 306 | } |
314 | 307 | ||
315 | /* Release the asoc/ep ref we took in the lookup calls in sctp_rcv. */ | 308 | if (unlikely(rcvr->sk != sk)) { |
316 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | 309 | /* In this case, the association moved from one socket to |
317 | sctp_association_put(sctp_assoc(rcvr)); | 310 | * another. We are currently sitting on the backlog of the |
318 | else | 311 | * old socket, so we need to move. |
319 | sctp_endpoint_put(sctp_ep(rcvr)); | 312 | * However, since we are here in the process context we |
320 | 313 | * need to take make sure that the user doesn't own | |
314 | * the new socket when we process the packet. | ||
315 | * If the new socket is user-owned, queue the chunk to the | ||
316 | * backlog of the new socket without dropping any refs. | ||
317 | * Otherwise, we can safely push the chunk on the inqueue. | ||
318 | */ | ||
319 | |||
320 | sk = rcvr->sk; | ||
321 | sctp_bh_lock_sock(sk); | ||
322 | |||
323 | if (sock_owned_by_user(sk)) { | ||
324 | sk_add_backlog(sk, skb); | ||
325 | backloged = 1; | ||
326 | } else | ||
327 | sctp_inq_push(inqueue, chunk); | ||
328 | |||
329 | sctp_bh_unlock_sock(sk); | ||
330 | |||
331 | /* If the chunk was backloged again, don't drop refs */ | ||
332 | if (backloged) | ||
333 | return 0; | ||
334 | } else { | ||
335 | sctp_inq_push(inqueue, chunk); | ||
336 | } | ||
337 | |||
338 | done: | ||
339 | /* Release the refs we took in sctp_add_backlog */ | ||
340 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) | ||
341 | sctp_association_put(sctp_assoc(rcvr)); | ||
342 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) | ||
343 | sctp_endpoint_put(sctp_ep(rcvr)); | ||
344 | else | ||
345 | BUG(); | ||
346 | |||
321 | return 0; | 347 | return 0; |
322 | } | 348 | } |
323 | 349 | ||
324 | void sctp_backlog_migrate(struct sctp_association *assoc, | 350 | static void sctp_add_backlog(struct sock *sk, struct sk_buff *skb) |
325 | struct sock *oldsk, struct sock *newsk) | ||
326 | { | 351 | { |
327 | struct sk_buff *skb; | 352 | struct sctp_chunk *chunk = SCTP_INPUT_CB(skb)->chunk; |
328 | struct sctp_chunk *chunk; | 353 | struct sctp_ep_common *rcvr = chunk->rcvr; |
329 | 354 | ||
330 | skb = oldsk->sk_backlog.head; | 355 | /* Hold the assoc/ep while hanging on the backlog queue. |
331 | oldsk->sk_backlog.head = oldsk->sk_backlog.tail = NULL; | 356 | * This way, we know structures we need will not disappear from us |
332 | while (skb != NULL) { | 357 | */ |
333 | struct sk_buff *next = skb->next; | 358 | if (SCTP_EP_TYPE_ASSOCIATION == rcvr->type) |
334 | 359 | sctp_association_hold(sctp_assoc(rcvr)); | |
335 | chunk = SCTP_INPUT_CB(skb)->chunk; | 360 | else if (SCTP_EP_TYPE_SOCKET == rcvr->type) |
336 | skb->next = NULL; | 361 | sctp_endpoint_hold(sctp_ep(rcvr)); |
337 | if (&assoc->base == chunk->rcvr) | 362 | else |
338 | sk_add_backlog(newsk, skb); | 363 | BUG(); |
339 | else | 364 | |
340 | sk_add_backlog(oldsk, skb); | 365 | sk_add_backlog(sk, skb); |
341 | skb = next; | ||
342 | } | ||
343 | } | 366 | } |
344 | 367 | ||
345 | /* Handle icmp frag needed error. */ | 368 | /* Handle icmp frag needed error. */ |
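The comments above describe the new rule: every skb parked on a socket backlog pins its association or endpoint with a reference, taken in sctp_add_backlog() and dropped once sctp_backlog_rcv() has disposed of the chunk. The same pin-while-queued pattern, reduced to a toy refcount (no SCTP types, no locking):

    #include <assert.h>
    #include <stdio.h>

    struct rcvr { int refcnt; int dead; };

    static void hold(struct rcvr *r) { r->refcnt++; }
    static void put(struct rcvr *r)  { assert(r->refcnt > 0); r->refcnt--; }

    /* Queueing takes a reference so the object outlives its stay on the backlog. */
    static void add_backlog(struct rcvr *r) { hold(r); }

    /* Processing drops that reference, whether the chunk is consumed or discarded. */
    static void backlog_rcv(struct rcvr *r)
    {
        if (!r->dead)
            printf("chunk delivered, refcnt while queued was %d\n", r->refcnt);
        put(r);
    }

    int main(void)
    {
        struct rcvr asoc = { .refcnt = 1 };   /* ref held by the lookup */
        add_backlog(&asoc);
        put(&asoc);                           /* lookup ref released after queueing */
        backlog_rcv(&asoc);
        printf("final refcnt: %d\n", asoc.refcnt);
        return 0;
    }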
@@ -412,7 +435,7 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | |||
412 | union sctp_addr daddr; | 435 | union sctp_addr daddr; |
413 | struct sctp_af *af; | 436 | struct sctp_af *af; |
414 | struct sock *sk = NULL; | 437 | struct sock *sk = NULL; |
415 | struct sctp_association *asoc = NULL; | 438 | struct sctp_association *asoc; |
416 | struct sctp_transport *transport = NULL; | 439 | struct sctp_transport *transport = NULL; |
417 | 440 | ||
418 | *app = NULL; *tpp = NULL; | 441 | *app = NULL; *tpp = NULL; |
@@ -453,7 +476,6 @@ struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | |||
453 | return sk; | 476 | return sk; |
454 | 477 | ||
455 | out: | 478 | out: |
456 | sock_put(sk); | ||
457 | if (asoc) | 479 | if (asoc) |
458 | sctp_association_put(asoc); | 480 | sctp_association_put(asoc); |
459 | return NULL; | 481 | return NULL; |
@@ -463,7 +485,6 @@ out: | |||
463 | void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) | 485 | void sctp_err_finish(struct sock *sk, struct sctp_association *asoc) |
464 | { | 486 | { |
465 | sctp_bh_unlock_sock(sk); | 487 | sctp_bh_unlock_sock(sk); |
466 | sock_put(sk); | ||
467 | if (asoc) | 488 | if (asoc) |
468 | sctp_association_put(asoc); | 489 | sctp_association_put(asoc); |
469 | } | 490 | } |
@@ -490,7 +511,7 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info) | |||
490 | int type = skb->h.icmph->type; | 511 | int type = skb->h.icmph->type; |
491 | int code = skb->h.icmph->code; | 512 | int code = skb->h.icmph->code; |
492 | struct sock *sk; | 513 | struct sock *sk; |
493 | struct sctp_association *asoc; | 514 | struct sctp_association *asoc = NULL; |
494 | struct sctp_transport *transport; | 515 | struct sctp_transport *transport; |
495 | struct inet_sock *inet; | 516 | struct inet_sock *inet; |
496 | char *saveip, *savesctp; | 517 | char *saveip, *savesctp; |
@@ -716,7 +737,6 @@ static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *l | |||
716 | 737 | ||
717 | hit: | 738 | hit: |
718 | sctp_endpoint_hold(ep); | 739 | sctp_endpoint_hold(ep); |
719 | sock_hold(epb->sk); | ||
720 | read_unlock(&head->lock); | 740 | read_unlock(&head->lock); |
721 | return ep; | 741 | return ep; |
722 | } | 742 | } |
@@ -818,7 +838,6 @@ static struct sctp_association *__sctp_lookup_association( | |||
818 | hit: | 838 | hit: |
819 | *pt = transport; | 839 | *pt = transport; |
820 | sctp_association_hold(asoc); | 840 | sctp_association_hold(asoc); |
821 | sock_hold(epb->sk); | ||
822 | read_unlock(&head->lock); | 841 | read_unlock(&head->lock); |
823 | return asoc; | 842 | return asoc; |
824 | } | 843 | } |
@@ -846,7 +865,6 @@ int sctp_has_association(const union sctp_addr *laddr, | |||
846 | struct sctp_transport *transport; | 865 | struct sctp_transport *transport; |
847 | 866 | ||
848 | if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { | 867 | if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { |
849 | sock_put(asoc->base.sk); | ||
850 | sctp_association_put(asoc); | 868 | sctp_association_put(asoc); |
851 | return 1; | 869 | return 1; |
852 | } | 870 | } |
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index 8d1dc24bab4c..c5beb2ad7ef7 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c | |||
@@ -498,10 +498,6 @@ static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | |||
498 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 498 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, |
499 | SCTP_STATE(SCTP_STATE_CLOSED)); | 499 | SCTP_STATE(SCTP_STATE_CLOSED)); |
500 | 500 | ||
501 | /* Set sk_err to ECONNRESET on a 1-1 style socket. */ | ||
502 | if (!sctp_style(asoc->base.sk, UDP)) | ||
503 | asoc->base.sk->sk_err = ECONNRESET; | ||
504 | |||
505 | /* SEND_FAILED sent later when cleaning up the association. */ | 501 | /* SEND_FAILED sent later when cleaning up the association. */ |
506 | asoc->outqueue.error = error; | 502 | asoc->outqueue.error = error; |
507 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | 503 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); |
@@ -838,6 +834,15 @@ static void sctp_cmd_del_non_primary(struct sctp_association *asoc) | |||
838 | return; | 834 | return; |
839 | } | 835 | } |
840 | 836 | ||
837 | /* Helper function to set sk_err on a 1-1 style socket. */ | ||
838 | static void sctp_cmd_set_sk_err(struct sctp_association *asoc, int error) | ||
839 | { | ||
840 | struct sock *sk = asoc->base.sk; | ||
841 | |||
842 | if (!sctp_style(sk, UDP)) | ||
843 | sk->sk_err = error; | ||
844 | } | ||
845 | |||
841 | /* These three macros allow us to pull the debugging code out of the | 846 | /* These three macros allow us to pull the debugging code out of the |
842 | * main flow of sctp_do_sm() to keep attention focused on the real | 847 | * main flow of sctp_do_sm() to keep attention focused on the real |
843 | * functionality there. | 848 | * functionality there. |
@@ -1458,6 +1463,9 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, | |||
1458 | local_cork = 0; | 1463 | local_cork = 0; |
1459 | asoc->peer.retran_path = t; | 1464 | asoc->peer.retran_path = t; |
1460 | break; | 1465 | break; |
1466 | case SCTP_CMD_SET_SK_ERR: | ||
1467 | sctp_cmd_set_sk_err(asoc, cmd->obj.error); | ||
1468 | break; | ||
1461 | default: | 1469 | default: |
1462 | printk(KERN_WARNING "Impossible command: %u, %p\n", | 1470 | printk(KERN_WARNING "Impossible command: %u, %p\n", |
1463 | cmd->verb, cmd->obj.ptr); | 1471 | cmd->verb, cmd->obj.ptr); |
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 8cdba51ec076..8bc279219a72 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c | |||
@@ -93,7 +93,7 @@ static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | |||
93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); | 93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); |
94 | 94 | ||
95 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | 95 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, |
96 | __u16 error, | 96 | __u16 error, int sk_err, |
97 | const struct sctp_association *asoc, | 97 | const struct sctp_association *asoc, |
98 | struct sctp_transport *transport); | 98 | struct sctp_transport *transport); |
99 | 99 | ||
@@ -448,7 +448,7 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
448 | __u32 init_tag; | 448 | __u32 init_tag; |
449 | struct sctp_chunk *err_chunk; | 449 | struct sctp_chunk *err_chunk; |
450 | struct sctp_packet *packet; | 450 | struct sctp_packet *packet; |
451 | sctp_disposition_t ret; | 451 | __u16 error; |
452 | 452 | ||
453 | if (!sctp_vtag_verify(chunk, asoc)) | 453 | if (!sctp_vtag_verify(chunk, asoc)) |
454 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | 454 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); |
@@ -480,11 +480,9 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
480 | goto nomem; | 480 | goto nomem; |
481 | 481 | ||
482 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | 482 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); |
483 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 483 | return sctp_stop_t1_and_abort(commands, SCTP_ERROR_INV_PARAM, |
484 | SCTP_STATE(SCTP_STATE_CLOSED)); | 484 | ECONNREFUSED, asoc, |
485 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 485 | chunk->transport); |
486 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
487 | return SCTP_DISPOSITION_DELETE_TCB; | ||
488 | } | 486 | } |
489 | 487 | ||
490 | /* Verify the INIT chunk before processing it. */ | 488 | /* Verify the INIT chunk before processing it. */ |
@@ -511,27 +509,16 @@ sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | |||
511 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | 509 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, |
512 | SCTP_PACKET(packet)); | 510 | SCTP_PACKET(packet)); |
513 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | 511 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); |
514 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 512 | error = SCTP_ERROR_INV_PARAM; |
515 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
516 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
517 | SCTP_NULL()); | ||
518 | return SCTP_DISPOSITION_CONSUME; | ||
519 | } else { | 513 | } else { |
520 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | 514 | error = SCTP_ERROR_NO_RESOURCE; |
521 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
522 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
523 | SCTP_NULL()); | ||
524 | return SCTP_DISPOSITION_NOMEM; | ||
525 | } | 515 | } |
526 | } else { | 516 | } else { |
527 | ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg, | 517 | sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); |
528 | commands); | 518 | error = SCTP_ERROR_INV_PARAM; |
529 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
530 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
531 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
532 | SCTP_NULL()); | ||
533 | return ret; | ||
534 | } | 519 | } |
520 | return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, | ||
521 | asoc, chunk->transport); | ||
535 | } | 522 | } |
536 | 523 | ||
537 | /* Tag the variable length parameters. Note that we never | 524 | /* Tag the variable length parameters. Note that we never |
@@ -886,6 +873,8 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | |||
886 | struct sctp_transport *transport = (struct sctp_transport *) arg; | 873 | struct sctp_transport *transport = (struct sctp_transport *) arg; |
887 | 874 | ||
888 | if (asoc->overall_error_count >= asoc->max_retrans) { | 875 | if (asoc->overall_error_count >= asoc->max_retrans) { |
876 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
877 | SCTP_ERROR(ETIMEDOUT)); | ||
889 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 878 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
890 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 879 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
891 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 880 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
@@ -1030,6 +1019,12 @@ sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | |||
1030 | commands); | 1019 | commands); |
1031 | 1020 | ||
1032 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; | 1021 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; |
1022 | /* Make sure that the length of the parameter is what we expect */ | ||
1023 | if (ntohs(hbinfo->param_hdr.length) != | ||
1024 | sizeof(sctp_sender_hb_info_t)) { | ||
1025 | return SCTP_DISPOSITION_DISCARD; | ||
1026 | } | ||
1027 | |||
1033 | from_addr = hbinfo->daddr; | 1028 | from_addr = hbinfo->daddr; |
1034 | link = sctp_assoc_lookup_paddr(asoc, &from_addr); | 1029 | link = sctp_assoc_lookup_paddr(asoc, &from_addr); |
1035 | 1030 | ||
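The added length check guards the cast of chunk->skb->data to sctp_sender_hb_info_t: a heartbeat parameter shorter than the structure would otherwise be read past its end. The validate-before-cast pattern in miniature (made-up wire struct, not the SCTP one):

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>

    struct hb_param {                /* illustrative parameter layout */
        uint16_t type;
        uint16_t length;             /* big-endian on the wire */
        uint32_t payload;
    };

    static int handle_param(const unsigned char *data, size_t datalen)
    {
        const struct hb_param *p = (const struct hb_param *)data;

        if (datalen < sizeof(*p) || ntohs(p->length) != sizeof(*p))
            return -1;               /* discard, as the patch does */
        printf("payload: %u\n", ntohl(p->payload));
        return 0;
    }

    int main(void)
    {
        struct hb_param p = { htons(1), htons(sizeof(p)), htonl(42) };
        return handle_param((const unsigned char *)&p, sizeof(p)) == 0 ? 0 : 1;
    }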
@@ -2126,6 +2121,8 @@ static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | |||
2126 | int attempts = asoc->init_err_counter + 1; | 2121 | int attempts = asoc->init_err_counter + 1; |
2127 | 2122 | ||
2128 | if (attempts > asoc->max_init_attempts) { | 2123 | if (attempts > asoc->max_init_attempts) { |
2124 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
2125 | SCTP_ERROR(ETIMEDOUT)); | ||
2129 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 2126 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
2130 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); | 2127 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); |
2131 | return SCTP_DISPOSITION_DELETE_TCB; | 2128 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -2262,6 +2259,7 @@ sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, | |||
2262 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) | 2259 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) |
2263 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; | 2260 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; |
2264 | 2261 | ||
2262 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET)); | ||
2265 | /* ASSOC_FAILED will DELETE_TCB. */ | 2263 | /* ASSOC_FAILED will DELETE_TCB. */ |
2266 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); | 2264 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); |
2267 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 2265 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
@@ -2306,7 +2304,8 @@ sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, | |||
2306 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) | 2304 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) |
2307 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; | 2305 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; |
2308 | 2306 | ||
2309 | return sctp_stop_t1_and_abort(commands, error, asoc, chunk->transport); | 2307 | return sctp_stop_t1_and_abort(commands, error, ECONNREFUSED, asoc, |
2308 | chunk->transport); | ||
2310 | } | 2309 | } |
2311 | 2310 | ||
2312 | /* | 2311 | /* |
@@ -2318,7 +2317,8 @@ sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep | |||
2318 | void *arg, | 2317 | void *arg, |
2319 | sctp_cmd_seq_t *commands) | 2318 | sctp_cmd_seq_t *commands) |
2320 | { | 2319 | { |
2321 | return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, asoc, | 2320 | return sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR, |
2321 | ENOPROTOOPT, asoc, | ||
2322 | (struct sctp_transport *)arg); | 2322 | (struct sctp_transport *)arg); |
2323 | } | 2323 | } |
2324 | 2324 | ||
@@ -2343,7 +2343,7 @@ sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep, | |||
2343 | * This is common code called by several sctp_sf_*_abort() functions above. | 2343 | * This is common code called by several sctp_sf_*_abort() functions above. |
2344 | */ | 2344 | */ |
2345 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | 2345 | static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, |
2346 | __u16 error, | 2346 | __u16 error, int sk_err, |
2347 | const struct sctp_association *asoc, | 2347 | const struct sctp_association *asoc, |
2348 | struct sctp_transport *transport) | 2348 | struct sctp_transport *transport) |
2349 | { | 2349 | { |
@@ -2353,6 +2353,7 @@ static sctp_disposition_t sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, | |||
2353 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 2353 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
2354 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 2354 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
2355 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | 2355 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); |
2356 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(sk_err)); | ||
2356 | /* CMD_INIT_FAILED will DELETE_TCB. */ | 2357 | /* CMD_INIT_FAILED will DELETE_TCB. */ |
2357 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 2358 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
2358 | SCTP_U32(error)); | 2359 | SCTP_U32(error)); |
@@ -3336,6 +3337,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3336 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 3337 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
3337 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 3338 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
3338 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); | 3339 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); |
3340 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3341 | SCTP_ERROR(ECONNABORTED)); | ||
3339 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3342 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3340 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | 3343 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); |
3341 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3344 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
@@ -3362,6 +3365,8 @@ sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | |||
3362 | * processing the rest of the chunks in the packet. | 3365 | * processing the rest of the chunks in the packet. |
3363 | */ | 3366 | */ |
3364 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); | 3367 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); |
3368 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3369 | SCTP_ERROR(ECONNABORTED)); | ||
3365 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3370 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3366 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | 3371 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); |
3367 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 3372 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
@@ -3714,9 +3719,13 @@ static sctp_disposition_t sctp_sf_violation_chunklen( | |||
3714 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { | 3719 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { |
3715 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 3720 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
3716 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | 3721 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); |
3722 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3723 | SCTP_ERROR(ECONNREFUSED)); | ||
3717 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 3724 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
3718 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | 3725 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); |
3719 | } else { | 3726 | } else { |
3727 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
3728 | SCTP_ERROR(ECONNABORTED)); | ||
3720 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 3729 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
3721 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | 3730 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); |
3722 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | 3731 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); |
@@ -4034,6 +4043,8 @@ sctp_disposition_t sctp_sf_do_9_1_prm_abort( | |||
4034 | * TCB. This is a departure from our typical NOMEM handling. | 4043 | * TCB. This is a departure from our typical NOMEM handling. |
4035 | */ | 4044 | */ |
4036 | 4045 | ||
4046 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4047 | SCTP_ERROR(ECONNABORTED)); | ||
4037 | /* Delete the established association. */ | 4048 | /* Delete the established association. */ |
4038 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4049 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4039 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | 4050 | SCTP_U32(SCTP_ERROR_USER_ABORT)); |
@@ -4175,6 +4186,8 @@ sctp_disposition_t sctp_sf_cookie_wait_prm_abort( | |||
4175 | * TCB. This is a departure from our typical NOMEM handling. | 4186 | * TCB. This is a departure from our typical NOMEM handling. |
4176 | */ | 4187 | */ |
4177 | 4188 | ||
4189 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4190 | SCTP_ERROR(ECONNREFUSED)); | ||
4178 | /* Delete the established association. */ | 4191 | /* Delete the established association. */ |
4179 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4192 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4180 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | 4193 | SCTP_U32(SCTP_ERROR_USER_ABORT)); |
@@ -4543,6 +4556,8 @@ sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, | |||
4543 | struct sctp_transport *transport = arg; | 4556 | struct sctp_transport *transport = arg; |
4544 | 4557 | ||
4545 | if (asoc->overall_error_count >= asoc->max_retrans) { | 4558 | if (asoc->overall_error_count >= asoc->max_retrans) { |
4559 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4560 | SCTP_ERROR(ETIMEDOUT)); | ||
4546 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 4561 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
4547 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4562 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4548 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4563 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
@@ -4662,6 +4677,8 @@ sctp_disposition_t sctp_sf_t1_init_timer_expire(const struct sctp_endpoint *ep, | |||
4662 | SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d" | 4677 | SCTP_DEBUG_PRINTK("Giving up on INIT, attempts: %d" |
4663 | " max_init_attempts: %d\n", | 4678 | " max_init_attempts: %d\n", |
4664 | attempts, asoc->max_init_attempts); | 4679 | attempts, asoc->max_init_attempts); |
4680 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4681 | SCTP_ERROR(ETIMEDOUT)); | ||
4665 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4682 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4666 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4683 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
4667 | return SCTP_DISPOSITION_DELETE_TCB; | 4684 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -4711,6 +4728,8 @@ sctp_disposition_t sctp_sf_t1_cookie_timer_expire(const struct sctp_endpoint *ep | |||
4711 | 4728 | ||
4712 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | 4729 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); |
4713 | } else { | 4730 | } else { |
4731 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4732 | SCTP_ERROR(ETIMEDOUT)); | ||
4714 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | 4733 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, |
4715 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4734 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
4716 | return SCTP_DISPOSITION_DELETE_TCB; | 4735 | return SCTP_DISPOSITION_DELETE_TCB; |
@@ -4742,6 +4761,8 @@ sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, | |||
4742 | 4761 | ||
4743 | SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); | 4762 | SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); |
4744 | if (asoc->overall_error_count >= asoc->max_retrans) { | 4763 | if (asoc->overall_error_count >= asoc->max_retrans) { |
4764 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4765 | SCTP_ERROR(ETIMEDOUT)); | ||
4745 | /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | 4766 | /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ |
4746 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4767 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4747 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4768 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
@@ -4817,6 +4838,8 @@ sctp_disposition_t sctp_sf_t4_timer_expire( | |||
4817 | if (asoc->overall_error_count >= asoc->max_retrans) { | 4838 | if (asoc->overall_error_count >= asoc->max_retrans) { |
4818 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | 4839 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, |
4819 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | 4840 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); |
4841 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4842 | SCTP_ERROR(ETIMEDOUT)); | ||
4820 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4843 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4821 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4844 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
4822 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 4845 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
@@ -4870,6 +4893,8 @@ sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep, | |||
4870 | goto nomem; | 4893 | goto nomem; |
4871 | 4894 | ||
4872 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | 4895 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); |
4896 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
4897 | SCTP_ERROR(ETIMEDOUT)); | ||
4873 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 4898 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
4874 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | 4899 | SCTP_U32(SCTP_ERROR_NO_ERROR)); |
4875 | 4900 | ||
@@ -5309,6 +5334,8 @@ static int sctp_eat_data(const struct sctp_association *asoc, | |||
5309 | * processing the rest of the chunks in the packet. | 5334 | * processing the rest of the chunks in the packet. |
5310 | */ | 5335 | */ |
5311 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); | 5336 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); |
5337 | sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, | ||
5338 | SCTP_ERROR(ECONNABORTED)); | ||
5312 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | 5339 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, |
5313 | SCTP_U32(SCTP_ERROR_NO_DATA)); | 5340 | SCTP_U32(SCTP_ERROR_NO_DATA)); |
5314 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | 5341 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); |
diff --git a/net/sctp/socket.c b/net/sctp/socket.c index b6e4b89539b3..174d4d35e951 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c | |||
@@ -1057,6 +1057,7 @@ static int __sctp_connect(struct sock* sk, | |||
1057 | inet_sk(sk)->dport = htons(asoc->peer.port); | 1057 | inet_sk(sk)->dport = htons(asoc->peer.port); |
1058 | af = sctp_get_af_specific(to.sa.sa_family); | 1058 | af = sctp_get_af_specific(to.sa.sa_family); |
1059 | af->to_sk_daddr(&to, sk); | 1059 | af->to_sk_daddr(&to, sk); |
1060 | sk->sk_err = 0; | ||
1060 | 1061 | ||
1061 | timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); | 1062 | timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); |
1062 | err = sctp_wait_for_connect(asoc, &timeo); | 1063 | err = sctp_wait_for_connect(asoc, &timeo); |
@@ -1228,7 +1229,7 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1228 | 1229 | ||
1229 | ep = sctp_sk(sk)->ep; | 1230 | ep = sctp_sk(sk)->ep; |
1230 | 1231 | ||
1231 | /* Walk all associations on a socket, not on an endpoint. */ | 1232 | /* Walk all associations on an endpoint. */ |
1232 | list_for_each_safe(pos, temp, &ep->asocs) { | 1233 | list_for_each_safe(pos, temp, &ep->asocs) { |
1233 | asoc = list_entry(pos, struct sctp_association, asocs); | 1234 | asoc = list_entry(pos, struct sctp_association, asocs); |
1234 | 1235 | ||
@@ -1241,13 +1242,13 @@ SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | |||
1241 | if (sctp_state(asoc, CLOSED)) { | 1242 | if (sctp_state(asoc, CLOSED)) { |
1242 | sctp_unhash_established(asoc); | 1243 | sctp_unhash_established(asoc); |
1243 | sctp_association_free(asoc); | 1244 | sctp_association_free(asoc); |
1245 | continue; | ||
1246 | } | ||
1247 | } | ||
1244 | 1248 | ||
1245 | } else if (sock_flag(sk, SOCK_LINGER) && | 1249 | if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) |
1246 | !sk->sk_lingertime) | 1250 | sctp_primitive_ABORT(asoc, NULL); |
1247 | sctp_primitive_ABORT(asoc, NULL); | 1251 | else |
1248 | else | ||
1249 | sctp_primitive_SHUTDOWN(asoc, NULL); | ||
1250 | } else | ||
1251 | sctp_primitive_SHUTDOWN(asoc, NULL); | 1252 | sctp_primitive_SHUTDOWN(asoc, NULL); |
1252 | } | 1253 | } |
1253 | 1254 | ||
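The rewritten sctp_close() branch makes the abort-versus-shutdown choice explicit: SOCK_LINGER with a zero linger time sends an ABORT, everything else gets a graceful SHUTDOWN. An application opts into the abortive path with SO_LINGER before calling close(); the helper below is a hypothetical illustration of that, assuming fd is an SCTP socket.

    #include <sys/socket.h>
    #include <unistd.h>

    /* l_onoff = 1 with l_linger = 0 sets SOCK_LINGER and a zero
     * sk_lingertime, which is exactly the ABORT case tested above. */
    int close_abortively(int fd)
    {
            struct linger lg = { .l_onoff = 1, .l_linger = 0 };

            if (setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg)) < 0)
                    return -1;
            return close(fd);
    }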
@@ -5317,6 +5318,7 @@ static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | |||
5317 | */ | 5318 | */ |
5318 | sctp_release_sock(sk); | 5319 | sctp_release_sock(sk); |
5319 | current_timeo = schedule_timeout(current_timeo); | 5320 | current_timeo = schedule_timeout(current_timeo); |
5321 | BUG_ON(sk != asoc->base.sk); | ||
5320 | sctp_lock_sock(sk); | 5322 | sctp_lock_sock(sk); |
5321 | 5323 | ||
5322 | *timeo_p = current_timeo; | 5324 | *timeo_p = current_timeo; |
@@ -5604,12 +5606,14 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
5604 | */ | 5606 | */ |
5605 | newsp->type = type; | 5607 | newsp->type = type; |
5606 | 5608 | ||
5607 | spin_lock_bh(&oldsk->sk_lock.slock); | 5609 | /* Mark the new socket "in-use" by the user so that any packets |
5608 | /* Migrate the backlog from oldsk to newsk. */ | 5610 | * that may arrive on the association after we've moved it are |
5609 | sctp_backlog_migrate(assoc, oldsk, newsk); | 5611 | * queued to the backlog. This prevents a potential race between |
5610 | /* Migrate the association to the new socket. */ | 5612 | * backlog processing on the old socket and new-packet processing |
5613 | * on the new socket. | ||
5614 | */ | ||
5615 | sctp_lock_sock(newsk); | ||
5611 | sctp_assoc_migrate(assoc, newsk); | 5616 | sctp_assoc_migrate(assoc, newsk); |
5612 | spin_unlock_bh(&oldsk->sk_lock.slock); | ||
5613 | 5617 | ||
5614 | /* If the association on the newsk is already closed before accept() | 5618 | /* If the association on the newsk is already closed before accept() |
5615 | * is called, set RCV_SHUTDOWN flag. | 5619 | * is called, set RCV_SHUTDOWN flag. |
@@ -5618,6 +5622,7 @@ static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | |||
5618 | newsk->sk_shutdown |= RCV_SHUTDOWN; | 5622 | newsk->sk_shutdown |= RCV_SHUTDOWN; |
5619 | 5623 | ||
5620 | newsk->sk_state = SCTP_SS_ESTABLISHED; | 5624 | newsk->sk_state = SCTP_SS_ESTABLISHED; |
5625 | sctp_release_sock(newsk); | ||
5621 | } | 5626 | } |
5622 | 5627 | ||
5623 | /* This proto struct describes the ULP interface for SCTP. */ | 5628 | /* This proto struct describes the ULP interface for SCTP. */ |
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 6d04504b2fc1..d0f86ed43f7a 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c | |||
@@ -697,29 +697,79 @@ static void check_sec_ref(struct module *mod, const char *modname, | |||
697 | 697 | ||
698 | /* Walk through all sections */ | 698 | /* Walk through all sections */ |
699 | for (i = 0; i < hdr->e_shnum; i++) { | 699 | for (i = 0; i < hdr->e_shnum; i++) { |
700 | Elf_Rela *rela; | 700 | const char *name = secstrings + sechdrs[i].sh_name; |
701 | Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset; | 701 | const char *secname; |
702 | Elf_Rela *stop = (void*)start + sechdrs[i].sh_size; | 702 | Elf_Rela r; |
703 | const char *name = secstrings + sechdrs[i].sh_name + | 703 | unsigned int r_sym; |
704 | strlen(".rela"); | ||
705 | /* We want to process only relocation sections and not .init */ | 704 | /* We want to process only relocation sections and not .init */ |
706 | if (section_ref_ok(name) || (sechdrs[i].sh_type != SHT_RELA)) | 705 | if (sechdrs[i].sh_type == SHT_RELA) { |
707 | continue; | 706 | Elf_Rela *rela; |
707 | Elf_Rela *start = (void *)hdr + sechdrs[i].sh_offset; | ||
708 | Elf_Rela *stop = (void*)start + sechdrs[i].sh_size; | ||
709 | name += strlen(".rela"); | ||
710 | if (section_ref_ok(name)) | ||
711 | continue; | ||
708 | 712 | ||
709 | for (rela = start; rela < stop; rela++) { | 713 | for (rela = start; rela < stop; rela++) { |
710 | Elf_Rela r; | 714 | r.r_offset = TO_NATIVE(rela->r_offset); |
711 | const char *secname; | 715 | #if KERNEL_ELFCLASS == ELFCLASS64 |
712 | r.r_offset = TO_NATIVE(rela->r_offset); | 716 | if (hdr->e_machine == EM_MIPS) { |
713 | r.r_info = TO_NATIVE(rela->r_info); | 717 | r_sym = ELF64_MIPS_R_SYM(rela->r_info); |
714 | r.r_addend = TO_NATIVE(rela->r_addend); | 718 | r_sym = TO_NATIVE(r_sym); |
715 | sym = elf->symtab_start + ELF_R_SYM(r.r_info); | 719 | } else { |
716 | /* Skip special sections */ | 720 | r.r_info = TO_NATIVE(rela->r_info); |
717 | if (sym->st_shndx >= SHN_LORESERVE) | 721 | r_sym = ELF_R_SYM(r.r_info); |
722 | } | ||
723 | #else | ||
724 | r.r_info = TO_NATIVE(rela->r_info); | ||
725 | r_sym = ELF_R_SYM(r.r_info); | ||
726 | #endif | ||
727 | r.r_addend = TO_NATIVE(rela->r_addend); | ||
728 | sym = elf->symtab_start + r_sym; | ||
729 | /* Skip special sections */ | ||
730 | if (sym->st_shndx >= SHN_LORESERVE) | ||
731 | continue; | ||
732 | |||
733 | secname = secstrings + | ||
734 | sechdrs[sym->st_shndx].sh_name; | ||
735 | if (section(secname)) | ||
736 | warn_sec_mismatch(modname, name, | ||
737 | elf, sym, r); | ||
738 | } | ||
739 | } else if (sechdrs[i].sh_type == SHT_REL) { | ||
740 | Elf_Rel *rel; | ||
741 | Elf_Rel *start = (void *)hdr + sechdrs[i].sh_offset; | ||
742 | Elf_Rel *stop = (void*)start + sechdrs[i].sh_size; | ||
743 | name += strlen(".rel"); | ||
744 | if (section_ref_ok(name)) | ||
718 | continue; | 745 | continue; |
719 | 746 | ||
720 | secname = secstrings + sechdrs[sym->st_shndx].sh_name; | 747 | for (rel = start; rel < stop; rel++) { |
721 | if (section(secname)) | 748 | r.r_offset = TO_NATIVE(rel->r_offset); |
722 | warn_sec_mismatch(modname, name, elf, sym, r); | 749 | #if KERNEL_ELFCLASS == ELFCLASS64 |
750 | if (hdr->e_machine == EM_MIPS) { | ||
751 | r_sym = ELF64_MIPS_R_SYM(rel->r_info); | ||
752 | r_sym = TO_NATIVE(r_sym); | ||
753 | } else { | ||
754 | r.r_info = TO_NATIVE(rel->r_info); | ||
755 | r_sym = ELF_R_SYM(r.r_info); | ||
756 | } | ||
757 | #else | ||
758 | r.r_info = TO_NATIVE(rel->r_info); | ||
759 | r_sym = ELF_R_SYM(r.r_info); | ||
760 | #endif | ||
761 | r.r_addend = 0; | ||
762 | sym = elf->symtab_start + r_sym; | ||
763 | /* Skip special sections */ | ||
764 | if (sym->st_shndx >= SHN_LORESERVE) | ||
765 | continue; | ||
766 | |||
767 | secname = secstrings + | ||
768 | sechdrs[sym->st_shndx].sh_name; | ||
769 | if (section(secname)) | ||
770 | warn_sec_mismatch(modname, name, | ||
771 | elf, sym, r); | ||
772 | } | ||
723 | } | 773 | } |
724 | } | 774 | } |
725 | } | 775 | } |
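check_sec_ref() now walks SHT_REL sections as well as SHT_RELA ones. The two record types differ only in the explicit addend, which is why the REL branch sets r.r_addend to 0 before handing the entry to warn_sec_mismatch(). The standalone program below is a host-side illustration using glibc's <elf.h> with fabricated values; it shows the two layouts and the ELF64_R_SYM/ELF64_R_TYPE split both branches rely on.

    #include <elf.h>
    #include <stdio.h>

    int main(void)
    {
            /* Fabricated entry: symbol index 7, relocation type 2. */
            Elf64_Rela rela = {
                    .r_offset = 0x40,
                    .r_info   = ELF64_R_INFO(7, 2),
                    .r_addend = -4,
            };
            Elf64_Rel rel = { .r_offset = 0x40, .r_info = rela.r_info };

            printf("RELA: sym %lu type %lu addend %ld\n",
                   (unsigned long)ELF64_R_SYM(rela.r_info),
                   (unsigned long)ELF64_R_TYPE(rela.r_info),
                   (long)rela.r_addend);
            /* SHT_REL carries no addend field at all, hence r_addend = 0. */
            printf("REL:  sym %lu type %lu addend 0\n",
                   (unsigned long)ELF64_R_SYM(rel.r_info),
                   (unsigned long)ELF64_R_TYPE(rel.r_info));
            return 0;
    }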
diff --git a/scripts/mod/modpost.h b/scripts/mod/modpost.h index b14255c72a37..861d866fcd83 100644 --- a/scripts/mod/modpost.h +++ b/scripts/mod/modpost.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define ELF_ST_BIND ELF32_ST_BIND | 21 | #define ELF_ST_BIND ELF32_ST_BIND |
22 | #define ELF_ST_TYPE ELF32_ST_TYPE | 22 | #define ELF_ST_TYPE ELF32_ST_TYPE |
23 | 23 | ||
24 | #define Elf_Rel Elf32_Rel | ||
24 | #define Elf_Rela Elf32_Rela | 25 | #define Elf_Rela Elf32_Rela |
25 | #define ELF_R_SYM ELF32_R_SYM | 26 | #define ELF_R_SYM ELF32_R_SYM |
26 | #define ELF_R_TYPE ELF32_R_TYPE | 27 | #define ELF_R_TYPE ELF32_R_TYPE |
@@ -34,11 +35,31 @@ | |||
34 | #define ELF_ST_BIND ELF64_ST_BIND | 35 | #define ELF_ST_BIND ELF64_ST_BIND |
35 | #define ELF_ST_TYPE ELF64_ST_TYPE | 36 | #define ELF_ST_TYPE ELF64_ST_TYPE |
36 | 37 | ||
38 | #define Elf_Rel Elf64_Rel | ||
37 | #define Elf_Rela Elf64_Rela | 39 | #define Elf_Rela Elf64_Rela |
38 | #define ELF_R_SYM ELF64_R_SYM | 40 | #define ELF_R_SYM ELF64_R_SYM |
39 | #define ELF_R_TYPE ELF64_R_TYPE | 41 | #define ELF_R_TYPE ELF64_R_TYPE |
40 | #endif | 42 | #endif |
41 | 43 | ||
44 | /* The 64-bit MIPS ELF ABI uses an unusual reloc format. */ | ||
45 | typedef struct | ||
46 | { | ||
47 | Elf32_Word r_sym; /* Symbol index */ | ||
48 | unsigned char r_ssym; /* Special symbol for 2nd relocation */ | ||
49 | unsigned char r_type3; /* 3rd relocation type */ | ||
50 | unsigned char r_type2; /* 2nd relocation type */ | ||
51 | unsigned char r_type1; /* 1st relocation type */ | ||
52 | } _Elf64_Mips_R_Info; | ||
53 | |||
54 | typedef union | ||
55 | { | ||
56 | Elf64_Xword r_info_number; | ||
57 | _Elf64_Mips_R_Info r_info_fields; | ||
58 | } _Elf64_Mips_R_Info_union; | ||
59 | |||
60 | #define ELF64_MIPS_R_SYM(i) \ | ||
61 | ((__extension__ (_Elf64_Mips_R_Info_union)(i)).r_info_fields.r_sym) | ||
62 | |||
42 | #if KERNEL_ELFDATA != HOST_ELFDATA | 63 | #if KERNEL_ELFDATA != HOST_ELFDATA |
43 | 64 | ||
44 | static inline void __endian(const void *src, void *dest, unsigned int size) | 65 | static inline void __endian(const void *src, void *dest, unsigned int size) |
@@ -48,8 +69,6 @@ static inline void __endian(const void *src, void *dest, unsigned int size) | |||
48 | ((unsigned char*)dest)[i] = ((unsigned char*)src)[size - i-1]; | 69 | ((unsigned char*)dest)[i] = ((unsigned char*)src)[size - i-1]; |
49 | } | 70 | } |
50 | 71 | ||
51 | |||
52 | |||
53 | #define TO_NATIVE(x) \ | 72 | #define TO_NATIVE(x) \ |
54 | ({ \ | 73 | ({ \ |
55 | typeof(x) __x; \ | 74 | typeof(x) __x; \ |
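The new ELF64_MIPS_R_SYM exists because the 64-bit MIPS (n64) ABI does not store r_info as a single 64-bit number: the first four bytes are the symbol index and the remaining four are individual one-byte relocation fields, so byte-swapping the whole word with TO_NATIVE() would scramble them. modpost therefore extracts r_sym from the raw word first and swaps only that 32-bit value. The snippet below is a host-side illustration of the overlay with arbitrary values; the struct mirrors _Elf64_Mips_R_Info above and is not taken from a real MIPS object file.

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    /* Same 8-byte layout as _Elf64_Mips_R_Info in modpost.h. */
    struct mips64_r_info {
            Elf32_Word    r_sym;    /* symbol index            */
            unsigned char r_ssym;   /* special symbol          */
            unsigned char r_type3;  /* third relocation type   */
            unsigned char r_type2;  /* second relocation type  */
            unsigned char r_type1;  /* first (primary) type    */
    };

    int main(void)
    {
            struct mips64_r_info fields = { .r_sym = 42, .r_type1 = 5 };
            Elf64_Xword raw;

            /* Both views occupy the same eight bytes of the record. */
            memcpy(&raw, &fields, sizeof(raw));
            printf("raw r_info %#llx -> sym %u, primary type %u\n",
                   (unsigned long long)raw, fields.r_sym,
                   (unsigned)fields.r_type1);
            return 0;
    }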
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index d987048d3f33..21dad415b896 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -3231,7 +3231,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) | |||
3231 | goto out; | 3231 | goto out; |
3232 | 3232 | ||
3233 | /* Handle mapped IPv4 packets arriving via IPv6 sockets */ | 3233 | /* Handle mapped IPv4 packets arriving via IPv6 sockets */ |
3234 | if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP)) | 3234 | if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) |
3235 | family = PF_INET; | 3235 | family = PF_INET; |
3236 | 3236 | ||
3237 | read_lock_bh(&sk->sk_callback_lock); | 3237 | read_lock_bh(&sk->sk_callback_lock); |
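skb->protocol is stored in network byte order, so the host-order constant ETH_P_IP is the side that needs converting: htons() states that direction, whereas the old ntohs() produced the same bits only because the two macros perform the identical transformation on any given machine. A tiny host-side illustration follows; ETH_P_IP is defined locally for self-containment (0x0800 is the standard IPv4 EtherType).

    #include <arpa/inet.h>
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ETH_P_IP 0x0800         /* host-order IPv4 EtherType */

    int main(void)
    {
            /* A field kept in network byte order, like skb->protocol. */
            uint16_t protocol = htons(ETH_P_IP);

            /* Convert the host-order constant before comparing. */
            assert(protocol == htons(ETH_P_IP));
            printf("wire value 0x%04x matches htons(ETH_P_IP)\n", protocol);
            return 0;
    }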
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 7177e98df7f3..c284dbb8b8c0 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -594,6 +594,10 @@ int security_sid_to_context(u32 sid, char **scontext, u32 *scontext_len) | |||
594 | 594 | ||
595 | *scontext_len = strlen(initial_sid_to_string[sid]) + 1; | 595 | *scontext_len = strlen(initial_sid_to_string[sid]) + 1; |
596 | scontextp = kmalloc(*scontext_len,GFP_ATOMIC); | 596 | scontextp = kmalloc(*scontext_len,GFP_ATOMIC); |
597 | if (!scontextp) { | ||
598 | rc = -ENOMEM; | ||
599 | goto out; | ||
600 | } | ||
597 | strcpy(scontextp, initial_sid_to_string[sid]); | 601 | strcpy(scontextp, initial_sid_to_string[sid]); |
598 | *scontext = scontextp; | 602 | *scontext = scontextp; |
599 | goto out; | 603 | goto out; |
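The services.c hunk closes a hole where a failed GFP_ATOMIC allocation would have been passed straight to strcpy(). The same guard looks like this in a self-contained userspace analogue; copy_context is a made-up name, not the SELinux function.

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Never copy into an allocation that may have failed; report -ENOMEM
     * and let the caller unwind, as security_sid_to_context() now does. */
    int copy_context(const char *src, char **dst, size_t *len)
    {
            *len = strlen(src) + 1;
            *dst = malloc(*len);
            if (!*dst)
                    return -ENOMEM;
            memcpy(*dst, src, *len);
            return 0;
    }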
diff --git a/sound/drivers/mpu401/mpu401.c b/sound/drivers/mpu401/mpu401.c index da7ef26995c3..77b06009735d 100644 --- a/sound/drivers/mpu401/mpu401.c +++ b/sound/drivers/mpu401/mpu401.c | |||
@@ -151,7 +151,7 @@ static struct pnp_device_id snd_mpu401_pnpids[] = { | |||
151 | 151 | ||
152 | MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids); | 152 | MODULE_DEVICE_TABLE(pnp, snd_mpu401_pnpids); |
153 | 153 | ||
154 | static int __init snd_mpu401_pnp(int dev, struct pnp_dev *device, | 154 | static int __devinit snd_mpu401_pnp(int dev, struct pnp_dev *device, |
155 | const struct pnp_device_id *id) | 155 | const struct pnp_device_id *id) |
156 | { | 156 | { |
157 | if (!pnp_port_valid(device, 0) || | 157 | if (!pnp_port_valid(device, 0) || |
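snd_mpu401_pnp() is reached from the PnP probe path, which can run long after boot, so it must not live in .init.text (that section is freed once init finishes); __devinit is the annotation for device-probe code, and references from resident code into .init.text are exactly what the modpost changes earlier in this series are meant to catch. The toy program below only illustrates the idea of placing functions in named sections; the macros are stand-ins, not the kernel's definitions of __init and __devinit.

    #include <stdio.h>

    /* Stand-in annotations: the real kernel macros carry more than a
     * section name, but section placement is the part that matters for
     * modpost's cross-section reference checking. */
    #define demo_init    __attribute__((section(".init.demo")))
    #define demo_devinit __attribute__((section(".devinit.demo")))

    static void demo_init boot_only_setup(void)
    {
            puts("runs once at boot; its section can be discarded");
    }

    static void demo_devinit probe_device(void)
    {
            puts("may run whenever a device appears; must stay resident");
    }

    int main(void)
    {
            boot_only_setup();
            probe_device();
            return 0;
    }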
diff --git a/sound/isa/es18xx.c b/sound/isa/es18xx.c index a36ec1daa5cb..e6945db8ed1b 100644 --- a/sound/isa/es18xx.c +++ b/sound/isa/es18xx.c | |||
@@ -85,6 +85,8 @@ | |||
85 | #include <linux/pnp.h> | 85 | #include <linux/pnp.h> |
86 | #include <linux/isapnp.h> | 86 | #include <linux/isapnp.h> |
87 | #include <linux/moduleparam.h> | 87 | #include <linux/moduleparam.h> |
88 | #include <linux/delay.h> | ||
89 | |||
88 | #include <asm/io.h> | 90 | #include <asm/io.h> |
89 | #include <asm/dma.h> | 91 | #include <asm/dma.h> |
90 | #include <sound/core.h> | 92 | #include <sound/core.h> |
diff --git a/sound/oss/ad1848.c b/sound/oss/ad1848.c index 49796be955f3..e04fa49b0dc8 100644 --- a/sound/oss/ad1848.c +++ b/sound/oss/ad1848.c | |||
@@ -2026,7 +2026,8 @@ int ad1848_init (char *name, struct resource *ports, int irq, int dma_playback, | |||
2026 | if (irq > 0) | 2026 | if (irq > 0) |
2027 | { | 2027 | { |
2028 | devc->dev_no = my_dev; | 2028 | devc->dev_no = my_dev; |
2029 | if (request_irq(devc->irq, adintr, 0, devc->name, (void *)my_dev) < 0) | 2029 | if (request_irq(devc->irq, adintr, 0, devc->name, |
2030 | (void *)(long)my_dev) < 0) | ||
2030 | { | 2031 | { |
2031 | printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n"); | 2032 | printk(KERN_WARNING "ad1848: Unable to allocate IRQ\n"); |
2032 | /* Don't free it either then.. */ | 2033 | /* Don't free it either then.. */ |
@@ -2175,7 +2176,7 @@ void ad1848_unload(int io_base, int irq, int dma_playback, int dma_capture, int | |||
2175 | if (!share_dma) | 2176 | if (!share_dma) |
2176 | { | 2177 | { |
2177 | if (devc->irq > 0) /* There is no point in freeing irq, if it wasn't allocated */ | 2178 | if (devc->irq > 0) /* There is no point in freeing irq, if it wasn't allocated */ |
2178 | free_irq(devc->irq, (void *)devc->dev_no); | 2179 | free_irq(devc->irq, (void *)(long)devc->dev_no); |
2179 | 2180 | ||
2180 | sound_free_dma(dma_playback); | 2181 | sound_free_dma(dma_playback); |
2181 | 2182 | ||
@@ -2204,7 +2205,7 @@ irqreturn_t adintr(int irq, void *dev_id, struct pt_regs *dummy) | |||
2204 | unsigned char c930_stat = 0; | 2205 | unsigned char c930_stat = 0; |
2205 | int cnt = 0; | 2206 | int cnt = 0; |
2206 | 2207 | ||
2207 | dev = (int)dev_id; | 2208 | dev = (long)dev_id; |
2208 | devc = (ad1848_info *) audio_devs[dev]->devc; | 2209 | devc = (ad1848_info *) audio_devs[dev]->devc; |
2209 | 2210 | ||
2210 | interrupt_again: /* Jump back here if int status doesn't reset */ | 2211 | interrupt_again: /* Jump back here if int status doesn't reset */ |
@@ -2900,7 +2901,8 @@ static struct pnp_dev *activate_dev(char *devname, char *resname, struct pnp_dev | |||
2900 | return(dev); | 2901 | return(dev); |
2901 | } | 2902 | } |
2902 | 2903 | ||
2903 | static struct pnp_dev *ad1848_init_generic(struct pnp_card *bus, struct address_info *hw_config, int slot) | 2904 | static struct pnp_dev __init *ad1848_init_generic(struct pnp_card *bus, |
2905 | struct address_info *hw_config, int slot) | ||
2904 | { | 2906 | { |
2905 | 2907 | ||
2906 | /* Configure Audio device */ | 2908 | /* Configure Audio device */ |
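The ad1848 changes stop passing an int directly as the void *dev_id cookie and route it through (long) in both directions, which keeps the value intact and silences "cast to/from pointer of different size" warnings on 64-bit builds, since long and void * have the same width on the ABIs Linux supports. A minimal standalone illustration of the round trip follows; handler and my_dev are invented names for the demo.

    #include <stdio.h>

    static void handler(void *dev_id)
    {
            int dev = (long)dev_id;         /* unpack the way adintr() now does */

            printf("interrupt for device %d\n", dev);
    }

    int main(void)
    {
            int my_dev = 3;                 /* arbitrary device index */

            handler((void *)(long)my_dev);  /* widen to long, then to pointer */
            return 0;
    }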
diff --git a/sound/oss/nm256_audio.c b/sound/oss/nm256_audio.c index 7de079b202f2..6e662ac009ae 100644 --- a/sound/oss/nm256_audio.c +++ b/sound/oss/nm256_audio.c | |||
@@ -960,7 +960,7 @@ static struct ac97_mixer_value_list mixer_defaults[] = { | |||
960 | 960 | ||
961 | 961 | ||
962 | /* Installs the AC97 mixer into CARD. */ | 962 | /* Installs the AC97 mixer into CARD. */ |
963 | static int __init | 963 | static int __devinit |
964 | nm256_install_mixer (struct nm256_info *card) | 964 | nm256_install_mixer (struct nm256_info *card) |
965 | { | 965 | { |
966 | int mixer; | 966 | int mixer; |
@@ -995,7 +995,7 @@ nm256_install_mixer (struct nm256_info *card) | |||
995 | * RAM. | 995 | * RAM. |
996 | */ | 996 | */ |
997 | 997 | ||
998 | static void __init | 998 | static void __devinit |
999 | nm256_peek_for_sig (struct nm256_info *card) | 999 | nm256_peek_for_sig (struct nm256_info *card) |
1000 | { | 1000 | { |
1001 | u32 port1offset | 1001 | u32 port1offset |
@@ -1056,7 +1056,7 @@ nm256_install(struct pci_dev *pcidev, enum nm256rev rev, char *verstr) | |||
1056 | card->playing = 0; | 1056 | card->playing = 0; |
1057 | card->recording = 0; | 1057 | card->recording = 0; |
1058 | card->rev = rev; | 1058 | card->rev = rev; |
1059 | spin_lock_init(&card->lock); | 1059 | spin_lock_init(&card->lock); |
1060 | 1060 | ||
1061 | /* Init the memory port info. */ | 1061 | /* Init the memory port info. */ |
1062 | for (x = 0; x < 2; x++) { | 1062 | for (x = 0; x < 2; x++) { |