diff options
551 files changed, 11027 insertions, 7648 deletions
diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt index b939ebb62871..80d150458c80 100644 --- a/Documentation/DMA-API.txt +++ b/Documentation/DMA-API.txt | |||
@@ -145,7 +145,7 @@ Part Ic - DMA addressing limitations | |||
145 | int | 145 | int |
146 | dma_supported(struct device *dev, u64 mask) | 146 | dma_supported(struct device *dev, u64 mask) |
147 | int | 147 | int |
148 | pci_dma_supported(struct device *dev, u64 mask) | 148 | pci_dma_supported(struct pci_dev *hwdev, u64 mask) |
149 | 149 | ||
150 | Checks to see if the device can support DMA to the memory described by | 150 | Checks to see if the device can support DMA to the memory described by |
151 | mask. | 151 | mask. |
@@ -189,7 +189,7 @@ dma_addr_t | |||
189 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, | 189 | dma_map_single(struct device *dev, void *cpu_addr, size_t size, |
190 | enum dma_data_direction direction) | 190 | enum dma_data_direction direction) |
191 | dma_addr_t | 191 | dma_addr_t |
192 | pci_map_single(struct device *dev, void *cpu_addr, size_t size, | 192 | pci_map_single(struct pci_dev *hwdev, void *cpu_addr, size_t size, |
193 | int direction) | 193 | int direction) |
194 | 194 | ||
195 | Maps a piece of processor virtual memory so it can be accessed by the | 195 | Maps a piece of processor virtual memory so it can be accessed by the |
@@ -395,6 +395,71 @@ Notes: You must do this: | |||
395 | 395 | ||
396 | See also dma_map_single(). | 396 | See also dma_map_single(). |
397 | 397 | ||
398 | dma_addr_t | ||
399 | dma_map_single_attrs(struct device *dev, void *cpu_addr, size_t size, | ||
400 | enum dma_data_direction dir, | ||
401 | struct dma_attrs *attrs) | ||
402 | |||
403 | void | ||
404 | dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, | ||
405 | size_t size, enum dma_data_direction dir, | ||
406 | struct dma_attrs *attrs) | ||
407 | |||
408 | int | ||
409 | dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | ||
410 | int nents, enum dma_data_direction dir, | ||
411 | struct dma_attrs *attrs) | ||
412 | |||
413 | void | ||
414 | dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, | ||
415 | int nents, enum dma_data_direction dir, | ||
416 | struct dma_attrs *attrs) | ||
417 | |||
418 | The four functions above are just like the counterpart functions | ||
419 | without the _attrs suffixes, except that they pass an optional | ||
420 | struct dma_attrs*. | ||
421 | |||
422 | struct dma_attrs encapsulates a set of "dma attributes". For the | ||
423 | definition of struct dma_attrs see linux/dma-attrs.h. | ||
424 | |||
425 | The interpretation of dma attributes is architecture-specific, and | ||
426 | each attribute should be documented in Documentation/DMA-attributes.txt. | ||
427 | |||
428 | If struct dma_attrs* is NULL, the semantics of each of these | ||
429 | functions is identical to those of the corresponding function | ||
430 | without the _attrs suffix. As a result dma_map_single_attrs() | ||
431 | can generally replace dma_map_single(), etc. | ||
432 | |||
433 | As an example of the use of the *_attrs functions, here's how | ||
434 | you could pass an attribute DMA_ATTR_FOO when mapping memory | ||
435 | for DMA: | ||
436 | |||
437 | #include <linux/dma-attrs.h> | ||
438 | /* DMA_ATTR_FOO should be defined in linux/dma-attrs.h and | ||
439 | * documented in Documentation/DMA-attributes.txt */ | ||
440 | ... | ||
441 | |||
442 | DEFINE_DMA_ATTRS(attrs); | ||
443 | dma_set_attr(DMA_ATTR_FOO, &attrs); | ||
444 | .... | ||
445 | n = dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE, &attrs); | ||
446 | .... | ||
447 | |||
448 | Architectures that care about DMA_ATTR_FOO would check for its | ||
449 | presence in their implementations of the mapping and unmapping | ||
450 | routines, e.g.: | ||
451 | |||
452 | void whizco_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, | ||
453 | int nents, enum dma_data_direction dir, | ||
454 | struct dma_attrs *attrs) | ||
455 | { | ||
456 | .... | ||
457 | int foo = dma_get_attr(DMA_ATTR_FOO, attrs); | ||
458 | .... | ||
459 | if (foo) | ||
460 | /* twizzle the frobnozzle */ | ||
461 | .... | ||
462 | |||
398 | 463 | ||
399 | Part II - Advanced dma_ usage | 464 | Part II - Advanced dma_ usage |
400 | ----------------------------- | 465 | ----------------------------- |
diff --git a/Documentation/DMA-attributes.txt b/Documentation/DMA-attributes.txt new file mode 100644 index 000000000000..6d772f84b477 --- /dev/null +++ b/Documentation/DMA-attributes.txt | |||
@@ -0,0 +1,24 @@ | |||
1 | DMA attributes | ||
2 | ============== | ||
3 | |||
4 | This document describes the semantics of the DMA attributes that are | ||
5 | defined in linux/dma-attrs.h. | ||
6 | |||
7 | DMA_ATTR_WRITE_BARRIER | ||
8 | ---------------------- | ||
9 | |||
10 | DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA. DMA | ||
11 | to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces | ||
12 | all pending DMA writes to complete, and thus provides a mechanism to | ||
13 | strictly order DMA from a device across all intervening busses and | ||
14 | bridges. This barrier is not specific to a particular type of | ||
15 | interconnect, it applies to the system as a whole, and so its | ||
16 | implementation must account for the idiosyncrasies of the system all | ||
17 | the way from the DMA device to memory. | ||
18 | |||
19 | As an example of a situation where DMA_ATTR_WRITE_BARRIER would be | ||
20 | useful, suppose that a device does a DMA write to indicate that data is | ||
21 | ready and available in memory. The DMA of the "completion indication" | ||
22 | could race with data DMA. Mapping the memory used for completion | ||
23 | indications with DMA_ATTR_WRITE_BARRIER would prevent the race. | ||
24 | |||
diff --git a/Documentation/cgroups.txt b/Documentation/cgroups.txt index 31d12e21ff8a..c298a6690e0d 100644 --- a/Documentation/cgroups.txt +++ b/Documentation/cgroups.txt | |||
@@ -500,8 +500,7 @@ post-attachment activity that requires memory allocations or blocking. | |||
500 | 500 | ||
501 | void fork(struct cgroup_subsys *ss, struct task_struct *task) | 501 | void fork(struct cgroup_subsys *ss, struct task_struct *task) |
502 | 502 | ||
503 | Called when a task is forked into a cgroup. Also called during | 503 | Called when a task is forked into a cgroup. |
504 | registration for all existing tasks. | ||
505 | 504 | ||
506 | void exit(struct cgroup_subsys *ss, struct task_struct *task) | 505 | void exit(struct cgroup_subsys *ss, struct task_struct *task) |
507 | 506 | ||
diff --git a/Documentation/controllers/devices.txt b/Documentation/controllers/devices.txt new file mode 100644 index 000000000000..4dcea42432c2 --- /dev/null +++ b/Documentation/controllers/devices.txt | |||
@@ -0,0 +1,48 @@ | |||
1 | Device Whitelist Controller | ||
2 | |||
3 | 1. Description: | ||
4 | |||
5 | Implement a cgroup to track and enforce open and mknod restrictions | ||
6 | on device files. A device cgroup associates a device access | ||
7 | whitelist with each cgroup. A whitelist entry has 4 fields. | ||
8 | 'type' is a (all), c (char), or b (block). 'all' means it applies | ||
9 | to all types and all major and minor numbers. Major and minor are | ||
10 | either an integer or * for all. Access is a composition of r | ||
11 | (read), w (write), and m (mknod). | ||
12 | |||
13 | The root device cgroup starts with rwm to 'all'. A child device | ||
14 | cgroup gets a copy of the parent. Administrators can then remove | ||
15 | devices from the whitelist or add new entries. A child cgroup can | ||
16 | never receive a device access which is denied its parent. However | ||
17 | when a device access is removed from a parent it will not also be | ||
18 | removed from the child(ren). | ||
19 | |||
20 | 2. User Interface | ||
21 | |||
22 | An entry is added using devices.allow, and removed using | ||
23 | devices.deny. For instance | ||
24 | |||
25 | echo 'c 1:3 mr' > /cgroups/1/devices.allow | ||
26 | |||
27 | allows cgroup 1 to read and mknod the device usually known as | ||
28 | /dev/null. Doing | ||
29 | |||
30 | echo a > /cgroups/1/devices.deny | ||
31 | |||
32 | will remove the default 'a *:* mrw' entry. | ||
33 | |||
34 | 3. Security | ||
35 | |||
36 | Any task can move itself between cgroups. This clearly won't | ||
37 | suffice, but we can decide the best way to adequately restrict | ||
38 | movement as people get some experience with this. We may just want | ||
39 | to require CAP_SYS_ADMIN, which at least is a separate bit from | ||
40 | CAP_MKNOD. We may want to just refuse moving to a cgroup which | ||
41 | isn't a descendent of the current one. Or we may want to use | ||
42 | CAP_MAC_ADMIN, since we really are trying to lock down root. | ||
43 | |||
44 | CAP_SYS_ADMIN is needed to modify the whitelist or move another | ||
45 | task to a new cgroup. (Again we'll probably want to change that). | ||
46 | |||
47 | A cgroup may not be granted more permissions than the cgroup's | ||
48 | parent has. | ||
diff --git a/Documentation/controllers/resource_counter.txt b/Documentation/controllers/resource_counter.txt new file mode 100644 index 000000000000..f196ac1d7d25 --- /dev/null +++ b/Documentation/controllers/resource_counter.txt | |||
@@ -0,0 +1,181 @@ | |||
1 | |||
2 | The Resource Counter | ||
3 | |||
4 | The resource counter, declared at include/linux/res_counter.h, | ||
5 | is supposed to facilitate the resource management by controllers | ||
6 | by providing common stuff for accounting. | ||
7 | |||
8 | This "stuff" includes the res_counter structure and routines | ||
9 | to work with it. | ||
10 | |||
11 | |||
12 | |||
13 | 1. Crucial parts of the res_counter structure | ||
14 | |||
15 | a. unsigned long long usage | ||
16 | |||
17 | The usage value shows the amount of a resource that is consumed | ||
18 | by a group at a given time. The units of measurement should be | ||
19 | determined by the controller that uses this counter. E.g. it can | ||
20 | be bytes, items or any other unit the controller operates on. | ||
21 | |||
22 | b. unsigned long long max_usage | ||
23 | |||
24 | The maximal value of the usage over time. | ||
25 | |||
26 | This value is useful when gathering statistical information about | ||
27 | the particular group, as it shows the actual resource requirements | ||
28 | for a particular group, not just some usage snapshot. | ||
29 | |||
30 | c. unsigned long long limit | ||
31 | |||
32 | The maximal allowed amount of resource to consume by the group. In | ||
33 | case the group requests more resources, so that the usage value | ||
34 | would exceed the limit, the resource allocation is rejected (see | ||
35 | the next section). | ||
36 | |||
37 | d. unsigned long long failcnt | ||
38 | |||
39 | The failcnt stands for "failures counter". This is the number of | ||
40 | resource allocation attempts that failed. | ||
41 | |||
42 | e. spinlock_t lock | ||
43 | |||
44 | Protects changes of the above values. | ||
45 | |||
46 | |||
47 | |||
48 | 2. Basic accounting routines | ||
49 | |||
50 | a. void res_counter_init(struct res_counter *rc) | ||
51 | |||
52 | Initializes the resource counter. As usual, should be the first | ||
53 | routine called for a new counter. | ||
54 | |||
55 | b. int res_counter_charge[_locked] | ||
56 | (struct res_counter *rc, unsigned long val) | ||
57 | |||
58 | When a resource is about to be allocated it has to be accounted | ||
59 | with the appropriate resource counter (controller should determine | ||
60 | which one to use on its own). This operation is called "charging". | ||
61 | |||
62 | It is not very important which operation - resource allocation | ||
63 | or charging - is performed first, but | ||
64 | * if the allocation is performed first, this may create a | ||
65 | temporary resource over-usage by the time resource counter is | ||
66 | charged; | ||
67 | * if the charging is performed first, then it should be uncharged | ||
68 | on the error path (if one is taken). | ||
69 | |||
70 | c. void res_counter_uncharge[_locked] | ||
71 | (struct res_counter *rc, unsigned long val) | ||
72 | |||
73 | When a resource is released (freed) it should be de-accounted | ||
74 | from the resource counter it was accounted to. This is called | ||
75 | "uncharging". | ||
76 | |||
77 | The _locked routines imply that the res_counter->lock is taken. | ||
78 | |||
79 | |||
80 | 2.1 Other accounting routines | ||
81 | |||
82 | There are more routines that may help you with common needs, like | ||
83 | checking whether the limit is reached or resetting the max_usage | ||
84 | value. They are all declared in include/linux/res_counter.h. | ||
85 | |||
86 | |||
87 | |||
88 | 3. Analyzing the resource counter registrations | ||
89 | |||
90 | a. If the failcnt value constantly grows, this means that the counter's | ||
91 | limit is too tight. Either the group is misbehaving and consumes too | ||
92 | many resources, or the configuration is not suitable for the group | ||
93 | and the limit should be increased. | ||
94 | |||
95 | b. The max_usage value can be used to quickly tune the group. One may | ||
96 | set the limits to maximal values and either load the container with | ||
97 | a common pattern or leave one for a while. After this the max_usage | ||
98 | value shows the amount of memory the container would require during | ||
99 | its common activity. | ||
100 | |||
101 | Setting the limit a bit above this value gives a pretty good | ||
102 | configuration that works in most of the cases. | ||
103 | |||
104 | c. If the max_usage is much less than the limit, but the failcnt value | ||
105 | is growing, then the group tries to allocate a big chunk of resource | ||
106 | at once. | ||
107 | |||
108 | d. If the max_usage is much less than the limit, but the failcnt value | ||
109 | is 0, then this group is given too high a limit, which it does not | ||
110 | require. It is better to lower the limit a bit leaving more resource | ||
111 | for other groups. | ||
112 | |||
113 | |||
114 | |||
115 | 4. Communication with the control groups subsystem (cgroups) | ||
116 | |||
117 | All the resource controllers that are using cgroups and resource counters | ||
118 | should provide files (in the cgroup filesystem) to work with the resource | ||
119 | counter fields. They are recommended to adhere to the following rules: | ||
120 | |||
121 | a. File names | ||
122 | |||
123 | Field name File name | ||
124 | --------------------------------------------------- | ||
125 | usage usage_in_<unit_of_measurement> | ||
126 | max_usage max_usage_in_<unit_of_measurement> | ||
127 | limit limit_in_<unit_of_measurement> | ||
128 | failcnt failcnt | ||
129 | lock no file :) | ||
130 | |||
131 | b. Reading from file should show the corresponding field value in the | ||
132 | appropriate format. | ||
133 | |||
134 | c. Writing to file | ||
135 | |||
136 | Field Expected behavior | ||
137 | ---------------------------------- | ||
138 | usage prohibited | ||
139 | max_usage reset to usage | ||
140 | limit set the limit | ||
141 | failcnt reset to zero | ||
142 | |||
143 | |||
144 | |||
145 | 5. Usage example | ||
146 | |||
147 | a. Declare a task group (take a look at cgroups subsystem for this) and | ||
148 | fold a res_counter into it | ||
149 | |||
150 | struct my_group { | ||
151 | struct res_counter res; | ||
152 | |||
153 | <other fields> | ||
154 | } | ||
155 | |||
156 | b. Put hooks in resource allocation/release paths | ||
157 | |||
158 | int alloc_something(...) | ||
159 | { | ||
160 | if (res_counter_charge(res_counter_ptr, amount) < 0) | ||
161 | return -ENOMEM; | ||
162 | |||
163 | <allocate the resource and return to the caller> | ||
164 | } | ||
165 | |||
166 | void release_something(...) | ||
167 | { | ||
168 | res_counter_uncharge(res_counter_ptr, amount); | ||
169 | |||
170 | <release the resource> | ||
171 | } | ||
172 | |||
173 | In order to keep the usage value self-consistent, both the | ||
174 | "res_counter_ptr" and the "amount" in release_something() should be | ||
175 | the same as they were in the alloc_something() when the releasing | ||
176 | resource was allocated. | ||
177 | |||
178 | c. Provide the way to read res_counter values and set them (the cgroups | ||
179 | still can help with it). | ||
180 | |||
181 | d. Compile and run :) | ||
diff --git a/Documentation/cpusets.txt b/Documentation/cpusets.txt index aa854b9b18cd..fb7b361e6eea 100644 --- a/Documentation/cpusets.txt +++ b/Documentation/cpusets.txt | |||
@@ -171,6 +171,7 @@ files describing that cpuset: | |||
171 | - memory_migrate flag: if set, move pages to cpusets nodes | 171 | - memory_migrate flag: if set, move pages to cpusets nodes |
172 | - cpu_exclusive flag: is cpu placement exclusive? | 172 | - cpu_exclusive flag: is cpu placement exclusive? |
173 | - mem_exclusive flag: is memory placement exclusive? | 173 | - mem_exclusive flag: is memory placement exclusive? |
174 | - mem_hardwall flag: is memory allocation hardwalled | ||
174 | - memory_pressure: measure of how much paging pressure in cpuset | 175 | - memory_pressure: measure of how much paging pressure in cpuset |
175 | 176 | ||
176 | In addition, the root cpuset only has the following file: | 177 | In addition, the root cpuset only has the following file: |
@@ -222,17 +223,18 @@ If a cpuset is cpu or mem exclusive, no other cpuset, other than | |||
222 | a direct ancestor or descendent, may share any of the same CPUs or | 223 | a direct ancestor or descendent, may share any of the same CPUs or |
223 | Memory Nodes. | 224 | Memory Nodes. |
224 | 225 | ||
225 | A cpuset that is mem_exclusive restricts kernel allocations for | 226 | A cpuset that is mem_exclusive *or* mem_hardwall is "hardwalled", |
226 | page, buffer and other data commonly shared by the kernel across | 227 | i.e. it restricts kernel allocations for page, buffer and other data |
227 | multiple users. All cpusets, whether mem_exclusive or not, restrict | 228 | commonly shared by the kernel across multiple users. All cpusets, |
228 | allocations of memory for user space. This enables configuring a | 229 | whether hardwalled or not, restrict allocations of memory for user |
229 | system so that several independent jobs can share common kernel data, | 230 | space. This enables configuring a system so that several independent |
230 | such as file system pages, while isolating each jobs user allocation in | 231 | jobs can share common kernel data, such as file system pages, while |
231 | its own cpuset. To do this, construct a large mem_exclusive cpuset to | 232 | isolating each job's user allocation in its own cpuset. To do this, |
232 | hold all the jobs, and construct child, non-mem_exclusive cpusets for | 233 | construct a large mem_exclusive cpuset to hold all the jobs, and |
233 | each individual job. Only a small amount of typical kernel memory, | 234 | construct child, non-mem_exclusive cpusets for each individual job. |
234 | such as requests from interrupt handlers, is allowed to be taken | 235 | Only a small amount of typical kernel memory, such as requests from |
235 | outside even a mem_exclusive cpuset. | 236 | interrupt handlers, is allowed to be taken outside even a |
237 | mem_exclusive cpuset. | ||
236 | 238 | ||
237 | 239 | ||
238 | 1.5 What is memory_pressure ? | 240 | 1.5 What is memory_pressure ? |
@@ -707,7 +709,7 @@ Now you want to do something with this cpuset. | |||
707 | 709 | ||
708 | In this directory you can find several files: | 710 | In this directory you can find several files: |
709 | # ls | 711 | # ls |
710 | cpus cpu_exclusive mems mem_exclusive tasks | 712 | cpus cpu_exclusive mems mem_exclusive mem_hardwall tasks |
711 | 713 | ||
712 | Reading them will give you information about the state of this cpuset: | 714 | Reading them will give you information about the state of this cpuset: |
713 | the CPUs and Memory Nodes it can use, the processes that are using | 715 | the CPUs and Memory Nodes it can use, the processes that are using |
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index e5f3d918316f..3ce193f86565 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt | |||
@@ -627,8 +627,7 @@ and is between 256 and 4096 characters. It is defined in the file | |||
627 | eata= [HW,SCSI] | 627 | eata= [HW,SCSI] |
628 | 628 | ||
629 | edd= [EDD] | 629 | edd= [EDD] |
630 | Format: {"of[f]" | "sk[ipmbr]"} | 630 | Format: {"off" | "on" | "skip[mbr]"} |
631 | See comment in arch/i386/boot/edd.S | ||
632 | 631 | ||
633 | eisa_irq_edge= [PARISC,HW] | 632 | eisa_irq_edge= [PARISC,HW] |
634 | See header of drivers/parisc/eisa.c. | 633 | See header of drivers/parisc/eisa.c. |
@@ -1389,6 +1388,13 @@ and is between 256 and 4096 characters. It is defined in the file | |||
1389 | 1388 | ||
1390 | nr_uarts= [SERIAL] maximum number of UARTs to be registered. | 1389 | nr_uarts= [SERIAL] maximum number of UARTs to be registered. |
1391 | 1390 | ||
1391 | olpc_ec_timeout= [OLPC] ms delay when issuing EC commands | ||
1392 | Rather than timing out after 20 ms if an EC | ||
1393 | command is not properly ACKed, override the length | ||
1394 | of the timeout. We have interrupts disabled while | ||
1395 | waiting for the ACK, so if this is set too high | ||
1396 | interrupts *may* be lost! | ||
1397 | |||
1392 | opl3= [HW,OSS] | 1398 | opl3= [HW,OSS] |
1393 | Format: <io> | 1399 | Format: <io> |
1394 | 1400 | ||
diff --git a/Documentation/keys-request-key.txt b/Documentation/keys-request-key.txt index 266955d23ee6..09b55e461740 100644 --- a/Documentation/keys-request-key.txt +++ b/Documentation/keys-request-key.txt | |||
@@ -11,26 +11,29 @@ request_key*(): | |||
11 | 11 | ||
12 | struct key *request_key(const struct key_type *type, | 12 | struct key *request_key(const struct key_type *type, |
13 | const char *description, | 13 | const char *description, |
14 | const char *callout_string); | 14 | const char *callout_info); |
15 | 15 | ||
16 | or: | 16 | or: |
17 | 17 | ||
18 | struct key *request_key_with_auxdata(const struct key_type *type, | 18 | struct key *request_key_with_auxdata(const struct key_type *type, |
19 | const char *description, | 19 | const char *description, |
20 | const char *callout_string, | 20 | const char *callout_info, |
21 | size_t callout_len, | ||
21 | void *aux); | 22 | void *aux); |
22 | 23 | ||
23 | or: | 24 | or: |
24 | 25 | ||
25 | struct key *request_key_async(const struct key_type *type, | 26 | struct key *request_key_async(const struct key_type *type, |
26 | const char *description, | 27 | const char *description, |
27 | const char *callout_string); | 28 | const char *callout_info, |
29 | size_t callout_len); | ||
28 | 30 | ||
29 | or: | 31 | or: |
30 | 32 | ||
31 | struct key *request_key_async_with_auxdata(const struct key_type *type, | 33 | struct key *request_key_async_with_auxdata(const struct key_type *type, |
32 | const char *description, | 34 | const char *description, |
33 | const char *callout_string, | 35 | const char *callout_info, |
36 | size_t callout_len, | ||
34 | void *aux); | 37 | void *aux); |
35 | 38 | ||
36 | Or by userspace invoking the request_key system call: | 39 | Or by userspace invoking the request_key system call: |
diff --git a/Documentation/keys.txt b/Documentation/keys.txt index 51652d39e61c..d5c7a57d1700 100644 --- a/Documentation/keys.txt +++ b/Documentation/keys.txt | |||
@@ -170,7 +170,8 @@ The key service provides a number of features besides keys: | |||
170 | amount of description and payload space that can be consumed. | 170 | amount of description and payload space that can be consumed. |
171 | 171 | ||
172 | The user can view information on this and other statistics through procfs | 172 | The user can view information on this and other statistics through procfs |
173 | files. | 173 | files. The root user may also alter the quota limits through sysctl files |
174 | (see the section "New procfs files"). | ||
174 | 175 | ||
175 | Process-specific and thread-specific keyrings are not counted towards a | 176 | Process-specific and thread-specific keyrings are not counted towards a |
176 | user's quota. | 177 | user's quota. |
@@ -329,6 +330,27 @@ about the status of the key service: | |||
329 | <bytes>/<max> Key size quota | 330 | <bytes>/<max> Key size quota |
330 | 331 | ||
331 | 332 | ||
333 | Four new sysctl files have been added also for the purpose of controlling the | ||
334 | quota limits on keys: | ||
335 | |||
336 | (*) /proc/sys/kernel/keys/root_maxkeys | ||
337 | /proc/sys/kernel/keys/root_maxbytes | ||
338 | |||
339 | These files hold the maximum number of keys that root may have and the | ||
340 | maximum total number of bytes of data that root may have stored in those | ||
341 | keys. | ||
342 | |||
343 | (*) /proc/sys/kernel/keys/maxkeys | ||
344 | /proc/sys/kernel/keys/maxbytes | ||
345 | |||
346 | These files hold the maximum number of keys that each non-root user may | ||
347 | have and the maximum total number of bytes of data that each of those | ||
348 | users may have stored in their keys. | ||
349 | |||
350 | Root may alter these by writing each new limit as a decimal number string to | ||
351 | the appropriate file. | ||
352 | |||
353 | |||
332 | =============================== | 354 | =============================== |
333 | USERSPACE SYSTEM CALL INTERFACE | 355 | USERSPACE SYSTEM CALL INTERFACE |
334 | =============================== | 356 | =============================== |
@@ -711,6 +733,27 @@ The keyctl syscall functions are: | |||
711 | The assumed authoritative key is inherited across fork and exec. | 733 | The assumed authoritative key is inherited across fork and exec. |
712 | 734 | ||
713 | 735 | ||
736 | (*) Get the LSM security context attached to a key. | ||
737 | |||
738 | long keyctl(KEYCTL_GET_SECURITY, key_serial_t key, char *buffer, | ||
739 | size_t buflen) | ||
740 | |||
741 | This function returns a string that represents the LSM security context | ||
742 | attached to a key in the buffer provided. | ||
743 | |||
744 | Unless there's an error, it always returns the amount of data it could | ||
745 | produce, even if that's too big for the buffer, but it won't copy more | ||
746 | than requested to userspace. If the buffer pointer is NULL then no copy | ||
747 | will take place. | ||
748 | |||
749 | A NUL character is included at the end of the string if the buffer is | ||
750 | sufficiently big. This is included in the returned count. If no LSM is | ||
751 | in force then an empty string will be returned. | ||
752 | |||
753 | A process must have view permission on the key for this function to be | ||
754 | successful. | ||
755 | |||
756 | |||
714 | =============== | 757 | =============== |
715 | KERNEL SERVICES | 758 | KERNEL SERVICES |
716 | =============== | 759 | =============== |
@@ -771,7 +814,7 @@ payload contents" for more information. | |||
771 | 814 | ||
772 | struct key *request_key(const struct key_type *type, | 815 | struct key *request_key(const struct key_type *type, |
773 | const char *description, | 816 | const char *description, |
774 | const char *callout_string); | 817 | const char *callout_info); |
775 | 818 | ||
776 | This is used to request a key or keyring with a description that matches | 819 | This is used to request a key or keyring with a description that matches |
777 | the description specified according to the key type's match function. This | 820 | the description specified according to the key type's match function. This |
@@ -793,24 +836,28 @@ payload contents" for more information. | |||
793 | 836 | ||
794 | struct key *request_key_with_auxdata(const struct key_type *type, | 837 | struct key *request_key_with_auxdata(const struct key_type *type, |
795 | const char *description, | 838 | const char *description, |
796 | const char *callout_string, | 839 | const void *callout_info, |
840 | size_t callout_len, | ||
797 | void *aux); | 841 | void *aux); |
798 | 842 | ||
799 | This is identical to request_key(), except that the auxiliary data is | 843 | This is identical to request_key(), except that the auxiliary data is |
800 | passed to the key_type->request_key() op if it exists. | 844 | passed to the key_type->request_key() op if it exists, and the callout_info |
845 | is a blob of length callout_len, if given (the length may be 0). | ||
801 | 846 | ||
802 | 847 | ||
803 | (*) A key can be requested asynchronously by calling one of: | 848 | (*) A key can be requested asynchronously by calling one of: |
804 | 849 | ||
805 | struct key *request_key_async(const struct key_type *type, | 850 | struct key *request_key_async(const struct key_type *type, |
806 | const char *description, | 851 | const char *description, |
807 | const char *callout_string); | 852 | const void *callout_info, |
853 | size_t callout_len); | ||
808 | 854 | ||
809 | or: | 855 | or: |
810 | 856 | ||
811 | struct key *request_key_async_with_auxdata(const struct key_type *type, | 857 | struct key *request_key_async_with_auxdata(const struct key_type *type, |
812 | const char *description, | 858 | const char *description, |
813 | const char *callout_string, | 859 | const char *callout_info, |
860 | size_t callout_len, | ||
814 | void *aux); | 861 | void *aux); |
815 | 862 | ||
816 | which are asynchronous equivalents of request_key() and | 863 | which are asynchronous equivalents of request_key() and |
diff --git a/Documentation/oops-tracing.txt b/Documentation/oops-tracing.txt index 7f60dfe642ca..b152e81da592 100644 --- a/Documentation/oops-tracing.txt +++ b/Documentation/oops-tracing.txt | |||
@@ -253,6 +253,10 @@ characters, each representing a particular tainted value. | |||
253 | 253 | ||
254 | 8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG. | 254 | 8: 'D' if the kernel has died recently, i.e. there was an OOPS or BUG. |
255 | 255 | ||
256 | 9: 'A' if the ACPI table has been overridden. | ||
257 | |||
258 | 10: 'W' if a warning has previously been issued by the kernel. | ||
259 | |||
256 | The primary reason for the 'Tainted: ' string is to tell kernel | 260 | The primary reason for the 'Tainted: ' string is to tell kernel |
257 | debuggers if this is a clean kernel or if anything unusual has | 261 | debuggers if this is a clean kernel or if anything unusual has |
258 | occurred. Tainting is permanent: even if an offending module is | 262 | occurred. Tainting is permanent: even if an offending module is |
diff --git a/Documentation/sysrq.txt b/Documentation/sysrq.txt index 10c8f6922ef4..5ce0952aa065 100644 --- a/Documentation/sysrq.txt +++ b/Documentation/sysrq.txt | |||
@@ -85,6 +85,8 @@ On all - write a character to /proc/sysrq-trigger. e.g.: | |||
85 | 'k' - Secure Access Key (SAK) Kills all programs on the current virtual | 85 | 'k' - Secure Access Key (SAK) Kills all programs on the current virtual |
86 | console. NOTE: See important comments below in SAK section. | 86 | console. NOTE: See important comments below in SAK section. |
87 | 87 | ||
88 | 'l' - Shows a stack backtrace for all active CPUs. | ||
89 | |||
88 | 'm' - Will dump current memory info to your console. | 90 | 'm' - Will dump current memory info to your console. |
89 | 91 | ||
90 | 'n' - Used to make RT tasks nice-able | 92 | 'n' - Used to make RT tasks nice-able |
diff --git a/MAINTAINERS b/MAINTAINERS index c1dd1ae7b133..d103766f3b40 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -2694,7 +2694,7 @@ P: David Howells | |||
2694 | M: dhowells@redhat.com | 2694 | M: dhowells@redhat.com |
2695 | P: Koichi Yasutake | 2695 | P: Koichi Yasutake |
2696 | M: yasutake.koichi@jp.panasonic.com | 2696 | M: yasutake.koichi@jp.panasonic.com |
2697 | L: linux-am33-list@redhat.com | 2697 | L: linux-am33-list@redhat.com (moderated for non-subscribers) |
2698 | W: ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/ | 2698 | W: ftp://ftp.redhat.com/pub/redhat/gnupro/AM33/ |
2699 | S: Maintained | 2699 | S: Maintained |
2700 | 2700 | ||
diff --git a/arch/Kconfig b/arch/Kconfig index 694c9af520bb..3ea332b009e5 100644 --- a/arch/Kconfig +++ b/arch/Kconfig | |||
@@ -36,3 +36,6 @@ config HAVE_KPROBES | |||
36 | 36 | ||
37 | config HAVE_KRETPROBES | 37 | config HAVE_KRETPROBES |
38 | def_bool n | 38 | def_bool n |
39 | |||
40 | config HAVE_DMA_ATTRS | ||
41 | def_bool n | ||
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c index 6c56c754a0b5..4b18cd94d59d 100644 --- a/arch/alpha/kernel/asm-offsets.c +++ b/arch/alpha/kernel/asm-offsets.c | |||
@@ -8,13 +8,9 @@ | |||
8 | #include <linux/stddef.h> | 8 | #include <linux/stddef.h> |
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/ptrace.h> | 10 | #include <linux/ptrace.h> |
11 | #include <linux/kbuild.h> | ||
11 | #include <asm/io.h> | 12 | #include <asm/io.h> |
12 | 13 | ||
13 | #define DEFINE(sym, val) \ | ||
14 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
15 | |||
16 | #define BLANK() asm volatile("\n->" : : ) | ||
17 | |||
18 | void foo(void) | 14 | void foo(void) |
19 | { | 15 | { |
20 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | 16 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c index baf57563b14c..36ab22a7ea12 100644 --- a/arch/alpha/kernel/pci.c +++ b/arch/alpha/kernel/pci.c | |||
@@ -514,8 +514,8 @@ sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn) | |||
514 | 514 | ||
515 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 515 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
516 | { | 516 | { |
517 | unsigned long start = pci_resource_start(dev, bar); | 517 | resource_size_t start = pci_resource_start(dev, bar); |
518 | unsigned long len = pci_resource_len(dev, bar); | 518 | resource_size_t len = pci_resource_len(dev, bar); |
519 | unsigned long flags = pci_resource_flags(dev, bar); | 519 | unsigned long flags = pci_resource_flags(dev, bar); |
520 | 520 | ||
521 | if (!len || !start) | 521 | if (!len || !start) |
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c index 0a0d2479274b..4a881258bb17 100644 --- a/arch/arm/kernel/asm-offsets.c +++ b/arch/arm/kernel/asm-offsets.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <asm/thread_info.h> | 16 | #include <asm/thread_info.h> |
17 | #include <asm/memory.h> | 17 | #include <asm/memory.h> |
18 | #include <asm/procinfo.h> | 18 | #include <asm/procinfo.h> |
19 | #include <linux/kbuild.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * Make sure that the compiler and target are compatible. | 22 | * Make sure that the compiler and target are compatible. |
@@ -35,13 +36,6 @@ | |||
35 | #error Known good compilers: 3.3 | 36 | #error Known good compilers: 3.3 |
36 | #endif | 37 | #endif |
37 | 38 | ||
38 | /* Use marker if you need to separate the values later */ | ||
39 | |||
40 | #define DEFINE(sym, val) \ | ||
41 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
42 | |||
43 | #define BLANK() asm volatile("\n->" : : ) | ||
44 | |||
45 | int main(void) | 39 | int main(void) |
46 | { | 40 | { |
47 | DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); | 41 | DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm)); |
diff --git a/arch/arm/kernel/atags.c b/arch/arm/kernel/atags.c index e2e934c38080..64c420805e6f 100644 --- a/arch/arm/kernel/atags.c +++ b/arch/arm/kernel/atags.c | |||
@@ -35,7 +35,7 @@ create_proc_entries(void) | |||
35 | { | 35 | { |
36 | struct proc_dir_entry* tags_entry; | 36 | struct proc_dir_entry* tags_entry; |
37 | 37 | ||
38 | tags_entry = create_proc_read_entry("atags", 0400, &proc_root, read_buffer, &tags_buffer); | 38 | tags_entry = create_proc_read_entry("atags", 0400, NULL, read_buffer, &tags_buffer); |
39 | if (!tags_entry) | 39 | if (!tags_entry) |
40 | return -ENOMEM; | 40 | return -ENOMEM; |
41 | 41 | ||
diff --git a/arch/arm/kernel/ecard.c b/arch/arm/kernel/ecard.c index f56d48c451ea..a53c0aba5c14 100644 --- a/arch/arm/kernel/ecard.c +++ b/arch/arm/kernel/ecard.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/mm.h> | 37 | #include <linux/mm.h> |
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/proc_fs.h> | 39 | #include <linux/proc_fs.h> |
40 | #include <linux/seq_file.h> | ||
40 | #include <linux/device.h> | 41 | #include <linux/device.h> |
41 | #include <linux/init.h> | 42 | #include <linux/init.h> |
42 | #include <linux/mutex.h> | 43 | #include <linux/mutex.h> |
@@ -723,17 +724,14 @@ unsigned int __ecard_address(ecard_t *ec, card_type_t type, card_speed_t speed) | |||
723 | return address; | 724 | return address; |
724 | } | 725 | } |
725 | 726 | ||
726 | static int ecard_prints(char *buffer, ecard_t *ec) | 727 | static int ecard_prints(struct seq_file *m, ecard_t *ec) |
727 | { | 728 | { |
728 | char *start = buffer; | 729 | seq_printf(m, " %d: %s ", ec->slot_no, ec->easi ? "EASI" : " "); |
729 | |||
730 | buffer += sprintf(buffer, " %d: %s ", ec->slot_no, | ||
731 | ec->easi ? "EASI" : " "); | ||
732 | 730 | ||
733 | if (ec->cid.id == 0) { | 731 | if (ec->cid.id == 0) { |
734 | struct in_chunk_dir incd; | 732 | struct in_chunk_dir incd; |
735 | 733 | ||
736 | buffer += sprintf(buffer, "[%04X:%04X] ", | 734 | seq_printf(m, "[%04X:%04X] ", |
737 | ec->cid.manufacturer, ec->cid.product); | 735 | ec->cid.manufacturer, ec->cid.product); |
738 | 736 | ||
739 | if (!ec->card_desc && ec->cid.cd && | 737 | if (!ec->card_desc && ec->cid.cd && |
@@ -744,43 +742,43 @@ static int ecard_prints(char *buffer, ecard_t *ec) | |||
744 | strcpy((char *)ec->card_desc, incd.d.string); | 742 | strcpy((char *)ec->card_desc, incd.d.string); |
745 | } | 743 | } |
746 | 744 | ||
747 | buffer += sprintf(buffer, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*"); | 745 | seq_printf(m, "%s\n", ec->card_desc ? ec->card_desc : "*unknown*"); |
748 | } else | 746 | } else |
749 | buffer += sprintf(buffer, "Simple card %d\n", ec->cid.id); | 747 | seq_printf(m, "Simple card %d\n", ec->cid.id); |
750 | 748 | ||
751 | return buffer - start; | 749 | return 0; |
752 | } | 750 | } |
753 | 751 | ||
754 | static int get_ecard_dev_info(char *buf, char **start, off_t pos, int count) | 752 | static int ecard_devices_proc_show(struct seq_file *m, void *v) |
755 | { | 753 | { |
756 | ecard_t *ec = cards; | 754 | ecard_t *ec = cards; |
757 | off_t at = 0; | 755 | |
758 | int len, cnt; | 756 | while (ec) { |
759 | 757 | ecard_prints(m, ec); | |
760 | cnt = 0; | ||
761 | while (ec && count > cnt) { | ||
762 | len = ecard_prints(buf, ec); | ||
763 | at += len; | ||
764 | if (at >= pos) { | ||
765 | if (!*start) { | ||
766 | *start = buf + (pos - (at - len)); | ||
767 | cnt = at - pos; | ||
768 | } else | ||
769 | cnt += len; | ||
770 | buf += len; | ||
771 | } | ||
772 | ec = ec->next; | 758 | ec = ec->next; |
773 | } | 759 | } |
774 | return (count > cnt) ? cnt : count; | 760 | return 0; |
775 | } | 761 | } |
776 | 762 | ||
763 | static int ecard_devices_proc_open(struct inode *inode, struct file *file) | ||
764 | { | ||
765 | return single_open(file, ecard_devices_proc_show, NULL); | ||
766 | } | ||
767 | |||
768 | static const struct file_operations bus_ecard_proc_fops = { | ||
769 | .owner = THIS_MODULE, | ||
770 | .open = ecard_devices_proc_open, | ||
771 | .read = seq_read, | ||
772 | .llseek = seq_lseek, | ||
773 | .release = single_release, | ||
774 | }; | ||
775 | |||
777 | static struct proc_dir_entry *proc_bus_ecard_dir = NULL; | 776 | static struct proc_dir_entry *proc_bus_ecard_dir = NULL; |
778 | 777 | ||
779 | static void ecard_proc_init(void) | 778 | static void ecard_proc_init(void) |
780 | { | 779 | { |
781 | proc_bus_ecard_dir = proc_mkdir("ecard", proc_bus); | 780 | proc_bus_ecard_dir = proc_mkdir("bus/ecard", NULL); |
782 | create_proc_info_entry("devices", 0, proc_bus_ecard_dir, | 781 | proc_create("devices", 0, proc_bus_ecard_dir, &bus_ecard_proc_fops); |
783 | get_ecard_dev_info); | ||
784 | } | 782 | } |
785 | 783 | ||
786 | #define ec_set_resource(ec,nr,st,sz) \ | 784 | #define ec_set_resource(ec,nr,st,sz) \ |
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c index 4143828a9684..c6b94f60e0b2 100644 --- a/arch/arm/mach-davinci/clock.c +++ b/arch/arm/mach-davinci/clock.c | |||
@@ -311,11 +311,7 @@ static const struct file_operations proc_davinci_ck_operations = { | |||
311 | 311 | ||
312 | static int __init davinci_ck_proc_init(void) | 312 | static int __init davinci_ck_proc_init(void) |
313 | { | 313 | { |
314 | struct proc_dir_entry *entry; | 314 | proc_create("davinci_clocks", 0, NULL, &proc_davinci_ck_operations); |
315 | |||
316 | entry = create_proc_entry("davinci_clocks", 0, NULL); | ||
317 | if (entry) | ||
318 | entry->proc_fops = &proc_davinci_ck_operations; | ||
319 | return 0; | 315 | return 0; |
320 | 316 | ||
321 | } | 317 | } |
diff --git a/arch/arm/mm/iomap.c b/arch/arm/mm/iomap.c index 62066f3020c8..7429f8c01015 100644 --- a/arch/arm/mm/iomap.c +++ b/arch/arm/mm/iomap.c | |||
@@ -26,8 +26,8 @@ EXPORT_SYMBOL(ioport_unmap); | |||
26 | #ifdef CONFIG_PCI | 26 | #ifdef CONFIG_PCI |
27 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 27 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
28 | { | 28 | { |
29 | unsigned long start = pci_resource_start(dev, bar); | 29 | resource_size_t start = pci_resource_start(dev, bar); |
30 | unsigned long len = pci_resource_len(dev, bar); | 30 | resource_size_t len = pci_resource_len(dev, bar); |
31 | unsigned long flags = pci_resource_flags(dev, bar); | 31 | unsigned long flags = pci_resource_flags(dev, bar); |
32 | 32 | ||
33 | if (!len || !start) | 33 | if (!len || !start) |
diff --git a/arch/avr32/kernel/asm-offsets.c b/arch/avr32/kernel/asm-offsets.c index 078cd33f467b..e4796c67a831 100644 --- a/arch/avr32/kernel/asm-offsets.c +++ b/arch/avr32/kernel/asm-offsets.c | |||
@@ -5,14 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/thread_info.h> | 7 | #include <linux/thread_info.h> |
8 | 8 | #include <linux/kbuild.h> | |
9 | #define DEFINE(sym, val) \ | ||
10 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
11 | |||
12 | #define BLANK() asm volatile("\n->" : : ) | ||
13 | |||
14 | #define OFFSET(sym, str, mem) \ | ||
15 | DEFINE(sym, offsetof(struct str, mem)); | ||
16 | 9 | ||
17 | void foo(void) | 10 | void foo(void) |
18 | { | 11 | { |
diff --git a/arch/avr32/mm/tlb.c b/arch/avr32/mm/tlb.c index b835257a8fa3..cd12edbea9f2 100644 --- a/arch/avr32/mm/tlb.c +++ b/arch/avr32/mm/tlb.c | |||
@@ -369,11 +369,7 @@ static const struct file_operations proc_tlb_operations = { | |||
369 | 369 | ||
370 | static int __init proctlb_init(void) | 370 | static int __init proctlb_init(void) |
371 | { | 371 | { |
372 | struct proc_dir_entry *entry; | 372 | proc_create("tlb", 0, NULL, &proc_tlb_operations); |
373 | |||
374 | entry = create_proc_entry("tlb", 0, NULL); | ||
375 | if (entry) | ||
376 | entry->proc_fops = &proc_tlb_operations; | ||
377 | return 0; | 373 | return 0; |
378 | } | 374 | } |
379 | late_initcall(proctlb_init); | 375 | late_initcall(proctlb_init); |
diff --git a/arch/blackfin/kernel/asm-offsets.c b/arch/blackfin/kernel/asm-offsets.c index b56b2741cdea..721f15f3cebf 100644 --- a/arch/blackfin/kernel/asm-offsets.c +++ b/arch/blackfin/kernel/asm-offsets.c | |||
@@ -34,8 +34,7 @@ | |||
34 | #include <linux/hardirq.h> | 34 | #include <linux/hardirq.h> |
35 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
36 | #include <linux/thread_info.h> | 36 | #include <linux/thread_info.h> |
37 | 37 | #include <linux/kbuild.h> | |
38 | #define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
39 | 38 | ||
40 | int main(void) | 39 | int main(void) |
41 | { | 40 | { |
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c index d1fa24401dc6..cb9d883d493c 100644 --- a/arch/blackfin/kernel/signal.c +++ b/arch/blackfin/kernel/signal.c | |||
@@ -212,7 +212,7 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info, | |||
212 | 212 | ||
213 | /* Set up registers for signal handler */ | 213 | /* Set up registers for signal handler */ |
214 | wrusp((unsigned long)frame); | 214 | wrusp((unsigned long)frame); |
215 | if (get_personality & FDPIC_FUNCPTRS) { | 215 | if (current->personality & FDPIC_FUNCPTRS) { |
216 | struct fdpic_func_descriptor __user *funcptr = | 216 | struct fdpic_func_descriptor __user *funcptr = |
217 | (struct fdpic_func_descriptor *) ka->sa.sa_handler; | 217 | (struct fdpic_func_descriptor *) ka->sa.sa_handler; |
218 | __get_user(regs->pc, &funcptr->text); | 218 | __get_user(regs->pc, &funcptr->text); |
diff --git a/arch/cris/kernel/profile.c b/arch/cris/kernel/profile.c index aad0a9e5991a..44f7b4f79476 100644 --- a/arch/cris/kernel/profile.c +++ b/arch/cris/kernel/profile.c | |||
@@ -75,9 +75,9 @@ __init init_cris_profile(void) | |||
75 | 75 | ||
76 | sample_buffer_pos = sample_buffer; | 76 | sample_buffer_pos = sample_buffer; |
77 | 77 | ||
78 | entry = create_proc_entry("system_profile", S_IWUSR | S_IRUGO, NULL); | 78 | entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL, |
79 | &cris_proc_profile_operations); | ||
79 | if (entry) { | 80 | if (entry) { |
80 | entry->proc_fops = &cris_proc_profile_operations; | ||
81 | entry->size = SAMPLE_BUFFER_SIZE; | 81 | entry->size = SAMPLE_BUFFER_SIZE; |
82 | } | 82 | } |
83 | prof_running = 1; | 83 | prof_running = 1; |
diff --git a/arch/frv/kernel/asm-offsets.c b/arch/frv/kernel/asm-offsets.c index fbb19fc1af40..9de96843a278 100644 --- a/arch/frv/kernel/asm-offsets.c +++ b/arch/frv/kernel/asm-offsets.c | |||
@@ -7,15 +7,13 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/signal.h> | 8 | #include <linux/signal.h> |
9 | #include <linux/personality.h> | 9 | #include <linux/personality.h> |
10 | #include <linux/kbuild.h> | ||
10 | #include <asm/registers.h> | 11 | #include <asm/registers.h> |
11 | #include <asm/ucontext.h> | 12 | #include <asm/ucontext.h> |
12 | #include <asm/processor.h> | 13 | #include <asm/processor.h> |
13 | #include <asm/thread_info.h> | 14 | #include <asm/thread_info.h> |
14 | #include <asm/gdb-stub.h> | 15 | #include <asm/gdb-stub.h> |
15 | 16 | ||
16 | #define DEFINE(sym, val) \ | ||
17 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
18 | |||
19 | #define DEF_PTREG(sym, reg) \ | 17 | #define DEF_PTREG(sym, reg) \ |
20 | asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \ | 18 | asm volatile("\n->" #sym " %0 offsetof(struct pt_regs, " #reg ")" \ |
21 | : : "i" (offsetof(struct pt_regs, reg))) | 19 | : : "i" (offsetof(struct pt_regs, reg))) |
@@ -32,11 +30,6 @@ | |||
32 | asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \ | 30 | asm volatile("\n->" #sym " %0 offsetof(struct frv_frame0, " #reg ")" \ |
33 | : : "i" (offsetof(struct frv_frame0, reg))) | 31 | : : "i" (offsetof(struct frv_frame0, reg))) |
34 | 32 | ||
35 | #define BLANK() asm volatile("\n->" : : ) | ||
36 | |||
37 | #define OFFSET(sym, str, mem) \ | ||
38 | DEFINE(sym, offsetof(struct str, mem)); | ||
39 | |||
40 | void foo(void) | 33 | void foo(void) |
41 | { | 34 | { |
42 | /* offsets into the thread_info structure */ | 35 | /* offsets into the thread_info structure */ |
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c index d64bcaff54cd..3bdb368292a8 100644 --- a/arch/frv/kernel/signal.c +++ b/arch/frv/kernel/signal.c | |||
@@ -297,7 +297,7 @@ static int setup_frame(int sig, struct k_sigaction *ka, sigset_t *set) | |||
297 | __frame->lr = (unsigned long) &frame->retcode; | 297 | __frame->lr = (unsigned long) &frame->retcode; |
298 | __frame->gr8 = sig; | 298 | __frame->gr8 = sig; |
299 | 299 | ||
300 | if (get_personality & FDPIC_FUNCPTRS) { | 300 | if (current->personality & FDPIC_FUNCPTRS) { |
301 | struct fdpic_func_descriptor __user *funcptr = | 301 | struct fdpic_func_descriptor __user *funcptr = |
302 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 302 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; |
303 | __get_user(__frame->pc, &funcptr->text); | 303 | __get_user(__frame->pc, &funcptr->text); |
@@ -396,7 +396,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, | |||
396 | __frame->gr8 = sig; | 396 | __frame->gr8 = sig; |
397 | __frame->gr9 = (unsigned long) &frame->info; | 397 | __frame->gr9 = (unsigned long) &frame->info; |
398 | 398 | ||
399 | if (get_personality & FDPIC_FUNCPTRS) { | 399 | if (current->personality & FDPIC_FUNCPTRS) { |
400 | struct fdpic_func_descriptor __user *funcptr = | 400 | struct fdpic_func_descriptor __user *funcptr = |
401 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; | 401 | (struct fdpic_func_descriptor __user *) ka->sa.sa_handler; |
402 | __get_user(__frame->pc, &funcptr->text); | 402 | __get_user(__frame->pc, &funcptr->text); |
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index a40df80b2ebd..1d2dfe67d442 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c | |||
@@ -362,11 +362,8 @@ asmlinkage void memory_access_exception(unsigned long esr0, | |||
362 | #ifdef CONFIG_MMU | 362 | #ifdef CONFIG_MMU |
363 | unsigned long fixup; | 363 | unsigned long fixup; |
364 | 364 | ||
365 | if ((esr0 & ESRx_EC) == ESRx_EC_DATA_ACCESS) | 365 | fixup = search_exception_table(__frame->pc); |
366 | if (handle_misalignment(esr0, ear0, epcr0) == 0) | 366 | if (fixup) { |
367 | return; | ||
368 | |||
369 | if ((fixup = search_exception_table(__frame->pc)) != 0) { | ||
370 | __frame->pc = fixup; | 367 | __frame->pc = fixup; |
371 | return; | 368 | return; |
372 | } | 369 | } |
diff --git a/arch/frv/mb93090-mb00/pci-iomap.c b/arch/frv/mb93090-mb00/pci-iomap.c index 068fa04bd527..35f6df28351e 100644 --- a/arch/frv/mb93090-mb00/pci-iomap.c +++ b/arch/frv/mb93090-mb00/pci-iomap.c | |||
@@ -13,8 +13,8 @@ | |||
13 | 13 | ||
14 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 14 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
15 | { | 15 | { |
16 | unsigned long start = pci_resource_start(dev, bar); | 16 | resource_size_t start = pci_resource_start(dev, bar); |
17 | unsigned long len = pci_resource_len(dev, bar); | 17 | resource_size_t len = pci_resource_len(dev, bar); |
18 | unsigned long flags = pci_resource_flags(dev, bar); | 18 | unsigned long flags = pci_resource_flags(dev, bar); |
19 | 19 | ||
20 | if (!len || !start) | 20 | if (!len || !start) |
diff --git a/arch/frv/mm/unaligned.c b/arch/frv/mm/unaligned.c deleted file mode 100644 index 8f0375fc15a8..000000000000 --- a/arch/frv/mm/unaligned.c +++ /dev/null | |||
@@ -1,217 +0,0 @@ | |||
1 | /* unaligned.c: unalignment fixup handler for CPUs on which it is supported (FR451 only) | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/sched.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/user.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/linkage.h> | ||
20 | #include <linux/init.h> | ||
21 | |||
22 | #include <asm/setup.h> | ||
23 | #include <asm/system.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | |||
26 | #if 0 | ||
27 | #define kdebug(fmt, ...) printk("FDPIC "fmt"\n" ,##__VA_ARGS__ ) | ||
28 | #else | ||
29 | #define kdebug(fmt, ...) do {} while(0) | ||
30 | #endif | ||
31 | |||
32 | #define _MA_SIGNED 0x01 | ||
33 | #define _MA_HALF 0x02 | ||
34 | #define _MA_WORD 0x04 | ||
35 | #define _MA_DWORD 0x08 | ||
36 | #define _MA_SZ_MASK 0x0e | ||
37 | #define _MA_LOAD 0x10 | ||
38 | #define _MA_STORE 0x20 | ||
39 | #define _MA_UPDATE 0x40 | ||
40 | #define _MA_IMM 0x80 | ||
41 | |||
42 | #define _MA_LDxU _MA_LOAD | _MA_UPDATE | ||
43 | #define _MA_LDxI _MA_LOAD | _MA_IMM | ||
44 | #define _MA_STxU _MA_STORE | _MA_UPDATE | ||
45 | #define _MA_STxI _MA_STORE | _MA_IMM | ||
46 | |||
47 | static const uint8_t tbl_LDGRk_reg[0x40] = { | ||
48 | [0x02] = _MA_LOAD | _MA_HALF | _MA_SIGNED, /* LDSH @(GRi,GRj),GRk */ | ||
49 | [0x03] = _MA_LOAD | _MA_HALF, /* LDUH @(GRi,GRj),GRk */ | ||
50 | [0x04] = _MA_LOAD | _MA_WORD, /* LD @(GRi,GRj),GRk */ | ||
51 | [0x05] = _MA_LOAD | _MA_DWORD, /* LDD @(GRi,GRj),GRk */ | ||
52 | [0x12] = _MA_LDxU | _MA_HALF | _MA_SIGNED, /* LDSHU @(GRi,GRj),GRk */ | ||
53 | [0x13] = _MA_LDxU | _MA_HALF, /* LDUHU @(GRi,GRj),GRk */ | ||
54 | [0x14] = _MA_LDxU | _MA_WORD, /* LDU @(GRi,GRj),GRk */ | ||
55 | [0x15] = _MA_LDxU | _MA_DWORD, /* LDDU @(GRi,GRj),GRk */ | ||
56 | }; | ||
57 | |||
58 | static const uint8_t tbl_STGRk_reg[0x40] = { | ||
59 | [0x01] = _MA_STORE | _MA_HALF, /* STH @(GRi,GRj),GRk */ | ||
60 | [0x02] = _MA_STORE | _MA_WORD, /* ST @(GRi,GRj),GRk */ | ||
61 | [0x03] = _MA_STORE | _MA_DWORD, /* STD @(GRi,GRj),GRk */ | ||
62 | [0x11] = _MA_STxU | _MA_HALF, /* STHU @(GRi,GRj),GRk */ | ||
63 | [0x12] = _MA_STxU | _MA_WORD, /* STU @(GRi,GRj),GRk */ | ||
64 | [0x13] = _MA_STxU | _MA_DWORD, /* STDU @(GRi,GRj),GRk */ | ||
65 | }; | ||
66 | |||
67 | static const uint8_t tbl_LDSTGRk_imm[0x80] = { | ||
68 | [0x31] = _MA_LDxI | _MA_HALF | _MA_SIGNED, /* LDSHI @(GRi,d12),GRk */ | ||
69 | [0x32] = _MA_LDxI | _MA_WORD, /* LDI @(GRi,d12),GRk */ | ||
70 | [0x33] = _MA_LDxI | _MA_DWORD, /* LDDI @(GRi,d12),GRk */ | ||
71 | [0x36] = _MA_LDxI | _MA_HALF, /* LDUHI @(GRi,d12),GRk */ | ||
72 | [0x51] = _MA_STxI | _MA_HALF, /* STHI @(GRi,d12),GRk */ | ||
73 | [0x52] = _MA_STxI | _MA_WORD, /* STI @(GRi,d12),GRk */ | ||
74 | [0x53] = _MA_STxI | _MA_DWORD, /* STDI @(GRi,d12),GRk */ | ||
75 | }; | ||
76 | |||
77 | |||
78 | /*****************************************************************************/ | ||
79 | /* | ||
80 | * see if we can handle the exception by fixing up a misaligned memory access | ||
81 | */ | ||
82 | int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0) | ||
83 | { | ||
84 | unsigned long insn, addr, *greg; | ||
85 | int GRi, GRj, GRk, D12, op; | ||
86 | |||
87 | union { | ||
88 | uint64_t _64; | ||
89 | uint32_t _32[2]; | ||
90 | uint16_t _16; | ||
91 | uint8_t _8[8]; | ||
92 | } x; | ||
93 | |||
94 | if (!(esr0 & ESR0_EAV) || !(epcr0 & EPCR0_V) || !(ear0 & 7)) | ||
95 | return -EAGAIN; | ||
96 | |||
97 | epcr0 &= EPCR0_PC; | ||
98 | |||
99 | if (__frame->pc != epcr0) { | ||
100 | kdebug("MISALIGN: Execution not halted on excepting instruction\n"); | ||
101 | BUG(); | ||
102 | } | ||
103 | |||
104 | if (__get_user(insn, (unsigned long *) epcr0) < 0) | ||
105 | return -EFAULT; | ||
106 | |||
107 | /* determine the instruction type first */ | ||
108 | switch ((insn >> 18) & 0x7f) { | ||
109 | case 0x2: | ||
110 | /* LDx @(GRi,GRj),GRk */ | ||
111 | op = tbl_LDGRk_reg[(insn >> 6) & 0x3f]; | ||
112 | break; | ||
113 | |||
114 | case 0x3: | ||
115 | /* STx GRk,@(GRi,GRj) */ | ||
116 | op = tbl_STGRk_reg[(insn >> 6) & 0x3f]; | ||
117 | break; | ||
118 | |||
119 | default: | ||
120 | op = tbl_LDSTGRk_imm[(insn >> 18) & 0x7f]; | ||
121 | break; | ||
122 | } | ||
123 | |||
124 | if (!op) | ||
125 | return -EAGAIN; | ||
126 | |||
127 | kdebug("MISALIGN: pc=%08lx insn=%08lx ad=%08lx op=%02x\n", epcr0, insn, ear0, op); | ||
128 | |||
129 | memset(&x, 0xba, 8); | ||
130 | |||
131 | /* validate the instruction parameters */ | ||
132 | greg = (unsigned long *) &__frame->tbr; | ||
133 | |||
134 | GRi = (insn >> 12) & 0x3f; | ||
135 | GRk = (insn >> 25) & 0x3f; | ||
136 | |||
137 | if (GRi > 31 || GRk > 31) | ||
138 | return -ENOENT; | ||
139 | |||
140 | if (op & _MA_DWORD && GRk & 1) | ||
141 | return -EINVAL; | ||
142 | |||
143 | if (op & _MA_IMM) { | ||
144 | D12 = insn & 0xfff; | ||
145 | asm ("slli %0,#20,%0 ! srai %0,#20,%0" : "=r"(D12) : "0"(D12)); /* sign extend */ | ||
146 | addr = (GRi ? greg[GRi] : 0) + D12; | ||
147 | } | ||
148 | else { | ||
149 | GRj = (insn >> 0) & 0x3f; | ||
150 | if (GRj > 31) | ||
151 | return -ENOENT; | ||
152 | addr = (GRi ? greg[GRi] : 0) + (GRj ? greg[GRj] : 0); | ||
153 | } | ||
154 | |||
155 | if (addr != ear0) { | ||
156 | kdebug("MISALIGN: Calculated addr (%08lx) does not match EAR0 (%08lx)\n", | ||
157 | addr, ear0); | ||
158 | return -EFAULT; | ||
159 | } | ||
160 | |||
161 | /* check the address is okay */ | ||
162 | if (user_mode(__frame) && ___range_ok(ear0, 8) < 0) | ||
163 | return -EFAULT; | ||
164 | |||
165 | /* perform the memory op */ | ||
166 | if (op & _MA_STORE) { | ||
167 | /* perform a store */ | ||
168 | x._32[0] = 0; | ||
169 | if (GRk != 0) { | ||
170 | if (op & _MA_HALF) { | ||
171 | x._16 = greg[GRk]; | ||
172 | } | ||
173 | else { | ||
174 | x._32[0] = greg[GRk]; | ||
175 | } | ||
176 | } | ||
177 | if (op & _MA_DWORD) | ||
178 | x._32[1] = greg[GRk + 1]; | ||
179 | |||
180 | kdebug("MISALIGN: Store GR%d { %08x:%08x } -> %08lx (%dB)\n", | ||
181 | GRk, x._32[1], x._32[0], addr, op & _MA_SZ_MASK); | ||
182 | |||
183 | if (__memcpy_user((void *) addr, &x, op & _MA_SZ_MASK) != 0) | ||
184 | return -EFAULT; | ||
185 | } | ||
186 | else { | ||
187 | /* perform a load */ | ||
188 | if (__memcpy_user(&x, (void *) addr, op & _MA_SZ_MASK) != 0) | ||
189 | return -EFAULT; | ||
190 | |||
191 | if (op & _MA_HALF) { | ||
192 | if (op & _MA_SIGNED) | ||
193 | asm ("slli %0,#16,%0 ! srai %0,#16,%0" | ||
194 | : "=r"(x._32[0]) : "0"(x._16)); | ||
195 | else | ||
196 | asm ("sethi #0,%0" | ||
197 | : "=r"(x._32[0]) : "0"(x._16)); | ||
198 | } | ||
199 | |||
200 | kdebug("MISALIGN: Load %08lx (%dB) -> GR%d, { %08x:%08x }\n", | ||
201 | addr, op & _MA_SZ_MASK, GRk, x._32[1], x._32[0]); | ||
202 | |||
203 | if (GRk != 0) | ||
204 | greg[GRk] = x._32[0]; | ||
205 | if (op & _MA_DWORD) | ||
206 | greg[GRk + 1] = x._32[1]; | ||
207 | } | ||
208 | |||
209 | /* update the base pointer if required */ | ||
210 | if (op & _MA_UPDATE) | ||
211 | greg[GRi] = addr; | ||
212 | |||
213 | /* well... we've done that insn */ | ||
214 | __frame->pc = __frame->pc + 4; | ||
215 | |||
216 | return 0; | ||
217 | } /* end handle_misalignment() */ | ||
diff --git a/arch/h8300/kernel/asm-offsets.c b/arch/h8300/kernel/asm-offsets.c index fc30b4fd0914..2042552e0871 100644 --- a/arch/h8300/kernel/asm-offsets.c +++ b/arch/h8300/kernel/asm-offsets.c | |||
@@ -13,15 +13,11 @@ | |||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
16 | #include <linux/kbuild.h> | ||
16 | #include <asm/bootinfo.h> | 17 | #include <asm/bootinfo.h> |
17 | #include <asm/irq.h> | 18 | #include <asm/irq.h> |
18 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
19 | 20 | ||
20 | #define DEFINE(sym, val) \ | ||
21 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
22 | |||
23 | #define BLANK() asm volatile("\n->" : : ) | ||
24 | |||
25 | int main(void) | 21 | int main(void) |
26 | { | 22 | { |
27 | /* offsets into the task struct */ | 23 | /* offsets into the task struct */ |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 3aa6c821449a..0df5f6f75edf 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -19,6 +19,7 @@ config IA64 | |||
19 | select HAVE_OPROFILE | 19 | select HAVE_OPROFILE |
20 | select HAVE_KPROBES | 20 | select HAVE_KPROBES |
21 | select HAVE_KRETPROBES | 21 | select HAVE_KRETPROBES |
22 | select HAVE_DMA_ATTRS | ||
22 | select HAVE_KVM | 23 | select HAVE_KVM |
23 | default y | 24 | default y |
24 | help | 25 | help |
@@ -47,6 +48,9 @@ config MMU | |||
47 | config SWIOTLB | 48 | config SWIOTLB |
48 | bool | 49 | bool |
49 | 50 | ||
51 | config IOMMU_HELPER | ||
52 | bool | ||
53 | |||
50 | config GENERIC_LOCKBREAK | 54 | config GENERIC_LOCKBREAK |
51 | bool | 55 | bool |
52 | default y | 56 | default y |
@@ -615,7 +619,7 @@ config IRQ_PER_CPU | |||
615 | default y | 619 | default y |
616 | 620 | ||
617 | config IOMMU_HELPER | 621 | config IOMMU_HELPER |
618 | def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC) | 622 | def_bool (IA64_HP_ZX1 || IA64_HP_ZX1_SWIOTLB || IA64_GENERIC || SWIOTLB) |
619 | 623 | ||
620 | source "arch/ia64/hp/sim/Kconfig" | 624 | source "arch/ia64/hp/sim/Kconfig" |
621 | 625 | ||
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c index 8f6bcfe1dada..1c44ec2a1d58 100644 --- a/arch/ia64/hp/common/hwsw_iommu.c +++ b/arch/ia64/hp/common/hwsw_iommu.c | |||
@@ -20,10 +20,10 @@ | |||
20 | extern int swiotlb_late_init_with_default_size (size_t size); | 20 | extern int swiotlb_late_init_with_default_size (size_t size); |
21 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; | 21 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; |
22 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; | 22 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; |
23 | extern ia64_mv_dma_map_single swiotlb_map_single; | 23 | extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs; |
24 | extern ia64_mv_dma_unmap_single swiotlb_unmap_single; | 24 | extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs; |
25 | extern ia64_mv_dma_map_sg swiotlb_map_sg; | 25 | extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs; |
26 | extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; | 26 | extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs; |
27 | extern ia64_mv_dma_supported swiotlb_dma_supported; | 27 | extern ia64_mv_dma_supported swiotlb_dma_supported; |
28 | extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; | 28 | extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; |
29 | 29 | ||
@@ -31,19 +31,19 @@ extern ia64_mv_dma_mapping_error swiotlb_dma_mapping_error; | |||
31 | 31 | ||
32 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | 32 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; |
33 | extern ia64_mv_dma_free_coherent sba_free_coherent; | 33 | extern ia64_mv_dma_free_coherent sba_free_coherent; |
34 | extern ia64_mv_dma_map_single sba_map_single; | 34 | extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; |
35 | extern ia64_mv_dma_unmap_single sba_unmap_single; | 35 | extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; |
36 | extern ia64_mv_dma_map_sg sba_map_sg; | 36 | extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; |
37 | extern ia64_mv_dma_unmap_sg sba_unmap_sg; | 37 | extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; |
38 | extern ia64_mv_dma_supported sba_dma_supported; | 38 | extern ia64_mv_dma_supported sba_dma_supported; |
39 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | 39 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; |
40 | 40 | ||
41 | #define hwiommu_alloc_coherent sba_alloc_coherent | 41 | #define hwiommu_alloc_coherent sba_alloc_coherent |
42 | #define hwiommu_free_coherent sba_free_coherent | 42 | #define hwiommu_free_coherent sba_free_coherent |
43 | #define hwiommu_map_single sba_map_single | 43 | #define hwiommu_map_single_attrs sba_map_single_attrs |
44 | #define hwiommu_unmap_single sba_unmap_single | 44 | #define hwiommu_unmap_single_attrs sba_unmap_single_attrs |
45 | #define hwiommu_map_sg sba_map_sg | 45 | #define hwiommu_map_sg_attrs sba_map_sg_attrs |
46 | #define hwiommu_unmap_sg sba_unmap_sg | 46 | #define hwiommu_unmap_sg_attrs sba_unmap_sg_attrs |
47 | #define hwiommu_dma_supported sba_dma_supported | 47 | #define hwiommu_dma_supported sba_dma_supported |
48 | #define hwiommu_dma_mapping_error sba_dma_mapping_error | 48 | #define hwiommu_dma_mapping_error sba_dma_mapping_error |
49 | #define hwiommu_sync_single_for_cpu machvec_dma_sync_single | 49 | #define hwiommu_sync_single_for_cpu machvec_dma_sync_single |
@@ -98,41 +98,48 @@ hwsw_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma | |||
98 | } | 98 | } |
99 | 99 | ||
100 | dma_addr_t | 100 | dma_addr_t |
101 | hwsw_map_single (struct device *dev, void *addr, size_t size, int dir) | 101 | hwsw_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, |
102 | struct dma_attrs *attrs) | ||
102 | { | 103 | { |
103 | if (use_swiotlb(dev)) | 104 | if (use_swiotlb(dev)) |
104 | return swiotlb_map_single(dev, addr, size, dir); | 105 | return swiotlb_map_single_attrs(dev, addr, size, dir, attrs); |
105 | else | 106 | else |
106 | return hwiommu_map_single(dev, addr, size, dir); | 107 | return hwiommu_map_single_attrs(dev, addr, size, dir, attrs); |
107 | } | 108 | } |
109 | EXPORT_SYMBOL(hwsw_map_single_attrs); | ||
108 | 110 | ||
109 | void | 111 | void |
110 | hwsw_unmap_single (struct device *dev, dma_addr_t iova, size_t size, int dir) | 112 | hwsw_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, |
113 | int dir, struct dma_attrs *attrs) | ||
111 | { | 114 | { |
112 | if (use_swiotlb(dev)) | 115 | if (use_swiotlb(dev)) |
113 | return swiotlb_unmap_single(dev, iova, size, dir); | 116 | return swiotlb_unmap_single_attrs(dev, iova, size, dir, attrs); |
114 | else | 117 | else |
115 | return hwiommu_unmap_single(dev, iova, size, dir); | 118 | return hwiommu_unmap_single_attrs(dev, iova, size, dir, attrs); |
116 | } | 119 | } |
117 | 120 | EXPORT_SYMBOL(hwsw_unmap_single_attrs); | |
118 | 121 | ||
119 | int | 122 | int |
120 | hwsw_map_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | 123 | hwsw_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, |
124 | int dir, struct dma_attrs *attrs) | ||
121 | { | 125 | { |
122 | if (use_swiotlb(dev)) | 126 | if (use_swiotlb(dev)) |
123 | return swiotlb_map_sg(dev, sglist, nents, dir); | 127 | return swiotlb_map_sg_attrs(dev, sglist, nents, dir, attrs); |
124 | else | 128 | else |
125 | return hwiommu_map_sg(dev, sglist, nents, dir); | 129 | return hwiommu_map_sg_attrs(dev, sglist, nents, dir, attrs); |
126 | } | 130 | } |
131 | EXPORT_SYMBOL(hwsw_map_sg_attrs); | ||
127 | 132 | ||
128 | void | 133 | void |
129 | hwsw_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | 134 | hwsw_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, |
135 | int dir, struct dma_attrs *attrs) | ||
130 | { | 136 | { |
131 | if (use_swiotlb(dev)) | 137 | if (use_swiotlb(dev)) |
132 | return swiotlb_unmap_sg(dev, sglist, nents, dir); | 138 | return swiotlb_unmap_sg_attrs(dev, sglist, nents, dir, attrs); |
133 | else | 139 | else |
134 | return hwiommu_unmap_sg(dev, sglist, nents, dir); | 140 | return hwiommu_unmap_sg_attrs(dev, sglist, nents, dir, attrs); |
135 | } | 141 | } |
142 | EXPORT_SYMBOL(hwsw_unmap_sg_attrs); | ||
136 | 143 | ||
137 | void | 144 | void |
138 | hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) | 145 | hwsw_sync_single_for_cpu (struct device *dev, dma_addr_t addr, size_t size, int dir) |
@@ -185,10 +192,6 @@ hwsw_dma_mapping_error (dma_addr_t dma_addr) | |||
185 | } | 192 | } |
186 | 193 | ||
187 | EXPORT_SYMBOL(hwsw_dma_mapping_error); | 194 | EXPORT_SYMBOL(hwsw_dma_mapping_error); |
188 | EXPORT_SYMBOL(hwsw_map_single); | ||
189 | EXPORT_SYMBOL(hwsw_unmap_single); | ||
190 | EXPORT_SYMBOL(hwsw_map_sg); | ||
191 | EXPORT_SYMBOL(hwsw_unmap_sg); | ||
192 | EXPORT_SYMBOL(hwsw_dma_supported); | 195 | EXPORT_SYMBOL(hwsw_dma_supported); |
193 | EXPORT_SYMBOL(hwsw_alloc_coherent); | 196 | EXPORT_SYMBOL(hwsw_alloc_coherent); |
194 | EXPORT_SYMBOL(hwsw_free_coherent); | 197 | EXPORT_SYMBOL(hwsw_free_coherent); |
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 9409de5c9441..34421aed1e2a 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c | |||
@@ -899,16 +899,18 @@ sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt) | |||
899 | } | 899 | } |
900 | 900 | ||
901 | /** | 901 | /** |
902 | * sba_map_single - map one buffer and return IOVA for DMA | 902 | * sba_map_single_attrs - map one buffer and return IOVA for DMA |
903 | * @dev: instance of PCI owned by the driver that's asking. | 903 | * @dev: instance of PCI owned by the driver that's asking. |
904 | * @addr: driver buffer to map. | 904 | * @addr: driver buffer to map. |
905 | * @size: number of bytes to map in driver buffer. | 905 | * @size: number of bytes to map in driver buffer. |
906 | * @dir: R/W or both. | 906 | * @dir: R/W or both. |
907 | * @attrs: optional dma attributes | ||
907 | * | 908 | * |
908 | * See Documentation/DMA-mapping.txt | 909 | * See Documentation/DMA-mapping.txt |
909 | */ | 910 | */ |
910 | dma_addr_t | 911 | dma_addr_t |
911 | sba_map_single(struct device *dev, void *addr, size_t size, int dir) | 912 | sba_map_single_attrs(struct device *dev, void *addr, size_t size, int dir, |
913 | struct dma_attrs *attrs) | ||
912 | { | 914 | { |
913 | struct ioc *ioc; | 915 | struct ioc *ioc; |
914 | dma_addr_t iovp; | 916 | dma_addr_t iovp; |
@@ -932,7 +934,8 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |||
932 | ** Device is bit capable of DMA'ing to the buffer... | 934 | ** Device is bit capable of DMA'ing to the buffer... |
933 | ** just return the PCI address of ptr | 935 | ** just return the PCI address of ptr |
934 | */ | 936 | */ |
935 | DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n", | 937 | DBG_BYPASS("sba_map_single_attrs() bypass mask/addr: " |
938 | "0x%lx/0x%lx\n", | ||
936 | to_pci_dev(dev)->dma_mask, pci_addr); | 939 | to_pci_dev(dev)->dma_mask, pci_addr); |
937 | return pci_addr; | 940 | return pci_addr; |
938 | } | 941 | } |
@@ -953,7 +956,7 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |||
953 | 956 | ||
954 | #ifdef ASSERT_PDIR_SANITY | 957 | #ifdef ASSERT_PDIR_SANITY |
955 | spin_lock_irqsave(&ioc->res_lock, flags); | 958 | spin_lock_irqsave(&ioc->res_lock, flags); |
956 | if (sba_check_pdir(ioc,"Check before sba_map_single()")) | 959 | if (sba_check_pdir(ioc,"Check before sba_map_single_attrs()")) |
957 | panic("Sanity check failed"); | 960 | panic("Sanity check failed"); |
958 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 961 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
959 | #endif | 962 | #endif |
@@ -982,11 +985,12 @@ sba_map_single(struct device *dev, void *addr, size_t size, int dir) | |||
982 | /* form complete address */ | 985 | /* form complete address */ |
983 | #ifdef ASSERT_PDIR_SANITY | 986 | #ifdef ASSERT_PDIR_SANITY |
984 | spin_lock_irqsave(&ioc->res_lock, flags); | 987 | spin_lock_irqsave(&ioc->res_lock, flags); |
985 | sba_check_pdir(ioc,"Check after sba_map_single()"); | 988 | sba_check_pdir(ioc,"Check after sba_map_single_attrs()"); |
986 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 989 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
987 | #endif | 990 | #endif |
988 | return SBA_IOVA(ioc, iovp, offset); | 991 | return SBA_IOVA(ioc, iovp, offset); |
989 | } | 992 | } |
993 | EXPORT_SYMBOL(sba_map_single_attrs); | ||
990 | 994 | ||
991 | #ifdef ENABLE_MARK_CLEAN | 995 | #ifdef ENABLE_MARK_CLEAN |
992 | static SBA_INLINE void | 996 | static SBA_INLINE void |
@@ -1013,15 +1017,17 @@ sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size) | |||
1013 | #endif | 1017 | #endif |
1014 | 1018 | ||
1015 | /** | 1019 | /** |
1016 | * sba_unmap_single - unmap one IOVA and free resources | 1020 | * sba_unmap_single_attrs - unmap one IOVA and free resources |
1017 | * @dev: instance of PCI owned by the driver that's asking. | 1021 | * @dev: instance of PCI owned by the driver that's asking. |
1018 | * @iova: IOVA of driver buffer previously mapped. | 1022 | * @iova: IOVA of driver buffer previously mapped. |
1019 | * @size: number of bytes mapped in driver buffer. | 1023 | * @size: number of bytes mapped in driver buffer. |
1020 | * @dir: R/W or both. | 1024 | * @dir: R/W or both. |
1025 | * @attrs: optional dma attributes | ||
1021 | * | 1026 | * |
1022 | * See Documentation/DMA-mapping.txt | 1027 | * See Documentation/DMA-mapping.txt |
1023 | */ | 1028 | */ |
1024 | void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | 1029 | void sba_unmap_single_attrs(struct device *dev, dma_addr_t iova, size_t size, |
1030 | int dir, struct dma_attrs *attrs) | ||
1025 | { | 1031 | { |
1026 | struct ioc *ioc; | 1032 | struct ioc *ioc; |
1027 | #if DELAYED_RESOURCE_CNT > 0 | 1033 | #if DELAYED_RESOURCE_CNT > 0 |
@@ -1038,7 +1044,8 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |||
1038 | /* | 1044 | /* |
1039 | ** Address does not fall w/in IOVA, must be bypassing | 1045 | ** Address does not fall w/in IOVA, must be bypassing |
1040 | */ | 1046 | */ |
1041 | DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova); | 1047 | DBG_BYPASS("sba_unmap_single_attrs() bypass addr: 0x%lx\n",
1048 | iova); | ||
1042 | 1049 | ||
1043 | #ifdef ENABLE_MARK_CLEAN | 1050 | #ifdef ENABLE_MARK_CLEAN |
1044 | if (dir == DMA_FROM_DEVICE) { | 1051 | if (dir == DMA_FROM_DEVICE) { |
@@ -1087,7 +1094,7 @@ void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir) | |||
1087 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1094 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1088 | #endif /* DELAYED_RESOURCE_CNT == 0 */ | 1095 | #endif /* DELAYED_RESOURCE_CNT == 0 */ |
1089 | } | 1096 | } |
1090 | 1097 | EXPORT_SYMBOL(sba_unmap_single_attrs); | |
1091 | 1098 | ||
1092 | /** | 1099 | /** |
1093 | * sba_alloc_coherent - allocate/map shared mem for DMA | 1100 | * sba_alloc_coherent - allocate/map shared mem for DMA |
@@ -1144,7 +1151,8 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp | |||
1144 | * If device can't bypass or bypass is disabled, pass the 32bit fake | 1151 | * If device can't bypass or bypass is disabled, pass the 32bit fake |
1145 | * device to map single to get an iova mapping. | 1152 | * device to map single to get an iova mapping. |
1146 | */ | 1153 | */ |
1147 | *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0); | 1154 | *dma_handle = sba_map_single_attrs(&ioc->sac_only_dev->dev, addr, |
1155 | size, 0, NULL); | ||
1148 | 1156 | ||
1149 | return addr; | 1157 | return addr; |
1150 | } | 1158 | } |
@@ -1161,7 +1169,7 @@ sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp | |||
1161 | */ | 1169 | */ |
1162 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) | 1170 | void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) |
1163 | { | 1171 | { |
1164 | sba_unmap_single(dev, dma_handle, size, 0); | 1172 | sba_unmap_single_attrs(dev, dma_handle, size, 0, NULL); |
1165 | free_pages((unsigned long) vaddr, get_order(size)); | 1173 | free_pages((unsigned long) vaddr, get_order(size)); |
1166 | } | 1174 | } |
1167 | 1175 | ||
@@ -1410,10 +1418,12 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev, | |||
1410 | * @sglist: array of buffer/length pairs | 1418 | * @sglist: array of buffer/length pairs |
1411 | * @nents: number of entries in list | 1419 | * @nents: number of entries in list |
1412 | * @dir: R/W or both. | 1420 | * @dir: R/W or both. |
1421 | * @attrs: optional dma attributes | ||
1413 | * | 1422 | * |
1414 | * See Documentation/DMA-mapping.txt | 1423 | * See Documentation/DMA-mapping.txt |
1415 | */ | 1424 | */ |
1416 | int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir) | 1425 | int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist, int nents, |
1426 | int dir, struct dma_attrs *attrs) | ||
1417 | { | 1427 | { |
1418 | struct ioc *ioc; | 1428 | struct ioc *ioc; |
1419 | int coalesced, filled = 0; | 1429 | int coalesced, filled = 0; |
@@ -1441,16 +1451,16 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
1441 | /* Fast path single entry scatterlists. */ | 1451 | /* Fast path single entry scatterlists. */ |
1442 | if (nents == 1) { | 1452 | if (nents == 1) { |
1443 | sglist->dma_length = sglist->length; | 1453 | sglist->dma_length = sglist->length; |
1444 | sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir); | 1454 | sglist->dma_address = sba_map_single_attrs(dev, sba_sg_address(sglist), sglist->length, dir, attrs); |
1445 | return 1; | 1455 | return 1; |
1446 | } | 1456 | } |
1447 | 1457 | ||
1448 | #ifdef ASSERT_PDIR_SANITY | 1458 | #ifdef ASSERT_PDIR_SANITY |
1449 | spin_lock_irqsave(&ioc->res_lock, flags); | 1459 | spin_lock_irqsave(&ioc->res_lock, flags); |
1450 | if (sba_check_pdir(ioc,"Check before sba_map_sg()")) | 1460 | if (sba_check_pdir(ioc,"Check before sba_map_sg_attrs()")) |
1451 | { | 1461 | { |
1452 | sba_dump_sg(ioc, sglist, nents); | 1462 | sba_dump_sg(ioc, sglist, nents); |
1453 | panic("Check before sba_map_sg()"); | 1463 | panic("Check before sba_map_sg_attrs()"); |
1454 | } | 1464 | } |
1455 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1465 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1456 | #endif | 1466 | #endif |
@@ -1479,10 +1489,10 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
1479 | 1489 | ||
1480 | #ifdef ASSERT_PDIR_SANITY | 1490 | #ifdef ASSERT_PDIR_SANITY |
1481 | spin_lock_irqsave(&ioc->res_lock, flags); | 1491 | spin_lock_irqsave(&ioc->res_lock, flags); |
1482 | if (sba_check_pdir(ioc,"Check after sba_map_sg()")) | 1492 | if (sba_check_pdir(ioc,"Check after sba_map_sg_attrs()")) |
1483 | { | 1493 | { |
1484 | sba_dump_sg(ioc, sglist, nents); | 1494 | sba_dump_sg(ioc, sglist, nents); |
1485 | panic("Check after sba_map_sg()\n"); | 1495 | panic("Check after sba_map_sg_attrs()\n"); |
1486 | } | 1496 | } |
1487 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1497 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1488 | #endif | 1498 | #endif |
@@ -1492,18 +1502,20 @@ int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int di | |||
1492 | 1502 | ||
1493 | return filled; | 1503 | return filled; |
1494 | } | 1504 | } |
1495 | 1505 | EXPORT_SYMBOL(sba_map_sg_attrs); | |
1496 | 1506 | ||
1497 | /** | 1507 | /** |
1498 | * sba_unmap_sg - unmap Scatter/Gather list | 1508 | * sba_unmap_sg_attrs - unmap Scatter/Gather list |
1499 | * @dev: instance of PCI owned by the driver that's asking. | 1509 | * @dev: instance of PCI owned by the driver that's asking. |
1500 | * @sglist: array of buffer/length pairs | 1510 | * @sglist: array of buffer/length pairs |
1501 | * @nents: number of entries in list | 1511 | * @nents: number of entries in list |
1502 | * @dir: R/W or both. | 1512 | * @dir: R/W or both. |
1513 | * @attrs: optional dma attributes | ||
1503 | * | 1514 | * |
1504 | * See Documentation/DMA-mapping.txt | 1515 | * See Documentation/DMA-mapping.txt |
1505 | */ | 1516 | */ |
1506 | void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir) | 1517 | void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist, |
1518 | int nents, int dir, struct dma_attrs *attrs) | ||
1507 | { | 1519 | { |
1508 | #ifdef ASSERT_PDIR_SANITY | 1520 | #ifdef ASSERT_PDIR_SANITY |
1509 | struct ioc *ioc; | 1521 | struct ioc *ioc; |
@@ -1518,13 +1530,14 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in | |||
1518 | ASSERT(ioc); | 1530 | ASSERT(ioc); |
1519 | 1531 | ||
1520 | spin_lock_irqsave(&ioc->res_lock, flags); | 1532 | spin_lock_irqsave(&ioc->res_lock, flags); |
1521 | sba_check_pdir(ioc,"Check before sba_unmap_sg()"); | 1533 | sba_check_pdir(ioc,"Check before sba_unmap_sg_attrs()"); |
1522 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1534 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1523 | #endif | 1535 | #endif |
1524 | 1536 | ||
1525 | while (nents && sglist->dma_length) { | 1537 | while (nents && sglist->dma_length) { |
1526 | 1538 | ||
1527 | sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir); | 1539 | sba_unmap_single_attrs(dev, sglist->dma_address, |
1540 | sglist->dma_length, dir, attrs); | ||
1528 | sglist = sg_next(sglist); | 1541 | sglist = sg_next(sglist); |
1529 | nents--; | 1542 | nents--; |
1530 | } | 1543 | } |
@@ -1533,11 +1546,12 @@ void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, in | |||
1533 | 1546 | ||
1534 | #ifdef ASSERT_PDIR_SANITY | 1547 | #ifdef ASSERT_PDIR_SANITY |
1535 | spin_lock_irqsave(&ioc->res_lock, flags); | 1548 | spin_lock_irqsave(&ioc->res_lock, flags); |
1536 | sba_check_pdir(ioc,"Check after sba_unmap_sg()"); | 1549 | sba_check_pdir(ioc,"Check after sba_unmap_sg_attrs()"); |
1537 | spin_unlock_irqrestore(&ioc->res_lock, flags); | 1550 | spin_unlock_irqrestore(&ioc->res_lock, flags); |
1538 | #endif | 1551 | #endif |
1539 | 1552 | ||
1540 | } | 1553 | } |
1554 | EXPORT_SYMBOL(sba_unmap_sg_attrs); | ||
1541 | 1555 | ||
1542 | /************************************************************** | 1556 | /************************************************************** |
1543 | * | 1557 | * |
@@ -1918,15 +1932,13 @@ static const struct file_operations ioc_fops = { | |||
1918 | static void __init | 1932 | static void __init |
1919 | ioc_proc_init(void) | 1933 | ioc_proc_init(void) |
1920 | { | 1934 | { |
1921 | struct proc_dir_entry *dir, *entry; | 1935 | struct proc_dir_entry *dir; |
1922 | 1936 | ||
1923 | dir = proc_mkdir("bus/mckinley", NULL); | 1937 | dir = proc_mkdir("bus/mckinley", NULL); |
1924 | if (!dir) | 1938 | if (!dir) |
1925 | return; | 1939 | return; |
1926 | 1940 | ||
1927 | entry = create_proc_entry(ioc_list->name, 0, dir); | 1941 | proc_create(ioc_list->name, 0, dir, &ioc_fops); |
1928 | if (entry) | ||
1929 | entry->proc_fops = &ioc_fops; | ||
1930 | } | 1942 | } |
1931 | #endif | 1943 | #endif |
1932 | 1944 | ||
@@ -2166,10 +2178,6 @@ sba_page_override(char *str) | |||
2166 | __setup("sbapagesize=",sba_page_override); | 2178 | __setup("sbapagesize=",sba_page_override); |
2167 | 2179 | ||
2168 | EXPORT_SYMBOL(sba_dma_mapping_error); | 2180 | EXPORT_SYMBOL(sba_dma_mapping_error); |
2169 | EXPORT_SYMBOL(sba_map_single); | ||
2170 | EXPORT_SYMBOL(sba_unmap_single); | ||
2171 | EXPORT_SYMBOL(sba_map_sg); | ||
2172 | EXPORT_SYMBOL(sba_unmap_sg); | ||
2173 | EXPORT_SYMBOL(sba_dma_supported); | 2181 | EXPORT_SYMBOL(sba_dma_supported); |
2174 | EXPORT_SYMBOL(sba_alloc_coherent); | 2182 | EXPORT_SYMBOL(sba_alloc_coherent); |
2175 | EXPORT_SYMBOL(sba_free_coherent); | 2183 | EXPORT_SYMBOL(sba_free_coherent); |
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index 230a6f92367f..c64a55af9b95 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c | |||
@@ -9,7 +9,7 @@ | |||
9 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
10 | #include <linux/pid.h> | 10 | #include <linux/pid.h> |
11 | #include <linux/clocksource.h> | 11 | #include <linux/clocksource.h> |
12 | 12 | #include <linux/kbuild.h> | |
13 | #include <asm-ia64/processor.h> | 13 | #include <asm-ia64/processor.h> |
14 | #include <asm-ia64/ptrace.h> | 14 | #include <asm-ia64/ptrace.h> |
15 | #include <asm-ia64/siginfo.h> | 15 | #include <asm-ia64/siginfo.h> |
@@ -19,11 +19,6 @@ | |||
19 | #include "../kernel/sigframe.h" | 19 | #include "../kernel/sigframe.h" |
20 | #include "../kernel/fsyscall_gtod_data.h" | 20 | #include "../kernel/fsyscall_gtod_data.h" |
21 | 21 | ||
22 | #define DEFINE(sym, val) \ | ||
23 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
24 | |||
25 | #define BLANK() asm volatile("\n->" : : ) | ||
26 | |||
27 | void foo(void) | 22 | void foo(void) |
28 | { | 23 | { |
29 | DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct)); | 24 | DEFINE(IA64_TASK_SIZE, sizeof (struct task_struct)); |
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index c8e403752a0c..7fbb51e10bbe 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c | |||
@@ -6695,16 +6695,12 @@ pfm_init(void) | |||
6695 | /* | 6695 | /* |
6696 | * create /proc/perfmon (mostly for debugging purposes) | 6696 | * create /proc/perfmon (mostly for debugging purposes) |
6697 | */ | 6697 | */ |
6698 | perfmon_dir = create_proc_entry("perfmon", S_IRUGO, NULL); | 6698 | perfmon_dir = proc_create("perfmon", S_IRUGO, NULL, &pfm_proc_fops); |
6699 | if (perfmon_dir == NULL) { | 6699 | if (perfmon_dir == NULL) { |
6700 | printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); | 6700 | printk(KERN_ERR "perfmon: cannot create /proc entry, perfmon disabled\n"); |
6701 | pmu_conf = NULL; | 6701 | pmu_conf = NULL; |
6702 | return -1; | 6702 | return -1; |
6703 | } | 6703 | } |
6704 | /* | ||
6705 | * install customized file operations for /proc/perfmon entry | ||
6706 | */ | ||
6707 | perfmon_dir->proc_fops = &pfm_proc_fops; | ||
6708 | 6704 | ||
6709 | /* | 6705 | /* |
6710 | * create /proc/sys/kernel/perfmon (for debugging purposes) | 6706 | * create /proc/sys/kernel/perfmon (for debugging purposes) |
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index b11bb50a197a..ecb9eb78d687 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c | |||
@@ -648,18 +648,16 @@ salinfo_init(void) | |||
648 | if (!dir) | 648 | if (!dir) |
649 | continue; | 649 | continue; |
650 | 650 | ||
651 | entry = create_proc_entry("event", S_IRUSR, dir); | 651 | entry = proc_create_data("event", S_IRUSR, dir, |
652 | &salinfo_event_fops, data); | ||
652 | if (!entry) | 653 | if (!entry) |
653 | continue; | 654 | continue; |
654 | entry->data = data; | ||
655 | entry->proc_fops = &salinfo_event_fops; | ||
656 | *sdir++ = entry; | 655 | *sdir++ = entry; |
657 | 656 | ||
658 | entry = create_proc_entry("data", S_IRUSR | S_IWUSR, dir); | 657 | entry = proc_create_data("data", S_IRUSR | S_IWUSR, dir, |
658 | &salinfo_data_fops, data); | ||
659 | if (!entry) | 659 | if (!entry) |
660 | continue; | 660 | continue; |
661 | entry->data = data; | ||
662 | entry->proc_fops = &salinfo_data_fops; | ||
663 | *sdir++ = entry; | 661 | *sdir++ = entry; |
664 | 662 | ||
665 | /* we missed any events before now */ | 663 | /* we missed any events before now */ |
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c index dfc6bf1c7b41..49d3120415eb 100644 --- a/arch/ia64/sn/kernel/sn2/sn2_smp.c +++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c | |||
@@ -550,11 +550,12 @@ static int __init sn2_ptc_init(void) | |||
550 | if (!ia64_platform_is("sn2")) | 550 | if (!ia64_platform_is("sn2")) |
551 | return 0; | 551 | return 0; |
552 | 552 | ||
553 | if (!(proc_sn2_ptc = create_proc_entry(PTC_BASENAME, 0444, NULL))) { | 553 | proc_sn2_ptc = proc_create(PTC_BASENAME, 0444, |
554 | NULL, &proc_sn2_ptc_operations); | ||
555 | if (!proc_sn2_ptc) { | ||
554 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); | 556 | printk(KERN_ERR "unable to create %s proc entry", PTC_BASENAME); |
555 | return -EINVAL; | 557 | return -EINVAL; |
556 | } | 558 | } |
557 | proc_sn2_ptc->proc_fops = &proc_sn2_ptc_operations; | ||
558 | spin_lock_init(&sn2_global_ptc_lock); | 559 | spin_lock_init(&sn2_global_ptc_lock); |
559 | return 0; | 560 | return 0; |
560 | } | 561 | } |
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index 62b3e9a496ac..2526e5c783a4 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c | |||
@@ -139,30 +139,21 @@ static const struct file_operations proc_sn_topo_fops = { | |||
139 | void register_sn_procfs(void) | 139 | void register_sn_procfs(void) |
140 | { | 140 | { |
141 | static struct proc_dir_entry *sgi_proc_dir = NULL; | 141 | static struct proc_dir_entry *sgi_proc_dir = NULL; |
142 | struct proc_dir_entry *pde; | ||
143 | 142 | ||
144 | BUG_ON(sgi_proc_dir != NULL); | 143 | BUG_ON(sgi_proc_dir != NULL); |
145 | if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) | 144 | if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) |
146 | return; | 145 | return; |
147 | 146 | ||
148 | pde = create_proc_entry("partition_id", 0444, sgi_proc_dir); | 147 | proc_create("partition_id", 0444, sgi_proc_dir, |
149 | if (pde) | 148 | &proc_partition_id_fops); |
150 | pde->proc_fops = &proc_partition_id_fops; | 149 | proc_create("system_serial_number", 0444, sgi_proc_dir, |
151 | pde = create_proc_entry("system_serial_number", 0444, sgi_proc_dir); | 150 | &proc_system_sn_fops); |
152 | if (pde) | 151 | proc_create("licenseID", 0444, sgi_proc_dir, &proc_license_id_fops); |
153 | pde->proc_fops = &proc_system_sn_fops; | 152 | proc_create("sn_force_interrupt", 0644, sgi_proc_dir, |
154 | pde = create_proc_entry("licenseID", 0444, sgi_proc_dir); | 153 | &proc_sn_force_intr_fops); |
155 | if (pde) | 154 | proc_create("coherence_id", 0444, sgi_proc_dir, |
156 | pde->proc_fops = &proc_license_id_fops; | 155 | &proc_coherence_id_fops); |
157 | pde = create_proc_entry("sn_force_interrupt", 0644, sgi_proc_dir); | 156 | proc_create("sn_topology", 0444, sgi_proc_dir, &proc_sn_topo_fops); |
158 | if (pde) | ||
159 | pde->proc_fops = &proc_sn_force_intr_fops; | ||
160 | pde = create_proc_entry("coherence_id", 0444, sgi_proc_dir); | ||
161 | if (pde) | ||
162 | pde->proc_fops = &proc_coherence_id_fops; | ||
163 | pde = create_proc_entry("sn_topology", 0444, sgi_proc_dir); | ||
164 | if (pde) | ||
165 | pde->proc_fops = &proc_sn_topo_fops; | ||
166 | } | 157 | } |
167 | 158 | ||
168 | #endif /* CONFIG_PROC_FS */ | 159 | #endif /* CONFIG_PROC_FS */ |
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c index 18b94b792d54..52175af299a0 100644 --- a/arch/ia64/sn/pci/pci_dma.c +++ b/arch/ia64/sn/pci/pci_dma.c | |||
@@ -10,6 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/dma-attrs.h> | ||
13 | #include <asm/dma.h> | 14 | #include <asm/dma.h> |
14 | #include <asm/sn/intr.h> | 15 | #include <asm/sn/intr.h> |
15 | #include <asm/sn/pcibus_provider_defs.h> | 16 | #include <asm/sn/pcibus_provider_defs.h> |
@@ -149,11 +150,12 @@ void sn_dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, | |||
149 | EXPORT_SYMBOL(sn_dma_free_coherent); | 150 | EXPORT_SYMBOL(sn_dma_free_coherent); |
150 | 151 | ||
151 | /** | 152 | /** |
152 | * sn_dma_map_single - map a single page for DMA | 153 | * sn_dma_map_single_attrs - map a single page for DMA |
153 | * @dev: device to map for | 154 | * @dev: device to map for |
154 | * @cpu_addr: kernel virtual address of the region to map | 155 | * @cpu_addr: kernel virtual address of the region to map |
155 | * @size: size of the region | 156 | * @size: size of the region |
156 | * @direction: DMA direction | 157 | * @direction: DMA direction |
158 | * @attrs: optional dma attributes | ||
157 | * | 159 | * |
158 | * Map the region pointed to by @cpu_addr for DMA and return the | 160 | * Map the region pointed to by @cpu_addr for DMA and return the |
159 | * DMA address. | 161 | * DMA address. |
@@ -163,42 +165,59 @@ EXPORT_SYMBOL(sn_dma_free_coherent); | |||
163 | * no way of saving the dmamap handle from the alloc to later free | 165 | * no way of saving the dmamap handle from the alloc to later free |
164 | * (which is pretty much unacceptable). | 166 | * (which is pretty much unacceptable). |
165 | * | 167 | * |
168 | * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with | ||
169 | * dma_map_consistent() so that writes force a flush of pending DMA. | ||
170 | * (See "SGI Altix Architecture Considerations for Linux Device Drivers", | ||
171 | * Document Number: 007-4763-001) | ||
172 | * | ||
166 | * TODO: simplify our interface; | 173 | * TODO: simplify our interface; |
167 | * figure out how to save dmamap handle so can use two step. | 174 | * figure out how to save dmamap handle so can use two step. |
168 | */ | 175 | */ |
169 | dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size, | 176 | dma_addr_t sn_dma_map_single_attrs(struct device *dev, void *cpu_addr, |
170 | int direction) | 177 | size_t size, int direction, |
178 | struct dma_attrs *attrs) | ||
171 | { | 179 | { |
172 | dma_addr_t dma_addr; | 180 | dma_addr_t dma_addr; |
173 | unsigned long phys_addr; | 181 | unsigned long phys_addr; |
174 | struct pci_dev *pdev = to_pci_dev(dev); | 182 | struct pci_dev *pdev = to_pci_dev(dev); |
175 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 183 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
184 | int dmabarr; | ||
185 | |||
186 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
176 | 187 | ||
177 | BUG_ON(dev->bus != &pci_bus_type); | 188 | BUG_ON(dev->bus != &pci_bus_type); |
178 | 189 | ||
179 | phys_addr = __pa(cpu_addr); | 190 | phys_addr = __pa(cpu_addr); |
180 | dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS); | 191 | if (dmabarr) |
192 | dma_addr = provider->dma_map_consistent(pdev, phys_addr, | ||
193 | size, SN_DMA_ADDR_PHYS); | ||
194 | else | ||
195 | dma_addr = provider->dma_map(pdev, phys_addr, size, | ||
196 | SN_DMA_ADDR_PHYS); | ||
197 | |||
181 | if (!dma_addr) { | 198 | if (!dma_addr) { |
182 | printk(KERN_ERR "%s: out of ATEs\n", __func__); | 199 | printk(KERN_ERR "%s: out of ATEs\n", __func__); |
183 | return 0; | 200 | return 0; |
184 | } | 201 | } |
185 | return dma_addr; | 202 | return dma_addr; |
186 | } | 203 | } |
187 | EXPORT_SYMBOL(sn_dma_map_single); | 204 | EXPORT_SYMBOL(sn_dma_map_single_attrs); |
188 | 205 | ||
189 | /** | 206 | /** |
190 | * sn_dma_unmap_single - unamp a DMA mapped page | 207 | * sn_dma_unmap_single_attrs - unamp a DMA mapped page |
191 | * @dev: device to sync | 208 | * @dev: device to sync |
192 | * @dma_addr: DMA address to sync | 209 | * @dma_addr: DMA address to sync |
193 | * @size: size of region | 210 | * @size: size of region |
194 | * @direction: DMA direction | 211 | * @direction: DMA direction |
212 | * @attrs: optional dma attributes | ||
195 | * | 213 | * |
196 | * This routine is supposed to sync the DMA region specified | 214 | * This routine is supposed to sync the DMA region specified |
197 | * by @dma_handle into the coherence domain. On SN, we're always cache | 215 | * by @dma_handle into the coherence domain. On SN, we're always cache |
198 | * coherent, so we just need to free any ATEs associated with this mapping. | 216 | * coherent, so we just need to free any ATEs associated with this mapping. |
199 | */ | 217 | */ |
200 | void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | 218 | void sn_dma_unmap_single_attrs(struct device *dev, dma_addr_t dma_addr, |
201 | int direction) | 219 | size_t size, int direction, |
220 | struct dma_attrs *attrs) | ||
202 | { | 221 | { |
203 | struct pci_dev *pdev = to_pci_dev(dev); | 222 | struct pci_dev *pdev = to_pci_dev(dev); |
204 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 223 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
@@ -207,19 +226,21 @@ void sn_dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size, | |||
207 | 226 | ||
208 | provider->dma_unmap(pdev, dma_addr, direction); | 227 | provider->dma_unmap(pdev, dma_addr, direction); |
209 | } | 228 | } |
210 | EXPORT_SYMBOL(sn_dma_unmap_single); | 229 | EXPORT_SYMBOL(sn_dma_unmap_single_attrs); |
211 | 230 | ||
212 | /** | 231 | /** |
213 | * sn_dma_unmap_sg - unmap a DMA scatterlist | 232 | * sn_dma_unmap_sg_attrs - unmap a DMA scatterlist |
214 | * @dev: device to unmap | 233 | * @dev: device to unmap |
215 | * @sg: scatterlist to unmap | 234 | * @sg: scatterlist to unmap |
216 | * @nhwentries: number of scatterlist entries | 235 | * @nhwentries: number of scatterlist entries |
217 | * @direction: DMA direction | 236 | * @direction: DMA direction |
237 | * @attrs: optional dma attributes | ||
218 | * | 238 | * |
219 | * Unmap a set of streaming mode DMA translations. | 239 | * Unmap a set of streaming mode DMA translations. |
220 | */ | 240 | */ |
221 | void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | 241 | void sn_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, |
222 | int nhwentries, int direction) | 242 | int nhwentries, int direction, |
243 | struct dma_attrs *attrs) | ||
223 | { | 244 | { |
224 | int i; | 245 | int i; |
225 | struct pci_dev *pdev = to_pci_dev(dev); | 246 | struct pci_dev *pdev = to_pci_dev(dev); |
@@ -234,25 +255,34 @@ void sn_dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | |||
234 | sg->dma_length = 0; | 255 | sg->dma_length = 0; |
235 | } | 256 | } |
236 | } | 257 | } |
237 | EXPORT_SYMBOL(sn_dma_unmap_sg); | 258 | EXPORT_SYMBOL(sn_dma_unmap_sg_attrs); |
238 | 259 | ||
239 | /** | 260 | /** |
240 | * sn_dma_map_sg - map a scatterlist for DMA | 261 | * sn_dma_map_sg_attrs - map a scatterlist for DMA |
241 | * @dev: device to map for | 262 | * @dev: device to map for |
242 | * @sg: scatterlist to map | 263 | * @sg: scatterlist to map |
243 | * @nhwentries: number of entries | 264 | * @nhwentries: number of entries |
244 | * @direction: direction of the DMA transaction | 265 | * @direction: direction of the DMA transaction |
266 | * @attrs: optional dma attributes | ||
267 | * | ||
268 | * mappings with the DMA_ATTR_WRITE_BARRIER get mapped with | ||
269 | * dma_map_consistent() so that writes force a flush of pending DMA. | ||
270 | * (See "SGI Altix Architecture Considerations for Linux Device Drivers", | ||
271 | * Document Number: 007-4763-001) | ||
245 | * | 272 | * |
246 | * Maps each entry of @sg for DMA. | 273 | * Maps each entry of @sg for DMA. |
247 | */ | 274 | */ |
248 | int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | 275 | int sn_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, |
249 | int direction) | 276 | int nhwentries, int direction, struct dma_attrs *attrs) |
250 | { | 277 | { |
251 | unsigned long phys_addr; | 278 | unsigned long phys_addr; |
252 | struct scatterlist *saved_sg = sgl, *sg; | 279 | struct scatterlist *saved_sg = sgl, *sg; |
253 | struct pci_dev *pdev = to_pci_dev(dev); | 280 | struct pci_dev *pdev = to_pci_dev(dev); |
254 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); | 281 | struct sn_pcibus_provider *provider = SN_PCIDEV_BUSPROVIDER(pdev); |
255 | int i; | 282 | int i; |
283 | int dmabarr; | ||
284 | |||
285 | dmabarr = dma_get_attr(DMA_ATTR_WRITE_BARRIER, attrs); | ||
256 | 286 | ||
257 | BUG_ON(dev->bus != &pci_bus_type); | 287 | BUG_ON(dev->bus != &pci_bus_type); |
258 | 288 | ||
@@ -260,11 +290,19 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | |||
260 | * Setup a DMA address for each entry in the scatterlist. | 290 | * Setup a DMA address for each entry in the scatterlist. |
261 | */ | 291 | */ |
262 | for_each_sg(sgl, sg, nhwentries, i) { | 292 | for_each_sg(sgl, sg, nhwentries, i) { |
293 | dma_addr_t dma_addr; | ||
263 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); | 294 | phys_addr = SG_ENT_PHYS_ADDRESS(sg); |
264 | sg->dma_address = provider->dma_map(pdev, | 295 | if (dmabarr) |
265 | phys_addr, sg->length, | 296 | dma_addr = provider->dma_map_consistent(pdev, |
266 | SN_DMA_ADDR_PHYS); | 297 | phys_addr, |
298 | sg->length, | ||
299 | SN_DMA_ADDR_PHYS); | ||
300 | else | ||
301 | dma_addr = provider->dma_map(pdev, phys_addr, | ||
302 | sg->length, | ||
303 | SN_DMA_ADDR_PHYS); | ||
267 | 304 | ||
305 | sg->dma_address = dma_addr; | ||
268 | if (!sg->dma_address) { | 306 | if (!sg->dma_address) { |
269 | printk(KERN_ERR "%s: out of ATEs\n", __func__); | 307 | printk(KERN_ERR "%s: out of ATEs\n", __func__); |
270 | 308 | ||
@@ -272,7 +310,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | |||
272 | * Free any successfully allocated entries. | 310 | * Free any successfully allocated entries. |
273 | */ | 311 | */ |
274 | if (i > 0) | 312 | if (i > 0) |
275 | sn_dma_unmap_sg(dev, saved_sg, i, direction); | 313 | sn_dma_unmap_sg_attrs(dev, saved_sg, i, |
314 | direction, attrs); | ||
276 | return 0; | 315 | return 0; |
277 | } | 316 | } |
278 | 317 | ||
@@ -281,7 +320,7 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sgl, int nhwentries, | |||
281 | 320 | ||
282 | return nhwentries; | 321 | return nhwentries; |
283 | } | 322 | } |
284 | EXPORT_SYMBOL(sn_dma_map_sg); | 323 | EXPORT_SYMBOL(sn_dma_map_sg_attrs); |
285 | 324 | ||
286 | void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, | 325 | void sn_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, |
287 | size_t size, int direction) | 326 | size_t size, int direction) |
diff --git a/arch/m68k/kernel/asm-offsets.c b/arch/m68k/kernel/asm-offsets.c index 246a8820c223..b1f012f6c493 100644 --- a/arch/m68k/kernel/asm-offsets.c +++ b/arch/m68k/kernel/asm-offsets.c | |||
@@ -11,14 +11,12 @@ | |||
11 | #include <linux/stddef.h> | 11 | #include <linux/stddef.h> |
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/kbuild.h> | ||
14 | #include <asm/bootinfo.h> | 15 | #include <asm/bootinfo.h> |
15 | #include <asm/irq.h> | 16 | #include <asm/irq.h> |
16 | #include <asm/amigahw.h> | 17 | #include <asm/amigahw.h> |
17 | #include <linux/font.h> | 18 | #include <linux/font.h> |
18 | 19 | ||
19 | #define DEFINE(sym, val) \ | ||
20 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
21 | |||
22 | int main(void) | 20 | int main(void) |
23 | { | 21 | { |
24 | /* offsets into the task struct */ | 22 | /* offsets into the task struct */ |
diff --git a/arch/m68k/mac/iop.c b/arch/m68k/mac/iop.c index 5b2799eb96a6..326fb9978094 100644 --- a/arch/m68k/mac/iop.c +++ b/arch/m68k/mac/iop.c | |||
@@ -109,7 +109,6 @@ | |||
109 | #include <linux/mm.h> | 109 | #include <linux/mm.h> |
110 | #include <linux/delay.h> | 110 | #include <linux/delay.h> |
111 | #include <linux/init.h> | 111 | #include <linux/init.h> |
112 | #include <linux/proc_fs.h> | ||
113 | #include <linux/interrupt.h> | 112 | #include <linux/interrupt.h> |
114 | 113 | ||
115 | #include <asm/bootinfo.h> | 114 | #include <asm/bootinfo.h> |
@@ -124,10 +123,6 @@ | |||
124 | 123 | ||
125 | int iop_scc_present,iop_ism_present; | 124 | int iop_scc_present,iop_ism_present; |
126 | 125 | ||
127 | #ifdef CONFIG_PROC_FS | ||
128 | static int iop_get_proc_info(char *, char **, off_t, int); | ||
129 | #endif /* CONFIG_PROC_FS */ | ||
130 | |||
131 | /* structure for tracking channel listeners */ | 126 | /* structure for tracking channel listeners */ |
132 | 127 | ||
133 | struct listener { | 128 | struct listener { |
@@ -299,12 +294,6 @@ void __init iop_init(void) | |||
299 | iop_listeners[IOP_NUM_ISM][i].devname = NULL; | 294 | iop_listeners[IOP_NUM_ISM][i].devname = NULL; |
300 | iop_listeners[IOP_NUM_ISM][i].handler = NULL; | 295 | iop_listeners[IOP_NUM_ISM][i].handler = NULL; |
301 | } | 296 | } |
302 | |||
303 | #if 0 /* Crashing in 2.4 now, not yet sure why. --jmt */ | ||
304 | #ifdef CONFIG_PROC_FS | ||
305 | create_proc_info_entry("mac_iop", 0, &proc_root, iop_get_proc_info); | ||
306 | #endif | ||
307 | #endif | ||
308 | } | 297 | } |
309 | 298 | ||
310 | /* | 299 | /* |
@@ -637,77 +626,3 @@ irqreturn_t iop_ism_irq(int irq, void *dev_id) | |||
637 | } | 626 | } |
638 | return IRQ_HANDLED; | 627 | return IRQ_HANDLED; |
639 | } | 628 | } |
640 | |||
641 | #ifdef CONFIG_PROC_FS | ||
642 | |||
643 | char *iop_chan_state(int state) | ||
644 | { | ||
645 | switch(state) { | ||
646 | case IOP_MSG_IDLE : return "idle "; | ||
647 | case IOP_MSG_NEW : return "new "; | ||
648 | case IOP_MSG_RCVD : return "received "; | ||
649 | case IOP_MSG_COMPLETE : return "completed "; | ||
650 | default : return "unknown "; | ||
651 | } | ||
652 | } | ||
653 | |||
654 | int iop_dump_one_iop(char *buf, int iop_num, char *iop_name) | ||
655 | { | ||
656 | int i,len = 0; | ||
657 | volatile struct mac_iop *iop = iop_base[iop_num]; | ||
658 | |||
659 | len += sprintf(buf+len, "%s IOP channel states:\n\n", iop_name); | ||
660 | len += sprintf(buf+len, "## send_state recv_state device\n"); | ||
661 | len += sprintf(buf+len, "------------------------------------------------\n"); | ||
662 | for (i = 0 ; i < NUM_IOP_CHAN ; i++) { | ||
663 | len += sprintf(buf+len, "%2d %10s %10s %s\n", i, | ||
664 | iop_chan_state(iop_readb(iop, IOP_ADDR_SEND_STATE+i)), | ||
665 | iop_chan_state(iop_readb(iop, IOP_ADDR_RECV_STATE+i)), | ||
666 | iop_listeners[iop_num][i].handler? | ||
667 | iop_listeners[iop_num][i].devname : ""); | ||
668 | |||
669 | } | ||
670 | len += sprintf(buf+len, "\n"); | ||
671 | return len; | ||
672 | } | ||
673 | |||
674 | static int iop_get_proc_info(char *buf, char **start, off_t pos, int count) | ||
675 | { | ||
676 | int len, cnt; | ||
677 | |||
678 | cnt = 0; | ||
679 | len = sprintf(buf, "IOPs detected:\n\n"); | ||
680 | |||
681 | if (iop_scc_present) { | ||
682 | len += sprintf(buf+len, "SCC IOP (%p): status %02X\n", | ||
683 | iop_base[IOP_NUM_SCC], | ||
684 | (uint) iop_base[IOP_NUM_SCC]->status_ctrl); | ||
685 | } | ||
686 | if (iop_ism_present) { | ||
687 | len += sprintf(buf+len, "ISM IOP (%p): status %02X\n\n", | ||
688 | iop_base[IOP_NUM_ISM], | ||
689 | (uint) iop_base[IOP_NUM_ISM]->status_ctrl); | ||
690 | } | ||
691 | |||
692 | if (iop_scc_present) { | ||
693 | len += iop_dump_one_iop(buf+len, IOP_NUM_SCC, "SCC"); | ||
694 | |||
695 | } | ||
696 | |||
697 | if (iop_ism_present) { | ||
698 | len += iop_dump_one_iop(buf+len, IOP_NUM_ISM, "ISM"); | ||
699 | |||
700 | } | ||
701 | |||
702 | if (len >= pos) { | ||
703 | if (!*start) { | ||
704 | *start = buf + pos; | ||
705 | cnt = len - pos; | ||
706 | } else { | ||
707 | cnt += len; | ||
708 | } | ||
709 | } | ||
710 | return (count > cnt) ? cnt : count; | ||
711 | } | ||
712 | |||
713 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/arch/m68knommu/kernel/asm-offsets.c b/arch/m68knommu/kernel/asm-offsets.c index d97b89bae53c..fd0c685a7f11 100644 --- a/arch/m68knommu/kernel/asm-offsets.c +++ b/arch/m68knommu/kernel/asm-offsets.c | |||
@@ -13,15 +13,11 @@ | |||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
16 | #include <linux/kbuild.h> | ||
16 | #include <asm/bootinfo.h> | 17 | #include <asm/bootinfo.h> |
17 | #include <asm/irq.h> | 18 | #include <asm/irq.h> |
18 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
19 | 20 | ||
20 | #define DEFINE(sym, val) \ | ||
21 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
22 | |||
23 | #define BLANK() asm volatile("\n->" : : ) | ||
24 | |||
25 | int main(void) | 21 | int main(void) |
26 | { | 22 | { |
27 | /* offsets into the task struct */ | 23 | /* offsets into the task struct */ |
diff --git a/arch/mips/basler/excite/excite_procfs.c b/arch/mips/basler/excite/excite_procfs.c index 9ee67a95f6b9..08923e6825b5 100644 --- a/arch/mips/basler/excite/excite_procfs.c +++ b/arch/mips/basler/excite/excite_procfs.c | |||
@@ -18,8 +18,9 @@ | |||
18 | * along with this program; if not, write to the Free Software | 18 | * along with this program; if not, write to the Free Software |
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
20 | */ | 20 | */ |
21 | 21 | #include <linux/module.h> | |
22 | #include <linux/proc_fs.h> | 22 | #include <linux/proc_fs.h> |
23 | #include <linux/seq_file.h> | ||
23 | #include <linux/stat.h> | 24 | #include <linux/stat.h> |
24 | #include <asm/page.h> | 25 | #include <asm/page.h> |
25 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -28,14 +29,25 @@ | |||
28 | 29 | ||
29 | #include <excite.h> | 30 | #include <excite.h> |
30 | 31 | ||
31 | static int excite_get_unit_id(char *buf, char **addr, off_t offs, int size) | 32 | static int excite_unit_id_proc_show(struct seq_file *m, void *v) |
32 | { | 33 | { |
33 | const int len = snprintf(buf, PAGE_SIZE, "%06x", unit_id); | 34 | seq_printf(m, "%06x", unit_id); |
34 | const int w = len - offs; | 35 | return 0; |
35 | *addr = buf + offs; | ||
36 | return w < size ? w : size; | ||
37 | } | 36 | } |
38 | 37 | ||
38 | static int excite_unit_id_proc_open(struct inode *inode, struct file *file) | ||
39 | { | ||
40 | return single_open(file, excite_unit_id_proc_show, NULL); | ||
41 | } | ||
42 | |||
43 | static const struct file_operations excite_unit_id_proc_fops = { | ||
44 | .owner = THIS_MODULE, | ||
45 | .open = excite_unit_id_proc_open, | ||
46 | .read = seq_read, | ||
47 | .llseek = seq_lseek, | ||
48 | .release = single_release, | ||
49 | }; | ||
50 | |||
39 | static int | 51 | static int |
40 | excite_bootrom_read(char *page, char **start, off_t off, int count, | 52 | excite_bootrom_read(char *page, char **start, off_t off, int count, |
41 | int *eof, void *data) | 53 | int *eof, void *data) |
@@ -65,12 +77,12 @@ excite_bootrom_read(char *page, char **start, off_t off, int count, | |||
65 | void excite_procfs_init(void) | 77 | void excite_procfs_init(void) |
66 | { | 78 | { |
67 | /* Create & populate /proc/excite */ | 79 | /* Create & populate /proc/excite */ |
68 | struct proc_dir_entry * const pdir = proc_mkdir("excite", &proc_root); | 80 | struct proc_dir_entry * const pdir = proc_mkdir("excite", NULL); |
69 | if (pdir) { | 81 | if (pdir) { |
70 | struct proc_dir_entry * e; | 82 | struct proc_dir_entry * e; |
71 | 83 | ||
72 | e = create_proc_info_entry("unit_id", S_IRUGO, pdir, | 84 | e = proc_create("unit_id", S_IRUGO, pdir, |
73 | excite_get_unit_id); | 85 | &excite_unit_id_proc_fops); |
74 | if (e) e->size = 6; | 86 | if (e) e->size = 6; |
75 | 87 | ||
76 | e = create_proc_read_entry("bootrom", S_IRUGO, pdir, | 88 | e = create_proc_read_entry("bootrom", S_IRUGO, pdir, |
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c index 5bf03b3c4150..72942226fcdd 100644 --- a/arch/mips/kernel/asm-offsets.c +++ b/arch/mips/kernel/asm-offsets.c | |||
@@ -13,327 +13,285 @@ | |||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/mm.h> | 14 | #include <linux/mm.h> |
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | 16 | #include <linux/kbuild.h> | |
17 | #include <asm/ptrace.h> | 17 | #include <asm/ptrace.h> |
18 | #include <asm/processor.h> | 18 | #include <asm/processor.h> |
19 | 19 | ||
20 | #define text(t) __asm__("\n->#" t) | ||
21 | #define _offset(type, member) (&(((type *)NULL)->member)) | ||
22 | #define offset(string, ptr, member) \ | ||
23 | __asm__("\n->" string " %0" : : "i" (_offset(ptr, member))) | ||
24 | #define constant(string, member) \ | ||
25 | __asm__("\n->" string " %0" : : "ri" (member)) | ||
26 | #define size(string, size) \ | ||
27 | __asm__("\n->" string " %0" : : "i" (sizeof(size))) | ||
28 | #define linefeed text("") | ||
29 | |||
30 | void output_ptreg_defines(void) | 20 | void output_ptreg_defines(void) |
31 | { | 21 | { |
32 | text("MIPS pt_regs offsets."); | 22 | COMMENT("MIPS pt_regs offsets."); |
33 | offset("PT_R0", struct pt_regs, regs[0]); | 23 | OFFSET(PT_R0, pt_regs, regs[0]); |
34 | offset("PT_R1", struct pt_regs, regs[1]); | 24 | OFFSET(PT_R1, pt_regs, regs[1]); |
35 | offset("PT_R2", struct pt_regs, regs[2]); | 25 | OFFSET(PT_R2, pt_regs, regs[2]); |
36 | offset("PT_R3", struct pt_regs, regs[3]); | 26 | OFFSET(PT_R3, pt_regs, regs[3]); |
37 | offset("PT_R4", struct pt_regs, regs[4]); | 27 | OFFSET(PT_R4, pt_regs, regs[4]); |
38 | offset("PT_R5", struct pt_regs, regs[5]); | 28 | OFFSET(PT_R5, pt_regs, regs[5]); |
39 | offset("PT_R6", struct pt_regs, regs[6]); | 29 | OFFSET(PT_R6, pt_regs, regs[6]); |
40 | offset("PT_R7", struct pt_regs, regs[7]); | 30 | OFFSET(PT_R7, pt_regs, regs[7]); |
41 | offset("PT_R8", struct pt_regs, regs[8]); | 31 | OFFSET(PT_R8, pt_regs, regs[8]); |
42 | offset("PT_R9", struct pt_regs, regs[9]); | 32 | OFFSET(PT_R9, pt_regs, regs[9]); |
43 | offset("PT_R10", struct pt_regs, regs[10]); | 33 | OFFSET(PT_R10, pt_regs, regs[10]); |
44 | offset("PT_R11", struct pt_regs, regs[11]); | 34 | OFFSET(PT_R11, pt_regs, regs[11]); |
45 | offset("PT_R12", struct pt_regs, regs[12]); | 35 | OFFSET(PT_R12, pt_regs, regs[12]); |
46 | offset("PT_R13", struct pt_regs, regs[13]); | 36 | OFFSET(PT_R13, pt_regs, regs[13]); |
47 | offset("PT_R14", struct pt_regs, regs[14]); | 37 | OFFSET(PT_R14, pt_regs, regs[14]); |
48 | offset("PT_R15", struct pt_regs, regs[15]); | 38 | OFFSET(PT_R15, pt_regs, regs[15]); |
49 | offset("PT_R16", struct pt_regs, regs[16]); | 39 | OFFSET(PT_R16, pt_regs, regs[16]); |
50 | offset("PT_R17", struct pt_regs, regs[17]); | 40 | OFFSET(PT_R17, pt_regs, regs[17]); |
51 | offset("PT_R18", struct pt_regs, regs[18]); | 41 | OFFSET(PT_R18, pt_regs, regs[18]); |
52 | offset("PT_R19", struct pt_regs, regs[19]); | 42 | OFFSET(PT_R19, pt_regs, regs[19]); |
53 | offset("PT_R20", struct pt_regs, regs[20]); | 43 | OFFSET(PT_R20, pt_regs, regs[20]); |
54 | offset("PT_R21", struct pt_regs, regs[21]); | 44 | OFFSET(PT_R21, pt_regs, regs[21]); |
55 | offset("PT_R22", struct pt_regs, regs[22]); | 45 | OFFSET(PT_R22, pt_regs, regs[22]); |
56 | offset("PT_R23", struct pt_regs, regs[23]); | 46 | OFFSET(PT_R23, pt_regs, regs[23]); |
57 | offset("PT_R24", struct pt_regs, regs[24]); | 47 | OFFSET(PT_R24, pt_regs, regs[24]); |
58 | offset("PT_R25", struct pt_regs, regs[25]); | 48 | OFFSET(PT_R25, pt_regs, regs[25]); |
59 | offset("PT_R26", struct pt_regs, regs[26]); | 49 | OFFSET(PT_R26, pt_regs, regs[26]); |
60 | offset("PT_R27", struct pt_regs, regs[27]); | 50 | OFFSET(PT_R27, pt_regs, regs[27]); |
61 | offset("PT_R28", struct pt_regs, regs[28]); | 51 | OFFSET(PT_R28, pt_regs, regs[28]); |
62 | offset("PT_R29", struct pt_regs, regs[29]); | 52 | OFFSET(PT_R29, pt_regs, regs[29]); |
63 | offset("PT_R30", struct pt_regs, regs[30]); | 53 | OFFSET(PT_R30, pt_regs, regs[30]); |
64 | offset("PT_R31", struct pt_regs, regs[31]); | 54 | OFFSET(PT_R31, pt_regs, regs[31]); |
65 | offset("PT_LO", struct pt_regs, lo); | 55 | OFFSET(PT_LO, pt_regs, lo); |
66 | offset("PT_HI", struct pt_regs, hi); | 56 | OFFSET(PT_HI, pt_regs, hi); |
67 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | 57 | #ifdef CONFIG_CPU_HAS_SMARTMIPS |
68 | offset("PT_ACX", struct pt_regs, acx); | 58 | OFFSET(PT_ACX, pt_regs, acx); |
69 | #endif | 59 | #endif |
70 | offset("PT_EPC", struct pt_regs, cp0_epc); | 60 | OFFSET(PT_EPC, pt_regs, cp0_epc); |
71 | offset("PT_BVADDR", struct pt_regs, cp0_badvaddr); | 61 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); |
72 | offset("PT_STATUS", struct pt_regs, cp0_status); | 62 | OFFSET(PT_STATUS, pt_regs, cp0_status); |
73 | offset("PT_CAUSE", struct pt_regs, cp0_cause); | 63 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); |
74 | #ifdef CONFIG_MIPS_MT_SMTC | 64 | #ifdef CONFIG_MIPS_MT_SMTC |
75 | offset("PT_TCSTATUS", struct pt_regs, cp0_tcstatus); | 65 | OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus); |
76 | #endif /* CONFIG_MIPS_MT_SMTC */ | 66 | #endif /* CONFIG_MIPS_MT_SMTC */ |
77 | size("PT_SIZE", struct pt_regs); | 67 | DEFINE(PT_SIZE, sizeof(struct pt_regs)); |
78 | linefeed; | 68 | BLANK(); |
79 | } | 69 | } |
80 | 70 | ||
81 | void output_task_defines(void) | 71 | void output_task_defines(void) |
82 | { | 72 | { |
83 | text("MIPS task_struct offsets."); | 73 | COMMENT("MIPS task_struct offsets."); |
84 | offset("TASK_STATE", struct task_struct, state); | 74 | OFFSET(TASK_STATE, task_struct, state); |
85 | offset("TASK_THREAD_INFO", struct task_struct, stack); | 75 | OFFSET(TASK_THREAD_INFO, task_struct, stack); |
86 | offset("TASK_FLAGS", struct task_struct, flags); | 76 | OFFSET(TASK_FLAGS, task_struct, flags); |
87 | offset("TASK_MM", struct task_struct, mm); | 77 | OFFSET(TASK_MM, task_struct, mm); |
88 | offset("TASK_PID", struct task_struct, pid); | 78 | OFFSET(TASK_PID, task_struct, pid); |
89 | size( "TASK_STRUCT_SIZE", struct task_struct); | 79 | DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); |
90 | linefeed; | 80 | BLANK(); |
91 | } | 81 | } |
92 | 82 | ||
93 | void output_thread_info_defines(void) | 83 | void output_thread_info_defines(void) |
94 | { | 84 | { |
95 | text("MIPS thread_info offsets."); | 85 | COMMENT("MIPS thread_info offsets."); |
96 | offset("TI_TASK", struct thread_info, task); | 86 | OFFSET(TI_TASK, thread_info, task); |
97 | offset("TI_EXEC_DOMAIN", struct thread_info, exec_domain); | 87 | OFFSET(TI_EXEC_DOMAIN, thread_info, exec_domain); |
98 | offset("TI_FLAGS", struct thread_info, flags); | 88 | OFFSET(TI_FLAGS, thread_info, flags); |
99 | offset("TI_TP_VALUE", struct thread_info, tp_value); | 89 | OFFSET(TI_TP_VALUE, thread_info, tp_value); |
100 | offset("TI_CPU", struct thread_info, cpu); | 90 | OFFSET(TI_CPU, thread_info, cpu); |
101 | offset("TI_PRE_COUNT", struct thread_info, preempt_count); | 91 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); |
102 | offset("TI_ADDR_LIMIT", struct thread_info, addr_limit); | 92 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); |
103 | offset("TI_RESTART_BLOCK", struct thread_info, restart_block); | 93 | OFFSET(TI_RESTART_BLOCK, thread_info, restart_block); |
104 | offset("TI_REGS", struct thread_info, regs); | 94 | OFFSET(TI_REGS, thread_info, regs); |
105 | constant("_THREAD_SIZE", THREAD_SIZE); | 95 | DEFINE(_THREAD_SIZE, THREAD_SIZE); |
106 | constant("_THREAD_MASK", THREAD_MASK); | 96 | DEFINE(_THREAD_MASK, THREAD_MASK); |
107 | linefeed; | 97 | BLANK(); |
108 | } | 98 | } |
109 | 99 | ||
110 | void output_thread_defines(void) | 100 | void output_thread_defines(void) |
111 | { | 101 | { |
112 | text("MIPS specific thread_struct offsets."); | 102 | COMMENT("MIPS specific thread_struct offsets."); |
113 | offset("THREAD_REG16", struct task_struct, thread.reg16); | 103 | OFFSET(THREAD_REG16, task_struct, thread.reg16); |
114 | offset("THREAD_REG17", struct task_struct, thread.reg17); | 104 | OFFSET(THREAD_REG17, task_struct, thread.reg17); |
115 | offset("THREAD_REG18", struct task_struct, thread.reg18); | 105 | OFFSET(THREAD_REG18, task_struct, thread.reg18); |
116 | offset("THREAD_REG19", struct task_struct, thread.reg19); | 106 | OFFSET(THREAD_REG19, task_struct, thread.reg19); |
117 | offset("THREAD_REG20", struct task_struct, thread.reg20); | 107 | OFFSET(THREAD_REG20, task_struct, thread.reg20); |
118 | offset("THREAD_REG21", struct task_struct, thread.reg21); | 108 | OFFSET(THREAD_REG21, task_struct, thread.reg21); |
119 | offset("THREAD_REG22", struct task_struct, thread.reg22); | 109 | OFFSET(THREAD_REG22, task_struct, thread.reg22); |
120 | offset("THREAD_REG23", struct task_struct, thread.reg23); | 110 | OFFSET(THREAD_REG23, task_struct, thread.reg23); |
121 | offset("THREAD_REG29", struct task_struct, thread.reg29); | 111 | OFFSET(THREAD_REG29, task_struct, thread.reg29); |
122 | offset("THREAD_REG30", struct task_struct, thread.reg30); | 112 | OFFSET(THREAD_REG30, task_struct, thread.reg30); |
123 | offset("THREAD_REG31", struct task_struct, thread.reg31); | 113 | OFFSET(THREAD_REG31, task_struct, thread.reg31); |
124 | offset("THREAD_STATUS", struct task_struct, | 114 | OFFSET(THREAD_STATUS, task_struct, |
125 | thread.cp0_status); | 115 | thread.cp0_status); |
126 | offset("THREAD_FPU", struct task_struct, thread.fpu); | 116 | OFFSET(THREAD_FPU, task_struct, thread.fpu); |
127 | 117 | ||
128 | offset("THREAD_BVADDR", struct task_struct, \ | 118 | OFFSET(THREAD_BVADDR, task_struct, \ |
129 | thread.cp0_badvaddr); | 119 | thread.cp0_badvaddr); |
130 | offset("THREAD_BUADDR", struct task_struct, \ | 120 | OFFSET(THREAD_BUADDR, task_struct, \ |
131 | thread.cp0_baduaddr); | 121 | thread.cp0_baduaddr); |
132 | offset("THREAD_ECODE", struct task_struct, \ | 122 | OFFSET(THREAD_ECODE, task_struct, \ |
133 | thread.error_code); | 123 | thread.error_code); |
134 | offset("THREAD_TRAPNO", struct task_struct, thread.trap_no); | 124 | OFFSET(THREAD_TRAPNO, task_struct, thread.trap_no); |
135 | offset("THREAD_TRAMP", struct task_struct, \ | 125 | OFFSET(THREAD_TRAMP, task_struct, \ |
136 | thread.irix_trampoline); | 126 | thread.irix_trampoline); |
137 | offset("THREAD_OLDCTX", struct task_struct, \ | 127 | OFFSET(THREAD_OLDCTX, task_struct, \ |
138 | thread.irix_oldctx); | 128 | thread.irix_oldctx); |
139 | linefeed; | 129 | BLANK(); |
140 | } | 130 | } |
141 | 131 | ||
142 | void output_thread_fpu_defines(void) | 132 | void output_thread_fpu_defines(void) |
143 | { | 133 | { |
144 | offset("THREAD_FPR0", | 134 | OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); |
145 | struct task_struct, thread.fpu.fpr[0]); | 135 | OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); |
146 | offset("THREAD_FPR1", | 136 | OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); |
147 | struct task_struct, thread.fpu.fpr[1]); | 137 | OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); |
148 | offset("THREAD_FPR2", | 138 | OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); |
149 | struct task_struct, thread.fpu.fpr[2]); | 139 | OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); |
150 | offset("THREAD_FPR3", | 140 | OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); |
151 | struct task_struct, thread.fpu.fpr[3]); | 141 | OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); |
152 | offset("THREAD_FPR4", | 142 | OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); |
153 | struct task_struct, thread.fpu.fpr[4]); | 143 | OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); |
154 | offset("THREAD_FPR5", | 144 | OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); |
155 | struct task_struct, thread.fpu.fpr[5]); | 145 | OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); |
156 | offset("THREAD_FPR6", | 146 | OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); |
157 | struct task_struct, thread.fpu.fpr[6]); | 147 | OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); |
158 | offset("THREAD_FPR7", | 148 | OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); |
159 | struct task_struct, thread.fpu.fpr[7]); | 149 | OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); |
160 | offset("THREAD_FPR8", | 150 | OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); |
161 | struct task_struct, thread.fpu.fpr[8]); | 151 | OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); |
162 | offset("THREAD_FPR9", | 152 | OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); |
163 | struct task_struct, thread.fpu.fpr[9]); | 153 | OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); |
164 | offset("THREAD_FPR10", | 154 | OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); |
165 | struct task_struct, thread.fpu.fpr[10]); | 155 | OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); |
166 | offset("THREAD_FPR11", | 156 | OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); |
167 | struct task_struct, thread.fpu.fpr[11]); | 157 | OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); |
168 | offset("THREAD_FPR12", | 158 | OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); |
169 | struct task_struct, thread.fpu.fpr[12]); | 159 | OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); |
170 | offset("THREAD_FPR13", | 160 | OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); |
171 | struct task_struct, thread.fpu.fpr[13]); | 161 | OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); |
172 | offset("THREAD_FPR14", | 162 | OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); |
173 | struct task_struct, thread.fpu.fpr[14]); | 163 | OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); |
174 | offset("THREAD_FPR15", | 164 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); |
175 | struct task_struct, thread.fpu.fpr[15]); | 165 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); |
176 | offset("THREAD_FPR16", | ||
177 | struct task_struct, thread.fpu.fpr[16]); | ||
178 | offset("THREAD_FPR17", | ||
179 | struct task_struct, thread.fpu.fpr[17]); | ||
180 | offset("THREAD_FPR18", | ||
181 | struct task_struct, thread.fpu.fpr[18]); | ||
182 | offset("THREAD_FPR19", | ||
183 | struct task_struct, thread.fpu.fpr[19]); | ||
184 | offset("THREAD_FPR20", | ||
185 | struct task_struct, thread.fpu.fpr[20]); | ||
186 | offset("THREAD_FPR21", | ||
187 | struct task_struct, thread.fpu.fpr[21]); | ||
188 | offset("THREAD_FPR22", | ||
189 | struct task_struct, thread.fpu.fpr[22]); | ||
190 | offset("THREAD_FPR23", | ||
191 | struct task_struct, thread.fpu.fpr[23]); | ||
192 | offset("THREAD_FPR24", | ||
193 | struct task_struct, thread.fpu.fpr[24]); | ||
194 | offset("THREAD_FPR25", | ||
195 | struct task_struct, thread.fpu.fpr[25]); | ||
196 | offset("THREAD_FPR26", | ||
197 | struct task_struct, thread.fpu.fpr[26]); | ||
198 | offset("THREAD_FPR27", | ||
199 | struct task_struct, thread.fpu.fpr[27]); | ||
200 | offset("THREAD_FPR28", | ||
201 | struct task_struct, thread.fpu.fpr[28]); | ||
202 | offset("THREAD_FPR29", | ||
203 | struct task_struct, thread.fpu.fpr[29]); | ||
204 | offset("THREAD_FPR30", | ||
205 | struct task_struct, thread.fpu.fpr[30]); | ||
206 | offset("THREAD_FPR31", | ||
207 | struct task_struct, thread.fpu.fpr[31]); | ||
208 | 166 | ||
209 | offset("THREAD_FCR31", | 167 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); |
210 | struct task_struct, thread.fpu.fcr31); | 168 | BLANK(); |
211 | linefeed; | ||
212 | } | 169 | } |
213 | 170 | ||
214 | void output_mm_defines(void) | 171 | void output_mm_defines(void) |
215 | { | 172 | { |
216 | text("Size of struct page"); | 173 | COMMENT("Size of struct page"); |
217 | size("STRUCT_PAGE_SIZE", struct page); | 174 | DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); |
218 | linefeed; | 175 | BLANK(); |
219 | text("Linux mm_struct offsets."); | 176 | COMMENT("Linux mm_struct offsets."); |
220 | offset("MM_USERS", struct mm_struct, mm_users); | 177 | OFFSET(MM_USERS, mm_struct, mm_users); |
221 | offset("MM_PGD", struct mm_struct, pgd); | 178 | OFFSET(MM_PGD, mm_struct, pgd); |
222 | offset("MM_CONTEXT", struct mm_struct, context); | 179 | OFFSET(MM_CONTEXT, mm_struct, context); |
223 | linefeed; | 180 | BLANK(); |
224 | constant("_PAGE_SIZE", PAGE_SIZE); | 181 | DEFINE(_PAGE_SIZE, PAGE_SIZE); |
225 | constant("_PAGE_SHIFT", PAGE_SHIFT); | 182 | DEFINE(_PAGE_SHIFT, PAGE_SHIFT); |
226 | linefeed; | 183 | BLANK(); |
227 | constant("_PGD_T_SIZE", sizeof(pgd_t)); | 184 | DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); |
228 | constant("_PMD_T_SIZE", sizeof(pmd_t)); | 185 | DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); |
229 | constant("_PTE_T_SIZE", sizeof(pte_t)); | 186 | DEFINE(_PTE_T_SIZE, sizeof(pte_t)); |
230 | linefeed; | 187 | BLANK(); |
231 | constant("_PGD_T_LOG2", PGD_T_LOG2); | 188 | DEFINE(_PGD_T_LOG2, PGD_T_LOG2); |
232 | constant("_PMD_T_LOG2", PMD_T_LOG2); | 189 | DEFINE(_PMD_T_LOG2, PMD_T_LOG2); |
233 | constant("_PTE_T_LOG2", PTE_T_LOG2); | 190 | DEFINE(_PTE_T_LOG2, PTE_T_LOG2); |
234 | linefeed; | 191 | BLANK(); |
235 | constant("_PGD_ORDER", PGD_ORDER); | 192 | DEFINE(_PGD_ORDER, PGD_ORDER); |
236 | constant("_PMD_ORDER", PMD_ORDER); | 193 | DEFINE(_PMD_ORDER, PMD_ORDER); |
237 | constant("_PTE_ORDER", PTE_ORDER); | 194 | DEFINE(_PTE_ORDER, PTE_ORDER); |
238 | linefeed; | 195 | BLANK(); |
239 | constant("_PMD_SHIFT", PMD_SHIFT); | 196 | DEFINE(_PMD_SHIFT, PMD_SHIFT); |
240 | constant("_PGDIR_SHIFT", PGDIR_SHIFT); | 197 | DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); |
241 | linefeed; | 198 | BLANK(); |
242 | constant("_PTRS_PER_PGD", PTRS_PER_PGD); | 199 | DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); |
243 | constant("_PTRS_PER_PMD", PTRS_PER_PMD); | 200 | DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); |
244 | constant("_PTRS_PER_PTE", PTRS_PER_PTE); | 201 | DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); |
245 | linefeed; | 202 | BLANK(); |
246 | } | 203 | } |
247 | 204 | ||
248 | #ifdef CONFIG_32BIT | 205 | #ifdef CONFIG_32BIT |
249 | void output_sc_defines(void) | 206 | void output_sc_defines(void) |
250 | { | 207 | { |
251 | text("Linux sigcontext offsets."); | 208 | COMMENT("Linux sigcontext offsets."); |
252 | offset("SC_REGS", struct sigcontext, sc_regs); | 209 | OFFSET(SC_REGS, sigcontext, sc_regs); |
253 | offset("SC_FPREGS", struct sigcontext, sc_fpregs); | 210 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); |
254 | offset("SC_ACX", struct sigcontext, sc_acx); | 211 | OFFSET(SC_ACX, sigcontext, sc_acx); |
255 | offset("SC_MDHI", struct sigcontext, sc_mdhi); | 212 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); |
256 | offset("SC_MDLO", struct sigcontext, sc_mdlo); | 213 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); |
257 | offset("SC_PC", struct sigcontext, sc_pc); | 214 | OFFSET(SC_PC, sigcontext, sc_pc); |
258 | offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr); | 215 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); |
259 | offset("SC_FPC_EIR", struct sigcontext, sc_fpc_eir); | 216 | OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); |
260 | offset("SC_HI1", struct sigcontext, sc_hi1); | 217 | OFFSET(SC_HI1, sigcontext, sc_hi1); |
261 | offset("SC_LO1", struct sigcontext, sc_lo1); | 218 | OFFSET(SC_LO1, sigcontext, sc_lo1); |
262 | offset("SC_HI2", struct sigcontext, sc_hi2); | 219 | OFFSET(SC_HI2, sigcontext, sc_hi2); |
263 | offset("SC_LO2", struct sigcontext, sc_lo2); | 220 | OFFSET(SC_LO2, sigcontext, sc_lo2); |
264 | offset("SC_HI3", struct sigcontext, sc_hi3); | 221 | OFFSET(SC_HI3, sigcontext, sc_hi3); |
265 | offset("SC_LO3", struct sigcontext, sc_lo3); | 222 | OFFSET(SC_LO3, sigcontext, sc_lo3); |
266 | linefeed; | 223 | BLANK(); |
267 | } | 224 | } |
268 | #endif | 225 | #endif |
269 | 226 | ||
270 | #ifdef CONFIG_64BIT | 227 | #ifdef CONFIG_64BIT |
271 | void output_sc_defines(void) | 228 | void output_sc_defines(void) |
272 | { | 229 | { |
273 | text("Linux sigcontext offsets."); | 230 | COMMENT("Linux sigcontext offsets."); |
274 | offset("SC_REGS", struct sigcontext, sc_regs); | 231 | OFFSET(SC_REGS, sigcontext, sc_regs); |
275 | offset("SC_FPREGS", struct sigcontext, sc_fpregs); | 232 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); |
276 | offset("SC_MDHI", struct sigcontext, sc_mdhi); | 233 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); |
277 | offset("SC_MDLO", struct sigcontext, sc_mdlo); | 234 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); |
278 | offset("SC_PC", struct sigcontext, sc_pc); | 235 | OFFSET(SC_PC, sigcontext, sc_pc); |
279 | offset("SC_FPC_CSR", struct sigcontext, sc_fpc_csr); | 236 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); |
280 | linefeed; | 237 | BLANK(); |
281 | } | 238 | } |
282 | #endif | 239 | #endif |
283 | 240 | ||
284 | #ifdef CONFIG_MIPS32_COMPAT | 241 | #ifdef CONFIG_MIPS32_COMPAT |
285 | void output_sc32_defines(void) | 242 | void output_sc32_defines(void) |
286 | { | 243 | { |
287 | text("Linux 32-bit sigcontext offsets."); | 244 | COMMENT("Linux 32-bit sigcontext offsets."); |
288 | offset("SC32_FPREGS", struct sigcontext32, sc_fpregs); | 245 | OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); |
289 | offset("SC32_FPC_CSR", struct sigcontext32, sc_fpc_csr); | 246 | OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); |
290 | offset("SC32_FPC_EIR", struct sigcontext32, sc_fpc_eir); | 247 | OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); |
291 | linefeed; | 248 | BLANK(); |
292 | } | 249 | } |
293 | #endif | 250 | #endif |
294 | 251 | ||
295 | void output_signal_defined(void) | 252 | void output_signal_defined(void) |
296 | { | 253 | { |
297 | text("Linux signal numbers."); | 254 | COMMENT("Linux signal numbers."); |
298 | constant("_SIGHUP", SIGHUP); | 255 | DEFINE(_SIGHUP, SIGHUP); |
299 | constant("_SIGINT", SIGINT); | 256 | DEFINE(_SIGINT, SIGINT); |
300 | constant("_SIGQUIT", SIGQUIT); | 257 | DEFINE(_SIGQUIT, SIGQUIT); |
301 | constant("_SIGILL", SIGILL); | 258 | DEFINE(_SIGILL, SIGILL); |
302 | constant("_SIGTRAP", SIGTRAP); | 259 | DEFINE(_SIGTRAP, SIGTRAP); |
303 | constant("_SIGIOT", SIGIOT); | 260 | DEFINE(_SIGIOT, SIGIOT); |
304 | constant("_SIGABRT", SIGABRT); | 261 | DEFINE(_SIGABRT, SIGABRT); |
305 | constant("_SIGEMT", SIGEMT); | 262 | DEFINE(_SIGEMT, SIGEMT); |
306 | constant("_SIGFPE", SIGFPE); | 263 | DEFINE(_SIGFPE, SIGFPE); |
307 | constant("_SIGKILL", SIGKILL); | 264 | DEFINE(_SIGKILL, SIGKILL); |
308 | constant("_SIGBUS", SIGBUS); | 265 | DEFINE(_SIGBUS, SIGBUS); |
309 | constant("_SIGSEGV", SIGSEGV); | 266 | DEFINE(_SIGSEGV, SIGSEGV); |
310 | constant("_SIGSYS", SIGSYS); | 267 | DEFINE(_SIGSYS, SIGSYS); |
311 | constant("_SIGPIPE", SIGPIPE); | 268 | DEFINE(_SIGPIPE, SIGPIPE); |
312 | constant("_SIGALRM", SIGALRM); | 269 | DEFINE(_SIGALRM, SIGALRM); |
313 | constant("_SIGTERM", SIGTERM); | 270 | DEFINE(_SIGTERM, SIGTERM); |
314 | constant("_SIGUSR1", SIGUSR1); | 271 | DEFINE(_SIGUSR1, SIGUSR1); |
315 | constant("_SIGUSR2", SIGUSR2); | 272 | DEFINE(_SIGUSR2, SIGUSR2); |
316 | constant("_SIGCHLD", SIGCHLD); | 273 | DEFINE(_SIGCHLD, SIGCHLD); |
317 | constant("_SIGPWR", SIGPWR); | 274 | DEFINE(_SIGPWR, SIGPWR); |
318 | constant("_SIGWINCH", SIGWINCH); | 275 | DEFINE(_SIGWINCH, SIGWINCH); |
319 | constant("_SIGURG", SIGURG); | 276 | DEFINE(_SIGURG, SIGURG); |
320 | constant("_SIGIO", SIGIO); | 277 | DEFINE(_SIGIO, SIGIO); |
321 | constant("_SIGSTOP", SIGSTOP); | 278 | DEFINE(_SIGSTOP, SIGSTOP); |
322 | constant("_SIGTSTP", SIGTSTP); | 279 | DEFINE(_SIGTSTP, SIGTSTP); |
323 | constant("_SIGCONT", SIGCONT); | 280 | DEFINE(_SIGCONT, SIGCONT); |
324 | constant("_SIGTTIN", SIGTTIN); | 281 | DEFINE(_SIGTTIN, SIGTTIN); |
325 | constant("_SIGTTOU", SIGTTOU); | 282 | DEFINE(_SIGTTOU, SIGTTOU); |
326 | constant("_SIGVTALRM", SIGVTALRM); | 283 | DEFINE(_SIGVTALRM, SIGVTALRM); |
327 | constant("_SIGPROF", SIGPROF); | 284 | DEFINE(_SIGPROF, SIGPROF); |
328 | constant("_SIGXCPU", SIGXCPU); | 285 | DEFINE(_SIGXCPU, SIGXCPU); |
329 | constant("_SIGXFSZ", SIGXFSZ); | 286 | DEFINE(_SIGXFSZ, SIGXFSZ); |
330 | linefeed; | 287 | BLANK(); |
331 | } | 288 | } |
332 | 289 | ||
333 | void output_irq_cpustat_t_defines(void) | 290 | void output_irq_cpustat_t_defines(void) |
334 | { | 291 | { |
335 | text("Linux irq_cpustat_t offsets."); | 292 | COMMENT("Linux irq_cpustat_t offsets."); |
336 | offset("IC_SOFTIRQ_PENDING", irq_cpustat_t, __softirq_pending); | 293 | DEFINE(IC_SOFTIRQ_PENDING, |
337 | size("IC_IRQ_CPUSTAT_T", irq_cpustat_t); | 294 | offsetof(irq_cpustat_t, __softirq_pending)); |
338 | linefeed; | 295 | DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t)); |
296 | BLANK(); | ||
339 | } | 297 | } |
diff --git a/arch/mips/lib/iomap-pci.c b/arch/mips/lib/iomap-pci.c index c11b2494bb6e..2ab899c4b4ce 100644 --- a/arch/mips/lib/iomap-pci.c +++ b/arch/mips/lib/iomap-pci.c | |||
@@ -45,8 +45,8 @@ static void __iomem *ioport_map_pci(struct pci_dev *dev, | |||
45 | */ | 45 | */ |
46 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 46 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
47 | { | 47 | { |
48 | unsigned long start = pci_resource_start(dev, bar); | 48 | resource_size_t start = pci_resource_start(dev, bar); |
49 | unsigned long len = pci_resource_len(dev, bar); | 49 | resource_size_t len = pci_resource_len(dev, bar); |
50 | unsigned long flags = pci_resource_flags(dev, bar); | 50 | unsigned long flags = pci_resource_flags(dev, bar); |
51 | 51 | ||
52 | if (!len || !start) | 52 | if (!len || !start) |
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c index 855977ca51cd..6537d90a25bb 100644 --- a/arch/mips/pmc-sierra/yosemite/setup.c +++ b/arch/mips/pmc-sierra/yosemite/setup.c | |||
@@ -143,9 +143,6 @@ void __init plat_time_init(void) | |||
143 | mips_hpt_frequency = 33000000 * 3 * 5; | 143 | mips_hpt_frequency = 33000000 * 3 * 5; |
144 | } | 144 | } |
145 | 145 | ||
146 | /* No other usable initialization hook than this ... */ | ||
147 | extern void (*late_time_init)(void); | ||
148 | |||
149 | unsigned long ocd_base; | 146 | unsigned long ocd_base; |
150 | 147 | ||
151 | EXPORT_SYMBOL(ocd_base); | 148 | EXPORT_SYMBOL(ocd_base); |
diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c index ee2d9f8af5ad..2646fcbd7d89 100644 --- a/arch/mn10300/kernel/asm-offsets.c +++ b/arch/mn10300/kernel/asm-offsets.c | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/signal.h> | 8 | #include <linux/signal.h> |
9 | #include <linux/personality.h> | 9 | #include <linux/personality.h> |
10 | #include <linux/kbuild.h> | ||
10 | #include <asm/ucontext.h> | 11 | #include <asm/ucontext.h> |
11 | #include <asm/processor.h> | 12 | #include <asm/processor.h> |
12 | #include <asm/thread_info.h> | 13 | #include <asm/thread_info.h> |
@@ -14,14 +15,6 @@ | |||
14 | #include "sigframe.h" | 15 | #include "sigframe.h" |
15 | #include "mn10300-serial.h" | 16 | #include "mn10300-serial.h" |
16 | 17 | ||
17 | #define DEFINE(sym, val) \ | ||
18 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
19 | |||
20 | #define BLANK() asm volatile("\n->") | ||
21 | |||
22 | #define OFFSET(sym, str, mem) \ | ||
23 | DEFINE(sym, offsetof(struct str, mem)); | ||
24 | |||
25 | void foo(void) | 18 | void foo(void) |
26 | { | 19 | { |
27 | OFFSET(SIGCONTEXT_d0, sigcontext, d0); | 20 | OFFSET(SIGCONTEXT_d0, sigcontext, d0); |
diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c index dbceae4307da..c1a8d8f941fd 100644 --- a/arch/mn10300/unit-asb2305/pci-iomap.c +++ b/arch/mn10300/unit-asb2305/pci-iomap.c | |||
@@ -16,8 +16,8 @@ | |||
16 | */ | 16 | */ |
17 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 17 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
18 | { | 18 | { |
19 | unsigned long start = pci_resource_start(dev, bar); | 19 | resource_size_t start = pci_resource_start(dev, bar); |
20 | unsigned long len = pci_resource_len(dev, bar); | 20 | resource_size_t len = pci_resource_len(dev, bar); |
21 | unsigned long flags = pci_resource_flags(dev, bar); | 21 | unsigned long flags = pci_resource_flags(dev, bar); |
22 | 22 | ||
23 | if (!len || !start) | 23 | if (!len || !start) |
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c index eaa79bc14d94..3efc0b73e4ff 100644 --- a/arch/parisc/kernel/asm-offsets.c +++ b/arch/parisc/kernel/asm-offsets.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/thread_info.h> | 32 | #include <linux/thread_info.h> |
33 | #include <linux/ptrace.h> | 33 | #include <linux/ptrace.h> |
34 | #include <linux/hardirq.h> | 34 | #include <linux/hardirq.h> |
35 | #include <linux/kbuild.h> | ||
35 | 36 | ||
36 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
37 | #include <asm/ptrace.h> | 38 | #include <asm/ptrace.h> |
@@ -39,11 +40,6 @@ | |||
39 | #include <asm/pdc.h> | 40 | #include <asm/pdc.h> |
40 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
41 | 42 | ||
42 | #define DEFINE(sym, val) \ | ||
43 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
44 | |||
45 | #define BLANK() asm volatile("\n->" : : ) | ||
46 | |||
47 | #ifdef CONFIG_64BIT | 43 | #ifdef CONFIG_64BIT |
48 | #define FRAME_SIZE 128 | 44 | #define FRAME_SIZE 128 |
49 | #else | 45 | #else |
diff --git a/arch/parisc/kernel/pci-dma.c b/arch/parisc/kernel/pci-dma.c index 9448d4e91142..ccd61b9567a6 100644 --- a/arch/parisc/kernel/pci-dma.c +++ b/arch/parisc/kernel/pci-dma.c | |||
@@ -397,10 +397,9 @@ pcxl_dma_init(void) | |||
397 | "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); | 397 | "pcxl_dma_init: Unable to create gsc /proc dir entry\n"); |
398 | else { | 398 | else { |
399 | struct proc_dir_entry* ent; | 399 | struct proc_dir_entry* ent; |
400 | ent = create_proc_entry("pcxl_dma", 0, proc_gsc_root); | 400 | ent = proc_create("pcxl_dma", 0, proc_gsc_root, |
401 | if (ent) | 401 | &proc_pcxl_dma_ops); |
402 | ent->proc_fops = &proc_pcxl_dma_ops; | 402 | if (!ent) |
403 | else | ||
404 | printk(KERN_WARNING | 403 | printk(KERN_WARNING |
405 | "pci-dma.c: Unable to create pcxl_dma /proc entry.\n"); | 404 | "pci-dma.c: Unable to create pcxl_dma /proc entry.\n"); |
406 | } | 405 | } |
diff --git a/arch/parisc/lib/iomap.c b/arch/parisc/lib/iomap.c index f4a811690ab3..9abed07db7fc 100644 --- a/arch/parisc/lib/iomap.c +++ b/arch/parisc/lib/iomap.c | |||
@@ -438,8 +438,8 @@ void ioport_unmap(void __iomem *addr) | |||
438 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ | 438 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ |
439 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 439 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
440 | { | 440 | { |
441 | unsigned long start = pci_resource_start(dev, bar); | 441 | resource_size_t start = pci_resource_start(dev, bar); |
442 | unsigned long len = pci_resource_len(dev, bar); | 442 | resource_size_t len = pci_resource_len(dev, bar); |
443 | unsigned long flags = pci_resource_flags(dev, bar); | 443 | unsigned long flags = pci_resource_flags(dev, bar); |
444 | 444 | ||
445 | if (!len || !start) | 445 | if (!len || !start) |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 62134845af08..59044e7ed6f4 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/time.h> | 30 | #include <linux/time.h> |
31 | #include <linux/hardirq.h> | 31 | #include <linux/hardirq.h> |
32 | #endif | 32 | #endif |
33 | #include <linux/kbuild.h> | ||
33 | 34 | ||
34 | #include <asm/io.h> | 35 | #include <asm/io.h> |
35 | #include <asm/page.h> | 36 | #include <asm/page.h> |
@@ -51,11 +52,6 @@ | |||
51 | #include <asm/iseries/alpaca.h> | 52 | #include <asm/iseries/alpaca.h> |
52 | #endif | 53 | #endif |
53 | 54 | ||
54 | #define DEFINE(sym, val) \ | ||
55 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
56 | |||
57 | #define BLANK() asm volatile("\n->" : : ) | ||
58 | |||
59 | int main(void) | 55 | int main(void) |
60 | { | 56 | { |
61 | DEFINE(THREAD, offsetof(struct task_struct, thread)); | 57 | DEFINE(THREAD, offsetof(struct task_struct, thread)); |
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 1ffacc698ffb..1e656b43ad7f 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -591,10 +591,8 @@ int __init lparcfg_init(void) | |||
591 | !firmware_has_feature(FW_FEATURE_ISERIES)) | 591 | !firmware_has_feature(FW_FEATURE_ISERIES)) |
592 | mode |= S_IWUSR; | 592 | mode |= S_IWUSR; |
593 | 593 | ||
594 | ent = create_proc_entry("ppc64/lparcfg", mode, NULL); | 594 | ent = proc_create("ppc64/lparcfg", mode, NULL, &lparcfg_fops); |
595 | if (ent) { | 595 | if (!ent) { |
596 | ent->proc_fops = &lparcfg_fops; | ||
597 | } else { | ||
598 | printk(KERN_ERR "Failed to create ppc64/lparcfg\n"); | 596 | printk(KERN_ERR "Failed to create ppc64/lparcfg\n"); |
599 | return -EIO; | 597 | return -EIO; |
600 | } | 598 | } |
diff --git a/arch/powerpc/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c index f78dfce1b771..c647ddef40dc 100644 --- a/arch/powerpc/kernel/proc_ppc64.c +++ b/arch/powerpc/kernel/proc_ppc64.c | |||
@@ -68,12 +68,11 @@ static int __init proc_ppc64_init(void) | |||
68 | { | 68 | { |
69 | struct proc_dir_entry *pde; | 69 | struct proc_dir_entry *pde; |
70 | 70 | ||
71 | pde = create_proc_entry("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL); | 71 | pde = proc_create_data("ppc64/systemcfg", S_IFREG|S_IRUGO, NULL, |
72 | &page_map_fops, vdso_data); | ||
72 | if (!pde) | 73 | if (!pde) |
73 | return 1; | 74 | return 1; |
74 | pde->data = vdso_data; | ||
75 | pde->size = PAGE_SIZE; | 75 | pde->size = PAGE_SIZE; |
76 | pde->proc_fops = &page_map_fops; | ||
77 | 76 | ||
78 | return 0; | 77 | return 0; |
79 | } | 78 | } |
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c index f2e3bc714d76..f9c6abc84a94 100644 --- a/arch/powerpc/kernel/rtas-proc.c +++ b/arch/powerpc/kernel/rtas-proc.c | |||
@@ -255,8 +255,6 @@ static void check_location(struct seq_file *m, const char *c); | |||
255 | 255 | ||
256 | static int __init proc_rtas_init(void) | 256 | static int __init proc_rtas_init(void) |
257 | { | 257 | { |
258 | struct proc_dir_entry *entry; | ||
259 | |||
260 | if (!machine_is(pseries)) | 258 | if (!machine_is(pseries)) |
261 | return -ENODEV; | 259 | return -ENODEV; |
262 | 260 | ||
@@ -264,35 +262,20 @@ static int __init proc_rtas_init(void) | |||
264 | if (rtas_node == NULL) | 262 | if (rtas_node == NULL) |
265 | return -ENODEV; | 263 | return -ENODEV; |
266 | 264 | ||
267 | entry = create_proc_entry("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL); | 265 | proc_create("ppc64/rtas/progress", S_IRUGO|S_IWUSR, NULL, |
268 | if (entry) | 266 | &ppc_rtas_progress_operations); |
269 | entry->proc_fops = &ppc_rtas_progress_operations; | 267 | proc_create("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL, |
270 | 268 | &ppc_rtas_clock_operations); | |
271 | entry = create_proc_entry("ppc64/rtas/clock", S_IRUGO|S_IWUSR, NULL); | 269 | proc_create("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL, |
272 | if (entry) | 270 | &ppc_rtas_poweron_operations); |
273 | entry->proc_fops = &ppc_rtas_clock_operations; | 271 | proc_create("ppc64/rtas/sensors", S_IRUGO, NULL, |
274 | 272 | &ppc_rtas_sensors_operations); | |
275 | entry = create_proc_entry("ppc64/rtas/poweron", S_IWUSR|S_IRUGO, NULL); | 273 | proc_create("ppc64/rtas/frequency", S_IWUSR|S_IRUGO, NULL, |
276 | if (entry) | 274 | &ppc_rtas_tone_freq_operations); |
277 | entry->proc_fops = &ppc_rtas_poweron_operations; | 275 | proc_create("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL, |
278 | 276 | &ppc_rtas_tone_volume_operations); | |
279 | entry = create_proc_entry("ppc64/rtas/sensors", S_IRUGO, NULL); | 277 | proc_create("ppc64/rtas/rmo_buffer", S_IRUSR, NULL, |
280 | if (entry) | 278 | &ppc_rtas_rmo_buf_ops); |
281 | entry->proc_fops = &ppc_rtas_sensors_operations; | ||
282 | |||
283 | entry = create_proc_entry("ppc64/rtas/frequency", S_IWUSR|S_IRUGO, | ||
284 | NULL); | ||
285 | if (entry) | ||
286 | entry->proc_fops = &ppc_rtas_tone_freq_operations; | ||
287 | |||
288 | entry = create_proc_entry("ppc64/rtas/volume", S_IWUSR|S_IRUGO, NULL); | ||
289 | if (entry) | ||
290 | entry->proc_fops = &ppc_rtas_tone_volume_operations; | ||
291 | |||
292 | entry = create_proc_entry("ppc64/rtas/rmo_buffer", S_IRUSR, NULL); | ||
293 | if (entry) | ||
294 | entry->proc_fops = &ppc_rtas_rmo_buf_ops; | ||
295 | |||
296 | return 0; | 279 | return 0; |
297 | } | 280 | } |
298 | 281 | ||
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 627f126d1848..0a5e22b22729 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -704,18 +704,11 @@ static int initialize_flash_pde_data(const char *rtas_call_name, | |||
704 | static struct proc_dir_entry *create_flash_pde(const char *filename, | 704 | static struct proc_dir_entry *create_flash_pde(const char *filename, |
705 | const struct file_operations *fops) | 705 | const struct file_operations *fops) |
706 | { | 706 | { |
707 | struct proc_dir_entry *ent = NULL; | 707 | return proc_create(filename, S_IRUSR | S_IWUSR, NULL, fops); |
708 | |||
709 | ent = create_proc_entry(filename, S_IRUSR | S_IWUSR, NULL); | ||
710 | if (ent != NULL) { | ||
711 | ent->proc_fops = fops; | ||
712 | ent->owner = THIS_MODULE; | ||
713 | } | ||
714 | |||
715 | return ent; | ||
716 | } | 708 | } |
717 | 709 | ||
718 | static const struct file_operations rtas_flash_operations = { | 710 | static const struct file_operations rtas_flash_operations = { |
711 | .owner = THIS_MODULE, | ||
719 | .read = rtas_flash_read, | 712 | .read = rtas_flash_read, |
720 | .write = rtas_flash_write, | 713 | .write = rtas_flash_write, |
721 | .open = rtas_excl_open, | 714 | .open = rtas_excl_open, |
@@ -723,6 +716,7 @@ static const struct file_operations rtas_flash_operations = { | |||
723 | }; | 716 | }; |
724 | 717 | ||
725 | static const struct file_operations manage_flash_operations = { | 718 | static const struct file_operations manage_flash_operations = { |
719 | .owner = THIS_MODULE, | ||
726 | .read = manage_flash_read, | 720 | .read = manage_flash_read, |
727 | .write = manage_flash_write, | 721 | .write = manage_flash_write, |
728 | .open = rtas_excl_open, | 722 | .open = rtas_excl_open, |
@@ -730,6 +724,7 @@ static const struct file_operations manage_flash_operations = { | |||
730 | }; | 724 | }; |
731 | 725 | ||
732 | static const struct file_operations validate_flash_operations = { | 726 | static const struct file_operations validate_flash_operations = { |
727 | .owner = THIS_MODULE, | ||
733 | .read = validate_flash_read, | 728 | .read = validate_flash_read, |
734 | .write = validate_flash_write, | 729 | .write = validate_flash_write, |
735 | .open = rtas_excl_open, | 730 | .open = rtas_excl_open, |
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 00528ef84ad2..45dcd2693502 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -1063,10 +1063,9 @@ int __init spu_sched_init(void) | |||
1063 | 1063 | ||
1064 | mod_timer(&spuloadavg_timer, 0); | 1064 | mod_timer(&spuloadavg_timer, 0); |
1065 | 1065 | ||
1066 | entry = create_proc_entry("spu_loadavg", 0, NULL); | 1066 | entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops); |
1067 | if (!entry) | 1067 | if (!entry) |
1068 | goto out_stop_kthread; | 1068 | goto out_stop_kthread; |
1069 | entry->proc_fops = &spu_loadavg_fops; | ||
1070 | 1069 | ||
1071 | pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n", | 1070 | pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n", |
1072 | SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE); | 1071 | SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE); |
diff --git a/arch/powerpc/platforms/cell/spufs/sputrace.c b/arch/powerpc/platforms/cell/spufs/sputrace.c index 79aa773f3c99..aea5286f1245 100644 --- a/arch/powerpc/platforms/cell/spufs/sputrace.c +++ b/arch/powerpc/platforms/cell/spufs/sputrace.c | |||
@@ -201,10 +201,9 @@ static int __init sputrace_init(void) | |||
201 | if (!sputrace_log) | 201 | if (!sputrace_log) |
202 | goto out; | 202 | goto out; |
203 | 203 | ||
204 | entry = create_proc_entry("sputrace", S_IRUSR, NULL); | 204 | entry = proc_create("sputrace", S_IRUSR, NULL, &sputrace_fops); |
205 | if (!entry) | 205 | if (!entry) |
206 | goto out_free_log; | 206 | goto out_free_log; |
207 | entry->proc_fops = &sputrace_fops; | ||
208 | 207 | ||
209 | for (i = 0; i < ARRAY_SIZE(spu_probes); i++) { | 208 | for (i = 0; i < ARRAY_SIZE(spu_probes); i++) { |
210 | struct spu_probe *p = &spu_probes[i]; | 209 | struct spu_probe *p = &spu_probes[i]; |
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c index e5b40e3e0082..b0f8a857ec02 100644 --- a/arch/powerpc/platforms/iseries/lpevents.c +++ b/arch/powerpc/platforms/iseries/lpevents.c | |||
@@ -330,15 +330,11 @@ static const struct file_operations proc_lpevents_operations = { | |||
330 | 330 | ||
331 | static int __init proc_lpevents_init(void) | 331 | static int __init proc_lpevents_init(void) |
332 | { | 332 | { |
333 | struct proc_dir_entry *e; | ||
334 | |||
335 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 333 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
336 | return 0; | 334 | return 0; |
337 | 335 | ||
338 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); | 336 | proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL, |
339 | if (e) | 337 | &proc_lpevents_operations); |
340 | e->proc_fops = &proc_lpevents_operations; | ||
341 | |||
342 | return 0; | 338 | return 0; |
343 | } | 339 | } |
344 | __initcall(proc_lpevents_init); | 340 | __initcall(proc_lpevents_init); |
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index c0f2433bc16e..1dc7295746da 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c | |||
@@ -1255,11 +1255,11 @@ static int __init mf_proc_init(void) | |||
1255 | if (i == 3) /* no vmlinux entry for 'D' */ | 1255 | if (i == 3) /* no vmlinux entry for 'D' */ |
1256 | continue; | 1256 | continue; |
1257 | 1257 | ||
1258 | ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf); | 1258 | ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf, |
1259 | &proc_vmlinux_operations, | ||
1260 | (void *)(long)i); | ||
1259 | if (!ent) | 1261 | if (!ent) |
1260 | return 1; | 1262 | return 1; |
1261 | ent->data = (void *)(long)i; | ||
1262 | ent->proc_fops = &proc_vmlinux_operations; | ||
1263 | } | 1263 | } |
1264 | 1264 | ||
1265 | ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | 1265 | ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); |
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c index f2cde4180204..91f4c6cd4b99 100644 --- a/arch/powerpc/platforms/iseries/proc.c +++ b/arch/powerpc/platforms/iseries/proc.c | |||
@@ -110,15 +110,11 @@ static const struct file_operations proc_titantod_operations = { | |||
110 | 110 | ||
111 | static int __init iseries_proc_init(void) | 111 | static int __init iseries_proc_init(void) |
112 | { | 112 | { |
113 | struct proc_dir_entry *e; | ||
114 | |||
115 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 113 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
116 | return 0; | 114 | return 0; |
117 | 115 | ||
118 | e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL); | 116 | proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL, |
119 | if (e) | 117 | &proc_titantod_operations); |
120 | e->proc_fops = &proc_titantod_operations; | ||
121 | |||
122 | return 0; | 118 | return 0; |
123 | } | 119 | } |
124 | __initcall(iseries_proc_init); | 120 | __initcall(iseries_proc_init); |
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index df23331eb25c..49ff4dc422b7 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c | |||
@@ -180,15 +180,10 @@ static const struct file_operations proc_viopath_operations = { | |||
180 | 180 | ||
181 | static int __init vio_proc_init(void) | 181 | static int __init vio_proc_init(void) |
182 | { | 182 | { |
183 | struct proc_dir_entry *e; | ||
184 | |||
185 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 183 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
186 | return 0; | 184 | return 0; |
187 | 185 | ||
188 | e = create_proc_entry("iSeries/config", 0, NULL); | 186 | proc_create("iSeries/config", 0, NULL, &proc_viopath_operations); |
189 | if (e) | ||
190 | e->proc_fops = &proc_viopath_operations; | ||
191 | |||
192 | return 0; | 187 | return 0; |
193 | } | 188 | } |
194 | __initcall(vio_proc_init); | 189 | __initcall(vio_proc_init); |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index a3fd56b186e6..6f544ba4b37f 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -1259,14 +1259,8 @@ static const struct file_operations proc_eeh_operations = { | |||
1259 | 1259 | ||
1260 | static int __init eeh_init_proc(void) | 1260 | static int __init eeh_init_proc(void) |
1261 | { | 1261 | { |
1262 | struct proc_dir_entry *e; | 1262 | if (machine_is(pseries)) |
1263 | 1263 | proc_create("ppc64/eeh", 0, NULL, &proc_eeh_operations); | |
1264 | if (machine_is(pseries)) { | ||
1265 | e = create_proc_entry("ppc64/eeh", 0, NULL); | ||
1266 | if (e) | ||
1267 | e->proc_fops = &proc_eeh_operations; | ||
1268 | } | ||
1269 | |||
1270 | return 0; | 1264 | return 0; |
1271 | } | 1265 | } |
1272 | __initcall(eeh_init_proc); | 1266 | __initcall(eeh_init_proc); |
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index ac75c10de278..75769aae41d5 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -512,12 +512,9 @@ static int proc_ppc64_create_ofdt(void) | |||
512 | if (!machine_is(pseries)) | 512 | if (!machine_is(pseries)) |
513 | return 0; | 513 | return 0; |
514 | 514 | ||
515 | ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL); | 515 | ent = proc_create("ppc64/ofdt", S_IWUSR, NULL, &ofdt_fops); |
516 | if (ent) { | 516 | if (ent) |
517 | ent->data = NULL; | ||
518 | ent->size = 0; | 517 | ent->size = 0; |
519 | ent->proc_fops = &ofdt_fops; | ||
520 | } | ||
521 | 518 | ||
522 | return 0; | 519 | return 0; |
523 | } | 520 | } |
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c index befadd4f9524..7d3e2b0bd4d2 100644 --- a/arch/powerpc/platforms/pseries/rtasd.c +++ b/arch/powerpc/platforms/pseries/rtasd.c | |||
@@ -468,10 +468,9 @@ static int __init rtas_init(void) | |||
468 | return -ENOMEM; | 468 | return -ENOMEM; |
469 | } | 469 | } |
470 | 470 | ||
471 | entry = create_proc_entry("ppc64/rtas/error_log", S_IRUSR, NULL); | 471 | entry = proc_create("ppc64/rtas/error_log", S_IRUSR, NULL, |
472 | if (entry) | 472 | &proc_rtas_log_operations); |
473 | entry->proc_fops = &proc_rtas_log_operations; | 473 | if (!entry) |
474 | else | ||
475 | printk(KERN_ERR "Failed to create error_log proc entry\n"); | 474 | printk(KERN_ERR "Failed to create error_log proc entry\n"); |
476 | 475 | ||
477 | if (kernel_thread(rtasd, NULL, CLONE_FS) < 0) | 476 | if (kernel_thread(rtasd, NULL, CLONE_FS) < 0) |
diff --git a/arch/ppc/kernel/asm-offsets.c b/arch/ppc/kernel/asm-offsets.c index a51a17714231..8dcbdd6c2d2c 100644 --- a/arch/ppc/kernel/asm-offsets.c +++ b/arch/ppc/kernel/asm-offsets.c | |||
@@ -18,6 +18,8 @@ | |||
18 | #include <linux/suspend.h> | 18 | #include <linux/suspend.h> |
19 | #include <linux/mman.h> | 19 | #include <linux/mman.h> |
20 | #include <linux/mm.h> | 20 | #include <linux/mm.h> |
21 | #include <linux/kbuild.h> | ||
22 | |||
21 | #include <asm/io.h> | 23 | #include <asm/io.h> |
22 | #include <asm/page.h> | 24 | #include <asm/page.h> |
23 | #include <asm/pgtable.h> | 25 | #include <asm/pgtable.h> |
@@ -26,11 +28,6 @@ | |||
26 | #include <asm/thread_info.h> | 28 | #include <asm/thread_info.h> |
27 | #include <asm/vdso_datapage.h> | 29 | #include <asm/vdso_datapage.h> |
28 | 30 | ||
29 | #define DEFINE(sym, val) \ | ||
30 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
31 | |||
32 | #define BLANK() asm volatile("\n->" : : ) | ||
33 | |||
34 | int | 31 | int |
35 | main(void) | 32 | main(void) |
36 | { | 33 | { |
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c index 50ce83f20adb..df3ef6db072c 100644 --- a/arch/ppc/kernel/pci.c +++ b/arch/ppc/kernel/pci.c | |||
@@ -1121,8 +1121,8 @@ void __init pci_init_resource(struct resource *res, resource_size_t start, | |||
1121 | 1121 | ||
1122 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) | 1122 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max) |
1123 | { | 1123 | { |
1124 | unsigned long start = pci_resource_start(dev, bar); | 1124 | resource_size_t start = pci_resource_start(dev, bar); |
1125 | unsigned long len = pci_resource_len(dev, bar); | 1125 | resource_size_t len = pci_resource_len(dev, bar); |
1126 | unsigned long flags = pci_resource_flags(dev, bar); | 1126 | unsigned long flags = pci_resource_flags(dev, bar); |
1127 | 1127 | ||
1128 | if (!len) | 1128 | if (!len) |
diff --git a/arch/ppc/platforms/sbc82xx.c b/arch/ppc/platforms/sbc82xx.c index 0df6aacb8237..24f6e0694ac1 100644 --- a/arch/ppc/platforms/sbc82xx.c +++ b/arch/ppc/platforms/sbc82xx.c | |||
@@ -30,8 +30,6 @@ static void (*callback_init_IRQ)(void); | |||
30 | 30 | ||
31 | extern unsigned char __res[sizeof(bd_t)]; | 31 | extern unsigned char __res[sizeof(bd_t)]; |
32 | 32 | ||
33 | extern void (*late_time_init)(void); | ||
34 | |||
35 | #ifdef CONFIG_GEN_RTC | 33 | #ifdef CONFIG_GEN_RTC |
36 | TODC_ALLOC(); | 34 | TODC_ALLOC(); |
37 | 35 | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 1375f8a4469e..fa28ecae636b 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -5,44 +5,38 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | 8 | #include <linux/kbuild.h> | |
9 | /* Use marker if you need to separate the values later */ | ||
10 | |||
11 | #define DEFINE(sym, val, marker) \ | ||
12 | asm volatile("\n->" #sym " %0 " #val " " #marker : : "i" (val)) | ||
13 | |||
14 | #define BLANK() asm volatile("\n->" : : ) | ||
15 | 9 | ||
16 | int main(void) | 10 | int main(void) |
17 | { | 11 | { |
18 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack),); | 12 | DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); |
19 | DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp),); | 13 | DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); |
20 | DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info),); | 14 | DEFINE(__THREAD_per, offsetof(struct task_struct, thread.per_info)); |
21 | DEFINE(__THREAD_mm_segment, | 15 | DEFINE(__THREAD_mm_segment, |
22 | offsetof(struct task_struct, thread.mm_segment),); | 16 | offsetof(struct task_struct, thread.mm_segment)); |
23 | BLANK(); | 17 | BLANK(); |
24 | DEFINE(__TASK_pid, offsetof(struct task_struct, pid),); | 18 | DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); |
25 | BLANK(); | 19 | BLANK(); |
26 | DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid),); | 20 | DEFINE(__PER_atmid, offsetof(per_struct, lowcore.words.perc_atmid)); |
27 | DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address),); | 21 | DEFINE(__PER_address, offsetof(per_struct, lowcore.words.address)); |
28 | DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id),); | 22 | DEFINE(__PER_access_id, offsetof(per_struct, lowcore.words.access_id)); |
29 | BLANK(); | 23 | BLANK(); |
30 | DEFINE(__TI_task, offsetof(struct thread_info, task),); | 24 | DEFINE(__TI_task, offsetof(struct thread_info, task)); |
31 | DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain),); | 25 | DEFINE(__TI_domain, offsetof(struct thread_info, exec_domain)); |
32 | DEFINE(__TI_flags, offsetof(struct thread_info, flags),); | 26 | DEFINE(__TI_flags, offsetof(struct thread_info, flags)); |
33 | DEFINE(__TI_cpu, offsetof(struct thread_info, cpu),); | 27 | DEFINE(__TI_cpu, offsetof(struct thread_info, cpu)); |
34 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count),); | 28 | DEFINE(__TI_precount, offsetof(struct thread_info, preempt_count)); |
35 | BLANK(); | 29 | BLANK(); |
36 | DEFINE(__PT_ARGS, offsetof(struct pt_regs, args),); | 30 | DEFINE(__PT_ARGS, offsetof(struct pt_regs, args)); |
37 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw),); | 31 | DEFINE(__PT_PSW, offsetof(struct pt_regs, psw)); |
38 | DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs),); | 32 | DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs)); |
39 | DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2),); | 33 | DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2)); |
40 | DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc),); | 34 | DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc)); |
41 | DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap),); | 35 | DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap)); |
42 | DEFINE(__PT_SIZE, sizeof(struct pt_regs),); | 36 | DEFINE(__PT_SIZE, sizeof(struct pt_regs)); |
43 | BLANK(); | 37 | BLANK(); |
44 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain),); | 38 | DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain)); |
45 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs),); | 39 | DEFINE(__SF_GPRS, offsetof(struct stack_frame, gprs)); |
46 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1),); | 40 | DEFINE(__SF_EMPTY, offsetof(struct stack_frame, empty1)); |
47 | return 0; | 41 | return 0; |
48 | } | 42 | } |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index c36d8123ca14..c59a86dca584 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -60,8 +60,6 @@ init_IRQ(void) | |||
60 | /* | 60 | /* |
61 | * Switch to the asynchronous interrupt stack for softirq execution. | 61 | * Switch to the asynchronous interrupt stack for softirq execution. |
62 | */ | 62 | */ |
63 | extern void __do_softirq(void); | ||
64 | |||
65 | asmlinkage void do_softirq(void) | 63 | asmlinkage void do_softirq(void) |
66 | { | 64 | { |
67 | unsigned long flags, old, new; | 65 | unsigned long flags, old, new; |
diff --git a/arch/sh/drivers/pci/pci.c b/arch/sh/drivers/pci/pci.c index 49b435c3a57a..08d2e7325252 100644 --- a/arch/sh/drivers/pci/pci.c +++ b/arch/sh/drivers/pci/pci.c | |||
@@ -191,8 +191,8 @@ void __init pcibios_update_irq(struct pci_dev *dev, int irq) | |||
191 | 191 | ||
192 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 192 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
193 | { | 193 | { |
194 | unsigned long start = pci_resource_start(dev, bar); | 194 | resource_size_t start = pci_resource_start(dev, bar); |
195 | unsigned long len = pci_resource_len(dev, bar); | 195 | resource_size_t len = pci_resource_len(dev, bar); |
196 | unsigned long flags = pci_resource_flags(dev, bar); | 196 | unsigned long flags = pci_resource_flags(dev, bar); |
197 | 197 | ||
198 | if (unlikely(!len || !start)) | 198 | if (unlikely(!len || !start)) |
diff --git a/arch/sh/kernel/asm-offsets.c b/arch/sh/kernel/asm-offsets.c index dc6725c51a89..57cf0e0680f3 100644 --- a/arch/sh/kernel/asm-offsets.c +++ b/arch/sh/kernel/asm-offsets.c | |||
@@ -11,12 +11,9 @@ | |||
11 | #include <linux/stddef.h> | 11 | #include <linux/stddef.h> |
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
14 | #include <asm/thread_info.h> | 14 | #include <linux/kbuild.h> |
15 | |||
16 | #define DEFINE(sym, val) \ | ||
17 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
18 | 15 | ||
19 | #define BLANK() asm volatile("\n->" : : ) | 16 | #include <asm/thread_info.h> |
20 | 17 | ||
21 | int main(void) | 18 | int main(void) |
22 | { | 19 | { |
diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index 9bf19b00696a..a2a99e487e33 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c | |||
@@ -200,8 +200,6 @@ void irq_ctx_exit(int cpu) | |||
200 | hardirq_ctx[cpu] = NULL; | 200 | hardirq_ctx[cpu] = NULL; |
201 | } | 201 | } |
202 | 202 | ||
203 | extern asmlinkage void __do_softirq(void); | ||
204 | |||
205 | asmlinkage void do_softirq(void) | 203 | asmlinkage void do_softirq(void) |
206 | { | 204 | { |
207 | unsigned long flags; | 205 | unsigned long flags; |
diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 6773ed76e414..cd3f7694e9b9 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c | |||
@@ -12,11 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | // #include <linux/mm.h> | 14 | // #include <linux/mm.h> |
15 | 15 | #include <linux/kbuild.h> | |
16 | #define DEFINE(sym, val) \ | ||
17 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
18 | |||
19 | #define BLANK() asm volatile("\n->" : : ) | ||
20 | 16 | ||
21 | int foo(void) | 17 | int foo(void) |
22 | { | 18 | { |
diff --git a/arch/sparc/lib/iomap.c b/arch/sparc/lib/iomap.c index 54501c1ca785..9ef37e13a920 100644 --- a/arch/sparc/lib/iomap.c +++ b/arch/sparc/lib/iomap.c | |||
@@ -21,8 +21,8 @@ EXPORT_SYMBOL(ioport_unmap); | |||
21 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ | 21 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ |
22 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 22 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
23 | { | 23 | { |
24 | unsigned long start = pci_resource_start(dev, bar); | 24 | resource_size_t start = pci_resource_start(dev, bar); |
25 | unsigned long len = pci_resource_len(dev, bar); | 25 | resource_size_t len = pci_resource_len(dev, bar); |
26 | unsigned long flags = pci_resource_flags(dev, bar); | 26 | unsigned long flags = pci_resource_flags(dev, bar); |
27 | 27 | ||
28 | if (!len || !start) | 28 | if (!len || !start) |
diff --git a/arch/sparc64/lib/iomap.c b/arch/sparc64/lib/iomap.c index ac556db06973..7120ebbd4d03 100644 --- a/arch/sparc64/lib/iomap.c +++ b/arch/sparc64/lib/iomap.c | |||
@@ -21,8 +21,8 @@ EXPORT_SYMBOL(ioport_unmap); | |||
21 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ | 21 | /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */ |
22 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 22 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
23 | { | 23 | { |
24 | unsigned long start = pci_resource_start(dev, bar); | 24 | resource_size_t start = pci_resource_start(dev, bar); |
25 | unsigned long len = pci_resource_len(dev, bar); | 25 | resource_size_t len = pci_resource_len(dev, bar); |
26 | unsigned long flags = pci_resource_flags(dev, bar); | 26 | unsigned long flags = pci_resource_flags(dev, bar); |
27 | 27 | ||
28 | if (!len || !start) | 28 | if (!len || !start) |
diff --git a/arch/um/kernel/exitcode.c b/arch/um/kernel/exitcode.c index 984f80e668ca..6540d2c9fbb7 100644 --- a/arch/um/kernel/exitcode.c +++ b/arch/um/kernel/exitcode.c | |||
@@ -59,7 +59,7 @@ static int make_proc_exitcode(void) | |||
59 | { | 59 | { |
60 | struct proc_dir_entry *ent; | 60 | struct proc_dir_entry *ent; |
61 | 61 | ||
62 | ent = create_proc_entry("exitcode", 0600, &proc_root); | 62 | ent = create_proc_entry("exitcode", 0600, NULL); |
63 | if (ent == NULL) { | 63 | if (ent == NULL) { |
64 | printk(KERN_WARNING "make_proc_exitcode : Failed to register " | 64 | printk(KERN_WARNING "make_proc_exitcode : Failed to register " |
65 | "/proc/exitcode\n"); | 65 | "/proc/exitcode\n"); |
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c index e8cb9ff183e9..83603cfbde81 100644 --- a/arch/um/kernel/process.c +++ b/arch/um/kernel/process.c | |||
@@ -364,7 +364,7 @@ int __init make_proc_sysemu(void) | |||
364 | if (!sysemu_supported) | 364 | if (!sysemu_supported) |
365 | return 0; | 365 | return 0; |
366 | 366 | ||
367 | ent = create_proc_entry("sysemu", 0600, &proc_root); | 367 | ent = create_proc_entry("sysemu", 0600, NULL); |
368 | 368 | ||
369 | if (ent == NULL) | 369 | if (ent == NULL) |
370 | { | 370 | { |
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c index e066e84493b1..0d0cea2ac98d 100644 --- a/arch/um/kernel/time.c +++ b/arch/um/kernel/time.c | |||
@@ -4,6 +4,7 @@ | |||
4 | */ | 4 | */ |
5 | 5 | ||
6 | #include <linux/clockchips.h> | 6 | #include <linux/clockchips.h> |
7 | #include <linux/init.h> | ||
7 | #include <linux/interrupt.h> | 8 | #include <linux/interrupt.h> |
8 | #include <linux/jiffies.h> | 9 | #include <linux/jiffies.h> |
9 | #include <linux/threads.h> | 10 | #include <linux/threads.h> |
@@ -109,8 +110,6 @@ static void __init setup_itimer(void) | |||
109 | clockevents_register_device(&itimer_clockevent); | 110 | clockevents_register_device(&itimer_clockevent); |
110 | } | 111 | } |
111 | 112 | ||
112 | extern void (*late_time_init)(void); | ||
113 | |||
114 | void __init time_init(void) | 113 | void __init time_init(void) |
115 | { | 114 | { |
116 | long long nsecs; | 115 | long long nsecs; |
diff --git a/arch/v850/kernel/asm-offsets.c b/arch/v850/kernel/asm-offsets.c index cee5c3142d41..581e6986a776 100644 --- a/arch/v850/kernel/asm-offsets.c +++ b/arch/v850/kernel/asm-offsets.c | |||
@@ -13,14 +13,11 @@ | |||
13 | #include <linux/kernel_stat.h> | 13 | #include <linux/kernel_stat.h> |
14 | #include <linux/ptrace.h> | 14 | #include <linux/ptrace.h> |
15 | #include <linux/hardirq.h> | 15 | #include <linux/hardirq.h> |
16 | #include <linux/kbuild.h> | ||
17 | |||
16 | #include <asm/irq.h> | 18 | #include <asm/irq.h> |
17 | #include <asm/errno.h> | 19 | #include <asm/errno.h> |
18 | 20 | ||
19 | #define DEFINE(sym, val) \ | ||
20 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
21 | |||
22 | #define BLANK() asm volatile("\n->" : : ) | ||
23 | |||
24 | int main (void) | 21 | int main (void) |
25 | { | 22 | { |
26 | /* offsets into the task struct */ | 23 | /* offsets into the task struct */ |
diff --git a/arch/v850/kernel/rte_mb_a_pci.c b/arch/v850/kernel/rte_mb_a_pci.c index 7165478824e7..687e367d8b64 100644 --- a/arch/v850/kernel/rte_mb_a_pci.c +++ b/arch/v850/kernel/rte_mb_a_pci.c | |||
@@ -790,8 +790,8 @@ pci_free_consistent (struct pci_dev *pdev, size_t size, void *cpu_addr, | |||
790 | 790 | ||
791 | void __iomem *pci_iomap (struct pci_dev *dev, int bar, unsigned long max) | 791 | void __iomem *pci_iomap (struct pci_dev *dev, int bar, unsigned long max) |
792 | { | 792 | { |
793 | unsigned long start = pci_resource_start (dev, bar); | 793 | resource_size_t start = pci_resource_start (dev, bar); |
794 | unsigned long len = pci_resource_len (dev, bar); | 794 | resource_size_t len = pci_resource_len (dev, bar); |
795 | 795 | ||
796 | if (!start || len == 0) | 796 | if (!start || len == 0) |
797 | return 0; | 797 | return 0; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a12dbb2b93f3..f70e3e3a9fa7 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -537,9 +537,6 @@ config CALGARY_IOMMU_ENABLED_BY_DEFAULT | |||
537 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. | 537 | Calgary anyway, pass 'iommu=calgary' on the kernel command line. |
538 | If unsure, say Y. | 538 | If unsure, say Y. |
539 | 539 | ||
540 | config IOMMU_HELPER | ||
541 | def_bool (CALGARY_IOMMU || GART_IOMMU) | ||
542 | |||
543 | # need this always selected by IOMMU for the VIA workaround | 540 | # need this always selected by IOMMU for the VIA workaround |
544 | config SWIOTLB | 541 | config SWIOTLB |
545 | bool | 542 | bool |
@@ -550,6 +547,8 @@ config SWIOTLB | |||
550 | access 32-bits of memory can be used on systems with more than | 547 | access 32-bits of memory can be used on systems with more than |
551 | 3 GB of memory. If unsure, say Y. | 548 | 3 GB of memory. If unsure, say Y. |
552 | 549 | ||
550 | config IOMMU_HELPER | ||
551 | def_bool (CALGARY_IOMMU || GART_IOMMU || SWIOTLB) | ||
553 | 552 | ||
554 | config NR_CPUS | 553 | config NR_CPUS |
555 | int "Maximum number of CPUs (2-255)" | 554 | int "Maximum number of CPUs (2-255)" |
@@ -1505,6 +1504,10 @@ config PCI_GODIRECT | |||
1505 | config PCI_GOANY | 1504 | config PCI_GOANY |
1506 | bool "Any" | 1505 | bool "Any" |
1507 | 1506 | ||
1507 | config PCI_GOOLPC | ||
1508 | bool "OLPC" | ||
1509 | depends on OLPC | ||
1510 | |||
1508 | endchoice | 1511 | endchoice |
1509 | 1512 | ||
1510 | config PCI_BIOS | 1513 | config PCI_BIOS |
@@ -1514,12 +1517,17 @@ config PCI_BIOS | |||
1514 | # x86-64 doesn't support PCI BIOS access from long mode so always go direct. | 1517 | # x86-64 doesn't support PCI BIOS access from long mode so always go direct. |
1515 | config PCI_DIRECT | 1518 | config PCI_DIRECT |
1516 | def_bool y | 1519 | def_bool y |
1517 | depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY) || X86_VISWS) | 1520 | depends on PCI && (X86_64 || (PCI_GODIRECT || PCI_GOANY || PCI_GOOLPC) || X86_VISWS) |
1518 | 1521 | ||
1519 | config PCI_MMCONFIG | 1522 | config PCI_MMCONFIG |
1520 | def_bool y | 1523 | def_bool y |
1521 | depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY) | 1524 | depends on X86_32 && PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY) |
1522 | 1525 | ||
1526 | config PCI_OLPC | ||
1527 | bool | ||
1528 | depends on PCI && PCI_GOOLPC | ||
1529 | default y | ||
1530 | |||
1523 | config PCI_DOMAINS | 1531 | config PCI_DOMAINS |
1524 | def_bool y | 1532 | def_bool y |
1525 | depends on PCI | 1533 | depends on PCI |
@@ -1639,6 +1647,13 @@ config GEODE_MFGPT_TIMER | |||
1639 | MFGPTs have a better resolution and max interval than the | 1647 | MFGPTs have a better resolution and max interval than the |
1640 | generic PIT, and are suitable for use as high-res timers. | 1648 | generic PIT, and are suitable for use as high-res timers. |
1641 | 1649 | ||
1650 | config OLPC | ||
1651 | bool "One Laptop Per Child support" | ||
1652 | default n | ||
1653 | help | ||
1654 | Add support for detecting the unique features of the OLPC | ||
1655 | XO hardware. | ||
1656 | |||
1642 | endif # X86_32 | 1657 | endif # X86_32 |
1643 | 1658 | ||
1644 | config K8_NB | 1659 | config K8_NB |
diff --git a/arch/x86/boot/edd.c b/arch/x86/boot/edd.c index d84a48ece785..03399d64013b 100644 --- a/arch/x86/boot/edd.c +++ b/arch/x86/boot/edd.c | |||
@@ -126,17 +126,25 @@ void query_edd(void) | |||
126 | { | 126 | { |
127 | char eddarg[8]; | 127 | char eddarg[8]; |
128 | int do_mbr = 1; | 128 | int do_mbr = 1; |
129 | #ifdef CONFIG_EDD_OFF | ||
130 | int do_edd = 0; | ||
131 | #else | ||
129 | int do_edd = 1; | 132 | int do_edd = 1; |
133 | #endif | ||
130 | int be_quiet; | 134 | int be_quiet; |
131 | int devno; | 135 | int devno; |
132 | struct edd_info ei, *edp; | 136 | struct edd_info ei, *edp; |
133 | u32 *mbrptr; | 137 | u32 *mbrptr; |
134 | 138 | ||
135 | if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { | 139 | if (cmdline_find_option("edd", eddarg, sizeof eddarg) > 0) { |
136 | if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) | 140 | if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { |
141 | do_edd = 1; | ||
137 | do_mbr = 0; | 142 | do_mbr = 0; |
143 | } | ||
138 | else if (!strcmp(eddarg, "off")) | 144 | else if (!strcmp(eddarg, "off")) |
139 | do_edd = 0; | 145 | do_edd = 0; |
146 | else if (!strcmp(eddarg, "on")) | ||
147 | do_edd = 1; | ||
140 | } | 148 | } |
141 | 149 | ||
142 | be_quiet = cmdline_find_option_bool("quiet"); | 150 | be_quiet = cmdline_find_option_bool("quiet"); |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index fa19c3819540..350eb1b2a208 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -91,6 +91,8 @@ endif | |||
91 | obj-$(CONFIG_SCx200) += scx200.o | 91 | obj-$(CONFIG_SCx200) += scx200.o |
92 | scx200-y += scx200_32.o | 92 | scx200-y += scx200_32.o |
93 | 93 | ||
94 | obj-$(CONFIG_OLPC) += olpc.o | ||
95 | |||
94 | ### | 96 | ### |
95 | # 64 bit specific files | 97 | # 64 bit specific files |
96 | ifeq ($(CONFIG_X86_64),y) | 98 | ifeq ($(CONFIG_X86_64),y) |
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c index 670c3c311289..92588083950f 100644 --- a/arch/x86/kernel/asm-offsets_32.c +++ b/arch/x86/kernel/asm-offsets_32.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/signal.h> | 9 | #include <linux/signal.h> |
10 | #include <linux/personality.h> | 10 | #include <linux/personality.h> |
11 | #include <linux/suspend.h> | 11 | #include <linux/suspend.h> |
12 | #include <linux/kbuild.h> | ||
12 | #include <asm/ucontext.h> | 13 | #include <asm/ucontext.h> |
13 | #include "sigframe.h" | 14 | #include "sigframe.h" |
14 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
@@ -23,14 +24,6 @@ | |||
23 | #include <linux/lguest.h> | 24 | #include <linux/lguest.h> |
24 | #include "../../../drivers/lguest/lg.h" | 25 | #include "../../../drivers/lguest/lg.h" |
25 | 26 | ||
26 | #define DEFINE(sym, val) \ | ||
27 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
28 | |||
29 | #define BLANK() asm volatile("\n->" : : ) | ||
30 | |||
31 | #define OFFSET(sym, str, mem) \ | ||
32 | DEFINE(sym, offsetof(struct str, mem)); | ||
33 | |||
34 | /* workaround for a warning with -Wmissing-prototypes */ | 27 | /* workaround for a warning with -Wmissing-prototypes */ |
35 | void foo(void); | 28 | void foo(void); |
36 | 29 | ||
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c index 494e1e096ee6..f126c05d6170 100644 --- a/arch/x86/kernel/asm-offsets_64.c +++ b/arch/x86/kernel/asm-offsets_64.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/hardirq.h> | 11 | #include <linux/hardirq.h> |
12 | #include <linux/suspend.h> | 12 | #include <linux/suspend.h> |
13 | #include <linux/kbuild.h> | ||
13 | #include <asm/pda.h> | 14 | #include <asm/pda.h> |
14 | #include <asm/processor.h> | 15 | #include <asm/processor.h> |
15 | #include <asm/segment.h> | 16 | #include <asm/segment.h> |
@@ -17,14 +18,6 @@ | |||
17 | #include <asm/ia32.h> | 18 | #include <asm/ia32.h> |
18 | #include <asm/bootparam.h> | 19 | #include <asm/bootparam.h> |
19 | 20 | ||
20 | #define DEFINE(sym, val) \ | ||
21 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
22 | |||
23 | #define BLANK() asm volatile("\n->" : : ) | ||
24 | |||
25 | #define OFFSET(sym, str, mem) \ | ||
26 | DEFINE(sym, offsetof(struct str, mem)) | ||
27 | |||
28 | #define __NO_STUBS 1 | 21 | #define __NO_STUBS 1 |
29 | #undef __SYSCALL | 22 | #undef __SYSCALL |
30 | #undef _ASM_X86_64_UNISTD_H_ | 23 | #undef _ASM_X86_64_UNISTD_H_ |
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c index 1960f1985e5e..84c480bb3715 100644 --- a/arch/x86/kernel/cpu/mtrr/if.c +++ b/arch/x86/kernel/cpu/mtrr/if.c | |||
@@ -424,7 +424,7 @@ static int __init mtrr_if_init(void) | |||
424 | return -ENODEV; | 424 | return -ENODEV; |
425 | 425 | ||
426 | proc_root_mtrr = | 426 | proc_root_mtrr = |
427 | proc_create("mtrr", S_IWUSR | S_IRUGO, &proc_root, &mtrr_fops); | 427 | proc_create("mtrr", S_IWUSR | S_IRUGO, NULL, &mtrr_fops); |
428 | 428 | ||
429 | if (proc_root_mtrr) | 429 | if (proc_root_mtrr) |
430 | proc_root_mtrr->owner = THIS_MODULE; | 430 | proc_root_mtrr->owner = THIS_MODULE; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 00bda7bcda63..147352df28b9 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -190,8 +190,6 @@ void irq_ctx_exit(int cpu) | |||
190 | hardirq_ctx[cpu] = NULL; | 190 | hardirq_ctx[cpu] = NULL; |
191 | } | 191 | } |
192 | 192 | ||
193 | extern asmlinkage void __do_softirq(void); | ||
194 | |||
195 | asmlinkage void do_softirq(void) | 193 | asmlinkage void do_softirq(void) |
196 | { | 194 | { |
197 | unsigned long flags; | 195 | unsigned long flags; |
diff --git a/arch/x86/kernel/olpc.c b/arch/x86/kernel/olpc.c new file mode 100644 index 000000000000..3e6672274807 --- /dev/null +++ b/arch/x86/kernel/olpc.c | |||
@@ -0,0 +1,260 @@ | |||
1 | /* | ||
2 | * Support for the OLPC DCON and OLPC EC access | ||
3 | * | ||
4 | * Copyright © 2006 Advanced Micro Devices, Inc. | ||
5 | * Copyright © 2007-2008 Andres Salomon <dilinger@debian.org> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <linux/io.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <asm/geode.h> | ||
21 | #include <asm/olpc.h> | ||
22 | |||
23 | #ifdef CONFIG_OPEN_FIRMWARE | ||
24 | #include <asm/ofw.h> | ||
25 | #endif | ||
26 | |||
27 | struct olpc_platform_t olpc_platform_info; | ||
28 | EXPORT_SYMBOL_GPL(olpc_platform_info); | ||
29 | |||
30 | static DEFINE_SPINLOCK(ec_lock); | ||
31 | |||
32 | /* what the timeout *should* be (in ms) */ | ||
33 | #define EC_BASE_TIMEOUT 20 | ||
34 | |||
35 | /* the timeout that bugs in the EC might force us to actually use */ | ||
36 | static int ec_timeout = EC_BASE_TIMEOUT; | ||
37 | |||
38 | static int __init olpc_ec_timeout_set(char *str) | ||
39 | { | ||
40 | if (get_option(&str, &ec_timeout) != 1) { | ||
41 | ec_timeout = EC_BASE_TIMEOUT; | ||
42 | printk(KERN_ERR "olpc-ec: invalid argument to " | ||
43 | "'olpc_ec_timeout=', ignoring!\n"); | ||
44 | } | ||
45 | printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n", | ||
46 | ec_timeout); | ||
47 | return 1; | ||
48 | } | ||
49 | __setup("olpc_ec_timeout=", olpc_ec_timeout_set); | ||
50 | |||
51 | /* | ||
52 | * These {i,o}bf_status functions return whether the buffers are full or not. | ||
53 | */ | ||
54 | |||
55 | static inline unsigned int ibf_status(unsigned int port) | ||
56 | { | ||
57 | return !!(inb(port) & 0x02); | ||
58 | } | ||
59 | |||
60 | static inline unsigned int obf_status(unsigned int port) | ||
61 | { | ||
62 | return inb(port) & 0x01; | ||
63 | } | ||
64 | |||
65 | #define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d)) | ||
66 | static int __wait_on_ibf(unsigned int line, unsigned int port, int desired) | ||
67 | { | ||
68 | unsigned int timeo; | ||
69 | int state = ibf_status(port); | ||
70 | |||
71 | for (timeo = ec_timeout; state != desired && timeo; timeo--) { | ||
72 | mdelay(1); | ||
73 | state = ibf_status(port); | ||
74 | } | ||
75 | |||
76 | if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && | ||
77 | timeo < (ec_timeout - EC_BASE_TIMEOUT)) { | ||
78 | printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n", | ||
79 | line, ec_timeout - timeo); | ||
80 | } | ||
81 | |||
82 | return !(state == desired); | ||
83 | } | ||
84 | |||
85 | #define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d)) | ||
86 | static int __wait_on_obf(unsigned int line, unsigned int port, int desired) | ||
87 | { | ||
88 | unsigned int timeo; | ||
89 | int state = obf_status(port); | ||
90 | |||
91 | for (timeo = ec_timeout; state != desired && timeo; timeo--) { | ||
92 | mdelay(1); | ||
93 | state = obf_status(port); | ||
94 | } | ||
95 | |||
96 | if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) && | ||
97 | timeo < (ec_timeout - EC_BASE_TIMEOUT)) { | ||
98 | printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n", | ||
99 | line, ec_timeout - timeo); | ||
100 | } | ||
101 | |||
102 | return !(state == desired); | ||
103 | } | ||
104 | |||
105 | /* | ||
106 | * This allows the kernel to run Embedded Controller commands. The EC is | ||
107 | * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the | ||
108 | * available EC commands are here: | ||
109 | * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while | ||
110 | * OpenFirmware's source is available, the EC's is not. | ||
111 | */ | ||
112 | int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | ||
113 | unsigned char *outbuf, size_t outlen) | ||
114 | { | ||
115 | unsigned long flags; | ||
116 | int ret = -EIO; | ||
117 | int i; | ||
118 | |||
119 | spin_lock_irqsave(&ec_lock, flags); | ||
120 | |||
121 | /* Clear OBF */ | ||
122 | for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++) | ||
123 | inb(0x68); | ||
124 | if (i == 10) { | ||
125 | printk(KERN_ERR "olpc-ec: timeout while attempting to " | ||
126 | "clear OBF flag!\n"); | ||
127 | goto err; | ||
128 | } | ||
129 | |||
130 | if (wait_on_ibf(0x6c, 0)) { | ||
131 | printk(KERN_ERR "olpc-ec: timeout waiting for EC to " | ||
132 | "quiesce!\n"); | ||
133 | goto err; | ||
134 | } | ||
135 | |||
136 | restart: | ||
137 | /* | ||
138 | * Note that if we time out during any IBF checks, that's a failure; | ||
139 | * we have to return. There's no way for the kernel to clear that. | ||
140 | * | ||
141 | * If we time out during an OBF check, we can restart the command; | ||
142 | * reissuing it will clear the OBF flag, and we should be alright. | ||
143 | * The OBF flag will sometimes misbehave due to what we believe | ||
144 | * is a hardware quirk.. | ||
145 | */ | ||
146 | printk(KERN_DEBUG "olpc-ec: running cmd 0x%x\n", cmd); | ||
147 | outb(cmd, 0x6c); | ||
148 | |||
149 | if (wait_on_ibf(0x6c, 0)) { | ||
150 | printk(KERN_ERR "olpc-ec: timeout waiting for EC to read " | ||
151 | "command!\n"); | ||
152 | goto err; | ||
153 | } | ||
154 | |||
155 | if (inbuf && inlen) { | ||
156 | /* write data to EC */ | ||
157 | for (i = 0; i < inlen; i++) { | ||
158 | if (wait_on_ibf(0x6c, 0)) { | ||
159 | printk(KERN_ERR "olpc-ec: timeout waiting for" | ||
160 | " EC accept data!\n"); | ||
161 | goto err; | ||
162 | } | ||
163 | printk(KERN_DEBUG "olpc-ec: sending cmd arg 0x%x\n", | ||
164 | inbuf[i]); | ||
165 | outb(inbuf[i], 0x68); | ||
166 | } | ||
167 | } | ||
168 | if (outbuf && outlen) { | ||
169 | /* read data from EC */ | ||
170 | for (i = 0; i < outlen; i++) { | ||
171 | if (wait_on_obf(0x6c, 1)) { | ||
172 | printk(KERN_ERR "olpc-ec: timeout waiting for" | ||
173 | " EC to provide data!\n"); | ||
174 | goto restart; | ||
175 | } | ||
176 | outbuf[i] = inb(0x68); | ||
177 | printk(KERN_DEBUG "olpc-ec: received 0x%x\n", | ||
178 | outbuf[i]); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | ret = 0; | ||
183 | err: | ||
184 | spin_unlock_irqrestore(&ec_lock, flags); | ||
185 | return ret; | ||
186 | } | ||
187 | EXPORT_SYMBOL_GPL(olpc_ec_cmd); | ||
188 | |||
189 | #ifdef CONFIG_OPEN_FIRMWARE | ||
190 | static void __init platform_detect(void) | ||
191 | { | ||
192 | size_t propsize; | ||
193 | u32 rev; | ||
194 | |||
195 | if (ofw("getprop", 4, 1, NULL, "board-revision-int", &rev, 4, | ||
196 | &propsize) || propsize != 4) { | ||
197 | printk(KERN_ERR "ofw: getprop call failed!\n"); | ||
198 | rev = 0; | ||
199 | } | ||
200 | olpc_platform_info.boardrev = be32_to_cpu(rev); | ||
201 | } | ||
202 | #else | ||
203 | static void __init platform_detect(void) | ||
204 | { | ||
205 | /* stopgap until OFW support is added to the kernel */ | ||
206 | olpc_platform_info.boardrev = be32_to_cpu(0xc2); | ||
207 | } | ||
208 | #endif | ||
209 | |||
210 | static int __init olpc_init(void) | ||
211 | { | ||
212 | unsigned char *romsig; | ||
213 | |||
214 | /* The ioremap check is dangerous; limit what we run it on */ | ||
215 | if (!is_geode() || geode_has_vsa2()) | ||
216 | return 0; | ||
217 | |||
218 | spin_lock_init(&ec_lock); | ||
219 | |||
220 | romsig = ioremap(0xffffffc0, 16); | ||
221 | if (!romsig) | ||
222 | return 0; | ||
223 | |||
224 | if (strncmp(romsig, "CL1 Q", 7)) | ||
225 | goto unmap; | ||
226 | if (strncmp(romsig+6, romsig+13, 3)) { | ||
227 | printk(KERN_INFO "OLPC BIOS signature looks invalid. " | ||
228 | "Assuming not OLPC\n"); | ||
229 | goto unmap; | ||
230 | } | ||
231 | |||
232 | printk(KERN_INFO "OLPC board with OpenFirmware %.16s\n", romsig); | ||
233 | olpc_platform_info.flags |= OLPC_F_PRESENT; | ||
234 | |||
235 | /* get the platform revision */ | ||
236 | platform_detect(); | ||
237 | |||
238 | /* assume B1 and above models always have a DCON */ | ||
239 | if (olpc_board_at_least(olpc_board(0xb1))) | ||
240 | olpc_platform_info.flags |= OLPC_F_DCON; | ||
241 | |||
242 | /* get the EC revision */ | ||
243 | olpc_ec_cmd(EC_FIRMWARE_REV, NULL, 0, | ||
244 | (unsigned char *) &olpc_platform_info.ecver, 1); | ||
245 | |||
246 | /* check to see if the VSA exists */ | ||
247 | if (geode_has_vsa2()) | ||
248 | olpc_platform_info.flags |= OLPC_F_VSA; | ||
249 | |||
250 | printk(KERN_INFO "OLPC board revision %s%X (EC=%x)\n", | ||
251 | ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "", | ||
252 | olpc_platform_info.boardrev >> 4, | ||
253 | olpc_platform_info.ecver); | ||
254 | |||
255 | unmap: | ||
256 | iounmap(romsig); | ||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | postcore_initcall(olpc_init); | ||
diff --git a/arch/x86/kernel/time_32.c b/arch/x86/kernel/time_32.c index 1a89e93f3f1c..2ff21f398934 100644 --- a/arch/x86/kernel/time_32.c +++ b/arch/x86/kernel/time_32.c | |||
@@ -115,7 +115,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id) | |||
115 | return IRQ_HANDLED; | 115 | return IRQ_HANDLED; |
116 | } | 116 | } |
117 | 117 | ||
118 | extern void (*late_time_init)(void); | ||
119 | /* Duplicate of time_init() below, with hpet_enable part added */ | 118 | /* Duplicate of time_init() below, with hpet_enable part added */ |
120 | void __init hpet_time_init(void) | 119 | void __init hpet_time_init(void) |
121 | { | 120 | { |
diff --git a/arch/x86/pci/Makefile_32 b/arch/x86/pci/Makefile_32 index cdd6828b5abb..b859047a6376 100644 --- a/arch/x86/pci/Makefile_32 +++ b/arch/x86/pci/Makefile_32 | |||
@@ -3,6 +3,7 @@ obj-y := i386.o init.o | |||
3 | obj-$(CONFIG_PCI_BIOS) += pcbios.o | 3 | obj-$(CONFIG_PCI_BIOS) += pcbios.o |
4 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_32.o direct.o mmconfig-shared.o | 4 | obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_32.o direct.o mmconfig-shared.o |
5 | obj-$(CONFIG_PCI_DIRECT) += direct.o | 5 | obj-$(CONFIG_PCI_DIRECT) += direct.o |
6 | obj-$(CONFIG_PCI_OLPC) += olpc.o | ||
6 | 7 | ||
7 | pci-y := fixup.o | 8 | pci-y := fixup.o |
8 | pci-$(CONFIG_ACPI) += acpi.o | 9 | pci-$(CONFIG_ACPI) += acpi.o |
diff --git a/arch/x86/pci/init.c b/arch/x86/pci/init.c index 3de9f9ba2da6..0f5f7dd2a620 100644 --- a/arch/x86/pci/init.c +++ b/arch/x86/pci/init.c | |||
@@ -14,6 +14,9 @@ static __init int pci_access_init(void) | |||
14 | #ifdef CONFIG_PCI_MMCONFIG | 14 | #ifdef CONFIG_PCI_MMCONFIG |
15 | pci_mmcfg_init(type); | 15 | pci_mmcfg_init(type); |
16 | #endif | 16 | #endif |
17 | #ifdef CONFIG_PCI_OLPC | ||
18 | pci_olpc_init(); | ||
19 | #endif | ||
17 | if (raw_pci_ops) | 20 | if (raw_pci_ops) |
18 | return 0; | 21 | return 0; |
19 | #ifdef CONFIG_PCI_BIOS | 22 | #ifdef CONFIG_PCI_BIOS |
diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c new file mode 100644 index 000000000000..5e7636558c02 --- /dev/null +++ b/arch/x86/pci/olpc.c | |||
@@ -0,0 +1,313 @@ | |||
1 | /* | ||
2 | * Low-level PCI config space access for OLPC systems who lack the VSA | ||
3 | * PCI virtualization software. | ||
4 | * | ||
5 | * Copyright © 2006 Advanced Micro Devices, Inc. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * The AMD Geode chipset (ie: GX2 processor, cs5536 I/O companion device) | ||
13 | * has some I/O functions (display, southbridge, sound, USB HCIs, etc) | ||
14 | * that more or less behave like PCI devices, but the hardware doesn't | ||
15 | * directly implement the PCI configuration space headers. AMD provides | ||
16 | * "VSA" (Virtual System Architecture) software that emulates PCI config | ||
17 | * space for these devices, by trapping I/O accesses to PCI config register | ||
18 | * (CF8/CFC) and running some code in System Management Mode interrupt state. | ||
19 | * On the OLPC platform, we don't want to use that VSA code because | ||
20 | * (a) it slows down suspend/resume, and (b) recompiling it requires special | ||
21 | * compilers that are hard to get. So instead of letting the complex VSA | ||
22 | * code simulate the PCI config registers for the on-chip devices, we | ||
23 | * just simulate them the easy way, by inserting the code into the | ||
24 | * pci_write_config and pci_read_config path. Most of the config registers | ||
25 | * are read-only anyway, so the bulk of the simulation is just table lookup. | ||
26 | */ | ||
27 | |||
28 | #include <linux/pci.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <asm/olpc.h> | ||
31 | #include <asm/geode.h> | ||
32 | #include "pci.h" | ||
33 | |||
34 | /* | ||
35 | * In the tables below, the first two line (8 longwords) are the | ||
36 | * size masks that are used when the higher level PCI code determines | ||
37 | * the size of the region by writing ~0 to a base address register | ||
38 | * and reading back the result. | ||
39 | * | ||
40 | * The following lines are the values that are read during normal | ||
41 | * PCI config access cycles, i.e. not after just having written | ||
42 | * ~0 to a base address register. | ||
43 | */ | ||
44 | |||
45 | static const uint32_t lxnb_hdr[] = { /* dev 1 function 0 - devfn = 8 */ | ||
46 | 0x0, 0x0, 0x0, 0x0, | ||
47 | 0x0, 0x0, 0x0, 0x0, | ||
48 | |||
49 | 0x281022, 0x2200005, 0x6000021, 0x80f808, /* AMD Vendor ID */ | ||
50 | 0x0, 0x0, 0x0, 0x0, /* No virtual registers, hence no BAR */ | ||
51 | 0x0, 0x0, 0x0, 0x28100b, | ||
52 | 0x0, 0x0, 0x0, 0x0, | ||
53 | 0x0, 0x0, 0x0, 0x0, | ||
54 | 0x0, 0x0, 0x0, 0x0, | ||
55 | 0x0, 0x0, 0x0, 0x0, | ||
56 | }; | ||
57 | |||
58 | static const uint32_t gxnb_hdr[] = { /* dev 1 function 0 - devfn = 8 */ | ||
59 | 0xfffffffd, 0x0, 0x0, 0x0, | ||
60 | 0x0, 0x0, 0x0, 0x0, | ||
61 | |||
62 | 0x28100b, 0x2200005, 0x6000021, 0x80f808, /* NSC Vendor ID */ | ||
63 | 0xac1d, 0x0, 0x0, 0x0, /* I/O BAR - base of virtual registers */ | ||
64 | 0x0, 0x0, 0x0, 0x28100b, | ||
65 | 0x0, 0x0, 0x0, 0x0, | ||
66 | 0x0, 0x0, 0x0, 0x0, | ||
67 | 0x0, 0x0, 0x0, 0x0, | ||
68 | 0x0, 0x0, 0x0, 0x0, | ||
69 | }; | ||
70 | |||
71 | static const uint32_t lxfb_hdr[] = { /* dev 1 function 1 - devfn = 9 */ | ||
72 | 0xff000008, 0xffffc000, 0xffffc000, 0xffffc000, | ||
73 | 0xffffc000, 0x0, 0x0, 0x0, | ||
74 | |||
75 | 0x20811022, 0x2200003, 0x3000000, 0x0, /* AMD Vendor ID */ | ||
76 | 0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */ | ||
77 | 0xfe00c000, 0x0, 0x0, 0x30100b, /* VIP */ | ||
78 | 0x0, 0x0, 0x0, 0x10e, /* INTA, IRQ14 for graphics accel */ | ||
79 | 0x0, 0x0, 0x0, 0x0, | ||
80 | 0x3d0, 0x3c0, 0xa0000, 0x0, /* VG IO, VG IO, EGA FB, MONO FB */ | ||
81 | 0x0, 0x0, 0x0, 0x0, | ||
82 | }; | ||
83 | |||
84 | static const uint32_t gxfb_hdr[] = { /* dev 1 function 1 - devfn = 9 */ | ||
85 | 0xff800008, 0xffffc000, 0xffffc000, 0xffffc000, | ||
86 | 0x0, 0x0, 0x0, 0x0, | ||
87 | |||
88 | 0x30100b, 0x2200003, 0x3000000, 0x0, /* NSC Vendor ID */ | ||
89 | 0xfd000000, 0xfe000000, 0xfe004000, 0xfe008000, /* FB, GP, VG, DF */ | ||
90 | 0x0, 0x0, 0x0, 0x30100b, | ||
91 | 0x0, 0x0, 0x0, 0x0, | ||
92 | 0x0, 0x0, 0x0, 0x0, | ||
93 | 0x3d0, 0x3c0, 0xa0000, 0x0, /* VG IO, VG IO, EGA FB, MONO FB */ | ||
94 | 0x0, 0x0, 0x0, 0x0, | ||
95 | }; | ||
96 | |||
97 | static const uint32_t aes_hdr[] = { /* dev 1 function 2 - devfn = 0xa */ | ||
98 | 0xffffc000, 0x0, 0x0, 0x0, | ||
99 | 0x0, 0x0, 0x0, 0x0, | ||
100 | |||
101 | 0x20821022, 0x2a00006, 0x10100000, 0x8, /* NSC Vendor ID */ | ||
102 | 0xfe010000, 0x0, 0x0, 0x0, /* AES registers */ | ||
103 | 0x0, 0x0, 0x0, 0x20821022, | ||
104 | 0x0, 0x0, 0x0, 0x0, | ||
105 | 0x0, 0x0, 0x0, 0x0, | ||
106 | 0x0, 0x0, 0x0, 0x0, | ||
107 | 0x0, 0x0, 0x0, 0x0, | ||
108 | }; | ||
109 | |||
110 | |||
111 | static const uint32_t isa_hdr[] = { /* dev f function 0 - devfn = 78 */ | ||
112 | 0xfffffff9, 0xffffff01, 0xffffffc1, 0xffffffe1, | ||
113 | 0xffffff81, 0xffffffc1, 0x0, 0x0, | ||
114 | |||
115 | 0x20901022, 0x2a00049, 0x6010003, 0x802000, | ||
116 | 0x18b1, 0x1001, 0x1801, 0x1881, /* SMB-8 GPIO-256 MFGPT-64 IRQ-32 */ | ||
117 | 0x1401, 0x1841, 0x0, 0x20901022, /* PMS-128 ACPI-64 */ | ||
118 | 0x0, 0x0, 0x0, 0x0, | ||
119 | 0x0, 0x0, 0x0, 0x0, | ||
120 | 0x0, 0x0, 0x0, 0xaa5b, /* IRQ steering */ | ||
121 | 0x0, 0x0, 0x0, 0x0, | ||
122 | }; | ||
123 | |||
124 | static const uint32_t ac97_hdr[] = { /* dev f function 3 - devfn = 7b */ | ||
125 | 0xffffff81, 0x0, 0x0, 0x0, | ||
126 | 0x0, 0x0, 0x0, 0x0, | ||
127 | |||
128 | 0x20931022, 0x2a00041, 0x4010001, 0x0, | ||
129 | 0x1481, 0x0, 0x0, 0x0, /* I/O BAR-128 */ | ||
130 | 0x0, 0x0, 0x0, 0x20931022, | ||
131 | 0x0, 0x0, 0x0, 0x205, /* IntB, IRQ5 */ | ||
132 | 0x0, 0x0, 0x0, 0x0, | ||
133 | 0x0, 0x0, 0x0, 0x0, | ||
134 | 0x0, 0x0, 0x0, 0x0, | ||
135 | }; | ||
136 | |||
137 | static const uint32_t ohci_hdr[] = { /* dev f function 4 - devfn = 7c */ | ||
138 | 0xfffff000, 0x0, 0x0, 0x0, | ||
139 | 0x0, 0x0, 0x0, 0x0, | ||
140 | |||
141 | 0x20941022, 0x2300006, 0xc031002, 0x0, | ||
142 | 0xfe01a000, 0x0, 0x0, 0x0, /* MEMBAR-1000 */ | ||
143 | 0x0, 0x0, 0x0, 0x20941022, | ||
144 | 0x0, 0x40, 0x0, 0x40a, /* CapPtr INT-D, IRQA */ | ||
145 | 0xc8020001, 0x0, 0x0, 0x0, /* Capabilities - 40 is R/O, | ||
146 | 44 is mask 8103 (power control) */ | ||
147 | 0x0, 0x0, 0x0, 0x0, | ||
148 | 0x0, 0x0, 0x0, 0x0, | ||
149 | }; | ||
150 | |||
151 | static const uint32_t ehci_hdr[] = { /* dev f function 4 - devfn = 7d */ | ||
152 | 0xfffff000, 0x0, 0x0, 0x0, | ||
153 | 0x0, 0x0, 0x0, 0x0, | ||
154 | |||
155 | 0x20951022, 0x2300006, 0xc032002, 0x0, | ||
156 | 0xfe01b000, 0x0, 0x0, 0x0, /* MEMBAR-1000 */ | ||
157 | 0x0, 0x0, 0x0, 0x20951022, | ||
158 | 0x0, 0x40, 0x0, 0x40a, /* CapPtr INT-D, IRQA */ | ||
159 | 0xc8020001, 0x0, 0x0, 0x0, /* Capabilities - 40 is R/O, 44 is | ||
160 | mask 8103 (power control) */ | ||
161 | #if 0 | ||
162 | 0x1, 0x40080000, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */ | ||
163 | #endif | ||
164 | 0x01000001, 0x0, 0x0, 0x0, /* EECP - see EHCI spec section 2.1.7 */ | ||
165 | 0x2020, 0x0, 0x0, 0x0, /* (EHCI page 8) 60 SBRN (R/O), | ||
166 | 61 FLADJ (R/W), PORTWAKECAP */ | ||
167 | }; | ||
168 | |||
169 | static uint32_t ff_loc = ~0; | ||
170 | static uint32_t zero_loc; | ||
171 | static int bar_probing; /* Set after a write of ~0 to a BAR */ | ||
172 | static int is_lx; | ||
173 | |||
174 | #define NB_SLOT 0x1 /* Northbridge - GX chip - Device 1 */ | ||
175 | #define SB_SLOT 0xf /* Southbridge - CS5536 chip - Device F */ | ||
176 | |||
177 | static int is_simulated(unsigned int bus, unsigned int devfn) | ||
178 | { | ||
179 | return (!bus && ((PCI_SLOT(devfn) == NB_SLOT) || | ||
180 | (PCI_SLOT(devfn) == SB_SLOT))); | ||
181 | } | ||
182 | |||
183 | static uint32_t *hdr_addr(const uint32_t *hdr, int reg) | ||
184 | { | ||
185 | uint32_t addr; | ||
186 | |||
187 | /* | ||
188 | * This is a little bit tricky. The header maps consist of | ||
189 | * 0x20 bytes of size masks, followed by 0x70 bytes of header data. | ||
190 | * In the normal case, when not probing a BAR's size, we want | ||
191 | * to access the header data, so we add 0x20 to the reg offset, | ||
192 | * thus skipping the size mask area. | ||
193 | * In the BAR probing case, we want to access the size mask for | ||
194 | * the BAR, so we subtract 0x10 (the config header offset for | ||
195 | * BAR0), and don't skip the size mask area. | ||
196 | */ | ||
197 | |||
198 | addr = (uint32_t)hdr + reg + (bar_probing ? -0x10 : 0x20); | ||
199 | |||
200 | bar_probing = 0; | ||
201 | return (uint32_t *)addr; | ||
202 | } | ||
203 | |||
204 | static int pci_olpc_read(unsigned int seg, unsigned int bus, | ||
205 | unsigned int devfn, int reg, int len, uint32_t *value) | ||
206 | { | ||
207 | uint32_t *addr; | ||
208 | |||
209 | /* Use the hardware mechanism for non-simulated devices */ | ||
210 | if (!is_simulated(bus, devfn)) | ||
211 | return pci_direct_conf1.read(seg, bus, devfn, reg, len, value); | ||
212 | |||
213 | /* | ||
214 | * No device has config registers past 0x70, so we save table space | ||
215 | * by not storing entries for the nonexistent registers | ||
216 | */ | ||
217 | if (reg >= 0x70) | ||
218 | addr = &zero_loc; | ||
219 | else { | ||
220 | switch (devfn) { | ||
221 | case 0x8: | ||
222 | addr = hdr_addr(is_lx ? lxnb_hdr : gxnb_hdr, reg); | ||
223 | break; | ||
224 | case 0x9: | ||
225 | addr = hdr_addr(is_lx ? lxfb_hdr : gxfb_hdr, reg); | ||
226 | break; | ||
227 | case 0xa: | ||
228 | addr = is_lx ? hdr_addr(aes_hdr, reg) : &ff_loc; | ||
229 | break; | ||
230 | case 0x78: | ||
231 | addr = hdr_addr(isa_hdr, reg); | ||
232 | break; | ||
233 | case 0x7b: | ||
234 | addr = hdr_addr(ac97_hdr, reg); | ||
235 | break; | ||
236 | case 0x7c: | ||
237 | addr = hdr_addr(ohci_hdr, reg); | ||
238 | break; | ||
239 | case 0x7d: | ||
240 | addr = hdr_addr(ehci_hdr, reg); | ||
241 | break; | ||
242 | default: | ||
243 | addr = &ff_loc; | ||
244 | break; | ||
245 | } | ||
246 | } | ||
247 | switch (len) { | ||
248 | case 1: | ||
249 | *value = *(uint8_t *)addr; | ||
250 | break; | ||
251 | case 2: | ||
252 | *value = *(uint16_t *)addr; | ||
253 | break; | ||
254 | case 4: | ||
255 | *value = *addr; | ||
256 | break; | ||
257 | default: | ||
258 | BUG(); | ||
259 | } | ||
260 | |||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | static int pci_olpc_write(unsigned int seg, unsigned int bus, | ||
265 | unsigned int devfn, int reg, int len, uint32_t value) | ||
266 | { | ||
267 | /* Use the hardware mechanism for non-simulated devices */ | ||
268 | if (!is_simulated(bus, devfn)) | ||
269 | return pci_direct_conf1.write(seg, bus, devfn, reg, len, value); | ||
270 | |||
271 | /* XXX we may want to extend this to simulate EHCI power management */ | ||
272 | |||
273 | /* | ||
274 | * Mostly we just discard writes, but if the write is a size probe | ||
275 | * (i.e. writing ~0 to a BAR), we remember it and arrange to return | ||
276 | * the appropriate size mask on the next read. This is cheating | ||
277 | * to some extent, because it depends on the fact that the next | ||
278 | * access after such a write will always be a read to the same BAR. | ||
279 | */ | ||
280 | |||
281 | if ((reg >= 0x10) && (reg < 0x2c)) { | ||
282 | /* write is to a BAR */ | ||
283 | if (value == ~0) | ||
284 | bar_probing = 1; | ||
285 | } else { | ||
286 | /* | ||
287 | * No warning on writes to ROM BAR, CMD, LATENCY_TIMER, | ||
288 | * CACHE_LINE_SIZE, or PM registers. | ||
289 | */ | ||
290 | if ((reg != PCI_ROM_ADDRESS) && (reg != PCI_COMMAND_MASTER) && | ||
291 | (reg != PCI_LATENCY_TIMER) && | ||
292 | (reg != PCI_CACHE_LINE_SIZE) && (reg != 0x44)) | ||
293 | printk(KERN_WARNING "OLPC PCI: Config write to devfn" | ||
294 | " %x reg %x value %x\n", devfn, reg, value); | ||
295 | } | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static struct pci_raw_ops pci_olpc_conf = { | ||
301 | .read = pci_olpc_read, | ||
302 | .write = pci_olpc_write, | ||
303 | }; | ||
304 | |||
305 | void __init pci_olpc_init(void) | ||
306 | { | ||
307 | if (!machine_is_olpc() || olpc_has_vsa()) | ||
308 | return; | ||
309 | |||
310 | printk(KERN_INFO "PCI: Using configuration type OLPC\n"); | ||
311 | raw_pci_ops = &pci_olpc_conf; | ||
312 | is_lx = is_geode_lx(); | ||
313 | } | ||
diff --git a/arch/x86/pci/pci.h b/arch/x86/pci/pci.h index c4bddaeff619..7d84e813e958 100644 --- a/arch/x86/pci/pci.h +++ b/arch/x86/pci/pci.h | |||
@@ -98,6 +98,7 @@ extern int pci_direct_probe(void); | |||
98 | extern void pci_direct_init(int type); | 98 | extern void pci_direct_init(int type); |
99 | extern void pci_pcbios_init(void); | 99 | extern void pci_pcbios_init(void); |
100 | extern void pci_mmcfg_init(int type); | 100 | extern void pci_mmcfg_init(int type); |
101 | extern void pci_olpc_init(void); | ||
101 | 102 | ||
102 | /* pci-mmconfig.c */ | 103 | /* pci-mmconfig.c */ |
103 | 104 | ||
diff --git a/arch/xtensa/kernel/asm-offsets.c b/arch/xtensa/kernel/asm-offsets.c index ef63adadf7f4..070ff8af3a21 100644 --- a/arch/xtensa/kernel/asm-offsets.c +++ b/arch/xtensa/kernel/asm-offsets.c | |||
@@ -19,12 +19,11 @@ | |||
19 | #include <linux/thread_info.h> | 19 | #include <linux/thread_info.h> |
20 | #include <linux/ptrace.h> | 20 | #include <linux/ptrace.h> |
21 | #include <linux/mm.h> | 21 | #include <linux/mm.h> |
22 | #include <linux/kbuild.h> | ||
22 | 23 | ||
23 | #include <asm/ptrace.h> | 24 | #include <asm/ptrace.h> |
24 | #include <asm/uaccess.h> | 25 | #include <asm/uaccess.h> |
25 | 26 | ||
26 | #define DEFINE(sym, val) asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
27 | |||
28 | int main(void) | 27 | int main(void) |
29 | { | 28 | { |
30 | /* struct pt_regs */ | 29 | /* struct pt_regs */ |
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index 43a95e5640de..5b73f6a2cd86 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c | |||
@@ -92,6 +92,7 @@ struct acpi_ac { | |||
92 | 92 | ||
93 | #ifdef CONFIG_ACPI_PROCFS_POWER | 93 | #ifdef CONFIG_ACPI_PROCFS_POWER |
94 | static const struct file_operations acpi_ac_fops = { | 94 | static const struct file_operations acpi_ac_fops = { |
95 | .owner = THIS_MODULE, | ||
95 | .open = acpi_ac_open_fs, | 96 | .open = acpi_ac_open_fs, |
96 | .read = seq_read, | 97 | .read = seq_read, |
97 | .llseek = seq_lseek, | 98 | .llseek = seq_lseek, |
@@ -195,16 +196,11 @@ static int acpi_ac_add_fs(struct acpi_device *device) | |||
195 | } | 196 | } |
196 | 197 | ||
197 | /* 'state' [R] */ | 198 | /* 'state' [R] */ |
198 | entry = create_proc_entry(ACPI_AC_FILE_STATE, | 199 | entry = proc_create_data(ACPI_AC_FILE_STATE, |
199 | S_IRUGO, acpi_device_dir(device)); | 200 | S_IRUGO, acpi_device_dir(device), |
201 | &acpi_ac_fops, acpi_driver_data(device)); | ||
200 | if (!entry) | 202 | if (!entry) |
201 | return -ENODEV; | 203 | return -ENODEV; |
202 | else { | ||
203 | entry->proc_fops = &acpi_ac_fops; | ||
204 | entry->data = acpi_driver_data(device); | ||
205 | entry->owner = THIS_MODULE; | ||
206 | } | ||
207 | |||
208 | return 0; | 204 | return 0; |
209 | } | 205 | } |
210 | 206 | ||
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index d5729d5dc190..b1c723f9f58d 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c | |||
@@ -741,15 +741,13 @@ static int acpi_battery_add_fs(struct acpi_device *device) | |||
741 | } | 741 | } |
742 | 742 | ||
743 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { | 743 | for (i = 0; i < ACPI_BATTERY_NUMFILES; ++i) { |
744 | entry = create_proc_entry(acpi_battery_file[i].name, | 744 | entry = proc_create_data(acpi_battery_file[i].name, |
745 | acpi_battery_file[i].mode, acpi_device_dir(device)); | 745 | acpi_battery_file[i].mode, |
746 | acpi_device_dir(device), | ||
747 | &acpi_battery_file[i].ops, | ||
748 | acpi_driver_data(device)); | ||
746 | if (!entry) | 749 | if (!entry) |
747 | return -ENODEV; | 750 | return -ENODEV; |
748 | else { | ||
749 | entry->proc_fops = &acpi_battery_file[i].ops; | ||
750 | entry->data = acpi_driver_data(device); | ||
751 | entry->owner = THIS_MODULE; | ||
752 | } | ||
753 | } | 751 | } |
754 | return 0; | 752 | return 0; |
755 | } | 753 | } |
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c index 6c5da83cdb68..1dfec413588c 100644 --- a/drivers/acpi/button.c +++ b/drivers/acpi/button.c | |||
@@ -102,6 +102,7 @@ struct acpi_button { | |||
102 | }; | 102 | }; |
103 | 103 | ||
104 | static const struct file_operations acpi_button_info_fops = { | 104 | static const struct file_operations acpi_button_info_fops = { |
105 | .owner = THIS_MODULE, | ||
105 | .open = acpi_button_info_open_fs, | 106 | .open = acpi_button_info_open_fs, |
106 | .read = seq_read, | 107 | .read = seq_read, |
107 | .llseek = seq_lseek, | 108 | .llseek = seq_lseek, |
@@ -109,6 +110,7 @@ static const struct file_operations acpi_button_info_fops = { | |||
109 | }; | 110 | }; |
110 | 111 | ||
111 | static const struct file_operations acpi_button_state_fops = { | 112 | static const struct file_operations acpi_button_state_fops = { |
113 | .owner = THIS_MODULE, | ||
112 | .open = acpi_button_state_open_fs, | 114 | .open = acpi_button_state_open_fs, |
113 | .read = seq_read, | 115 | .read = seq_read, |
114 | .llseek = seq_lseek, | 116 | .llseek = seq_lseek, |
@@ -207,27 +209,21 @@ static int acpi_button_add_fs(struct acpi_device *device) | |||
207 | acpi_device_dir(device)->owner = THIS_MODULE; | 209 | acpi_device_dir(device)->owner = THIS_MODULE; |
208 | 210 | ||
209 | /* 'info' [R] */ | 211 | /* 'info' [R] */ |
210 | entry = create_proc_entry(ACPI_BUTTON_FILE_INFO, | 212 | entry = proc_create_data(ACPI_BUTTON_FILE_INFO, |
211 | S_IRUGO, acpi_device_dir(device)); | 213 | S_IRUGO, acpi_device_dir(device), |
214 | &acpi_button_info_fops, | ||
215 | acpi_driver_data(device)); | ||
212 | if (!entry) | 216 | if (!entry) |
213 | return -ENODEV; | 217 | return -ENODEV; |
214 | else { | ||
215 | entry->proc_fops = &acpi_button_info_fops; | ||
216 | entry->data = acpi_driver_data(device); | ||
217 | entry->owner = THIS_MODULE; | ||
218 | } | ||
219 | 218 | ||
220 | /* show lid state [R] */ | 219 | /* show lid state [R] */ |
221 | if (button->type == ACPI_BUTTON_TYPE_LID) { | 220 | if (button->type == ACPI_BUTTON_TYPE_LID) { |
222 | entry = create_proc_entry(ACPI_BUTTON_FILE_STATE, | 221 | entry = proc_create_data(ACPI_BUTTON_FILE_STATE, |
223 | S_IRUGO, acpi_device_dir(device)); | 222 | S_IRUGO, acpi_device_dir(device), |
223 | &acpi_button_state_fops, | ||
224 | acpi_driver_data(device)); | ||
224 | if (!entry) | 225 | if (!entry) |
225 | return -ENODEV; | 226 | return -ENODEV; |
226 | else { | ||
227 | entry->proc_fops = &acpi_button_state_fops; | ||
228 | entry->data = acpi_driver_data(device); | ||
229 | entry->owner = THIS_MODULE; | ||
230 | } | ||
231 | } | 227 | } |
232 | 228 | ||
233 | return 0; | 229 | return 0; |
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 7222a18a0319..e3f04b272f3f 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c | |||
@@ -669,16 +669,11 @@ static int acpi_ec_add_fs(struct acpi_device *device) | |||
669 | return -ENODEV; | 669 | return -ENODEV; |
670 | } | 670 | } |
671 | 671 | ||
672 | entry = create_proc_entry(ACPI_EC_FILE_INFO, S_IRUGO, | 672 | entry = proc_create_data(ACPI_EC_FILE_INFO, S_IRUGO, |
673 | acpi_device_dir(device)); | 673 | acpi_device_dir(device), |
674 | &acpi_ec_info_ops, acpi_driver_data(device)); | ||
674 | if (!entry) | 675 | if (!entry) |
675 | return -ENODEV; | 676 | return -ENODEV; |
676 | else { | ||
677 | entry->proc_fops = &acpi_ec_info_ops; | ||
678 | entry->data = acpi_driver_data(device); | ||
679 | entry->owner = THIS_MODULE; | ||
680 | } | ||
681 | |||
682 | return 0; | 677 | return 0; |
683 | } | 678 | } |
684 | 679 | ||
diff --git a/drivers/acpi/event.c b/drivers/acpi/event.c index abec1ca94cf4..0c24bd4d6562 100644 --- a/drivers/acpi/event.c +++ b/drivers/acpi/event.c | |||
@@ -102,6 +102,7 @@ static unsigned int acpi_system_poll_event(struct file *file, poll_table * wait) | |||
102 | } | 102 | } |
103 | 103 | ||
104 | static const struct file_operations acpi_system_event_ops = { | 104 | static const struct file_operations acpi_system_event_ops = { |
105 | .owner = THIS_MODULE, | ||
105 | .open = acpi_system_open_event, | 106 | .open = acpi_system_open_event, |
106 | .read = acpi_system_read_event, | 107 | .read = acpi_system_read_event, |
107 | .release = acpi_system_close_event, | 108 | .release = acpi_system_close_event, |
@@ -294,10 +295,9 @@ static int __init acpi_event_init(void) | |||
294 | 295 | ||
295 | #ifdef CONFIG_ACPI_PROC_EVENT | 296 | #ifdef CONFIG_ACPI_PROC_EVENT |
296 | /* 'event' [R] */ | 297 | /* 'event' [R] */ |
297 | entry = create_proc_entry("event", S_IRUSR, acpi_root_dir); | 298 | entry = proc_create("event", S_IRUSR, acpi_root_dir, |
298 | if (entry) | 299 | &acpi_system_event_ops); |
299 | entry->proc_fops = &acpi_system_event_ops; | 300 | if (!entry) |
300 | else | ||
301 | return -ENODEV; | 301 | return -ENODEV; |
302 | #endif | 302 | #endif |
303 | 303 | ||
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c index c8e3cba423ef..194077ab9b85 100644 --- a/drivers/acpi/fan.c +++ b/drivers/acpi/fan.c | |||
@@ -192,17 +192,13 @@ static int acpi_fan_add_fs(struct acpi_device *device) | |||
192 | } | 192 | } |
193 | 193 | ||
194 | /* 'status' [R/W] */ | 194 | /* 'status' [R/W] */ |
195 | entry = create_proc_entry(ACPI_FAN_FILE_STATE, | 195 | entry = proc_create_data(ACPI_FAN_FILE_STATE, |
196 | S_IFREG | S_IRUGO | S_IWUSR, | 196 | S_IFREG | S_IRUGO | S_IWUSR, |
197 | acpi_device_dir(device)); | 197 | acpi_device_dir(device), |
198 | &acpi_fan_state_ops, | ||
199 | device); | ||
198 | if (!entry) | 200 | if (!entry) |
199 | return -ENODEV; | 201 | return -ENODEV; |
200 | else { | ||
201 | entry->proc_fops = &acpi_fan_state_ops; | ||
202 | entry->data = device; | ||
203 | entry->owner = THIS_MODULE; | ||
204 | } | ||
205 | |||
206 | return 0; | 202 | return 0; |
207 | } | 203 | } |
208 | 204 | ||
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 76bf6d90c700..21fc8bf0d31f 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c | |||
@@ -93,6 +93,7 @@ struct acpi_power_resource { | |||
93 | static struct list_head acpi_power_resource_list; | 93 | static struct list_head acpi_power_resource_list; |
94 | 94 | ||
95 | static const struct file_operations acpi_power_fops = { | 95 | static const struct file_operations acpi_power_fops = { |
96 | .owner = THIS_MODULE, | ||
96 | .open = acpi_power_open_fs, | 97 | .open = acpi_power_open_fs, |
97 | .read = seq_read, | 98 | .read = seq_read, |
98 | .llseek = seq_lseek, | 99 | .llseek = seq_lseek, |
@@ -543,15 +544,11 @@ static int acpi_power_add_fs(struct acpi_device *device) | |||
543 | } | 544 | } |
544 | 545 | ||
545 | /* 'status' [R] */ | 546 | /* 'status' [R] */ |
546 | entry = create_proc_entry(ACPI_POWER_FILE_STATUS, | 547 | entry = proc_create_data(ACPI_POWER_FILE_STATUS, |
547 | S_IRUGO, acpi_device_dir(device)); | 548 | S_IRUGO, acpi_device_dir(device), |
549 | &acpi_power_fops, acpi_driver_data(device)); | ||
548 | if (!entry) | 550 | if (!entry) |
549 | return -EIO; | 551 | return -EIO; |
550 | else { | ||
551 | entry->proc_fops = &acpi_power_fops; | ||
552 | entry->data = acpi_driver_data(device); | ||
553 | } | ||
554 | |||
555 | return 0; | 552 | return 0; |
556 | } | 553 | } |
557 | 554 | ||
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index a825b431b64f..dd28c912e84f 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c | |||
@@ -112,6 +112,7 @@ static struct acpi_driver acpi_processor_driver = { | |||
112 | #define UNINSTALL_NOTIFY_HANDLER 2 | 112 | #define UNINSTALL_NOTIFY_HANDLER 2 |
113 | 113 | ||
114 | static const struct file_operations acpi_processor_info_fops = { | 114 | static const struct file_operations acpi_processor_info_fops = { |
115 | .owner = THIS_MODULE, | ||
115 | .open = acpi_processor_info_open_fs, | 116 | .open = acpi_processor_info_open_fs, |
116 | .read = seq_read, | 117 | .read = seq_read, |
117 | .llseek = seq_lseek, | 118 | .llseek = seq_lseek, |
@@ -326,40 +327,30 @@ static int acpi_processor_add_fs(struct acpi_device *device) | |||
326 | acpi_device_dir(device)->owner = THIS_MODULE; | 327 | acpi_device_dir(device)->owner = THIS_MODULE; |
327 | 328 | ||
328 | /* 'info' [R] */ | 329 | /* 'info' [R] */ |
329 | entry = create_proc_entry(ACPI_PROCESSOR_FILE_INFO, | 330 | entry = proc_create_data(ACPI_PROCESSOR_FILE_INFO, |
330 | S_IRUGO, acpi_device_dir(device)); | 331 | S_IRUGO, acpi_device_dir(device), |
332 | &acpi_processor_info_fops, | ||
333 | acpi_driver_data(device)); | ||
331 | if (!entry) | 334 | if (!entry) |
332 | return -EIO; | 335 | return -EIO; |
333 | else { | ||
334 | entry->proc_fops = &acpi_processor_info_fops; | ||
335 | entry->data = acpi_driver_data(device); | ||
336 | entry->owner = THIS_MODULE; | ||
337 | } | ||
338 | 336 | ||
339 | /* 'throttling' [R/W] */ | 337 | /* 'throttling' [R/W] */ |
340 | entry = create_proc_entry(ACPI_PROCESSOR_FILE_THROTTLING, | 338 | entry = proc_create_data(ACPI_PROCESSOR_FILE_THROTTLING, |
341 | S_IFREG | S_IRUGO | S_IWUSR, | 339 | S_IFREG | S_IRUGO | S_IWUSR, |
342 | acpi_device_dir(device)); | 340 | acpi_device_dir(device), |
341 | &acpi_processor_throttling_fops, | ||
342 | acpi_driver_data(device)); | ||
343 | if (!entry) | 343 | if (!entry) |
344 | return -EIO; | 344 | return -EIO; |
345 | else { | ||
346 | entry->proc_fops = &acpi_processor_throttling_fops; | ||
347 | entry->data = acpi_driver_data(device); | ||
348 | entry->owner = THIS_MODULE; | ||
349 | } | ||
350 | 345 | ||
351 | /* 'limit' [R/W] */ | 346 | /* 'limit' [R/W] */ |
352 | entry = create_proc_entry(ACPI_PROCESSOR_FILE_LIMIT, | 347 | entry = proc_create_data(ACPI_PROCESSOR_FILE_LIMIT, |
353 | S_IFREG | S_IRUGO | S_IWUSR, | 348 | S_IFREG | S_IRUGO | S_IWUSR, |
354 | acpi_device_dir(device)); | 349 | acpi_device_dir(device), |
350 | &acpi_processor_limit_fops, | ||
351 | acpi_driver_data(device)); | ||
355 | if (!entry) | 352 | if (!entry) |
356 | return -EIO; | 353 | return -EIO; |
357 | else { | ||
358 | entry->proc_fops = &acpi_processor_limit_fops; | ||
359 | entry->data = acpi_driver_data(device); | ||
360 | entry->owner = THIS_MODULE; | ||
361 | } | ||
362 | |||
363 | return 0; | 354 | return 0; |
364 | } | 355 | } |
365 | 356 | ||
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 0d90ff5fd117..789d4947ed31 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
@@ -1282,6 +1282,7 @@ static int acpi_processor_power_open_fs(struct inode *inode, struct file *file) | |||
1282 | } | 1282 | } |
1283 | 1283 | ||
1284 | static const struct file_operations acpi_processor_power_fops = { | 1284 | static const struct file_operations acpi_processor_power_fops = { |
1285 | .owner = THIS_MODULE, | ||
1285 | .open = acpi_processor_power_open_fs, | 1286 | .open = acpi_processor_power_open_fs, |
1286 | .read = seq_read, | 1287 | .read = seq_read, |
1287 | .llseek = seq_lseek, | 1288 | .llseek = seq_lseek, |
@@ -1822,16 +1823,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr, | |||
1822 | } | 1823 | } |
1823 | 1824 | ||
1824 | /* 'power' [R] */ | 1825 | /* 'power' [R] */ |
1825 | entry = create_proc_entry(ACPI_PROCESSOR_FILE_POWER, | 1826 | entry = proc_create_data(ACPI_PROCESSOR_FILE_POWER, |
1826 | S_IRUGO, acpi_device_dir(device)); | 1827 | S_IRUGO, acpi_device_dir(device), |
1828 | &acpi_processor_power_fops, | ||
1829 | acpi_driver_data(device)); | ||
1827 | if (!entry) | 1830 | if (!entry) |
1828 | return -EIO; | 1831 | return -EIO; |
1829 | else { | ||
1830 | entry->proc_fops = &acpi_processor_power_fops; | ||
1831 | entry->data = acpi_driver_data(device); | ||
1832 | entry->owner = THIS_MODULE; | ||
1833 | } | ||
1834 | |||
1835 | return 0; | 1832 | return 0; |
1836 | } | 1833 | } |
1837 | 1834 | ||
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index b477a4be8a69..d80b2d1441af 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c | |||
@@ -411,6 +411,7 @@ EXPORT_SYMBOL(acpi_processor_notify_smm); | |||
411 | 411 | ||
412 | static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file); | 412 | static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file); |
413 | static struct file_operations acpi_processor_perf_fops = { | 413 | static struct file_operations acpi_processor_perf_fops = { |
414 | .owner = THIS_MODULE, | ||
414 | .open = acpi_processor_perf_open_fs, | 415 | .open = acpi_processor_perf_open_fs, |
415 | .read = seq_read, | 416 | .read = seq_read, |
416 | .llseek = seq_lseek, | 417 | .llseek = seq_lseek, |
@@ -456,7 +457,6 @@ static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file) | |||
456 | 457 | ||
457 | static void acpi_cpufreq_add_file(struct acpi_processor *pr) | 458 | static void acpi_cpufreq_add_file(struct acpi_processor *pr) |
458 | { | 459 | { |
459 | struct proc_dir_entry *entry = NULL; | ||
460 | struct acpi_device *device = NULL; | 460 | struct acpi_device *device = NULL; |
461 | 461 | ||
462 | 462 | ||
@@ -464,14 +464,9 @@ static void acpi_cpufreq_add_file(struct acpi_processor *pr) | |||
464 | return; | 464 | return; |
465 | 465 | ||
466 | /* add file 'performance' [R/W] */ | 466 | /* add file 'performance' [R/W] */ |
467 | entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE, | 467 | proc_create_data(ACPI_PROCESSOR_FILE_PERFORMANCE, S_IFREG | S_IRUGO, |
468 | S_IFREG | S_IRUGO, | 468 | acpi_device_dir(device), |
469 | acpi_device_dir(device)); | 469 | &acpi_processor_perf_fops, acpi_driver_data(device)); |
470 | if (entry){ | ||
471 | entry->proc_fops = &acpi_processor_perf_fops; | ||
472 | entry->data = acpi_driver_data(device); | ||
473 | entry->owner = THIS_MODULE; | ||
474 | } | ||
475 | return; | 470 | return; |
476 | } | 471 | } |
477 | 472 | ||
diff --git a/drivers/acpi/processor_thermal.c b/drivers/acpi/processor_thermal.c index 649ae99b9216..ef34b18f95ca 100644 --- a/drivers/acpi/processor_thermal.c +++ b/drivers/acpi/processor_thermal.c | |||
@@ -509,6 +509,7 @@ static ssize_t acpi_processor_write_limit(struct file * file, | |||
509 | } | 509 | } |
510 | 510 | ||
511 | struct file_operations acpi_processor_limit_fops = { | 511 | struct file_operations acpi_processor_limit_fops = { |
512 | .owner = THIS_MODULE, | ||
512 | .open = acpi_processor_limit_open_fs, | 513 | .open = acpi_processor_limit_open_fs, |
513 | .read = seq_read, | 514 | .read = seq_read, |
514 | .write = acpi_processor_write_limit, | 515 | .write = acpi_processor_write_limit, |
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c index 0bba3a914e86..bb06738860c4 100644 --- a/drivers/acpi/processor_throttling.c +++ b/drivers/acpi/processor_throttling.c | |||
@@ -1252,6 +1252,7 @@ static ssize_t acpi_processor_write_throttling(struct file *file, | |||
1252 | } | 1252 | } |
1253 | 1253 | ||
1254 | struct file_operations acpi_processor_throttling_fops = { | 1254 | struct file_operations acpi_processor_throttling_fops = { |
1255 | .owner = THIS_MODULE, | ||
1255 | .open = acpi_processor_throttling_open_fs, | 1256 | .open = acpi_processor_throttling_open_fs, |
1256 | .read = seq_read, | 1257 | .read = seq_read, |
1257 | .write = acpi_processor_write_throttling, | 1258 | .write = acpi_processor_write_throttling, |
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c index 585ae3c9c8ea..10a36512647c 100644 --- a/drivers/acpi/sbs.c +++ b/drivers/acpi/sbs.c | |||
@@ -483,8 +483,6 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir, | |||
483 | struct file_operations *state_fops, | 483 | struct file_operations *state_fops, |
484 | struct file_operations *alarm_fops, void *data) | 484 | struct file_operations *alarm_fops, void *data) |
485 | { | 485 | { |
486 | struct proc_dir_entry *entry = NULL; | ||
487 | |||
488 | if (!*dir) { | 486 | if (!*dir) { |
489 | *dir = proc_mkdir(dir_name, parent_dir); | 487 | *dir = proc_mkdir(dir_name, parent_dir); |
490 | if (!*dir) { | 488 | if (!*dir) { |
@@ -494,34 +492,19 @@ acpi_sbs_add_fs(struct proc_dir_entry **dir, | |||
494 | } | 492 | } |
495 | 493 | ||
496 | /* 'info' [R] */ | 494 | /* 'info' [R] */ |
497 | if (info_fops) { | 495 | if (info_fops) |
498 | entry = create_proc_entry(ACPI_SBS_FILE_INFO, S_IRUGO, *dir); | 496 | proc_create_data(ACPI_SBS_FILE_INFO, S_IRUGO, *dir, |
499 | if (entry) { | 497 | info_fops, data); |
500 | entry->proc_fops = info_fops; | ||
501 | entry->data = data; | ||
502 | entry->owner = THIS_MODULE; | ||
503 | } | ||
504 | } | ||
505 | 498 | ||
506 | /* 'state' [R] */ | 499 | /* 'state' [R] */ |
507 | if (state_fops) { | 500 | if (state_fops) |
508 | entry = create_proc_entry(ACPI_SBS_FILE_STATE, S_IRUGO, *dir); | 501 | proc_create_data(ACPI_SBS_FILE_STATE, S_IRUGO, *dir, |
509 | if (entry) { | 502 | state_fops, data); |
510 | entry->proc_fops = state_fops; | ||
511 | entry->data = data; | ||
512 | entry->owner = THIS_MODULE; | ||
513 | } | ||
514 | } | ||
515 | 503 | ||
516 | /* 'alarm' [R/W] */ | 504 | /* 'alarm' [R/W] */ |
517 | if (alarm_fops) { | 505 | if (alarm_fops) |
518 | entry = create_proc_entry(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir); | 506 | proc_create_data(ACPI_SBS_FILE_ALARM, S_IRUGO, *dir, |
519 | if (entry) { | 507 | alarm_fops, data); |
520 | entry->proc_fops = alarm_fops; | ||
521 | entry->data = data; | ||
522 | entry->owner = THIS_MODULE; | ||
523 | } | ||
524 | } | ||
525 | return 0; | 508 | return 0; |
526 | } | 509 | } |
527 | 510 | ||
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index f8df5217d477..8a5fe8710513 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
@@ -440,6 +440,7 @@ acpi_system_wakeup_device_open_fs(struct inode *inode, struct file *file) | |||
440 | } | 440 | } |
441 | 441 | ||
442 | static const struct file_operations acpi_system_wakeup_device_fops = { | 442 | static const struct file_operations acpi_system_wakeup_device_fops = { |
443 | .owner = THIS_MODULE, | ||
443 | .open = acpi_system_wakeup_device_open_fs, | 444 | .open = acpi_system_wakeup_device_open_fs, |
444 | .read = seq_read, | 445 | .read = seq_read, |
445 | .write = acpi_system_write_wakeup_device, | 446 | .write = acpi_system_write_wakeup_device, |
@@ -449,6 +450,7 @@ static const struct file_operations acpi_system_wakeup_device_fops = { | |||
449 | 450 | ||
450 | #ifdef CONFIG_ACPI_PROCFS | 451 | #ifdef CONFIG_ACPI_PROCFS |
451 | static const struct file_operations acpi_system_sleep_fops = { | 452 | static const struct file_operations acpi_system_sleep_fops = { |
453 | .owner = THIS_MODULE, | ||
452 | .open = acpi_system_sleep_open_fs, | 454 | .open = acpi_system_sleep_open_fs, |
453 | .read = seq_read, | 455 | .read = seq_read, |
454 | .write = acpi_system_write_sleep, | 456 | .write = acpi_system_write_sleep, |
@@ -459,6 +461,7 @@ static const struct file_operations acpi_system_sleep_fops = { | |||
459 | 461 | ||
460 | #ifdef HAVE_ACPI_LEGACY_ALARM | 462 | #ifdef HAVE_ACPI_LEGACY_ALARM |
461 | static const struct file_operations acpi_system_alarm_fops = { | 463 | static const struct file_operations acpi_system_alarm_fops = { |
464 | .owner = THIS_MODULE, | ||
462 | .open = acpi_system_alarm_open_fs, | 465 | .open = acpi_system_alarm_open_fs, |
463 | .read = seq_read, | 466 | .read = seq_read, |
464 | .write = acpi_system_write_alarm, | 467 | .write = acpi_system_write_alarm, |
@@ -477,37 +480,26 @@ static u32 rtc_handler(void *context) | |||
477 | 480 | ||
478 | static int __init acpi_sleep_proc_init(void) | 481 | static int __init acpi_sleep_proc_init(void) |
479 | { | 482 | { |
480 | struct proc_dir_entry *entry = NULL; | ||
481 | |||
482 | if (acpi_disabled) | 483 | if (acpi_disabled) |
483 | return 0; | 484 | return 0; |
484 | 485 | ||
485 | #ifdef CONFIG_ACPI_PROCFS | 486 | #ifdef CONFIG_ACPI_PROCFS |
486 | /* 'sleep' [R/W] */ | 487 | /* 'sleep' [R/W] */ |
487 | entry = | 488 | proc_create("sleep", S_IFREG | S_IRUGO | S_IWUSR, |
488 | create_proc_entry("sleep", S_IFREG | S_IRUGO | S_IWUSR, | 489 | acpi_root_dir, &acpi_system_sleep_fops); |
489 | acpi_root_dir); | ||
490 | if (entry) | ||
491 | entry->proc_fops = &acpi_system_sleep_fops; | ||
492 | #endif /* CONFIG_ACPI_PROCFS */ | 490 | #endif /* CONFIG_ACPI_PROCFS */ |
493 | 491 | ||
494 | #ifdef HAVE_ACPI_LEGACY_ALARM | 492 | #ifdef HAVE_ACPI_LEGACY_ALARM |
495 | /* 'alarm' [R/W] */ | 493 | /* 'alarm' [R/W] */ |
496 | entry = | 494 | proc_create("alarm", S_IFREG | S_IRUGO | S_IWUSR, |
497 | create_proc_entry("alarm", S_IFREG | S_IRUGO | S_IWUSR, | 495 | acpi_root_dir, &acpi_system_alarm_fops); |
498 | acpi_root_dir); | ||
499 | if (entry) | ||
500 | entry->proc_fops = &acpi_system_alarm_fops; | ||
501 | 496 | ||
502 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); | 497 | acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); |
503 | #endif /* HAVE_ACPI_LEGACY_ALARM */ | 498 | #endif /* HAVE_ACPI_LEGACY_ALARM */ |
504 | 499 | ||
505 | /* 'wakeup device' [R/W] */ | 500 | /* 'wakeup device' [R/W] */ |
506 | entry = | 501 | proc_create("wakeup", S_IFREG | S_IRUGO | S_IWUSR, |
507 | create_proc_entry("wakeup", S_IFREG | S_IRUGO | S_IWUSR, | 502 | acpi_root_dir, &acpi_system_wakeup_device_fops); |
508 | acpi_root_dir); | ||
509 | if (entry) | ||
510 | entry->proc_fops = &acpi_system_wakeup_device_fops; | ||
511 | 503 | ||
512 | return 0; | 504 | return 0; |
513 | } | 505 | } |
diff --git a/drivers/acpi/system.c b/drivers/acpi/system.c index 4749f379a915..769f24855eb6 100644 --- a/drivers/acpi/system.c +++ b/drivers/acpi/system.c | |||
@@ -396,6 +396,7 @@ static int acpi_system_info_open_fs(struct inode *inode, struct file *file) | |||
396 | } | 396 | } |
397 | 397 | ||
398 | static const struct file_operations acpi_system_info_ops = { | 398 | static const struct file_operations acpi_system_info_ops = { |
399 | .owner = THIS_MODULE, | ||
399 | .open = acpi_system_info_open_fs, | 400 | .open = acpi_system_info_open_fs, |
400 | .read = seq_read, | 401 | .read = seq_read, |
401 | .llseek = seq_lseek, | 402 | .llseek = seq_lseek, |
@@ -406,6 +407,7 @@ static ssize_t acpi_system_read_dsdt(struct file *, char __user *, size_t, | |||
406 | loff_t *); | 407 | loff_t *); |
407 | 408 | ||
408 | static const struct file_operations acpi_system_dsdt_ops = { | 409 | static const struct file_operations acpi_system_dsdt_ops = { |
410 | .owner = THIS_MODULE, | ||
409 | .read = acpi_system_read_dsdt, | 411 | .read = acpi_system_read_dsdt, |
410 | }; | 412 | }; |
411 | 413 | ||
@@ -430,6 +432,7 @@ static ssize_t acpi_system_read_fadt(struct file *, char __user *, size_t, | |||
430 | loff_t *); | 432 | loff_t *); |
431 | 433 | ||
432 | static const struct file_operations acpi_system_fadt_ops = { | 434 | static const struct file_operations acpi_system_fadt_ops = { |
435 | .owner = THIS_MODULE, | ||
433 | .read = acpi_system_read_fadt, | 436 | .read = acpi_system_read_fadt, |
434 | }; | 437 | }; |
435 | 438 | ||
@@ -454,31 +457,23 @@ static int acpi_system_procfs_init(void) | |||
454 | { | 457 | { |
455 | struct proc_dir_entry *entry; | 458 | struct proc_dir_entry *entry; |
456 | int error = 0; | 459 | int error = 0; |
457 | char *name; | ||
458 | 460 | ||
459 | /* 'info' [R] */ | 461 | /* 'info' [R] */ |
460 | name = ACPI_SYSTEM_FILE_INFO; | 462 | entry = proc_create(ACPI_SYSTEM_FILE_INFO, S_IRUGO, acpi_root_dir, |
461 | entry = create_proc_entry(name, S_IRUGO, acpi_root_dir); | 463 | &acpi_system_info_ops); |
462 | if (!entry) | 464 | if (!entry) |
463 | goto Error; | 465 | goto Error; |
464 | else { | ||
465 | entry->proc_fops = &acpi_system_info_ops; | ||
466 | } | ||
467 | 466 | ||
468 | /* 'dsdt' [R] */ | 467 | /* 'dsdt' [R] */ |
469 | name = ACPI_SYSTEM_FILE_DSDT; | 468 | entry = proc_create(ACPI_SYSTEM_FILE_DSDT, S_IRUSR, acpi_root_dir, |
470 | entry = create_proc_entry(name, S_IRUSR, acpi_root_dir); | 469 | &acpi_system_dsdt_ops); |
471 | if (entry) | 470 | if (!entry) |
472 | entry->proc_fops = &acpi_system_dsdt_ops; | ||
473 | else | ||
474 | goto Error; | 471 | goto Error; |
475 | 472 | ||
476 | /* 'fadt' [R] */ | 473 | /* 'fadt' [R] */ |
477 | name = ACPI_SYSTEM_FILE_FADT; | 474 | entry = proc_create(ACPI_SYSTEM_FILE_FADT, S_IRUSR, acpi_root_dir, |
478 | entry = create_proc_entry(name, S_IRUSR, acpi_root_dir); | 475 | &acpi_system_fadt_ops); |
479 | if (entry) | 476 | if (!entry) |
480 | entry->proc_fops = &acpi_system_fadt_ops; | ||
481 | else | ||
482 | goto Error; | 477 | goto Error; |
483 | 478 | ||
484 | Done: | 479 | Done: |
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 766bd25d3376..0815ac3ae3d6 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c | |||
@@ -198,6 +198,7 @@ struct acpi_thermal { | |||
198 | }; | 198 | }; |
199 | 199 | ||
200 | static const struct file_operations acpi_thermal_state_fops = { | 200 | static const struct file_operations acpi_thermal_state_fops = { |
201 | .owner = THIS_MODULE, | ||
201 | .open = acpi_thermal_state_open_fs, | 202 | .open = acpi_thermal_state_open_fs, |
202 | .read = seq_read, | 203 | .read = seq_read, |
203 | .llseek = seq_lseek, | 204 | .llseek = seq_lseek, |
@@ -205,6 +206,7 @@ static const struct file_operations acpi_thermal_state_fops = { | |||
205 | }; | 206 | }; |
206 | 207 | ||
207 | static const struct file_operations acpi_thermal_temp_fops = { | 208 | static const struct file_operations acpi_thermal_temp_fops = { |
209 | .owner = THIS_MODULE, | ||
208 | .open = acpi_thermal_temp_open_fs, | 210 | .open = acpi_thermal_temp_open_fs, |
209 | .read = seq_read, | 211 | .read = seq_read, |
210 | .llseek = seq_lseek, | 212 | .llseek = seq_lseek, |
@@ -212,6 +214,7 @@ static const struct file_operations acpi_thermal_temp_fops = { | |||
212 | }; | 214 | }; |
213 | 215 | ||
214 | static const struct file_operations acpi_thermal_trip_fops = { | 216 | static const struct file_operations acpi_thermal_trip_fops = { |
217 | .owner = THIS_MODULE, | ||
215 | .open = acpi_thermal_trip_open_fs, | 218 | .open = acpi_thermal_trip_open_fs, |
216 | .read = seq_read, | 219 | .read = seq_read, |
217 | .llseek = seq_lseek, | 220 | .llseek = seq_lseek, |
@@ -219,6 +222,7 @@ static const struct file_operations acpi_thermal_trip_fops = { | |||
219 | }; | 222 | }; |
220 | 223 | ||
221 | static const struct file_operations acpi_thermal_cooling_fops = { | 224 | static const struct file_operations acpi_thermal_cooling_fops = { |
225 | .owner = THIS_MODULE, | ||
222 | .open = acpi_thermal_cooling_open_fs, | 226 | .open = acpi_thermal_cooling_open_fs, |
223 | .read = seq_read, | 227 | .read = seq_read, |
224 | .write = acpi_thermal_write_cooling_mode, | 228 | .write = acpi_thermal_write_cooling_mode, |
@@ -227,6 +231,7 @@ static const struct file_operations acpi_thermal_cooling_fops = { | |||
227 | }; | 231 | }; |
228 | 232 | ||
229 | static const struct file_operations acpi_thermal_polling_fops = { | 233 | static const struct file_operations acpi_thermal_polling_fops = { |
234 | .owner = THIS_MODULE, | ||
230 | .open = acpi_thermal_polling_open_fs, | 235 | .open = acpi_thermal_polling_open_fs, |
231 | .read = seq_read, | 236 | .read = seq_read, |
232 | .write = acpi_thermal_write_polling, | 237 | .write = acpi_thermal_write_polling, |
@@ -1419,63 +1424,47 @@ static int acpi_thermal_add_fs(struct acpi_device *device) | |||
1419 | } | 1424 | } |
1420 | 1425 | ||
1421 | /* 'state' [R] */ | 1426 | /* 'state' [R] */ |
1422 | entry = create_proc_entry(ACPI_THERMAL_FILE_STATE, | 1427 | entry = proc_create_data(ACPI_THERMAL_FILE_STATE, |
1423 | S_IRUGO, acpi_device_dir(device)); | 1428 | S_IRUGO, acpi_device_dir(device), |
1429 | &acpi_thermal_state_fops, | ||
1430 | acpi_driver_data(device)); | ||
1424 | if (!entry) | 1431 | if (!entry) |
1425 | return -ENODEV; | 1432 | return -ENODEV; |
1426 | else { | ||
1427 | entry->proc_fops = &acpi_thermal_state_fops; | ||
1428 | entry->data = acpi_driver_data(device); | ||
1429 | entry->owner = THIS_MODULE; | ||
1430 | } | ||
1431 | 1433 | ||
1432 | /* 'temperature' [R] */ | 1434 | /* 'temperature' [R] */ |
1433 | entry = create_proc_entry(ACPI_THERMAL_FILE_TEMPERATURE, | 1435 | entry = proc_create_data(ACPI_THERMAL_FILE_TEMPERATURE, |
1434 | S_IRUGO, acpi_device_dir(device)); | 1436 | S_IRUGO, acpi_device_dir(device), |
1437 | &acpi_thermal_temp_fops, | ||
1438 | acpi_driver_data(device)); | ||
1435 | if (!entry) | 1439 | if (!entry) |
1436 | return -ENODEV; | 1440 | return -ENODEV; |
1437 | else { | ||
1438 | entry->proc_fops = &acpi_thermal_temp_fops; | ||
1439 | entry->data = acpi_driver_data(device); | ||
1440 | entry->owner = THIS_MODULE; | ||
1441 | } | ||
1442 | 1441 | ||
1443 | /* 'trip_points' [R] */ | 1442 | /* 'trip_points' [R] */ |
1444 | entry = create_proc_entry(ACPI_THERMAL_FILE_TRIP_POINTS, | 1443 | entry = proc_create_data(ACPI_THERMAL_FILE_TRIP_POINTS, |
1445 | S_IRUGO, | 1444 | S_IRUGO, |
1446 | acpi_device_dir(device)); | 1445 | acpi_device_dir(device), |
1446 | &acpi_thermal_trip_fops, | ||
1447 | acpi_driver_data(device)); | ||
1447 | if (!entry) | 1448 | if (!entry) |
1448 | return -ENODEV; | 1449 | return -ENODEV; |
1449 | else { | ||
1450 | entry->proc_fops = &acpi_thermal_trip_fops; | ||
1451 | entry->data = acpi_driver_data(device); | ||
1452 | entry->owner = THIS_MODULE; | ||
1453 | } | ||
1454 | 1450 | ||
1455 | /* 'cooling_mode' [R/W] */ | 1451 | /* 'cooling_mode' [R/W] */ |
1456 | entry = create_proc_entry(ACPI_THERMAL_FILE_COOLING_MODE, | 1452 | entry = proc_create_data(ACPI_THERMAL_FILE_COOLING_MODE, |
1457 | S_IFREG | S_IRUGO | S_IWUSR, | 1453 | S_IFREG | S_IRUGO | S_IWUSR, |
1458 | acpi_device_dir(device)); | 1454 | acpi_device_dir(device), |
1455 | &acpi_thermal_cooling_fops, | ||
1456 | acpi_driver_data(device)); | ||
1459 | if (!entry) | 1457 | if (!entry) |
1460 | return -ENODEV; | 1458 | return -ENODEV; |
1461 | else { | ||
1462 | entry->proc_fops = &acpi_thermal_cooling_fops; | ||
1463 | entry->data = acpi_driver_data(device); | ||
1464 | entry->owner = THIS_MODULE; | ||
1465 | } | ||
1466 | 1459 | ||
1467 | /* 'polling_frequency' [R/W] */ | 1460 | /* 'polling_frequency' [R/W] */ |
1468 | entry = create_proc_entry(ACPI_THERMAL_FILE_POLLING_FREQ, | 1461 | entry = proc_create_data(ACPI_THERMAL_FILE_POLLING_FREQ, |
1469 | S_IFREG | S_IRUGO | S_IWUSR, | 1462 | S_IFREG | S_IRUGO | S_IWUSR, |
1470 | acpi_device_dir(device)); | 1463 | acpi_device_dir(device), |
1464 | &acpi_thermal_polling_fops, | ||
1465 | acpi_driver_data(device)); | ||
1471 | if (!entry) | 1466 | if (!entry) |
1472 | return -ENODEV; | 1467 | return -ENODEV; |
1473 | else { | ||
1474 | entry->proc_fops = &acpi_thermal_polling_fops; | ||
1475 | entry->data = acpi_driver_data(device); | ||
1476 | entry->owner = THIS_MODULE; | ||
1477 | } | ||
1478 | |||
1479 | return 0; | 1468 | return 0; |
1480 | } | 1469 | } |
1481 | 1470 | ||
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 980a74188781..43b228314a86 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c | |||
@@ -192,6 +192,7 @@ struct acpi_video_device { | |||
192 | /* bus */ | 192 | /* bus */ |
193 | static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file); | 193 | static int acpi_video_bus_info_open_fs(struct inode *inode, struct file *file); |
194 | static struct file_operations acpi_video_bus_info_fops = { | 194 | static struct file_operations acpi_video_bus_info_fops = { |
195 | .owner = THIS_MODULE, | ||
195 | .open = acpi_video_bus_info_open_fs, | 196 | .open = acpi_video_bus_info_open_fs, |
196 | .read = seq_read, | 197 | .read = seq_read, |
197 | .llseek = seq_lseek, | 198 | .llseek = seq_lseek, |
@@ -200,6 +201,7 @@ static struct file_operations acpi_video_bus_info_fops = { | |||
200 | 201 | ||
201 | static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file); | 202 | static int acpi_video_bus_ROM_open_fs(struct inode *inode, struct file *file); |
202 | static struct file_operations acpi_video_bus_ROM_fops = { | 203 | static struct file_operations acpi_video_bus_ROM_fops = { |
204 | .owner = THIS_MODULE, | ||
203 | .open = acpi_video_bus_ROM_open_fs, | 205 | .open = acpi_video_bus_ROM_open_fs, |
204 | .read = seq_read, | 206 | .read = seq_read, |
205 | .llseek = seq_lseek, | 207 | .llseek = seq_lseek, |
@@ -209,6 +211,7 @@ static struct file_operations acpi_video_bus_ROM_fops = { | |||
209 | static int acpi_video_bus_POST_info_open_fs(struct inode *inode, | 211 | static int acpi_video_bus_POST_info_open_fs(struct inode *inode, |
210 | struct file *file); | 212 | struct file *file); |
211 | static struct file_operations acpi_video_bus_POST_info_fops = { | 213 | static struct file_operations acpi_video_bus_POST_info_fops = { |
214 | .owner = THIS_MODULE, | ||
212 | .open = acpi_video_bus_POST_info_open_fs, | 215 | .open = acpi_video_bus_POST_info_open_fs, |
213 | .read = seq_read, | 216 | .read = seq_read, |
214 | .llseek = seq_lseek, | 217 | .llseek = seq_lseek, |
@@ -217,6 +220,7 @@ static struct file_operations acpi_video_bus_POST_info_fops = { | |||
217 | 220 | ||
218 | static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file); | 221 | static int acpi_video_bus_POST_open_fs(struct inode *inode, struct file *file); |
219 | static struct file_operations acpi_video_bus_POST_fops = { | 222 | static struct file_operations acpi_video_bus_POST_fops = { |
223 | .owner = THIS_MODULE, | ||
220 | .open = acpi_video_bus_POST_open_fs, | 224 | .open = acpi_video_bus_POST_open_fs, |
221 | .read = seq_read, | 225 | .read = seq_read, |
222 | .llseek = seq_lseek, | 226 | .llseek = seq_lseek, |
@@ -225,6 +229,7 @@ static struct file_operations acpi_video_bus_POST_fops = { | |||
225 | 229 | ||
226 | static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file); | 230 | static int acpi_video_bus_DOS_open_fs(struct inode *inode, struct file *file); |
227 | static struct file_operations acpi_video_bus_DOS_fops = { | 231 | static struct file_operations acpi_video_bus_DOS_fops = { |
232 | .owner = THIS_MODULE, | ||
228 | .open = acpi_video_bus_DOS_open_fs, | 233 | .open = acpi_video_bus_DOS_open_fs, |
229 | .read = seq_read, | 234 | .read = seq_read, |
230 | .llseek = seq_lseek, | 235 | .llseek = seq_lseek, |
@@ -235,6 +240,7 @@ static struct file_operations acpi_video_bus_DOS_fops = { | |||
235 | static int acpi_video_device_info_open_fs(struct inode *inode, | 240 | static int acpi_video_device_info_open_fs(struct inode *inode, |
236 | struct file *file); | 241 | struct file *file); |
237 | static struct file_operations acpi_video_device_info_fops = { | 242 | static struct file_operations acpi_video_device_info_fops = { |
243 | .owner = THIS_MODULE, | ||
238 | .open = acpi_video_device_info_open_fs, | 244 | .open = acpi_video_device_info_open_fs, |
239 | .read = seq_read, | 245 | .read = seq_read, |
240 | .llseek = seq_lseek, | 246 | .llseek = seq_lseek, |
@@ -244,6 +250,7 @@ static struct file_operations acpi_video_device_info_fops = { | |||
244 | static int acpi_video_device_state_open_fs(struct inode *inode, | 250 | static int acpi_video_device_state_open_fs(struct inode *inode, |
245 | struct file *file); | 251 | struct file *file); |
246 | static struct file_operations acpi_video_device_state_fops = { | 252 | static struct file_operations acpi_video_device_state_fops = { |
253 | .owner = THIS_MODULE, | ||
247 | .open = acpi_video_device_state_open_fs, | 254 | .open = acpi_video_device_state_open_fs, |
248 | .read = seq_read, | 255 | .read = seq_read, |
249 | .llseek = seq_lseek, | 256 | .llseek = seq_lseek, |
@@ -253,6 +260,7 @@ static struct file_operations acpi_video_device_state_fops = { | |||
253 | static int acpi_video_device_brightness_open_fs(struct inode *inode, | 260 | static int acpi_video_device_brightness_open_fs(struct inode *inode, |
254 | struct file *file); | 261 | struct file *file); |
255 | static struct file_operations acpi_video_device_brightness_fops = { | 262 | static struct file_operations acpi_video_device_brightness_fops = { |
263 | .owner = THIS_MODULE, | ||
256 | .open = acpi_video_device_brightness_open_fs, | 264 | .open = acpi_video_device_brightness_open_fs, |
257 | .read = seq_read, | 265 | .read = seq_read, |
258 | .llseek = seq_lseek, | 266 | .llseek = seq_lseek, |
@@ -262,6 +270,7 @@ static struct file_operations acpi_video_device_brightness_fops = { | |||
262 | static int acpi_video_device_EDID_open_fs(struct inode *inode, | 270 | static int acpi_video_device_EDID_open_fs(struct inode *inode, |
263 | struct file *file); | 271 | struct file *file); |
264 | static struct file_operations acpi_video_device_EDID_fops = { | 272 | static struct file_operations acpi_video_device_EDID_fops = { |
273 | .owner = THIS_MODULE, | ||
265 | .open = acpi_video_device_EDID_open_fs, | 274 | .open = acpi_video_device_EDID_open_fs, |
266 | .read = seq_read, | 275 | .read = seq_read, |
267 | .llseek = seq_lseek, | 276 | .llseek = seq_lseek, |
@@ -1070,51 +1079,36 @@ static int acpi_video_device_add_fs(struct acpi_device *device) | |||
1070 | } | 1079 | } |
1071 | 1080 | ||
1072 | /* 'info' [R] */ | 1081 | /* 'info' [R] */ |
1073 | entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device)); | 1082 | entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device), |
1083 | &acpi_video_device_info_fops, acpi_driver_data(device)); | ||
1074 | if (!entry) | 1084 | if (!entry) |
1075 | return -ENODEV; | 1085 | return -ENODEV; |
1076 | else { | ||
1077 | entry->proc_fops = &acpi_video_device_info_fops; | ||
1078 | entry->data = acpi_driver_data(device); | ||
1079 | entry->owner = THIS_MODULE; | ||
1080 | } | ||
1081 | 1086 | ||
1082 | /* 'state' [R/W] */ | 1087 | /* 'state' [R/W] */ |
1083 | entry = | 1088 | acpi_video_device_state_fops.write = acpi_video_device_write_state; |
1084 | create_proc_entry("state", S_IFREG | S_IRUGO | S_IWUSR, | 1089 | entry = proc_create_data("state", S_IFREG | S_IRUGO | S_IWUSR, |
1085 | acpi_device_dir(device)); | 1090 | acpi_device_dir(device), |
1091 | &acpi_video_device_state_fops, | ||
1092 | acpi_driver_data(device)); | ||
1086 | if (!entry) | 1093 | if (!entry) |
1087 | return -ENODEV; | 1094 | return -ENODEV; |
1088 | else { | ||
1089 | acpi_video_device_state_fops.write = acpi_video_device_write_state; | ||
1090 | entry->proc_fops = &acpi_video_device_state_fops; | ||
1091 | entry->data = acpi_driver_data(device); | ||
1092 | entry->owner = THIS_MODULE; | ||
1093 | } | ||
1094 | 1095 | ||
1095 | /* 'brightness' [R/W] */ | 1096 | /* 'brightness' [R/W] */ |
1096 | entry = | 1097 | acpi_video_device_brightness_fops.write = |
1097 | create_proc_entry("brightness", S_IFREG | S_IRUGO | S_IWUSR, | 1098 | acpi_video_device_write_brightness; |
1098 | acpi_device_dir(device)); | 1099 | entry = proc_create_data("brightness", S_IFREG | S_IRUGO | S_IWUSR, |
1100 | acpi_device_dir(device), | ||
1101 | &acpi_video_device_brightness_fops, | ||
1102 | acpi_driver_data(device)); | ||
1099 | if (!entry) | 1103 | if (!entry) |
1100 | return -ENODEV; | 1104 | return -ENODEV; |
1101 | else { | ||
1102 | acpi_video_device_brightness_fops.write = acpi_video_device_write_brightness; | ||
1103 | entry->proc_fops = &acpi_video_device_brightness_fops; | ||
1104 | entry->data = acpi_driver_data(device); | ||
1105 | entry->owner = THIS_MODULE; | ||
1106 | } | ||
1107 | 1105 | ||
1108 | /* 'EDID' [R] */ | 1106 | /* 'EDID' [R] */ |
1109 | entry = create_proc_entry("EDID", S_IRUGO, acpi_device_dir(device)); | 1107 | entry = proc_create_data("EDID", S_IRUGO, acpi_device_dir(device), |
1108 | &acpi_video_device_EDID_fops, | ||
1109 | acpi_driver_data(device)); | ||
1110 | if (!entry) | 1110 | if (!entry) |
1111 | return -ENODEV; | 1111 | return -ENODEV; |
1112 | else { | ||
1113 | entry->proc_fops = &acpi_video_device_EDID_fops; | ||
1114 | entry->data = acpi_driver_data(device); | ||
1115 | entry->owner = THIS_MODULE; | ||
1116 | } | ||
1117 | |||
1118 | return 0; | 1112 | return 0; |
1119 | } | 1113 | } |
1120 | 1114 | ||
@@ -1353,61 +1347,43 @@ static int acpi_video_bus_add_fs(struct acpi_device *device) | |||
1353 | } | 1347 | } |
1354 | 1348 | ||
1355 | /* 'info' [R] */ | 1349 | /* 'info' [R] */ |
1356 | entry = create_proc_entry("info", S_IRUGO, acpi_device_dir(device)); | 1350 | entry = proc_create_data("info", S_IRUGO, acpi_device_dir(device), |
1351 | &acpi_video_bus_info_fops, | ||
1352 | acpi_driver_data(device)); | ||
1357 | if (!entry) | 1353 | if (!entry) |
1358 | return -ENODEV; | 1354 | return -ENODEV; |
1359 | else { | ||
1360 | entry->proc_fops = &acpi_video_bus_info_fops; | ||
1361 | entry->data = acpi_driver_data(device); | ||
1362 | entry->owner = THIS_MODULE; | ||
1363 | } | ||
1364 | 1355 | ||
1365 | /* 'ROM' [R] */ | 1356 | /* 'ROM' [R] */ |
1366 | entry = create_proc_entry("ROM", S_IRUGO, acpi_device_dir(device)); | 1357 | entry = proc_create_data("ROM", S_IRUGO, acpi_device_dir(device), |
1358 | &acpi_video_bus_ROM_fops, | ||
1359 | acpi_driver_data(device)); | ||
1367 | if (!entry) | 1360 | if (!entry) |
1368 | return -ENODEV; | 1361 | return -ENODEV; |
1369 | else { | ||
1370 | entry->proc_fops = &acpi_video_bus_ROM_fops; | ||
1371 | entry->data = acpi_driver_data(device); | ||
1372 | entry->owner = THIS_MODULE; | ||
1373 | } | ||
1374 | 1362 | ||
1375 | /* 'POST_info' [R] */ | 1363 | /* 'POST_info' [R] */ |
1376 | entry = | 1364 | entry = proc_create_data("POST_info", S_IRUGO, acpi_device_dir(device), |
1377 | create_proc_entry("POST_info", S_IRUGO, acpi_device_dir(device)); | 1365 | &acpi_video_bus_POST_info_fops, |
1366 | acpi_driver_data(device)); | ||
1378 | if (!entry) | 1367 | if (!entry) |
1379 | return -ENODEV; | 1368 | return -ENODEV; |
1380 | else { | ||
1381 | entry->proc_fops = &acpi_video_bus_POST_info_fops; | ||
1382 | entry->data = acpi_driver_data(device); | ||
1383 | entry->owner = THIS_MODULE; | ||
1384 | } | ||
1385 | 1369 | ||
1386 | /* 'POST' [R/W] */ | 1370 | /* 'POST' [R/W] */ |
1387 | entry = | 1371 | acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST; |
1388 | create_proc_entry("POST", S_IFREG | S_IRUGO | S_IRUSR, | 1372 | entry = proc_create_data("POST", S_IFREG | S_IRUGO | S_IRUSR, |
1389 | acpi_device_dir(device)); | 1373 | acpi_device_dir(device), |
1374 | &acpi_video_bus_POST_fops, | ||
1375 | acpi_driver_data(device)); | ||
1390 | if (!entry) | 1376 | if (!entry) |
1391 | return -ENODEV; | 1377 | return -ENODEV; |
1392 | else { | ||
1393 | acpi_video_bus_POST_fops.write = acpi_video_bus_write_POST; | ||
1394 | entry->proc_fops = &acpi_video_bus_POST_fops; | ||
1395 | entry->data = acpi_driver_data(device); | ||
1396 | entry->owner = THIS_MODULE; | ||
1397 | } | ||
1398 | 1378 | ||
1399 | /* 'DOS' [R/W] */ | 1379 | /* 'DOS' [R/W] */ |
1400 | entry = | 1380 | acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS; |
1401 | create_proc_entry("DOS", S_IFREG | S_IRUGO | S_IRUSR, | 1381 | entry = proc_create_data("DOS", S_IFREG | S_IRUGO | S_IRUSR, |
1402 | acpi_device_dir(device)); | 1382 | acpi_device_dir(device), |
1383 | &acpi_video_bus_DOS_fops, | ||
1384 | acpi_driver_data(device)); | ||
1403 | if (!entry) | 1385 | if (!entry) |
1404 | return -ENODEV; | 1386 | return -ENODEV; |
1405 | else { | ||
1406 | acpi_video_bus_DOS_fops.write = acpi_video_bus_write_DOS; | ||
1407 | entry->proc_fops = &acpi_video_bus_DOS_fops; | ||
1408 | entry->data = acpi_driver_data(device); | ||
1409 | entry->owner = THIS_MODULE; | ||
1410 | } | ||
1411 | 1387 | ||
1412 | return 0; | 1388 | return 0; |
1413 | } | 1389 | } |
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index 1fef7df8c9d6..9fd4a8534146 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c | |||
@@ -396,6 +396,8 @@ _request_firmware(const struct firmware **firmware_p, const char *name, | |||
396 | if (!firmware_p) | 396 | if (!firmware_p) |
397 | return -EINVAL; | 397 | return -EINVAL; |
398 | 398 | ||
399 | printk(KERN_INFO "firmware: requesting %s\n", name); | ||
400 | |||
399 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); | 401 | *firmware_p = firmware = kzalloc(sizeof(*firmware), GFP_KERNEL); |
400 | if (!firmware) { | 402 | if (!firmware) { |
401 | printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n", | 403 | printk(KERN_ERR "%s: kmalloc(struct firmware) failed\n", |
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 280e71ee744c..5b4c6e649c11 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h | |||
@@ -195,7 +195,6 @@ void aoedev_exit(void); | |||
195 | struct aoedev *aoedev_by_aoeaddr(int maj, int min); | 195 | struct aoedev *aoedev_by_aoeaddr(int maj, int min); |
196 | struct aoedev *aoedev_by_sysminor_m(ulong sysminor); | 196 | struct aoedev *aoedev_by_sysminor_m(ulong sysminor); |
197 | void aoedev_downdev(struct aoedev *d); | 197 | void aoedev_downdev(struct aoedev *d); |
198 | int aoedev_isbusy(struct aoedev *d); | ||
199 | int aoedev_flush(const char __user *str, size_t size); | 198 | int aoedev_flush(const char __user *str, size_t size); |
200 | 199 | ||
201 | int aoenet_init(void); | 200 | int aoenet_init(void); |
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index d00293ba3b45..8fc429cf82b6 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c | |||
@@ -668,16 +668,16 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) | |||
668 | u16 n; | 668 | u16 n; |
669 | 669 | ||
670 | /* word 83: command set supported */ | 670 | /* word 83: command set supported */ |
671 | n = le16_to_cpu(get_unaligned((__le16 *) &id[83<<1])); | 671 | n = get_unaligned_le16(&id[83 << 1]); |
672 | 672 | ||
673 | /* word 86: command set/feature enabled */ | 673 | /* word 86: command set/feature enabled */ |
674 | n |= le16_to_cpu(get_unaligned((__le16 *) &id[86<<1])); | 674 | n |= get_unaligned_le16(&id[86 << 1]); |
675 | 675 | ||
676 | if (n & (1<<10)) { /* bit 10: LBA 48 */ | 676 | if (n & (1<<10)) { /* bit 10: LBA 48 */ |
677 | d->flags |= DEVFL_EXT; | 677 | d->flags |= DEVFL_EXT; |
678 | 678 | ||
679 | /* word 100: number lba48 sectors */ | 679 | /* word 100: number lba48 sectors */ |
680 | ssize = le64_to_cpu(get_unaligned((__le64 *) &id[100<<1])); | 680 | ssize = get_unaligned_le64(&id[100 << 1]); |
681 | 681 | ||
682 | /* set as in ide-disk.c:init_idedisk_capacity */ | 682 | /* set as in ide-disk.c:init_idedisk_capacity */ |
683 | d->geo.cylinders = ssize; | 683 | d->geo.cylinders = ssize; |
@@ -688,12 +688,12 @@ ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) | |||
688 | d->flags &= ~DEVFL_EXT; | 688 | d->flags &= ~DEVFL_EXT; |
689 | 689 | ||
690 | /* number lba28 sectors */ | 690 | /* number lba28 sectors */ |
691 | ssize = le32_to_cpu(get_unaligned((__le32 *) &id[60<<1])); | 691 | ssize = get_unaligned_le32(&id[60 << 1]); |
692 | 692 | ||
693 | /* NOTE: obsolete in ATA 6 */ | 693 | /* NOTE: obsolete in ATA 6 */ |
694 | d->geo.cylinders = le16_to_cpu(get_unaligned((__le16 *) &id[54<<1])); | 694 | d->geo.cylinders = get_unaligned_le16(&id[54 << 1]); |
695 | d->geo.heads = le16_to_cpu(get_unaligned((__le16 *) &id[55<<1])); | 695 | d->geo.heads = get_unaligned_le16(&id[55 << 1]); |
696 | d->geo.sectors = le16_to_cpu(get_unaligned((__le16 *) &id[56<<1])); | 696 | d->geo.sectors = get_unaligned_le16(&id[56 << 1]); |
697 | } | 697 | } |
698 | 698 | ||
699 | if (d->ssize != ssize) | 699 | if (d->ssize != ssize) |
@@ -779,7 +779,7 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
779 | u16 aoemajor; | 779 | u16 aoemajor; |
780 | 780 | ||
781 | hin = (struct aoe_hdr *) skb_mac_header(skb); | 781 | hin = (struct aoe_hdr *) skb_mac_header(skb); |
782 | aoemajor = be16_to_cpu(get_unaligned(&hin->major)); | 782 | aoemajor = get_unaligned_be16(&hin->major); |
783 | d = aoedev_by_aoeaddr(aoemajor, hin->minor); | 783 | d = aoedev_by_aoeaddr(aoemajor, hin->minor); |
784 | if (d == NULL) { | 784 | if (d == NULL) { |
785 | snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " | 785 | snprintf(ebuf, sizeof ebuf, "aoecmd_ata_rsp: ata response " |
@@ -791,7 +791,7 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
791 | 791 | ||
792 | spin_lock_irqsave(&d->lock, flags); | 792 | spin_lock_irqsave(&d->lock, flags); |
793 | 793 | ||
794 | n = be32_to_cpu(get_unaligned(&hin->tag)); | 794 | n = get_unaligned_be32(&hin->tag); |
795 | t = gettgt(d, hin->src); | 795 | t = gettgt(d, hin->src); |
796 | if (t == NULL) { | 796 | if (t == NULL) { |
797 | printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", | 797 | printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", |
@@ -806,9 +806,9 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
806 | snprintf(ebuf, sizeof ebuf, | 806 | snprintf(ebuf, sizeof ebuf, |
807 | "%15s e%d.%d tag=%08x@%08lx\n", | 807 | "%15s e%d.%d tag=%08x@%08lx\n", |
808 | "unexpected rsp", | 808 | "unexpected rsp", |
809 | be16_to_cpu(get_unaligned(&hin->major)), | 809 | get_unaligned_be16(&hin->major), |
810 | hin->minor, | 810 | hin->minor, |
811 | be32_to_cpu(get_unaligned(&hin->tag)), | 811 | get_unaligned_be32(&hin->tag), |
812 | jiffies); | 812 | jiffies); |
813 | aoechr_error(ebuf); | 813 | aoechr_error(ebuf); |
814 | return; | 814 | return; |
@@ -873,7 +873,7 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
873 | printk(KERN_INFO | 873 | printk(KERN_INFO |
874 | "aoe: unrecognized ata command %2.2Xh for %d.%d\n", | 874 | "aoe: unrecognized ata command %2.2Xh for %d.%d\n", |
875 | ahout->cmdstat, | 875 | ahout->cmdstat, |
876 | be16_to_cpu(get_unaligned(&hin->major)), | 876 | get_unaligned_be16(&hin->major), |
877 | hin->minor); | 877 | hin->minor); |
878 | } | 878 | } |
879 | } | 879 | } |
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index f9a1cd9edb77..a1d813ab0d6b 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -18,24 +18,6 @@ static void skbpoolfree(struct aoedev *d); | |||
18 | static struct aoedev *devlist; | 18 | static struct aoedev *devlist; |
19 | static DEFINE_SPINLOCK(devlist_lock); | 19 | static DEFINE_SPINLOCK(devlist_lock); |
20 | 20 | ||
21 | int | ||
22 | aoedev_isbusy(struct aoedev *d) | ||
23 | { | ||
24 | struct aoetgt **t, **te; | ||
25 | struct frame *f, *e; | ||
26 | |||
27 | t = d->targets; | ||
28 | te = t + NTARGETS; | ||
29 | for (; t < te && *t; t++) { | ||
30 | f = (*t)->frames; | ||
31 | e = f + (*t)->nframes; | ||
32 | for (; f < e; f++) | ||
33 | if (f->tag != FREETAG) | ||
34 | return 1; | ||
35 | } | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | struct aoedev * | 21 | struct aoedev * |
40 | aoedev_by_aoeaddr(int maj, int min) | 22 | aoedev_by_aoeaddr(int maj, int min) |
41 | { | 23 | { |
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 18d243c73eee..d625169c8e48 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c | |||
@@ -128,7 +128,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, | |||
128 | skb_push(skb, ETH_HLEN); /* (1) */ | 128 | skb_push(skb, ETH_HLEN); /* (1) */ |
129 | 129 | ||
130 | h = (struct aoe_hdr *) skb_mac_header(skb); | 130 | h = (struct aoe_hdr *) skb_mac_header(skb); |
131 | n = be32_to_cpu(get_unaligned(&h->tag)); | 131 | n = get_unaligned_be32(&h->tag); |
132 | if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) | 132 | if ((h->verfl & AOEFL_RSP) == 0 || (n & 1<<31)) |
133 | goto exit; | 133 | goto exit; |
134 | 134 | ||
@@ -140,7 +140,7 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, | |||
140 | printk(KERN_ERR | 140 | printk(KERN_ERR |
141 | "%s%d.%d@%s; ecode=%d '%s'\n", | 141 | "%s%d.%d@%s; ecode=%d '%s'\n", |
142 | "aoe: error packet from ", | 142 | "aoe: error packet from ", |
143 | be16_to_cpu(get_unaligned(&h->major)), | 143 | get_unaligned_be16(&h->major), |
144 | h->minor, skb->dev->name, | 144 | h->minor, skb->dev->name, |
145 | h->err, aoe_errlist[n]); | 145 | h->err, aoe_errlist[n]); |
146 | goto exit; | 146 | goto exit; |
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index cf6083a1f928..e539be5750dc 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
@@ -425,7 +425,7 @@ static void __devinit cciss_procinit(int i) | |||
425 | struct proc_dir_entry *pde; | 425 | struct proc_dir_entry *pde; |
426 | 426 | ||
427 | if (proc_cciss == NULL) | 427 | if (proc_cciss == NULL) |
428 | proc_cciss = proc_mkdir("cciss", proc_root_driver); | 428 | proc_cciss = proc_mkdir("driver/cciss", NULL); |
429 | if (!proc_cciss) | 429 | if (!proc_cciss) |
430 | return; | 430 | return; |
431 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | | 431 | pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | |
@@ -3700,7 +3700,7 @@ static void __exit cciss_cleanup(void) | |||
3700 | cciss_remove_one(hba[i]->pdev); | 3700 | cciss_remove_one(hba[i]->pdev); |
3701 | } | 3701 | } |
3702 | } | 3702 | } |
3703 | remove_proc_entry("cciss", proc_root_driver); | 3703 | remove_proc_entry("driver/cciss", NULL); |
3704 | } | 3704 | } |
3705 | 3705 | ||
3706 | static void fail_all_cmds(unsigned long ctlr) | 3706 | static void fail_all_cmds(unsigned long ctlr) |
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 69199185ff4b..09c14341e6e3 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -214,7 +214,7 @@ static struct proc_dir_entry *proc_array; | |||
214 | static void __init ida_procinit(int i) | 214 | static void __init ida_procinit(int i) |
215 | { | 215 | { |
216 | if (proc_array == NULL) { | 216 | if (proc_array == NULL) { |
217 | proc_array = proc_mkdir("cpqarray", proc_root_driver); | 217 | proc_array = proc_mkdir("driver/cpqarray", NULL); |
218 | if (!proc_array) return; | 218 | if (!proc_array) return; |
219 | } | 219 | } |
220 | 220 | ||
@@ -1796,7 +1796,7 @@ static void __exit cpqarray_exit(void) | |||
1796 | } | 1796 | } |
1797 | } | 1797 | } |
1798 | 1798 | ||
1799 | remove_proc_entry("cpqarray", proc_root_driver); | 1799 | remove_proc_entry("driver/cpqarray", NULL); |
1800 | } | 1800 | } |
1801 | 1801 | ||
1802 | module_init(cpqarray_init) | 1802 | module_init(cpqarray_init) |
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 7652e87d60c5..395f8ea7981c 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -4526,14 +4526,15 @@ static void __init parse_floppy_cfg_string(char *cfg) | |||
4526 | } | 4526 | } |
4527 | } | 4527 | } |
4528 | 4528 | ||
4529 | int __init init_module(void) | 4529 | static int __init floppy_module_init(void) |
4530 | { | 4530 | { |
4531 | if (floppy) | 4531 | if (floppy) |
4532 | parse_floppy_cfg_string(floppy); | 4532 | parse_floppy_cfg_string(floppy); |
4533 | return floppy_init(); | 4533 | return floppy_init(); |
4534 | } | 4534 | } |
4535 | module_init(floppy_module_init); | ||
4535 | 4536 | ||
4536 | void cleanup_module(void) | 4537 | static void __exit floppy_module_exit(void) |
4537 | { | 4538 | { |
4538 | int drive; | 4539 | int drive; |
4539 | 4540 | ||
@@ -4562,6 +4563,7 @@ void cleanup_module(void) | |||
4562 | /* eject disk, if any */ | 4563 | /* eject disk, if any */ |
4563 | fd_eject(0); | 4564 | fd_eject(0); |
4564 | } | 4565 | } |
4566 | module_exit(floppy_module_exit); | ||
4565 | 4567 | ||
4566 | module_param(floppy, charp, 0); | 4568 | module_param(floppy, charp, 0); |
4567 | module_param(FLOPPY_IRQ, int, 0); | 4569 | module_param(FLOPPY_IRQ, int, 0); |
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index f75bda16a1fc..ad98dda6037d 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
30 | #include <net/sock.h> | 30 | #include <net/sock.h> |
31 | #include <linux/net.h> | 31 | #include <linux/net.h> |
32 | #include <linux/kthread.h> | ||
32 | 33 | ||
33 | #include <asm/uaccess.h> | 34 | #include <asm/uaccess.h> |
34 | #include <asm/system.h> | 35 | #include <asm/system.h> |
@@ -55,6 +56,7 @@ static unsigned int debugflags; | |||
55 | 56 | ||
56 | static unsigned int nbds_max = 16; | 57 | static unsigned int nbds_max = 16; |
57 | static struct nbd_device *nbd_dev; | 58 | static struct nbd_device *nbd_dev; |
59 | static int max_part; | ||
58 | 60 | ||
59 | /* | 61 | /* |
60 | * Use just one lock (or at most 1 per NIC). Two arguments for this: | 62 | * Use just one lock (or at most 1 per NIC). Two arguments for this: |
@@ -337,7 +339,7 @@ static struct request *nbd_read_stat(struct nbd_device *lo) | |||
337 | } | 339 | } |
338 | 340 | ||
339 | req = nbd_find_request(lo, *(struct request **)reply.handle); | 341 | req = nbd_find_request(lo, *(struct request **)reply.handle); |
340 | if (unlikely(IS_ERR(req))) { | 342 | if (IS_ERR(req)) { |
341 | result = PTR_ERR(req); | 343 | result = PTR_ERR(req); |
342 | if (result != -ENOENT) | 344 | if (result != -ENOENT) |
343 | goto harderror; | 345 | goto harderror; |
@@ -441,6 +443,85 @@ static void nbd_clear_que(struct nbd_device *lo) | |||
441 | } | 443 | } |
442 | 444 | ||
443 | 445 | ||
446 | static void nbd_handle_req(struct nbd_device *lo, struct request *req) | ||
447 | { | ||
448 | if (!blk_fs_request(req)) | ||
449 | goto error_out; | ||
450 | |||
451 | nbd_cmd(req) = NBD_CMD_READ; | ||
452 | if (rq_data_dir(req) == WRITE) { | ||
453 | nbd_cmd(req) = NBD_CMD_WRITE; | ||
454 | if (lo->flags & NBD_READ_ONLY) { | ||
455 | printk(KERN_ERR "%s: Write on read-only\n", | ||
456 | lo->disk->disk_name); | ||
457 | goto error_out; | ||
458 | } | ||
459 | } | ||
460 | |||
461 | req->errors = 0; | ||
462 | |||
463 | mutex_lock(&lo->tx_lock); | ||
464 | if (unlikely(!lo->sock)) { | ||
465 | mutex_unlock(&lo->tx_lock); | ||
466 | printk(KERN_ERR "%s: Attempted send on closed socket\n", | ||
467 | lo->disk->disk_name); | ||
468 | req->errors++; | ||
469 | nbd_end_request(req); | ||
470 | return; | ||
471 | } | ||
472 | |||
473 | lo->active_req = req; | ||
474 | |||
475 | if (nbd_send_req(lo, req) != 0) { | ||
476 | printk(KERN_ERR "%s: Request send failed\n", | ||
477 | lo->disk->disk_name); | ||
478 | req->errors++; | ||
479 | nbd_end_request(req); | ||
480 | } else { | ||
481 | spin_lock(&lo->queue_lock); | ||
482 | list_add(&req->queuelist, &lo->queue_head); | ||
483 | spin_unlock(&lo->queue_lock); | ||
484 | } | ||
485 | |||
486 | lo->active_req = NULL; | ||
487 | mutex_unlock(&lo->tx_lock); | ||
488 | wake_up_all(&lo->active_wq); | ||
489 | |||
490 | return; | ||
491 | |||
492 | error_out: | ||
493 | req->errors++; | ||
494 | nbd_end_request(req); | ||
495 | } | ||
496 | |||
497 | static int nbd_thread(void *data) | ||
498 | { | ||
499 | struct nbd_device *lo = data; | ||
500 | struct request *req; | ||
501 | |||
502 | set_user_nice(current, -20); | ||
503 | while (!kthread_should_stop() || !list_empty(&lo->waiting_queue)) { | ||
504 | /* wait for something to do */ | ||
505 | wait_event_interruptible(lo->waiting_wq, | ||
506 | kthread_should_stop() || | ||
507 | !list_empty(&lo->waiting_queue)); | ||
508 | |||
509 | /* extract request */ | ||
510 | if (list_empty(&lo->waiting_queue)) | ||
511 | continue; | ||
512 | |||
513 | spin_lock_irq(&lo->queue_lock); | ||
514 | req = list_entry(lo->waiting_queue.next, struct request, | ||
515 | queuelist); | ||
516 | list_del_init(&req->queuelist); | ||
517 | spin_unlock_irq(&lo->queue_lock); | ||
518 | |||
519 | /* handle request */ | ||
520 | nbd_handle_req(lo, req); | ||
521 | } | ||
522 | return 0; | ||
523 | } | ||
524 | |||
444 | /* | 525 | /* |
445 | * We always wait for result of write, for now. It would be nice to make it optional | 526 | * We always wait for result of write, for now. It would be nice to make it optional |
446 | * in future | 527 | * in future |
@@ -456,65 +537,23 @@ static void do_nbd_request(struct request_queue * q) | |||
456 | struct nbd_device *lo; | 537 | struct nbd_device *lo; |
457 | 538 | ||
458 | blkdev_dequeue_request(req); | 539 | blkdev_dequeue_request(req); |
540 | |||
541 | spin_unlock_irq(q->queue_lock); | ||
542 | |||
459 | dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", | 543 | dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", |
460 | req->rq_disk->disk_name, req, req->cmd_type); | 544 | req->rq_disk->disk_name, req, req->cmd_type); |
461 | 545 | ||
462 | if (!blk_fs_request(req)) | ||
463 | goto error_out; | ||
464 | |||
465 | lo = req->rq_disk->private_data; | 546 | lo = req->rq_disk->private_data; |
466 | 547 | ||
467 | BUG_ON(lo->magic != LO_MAGIC); | 548 | BUG_ON(lo->magic != LO_MAGIC); |
468 | 549 | ||
469 | nbd_cmd(req) = NBD_CMD_READ; | 550 | spin_lock_irq(&lo->queue_lock); |
470 | if (rq_data_dir(req) == WRITE) { | 551 | list_add_tail(&req->queuelist, &lo->waiting_queue); |
471 | nbd_cmd(req) = NBD_CMD_WRITE; | 552 | spin_unlock_irq(&lo->queue_lock); |
472 | if (lo->flags & NBD_READ_ONLY) { | ||
473 | printk(KERN_ERR "%s: Write on read-only\n", | ||
474 | lo->disk->disk_name); | ||
475 | goto error_out; | ||
476 | } | ||
477 | } | ||
478 | |||
479 | req->errors = 0; | ||
480 | spin_unlock_irq(q->queue_lock); | ||
481 | |||
482 | mutex_lock(&lo->tx_lock); | ||
483 | if (unlikely(!lo->sock)) { | ||
484 | mutex_unlock(&lo->tx_lock); | ||
485 | printk(KERN_ERR "%s: Attempted send on closed socket\n", | ||
486 | lo->disk->disk_name); | ||
487 | req->errors++; | ||
488 | nbd_end_request(req); | ||
489 | spin_lock_irq(q->queue_lock); | ||
490 | continue; | ||
491 | } | ||
492 | |||
493 | lo->active_req = req; | ||
494 | 553 | ||
495 | if (nbd_send_req(lo, req) != 0) { | 554 | wake_up(&lo->waiting_wq); |
496 | printk(KERN_ERR "%s: Request send failed\n", | ||
497 | lo->disk->disk_name); | ||
498 | req->errors++; | ||
499 | nbd_end_request(req); | ||
500 | } else { | ||
501 | spin_lock(&lo->queue_lock); | ||
502 | list_add(&req->queuelist, &lo->queue_head); | ||
503 | spin_unlock(&lo->queue_lock); | ||
504 | } | ||
505 | |||
506 | lo->active_req = NULL; | ||
507 | mutex_unlock(&lo->tx_lock); | ||
508 | wake_up_all(&lo->active_wq); | ||
509 | 555 | ||
510 | spin_lock_irq(q->queue_lock); | 556 | spin_lock_irq(q->queue_lock); |
511 | continue; | ||
512 | |||
513 | error_out: | ||
514 | req->errors++; | ||
515 | spin_unlock(q->queue_lock); | ||
516 | nbd_end_request(req); | ||
517 | spin_lock(q->queue_lock); | ||
518 | } | 557 | } |
519 | } | 558 | } |
520 | 559 | ||
@@ -524,6 +563,7 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
524 | struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; | 563 | struct nbd_device *lo = inode->i_bdev->bd_disk->private_data; |
525 | int error; | 564 | int error; |
526 | struct request sreq ; | 565 | struct request sreq ; |
566 | struct task_struct *thread; | ||
527 | 567 | ||
528 | if (!capable(CAP_SYS_ADMIN)) | 568 | if (!capable(CAP_SYS_ADMIN)) |
529 | return -EPERM; | 569 | return -EPERM; |
@@ -572,10 +612,13 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
572 | error = -EINVAL; | 612 | error = -EINVAL; |
573 | file = fget(arg); | 613 | file = fget(arg); |
574 | if (file) { | 614 | if (file) { |
615 | struct block_device *bdev = inode->i_bdev; | ||
575 | inode = file->f_path.dentry->d_inode; | 616 | inode = file->f_path.dentry->d_inode; |
576 | if (S_ISSOCK(inode->i_mode)) { | 617 | if (S_ISSOCK(inode->i_mode)) { |
577 | lo->file = file; | 618 | lo->file = file; |
578 | lo->sock = SOCKET_I(inode); | 619 | lo->sock = SOCKET_I(inode); |
620 | if (max_part > 0) | ||
621 | bdev->bd_invalidated = 1; | ||
579 | error = 0; | 622 | error = 0; |
580 | } else { | 623 | } else { |
581 | fput(file); | 624 | fput(file); |
@@ -607,7 +650,12 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
607 | case NBD_DO_IT: | 650 | case NBD_DO_IT: |
608 | if (!lo->file) | 651 | if (!lo->file) |
609 | return -EINVAL; | 652 | return -EINVAL; |
653 | thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); | ||
654 | if (IS_ERR(thread)) | ||
655 | return PTR_ERR(thread); | ||
656 | wake_up_process(thread); | ||
610 | error = nbd_do_it(lo); | 657 | error = nbd_do_it(lo); |
658 | kthread_stop(thread); | ||
611 | if (error) | 659 | if (error) |
612 | return error; | 660 | return error; |
613 | sock_shutdown(lo, 1); | 661 | sock_shutdown(lo, 1); |
@@ -620,6 +668,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file, | |||
620 | lo->bytesize = 0; | 668 | lo->bytesize = 0; |
621 | inode->i_bdev->bd_inode->i_size = 0; | 669 | inode->i_bdev->bd_inode->i_size = 0; |
622 | set_capacity(lo->disk, 0); | 670 | set_capacity(lo->disk, 0); |
671 | if (max_part > 0) | ||
672 | ioctl_by_bdev(inode->i_bdev, BLKRRPART, 0); | ||
623 | return lo->harderror; | 673 | return lo->harderror; |
624 | case NBD_CLEAR_QUE: | 674 | case NBD_CLEAR_QUE: |
625 | /* | 675 | /* |
@@ -653,6 +703,7 @@ static int __init nbd_init(void) | |||
653 | { | 703 | { |
654 | int err = -ENOMEM; | 704 | int err = -ENOMEM; |
655 | int i; | 705 | int i; |
706 | int part_shift; | ||
656 | 707 | ||
657 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); | 708 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); |
658 | 709 | ||
@@ -660,8 +711,17 @@ static int __init nbd_init(void) | |||
660 | if (!nbd_dev) | 711 | if (!nbd_dev) |
661 | return -ENOMEM; | 712 | return -ENOMEM; |
662 | 713 | ||
714 | if (max_part < 0) { | ||
715 | printk(KERN_CRIT "nbd: max_part must be >= 0\n"); | ||
716 | return -EINVAL; | ||
717 | } | ||
718 | |||
719 | part_shift = 0; | ||
720 | if (max_part > 0) | ||
721 | part_shift = fls(max_part); | ||
722 | |||
663 | for (i = 0; i < nbds_max; i++) { | 723 | for (i = 0; i < nbds_max; i++) { |
664 | struct gendisk *disk = alloc_disk(1); | 724 | struct gendisk *disk = alloc_disk(1 << part_shift); |
665 | elevator_t *old_e; | 725 | elevator_t *old_e; |
666 | if (!disk) | 726 | if (!disk) |
667 | goto out; | 727 | goto out; |
@@ -696,17 +756,18 @@ static int __init nbd_init(void) | |||
696 | nbd_dev[i].file = NULL; | 756 | nbd_dev[i].file = NULL; |
697 | nbd_dev[i].magic = LO_MAGIC; | 757 | nbd_dev[i].magic = LO_MAGIC; |
698 | nbd_dev[i].flags = 0; | 758 | nbd_dev[i].flags = 0; |
759 | INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); | ||
699 | spin_lock_init(&nbd_dev[i].queue_lock); | 760 | spin_lock_init(&nbd_dev[i].queue_lock); |
700 | INIT_LIST_HEAD(&nbd_dev[i].queue_head); | 761 | INIT_LIST_HEAD(&nbd_dev[i].queue_head); |
701 | mutex_init(&nbd_dev[i].tx_lock); | 762 | mutex_init(&nbd_dev[i].tx_lock); |
702 | init_waitqueue_head(&nbd_dev[i].active_wq); | 763 | init_waitqueue_head(&nbd_dev[i].active_wq); |
764 | init_waitqueue_head(&nbd_dev[i].waiting_wq); | ||
703 | nbd_dev[i].blksize = 1024; | 765 | nbd_dev[i].blksize = 1024; |
704 | nbd_dev[i].bytesize = 0; | 766 | nbd_dev[i].bytesize = 0; |
705 | disk->major = NBD_MAJOR; | 767 | disk->major = NBD_MAJOR; |
706 | disk->first_minor = i; | 768 | disk->first_minor = i << part_shift; |
707 | disk->fops = &nbd_fops; | 769 | disk->fops = &nbd_fops; |
708 | disk->private_data = &nbd_dev[i]; | 770 | disk->private_data = &nbd_dev[i]; |
709 | disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; | ||
710 | sprintf(disk->disk_name, "nbd%d", i); | 771 | sprintf(disk->disk_name, "nbd%d", i); |
711 | set_capacity(disk, 0); | 772 | set_capacity(disk, 0); |
712 | add_disk(disk); | 773 | add_disk(disk); |
@@ -744,7 +805,9 @@ MODULE_DESCRIPTION("Network Block Device"); | |||
744 | MODULE_LICENSE("GPL"); | 805 | MODULE_LICENSE("GPL"); |
745 | 806 | ||
746 | module_param(nbds_max, int, 0444); | 807 | module_param(nbds_max, int, 0444); |
747 | MODULE_PARM_DESC(nbds_max, "How many network block devices to initialize."); | 808 | MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); |
809 | module_param(max_part, int, 0444); | ||
810 | MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); | ||
748 | #ifndef NDEBUG | 811 | #ifndef NDEBUG |
749 | module_param(debugflags, int, 0644); | 812 | module_param(debugflags, int, 0644); |
750 | MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); | 813 | MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 3b806c9fb005..3ba1df93e9e3 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2742,7 +2742,6 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) | |||
2742 | int i; | 2742 | int i; |
2743 | int ret = 0; | 2743 | int ret = 0; |
2744 | char b[BDEVNAME_SIZE]; | 2744 | char b[BDEVNAME_SIZE]; |
2745 | struct proc_dir_entry *proc; | ||
2746 | struct block_device *bdev; | 2745 | struct block_device *bdev; |
2747 | 2746 | ||
2748 | if (pd->pkt_dev == dev) { | 2747 | if (pd->pkt_dev == dev) { |
@@ -2786,11 +2785,7 @@ static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) | |||
2786 | goto out_mem; | 2785 | goto out_mem; |
2787 | } | 2786 | } |
2788 | 2787 | ||
2789 | proc = create_proc_entry(pd->name, 0, pkt_proc); | 2788 | proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); |
2790 | if (proc) { | ||
2791 | proc->data = pd; | ||
2792 | proc->proc_fops = &pkt_proc_fops; | ||
2793 | } | ||
2794 | DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); | 2789 | DPRINTK(DRIVER_NAME": writer %s mapped to %s\n", pd->name, bdevname(bdev, b)); |
2795 | return 0; | 2790 | return 0; |
2796 | 2791 | ||
@@ -3099,7 +3094,7 @@ static int __init pkt_init(void) | |||
3099 | goto out_misc; | 3094 | goto out_misc; |
3100 | } | 3095 | } |
3101 | 3096 | ||
3102 | pkt_proc = proc_mkdir(DRIVER_NAME, proc_root_driver); | 3097 | pkt_proc = proc_mkdir("driver/"DRIVER_NAME, NULL); |
3103 | 3098 | ||
3104 | return 0; | 3099 | return 0; |
3105 | 3100 | ||
@@ -3115,7 +3110,7 @@ out2: | |||
3115 | 3110 | ||
3116 | static void __exit pkt_exit(void) | 3111 | static void __exit pkt_exit(void) |
3117 | { | 3112 | { |
3118 | remove_proc_entry(DRIVER_NAME, proc_root_driver); | 3113 | remove_proc_entry("driver/"DRIVER_NAME, NULL); |
3119 | misc_deregister(&pkt_misc); | 3114 | misc_deregister(&pkt_misc); |
3120 | 3115 | ||
3121 | pkt_debugfs_cleanup(); | 3116 | pkt_debugfs_cleanup(); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index d771da816d95..f2fff5799ddf 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -137,7 +137,7 @@ static void blkif_restart_queue_callback(void *arg) | |||
137 | schedule_work(&info->work); | 137 | schedule_work(&info->work); |
138 | } | 138 | } |
139 | 139 | ||
140 | int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) | 140 | static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) |
141 | { | 141 | { |
142 | /* We don't have real geometry info, but let's at least return | 142 | /* We don't have real geometry info, but let's at least return |
143 | values consistent with the size of the device */ | 143 | values consistent with the size of the device */ |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index b74b6c2768a8..5245a4a0ba74 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
@@ -144,6 +144,7 @@ static int proc_viocd_open(struct inode *inode, struct file *file) | |||
144 | } | 144 | } |
145 | 145 | ||
146 | static const struct file_operations proc_viocd_operations = { | 146 | static const struct file_operations proc_viocd_operations = { |
147 | .owner = THIS_MODULE, | ||
147 | .open = proc_viocd_open, | 148 | .open = proc_viocd_open, |
148 | .read = seq_read, | 149 | .read = seq_read, |
149 | .llseek = seq_lseek, | 150 | .llseek = seq_lseek, |
@@ -679,7 +680,6 @@ static struct vio_driver viocd_driver = { | |||
679 | 680 | ||
680 | static int __init viocd_init(void) | 681 | static int __init viocd_init(void) |
681 | { | 682 | { |
682 | struct proc_dir_entry *e; | ||
683 | int ret = 0; | 683 | int ret = 0; |
684 | 684 | ||
685 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 685 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
@@ -719,12 +719,8 @@ static int __init viocd_init(void) | |||
719 | if (ret) | 719 | if (ret) |
720 | goto out_free_info; | 720 | goto out_free_info; |
721 | 721 | ||
722 | e = create_proc_entry("iSeries/viocd", S_IFREG|S_IRUGO, NULL); | 722 | proc_create("iSeries/viocd", S_IFREG|S_IRUGO, NULL, |
723 | if (e) { | 723 | &proc_viocd_operations); |
724 | e->owner = THIS_MODULE; | ||
725 | e->proc_fops = &proc_viocd_operations; | ||
726 | } | ||
727 | |||
728 | return 0; | 724 | return 0; |
729 | 725 | ||
730 | out_free_info: | 726 | out_free_info: |
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 929d4fa73fd9..5dce3877eee5 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig | |||
@@ -80,6 +80,15 @@ config VT_HW_CONSOLE_BINDING | |||
80 | information. For framebuffer console users, please refer to | 80 | information. For framebuffer console users, please refer to |
81 | <file:Documentation/fb/fbcon.txt>. | 81 | <file:Documentation/fb/fbcon.txt>. |
82 | 82 | ||
83 | config DEVKMEM | ||
84 | bool "/dev/kmem virtual device support" | ||
85 | default y | ||
86 | help | ||
87 | Say Y here if you want to support the /dev/kmem device. The | ||
88 | /dev/kmem device is rarely used, but can be used for certain | ||
89 | kind of kernel debugging operations. | ||
90 | When in doubt, say "N". | ||
91 | |||
83 | config SERIAL_NONSTANDARD | 92 | config SERIAL_NONSTANDARD |
84 | bool "Non-standard serial port support" | 93 | bool "Non-standard serial port support" |
85 | depends on HAS_IOMEM | 94 | depends on HAS_IOMEM |
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c index 17d54315e146..cdd876dbb2b0 100644 --- a/drivers/char/apm-emulation.c +++ b/drivers/char/apm-emulation.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/poll.h> | 14 | #include <linux/poll.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/proc_fs.h> | 16 | #include <linux/proc_fs.h> |
17 | #include <linux/seq_file.h> | ||
17 | #include <linux/miscdevice.h> | 18 | #include <linux/miscdevice.h> |
18 | #include <linux/apm_bios.h> | 19 | #include <linux/apm_bios.h> |
19 | #include <linux/capability.h> | 20 | #include <linux/capability.h> |
@@ -493,11 +494,10 @@ static struct miscdevice apm_device = { | |||
493 | * -1: Unknown | 494 | * -1: Unknown |
494 | * 8) min = minutes; sec = seconds | 495 | * 8) min = minutes; sec = seconds |
495 | */ | 496 | */ |
496 | static int apm_get_info(char *buf, char **start, off_t fpos, int length) | 497 | static int proc_apm_show(struct seq_file *m, void *v) |
497 | { | 498 | { |
498 | struct apm_power_info info; | 499 | struct apm_power_info info; |
499 | char *units; | 500 | char *units; |
500 | int ret; | ||
501 | 501 | ||
502 | info.ac_line_status = 0xff; | 502 | info.ac_line_status = 0xff; |
503 | info.battery_status = 0xff; | 503 | info.battery_status = 0xff; |
@@ -515,14 +515,27 @@ static int apm_get_info(char *buf, char **start, off_t fpos, int length) | |||
515 | case 1: units = "sec"; break; | 515 | case 1: units = "sec"; break; |
516 | } | 516 | } |
517 | 517 | ||
518 | ret = sprintf(buf, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", | 518 | seq_printf(m, "%s 1.2 0x%02x 0x%02x 0x%02x 0x%02x %d%% %d %s\n", |
519 | driver_version, APM_32_BIT_SUPPORT, | 519 | driver_version, APM_32_BIT_SUPPORT, |
520 | info.ac_line_status, info.battery_status, | 520 | info.ac_line_status, info.battery_status, |
521 | info.battery_flag, info.battery_life, | 521 | info.battery_flag, info.battery_life, |
522 | info.time, units); | 522 | info.time, units); |
523 | 523 | ||
524 | return ret; | 524 | return 0; |
525 | } | 525 | } |
526 | |||
527 | static int proc_apm_open(struct inode *inode, struct file *file) | ||
528 | { | ||
529 | return single_open(file, proc_apm_show, NULL); | ||
530 | } | ||
531 | |||
532 | static const struct file_operations apm_proc_fops = { | ||
533 | .owner = THIS_MODULE, | ||
534 | .open = proc_apm_open, | ||
535 | .read = seq_read, | ||
536 | .llseek = seq_lseek, | ||
537 | .release = single_release, | ||
538 | }; | ||
526 | #endif | 539 | #endif |
527 | 540 | ||
528 | static int kapmd(void *arg) | 541 | static int kapmd(void *arg) |
@@ -593,7 +606,7 @@ static int __init apm_init(void) | |||
593 | wake_up_process(kapmd_tsk); | 606 | wake_up_process(kapmd_tsk); |
594 | 607 | ||
595 | #ifdef CONFIG_PROC_FS | 608 | #ifdef CONFIG_PROC_FS |
596 | create_proc_info_entry("apm", 0, NULL, apm_get_info); | 609 | proc_create("apm", 0, NULL, &apm_proc_fops); |
597 | #endif | 610 | #endif |
598 | 611 | ||
599 | ret = misc_register(&apm_device); | 612 | ret = misc_register(&apm_device); |
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c index 8609b8236c67..f49037b744f9 100644 --- a/drivers/char/i8k.c +++ b/drivers/char/i8k.c | |||
@@ -82,6 +82,7 @@ static int i8k_ioctl(struct inode *, struct file *, unsigned int, | |||
82 | unsigned long); | 82 | unsigned long); |
83 | 83 | ||
84 | static const struct file_operations i8k_fops = { | 84 | static const struct file_operations i8k_fops = { |
85 | .owner = THIS_MODULE, | ||
85 | .open = i8k_open_fs, | 86 | .open = i8k_open_fs, |
86 | .read = seq_read, | 87 | .read = seq_read, |
87 | .llseek = seq_lseek, | 88 | .llseek = seq_lseek, |
@@ -554,13 +555,10 @@ static int __init i8k_init(void) | |||
554 | return -ENODEV; | 555 | return -ENODEV; |
555 | 556 | ||
556 | /* Register the proc entry */ | 557 | /* Register the proc entry */ |
557 | proc_i8k = create_proc_entry("i8k", 0, NULL); | 558 | proc_i8k = proc_create("i8k", 0, NULL, &i8k_fops); |
558 | if (!proc_i8k) | 559 | if (!proc_i8k) |
559 | return -ENOENT; | 560 | return -ENOENT; |
560 | 561 | ||
561 | proc_i8k->proc_fops = &i8k_fops; | ||
562 | proc_i8k->owner = THIS_MODULE; | ||
563 | |||
564 | printk(KERN_INFO | 562 | printk(KERN_INFO |
565 | "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", | 563 | "Dell laptop SMM driver v%s Massimo Dal Zotto (dz@debian.org)\n", |
566 | I8K_VERSION); | 564 | I8K_VERSION); |
diff --git a/drivers/char/ip2/ip2main.c b/drivers/char/ip2/ip2main.c index b1d6cad84282..0a61856c631f 100644 --- a/drivers/char/ip2/ip2main.c +++ b/drivers/char/ip2/ip2main.c | |||
@@ -133,8 +133,9 @@ | |||
133 | *****************/ | 133 | *****************/ |
134 | 134 | ||
135 | #include <linux/proc_fs.h> | 135 | #include <linux/proc_fs.h> |
136 | #include <linux/seq_file.h> | ||
136 | 137 | ||
137 | static int ip2_read_procmem(char *, char **, off_t, int); | 138 | static const struct file_operations ip2mem_proc_fops; |
138 | static int ip2_read_proc(char *, char **, off_t, int, int *, void * ); | 139 | static int ip2_read_proc(char *, char **, off_t, int, int *, void * ); |
139 | 140 | ||
140 | /********************/ | 141 | /********************/ |
@@ -423,7 +424,7 @@ cleanup_module(void) | |||
423 | } | 424 | } |
424 | put_tty_driver(ip2_tty_driver); | 425 | put_tty_driver(ip2_tty_driver); |
425 | unregister_chrdev(IP2_IPL_MAJOR, pcIpl); | 426 | unregister_chrdev(IP2_IPL_MAJOR, pcIpl); |
426 | remove_proc_entry("ip2mem", &proc_root); | 427 | remove_proc_entry("ip2mem", NULL); |
427 | 428 | ||
428 | // free memory | 429 | // free memory |
429 | for (i = 0; i < IP2_MAX_BOARDS; i++) { | 430 | for (i = 0; i < IP2_MAX_BOARDS; i++) { |
@@ -695,7 +696,7 @@ ip2_loadmain(int *iop, int *irqp, unsigned char *firmware, int firmsize) | |||
695 | } | 696 | } |
696 | } | 697 | } |
697 | /* Register the read_procmem thing */ | 698 | /* Register the read_procmem thing */ |
698 | if (!create_proc_info_entry("ip2mem",0,&proc_root,ip2_read_procmem)) { | 699 | if (!proc_create("ip2mem",0,NULL,&ip2mem_proc_fops)) { |
699 | printk(KERN_ERR "IP2: failed to register read_procmem\n"); | 700 | printk(KERN_ERR "IP2: failed to register read_procmem\n"); |
700 | } else { | 701 | } else { |
701 | 702 | ||
@@ -2967,65 +2968,61 @@ ip2_ipl_open( struct inode *pInode, struct file *pFile ) | |||
2967 | } | 2968 | } |
2968 | return 0; | 2969 | return 0; |
2969 | } | 2970 | } |
2970 | /******************************************************************************/ | ||
2971 | /* Function: ip2_read_procmem */ | ||
2972 | /* Parameters: */ | ||
2973 | /* */ | ||
2974 | /* Returns: Length of output */ | ||
2975 | /* */ | ||
2976 | /* Description: */ | ||
2977 | /* Supplies some driver operating parameters */ | ||
2978 | /* Not real useful unless your debugging the fifo */ | ||
2979 | /* */ | ||
2980 | /******************************************************************************/ | ||
2981 | |||
2982 | #define LIMIT (PAGE_SIZE - 120) | ||
2983 | 2971 | ||
2984 | static int | 2972 | static int |
2985 | ip2_read_procmem(char *buf, char **start, off_t offset, int len) | 2973 | proc_ip2mem_show(struct seq_file *m, void *v) |
2986 | { | 2974 | { |
2987 | i2eBordStrPtr pB; | 2975 | i2eBordStrPtr pB; |
2988 | i2ChanStrPtr pCh; | 2976 | i2ChanStrPtr pCh; |
2989 | PTTY tty; | 2977 | PTTY tty; |
2990 | int i; | 2978 | int i; |
2991 | 2979 | ||
2992 | len = 0; | ||
2993 | |||
2994 | #define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n" | 2980 | #define FMTLINE "%3d: 0x%08x 0x%08x 0%011o 0%011o\n" |
2995 | #define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n" | 2981 | #define FMTLIN2 " 0x%04x 0x%04x tx flow 0x%x\n" |
2996 | #define FMTLIN3 " 0x%04x 0x%04x rc flow\n" | 2982 | #define FMTLIN3 " 0x%04x 0x%04x rc flow\n" |
2997 | 2983 | ||
2998 | len += sprintf(buf+len,"\n"); | 2984 | seq_printf(m,"\n"); |
2999 | 2985 | ||
3000 | for( i = 0; i < IP2_MAX_BOARDS; ++i ) { | 2986 | for( i = 0; i < IP2_MAX_BOARDS; ++i ) { |
3001 | pB = i2BoardPtrTable[i]; | 2987 | pB = i2BoardPtrTable[i]; |
3002 | if ( pB ) { | 2988 | if ( pB ) { |
3003 | len += sprintf(buf+len,"board %d:\n",i); | 2989 | seq_printf(m,"board %d:\n",i); |
3004 | len += sprintf(buf+len,"\tFifo rem: %d mty: %x outM %x\n", | 2990 | seq_printf(m,"\tFifo rem: %d mty: %x outM %x\n", |
3005 | pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting); | 2991 | pB->i2eFifoRemains,pB->i2eWaitingForEmptyFifo,pB->i2eOutMailWaiting); |
3006 | } | 2992 | } |
3007 | } | 2993 | } |
3008 | 2994 | ||
3009 | len += sprintf(buf+len,"#: tty flags, port flags, cflags, iflags\n"); | 2995 | seq_printf(m,"#: tty flags, port flags, cflags, iflags\n"); |
3010 | for (i=0; i < IP2_MAX_PORTS; i++) { | 2996 | for (i=0; i < IP2_MAX_PORTS; i++) { |
3011 | if (len > LIMIT) | ||
3012 | break; | ||
3013 | pCh = DevTable[i]; | 2997 | pCh = DevTable[i]; |
3014 | if (pCh) { | 2998 | if (pCh) { |
3015 | tty = pCh->pTTY; | 2999 | tty = pCh->pTTY; |
3016 | if (tty && tty->count) { | 3000 | if (tty && tty->count) { |
3017 | len += sprintf(buf+len,FMTLINE,i,(int)tty->flags,pCh->flags, | 3001 | seq_printf(m,FMTLINE,i,(int)tty->flags,pCh->flags, |
3018 | tty->termios->c_cflag,tty->termios->c_iflag); | 3002 | tty->termios->c_cflag,tty->termios->c_iflag); |
3019 | 3003 | ||
3020 | len += sprintf(buf+len,FMTLIN2, | 3004 | seq_printf(m,FMTLIN2, |
3021 | pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds); | 3005 | pCh->outfl.asof,pCh->outfl.room,pCh->channelNeeds); |
3022 | len += sprintf(buf+len,FMTLIN3,pCh->infl.asof,pCh->infl.room); | 3006 | seq_printf(m,FMTLIN3,pCh->infl.asof,pCh->infl.room); |
3023 | } | 3007 | } |
3024 | } | 3008 | } |
3025 | } | 3009 | } |
3026 | return len; | 3010 | return 0; |
3011 | } | ||
3012 | |||
3013 | static int proc_ip2mem_open(struct inode *inode, struct file *file) | ||
3014 | { | ||
3015 | return single_open(file, proc_ip2mem_show, NULL); | ||
3027 | } | 3016 | } |
3028 | 3017 | ||
3018 | static const struct file_operations ip2mem_proc_fops = { | ||
3019 | .owner = THIS_MODULE, | ||
3020 | .open = proc_ip2mem_open, | ||
3021 | .read = seq_read, | ||
3022 | .llseek = seq_lseek, | ||
3023 | .release = single_release, | ||
3024 | }; | ||
3025 | |||
3029 | /* | 3026 | /* |
3030 | * This is the handler for /proc/tty/driver/ip2 | 3027 | * This is the handler for /proc/tty/driver/ip2 |
3031 | * | 3028 | * |
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 553f0a408eda..eb8a1a8c188e 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile | |||
@@ -9,7 +9,3 @@ obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o | |||
9 | obj-$(CONFIG_IPMI_SI) += ipmi_si.o | 9 | obj-$(CONFIG_IPMI_SI) += ipmi_si.o |
10 | obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o | 10 | obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o |
11 | obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o | 11 | obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o |
12 | |||
13 | ipmi_si.o: $(ipmi_si-objs) | ||
14 | $(LD) -r -o $@ $(ipmi_si-objs) | ||
15 | |||
diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index e736119b6497..7b98c067190a 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c | |||
@@ -37,26 +37,32 @@ | |||
37 | #define BT_DEBUG_ENABLE 1 /* Generic messages */ | 37 | #define BT_DEBUG_ENABLE 1 /* Generic messages */ |
38 | #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ | 38 | #define BT_DEBUG_MSG 2 /* Prints all request/response buffers */ |
39 | #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ | 39 | #define BT_DEBUG_STATES 4 /* Verbose look at state changes */ |
40 | /* BT_DEBUG_OFF must be zero to correspond to the default uninitialized | 40 | /* |
41 | value */ | 41 | * BT_DEBUG_OFF must be zero to correspond to the default uninitialized |
42 | * value | ||
43 | */ | ||
42 | 44 | ||
43 | static int bt_debug; /* 0 == BT_DEBUG_OFF */ | 45 | static int bt_debug; /* 0 == BT_DEBUG_OFF */ |
44 | 46 | ||
45 | module_param(bt_debug, int, 0644); | 47 | module_param(bt_debug, int, 0644); |
46 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | 48 | MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); |
47 | 49 | ||
48 | /* Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, | 50 | /* |
49 | and 64 byte buffers. However, one HP implementation wants 255 bytes of | 51 | * Typical "Get BT Capabilities" values are 2-3 retries, 5-10 seconds, |
50 | buffer (with a documented message of 160 bytes) so go for the max. | 52 | * and 64 byte buffers. However, one HP implementation wants 255 bytes of |
51 | Since the Open IPMI architecture is single-message oriented at this | 53 | * buffer (with a documented message of 160 bytes) so go for the max. |
52 | stage, the queue depth of BT is of no concern. */ | 54 | * Since the Open IPMI architecture is single-message oriented at this |
55 | * stage, the queue depth of BT is of no concern. | ||
56 | */ | ||
53 | 57 | ||
54 | #define BT_NORMAL_TIMEOUT 5 /* seconds */ | 58 | #define BT_NORMAL_TIMEOUT 5 /* seconds */ |
55 | #define BT_NORMAL_RETRY_LIMIT 2 | 59 | #define BT_NORMAL_RETRY_LIMIT 2 |
56 | #define BT_RESET_DELAY 6 /* seconds after warm reset */ | 60 | #define BT_RESET_DELAY 6 /* seconds after warm reset */ |
57 | 61 | ||
58 | /* States are written in chronological order and usually cover | 62 | /* |
59 | multiple rows of the state table discussion in the IPMI spec. */ | 63 | * States are written in chronological order and usually cover |
64 | * multiple rows of the state table discussion in the IPMI spec. | ||
65 | */ | ||
60 | 66 | ||
61 | enum bt_states { | 67 | enum bt_states { |
62 | BT_STATE_IDLE = 0, /* Order is critical in this list */ | 68 | BT_STATE_IDLE = 0, /* Order is critical in this list */ |
@@ -76,10 +82,12 @@ enum bt_states { | |||
76 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ | 82 | BT_STATE_LONG_BUSY /* BT doesn't get hosed :-) */ |
77 | }; | 83 | }; |
78 | 84 | ||
79 | /* Macros seen at the end of state "case" blocks. They help with legibility | 85 | /* |
80 | and debugging. */ | 86 | * Macros seen at the end of state "case" blocks. They help with legibility |
87 | * and debugging. | ||
88 | */ | ||
81 | 89 | ||
82 | #define BT_STATE_CHANGE(X,Y) { bt->state = X; return Y; } | 90 | #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } |
83 | 91 | ||
84 | #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } | 92 | #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } |
85 | 93 | ||
@@ -110,11 +118,13 @@ struct si_sm_data { | |||
110 | #define BT_H_BUSY 0x40 | 118 | #define BT_H_BUSY 0x40 |
111 | #define BT_B_BUSY 0x80 | 119 | #define BT_B_BUSY 0x80 |
112 | 120 | ||
113 | /* Some bits are toggled on each write: write once to set it, once | 121 | /* |
114 | more to clear it; writing a zero does nothing. To absolutely | 122 | * Some bits are toggled on each write: write once to set it, once |
115 | clear it, check its state and write if set. This avoids the "get | 123 | * more to clear it; writing a zero does nothing. To absolutely |
116 | current then use as mask" scheme to modify one bit. Note that the | 124 | * clear it, check its state and write if set. This avoids the "get |
117 | variable "bt" is hardcoded into these macros. */ | 125 | * current then use as mask" scheme to modify one bit. Note that the |
126 | * variable "bt" is hardcoded into these macros. | ||
127 | */ | ||
118 | 128 | ||
119 | #define BT_STATUS bt->io->inputb(bt->io, 0) | 129 | #define BT_STATUS bt->io->inputb(bt->io, 0) |
120 | #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) | 130 | #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) |
@@ -125,8 +135,10 @@ struct si_sm_data { | |||
125 | #define BT_INTMASK_R bt->io->inputb(bt->io, 2) | 135 | #define BT_INTMASK_R bt->io->inputb(bt->io, 2) |
126 | #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) | 136 | #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) |
127 | 137 | ||
128 | /* Convenience routines for debugging. These are not multi-open safe! | 138 | /* |
129 | Note the macros have hardcoded variables in them. */ | 139 | * Convenience routines for debugging. These are not multi-open safe! |
140 | * Note the macros have hardcoded variables in them. | ||
141 | */ | ||
130 | 142 | ||
131 | static char *state2txt(unsigned char state) | 143 | static char *state2txt(unsigned char state) |
132 | { | 144 | { |
@@ -182,7 +194,8 @@ static char *status2txt(unsigned char status) | |||
182 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) | 194 | static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) |
183 | { | 195 | { |
184 | memset(bt, 0, sizeof(struct si_sm_data)); | 196 | memset(bt, 0, sizeof(struct si_sm_data)); |
185 | if (bt->io != io) { /* external: one-time only things */ | 197 | if (bt->io != io) { |
198 | /* external: one-time only things */ | ||
186 | bt->io = io; | 199 | bt->io = io; |
187 | bt->seq = 0; | 200 | bt->seq = 0; |
188 | } | 201 | } |
@@ -229,7 +242,7 @@ static int bt_start_transaction(struct si_sm_data *bt, | |||
229 | printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); | 242 | printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); |
230 | printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); | 243 | printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); |
231 | for (i = 0; i < size; i ++) | 244 | for (i = 0; i < size; i ++) |
232 | printk (" %02x", data[i]); | 245 | printk(" %02x", data[i]); |
233 | printk("\n"); | 246 | printk("\n"); |
234 | } | 247 | } |
235 | bt->write_data[0] = size + 1; /* all data plus seq byte */ | 248 | bt->write_data[0] = size + 1; /* all data plus seq byte */ |
@@ -246,8 +259,10 @@ static int bt_start_transaction(struct si_sm_data *bt, | |||
246 | return 0; | 259 | return 0; |
247 | } | 260 | } |
248 | 261 | ||
249 | /* After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE | 262 | /* |
250 | it calls this. Strip out the length and seq bytes. */ | 263 | * After the upper state machine has been told SI_SM_TRANSACTION_COMPLETE |
264 | * it calls this. Strip out the length and seq bytes. | ||
265 | */ | ||
251 | 266 | ||
252 | static int bt_get_result(struct si_sm_data *bt, | 267 | static int bt_get_result(struct si_sm_data *bt, |
253 | unsigned char *data, | 268 | unsigned char *data, |
@@ -269,10 +284,10 @@ static int bt_get_result(struct si_sm_data *bt, | |||
269 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); | 284 | memcpy(data + 2, bt->read_data + 4, msg_len - 2); |
270 | 285 | ||
271 | if (bt_debug & BT_DEBUG_MSG) { | 286 | if (bt_debug & BT_DEBUG_MSG) { |
272 | printk (KERN_WARNING "BT: result %d bytes:", msg_len); | 287 | printk(KERN_WARNING "BT: result %d bytes:", msg_len); |
273 | for (i = 0; i < msg_len; i++) | 288 | for (i = 0; i < msg_len; i++) |
274 | printk(" %02x", data[i]); | 289 | printk(" %02x", data[i]); |
275 | printk ("\n"); | 290 | printk("\n"); |
276 | } | 291 | } |
277 | return msg_len; | 292 | return msg_len; |
278 | } | 293 | } |
@@ -292,8 +307,10 @@ static void reset_flags(struct si_sm_data *bt) | |||
292 | BT_INTMASK_W(BT_BMC_HWRST); | 307 | BT_INTMASK_W(BT_BMC_HWRST); |
293 | } | 308 | } |
294 | 309 | ||
295 | /* Get rid of an unwanted/stale response. This should only be needed for | 310 | /* |
296 | BMCs that support multiple outstanding requests. */ | 311 | * Get rid of an unwanted/stale response. This should only be needed for |
312 | * BMCs that support multiple outstanding requests. | ||
313 | */ | ||
297 | 314 | ||
298 | static void drain_BMC2HOST(struct si_sm_data *bt) | 315 | static void drain_BMC2HOST(struct si_sm_data *bt) |
299 | { | 316 | { |
@@ -326,8 +343,8 @@ static inline void write_all_bytes(struct si_sm_data *bt) | |||
326 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", | 343 | printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", |
327 | bt->write_count, bt->seq); | 344 | bt->write_count, bt->seq); |
328 | for (i = 0; i < bt->write_count; i++) | 345 | for (i = 0; i < bt->write_count; i++) |
329 | printk (" %02x", bt->write_data[i]); | 346 | printk(" %02x", bt->write_data[i]); |
330 | printk ("\n"); | 347 | printk("\n"); |
331 | } | 348 | } |
332 | for (i = 0; i < bt->write_count; i++) | 349 | for (i = 0; i < bt->write_count; i++) |
333 | HOST2BMC(bt->write_data[i]); | 350 | HOST2BMC(bt->write_data[i]); |
@@ -337,8 +354,10 @@ static inline int read_all_bytes(struct si_sm_data *bt) | |||
337 | { | 354 | { |
338 | unsigned char i; | 355 | unsigned char i; |
339 | 356 | ||
340 | /* length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. | 357 | /* |
341 | Keep layout of first four bytes aligned with write_data[] */ | 358 | * length is "framing info", minimum = 4: NetFn, Seq, Cmd, cCode. |
359 | * Keep layout of first four bytes aligned with write_data[] | ||
360 | */ | ||
342 | 361 | ||
343 | bt->read_data[0] = BMC2HOST; | 362 | bt->read_data[0] = BMC2HOST; |
344 | bt->read_count = bt->read_data[0]; | 363 | bt->read_count = bt->read_data[0]; |
@@ -362,8 +381,8 @@ static inline int read_all_bytes(struct si_sm_data *bt) | |||
362 | if (max > 16) | 381 | if (max > 16) |
363 | max = 16; | 382 | max = 16; |
364 | for (i = 0; i < max; i++) | 383 | for (i = 0; i < max; i++) |
365 | printk (" %02x", bt->read_data[i]); | 384 | printk(KERN_CONT " %02x", bt->read_data[i]); |
366 | printk ("%s\n", bt->read_count == max ? "" : " ..."); | 385 | printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ..."); |
367 | } | 386 | } |
368 | 387 | ||
369 | /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ | 388 | /* per the spec, the (NetFn[1], Seq[2], Cmd[3]) tuples must match */ |
@@ -402,8 +421,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
402 | printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ | 421 | printk(KERN_WARNING "IPMI BT: %s in %s %s ", /* open-ended line */ |
403 | reason, STATE2TXT, STATUS2TXT); | 422 | reason, STATE2TXT, STATUS2TXT); |
404 | 423 | ||
405 | /* Per the IPMI spec, retries are based on the sequence number | 424 | /* |
406 | known only to this module, so manage a restart here. */ | 425 | * Per the IPMI spec, retries are based on the sequence number |
426 | * known only to this module, so manage a restart here. | ||
427 | */ | ||
407 | (bt->error_retries)++; | 428 | (bt->error_retries)++; |
408 | if (bt->error_retries < bt->BT_CAP_retries) { | 429 | if (bt->error_retries < bt->BT_CAP_retries) { |
409 | printk("%d retries left\n", | 430 | printk("%d retries left\n", |
@@ -412,8 +433,8 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
412 | return SI_SM_CALL_WITHOUT_DELAY; | 433 | return SI_SM_CALL_WITHOUT_DELAY; |
413 | } | 434 | } |
414 | 435 | ||
415 | printk("failed %d retries, sending error response\n", | 436 | printk(KERN_WARNING "failed %d retries, sending error response\n", |
416 | bt->BT_CAP_retries); | 437 | bt->BT_CAP_retries); |
417 | if (!bt->nonzero_status) | 438 | if (!bt->nonzero_status) |
418 | printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); | 439 | printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); |
419 | 440 | ||
@@ -424,8 +445,10 @@ static enum si_sm_result error_recovery(struct si_sm_data *bt, | |||
424 | return SI_SM_CALL_WITHOUT_DELAY; | 445 | return SI_SM_CALL_WITHOUT_DELAY; |
425 | } | 446 | } |
426 | 447 | ||
427 | /* Concoct a useful error message, set up the next state, and | 448 | /* |
428 | be done with this sequence. */ | 449 | * Concoct a useful error message, set up the next state, and |
450 | * be done with this sequence. | ||
451 | */ | ||
429 | 452 | ||
430 | bt->state = BT_STATE_IDLE; | 453 | bt->state = BT_STATE_IDLE; |
431 | switch (cCode) { | 454 | switch (cCode) { |
@@ -461,10 +484,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
461 | last_printed = bt->state; | 484 | last_printed = bt->state; |
462 | } | 485 | } |
463 | 486 | ||
464 | /* Commands that time out may still (eventually) provide a response. | 487 | /* |
465 | This stale response will get in the way of a new response so remove | 488 | * Commands that time out may still (eventually) provide a response. |
466 | it if possible (hopefully during IDLE). Even if it comes up later | 489 | * This stale response will get in the way of a new response so remove |
467 | it will be rejected by its (now-forgotten) seq number. */ | 490 | * it if possible (hopefully during IDLE). Even if it comes up later |
491 | * it will be rejected by its (now-forgotten) seq number. | ||
492 | */ | ||
468 | 493 | ||
469 | if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { | 494 | if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) { |
470 | drain_BMC2HOST(bt); | 495 | drain_BMC2HOST(bt); |
@@ -472,7 +497,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
472 | } | 497 | } |
473 | 498 | ||
474 | if ((bt->state != BT_STATE_IDLE) && | 499 | if ((bt->state != BT_STATE_IDLE) && |
475 | (bt->state < BT_STATE_PRINTME)) { /* check timeout */ | 500 | (bt->state < BT_STATE_PRINTME)) { |
501 | /* check timeout */ | ||
476 | bt->timeout -= time; | 502 | bt->timeout -= time; |
477 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) | 503 | if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1)) |
478 | return error_recovery(bt, | 504 | return error_recovery(bt, |
@@ -482,8 +508,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
482 | 508 | ||
483 | switch (bt->state) { | 509 | switch (bt->state) { |
484 | 510 | ||
485 | /* Idle state first checks for asynchronous messages from another | 511 | /* |
486 | channel, then does some opportunistic housekeeping. */ | 512 | * Idle state first checks for asynchronous messages from another |
513 | * channel, then does some opportunistic housekeeping. | ||
514 | */ | ||
487 | 515 | ||
488 | case BT_STATE_IDLE: | 516 | case BT_STATE_IDLE: |
489 | if (status & BT_SMS_ATN) { | 517 | if (status & BT_SMS_ATN) { |
@@ -531,16 +559,19 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
531 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | 559 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
532 | BT_CONTROL(BT_H_BUSY); /* set */ | 560 | BT_CONTROL(BT_H_BUSY); /* set */ |
533 | 561 | ||
534 | /* Uncached, ordered writes should just proceeed serially but | 562 | /* |
535 | some BMCs don't clear B2H_ATN with one hit. Fast-path a | 563 | * Uncached, ordered writes should just proceeed serially but |
536 | workaround without too much penalty to the general case. */ | 564 | * some BMCs don't clear B2H_ATN with one hit. Fast-path a |
565 | * workaround without too much penalty to the general case. | ||
566 | */ | ||
537 | 567 | ||
538 | BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ | 568 | BT_CONTROL(BT_B2H_ATN); /* clear it to ACK the BMC */ |
539 | BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, | 569 | BT_STATE_CHANGE(BT_STATE_CLEAR_B2H, |
540 | SI_SM_CALL_WITHOUT_DELAY); | 570 | SI_SM_CALL_WITHOUT_DELAY); |
541 | 571 | ||
542 | case BT_STATE_CLEAR_B2H: | 572 | case BT_STATE_CLEAR_B2H: |
543 | if (status & BT_B2H_ATN) { /* keep hitting it */ | 573 | if (status & BT_B2H_ATN) { |
574 | /* keep hitting it */ | ||
544 | BT_CONTROL(BT_B2H_ATN); | 575 | BT_CONTROL(BT_B2H_ATN); |
545 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); | 576 | BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY); |
546 | } | 577 | } |
@@ -548,7 +579,8 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
548 | SI_SM_CALL_WITHOUT_DELAY); | 579 | SI_SM_CALL_WITHOUT_DELAY); |
549 | 580 | ||
550 | case BT_STATE_READ_BYTES: | 581 | case BT_STATE_READ_BYTES: |
551 | if (!(status & BT_H_BUSY)) /* check in case of retry */ | 582 | if (!(status & BT_H_BUSY)) |
583 | /* check in case of retry */ | ||
552 | BT_CONTROL(BT_H_BUSY); | 584 | BT_CONTROL(BT_H_BUSY); |
553 | BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ | 585 | BT_CONTROL(BT_CLR_RD_PTR); /* start of BMC2HOST buffer */ |
554 | i = read_all_bytes(bt); /* true == packet seq match */ | 586 | i = read_all_bytes(bt); /* true == packet seq match */ |
@@ -599,8 +631,10 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
599 | BT_STATE_CHANGE(BT_STATE_XACTION_START, | 631 | BT_STATE_CHANGE(BT_STATE_XACTION_START, |
600 | SI_SM_CALL_WITH_DELAY); | 632 | SI_SM_CALL_WITH_DELAY); |
601 | 633 | ||
602 | /* Get BT Capabilities, using timing of upper level state machine. | 634 | /* |
603 | Set outreqs to prevent infinite loop on timeout. */ | 635 | * Get BT Capabilities, using timing of upper level state machine. |
636 | * Set outreqs to prevent infinite loop on timeout. | ||
637 | */ | ||
604 | case BT_STATE_CAPABILITIES_BEGIN: | 638 | case BT_STATE_CAPABILITIES_BEGIN: |
605 | bt->BT_CAP_outreqs = 1; | 639 | bt->BT_CAP_outreqs = 1; |
606 | { | 640 | { |
@@ -638,10 +672,12 @@ static enum si_sm_result bt_event(struct si_sm_data *bt, long time) | |||
638 | 672 | ||
639 | static int bt_detect(struct si_sm_data *bt) | 673 | static int bt_detect(struct si_sm_data *bt) |
640 | { | 674 | { |
641 | /* It's impossible for the BT status and interrupt registers to be | 675 | /* |
642 | all 1's, (assuming a properly functioning, self-initialized BMC) | 676 | * It's impossible for the BT status and interrupt registers to be |
643 | but that's what you get from reading a bogus address, so we | 677 | * all 1's, (assuming a properly functioning, self-initialized BMC) |
644 | test that first. The calling routine uses negative logic. */ | 678 | * but that's what you get from reading a bogus address, so we |
679 | * test that first. The calling routine uses negative logic. | ||
680 | */ | ||
645 | 681 | ||
646 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) | 682 | if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF)) |
647 | return 1; | 683 | return 1; |
@@ -658,8 +694,7 @@ static int bt_size(void) | |||
658 | return sizeof(struct si_sm_data); | 694 | return sizeof(struct si_sm_data); |
659 | } | 695 | } |
660 | 696 | ||
661 | struct si_sm_handlers bt_smi_handlers = | 697 | struct si_sm_handlers bt_smi_handlers = { |
662 | { | ||
663 | .init_data = bt_init_data, | 698 | .init_data = bt_init_data, |
664 | .start_transaction = bt_start_transaction, | 699 | .start_transaction = bt_start_transaction, |
665 | .get_result = bt_get_result, | 700 | .get_result = bt_get_result, |
diff --git a/drivers/char/ipmi/ipmi_kcs_sm.c b/drivers/char/ipmi/ipmi_kcs_sm.c index c1b8228cb7b6..80704875794c 100644 --- a/drivers/char/ipmi/ipmi_kcs_sm.c +++ b/drivers/char/ipmi/ipmi_kcs_sm.c | |||
@@ -60,37 +60,58 @@ MODULE_PARM_DESC(kcs_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); | |||
60 | 60 | ||
61 | /* The states the KCS driver may be in. */ | 61 | /* The states the KCS driver may be in. */ |
62 | enum kcs_states { | 62 | enum kcs_states { |
63 | KCS_IDLE, /* The KCS interface is currently | 63 | /* The KCS interface is currently doing nothing. */ |
64 | doing nothing. */ | 64 | KCS_IDLE, |
65 | KCS_START_OP, /* We are starting an operation. The | 65 | |
66 | data is in the output buffer, but | 66 | /* |
67 | nothing has been done to the | 67 | * We are starting an operation. The data is in the output |
68 | interface yet. This was added to | 68 | * buffer, but nothing has been done to the interface yet. This |
69 | the state machine in the spec to | 69 | * was added to the state machine in the spec to wait for the |
70 | wait for the initial IBF. */ | 70 | * initial IBF. |
71 | KCS_WAIT_WRITE_START, /* We have written a write cmd to the | 71 | */ |
72 | interface. */ | 72 | KCS_START_OP, |
73 | KCS_WAIT_WRITE, /* We are writing bytes to the | 73 | |
74 | interface. */ | 74 | /* We have written a write cmd to the interface. */ |
75 | KCS_WAIT_WRITE_END, /* We have written the write end cmd | 75 | KCS_WAIT_WRITE_START, |
76 | to the interface, and still need to | 76 | |
77 | write the last byte. */ | 77 | /* We are writing bytes to the interface. */ |
78 | KCS_WAIT_READ, /* We are waiting to read data from | 78 | KCS_WAIT_WRITE, |
79 | the interface. */ | 79 | |
80 | KCS_ERROR0, /* State to transition to the error | 80 | /* |
81 | handler, this was added to the | 81 | * We have written the write end cmd to the interface, and |
82 | state machine in the spec to be | 82 | * still need to write the last byte. |
83 | sure IBF was there. */ | 83 | */ |
84 | KCS_ERROR1, /* First stage error handler, wait for | 84 | KCS_WAIT_WRITE_END, |
85 | the interface to respond. */ | 85 | |
86 | KCS_ERROR2, /* The abort cmd has been written, | 86 | /* We are waiting to read data from the interface. */ |
87 | wait for the interface to | 87 | KCS_WAIT_READ, |
88 | respond. */ | 88 | |
89 | KCS_ERROR3, /* We wrote some data to the | 89 | /* |
90 | interface, wait for it to switch to | 90 | * State to transition to the error handler, this was added to |
91 | read mode. */ | 91 | * the state machine in the spec to be sure IBF was there. |
92 | KCS_HOSED /* The hardware failed to follow the | 92 | */ |
93 | state machine. */ | 93 | KCS_ERROR0, |
94 | |||
95 | /* | ||
96 | * First stage error handler, wait for the interface to | ||
97 | * respond. | ||
98 | */ | ||
99 | KCS_ERROR1, | ||
100 | |||
101 | /* | ||
102 | * The abort cmd has been written, wait for the interface to | ||
103 | * respond. | ||
104 | */ | ||
105 | KCS_ERROR2, | ||
106 | |||
107 | /* | ||
108 | * We wrote some data to the interface, wait for it to switch | ||
109 | * to read mode. | ||
110 | */ | ||
111 | KCS_ERROR3, | ||
112 | |||
113 | /* The hardware failed to follow the state machine. */ | ||
114 | KCS_HOSED | ||
94 | }; | 115 | }; |
95 | 116 | ||
96 | #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH | 117 | #define MAX_KCS_READ_SIZE IPMI_MAX_MSG_LENGTH |
@@ -102,8 +123,7 @@ enum kcs_states { | |||
102 | #define MAX_ERROR_RETRIES 10 | 123 | #define MAX_ERROR_RETRIES 10 |
103 | #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) | 124 | #define ERROR0_OBF_WAIT_JIFFIES (2*HZ) |
104 | 125 | ||
105 | struct si_sm_data | 126 | struct si_sm_data { |
106 | { | ||
107 | enum kcs_states state; | 127 | enum kcs_states state; |
108 | struct si_sm_io *io; | 128 | struct si_sm_io *io; |
109 | unsigned char write_data[MAX_KCS_WRITE_SIZE]; | 129 | unsigned char write_data[MAX_KCS_WRITE_SIZE]; |
@@ -187,7 +207,8 @@ static inline void start_error_recovery(struct si_sm_data *kcs, char *reason) | |||
187 | (kcs->error_retries)++; | 207 | (kcs->error_retries)++; |
188 | if (kcs->error_retries > MAX_ERROR_RETRIES) { | 208 | if (kcs->error_retries > MAX_ERROR_RETRIES) { |
189 | if (kcs_debug & KCS_DEBUG_ENABLE) | 209 | if (kcs_debug & KCS_DEBUG_ENABLE) |
190 | printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", reason); | 210 | printk(KERN_DEBUG "ipmi_kcs_sm: kcs hosed: %s\n", |
211 | reason); | ||
191 | kcs->state = KCS_HOSED; | 212 | kcs->state = KCS_HOSED; |
192 | } else { | 213 | } else { |
193 | kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; | 214 | kcs->error0_timeout = jiffies + ERROR0_OBF_WAIT_JIFFIES; |
@@ -271,10 +292,9 @@ static int start_kcs_transaction(struct si_sm_data *kcs, unsigned char *data, | |||
271 | 292 | ||
272 | if (kcs_debug & KCS_DEBUG_MSG) { | 293 | if (kcs_debug & KCS_DEBUG_MSG) { |
273 | printk(KERN_DEBUG "start_kcs_transaction -"); | 294 | printk(KERN_DEBUG "start_kcs_transaction -"); |
274 | for (i = 0; i < size; i ++) { | 295 | for (i = 0; i < size; i++) |
275 | printk(" %02x", (unsigned char) (data [i])); | 296 | printk(" %02x", (unsigned char) (data [i])); |
276 | } | 297 | printk("\n"); |
277 | printk ("\n"); | ||
278 | } | 298 | } |
279 | kcs->error_retries = 0; | 299 | kcs->error_retries = 0; |
280 | memcpy(kcs->write_data, data, size); | 300 | memcpy(kcs->write_data, data, size); |
@@ -305,9 +325,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, | |||
305 | kcs->read_pos = 3; | 325 | kcs->read_pos = 3; |
306 | } | 326 | } |
307 | if (kcs->truncated) { | 327 | if (kcs->truncated) { |
308 | /* Report a truncated error. We might overwrite | 328 | /* |
309 | another error, but that's too bad, the user needs | 329 | * Report a truncated error. We might overwrite |
310 | to know it was truncated. */ | 330 | * another error, but that's too bad, the user needs |
331 | * to know it was truncated. | ||
332 | */ | ||
311 | data[2] = IPMI_ERR_MSG_TRUNCATED; | 333 | data[2] = IPMI_ERR_MSG_TRUNCATED; |
312 | kcs->truncated = 0; | 334 | kcs->truncated = 0; |
313 | } | 335 | } |
@@ -315,9 +337,11 @@ static int get_kcs_result(struct si_sm_data *kcs, unsigned char *data, | |||
315 | return kcs->read_pos; | 337 | return kcs->read_pos; |
316 | } | 338 | } |
317 | 339 | ||
318 | /* This implements the state machine defined in the IPMI manual, see | 340 | /* |
319 | that for details on how this works. Divide that flowchart into | 341 | * This implements the state machine defined in the IPMI manual, see |
320 | sections delimited by "Wait for IBF" and this will become clear. */ | 342 | * that for details on how this works. Divide that flowchart into |
343 | * sections delimited by "Wait for IBF" and this will become clear. | ||
344 | */ | ||
321 | static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | 345 | static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) |
322 | { | 346 | { |
323 | unsigned char status; | 347 | unsigned char status; |
@@ -388,11 +412,12 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
388 | write_next_byte(kcs); | 412 | write_next_byte(kcs); |
389 | } | 413 | } |
390 | break; | 414 | break; |
391 | 415 | ||
392 | case KCS_WAIT_WRITE_END: | 416 | case KCS_WAIT_WRITE_END: |
393 | if (state != KCS_WRITE_STATE) { | 417 | if (state != KCS_WRITE_STATE) { |
394 | start_error_recovery(kcs, | 418 | start_error_recovery(kcs, |
395 | "Not in write state for write end"); | 419 | "Not in write state" |
420 | " for write end"); | ||
396 | break; | 421 | break; |
397 | } | 422 | } |
398 | clear_obf(kcs, status); | 423 | clear_obf(kcs, status); |
@@ -413,13 +438,15 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
413 | return SI_SM_CALL_WITH_DELAY; | 438 | return SI_SM_CALL_WITH_DELAY; |
414 | read_next_byte(kcs); | 439 | read_next_byte(kcs); |
415 | } else { | 440 | } else { |
416 | /* We don't implement this exactly like the state | 441 | /* |
417 | machine in the spec. Some broken hardware | 442 | * We don't implement this exactly like the state |
418 | does not write the final dummy byte to the | 443 | * machine in the spec. Some broken hardware |
419 | read register. Thus obf will never go high | 444 | * does not write the final dummy byte to the |
420 | here. We just go straight to idle, and we | 445 | * read register. Thus obf will never go high |
421 | handle clearing out obf in idle state if it | 446 | * here. We just go straight to idle, and we |
422 | happens to come in. */ | 447 | * handle clearing out obf in idle state if it |
448 | * happens to come in. | ||
449 | */ | ||
423 | clear_obf(kcs, status); | 450 | clear_obf(kcs, status); |
424 | kcs->orig_write_count = 0; | 451 | kcs->orig_write_count = 0; |
425 | kcs->state = KCS_IDLE; | 452 | kcs->state = KCS_IDLE; |
@@ -430,7 +457,8 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
430 | case KCS_ERROR0: | 457 | case KCS_ERROR0: |
431 | clear_obf(kcs, status); | 458 | clear_obf(kcs, status); |
432 | status = read_status(kcs); | 459 | status = read_status(kcs); |
433 | if (GET_STATUS_OBF(status)) /* controller isn't responding */ | 460 | if (GET_STATUS_OBF(status)) |
461 | /* controller isn't responding */ | ||
434 | if (time_before(jiffies, kcs->error0_timeout)) | 462 | if (time_before(jiffies, kcs->error0_timeout)) |
435 | return SI_SM_CALL_WITH_TICK_DELAY; | 463 | return SI_SM_CALL_WITH_TICK_DELAY; |
436 | write_cmd(kcs, KCS_GET_STATUS_ABORT); | 464 | write_cmd(kcs, KCS_GET_STATUS_ABORT); |
@@ -442,7 +470,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
442 | write_data(kcs, 0); | 470 | write_data(kcs, 0); |
443 | kcs->state = KCS_ERROR2; | 471 | kcs->state = KCS_ERROR2; |
444 | break; | 472 | break; |
445 | 473 | ||
446 | case KCS_ERROR2: | 474 | case KCS_ERROR2: |
447 | if (state != KCS_READ_STATE) { | 475 | if (state != KCS_READ_STATE) { |
448 | start_error_recovery(kcs, | 476 | start_error_recovery(kcs, |
@@ -456,7 +484,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
456 | write_data(kcs, KCS_READ_BYTE); | 484 | write_data(kcs, KCS_READ_BYTE); |
457 | kcs->state = KCS_ERROR3; | 485 | kcs->state = KCS_ERROR3; |
458 | break; | 486 | break; |
459 | 487 | ||
460 | case KCS_ERROR3: | 488 | case KCS_ERROR3: |
461 | if (state != KCS_IDLE_STATE) { | 489 | if (state != KCS_IDLE_STATE) { |
462 | start_error_recovery(kcs, | 490 | start_error_recovery(kcs, |
@@ -475,7 +503,7 @@ static enum si_sm_result kcs_event(struct si_sm_data *kcs, long time) | |||
475 | return SI_SM_TRANSACTION_COMPLETE; | 503 | return SI_SM_TRANSACTION_COMPLETE; |
476 | } | 504 | } |
477 | break; | 505 | break; |
478 | 506 | ||
479 | case KCS_HOSED: | 507 | case KCS_HOSED: |
480 | break; | 508 | break; |
481 | } | 509 | } |
@@ -495,10 +523,12 @@ static int kcs_size(void) | |||
495 | 523 | ||
496 | static int kcs_detect(struct si_sm_data *kcs) | 524 | static int kcs_detect(struct si_sm_data *kcs) |
497 | { | 525 | { |
498 | /* It's impossible for the KCS status register to be all 1's, | 526 | /* |
499 | (assuming a properly functioning, self-initialized BMC) | 527 | * It's impossible for the KCS status register to be all 1's, |
500 | but that's what you get from reading a bogus address, so we | 528 | * (assuming a properly functioning, self-initialized BMC) |
501 | test that first. */ | 529 | * but that's what you get from reading a bogus address, so we |
530 | * test that first. | ||
531 | */ | ||
502 | if (read_status(kcs) == 0xff) | 532 | if (read_status(kcs) == 0xff) |
503 | return 1; | 533 | return 1; |
504 | 534 | ||
@@ -509,8 +539,7 @@ static void kcs_cleanup(struct si_sm_data *kcs) | |||
509 | { | 539 | { |
510 | } | 540 | } |
511 | 541 | ||
512 | struct si_sm_handlers kcs_smi_handlers = | 542 | struct si_sm_handlers kcs_smi_handlers = { |
513 | { | ||
514 | .init_data = init_kcs_data, | 543 | .init_data = init_kcs_data, |
515 | .start_transaction = start_kcs_transaction, | 544 | .start_transaction = start_kcs_transaction, |
516 | .get_result = get_kcs_result, | 545 | .get_result = get_kcs_result, |
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 32b2b22996dc..8a59aaa21be5 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -47,7 +47,7 @@ | |||
47 | 47 | ||
48 | #define PFX "IPMI message handler: " | 48 | #define PFX "IPMI message handler: " |
49 | 49 | ||
50 | #define IPMI_DRIVER_VERSION "39.1" | 50 | #define IPMI_DRIVER_VERSION "39.2" |
51 | 51 | ||
52 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); | 52 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void); |
53 | static int ipmi_init_msghandler(void); | 53 | static int ipmi_init_msghandler(void); |
@@ -63,16 +63,16 @@ static struct proc_dir_entry *proc_ipmi_root; | |||
63 | 63 | ||
64 | #define MAX_EVENTS_IN_QUEUE 25 | 64 | #define MAX_EVENTS_IN_QUEUE 25 |
65 | 65 | ||
66 | /* Don't let a message sit in a queue forever, always time it with at lest | 66 | /* |
67 | the max message timer. This is in milliseconds. */ | 67 | * Don't let a message sit in a queue forever, always time it with at lest |
68 | * the max message timer. This is in milliseconds. | ||
69 | */ | ||
68 | #define MAX_MSG_TIMEOUT 60000 | 70 | #define MAX_MSG_TIMEOUT 60000 |
69 | 71 | ||
70 | |||
71 | /* | 72 | /* |
72 | * The main "user" data structure. | 73 | * The main "user" data structure. |
73 | */ | 74 | */ |
74 | struct ipmi_user | 75 | struct ipmi_user { |
75 | { | ||
76 | struct list_head link; | 76 | struct list_head link; |
77 | 77 | ||
78 | /* Set to "0" when the user is destroyed. */ | 78 | /* Set to "0" when the user is destroyed. */ |
@@ -91,8 +91,7 @@ struct ipmi_user | |||
91 | int gets_events; | 91 | int gets_events; |
92 | }; | 92 | }; |
93 | 93 | ||
94 | struct cmd_rcvr | 94 | struct cmd_rcvr { |
95 | { | ||
96 | struct list_head link; | 95 | struct list_head link; |
97 | 96 | ||
98 | ipmi_user_t user; | 97 | ipmi_user_t user; |
@@ -106,12 +105,12 @@ struct cmd_rcvr | |||
106 | * or change any data until the RCU period completes. So we | 105 | * or change any data until the RCU period completes. So we |
107 | * use this next variable during mass deletion so we can have | 106 | * use this next variable during mass deletion so we can have |
108 | * a list and don't have to wait and restart the search on | 107 | * a list and don't have to wait and restart the search on |
109 | * every individual deletion of a command. */ | 108 | * every individual deletion of a command. |
109 | */ | ||
110 | struct cmd_rcvr *next; | 110 | struct cmd_rcvr *next; |
111 | }; | 111 | }; |
112 | 112 | ||
113 | struct seq_table | 113 | struct seq_table { |
114 | { | ||
115 | unsigned int inuse : 1; | 114 | unsigned int inuse : 1; |
116 | unsigned int broadcast : 1; | 115 | unsigned int broadcast : 1; |
117 | 116 | ||
@@ -119,53 +118,60 @@ struct seq_table | |||
119 | unsigned long orig_timeout; | 118 | unsigned long orig_timeout; |
120 | unsigned int retries_left; | 119 | unsigned int retries_left; |
121 | 120 | ||
122 | /* To verify on an incoming send message response that this is | 121 | /* |
123 | the message that the response is for, we keep a sequence id | 122 | * To verify on an incoming send message response that this is |
124 | and increment it every time we send a message. */ | 123 | * the message that the response is for, we keep a sequence id |
124 | * and increment it every time we send a message. | ||
125 | */ | ||
125 | long seqid; | 126 | long seqid; |
126 | 127 | ||
127 | /* This is held so we can properly respond to the message on a | 128 | /* |
128 | timeout, and it is used to hold the temporary data for | 129 | * This is held so we can properly respond to the message on a |
129 | retransmission, too. */ | 130 | * timeout, and it is used to hold the temporary data for |
131 | * retransmission, too. | ||
132 | */ | ||
130 | struct ipmi_recv_msg *recv_msg; | 133 | struct ipmi_recv_msg *recv_msg; |
131 | }; | 134 | }; |
132 | 135 | ||
133 | /* Store the information in a msgid (long) to allow us to find a | 136 | /* |
134 | sequence table entry from the msgid. */ | 137 | * Store the information in a msgid (long) to allow us to find a |
138 | * sequence table entry from the msgid. | ||
139 | */ | ||
135 | #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) | 140 | #define STORE_SEQ_IN_MSGID(seq, seqid) (((seq&0xff)<<26) | (seqid&0x3ffffff)) |
136 | 141 | ||
137 | #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ | 142 | #define GET_SEQ_FROM_MSGID(msgid, seq, seqid) \ |
138 | do { \ | 143 | do { \ |
139 | seq = ((msgid >> 26) & 0x3f); \ | 144 | seq = ((msgid >> 26) & 0x3f); \ |
140 | seqid = (msgid & 0x3fffff); \ | 145 | seqid = (msgid & 0x3fffff); \ |
141 | } while (0) | 146 | } while (0) |
142 | 147 | ||
143 | #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) | 148 | #define NEXT_SEQID(seqid) (((seqid) + 1) & 0x3fffff) |
144 | 149 | ||
145 | struct ipmi_channel | 150 | struct ipmi_channel { |
146 | { | ||
147 | unsigned char medium; | 151 | unsigned char medium; |
148 | unsigned char protocol; | 152 | unsigned char protocol; |
149 | 153 | ||
150 | /* My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, | 154 | /* |
151 | but may be changed by the user. */ | 155 | * My slave address. This is initialized to IPMI_BMC_SLAVE_ADDR, |
156 | * but may be changed by the user. | ||
157 | */ | ||
152 | unsigned char address; | 158 | unsigned char address; |
153 | 159 | ||
154 | /* My LUN. This should generally stay the SMS LUN, but just in | 160 | /* |
155 | case... */ | 161 | * My LUN. This should generally stay the SMS LUN, but just in |
162 | * case... | ||
163 | */ | ||
156 | unsigned char lun; | 164 | unsigned char lun; |
157 | }; | 165 | }; |
158 | 166 | ||
159 | #ifdef CONFIG_PROC_FS | 167 | #ifdef CONFIG_PROC_FS |
160 | struct ipmi_proc_entry | 168 | struct ipmi_proc_entry { |
161 | { | ||
162 | char *name; | 169 | char *name; |
163 | struct ipmi_proc_entry *next; | 170 | struct ipmi_proc_entry *next; |
164 | }; | 171 | }; |
165 | #endif | 172 | #endif |
166 | 173 | ||
167 | struct bmc_device | 174 | struct bmc_device { |
168 | { | ||
169 | struct platform_device *dev; | 175 | struct platform_device *dev; |
170 | struct ipmi_device_id id; | 176 | struct ipmi_device_id id; |
171 | unsigned char guid[16]; | 177 | unsigned char guid[16]; |
@@ -186,10 +192,108 @@ struct bmc_device | |||
186 | struct device_attribute aux_firmware_rev_attr; | 192 | struct device_attribute aux_firmware_rev_attr; |
187 | }; | 193 | }; |
188 | 194 | ||
195 | /* | ||
196 | * Various statistics for IPMI, these index stats[] in the ipmi_smi | ||
197 | * structure. | ||
198 | */ | ||
199 | enum ipmi_stat_indexes { | ||
200 | /* Commands we got from the user that were invalid. */ | ||
201 | IPMI_STAT_sent_invalid_commands = 0, | ||
202 | |||
203 | /* Commands we sent to the MC. */ | ||
204 | IPMI_STAT_sent_local_commands, | ||
205 | |||
206 | /* Responses from the MC that were delivered to a user. */ | ||
207 | IPMI_STAT_handled_local_responses, | ||
208 | |||
209 | /* Responses from the MC that were not delivered to a user. */ | ||
210 | IPMI_STAT_unhandled_local_responses, | ||
211 | |||
212 | /* Commands we sent out to the IPMB bus. */ | ||
213 | IPMI_STAT_sent_ipmb_commands, | ||
214 | |||
215 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
216 | IPMI_STAT_sent_ipmb_command_errs, | ||
217 | |||
218 | /* Each retransmit increments this count. */ | ||
219 | IPMI_STAT_retransmitted_ipmb_commands, | ||
220 | |||
221 | /* | ||
222 | * When a message times out (runs out of retransmits) this is | ||
223 | * incremented. | ||
224 | */ | ||
225 | IPMI_STAT_timed_out_ipmb_commands, | ||
226 | |||
227 | /* | ||
228 | * This is like above, but for broadcasts. Broadcasts are | ||
229 | * *not* included in the above count (they are expected to | ||
230 | * time out). | ||
231 | */ | ||
232 | IPMI_STAT_timed_out_ipmb_broadcasts, | ||
233 | |||
234 | /* Responses I have sent to the IPMB bus. */ | ||
235 | IPMI_STAT_sent_ipmb_responses, | ||
236 | |||
237 | /* The response was delivered to the user. */ | ||
238 | IPMI_STAT_handled_ipmb_responses, | ||
239 | |||
240 | /* The response had invalid data in it. */ | ||
241 | IPMI_STAT_invalid_ipmb_responses, | ||
242 | |||
243 | /* The response didn't have anyone waiting for it. */ | ||
244 | IPMI_STAT_unhandled_ipmb_responses, | ||
245 | |||
246 | /* Commands we sent out to the IPMB bus. */ | ||
247 | IPMI_STAT_sent_lan_commands, | ||
248 | |||
249 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
250 | IPMI_STAT_sent_lan_command_errs, | ||
251 | |||
252 | /* Each retransmit increments this count. */ | ||
253 | IPMI_STAT_retransmitted_lan_commands, | ||
254 | |||
255 | /* | ||
256 | * When a message times out (runs out of retransmits) this is | ||
257 | * incremented. | ||
258 | */ | ||
259 | IPMI_STAT_timed_out_lan_commands, | ||
260 | |||
261 | /* Responses I have sent to the IPMB bus. */ | ||
262 | IPMI_STAT_sent_lan_responses, | ||
263 | |||
264 | /* The response was delivered to the user. */ | ||
265 | IPMI_STAT_handled_lan_responses, | ||
266 | |||
267 | /* The response had invalid data in it. */ | ||
268 | IPMI_STAT_invalid_lan_responses, | ||
269 | |||
270 | /* The response didn't have anyone waiting for it. */ | ||
271 | IPMI_STAT_unhandled_lan_responses, | ||
272 | |||
273 | /* The command was delivered to the user. */ | ||
274 | IPMI_STAT_handled_commands, | ||
275 | |||
276 | /* The command had invalid data in it. */ | ||
277 | IPMI_STAT_invalid_commands, | ||
278 | |||
279 | /* The command didn't have anyone waiting for it. */ | ||
280 | IPMI_STAT_unhandled_commands, | ||
281 | |||
282 | /* Invalid data in an event. */ | ||
283 | IPMI_STAT_invalid_events, | ||
284 | |||
285 | /* Events that were received with the proper format. */ | ||
286 | IPMI_STAT_events, | ||
287 | |||
288 | |||
289 | /* This *must* remain last, add new values above this. */ | ||
290 | IPMI_NUM_STATS | ||
291 | }; | ||
292 | |||
293 | |||
189 | #define IPMI_IPMB_NUM_SEQ 64 | 294 | #define IPMI_IPMB_NUM_SEQ 64 |
190 | #define IPMI_MAX_CHANNELS 16 | 295 | #define IPMI_MAX_CHANNELS 16 |
191 | struct ipmi_smi | 296 | struct ipmi_smi { |
192 | { | ||
193 | /* What interface number are we? */ | 297 | /* What interface number are we? */ |
194 | int intf_num; | 298 | int intf_num; |
195 | 299 | ||
@@ -198,8 +302,10 @@ struct ipmi_smi | |||
198 | /* Used for a list of interfaces. */ | 302 | /* Used for a list of interfaces. */ |
199 | struct list_head link; | 303 | struct list_head link; |
200 | 304 | ||
201 | /* The list of upper layers that are using me. seq_lock | 305 | /* |
202 | * protects this. */ | 306 | * The list of upper layers that are using me. seq_lock |
307 | * protects this. | ||
308 | */ | ||
203 | struct list_head users; | 309 | struct list_head users; |
204 | 310 | ||
205 | /* Information to supply to users. */ | 311 | /* Information to supply to users. */ |
@@ -213,10 +319,12 @@ struct ipmi_smi | |||
213 | char *my_dev_name; | 319 | char *my_dev_name; |
214 | char *sysfs_name; | 320 | char *sysfs_name; |
215 | 321 | ||
216 | /* This is the lower-layer's sender routine. Note that you | 322 | /* |
323 | * This is the lower-layer's sender routine. Note that you | ||
217 | * must either be holding the ipmi_interfaces_mutex or be in | 324 | * must either be holding the ipmi_interfaces_mutex or be in |
218 | * an umpreemptible region to use this. You must fetch the | 325 | * an umpreemptible region to use this. You must fetch the |
219 | * value into a local variable and make sure it is not NULL. */ | 326 | * value into a local variable and make sure it is not NULL. |
327 | */ | ||
220 | struct ipmi_smi_handlers *handlers; | 328 | struct ipmi_smi_handlers *handlers; |
221 | void *send_info; | 329 | void *send_info; |
222 | 330 | ||
@@ -229,34 +337,45 @@ struct ipmi_smi | |||
229 | /* Driver-model device for the system interface. */ | 337 | /* Driver-model device for the system interface. */ |
230 | struct device *si_dev; | 338 | struct device *si_dev; |
231 | 339 | ||
232 | /* A table of sequence numbers for this interface. We use the | 340 | /* |
233 | sequence numbers for IPMB messages that go out of the | 341 | * A table of sequence numbers for this interface. We use the |
234 | interface to match them up with their responses. A routine | 342 | * sequence numbers for IPMB messages that go out of the |
235 | is called periodically to time the items in this list. */ | 343 | * interface to match them up with their responses. A routine |
344 | * is called periodically to time the items in this list. | ||
345 | */ | ||
236 | spinlock_t seq_lock; | 346 | spinlock_t seq_lock; |
237 | struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; | 347 | struct seq_table seq_table[IPMI_IPMB_NUM_SEQ]; |
238 | int curr_seq; | 348 | int curr_seq; |
239 | 349 | ||
240 | /* Messages that were delayed for some reason (out of memory, | 350 | /* |
241 | for instance), will go in here to be processed later in a | 351 | * Messages that were delayed for some reason (out of memory, |
242 | periodic timer interrupt. */ | 352 | * for instance), will go in here to be processed later in a |
353 | * periodic timer interrupt. | ||
354 | */ | ||
243 | spinlock_t waiting_msgs_lock; | 355 | spinlock_t waiting_msgs_lock; |
244 | struct list_head waiting_msgs; | 356 | struct list_head waiting_msgs; |
245 | 357 | ||
246 | /* The list of command receivers that are registered for commands | 358 | /* |
247 | on this interface. */ | 359 | * The list of command receivers that are registered for commands |
360 | * on this interface. | ||
361 | */ | ||
248 | struct mutex cmd_rcvrs_mutex; | 362 | struct mutex cmd_rcvrs_mutex; |
249 | struct list_head cmd_rcvrs; | 363 | struct list_head cmd_rcvrs; |
250 | 364 | ||
251 | /* Events that were queues because no one was there to receive | 365 | /* |
252 | them. */ | 366 | * Events that were queues because no one was there to receive |
367 | * them. | ||
368 | */ | ||
253 | spinlock_t events_lock; /* For dealing with event stuff. */ | 369 | spinlock_t events_lock; /* For dealing with event stuff. */ |
254 | struct list_head waiting_events; | 370 | struct list_head waiting_events; |
255 | unsigned int waiting_events_count; /* How many events in queue? */ | 371 | unsigned int waiting_events_count; /* How many events in queue? */ |
256 | int delivering_events; | 372 | char delivering_events; |
373 | char event_msg_printed; | ||
257 | 374 | ||
258 | /* The event receiver for my BMC, only really used at panic | 375 | /* |
259 | shutdown as a place to store this. */ | 376 | * The event receiver for my BMC, only really used at panic |
377 | * shutdown as a place to store this. | ||
378 | */ | ||
260 | unsigned char event_receiver; | 379 | unsigned char event_receiver; |
261 | unsigned char event_receiver_lun; | 380 | unsigned char event_receiver_lun; |
262 | unsigned char local_sel_device; | 381 | unsigned char local_sel_device; |
@@ -268,14 +387,18 @@ struct ipmi_smi | |||
268 | int auto_maintenance_timeout; | 387 | int auto_maintenance_timeout; |
269 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ | 388 | spinlock_t maintenance_mode_lock; /* Used in a timer... */ |
270 | 389 | ||
271 | /* A cheap hack, if this is non-null and a message to an | 390 | /* |
272 | interface comes in with a NULL user, call this routine with | 391 | * A cheap hack, if this is non-null and a message to an |
273 | it. Note that the message will still be freed by the | 392 | * interface comes in with a NULL user, call this routine with |
274 | caller. This only works on the system interface. */ | 393 | * it. Note that the message will still be freed by the |
394 | * caller. This only works on the system interface. | ||
395 | */ | ||
275 | void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); | 396 | void (*null_user_handler)(ipmi_smi_t intf, struct ipmi_recv_msg *msg); |
276 | 397 | ||
277 | /* When we are scanning the channels for an SMI, this will | 398 | /* |
278 | tell which channel we are scanning. */ | 399 | * When we are scanning the channels for an SMI, this will |
400 | * tell which channel we are scanning. | ||
401 | */ | ||
279 | int curr_channel; | 402 | int curr_channel; |
280 | 403 | ||
281 | /* Channel information */ | 404 | /* Channel information */ |
@@ -285,74 +408,14 @@ struct ipmi_smi | |||
285 | struct proc_dir_entry *proc_dir; | 408 | struct proc_dir_entry *proc_dir; |
286 | char proc_dir_name[10]; | 409 | char proc_dir_name[10]; |
287 | 410 | ||
288 | spinlock_t counter_lock; /* For making counters atomic. */ | 411 | atomic_t stats[IPMI_NUM_STATS]; |
289 | |||
290 | /* Commands we got that were invalid. */ | ||
291 | unsigned int sent_invalid_commands; | ||
292 | |||
293 | /* Commands we sent to the MC. */ | ||
294 | unsigned int sent_local_commands; | ||
295 | /* Responses from the MC that were delivered to a user. */ | ||
296 | unsigned int handled_local_responses; | ||
297 | /* Responses from the MC that were not delivered to a user. */ | ||
298 | unsigned int unhandled_local_responses; | ||
299 | |||
300 | /* Commands we sent out to the IPMB bus. */ | ||
301 | unsigned int sent_ipmb_commands; | ||
302 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
303 | unsigned int sent_ipmb_command_errs; | ||
304 | /* Each retransmit increments this count. */ | ||
305 | unsigned int retransmitted_ipmb_commands; | ||
306 | /* When a message times out (runs out of retransmits) this is | ||
307 | incremented. */ | ||
308 | unsigned int timed_out_ipmb_commands; | ||
309 | |||
310 | /* This is like above, but for broadcasts. Broadcasts are | ||
311 | *not* included in the above count (they are expected to | ||
312 | time out). */ | ||
313 | unsigned int timed_out_ipmb_broadcasts; | ||
314 | 412 | ||
315 | /* Responses I have sent to the IPMB bus. */ | 413 | /* |
316 | unsigned int sent_ipmb_responses; | 414 | * run_to_completion duplicate of smb_info, smi_info |
317 | 415 | * and ipmi_serial_info structures. Used to decrease numbers of | |
318 | /* The response was delivered to the user. */ | 416 | * parameters passed by "low" level IPMI code. |
319 | unsigned int handled_ipmb_responses; | 417 | */ |
320 | /* The response had invalid data in it. */ | 418 | int run_to_completion; |
321 | unsigned int invalid_ipmb_responses; | ||
322 | /* The response didn't have anyone waiting for it. */ | ||
323 | unsigned int unhandled_ipmb_responses; | ||
324 | |||
325 | /* Commands we sent out to the IPMB bus. */ | ||
326 | unsigned int sent_lan_commands; | ||
327 | /* Commands sent on the IPMB that had errors on the SEND CMD */ | ||
328 | unsigned int sent_lan_command_errs; | ||
329 | /* Each retransmit increments this count. */ | ||
330 | unsigned int retransmitted_lan_commands; | ||
331 | /* When a message times out (runs out of retransmits) this is | ||
332 | incremented. */ | ||
333 | unsigned int timed_out_lan_commands; | ||
334 | |||
335 | /* Responses I have sent to the IPMB bus. */ | ||
336 | unsigned int sent_lan_responses; | ||
337 | |||
338 | /* The response was delivered to the user. */ | ||
339 | unsigned int handled_lan_responses; | ||
340 | /* The response had invalid data in it. */ | ||
341 | unsigned int invalid_lan_responses; | ||
342 | /* The response didn't have anyone waiting for it. */ | ||
343 | unsigned int unhandled_lan_responses; | ||
344 | |||
345 | /* The command was delivered to the user. */ | ||
346 | unsigned int handled_commands; | ||
347 | /* The command had invalid data in it. */ | ||
348 | unsigned int invalid_commands; | ||
349 | /* The command didn't have anyone waiting for it. */ | ||
350 | unsigned int unhandled_commands; | ||
351 | |||
352 | /* Invalid data in an event. */ | ||
353 | unsigned int invalid_events; | ||
354 | /* Events that were received with the proper format. */ | ||
355 | unsigned int events; | ||
356 | }; | 419 | }; |
357 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) | 420 | #define to_si_intf_from_dev(device) container_of(device, struct ipmi_smi, dev) |
358 | 421 | ||
@@ -368,12 +431,19 @@ static DEFINE_MUTEX(ipmidriver_mutex); | |||
368 | static LIST_HEAD(ipmi_interfaces); | 431 | static LIST_HEAD(ipmi_interfaces); |
369 | static DEFINE_MUTEX(ipmi_interfaces_mutex); | 432 | static DEFINE_MUTEX(ipmi_interfaces_mutex); |
370 | 433 | ||
371 | /* List of watchers that want to know when smi's are added and | 434 | /* |
372 | deleted. */ | 435 | * List of watchers that want to know when smi's are added and deleted. |
436 | */ | ||
373 | static LIST_HEAD(smi_watchers); | 437 | static LIST_HEAD(smi_watchers); |
374 | static DEFINE_MUTEX(smi_watchers_mutex); | 438 | static DEFINE_MUTEX(smi_watchers_mutex); |
375 | 439 | ||
376 | 440 | ||
441 | #define ipmi_inc_stat(intf, stat) \ | ||
442 | atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) | ||
443 | #define ipmi_get_stat(intf, stat) \ | ||
444 | ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) | ||
445 | |||
446 | |||
377 | static void free_recv_msg_list(struct list_head *q) | 447 | static void free_recv_msg_list(struct list_head *q) |
378 | { | 448 | { |
379 | struct ipmi_recv_msg *msg, *msg2; | 449 | struct ipmi_recv_msg *msg, *msg2; |
@@ -417,10 +487,8 @@ static void clean_up_interface_data(ipmi_smi_t intf) | |||
417 | 487 | ||
418 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | 488 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { |
419 | if ((intf->seq_table[i].inuse) | 489 | if ((intf->seq_table[i].inuse) |
420 | && (intf->seq_table[i].recv_msg)) | 490 | && (intf->seq_table[i].recv_msg)) |
421 | { | ||
422 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | 491 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); |
423 | } | ||
424 | } | 492 | } |
425 | } | 493 | } |
426 | 494 | ||
@@ -487,6 +555,7 @@ int ipmi_smi_watcher_register(struct ipmi_smi_watcher *watcher) | |||
487 | } | 555 | } |
488 | return -ENOMEM; | 556 | return -ENOMEM; |
489 | } | 557 | } |
558 | EXPORT_SYMBOL(ipmi_smi_watcher_register); | ||
490 | 559 | ||
491 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | 560 | int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) |
492 | { | 561 | { |
@@ -495,6 +564,7 @@ int ipmi_smi_watcher_unregister(struct ipmi_smi_watcher *watcher) | |||
495 | mutex_unlock(&smi_watchers_mutex); | 564 | mutex_unlock(&smi_watchers_mutex); |
496 | return 0; | 565 | return 0; |
497 | } | 566 | } |
567 | EXPORT_SYMBOL(ipmi_smi_watcher_unregister); | ||
498 | 568 | ||
499 | /* | 569 | /* |
500 | * Must be called with smi_watchers_mutex held. | 570 | * Must be called with smi_watchers_mutex held. |
@@ -530,8 +600,7 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) | |||
530 | } | 600 | } |
531 | 601 | ||
532 | if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) | 602 | if ((addr1->addr_type == IPMI_IPMB_ADDR_TYPE) |
533 | || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 603 | || (addr1->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
534 | { | ||
535 | struct ipmi_ipmb_addr *ipmb_addr1 | 604 | struct ipmi_ipmb_addr *ipmb_addr1 |
536 | = (struct ipmi_ipmb_addr *) addr1; | 605 | = (struct ipmi_ipmb_addr *) addr1; |
537 | struct ipmi_ipmb_addr *ipmb_addr2 | 606 | struct ipmi_ipmb_addr *ipmb_addr2 |
@@ -559,9 +628,8 @@ ipmi_addr_equal(struct ipmi_addr *addr1, struct ipmi_addr *addr2) | |||
559 | 628 | ||
560 | int ipmi_validate_addr(struct ipmi_addr *addr, int len) | 629 | int ipmi_validate_addr(struct ipmi_addr *addr, int len) |
561 | { | 630 | { |
562 | if (len < sizeof(struct ipmi_system_interface_addr)) { | 631 | if (len < sizeof(struct ipmi_system_interface_addr)) |
563 | return -EINVAL; | 632 | return -EINVAL; |
564 | } | ||
565 | 633 | ||
566 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | 634 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { |
567 | if (addr->channel != IPMI_BMC_CHANNEL) | 635 | if (addr->channel != IPMI_BMC_CHANNEL) |
@@ -575,23 +643,21 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len) | |||
575 | return -EINVAL; | 643 | return -EINVAL; |
576 | 644 | ||
577 | if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) | 645 | if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) |
578 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 646 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
579 | { | 647 | if (len < sizeof(struct ipmi_ipmb_addr)) |
580 | if (len < sizeof(struct ipmi_ipmb_addr)) { | ||
581 | return -EINVAL; | 648 | return -EINVAL; |
582 | } | ||
583 | return 0; | 649 | return 0; |
584 | } | 650 | } |
585 | 651 | ||
586 | if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { | 652 | if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { |
587 | if (len < sizeof(struct ipmi_lan_addr)) { | 653 | if (len < sizeof(struct ipmi_lan_addr)) |
588 | return -EINVAL; | 654 | return -EINVAL; |
589 | } | ||
590 | return 0; | 655 | return 0; |
591 | } | 656 | } |
592 | 657 | ||
593 | return -EINVAL; | 658 | return -EINVAL; |
594 | } | 659 | } |
660 | EXPORT_SYMBOL(ipmi_validate_addr); | ||
595 | 661 | ||
596 | unsigned int ipmi_addr_length(int addr_type) | 662 | unsigned int ipmi_addr_length(int addr_type) |
597 | { | 663 | { |
@@ -599,34 +665,28 @@ unsigned int ipmi_addr_length(int addr_type) | |||
599 | return sizeof(struct ipmi_system_interface_addr); | 665 | return sizeof(struct ipmi_system_interface_addr); |
600 | 666 | ||
601 | if ((addr_type == IPMI_IPMB_ADDR_TYPE) | 667 | if ((addr_type == IPMI_IPMB_ADDR_TYPE) |
602 | || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 668 | || (addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) |
603 | { | ||
604 | return sizeof(struct ipmi_ipmb_addr); | 669 | return sizeof(struct ipmi_ipmb_addr); |
605 | } | ||
606 | 670 | ||
607 | if (addr_type == IPMI_LAN_ADDR_TYPE) | 671 | if (addr_type == IPMI_LAN_ADDR_TYPE) |
608 | return sizeof(struct ipmi_lan_addr); | 672 | return sizeof(struct ipmi_lan_addr); |
609 | 673 | ||
610 | return 0; | 674 | return 0; |
611 | } | 675 | } |
676 | EXPORT_SYMBOL(ipmi_addr_length); | ||
612 | 677 | ||
613 | static void deliver_response(struct ipmi_recv_msg *msg) | 678 | static void deliver_response(struct ipmi_recv_msg *msg) |
614 | { | 679 | { |
615 | if (!msg->user) { | 680 | if (!msg->user) { |
616 | ipmi_smi_t intf = msg->user_msg_data; | 681 | ipmi_smi_t intf = msg->user_msg_data; |
617 | unsigned long flags; | ||
618 | 682 | ||
619 | /* Special handling for NULL users. */ | 683 | /* Special handling for NULL users. */ |
620 | if (intf->null_user_handler) { | 684 | if (intf->null_user_handler) { |
621 | intf->null_user_handler(intf, msg); | 685 | intf->null_user_handler(intf, msg); |
622 | spin_lock_irqsave(&intf->counter_lock, flags); | 686 | ipmi_inc_stat(intf, handled_local_responses); |
623 | intf->handled_local_responses++; | ||
624 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
625 | } else { | 687 | } else { |
626 | /* No handler, so give up. */ | 688 | /* No handler, so give up. */ |
627 | spin_lock_irqsave(&intf->counter_lock, flags); | 689 | ipmi_inc_stat(intf, unhandled_local_responses); |
628 | intf->unhandled_local_responses++; | ||
629 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
630 | } | 690 | } |
631 | ipmi_free_recv_msg(msg); | 691 | ipmi_free_recv_msg(msg); |
632 | } else { | 692 | } else { |
@@ -646,9 +706,11 @@ deliver_err_response(struct ipmi_recv_msg *msg, int err) | |||
646 | deliver_response(msg); | 706 | deliver_response(msg); |
647 | } | 707 | } |
648 | 708 | ||
649 | /* Find the next sequence number not being used and add the given | 709 | /* |
650 | message with the given timeout to the sequence table. This must be | 710 | * Find the next sequence number not being used and add the given |
651 | called with the interface's seq_lock held. */ | 711 | * message with the given timeout to the sequence table. This must be |
712 | * called with the interface's seq_lock held. | ||
713 | */ | ||
652 | static int intf_next_seq(ipmi_smi_t intf, | 714 | static int intf_next_seq(ipmi_smi_t intf, |
653 | struct ipmi_recv_msg *recv_msg, | 715 | struct ipmi_recv_msg *recv_msg, |
654 | unsigned long timeout, | 716 | unsigned long timeout, |
@@ -660,10 +722,8 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
660 | int rv = 0; | 722 | int rv = 0; |
661 | unsigned int i; | 723 | unsigned int i; |
662 | 724 | ||
663 | for (i = intf->curr_seq; | 725 | for (i = intf->curr_seq; (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; |
664 | (i+1)%IPMI_IPMB_NUM_SEQ != intf->curr_seq; | 726 | i = (i+1)%IPMI_IPMB_NUM_SEQ) { |
665 | i = (i+1)%IPMI_IPMB_NUM_SEQ) | ||
666 | { | ||
667 | if (!intf->seq_table[i].inuse) | 727 | if (!intf->seq_table[i].inuse) |
668 | break; | 728 | break; |
669 | } | 729 | } |
@@ -671,8 +731,10 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
671 | if (!intf->seq_table[i].inuse) { | 731 | if (!intf->seq_table[i].inuse) { |
672 | intf->seq_table[i].recv_msg = recv_msg; | 732 | intf->seq_table[i].recv_msg = recv_msg; |
673 | 733 | ||
674 | /* Start with the maximum timeout, when the send response | 734 | /* |
675 | comes in we will start the real timer. */ | 735 | * Start with the maximum timeout, when the send response |
736 | * comes in we will start the real timer. | ||
737 | */ | ||
676 | intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; | 738 | intf->seq_table[i].timeout = MAX_MSG_TIMEOUT; |
677 | intf->seq_table[i].orig_timeout = timeout; | 739 | intf->seq_table[i].orig_timeout = timeout; |
678 | intf->seq_table[i].retries_left = retries; | 740 | intf->seq_table[i].retries_left = retries; |
@@ -685,15 +747,17 @@ static int intf_next_seq(ipmi_smi_t intf, | |||
685 | } else { | 747 | } else { |
686 | rv = -EAGAIN; | 748 | rv = -EAGAIN; |
687 | } | 749 | } |
688 | 750 | ||
689 | return rv; | 751 | return rv; |
690 | } | 752 | } |
691 | 753 | ||
692 | /* Return the receive message for the given sequence number and | 754 | /* |
693 | release the sequence number so it can be reused. Some other data | 755 | * Return the receive message for the given sequence number and |
694 | is passed in to be sure the message matches up correctly (to help | 756 | * release the sequence number so it can be reused. Some other data |
695 | guard against message coming in after their timeout and the | 757 | * is passed in to be sure the message matches up correctly (to help |
696 | sequence number being reused). */ | 758 | * guard against message coming in after their timeout and the |
759 | * sequence number being reused). | ||
760 | */ | ||
697 | static int intf_find_seq(ipmi_smi_t intf, | 761 | static int intf_find_seq(ipmi_smi_t intf, |
698 | unsigned char seq, | 762 | unsigned char seq, |
699 | short channel, | 763 | short channel, |
@@ -712,11 +776,9 @@ static int intf_find_seq(ipmi_smi_t intf, | |||
712 | if (intf->seq_table[seq].inuse) { | 776 | if (intf->seq_table[seq].inuse) { |
713 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; | 777 | struct ipmi_recv_msg *msg = intf->seq_table[seq].recv_msg; |
714 | 778 | ||
715 | if ((msg->addr.channel == channel) | 779 | if ((msg->addr.channel == channel) && (msg->msg.cmd == cmd) |
716 | && (msg->msg.cmd == cmd) | 780 | && (msg->msg.netfn == netfn) |
717 | && (msg->msg.netfn == netfn) | 781 | && (ipmi_addr_equal(addr, &(msg->addr)))) { |
718 | && (ipmi_addr_equal(addr, &(msg->addr)))) | ||
719 | { | ||
720 | *recv_msg = msg; | 782 | *recv_msg = msg; |
721 | intf->seq_table[seq].inuse = 0; | 783 | intf->seq_table[seq].inuse = 0; |
722 | rv = 0; | 784 | rv = 0; |
@@ -741,11 +803,12 @@ static int intf_start_seq_timer(ipmi_smi_t intf, | |||
741 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 803 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
742 | 804 | ||
743 | spin_lock_irqsave(&(intf->seq_lock), flags); | 805 | spin_lock_irqsave(&(intf->seq_lock), flags); |
744 | /* We do this verification because the user can be deleted | 806 | /* |
745 | while a message is outstanding. */ | 807 | * We do this verification because the user can be deleted |
808 | * while a message is outstanding. | ||
809 | */ | ||
746 | if ((intf->seq_table[seq].inuse) | 810 | if ((intf->seq_table[seq].inuse) |
747 | && (intf->seq_table[seq].seqid == seqid)) | 811 | && (intf->seq_table[seq].seqid == seqid)) { |
748 | { | ||
749 | struct seq_table *ent = &(intf->seq_table[seq]); | 812 | struct seq_table *ent = &(intf->seq_table[seq]); |
750 | ent->timeout = ent->orig_timeout; | 813 | ent->timeout = ent->orig_timeout; |
751 | rv = 0; | 814 | rv = 0; |
@@ -770,11 +833,12 @@ static int intf_err_seq(ipmi_smi_t intf, | |||
770 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); | 833 | GET_SEQ_FROM_MSGID(msgid, seq, seqid); |
771 | 834 | ||
772 | spin_lock_irqsave(&(intf->seq_lock), flags); | 835 | spin_lock_irqsave(&(intf->seq_lock), flags); |
773 | /* We do this verification because the user can be deleted | 836 | /* |
774 | while a message is outstanding. */ | 837 | * We do this verification because the user can be deleted |
838 | * while a message is outstanding. | ||
839 | */ | ||
775 | if ((intf->seq_table[seq].inuse) | 840 | if ((intf->seq_table[seq].inuse) |
776 | && (intf->seq_table[seq].seqid == seqid)) | 841 | && (intf->seq_table[seq].seqid == seqid)) { |
777 | { | ||
778 | struct seq_table *ent = &(intf->seq_table[seq]); | 842 | struct seq_table *ent = &(intf->seq_table[seq]); |
779 | 843 | ||
780 | ent->inuse = 0; | 844 | ent->inuse = 0; |
@@ -800,24 +864,30 @@ int ipmi_create_user(unsigned int if_num, | |||
800 | int rv = 0; | 864 | int rv = 0; |
801 | ipmi_smi_t intf; | 865 | ipmi_smi_t intf; |
802 | 866 | ||
803 | /* There is no module usecount here, because it's not | 867 | /* |
804 | required. Since this can only be used by and called from | 868 | * There is no module usecount here, because it's not |
805 | other modules, they will implicitly use this module, and | 869 | * required. Since this can only be used by and called from |
806 | thus this can't be removed unless the other modules are | 870 | * other modules, they will implicitly use this module, and |
807 | removed. */ | 871 | * thus this can't be removed unless the other modules are |
872 | * removed. | ||
873 | */ | ||
808 | 874 | ||
809 | if (handler == NULL) | 875 | if (handler == NULL) |
810 | return -EINVAL; | 876 | return -EINVAL; |
811 | 877 | ||
812 | /* Make sure the driver is actually initialized, this handles | 878 | /* |
813 | problems with initialization order. */ | 879 | * Make sure the driver is actually initialized, this handles |
880 | * problems with initialization order. | ||
881 | */ | ||
814 | if (!initialized) { | 882 | if (!initialized) { |
815 | rv = ipmi_init_msghandler(); | 883 | rv = ipmi_init_msghandler(); |
816 | if (rv) | 884 | if (rv) |
817 | return rv; | 885 | return rv; |
818 | 886 | ||
819 | /* The init code doesn't return an error if it was turned | 887 | /* |
820 | off, but it won't initialize. Check that. */ | 888 | * The init code doesn't return an error if it was turned |
889 | * off, but it won't initialize. Check that. | ||
890 | */ | ||
821 | if (!initialized) | 891 | if (!initialized) |
822 | return -ENODEV; | 892 | return -ENODEV; |
823 | } | 893 | } |
@@ -858,8 +928,10 @@ int ipmi_create_user(unsigned int if_num, | |||
858 | } | 928 | } |
859 | } | 929 | } |
860 | 930 | ||
861 | /* Hold the lock so intf->handlers is guaranteed to be good | 931 | /* |
862 | * until now */ | 932 | * Hold the lock so intf->handlers is guaranteed to be good |
933 | * until now | ||
934 | */ | ||
863 | mutex_unlock(&ipmi_interfaces_mutex); | 935 | mutex_unlock(&ipmi_interfaces_mutex); |
864 | 936 | ||
865 | new_user->valid = 1; | 937 | new_user->valid = 1; |
@@ -876,6 +948,7 @@ out_kfree: | |||
876 | kfree(new_user); | 948 | kfree(new_user); |
877 | return rv; | 949 | return rv; |
878 | } | 950 | } |
951 | EXPORT_SYMBOL(ipmi_create_user); | ||
879 | 952 | ||
880 | static void free_user(struct kref *ref) | 953 | static void free_user(struct kref *ref) |
881 | { | 954 | { |
@@ -899,8 +972,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
899 | 972 | ||
900 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { | 973 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) { |
901 | if (intf->seq_table[i].inuse | 974 | if (intf->seq_table[i].inuse |
902 | && (intf->seq_table[i].recv_msg->user == user)) | 975 | && (intf->seq_table[i].recv_msg->user == user)) { |
903 | { | ||
904 | intf->seq_table[i].inuse = 0; | 976 | intf->seq_table[i].inuse = 0; |
905 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); | 977 | ipmi_free_recv_msg(intf->seq_table[i].recv_msg); |
906 | } | 978 | } |
@@ -943,6 +1015,7 @@ int ipmi_destroy_user(ipmi_user_t user) | |||
943 | 1015 | ||
944 | return 0; | 1016 | return 0; |
945 | } | 1017 | } |
1018 | EXPORT_SYMBOL(ipmi_destroy_user); | ||
946 | 1019 | ||
947 | void ipmi_get_version(ipmi_user_t user, | 1020 | void ipmi_get_version(ipmi_user_t user, |
948 | unsigned char *major, | 1021 | unsigned char *major, |
@@ -951,6 +1024,7 @@ void ipmi_get_version(ipmi_user_t user, | |||
951 | *major = user->intf->ipmi_version_major; | 1024 | *major = user->intf->ipmi_version_major; |
952 | *minor = user->intf->ipmi_version_minor; | 1025 | *minor = user->intf->ipmi_version_minor; |
953 | } | 1026 | } |
1027 | EXPORT_SYMBOL(ipmi_get_version); | ||
954 | 1028 | ||
955 | int ipmi_set_my_address(ipmi_user_t user, | 1029 | int ipmi_set_my_address(ipmi_user_t user, |
956 | unsigned int channel, | 1030 | unsigned int channel, |
@@ -961,6 +1035,7 @@ int ipmi_set_my_address(ipmi_user_t user, | |||
961 | user->intf->channels[channel].address = address; | 1035 | user->intf->channels[channel].address = address; |
962 | return 0; | 1036 | return 0; |
963 | } | 1037 | } |
1038 | EXPORT_SYMBOL(ipmi_set_my_address); | ||
964 | 1039 | ||
965 | int ipmi_get_my_address(ipmi_user_t user, | 1040 | int ipmi_get_my_address(ipmi_user_t user, |
966 | unsigned int channel, | 1041 | unsigned int channel, |
@@ -971,6 +1046,7 @@ int ipmi_get_my_address(ipmi_user_t user, | |||
971 | *address = user->intf->channels[channel].address; | 1046 | *address = user->intf->channels[channel].address; |
972 | return 0; | 1047 | return 0; |
973 | } | 1048 | } |
1049 | EXPORT_SYMBOL(ipmi_get_my_address); | ||
974 | 1050 | ||
975 | int ipmi_set_my_LUN(ipmi_user_t user, | 1051 | int ipmi_set_my_LUN(ipmi_user_t user, |
976 | unsigned int channel, | 1052 | unsigned int channel, |
@@ -981,6 +1057,7 @@ int ipmi_set_my_LUN(ipmi_user_t user, | |||
981 | user->intf->channels[channel].lun = LUN & 0x3; | 1057 | user->intf->channels[channel].lun = LUN & 0x3; |
982 | return 0; | 1058 | return 0; |
983 | } | 1059 | } |
1060 | EXPORT_SYMBOL(ipmi_set_my_LUN); | ||
984 | 1061 | ||
985 | int ipmi_get_my_LUN(ipmi_user_t user, | 1062 | int ipmi_get_my_LUN(ipmi_user_t user, |
986 | unsigned int channel, | 1063 | unsigned int channel, |
@@ -991,6 +1068,7 @@ int ipmi_get_my_LUN(ipmi_user_t user, | |||
991 | *address = user->intf->channels[channel].lun; | 1068 | *address = user->intf->channels[channel].lun; |
992 | return 0; | 1069 | return 0; |
993 | } | 1070 | } |
1071 | EXPORT_SYMBOL(ipmi_get_my_LUN); | ||
994 | 1072 | ||
995 | int ipmi_get_maintenance_mode(ipmi_user_t user) | 1073 | int ipmi_get_maintenance_mode(ipmi_user_t user) |
996 | { | 1074 | { |
@@ -1075,6 +1153,11 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
1075 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) | 1153 | list_for_each_entry_safe(msg, msg2, &intf->waiting_events, link) |
1076 | list_move_tail(&msg->link, &msgs); | 1154 | list_move_tail(&msg->link, &msgs); |
1077 | intf->waiting_events_count = 0; | 1155 | intf->waiting_events_count = 0; |
1156 | if (intf->event_msg_printed) { | ||
1157 | printk(KERN_WARNING PFX "Event queue no longer" | ||
1158 | " full\n"); | ||
1159 | intf->event_msg_printed = 0; | ||
1160 | } | ||
1078 | 1161 | ||
1079 | intf->delivering_events = 1; | 1162 | intf->delivering_events = 1; |
1080 | spin_unlock_irqrestore(&intf->events_lock, flags); | 1163 | spin_unlock_irqrestore(&intf->events_lock, flags); |
@@ -1094,6 +1177,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val) | |||
1094 | 1177 | ||
1095 | return 0; | 1178 | return 0; |
1096 | } | 1179 | } |
1180 | EXPORT_SYMBOL(ipmi_set_gets_events); | ||
1097 | 1181 | ||
1098 | static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, | 1182 | static struct cmd_rcvr *find_cmd_rcvr(ipmi_smi_t intf, |
1099 | unsigned char netfn, | 1183 | unsigned char netfn, |
@@ -1159,6 +1243,7 @@ int ipmi_register_for_cmd(ipmi_user_t user, | |||
1159 | 1243 | ||
1160 | return rv; | 1244 | return rv; |
1161 | } | 1245 | } |
1246 | EXPORT_SYMBOL(ipmi_register_for_cmd); | ||
1162 | 1247 | ||
1163 | int ipmi_unregister_for_cmd(ipmi_user_t user, | 1248 | int ipmi_unregister_for_cmd(ipmi_user_t user, |
1164 | unsigned char netfn, | 1249 | unsigned char netfn, |
@@ -1196,19 +1281,13 @@ int ipmi_unregister_for_cmd(ipmi_user_t user, | |||
1196 | } | 1281 | } |
1197 | return rv; | 1282 | return rv; |
1198 | } | 1283 | } |
1199 | 1284 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); | |
1200 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val) | ||
1201 | { | ||
1202 | ipmi_smi_t intf = user->intf; | ||
1203 | if (intf->handlers) | ||
1204 | intf->handlers->set_run_to_completion(intf->send_info, val); | ||
1205 | } | ||
1206 | 1285 | ||
1207 | static unsigned char | 1286 | static unsigned char |
1208 | ipmb_checksum(unsigned char *data, int size) | 1287 | ipmb_checksum(unsigned char *data, int size) |
1209 | { | 1288 | { |
1210 | unsigned char csum = 0; | 1289 | unsigned char csum = 0; |
1211 | 1290 | ||
1212 | for (; size > 0; size--, data++) | 1291 | for (; size > 0; size--, data++) |
1213 | csum += *data; | 1292 | csum += *data; |
1214 | 1293 | ||
@@ -1250,8 +1329,10 @@ static inline void format_ipmb_msg(struct ipmi_smi_msg *smi_msg, | |||
1250 | = ipmb_checksum(&(smi_msg->data[i+6]), | 1329 | = ipmb_checksum(&(smi_msg->data[i+6]), |
1251 | smi_msg->data_size-6); | 1330 | smi_msg->data_size-6); |
1252 | 1331 | ||
1253 | /* Add on the checksum size and the offset from the | 1332 | /* |
1254 | broadcast. */ | 1333 | * Add on the checksum size and the offset from the |
1334 | * broadcast. | ||
1335 | */ | ||
1255 | smi_msg->data_size += 1 + i; | 1336 | smi_msg->data_size += 1 + i; |
1256 | 1337 | ||
1257 | smi_msg->msgid = msgid; | 1338 | smi_msg->msgid = msgid; |
@@ -1287,17 +1368,21 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg, | |||
1287 | = ipmb_checksum(&(smi_msg->data[7]), | 1368 | = ipmb_checksum(&(smi_msg->data[7]), |
1288 | smi_msg->data_size-7); | 1369 | smi_msg->data_size-7); |
1289 | 1370 | ||
1290 | /* Add on the checksum size and the offset from the | 1371 | /* |
1291 | broadcast. */ | 1372 | * Add on the checksum size and the offset from the |
1373 | * broadcast. | ||
1374 | */ | ||
1292 | smi_msg->data_size += 1; | 1375 | smi_msg->data_size += 1; |
1293 | 1376 | ||
1294 | smi_msg->msgid = msgid; | 1377 | smi_msg->msgid = msgid; |
1295 | } | 1378 | } |
1296 | 1379 | ||
1297 | /* Separate from ipmi_request so that the user does not have to be | 1380 | /* |
1298 | supplied in certain circumstances (mainly at panic time). If | 1381 | * Separate from ipmi_request so that the user does not have to be |
1299 | messages are supplied, they will be freed, even if an error | 1382 | * supplied in certain circumstances (mainly at panic time). If |
1300 | occurs. */ | 1383 | * messages are supplied, they will be freed, even if an error |
1384 | * occurs. | ||
1385 | */ | ||
1301 | static int i_ipmi_request(ipmi_user_t user, | 1386 | static int i_ipmi_request(ipmi_user_t user, |
1302 | ipmi_smi_t intf, | 1387 | ipmi_smi_t intf, |
1303 | struct ipmi_addr *addr, | 1388 | struct ipmi_addr *addr, |
@@ -1319,19 +1404,18 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1319 | struct ipmi_smi_handlers *handlers; | 1404 | struct ipmi_smi_handlers *handlers; |
1320 | 1405 | ||
1321 | 1406 | ||
1322 | if (supplied_recv) { | 1407 | if (supplied_recv) |
1323 | recv_msg = supplied_recv; | 1408 | recv_msg = supplied_recv; |
1324 | } else { | 1409 | else { |
1325 | recv_msg = ipmi_alloc_recv_msg(); | 1410 | recv_msg = ipmi_alloc_recv_msg(); |
1326 | if (recv_msg == NULL) { | 1411 | if (recv_msg == NULL) |
1327 | return -ENOMEM; | 1412 | return -ENOMEM; |
1328 | } | ||
1329 | } | 1413 | } |
1330 | recv_msg->user_msg_data = user_msg_data; | 1414 | recv_msg->user_msg_data = user_msg_data; |
1331 | 1415 | ||
1332 | if (supplied_smi) { | 1416 | if (supplied_smi) |
1333 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; | 1417 | smi_msg = (struct ipmi_smi_msg *) supplied_smi; |
1334 | } else { | 1418 | else { |
1335 | smi_msg = ipmi_alloc_smi_msg(); | 1419 | smi_msg = ipmi_alloc_smi_msg(); |
1336 | if (smi_msg == NULL) { | 1420 | if (smi_msg == NULL) { |
1337 | ipmi_free_recv_msg(recv_msg); | 1421 | ipmi_free_recv_msg(recv_msg); |
@@ -1350,8 +1434,10 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1350 | if (user) | 1434 | if (user) |
1351 | kref_get(&user->refcount); | 1435 | kref_get(&user->refcount); |
1352 | recv_msg->msgid = msgid; | 1436 | recv_msg->msgid = msgid; |
1353 | /* Store the message to send in the receive message so timeout | 1437 | /* |
1354 | responses can get the proper response data. */ | 1438 | * Store the message to send in the receive message so timeout |
1439 | * responses can get the proper response data. | ||
1440 | */ | ||
1355 | recv_msg->msg = *msg; | 1441 | recv_msg->msg = *msg; |
1356 | 1442 | ||
1357 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { | 1443 | if (addr->addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) { |
@@ -1365,9 +1451,7 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1365 | 1451 | ||
1366 | smi_addr = (struct ipmi_system_interface_addr *) addr; | 1452 | smi_addr = (struct ipmi_system_interface_addr *) addr; |
1367 | if (smi_addr->lun > 3) { | 1453 | if (smi_addr->lun > 3) { |
1368 | spin_lock_irqsave(&intf->counter_lock, flags); | 1454 | ipmi_inc_stat(intf, sent_invalid_commands); |
1369 | intf->sent_invalid_commands++; | ||
1370 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1371 | rv = -EINVAL; | 1455 | rv = -EINVAL; |
1372 | goto out_err; | 1456 | goto out_err; |
1373 | } | 1457 | } |
@@ -1377,13 +1461,12 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1377 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1461 | if ((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1378 | && ((msg->cmd == IPMI_SEND_MSG_CMD) | 1462 | && ((msg->cmd == IPMI_SEND_MSG_CMD) |
1379 | || (msg->cmd == IPMI_GET_MSG_CMD) | 1463 | || (msg->cmd == IPMI_GET_MSG_CMD) |
1380 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) | 1464 | || (msg->cmd == IPMI_READ_EVENT_MSG_BUFFER_CMD))) { |
1381 | { | 1465 | /* |
1382 | /* We don't let the user do these, since we manage | 1466 | * We don't let the user do these, since we manage |
1383 | the sequence numbers. */ | 1467 | * the sequence numbers. |
1384 | spin_lock_irqsave(&intf->counter_lock, flags); | 1468 | */ |
1385 | intf->sent_invalid_commands++; | 1469 | ipmi_inc_stat(intf, sent_invalid_commands); |
1386 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1387 | rv = -EINVAL; | 1470 | rv = -EINVAL; |
1388 | goto out_err; | 1471 | goto out_err; |
1389 | } | 1472 | } |
@@ -1391,14 +1474,12 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1391 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) | 1474 | if (((msg->netfn == IPMI_NETFN_APP_REQUEST) |
1392 | && ((msg->cmd == IPMI_COLD_RESET_CMD) | 1475 | && ((msg->cmd == IPMI_COLD_RESET_CMD) |
1393 | || (msg->cmd == IPMI_WARM_RESET_CMD))) | 1476 | || (msg->cmd == IPMI_WARM_RESET_CMD))) |
1394 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) | 1477 | || (msg->netfn == IPMI_NETFN_FIRMWARE_REQUEST)) { |
1395 | { | ||
1396 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); | 1478 | spin_lock_irqsave(&intf->maintenance_mode_lock, flags); |
1397 | intf->auto_maintenance_timeout | 1479 | intf->auto_maintenance_timeout |
1398 | = IPMI_MAINTENANCE_MODE_TIMEOUT; | 1480 | = IPMI_MAINTENANCE_MODE_TIMEOUT; |
1399 | if (!intf->maintenance_mode | 1481 | if (!intf->maintenance_mode |
1400 | && !intf->maintenance_mode_enable) | 1482 | && !intf->maintenance_mode_enable) { |
1401 | { | ||
1402 | intf->maintenance_mode_enable = 1; | 1483 | intf->maintenance_mode_enable = 1; |
1403 | maintenance_mode_update(intf); | 1484 | maintenance_mode_update(intf); |
1404 | } | 1485 | } |
@@ -1407,9 +1488,7 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1407 | } | 1488 | } |
1408 | 1489 | ||
1409 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { | 1490 | if ((msg->data_len + 2) > IPMI_MAX_MSG_LENGTH) { |
1410 | spin_lock_irqsave(&intf->counter_lock, flags); | 1491 | ipmi_inc_stat(intf, sent_invalid_commands); |
1411 | intf->sent_invalid_commands++; | ||
1412 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1413 | rv = -EMSGSIZE; | 1492 | rv = -EMSGSIZE; |
1414 | goto out_err; | 1493 | goto out_err; |
1415 | } | 1494 | } |
@@ -1421,31 +1500,23 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1421 | if (msg->data_len > 0) | 1500 | if (msg->data_len > 0) |
1422 | memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); | 1501 | memcpy(&(smi_msg->data[2]), msg->data, msg->data_len); |
1423 | smi_msg->data_size = msg->data_len + 2; | 1502 | smi_msg->data_size = msg->data_len + 2; |
1424 | spin_lock_irqsave(&intf->counter_lock, flags); | 1503 | ipmi_inc_stat(intf, sent_local_commands); |
1425 | intf->sent_local_commands++; | ||
1426 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1427 | } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) | 1504 | } else if ((addr->addr_type == IPMI_IPMB_ADDR_TYPE) |
1428 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) | 1505 | || (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE)) { |
1429 | { | ||
1430 | struct ipmi_ipmb_addr *ipmb_addr; | 1506 | struct ipmi_ipmb_addr *ipmb_addr; |
1431 | unsigned char ipmb_seq; | 1507 | unsigned char ipmb_seq; |
1432 | long seqid; | 1508 | long seqid; |
1433 | int broadcast = 0; | 1509 | int broadcast = 0; |
1434 | 1510 | ||
1435 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1511 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1436 | spin_lock_irqsave(&intf->counter_lock, flags); | 1512 | ipmi_inc_stat(intf, sent_invalid_commands); |
1437 | intf->sent_invalid_commands++; | ||
1438 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1439 | rv = -EINVAL; | 1513 | rv = -EINVAL; |
1440 | goto out_err; | 1514 | goto out_err; |
1441 | } | 1515 | } |
1442 | 1516 | ||
1443 | if (intf->channels[addr->channel].medium | 1517 | if (intf->channels[addr->channel].medium |
1444 | != IPMI_CHANNEL_MEDIUM_IPMB) | 1518 | != IPMI_CHANNEL_MEDIUM_IPMB) { |
1445 | { | 1519 | ipmi_inc_stat(intf, sent_invalid_commands); |
1446 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
1447 | intf->sent_invalid_commands++; | ||
1448 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1449 | rv = -EINVAL; | 1520 | rv = -EINVAL; |
1450 | goto out_err; | 1521 | goto out_err; |
1451 | } | 1522 | } |
@@ -1457,9 +1528,11 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1457 | retries = 4; | 1528 | retries = 4; |
1458 | } | 1529 | } |
1459 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { | 1530 | if (addr->addr_type == IPMI_IPMB_BROADCAST_ADDR_TYPE) { |
1460 | /* Broadcasts add a zero at the beginning of the | 1531 | /* |
1461 | message, but otherwise is the same as an IPMB | 1532 | * Broadcasts add a zero at the beginning of the |
1462 | address. */ | 1533 | * message, but otherwise is the same as an IPMB |
1534 | * address. | ||
1535 | */ | ||
1463 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; | 1536 | addr->addr_type = IPMI_IPMB_ADDR_TYPE; |
1464 | broadcast = 1; | 1537 | broadcast = 1; |
1465 | } | 1538 | } |
@@ -1469,21 +1542,19 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1469 | if (retry_time_ms == 0) | 1542 | if (retry_time_ms == 0) |
1470 | retry_time_ms = 1000; | 1543 | retry_time_ms = 1000; |
1471 | 1544 | ||
1472 | /* 9 for the header and 1 for the checksum, plus | 1545 | /* |
1473 | possibly one for the broadcast. */ | 1546 | * 9 for the header and 1 for the checksum, plus |
1547 | * possibly one for the broadcast. | ||
1548 | */ | ||
1474 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { | 1549 | if ((msg->data_len + 10 + broadcast) > IPMI_MAX_MSG_LENGTH) { |
1475 | spin_lock_irqsave(&intf->counter_lock, flags); | 1550 | ipmi_inc_stat(intf, sent_invalid_commands); |
1476 | intf->sent_invalid_commands++; | ||
1477 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1478 | rv = -EMSGSIZE; | 1551 | rv = -EMSGSIZE; |
1479 | goto out_err; | 1552 | goto out_err; |
1480 | } | 1553 | } |
1481 | 1554 | ||
1482 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; | 1555 | ipmb_addr = (struct ipmi_ipmb_addr *) addr; |
1483 | if (ipmb_addr->lun > 3) { | 1556 | if (ipmb_addr->lun > 3) { |
1484 | spin_lock_irqsave(&intf->counter_lock, flags); | 1557 | ipmi_inc_stat(intf, sent_invalid_commands); |
1485 | intf->sent_invalid_commands++; | ||
1486 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1487 | rv = -EINVAL; | 1558 | rv = -EINVAL; |
1488 | goto out_err; | 1559 | goto out_err; |
1489 | } | 1560 | } |
@@ -1491,29 +1562,31 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1491 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); | 1562 | memcpy(&recv_msg->addr, ipmb_addr, sizeof(*ipmb_addr)); |
1492 | 1563 | ||
1493 | if (recv_msg->msg.netfn & 0x1) { | 1564 | if (recv_msg->msg.netfn & 0x1) { |
1494 | /* It's a response, so use the user's sequence | 1565 | /* |
1495 | from msgid. */ | 1566 | * It's a response, so use the user's sequence |
1496 | spin_lock_irqsave(&intf->counter_lock, flags); | 1567 | * from msgid. |
1497 | intf->sent_ipmb_responses++; | 1568 | */ |
1498 | spin_unlock_irqrestore(&intf->counter_lock, flags); | 1569 | ipmi_inc_stat(intf, sent_ipmb_responses); |
1499 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, | 1570 | format_ipmb_msg(smi_msg, msg, ipmb_addr, msgid, |
1500 | msgid, broadcast, | 1571 | msgid, broadcast, |
1501 | source_address, source_lun); | 1572 | source_address, source_lun); |
1502 | 1573 | ||
1503 | /* Save the receive message so we can use it | 1574 | /* |
1504 | to deliver the response. */ | 1575 | * Save the receive message so we can use it |
1576 | * to deliver the response. | ||
1577 | */ | ||
1505 | smi_msg->user_data = recv_msg; | 1578 | smi_msg->user_data = recv_msg; |
1506 | } else { | 1579 | } else { |
1507 | /* It's a command, so get a sequence for it. */ | 1580 | /* It's a command, so get a sequence for it. */ |
1508 | 1581 | ||
1509 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1582 | spin_lock_irqsave(&(intf->seq_lock), flags); |
1510 | 1583 | ||
1511 | spin_lock(&intf->counter_lock); | 1584 | ipmi_inc_stat(intf, sent_ipmb_commands); |
1512 | intf->sent_ipmb_commands++; | ||
1513 | spin_unlock(&intf->counter_lock); | ||
1514 | 1585 | ||
1515 | /* Create a sequence number with a 1 second | 1586 | /* |
1516 | timeout and 4 retries. */ | 1587 | * Create a sequence number with a 1 second |
1588 | * timeout and 4 retries. | ||
1589 | */ | ||
1517 | rv = intf_next_seq(intf, | 1590 | rv = intf_next_seq(intf, |
1518 | recv_msg, | 1591 | recv_msg, |
1519 | retry_time_ms, | 1592 | retry_time_ms, |
@@ -1522,34 +1595,42 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1522 | &ipmb_seq, | 1595 | &ipmb_seq, |
1523 | &seqid); | 1596 | &seqid); |
1524 | if (rv) { | 1597 | if (rv) { |
1525 | /* We have used up all the sequence numbers, | 1598 | /* |
1526 | probably, so abort. */ | 1599 | * We have used up all the sequence numbers, |
1600 | * probably, so abort. | ||
1601 | */ | ||
1527 | spin_unlock_irqrestore(&(intf->seq_lock), | 1602 | spin_unlock_irqrestore(&(intf->seq_lock), |
1528 | flags); | 1603 | flags); |
1529 | goto out_err; | 1604 | goto out_err; |
1530 | } | 1605 | } |
1531 | 1606 | ||
1532 | /* Store the sequence number in the message, | 1607 | /* |
1533 | so that when the send message response | 1608 | * Store the sequence number in the message, |
1534 | comes back we can start the timer. */ | 1609 | * so that when the send message response |
1610 | * comes back we can start the timer. | ||
1611 | */ | ||
1535 | format_ipmb_msg(smi_msg, msg, ipmb_addr, | 1612 | format_ipmb_msg(smi_msg, msg, ipmb_addr, |
1536 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 1613 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1537 | ipmb_seq, broadcast, | 1614 | ipmb_seq, broadcast, |
1538 | source_address, source_lun); | 1615 | source_address, source_lun); |
1539 | 1616 | ||
1540 | /* Copy the message into the recv message data, so we | 1617 | /* |
1541 | can retransmit it later if necessary. */ | 1618 | * Copy the message into the recv message data, so we |
1619 | * can retransmit it later if necessary. | ||
1620 | */ | ||
1542 | memcpy(recv_msg->msg_data, smi_msg->data, | 1621 | memcpy(recv_msg->msg_data, smi_msg->data, |
1543 | smi_msg->data_size); | 1622 | smi_msg->data_size); |
1544 | recv_msg->msg.data = recv_msg->msg_data; | 1623 | recv_msg->msg.data = recv_msg->msg_data; |
1545 | recv_msg->msg.data_len = smi_msg->data_size; | 1624 | recv_msg->msg.data_len = smi_msg->data_size; |
1546 | 1625 | ||
1547 | /* We don't unlock until here, because we need | 1626 | /* |
1548 | to copy the completed message into the | 1627 | * We don't unlock until here, because we need |
1549 | recv_msg before we release the lock. | 1628 | * to copy the completed message into the |
1550 | Otherwise, race conditions may bite us. I | 1629 | * recv_msg before we release the lock. |
1551 | know that's pretty paranoid, but I prefer | 1630 | * Otherwise, race conditions may bite us. I |
1552 | to be correct. */ | 1631 | * know that's pretty paranoid, but I prefer |
1632 | * to be correct. | ||
1633 | */ | ||
1553 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1634 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
1554 | } | 1635 | } |
1555 | } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { | 1636 | } else if (addr->addr_type == IPMI_LAN_ADDR_TYPE) { |
@@ -1558,21 +1639,16 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1558 | long seqid; | 1639 | long seqid; |
1559 | 1640 | ||
1560 | if (addr->channel >= IPMI_MAX_CHANNELS) { | 1641 | if (addr->channel >= IPMI_MAX_CHANNELS) { |
1561 | spin_lock_irqsave(&intf->counter_lock, flags); | 1642 | ipmi_inc_stat(intf, sent_invalid_commands); |
1562 | intf->sent_invalid_commands++; | ||
1563 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1564 | rv = -EINVAL; | 1643 | rv = -EINVAL; |
1565 | goto out_err; | 1644 | goto out_err; |
1566 | } | 1645 | } |
1567 | 1646 | ||
1568 | if ((intf->channels[addr->channel].medium | 1647 | if ((intf->channels[addr->channel].medium |
1569 | != IPMI_CHANNEL_MEDIUM_8023LAN) | 1648 | != IPMI_CHANNEL_MEDIUM_8023LAN) |
1570 | && (intf->channels[addr->channel].medium | 1649 | && (intf->channels[addr->channel].medium |
1571 | != IPMI_CHANNEL_MEDIUM_ASYNC)) | 1650 | != IPMI_CHANNEL_MEDIUM_ASYNC)) { |
1572 | { | 1651 | ipmi_inc_stat(intf, sent_invalid_commands); |
1573 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
1574 | intf->sent_invalid_commands++; | ||
1575 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1576 | rv = -EINVAL; | 1652 | rv = -EINVAL; |
1577 | goto out_err; | 1653 | goto out_err; |
1578 | } | 1654 | } |
@@ -1585,18 +1661,14 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1585 | 1661 | ||
1586 | /* 11 for the header and 1 for the checksum. */ | 1662 | /* 11 for the header and 1 for the checksum. */ |
1587 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { | 1663 | if ((msg->data_len + 12) > IPMI_MAX_MSG_LENGTH) { |
1588 | spin_lock_irqsave(&intf->counter_lock, flags); | 1664 | ipmi_inc_stat(intf, sent_invalid_commands); |
1589 | intf->sent_invalid_commands++; | ||
1590 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1591 | rv = -EMSGSIZE; | 1665 | rv = -EMSGSIZE; |
1592 | goto out_err; | 1666 | goto out_err; |
1593 | } | 1667 | } |
1594 | 1668 | ||
1595 | lan_addr = (struct ipmi_lan_addr *) addr; | 1669 | lan_addr = (struct ipmi_lan_addr *) addr; |
1596 | if (lan_addr->lun > 3) { | 1670 | if (lan_addr->lun > 3) { |
1597 | spin_lock_irqsave(&intf->counter_lock, flags); | 1671 | ipmi_inc_stat(intf, sent_invalid_commands); |
1598 | intf->sent_invalid_commands++; | ||
1599 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1600 | rv = -EINVAL; | 1672 | rv = -EINVAL; |
1601 | goto out_err; | 1673 | goto out_err; |
1602 | } | 1674 | } |
@@ -1604,28 +1676,30 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1604 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); | 1676 | memcpy(&recv_msg->addr, lan_addr, sizeof(*lan_addr)); |
1605 | 1677 | ||
1606 | if (recv_msg->msg.netfn & 0x1) { | 1678 | if (recv_msg->msg.netfn & 0x1) { |
1607 | /* It's a response, so use the user's sequence | 1679 | /* |
1608 | from msgid. */ | 1680 | * It's a response, so use the user's sequence |
1609 | spin_lock_irqsave(&intf->counter_lock, flags); | 1681 | * from msgid. |
1610 | intf->sent_lan_responses++; | 1682 | */ |
1611 | spin_unlock_irqrestore(&intf->counter_lock, flags); | 1683 | ipmi_inc_stat(intf, sent_lan_responses); |
1612 | format_lan_msg(smi_msg, msg, lan_addr, msgid, | 1684 | format_lan_msg(smi_msg, msg, lan_addr, msgid, |
1613 | msgid, source_lun); | 1685 | msgid, source_lun); |
1614 | 1686 | ||
1615 | /* Save the receive message so we can use it | 1687 | /* |
1616 | to deliver the response. */ | 1688 | * Save the receive message so we can use it |
1689 | * to deliver the response. | ||
1690 | */ | ||
1617 | smi_msg->user_data = recv_msg; | 1691 | smi_msg->user_data = recv_msg; |
1618 | } else { | 1692 | } else { |
1619 | /* It's a command, so get a sequence for it. */ | 1693 | /* It's a command, so get a sequence for it. */ |
1620 | 1694 | ||
1621 | spin_lock_irqsave(&(intf->seq_lock), flags); | 1695 | spin_lock_irqsave(&(intf->seq_lock), flags); |
1622 | 1696 | ||
1623 | spin_lock(&intf->counter_lock); | 1697 | ipmi_inc_stat(intf, sent_lan_commands); |
1624 | intf->sent_lan_commands++; | ||
1625 | spin_unlock(&intf->counter_lock); | ||
1626 | 1698 | ||
1627 | /* Create a sequence number with a 1 second | 1699 | /* |
1628 | timeout and 4 retries. */ | 1700 | * Create a sequence number with a 1 second |
1701 | * timeout and 4 retries. | ||
1702 | */ | ||
1629 | rv = intf_next_seq(intf, | 1703 | rv = intf_next_seq(intf, |
1630 | recv_msg, | 1704 | recv_msg, |
1631 | retry_time_ms, | 1705 | retry_time_ms, |
@@ -1634,40 +1708,46 @@ static int i_ipmi_request(ipmi_user_t user, | |||
1634 | &ipmb_seq, | 1708 | &ipmb_seq, |
1635 | &seqid); | 1709 | &seqid); |
1636 | if (rv) { | 1710 | if (rv) { |
1637 | /* We have used up all the sequence numbers, | 1711 | /* |
1638 | probably, so abort. */ | 1712 | * We have used up all the sequence numbers, |
1713 | * probably, so abort. | ||
1714 | */ | ||
1639 | spin_unlock_irqrestore(&(intf->seq_lock), | 1715 | spin_unlock_irqrestore(&(intf->seq_lock), |
1640 | flags); | 1716 | flags); |
1641 | goto out_err; | 1717 | goto out_err; |
1642 | } | 1718 | } |
1643 | 1719 | ||
1644 | /* Store the sequence number in the message, | 1720 | /* |
1645 | so that when the send message response | 1721 | * Store the sequence number in the message, |
1646 | comes back we can start the timer. */ | 1722 | * so that when the send message response |
1723 | * comes back we can start the timer. | ||
1724 | */ | ||
1647 | format_lan_msg(smi_msg, msg, lan_addr, | 1725 | format_lan_msg(smi_msg, msg, lan_addr, |
1648 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), | 1726 | STORE_SEQ_IN_MSGID(ipmb_seq, seqid), |
1649 | ipmb_seq, source_lun); | 1727 | ipmb_seq, source_lun); |
1650 | 1728 | ||
1651 | /* Copy the message into the recv message data, so we | 1729 | /* |
1652 | can retransmit it later if necessary. */ | 1730 | * Copy the message into the recv message data, so we |
1731 | * can retransmit it later if necessary. | ||
1732 | */ | ||
1653 | memcpy(recv_msg->msg_data, smi_msg->data, | 1733 | memcpy(recv_msg->msg_data, smi_msg->data, |
1654 | smi_msg->data_size); | 1734 | smi_msg->data_size); |
1655 | recv_msg->msg.data = recv_msg->msg_data; | 1735 | recv_msg->msg.data = recv_msg->msg_data; |
1656 | recv_msg->msg.data_len = smi_msg->data_size; | 1736 | recv_msg->msg.data_len = smi_msg->data_size; |
1657 | 1737 | ||
1658 | /* We don't unlock until here, because we need | 1738 | /* |
1659 | to copy the completed message into the | 1739 | * We don't unlock until here, because we need |
1660 | recv_msg before we release the lock. | 1740 | * to copy the completed message into the |
1661 | Otherwise, race conditions may bite us. I | 1741 | * recv_msg before we release the lock. |
1662 | know that's pretty paranoid, but I prefer | 1742 | * Otherwise, race conditions may bite us. I |
1663 | to be correct. */ | 1743 | * know that's pretty paranoid, but I prefer |
1744 | * to be correct. | ||
1745 | */ | ||
1664 | spin_unlock_irqrestore(&(intf->seq_lock), flags); | 1746 | spin_unlock_irqrestore(&(intf->seq_lock), flags); |
1665 | } | 1747 | } |
1666 | } else { | 1748 | } else { |
1667 | /* Unknown address type. */ | 1749 | /* Unknown address type. */ |
1668 | spin_lock_irqsave(&intf->counter_lock, flags); | 1750 | ipmi_inc_stat(intf, sent_invalid_commands); |
1669 | intf->sent_invalid_commands++; | ||
1670 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
1671 | rv = -EINVAL; | 1751 | rv = -EINVAL; |
1672 | goto out_err; | 1752 | goto out_err; |
1673 | } | 1753 | } |
@@ -1735,6 +1815,7 @@ int ipmi_request_settime(ipmi_user_t user, | |||
1735 | retries, | 1815 | retries, |
1736 | retry_time_ms); | 1816 | retry_time_ms); |
1737 | } | 1817 | } |
1818 | EXPORT_SYMBOL(ipmi_request_settime); | ||
1738 | 1819 | ||
1739 | int ipmi_request_supply_msgs(ipmi_user_t user, | 1820 | int ipmi_request_supply_msgs(ipmi_user_t user, |
1740 | struct ipmi_addr *addr, | 1821 | struct ipmi_addr *addr, |
@@ -1766,6 +1847,7 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
1766 | lun, | 1847 | lun, |
1767 | -1, 0); | 1848 | -1, 0); |
1768 | } | 1849 | } |
1850 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | ||
1769 | 1851 | ||
1770 | #ifdef CONFIG_PROC_FS | 1852 | #ifdef CONFIG_PROC_FS |
1771 | static int ipmb_file_read_proc(char *page, char **start, off_t off, | 1853 | static int ipmb_file_read_proc(char *page, char **start, off_t off, |
@@ -1790,7 +1872,7 @@ static int version_file_read_proc(char *page, char **start, off_t off, | |||
1790 | char *out = (char *) page; | 1872 | char *out = (char *) page; |
1791 | ipmi_smi_t intf = data; | 1873 | ipmi_smi_t intf = data; |
1792 | 1874 | ||
1793 | return sprintf(out, "%d.%d\n", | 1875 | return sprintf(out, "%u.%u\n", |
1794 | ipmi_version_major(&intf->bmc->id), | 1876 | ipmi_version_major(&intf->bmc->id), |
1795 | ipmi_version_minor(&intf->bmc->id)); | 1877 | ipmi_version_minor(&intf->bmc->id)); |
1796 | } | 1878 | } |
@@ -1801,65 +1883,65 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
1801 | char *out = (char *) page; | 1883 | char *out = (char *) page; |
1802 | ipmi_smi_t intf = data; | 1884 | ipmi_smi_t intf = data; |
1803 | 1885 | ||
1804 | out += sprintf(out, "sent_invalid_commands: %d\n", | 1886 | out += sprintf(out, "sent_invalid_commands: %u\n", |
1805 | intf->sent_invalid_commands); | 1887 | ipmi_get_stat(intf, sent_invalid_commands)); |
1806 | out += sprintf(out, "sent_local_commands: %d\n", | 1888 | out += sprintf(out, "sent_local_commands: %u\n", |
1807 | intf->sent_local_commands); | 1889 | ipmi_get_stat(intf, sent_local_commands)); |
1808 | out += sprintf(out, "handled_local_responses: %d\n", | 1890 | out += sprintf(out, "handled_local_responses: %u\n", |
1809 | intf->handled_local_responses); | 1891 | ipmi_get_stat(intf, handled_local_responses)); |
1810 | out += sprintf(out, "unhandled_local_responses: %d\n", | 1892 | out += sprintf(out, "unhandled_local_responses: %u\n", |
1811 | intf->unhandled_local_responses); | 1893 | ipmi_get_stat(intf, unhandled_local_responses)); |
1812 | out += sprintf(out, "sent_ipmb_commands: %d\n", | 1894 | out += sprintf(out, "sent_ipmb_commands: %u\n", |
1813 | intf->sent_ipmb_commands); | 1895 | ipmi_get_stat(intf, sent_ipmb_commands)); |
1814 | out += sprintf(out, "sent_ipmb_command_errs: %d\n", | 1896 | out += sprintf(out, "sent_ipmb_command_errs: %u\n", |
1815 | intf->sent_ipmb_command_errs); | 1897 | ipmi_get_stat(intf, sent_ipmb_command_errs)); |
1816 | out += sprintf(out, "retransmitted_ipmb_commands: %d\n", | 1898 | out += sprintf(out, "retransmitted_ipmb_commands: %u\n", |
1817 | intf->retransmitted_ipmb_commands); | 1899 | ipmi_get_stat(intf, retransmitted_ipmb_commands)); |
1818 | out += sprintf(out, "timed_out_ipmb_commands: %d\n", | 1900 | out += sprintf(out, "timed_out_ipmb_commands: %u\n", |
1819 | intf->timed_out_ipmb_commands); | 1901 | ipmi_get_stat(intf, timed_out_ipmb_commands)); |
1820 | out += sprintf(out, "timed_out_ipmb_broadcasts: %d\n", | 1902 | out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n", |
1821 | intf->timed_out_ipmb_broadcasts); | 1903 | ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); |
1822 | out += sprintf(out, "sent_ipmb_responses: %d\n", | 1904 | out += sprintf(out, "sent_ipmb_responses: %u\n", |
1823 | intf->sent_ipmb_responses); | 1905 | ipmi_get_stat(intf, sent_ipmb_responses)); |
1824 | out += sprintf(out, "handled_ipmb_responses: %d\n", | 1906 | out += sprintf(out, "handled_ipmb_responses: %u\n", |
1825 | intf->handled_ipmb_responses); | 1907 | ipmi_get_stat(intf, handled_ipmb_responses)); |
1826 | out += sprintf(out, "invalid_ipmb_responses: %d\n", | 1908 | out += sprintf(out, "invalid_ipmb_responses: %u\n", |
1827 | intf->invalid_ipmb_responses); | 1909 | ipmi_get_stat(intf, invalid_ipmb_responses)); |
1828 | out += sprintf(out, "unhandled_ipmb_responses: %d\n", | 1910 | out += sprintf(out, "unhandled_ipmb_responses: %u\n", |
1829 | intf->unhandled_ipmb_responses); | 1911 | ipmi_get_stat(intf, unhandled_ipmb_responses)); |
1830 | out += sprintf(out, "sent_lan_commands: %d\n", | 1912 | out += sprintf(out, "sent_lan_commands: %u\n", |
1831 | intf->sent_lan_commands); | 1913 | ipmi_get_stat(intf, sent_lan_commands)); |
1832 | out += sprintf(out, "sent_lan_command_errs: %d\n", | 1914 | out += sprintf(out, "sent_lan_command_errs: %u\n", |
1833 | intf->sent_lan_command_errs); | 1915 | ipmi_get_stat(intf, sent_lan_command_errs)); |
1834 | out += sprintf(out, "retransmitted_lan_commands: %d\n", | 1916 | out += sprintf(out, "retransmitted_lan_commands: %u\n", |
1835 | intf->retransmitted_lan_commands); | 1917 | ipmi_get_stat(intf, retransmitted_lan_commands)); |
1836 | out += sprintf(out, "timed_out_lan_commands: %d\n", | 1918 | out += sprintf(out, "timed_out_lan_commands: %u\n", |
1837 | intf->timed_out_lan_commands); | 1919 | ipmi_get_stat(intf, timed_out_lan_commands)); |
1838 | out += sprintf(out, "sent_lan_responses: %d\n", | 1920 | out += sprintf(out, "sent_lan_responses: %u\n", |
1839 | intf->sent_lan_responses); | 1921 | ipmi_get_stat(intf, sent_lan_responses)); |
1840 | out += sprintf(out, "handled_lan_responses: %d\n", | 1922 | out += sprintf(out, "handled_lan_responses: %u\n", |
1841 | intf->handled_lan_responses); | 1923 | ipmi_get_stat(intf, handled_lan_responses)); |
1842 | out += sprintf(out, "invalid_lan_responses: %d\n", | 1924 | out += sprintf(out, "invalid_lan_responses: %u\n", |
1843 | intf->invalid_lan_responses); | 1925 | ipmi_get_stat(intf, invalid_lan_responses)); |
1844 | out += sprintf(out, "unhandled_lan_responses: %d\n", | 1926 | out += sprintf(out, "unhandled_lan_responses: %u\n", |
1845 | intf->unhandled_lan_responses); | 1927 | ipmi_get_stat(intf, unhandled_lan_responses)); |
1846 | out += sprintf(out, "handled_commands: %d\n", | 1928 | out += sprintf(out, "handled_commands: %u\n", |
1847 | intf->handled_commands); | 1929 | ipmi_get_stat(intf, handled_commands)); |
1848 | out += sprintf(out, "invalid_commands: %d\n", | 1930 | out += sprintf(out, "invalid_commands: %u\n", |
1849 | intf->invalid_commands); | 1931 | ipmi_get_stat(intf, invalid_commands)); |
1850 | out += sprintf(out, "unhandled_commands: %d\n", | 1932 | out += sprintf(out, "unhandled_commands: %u\n", |
1851 | intf->unhandled_commands); | 1933 | ipmi_get_stat(intf, unhandled_commands)); |
1852 | out += sprintf(out, "invalid_events: %d\n", | 1934 | out += sprintf(out, "invalid_events: %u\n", |
1853 | intf->invalid_events); | 1935 | ipmi_get_stat(intf, invalid_events)); |
1854 | out += sprintf(out, "events: %d\n", | 1936 | out += sprintf(out, "events: %u\n", |
1855 | intf->events); | 1937 | ipmi_get_stat(intf, events)); |
1856 | 1938 | ||
1857 | return (out - ((char *) page)); | 1939 | return (out - ((char *) page)); |
1858 | } | 1940 | } |
1859 | #endif /* CONFIG_PROC_FS */ | 1941 | #endif /* CONFIG_PROC_FS */ |
1860 | 1942 | ||
1861 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 1943 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
1862 | read_proc_t *read_proc, write_proc_t *write_proc, | 1944 | read_proc_t *read_proc, |
1863 | void *data, struct module *owner) | 1945 | void *data, struct module *owner) |
1864 | { | 1946 | { |
1865 | int rv = 0; | 1947 | int rv = 0; |
@@ -1886,7 +1968,6 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
1886 | } else { | 1968 | } else { |
1887 | file->data = data; | 1969 | file->data = data; |
1888 | file->read_proc = read_proc; | 1970 | file->read_proc = read_proc; |
1889 | file->write_proc = write_proc; | ||
1890 | file->owner = owner; | 1971 | file->owner = owner; |
1891 | 1972 | ||
1892 | mutex_lock(&smi->proc_entry_lock); | 1973 | mutex_lock(&smi->proc_entry_lock); |
@@ -1899,6 +1980,7 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
1899 | 1980 | ||
1900 | return rv; | 1981 | return rv; |
1901 | } | 1982 | } |
1983 | EXPORT_SYMBOL(ipmi_smi_add_proc_entry); | ||
1902 | 1984 | ||
1903 | static int add_proc_entries(ipmi_smi_t smi, int num) | 1985 | static int add_proc_entries(ipmi_smi_t smi, int num) |
1904 | { | 1986 | { |
@@ -1909,23 +1991,22 @@ static int add_proc_entries(ipmi_smi_t smi, int num) | |||
1909 | smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); | 1991 | smi->proc_dir = proc_mkdir(smi->proc_dir_name, proc_ipmi_root); |
1910 | if (!smi->proc_dir) | 1992 | if (!smi->proc_dir) |
1911 | rv = -ENOMEM; | 1993 | rv = -ENOMEM; |
1912 | else { | 1994 | else |
1913 | smi->proc_dir->owner = THIS_MODULE; | 1995 | smi->proc_dir->owner = THIS_MODULE; |
1914 | } | ||
1915 | 1996 | ||
1916 | if (rv == 0) | 1997 | if (rv == 0) |
1917 | rv = ipmi_smi_add_proc_entry(smi, "stats", | 1998 | rv = ipmi_smi_add_proc_entry(smi, "stats", |
1918 | stat_file_read_proc, NULL, | 1999 | stat_file_read_proc, |
1919 | smi, THIS_MODULE); | 2000 | smi, THIS_MODULE); |
1920 | 2001 | ||
1921 | if (rv == 0) | 2002 | if (rv == 0) |
1922 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", | 2003 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", |
1923 | ipmb_file_read_proc, NULL, | 2004 | ipmb_file_read_proc, |
1924 | smi, THIS_MODULE); | 2005 | smi, THIS_MODULE); |
1925 | 2006 | ||
1926 | if (rv == 0) | 2007 | if (rv == 0) |
1927 | rv = ipmi_smi_add_proc_entry(smi, "version", | 2008 | rv = ipmi_smi_add_proc_entry(smi, "version", |
1928 | version_file_read_proc, NULL, | 2009 | version_file_read_proc, |
1929 | smi, THIS_MODULE); | 2010 | smi, THIS_MODULE); |
1930 | #endif /* CONFIG_PROC_FS */ | 2011 | #endif /* CONFIG_PROC_FS */ |
1931 | 2012 | ||
@@ -2210,37 +2291,47 @@ static int create_files(struct bmc_device *bmc) | |||
2210 | 2291 | ||
2211 | err = device_create_file(&bmc->dev->dev, | 2292 | err = device_create_file(&bmc->dev->dev, |
2212 | &bmc->device_id_attr); | 2293 | &bmc->device_id_attr); |
2213 | if (err) goto out; | 2294 | if (err) |
2295 | goto out; | ||
2214 | err = device_create_file(&bmc->dev->dev, | 2296 | err = device_create_file(&bmc->dev->dev, |
2215 | &bmc->provides_dev_sdrs_attr); | 2297 | &bmc->provides_dev_sdrs_attr); |
2216 | if (err) goto out_devid; | 2298 | if (err) |
2299 | goto out_devid; | ||
2217 | err = device_create_file(&bmc->dev->dev, | 2300 | err = device_create_file(&bmc->dev->dev, |
2218 | &bmc->revision_attr); | 2301 | &bmc->revision_attr); |
2219 | if (err) goto out_sdrs; | 2302 | if (err) |
2303 | goto out_sdrs; | ||
2220 | err = device_create_file(&bmc->dev->dev, | 2304 | err = device_create_file(&bmc->dev->dev, |
2221 | &bmc->firmware_rev_attr); | 2305 | &bmc->firmware_rev_attr); |
2222 | if (err) goto out_rev; | 2306 | if (err) |
2307 | goto out_rev; | ||
2223 | err = device_create_file(&bmc->dev->dev, | 2308 | err = device_create_file(&bmc->dev->dev, |
2224 | &bmc->version_attr); | 2309 | &bmc->version_attr); |
2225 | if (err) goto out_firm; | 2310 | if (err) |
2311 | goto out_firm; | ||
2226 | err = device_create_file(&bmc->dev->dev, | 2312 | err = device_create_file(&bmc->dev->dev, |
2227 | &bmc->add_dev_support_attr); | 2313 | &bmc->add_dev_support_attr); |
2228 | if (err) goto out_version; | 2314 | if (err) |
2315 | goto out_version; | ||
2229 | err = device_create_file(&bmc->dev->dev, | 2316 | err = device_create_file(&bmc->dev->dev, |
2230 | &bmc->manufacturer_id_attr); | 2317 | &bmc->manufacturer_id_attr); |
2231 | if (err) goto out_add_dev; | 2318 | if (err) |
2319 | goto out_add_dev; | ||
2232 | err = device_create_file(&bmc->dev->dev, | 2320 | err = device_create_file(&bmc->dev->dev, |
2233 | &bmc->product_id_attr); | 2321 | &bmc->product_id_attr); |
2234 | if (err) goto out_manu; | 2322 | if (err) |
2323 | goto out_manu; | ||
2235 | if (bmc->id.aux_firmware_revision_set) { | 2324 | if (bmc->id.aux_firmware_revision_set) { |
2236 | err = device_create_file(&bmc->dev->dev, | 2325 | err = device_create_file(&bmc->dev->dev, |
2237 | &bmc->aux_firmware_rev_attr); | 2326 | &bmc->aux_firmware_rev_attr); |
2238 | if (err) goto out_prod_id; | 2327 | if (err) |
2328 | goto out_prod_id; | ||
2239 | } | 2329 | } |
2240 | if (bmc->guid_set) { | 2330 | if (bmc->guid_set) { |
2241 | err = device_create_file(&bmc->dev->dev, | 2331 | err = device_create_file(&bmc->dev->dev, |
2242 | &bmc->guid_attr); | 2332 | &bmc->guid_attr); |
2243 | if (err) goto out_aux_firm; | 2333 | if (err) |
2334 | goto out_aux_firm; | ||
2244 | } | 2335 | } |
2245 | 2336 | ||
2246 | return 0; | 2337 | return 0; |
@@ -2368,8 +2459,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, | |||
2368 | "ipmi_msghandler:" | 2459 | "ipmi_msghandler:" |
2369 | " Unable to register bmc device: %d\n", | 2460 | " Unable to register bmc device: %d\n", |
2370 | rv); | 2461 | rv); |
2371 | /* Don't go to out_err, you can only do that if | 2462 | /* |
2372 | the device is registered already. */ | 2463 | * Don't go to out_err, you can only do that if |
2464 | * the device is registered already. | ||
2465 | */ | ||
2373 | return rv; | 2466 | return rv; |
2374 | } | 2467 | } |
2375 | 2468 | ||
@@ -2560,17 +2653,18 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
2560 | 2653 | ||
2561 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 2654 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
2562 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) | 2655 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) |
2563 | && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) | 2656 | && (msg->msg.cmd == IPMI_GET_CHANNEL_INFO_CMD)) { |
2564 | { | ||
2565 | /* It's the one we want */ | 2657 | /* It's the one we want */ |
2566 | if (msg->msg.data[0] != 0) { | 2658 | if (msg->msg.data[0] != 0) { |
2567 | /* Got an error from the channel, just go on. */ | 2659 | /* Got an error from the channel, just go on. */ |
2568 | 2660 | ||
2569 | if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { | 2661 | if (msg->msg.data[0] == IPMI_INVALID_COMMAND_ERR) { |
2570 | /* If the MC does not support this | 2662 | /* |
2571 | command, that is legal. We just | 2663 | * If the MC does not support this |
2572 | assume it has one IPMB at channel | 2664 | * command, that is legal. We just |
2573 | zero. */ | 2665 | * assume it has one IPMB at channel |
2666 | * zero. | ||
2667 | */ | ||
2574 | intf->channels[0].medium | 2668 | intf->channels[0].medium |
2575 | = IPMI_CHANNEL_MEDIUM_IPMB; | 2669 | = IPMI_CHANNEL_MEDIUM_IPMB; |
2576 | intf->channels[0].protocol | 2670 | intf->channels[0].protocol |
@@ -2591,7 +2685,7 @@ channel_handler(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
2591 | intf->channels[chan].medium = msg->msg.data[2] & 0x7f; | 2685 | intf->channels[chan].medium = msg->msg.data[2] & 0x7f; |
2592 | intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; | 2686 | intf->channels[chan].protocol = msg->msg.data[3] & 0x1f; |
2593 | 2687 | ||
2594 | next_channel: | 2688 | next_channel: |
2595 | intf->curr_channel++; | 2689 | intf->curr_channel++; |
2596 | if (intf->curr_channel >= IPMI_MAX_CHANNELS) | 2690 | if (intf->curr_channel >= IPMI_MAX_CHANNELS) |
2597 | wake_up(&intf->waitq); | 2691 | wake_up(&intf->waitq); |
@@ -2619,6 +2713,7 @@ void ipmi_poll_interface(ipmi_user_t user) | |||
2619 | if (intf->handlers->poll) | 2713 | if (intf->handlers->poll) |
2620 | intf->handlers->poll(intf->send_info); | 2714 | intf->handlers->poll(intf->send_info); |
2621 | } | 2715 | } |
2716 | EXPORT_SYMBOL(ipmi_poll_interface); | ||
2622 | 2717 | ||
2623 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | 2718 | int ipmi_register_smi(struct ipmi_smi_handlers *handlers, |
2624 | void *send_info, | 2719 | void *send_info, |
@@ -2633,14 +2728,18 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2633 | ipmi_smi_t tintf; | 2728 | ipmi_smi_t tintf; |
2634 | struct list_head *link; | 2729 | struct list_head *link; |
2635 | 2730 | ||
2636 | /* Make sure the driver is actually initialized, this handles | 2731 | /* |
2637 | problems with initialization order. */ | 2732 | * Make sure the driver is actually initialized, this handles |
2733 | * problems with initialization order. | ||
2734 | */ | ||
2638 | if (!initialized) { | 2735 | if (!initialized) { |
2639 | rv = ipmi_init_msghandler(); | 2736 | rv = ipmi_init_msghandler(); |
2640 | if (rv) | 2737 | if (rv) |
2641 | return rv; | 2738 | return rv; |
2642 | /* The init code doesn't return an error if it was turned | 2739 | /* |
2643 | off, but it won't initialize. Check that. */ | 2740 | * The init code doesn't return an error if it was turned |
2741 | * off, but it won't initialize. Check that. | ||
2742 | */ | ||
2644 | if (!initialized) | 2743 | if (!initialized) |
2645 | return -ENODEV; | 2744 | return -ENODEV; |
2646 | } | 2745 | } |
@@ -2688,8 +2787,9 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2688 | spin_lock_init(&intf->maintenance_mode_lock); | 2787 | spin_lock_init(&intf->maintenance_mode_lock); |
2689 | INIT_LIST_HEAD(&intf->cmd_rcvrs); | 2788 | INIT_LIST_HEAD(&intf->cmd_rcvrs); |
2690 | init_waitqueue_head(&intf->waitq); | 2789 | init_waitqueue_head(&intf->waitq); |
2790 | for (i = 0; i < IPMI_NUM_STATS; i++) | ||
2791 | atomic_set(&intf->stats[i], 0); | ||
2691 | 2792 | ||
2692 | spin_lock_init(&intf->counter_lock); | ||
2693 | intf->proc_dir = NULL; | 2793 | intf->proc_dir = NULL; |
2694 | 2794 | ||
2695 | mutex_lock(&smi_watchers_mutex); | 2795 | mutex_lock(&smi_watchers_mutex); |
@@ -2717,11 +2817,12 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2717 | get_guid(intf); | 2817 | get_guid(intf); |
2718 | 2818 | ||
2719 | if ((intf->ipmi_version_major > 1) | 2819 | if ((intf->ipmi_version_major > 1) |
2720 | || ((intf->ipmi_version_major == 1) | 2820 | || ((intf->ipmi_version_major == 1) |
2721 | && (intf->ipmi_version_minor >= 5))) | 2821 | && (intf->ipmi_version_minor >= 5))) { |
2722 | { | 2822 | /* |
2723 | /* Start scanning the channels to see what is | 2823 | * Start scanning the channels to see what is |
2724 | available. */ | 2824 | * available. |
2825 | */ | ||
2725 | intf->null_user_handler = channel_handler; | 2826 | intf->null_user_handler = channel_handler; |
2726 | intf->curr_channel = 0; | 2827 | intf->curr_channel = 0; |
2727 | rv = send_channel_info_cmd(intf, 0); | 2828 | rv = send_channel_info_cmd(intf, 0); |
@@ -2769,6 +2870,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers, | |||
2769 | 2870 | ||
2770 | return rv; | 2871 | return rv; |
2771 | } | 2872 | } |
2873 | EXPORT_SYMBOL(ipmi_register_smi); | ||
2772 | 2874 | ||
2773 | static void cleanup_smi_msgs(ipmi_smi_t intf) | 2875 | static void cleanup_smi_msgs(ipmi_smi_t intf) |
2774 | { | 2876 | { |
@@ -2803,8 +2905,10 @@ int ipmi_unregister_smi(ipmi_smi_t intf) | |||
2803 | 2905 | ||
2804 | remove_proc_entries(intf); | 2906 | remove_proc_entries(intf); |
2805 | 2907 | ||
2806 | /* Call all the watcher interfaces to tell them that | 2908 | /* |
2807 | an interface is gone. */ | 2909 | * Call all the watcher interfaces to tell them that |
2910 | * an interface is gone. | ||
2911 | */ | ||
2808 | list_for_each_entry(w, &smi_watchers, link) | 2912 | list_for_each_entry(w, &smi_watchers, link) |
2809 | w->smi_gone(intf_num); | 2913 | w->smi_gone(intf_num); |
2810 | mutex_unlock(&smi_watchers_mutex); | 2914 | mutex_unlock(&smi_watchers_mutex); |
@@ -2812,22 +2916,21 @@ int ipmi_unregister_smi(ipmi_smi_t intf) | |||
2812 | kref_put(&intf->refcount, intf_free); | 2916 | kref_put(&intf->refcount, intf_free); |
2813 | return 0; | 2917 | return 0; |
2814 | } | 2918 | } |
2919 | EXPORT_SYMBOL(ipmi_unregister_smi); | ||
2815 | 2920 | ||
2816 | static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | 2921 | static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, |
2817 | struct ipmi_smi_msg *msg) | 2922 | struct ipmi_smi_msg *msg) |
2818 | { | 2923 | { |
2819 | struct ipmi_ipmb_addr ipmb_addr; | 2924 | struct ipmi_ipmb_addr ipmb_addr; |
2820 | struct ipmi_recv_msg *recv_msg; | 2925 | struct ipmi_recv_msg *recv_msg; |
2821 | unsigned long flags; | ||
2822 | 2926 | ||
2823 | 2927 | /* | |
2824 | /* This is 11, not 10, because the response must contain a | 2928 | * This is 11, not 10, because the response must contain a |
2825 | * completion code. */ | 2929 | * completion code. |
2930 | */ | ||
2826 | if (msg->rsp_size < 11) { | 2931 | if (msg->rsp_size < 11) { |
2827 | /* Message not big enough, just ignore it. */ | 2932 | /* Message not big enough, just ignore it. */ |
2828 | spin_lock_irqsave(&intf->counter_lock, flags); | 2933 | ipmi_inc_stat(intf, invalid_ipmb_responses); |
2829 | intf->invalid_ipmb_responses++; | ||
2830 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2831 | return 0; | 2934 | return 0; |
2832 | } | 2935 | } |
2833 | 2936 | ||
@@ -2841,37 +2944,38 @@ static int handle_ipmb_get_msg_rsp(ipmi_smi_t intf, | |||
2841 | ipmb_addr.channel = msg->rsp[3] & 0x0f; | 2944 | ipmb_addr.channel = msg->rsp[3] & 0x0f; |
2842 | ipmb_addr.lun = msg->rsp[7] & 3; | 2945 | ipmb_addr.lun = msg->rsp[7] & 3; |
2843 | 2946 | ||
2844 | /* It's a response from a remote entity. Look up the sequence | 2947 | /* |
2845 | number and handle the response. */ | 2948 | * It's a response from a remote entity. Look up the sequence |
2949 | * number and handle the response. | ||
2950 | */ | ||
2846 | if (intf_find_seq(intf, | 2951 | if (intf_find_seq(intf, |
2847 | msg->rsp[7] >> 2, | 2952 | msg->rsp[7] >> 2, |
2848 | msg->rsp[3] & 0x0f, | 2953 | msg->rsp[3] & 0x0f, |
2849 | msg->rsp[8], | 2954 | msg->rsp[8], |
2850 | (msg->rsp[4] >> 2) & (~1), | 2955 | (msg->rsp[4] >> 2) & (~1), |
2851 | (struct ipmi_addr *) &(ipmb_addr), | 2956 | (struct ipmi_addr *) &(ipmb_addr), |
2852 | &recv_msg)) | 2957 | &recv_msg)) { |
2853 | { | 2958 | /* |
2854 | /* We were unable to find the sequence number, | 2959 | * We were unable to find the sequence number, |
2855 | so just nuke the message. */ | 2960 | * so just nuke the message. |
2856 | spin_lock_irqsave(&intf->counter_lock, flags); | 2961 | */ |
2857 | intf->unhandled_ipmb_responses++; | 2962 | ipmi_inc_stat(intf, unhandled_ipmb_responses); |
2858 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2859 | return 0; | 2963 | return 0; |
2860 | } | 2964 | } |
2861 | 2965 | ||
2862 | memcpy(recv_msg->msg_data, | 2966 | memcpy(recv_msg->msg_data, |
2863 | &(msg->rsp[9]), | 2967 | &(msg->rsp[9]), |
2864 | msg->rsp_size - 9); | 2968 | msg->rsp_size - 9); |
2865 | /* THe other fields matched, so no need to set them, except | 2969 | /* |
2866 | for netfn, which needs to be the response that was | 2970 | * The other fields matched, so no need to set them, except |
2867 | returned, not the request value. */ | 2971 | * for netfn, which needs to be the response that was |
2972 | * returned, not the request value. | ||
2973 | */ | ||
2868 | recv_msg->msg.netfn = msg->rsp[4] >> 2; | 2974 | recv_msg->msg.netfn = msg->rsp[4] >> 2; |
2869 | recv_msg->msg.data = recv_msg->msg_data; | 2975 | recv_msg->msg.data = recv_msg->msg_data; |
2870 | recv_msg->msg.data_len = msg->rsp_size - 10; | 2976 | recv_msg->msg.data_len = msg->rsp_size - 10; |
2871 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 2977 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
2872 | spin_lock_irqsave(&intf->counter_lock, flags); | 2978 | ipmi_inc_stat(intf, handled_ipmb_responses); |
2873 | intf->handled_ipmb_responses++; | ||
2874 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2875 | deliver_response(recv_msg); | 2979 | deliver_response(recv_msg); |
2876 | 2980 | ||
2877 | return 0; | 2981 | return 0; |
@@ -2888,14 +2992,11 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2888 | ipmi_user_t user = NULL; | 2992 | ipmi_user_t user = NULL; |
2889 | struct ipmi_ipmb_addr *ipmb_addr; | 2993 | struct ipmi_ipmb_addr *ipmb_addr; |
2890 | struct ipmi_recv_msg *recv_msg; | 2994 | struct ipmi_recv_msg *recv_msg; |
2891 | unsigned long flags; | ||
2892 | struct ipmi_smi_handlers *handlers; | 2995 | struct ipmi_smi_handlers *handlers; |
2893 | 2996 | ||
2894 | if (msg->rsp_size < 10) { | 2997 | if (msg->rsp_size < 10) { |
2895 | /* Message not big enough, just ignore it. */ | 2998 | /* Message not big enough, just ignore it. */ |
2896 | spin_lock_irqsave(&intf->counter_lock, flags); | 2999 | ipmi_inc_stat(intf, invalid_commands); |
2897 | intf->invalid_commands++; | ||
2898 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2899 | return 0; | 3000 | return 0; |
2900 | } | 3001 | } |
2901 | 3002 | ||
@@ -2919,19 +3020,17 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2919 | 3020 | ||
2920 | if (user == NULL) { | 3021 | if (user == NULL) { |
2921 | /* We didn't find a user, deliver an error response. */ | 3022 | /* We didn't find a user, deliver an error response. */ |
2922 | spin_lock_irqsave(&intf->counter_lock, flags); | 3023 | ipmi_inc_stat(intf, unhandled_commands); |
2923 | intf->unhandled_commands++; | ||
2924 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2925 | 3024 | ||
2926 | msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); | 3025 | msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); |
2927 | msg->data[1] = IPMI_SEND_MSG_CMD; | 3026 | msg->data[1] = IPMI_SEND_MSG_CMD; |
2928 | msg->data[2] = msg->rsp[3]; | 3027 | msg->data[2] = msg->rsp[3]; |
2929 | msg->data[3] = msg->rsp[6]; | 3028 | msg->data[3] = msg->rsp[6]; |
2930 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); | 3029 | msg->data[4] = ((netfn + 1) << 2) | (msg->rsp[7] & 0x3); |
2931 | msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); | 3030 | msg->data[5] = ipmb_checksum(&(msg->data[3]), 2); |
2932 | msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; | 3031 | msg->data[6] = intf->channels[msg->rsp[3] & 0xf].address; |
2933 | /* rqseq/lun */ | 3032 | /* rqseq/lun */ |
2934 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); | 3033 | msg->data[7] = (msg->rsp[7] & 0xfc) | (msg->rsp[4] & 0x3); |
2935 | msg->data[8] = msg->rsp[8]; /* cmd */ | 3034 | msg->data[8] = msg->rsp[8]; /* cmd */ |
2936 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; | 3035 | msg->data[9] = IPMI_INVALID_CMD_COMPLETION_CODE; |
2937 | msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); | 3036 | msg->data[10] = ipmb_checksum(&(msg->data[6]), 4); |
@@ -2950,23 +3049,25 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2950 | handlers = intf->handlers; | 3049 | handlers = intf->handlers; |
2951 | if (handlers) { | 3050 | if (handlers) { |
2952 | handlers->sender(intf->send_info, msg, 0); | 3051 | handlers->sender(intf->send_info, msg, 0); |
2953 | /* We used the message, so return the value | 3052 | /* |
2954 | that causes it to not be freed or | 3053 | * We used the message, so return the value |
2955 | queued. */ | 3054 | * that causes it to not be freed or |
3055 | * queued. | ||
3056 | */ | ||
2956 | rv = -1; | 3057 | rv = -1; |
2957 | } | 3058 | } |
2958 | rcu_read_unlock(); | 3059 | rcu_read_unlock(); |
2959 | } else { | 3060 | } else { |
2960 | /* Deliver the message to the user. */ | 3061 | /* Deliver the message to the user. */ |
2961 | spin_lock_irqsave(&intf->counter_lock, flags); | 3062 | ipmi_inc_stat(intf, handled_commands); |
2962 | intf->handled_commands++; | ||
2963 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
2964 | 3063 | ||
2965 | recv_msg = ipmi_alloc_recv_msg(); | 3064 | recv_msg = ipmi_alloc_recv_msg(); |
2966 | if (!recv_msg) { | 3065 | if (!recv_msg) { |
2967 | /* We couldn't allocate memory for the | 3066 | /* |
2968 | message, so requeue it for handling | 3067 | * We couldn't allocate memory for the |
2969 | later. */ | 3068 | * message, so requeue it for handling |
3069 | * later. | ||
3070 | */ | ||
2970 | rv = 1; | 3071 | rv = 1; |
2971 | kref_put(&user->refcount, free_user); | 3072 | kref_put(&user->refcount, free_user); |
2972 | } else { | 3073 | } else { |
@@ -2977,8 +3078,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2977 | ipmb_addr->lun = msg->rsp[7] & 3; | 3078 | ipmb_addr->lun = msg->rsp[7] & 3; |
2978 | ipmb_addr->channel = msg->rsp[3] & 0xf; | 3079 | ipmb_addr->channel = msg->rsp[3] & 0xf; |
2979 | 3080 | ||
2980 | /* Extract the rest of the message information | 3081 | /* |
2981 | from the IPMB header.*/ | 3082 | * Extract the rest of the message information |
3083 | * from the IPMB header. | ||
3084 | */ | ||
2982 | recv_msg->user = user; | 3085 | recv_msg->user = user; |
2983 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; | 3086 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; |
2984 | recv_msg->msgid = msg->rsp[7] >> 2; | 3087 | recv_msg->msgid = msg->rsp[7] >> 2; |
@@ -2986,8 +3089,10 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf, | |||
2986 | recv_msg->msg.cmd = msg->rsp[8]; | 3089 | recv_msg->msg.cmd = msg->rsp[8]; |
2987 | recv_msg->msg.data = recv_msg->msg_data; | 3090 | recv_msg->msg.data = recv_msg->msg_data; |
2988 | 3091 | ||
2989 | /* We chop off 10, not 9 bytes because the checksum | 3092 | /* |
2990 | at the end also needs to be removed. */ | 3093 | * We chop off 10, not 9 bytes because the checksum |
3094 | * at the end also needs to be removed. | ||
3095 | */ | ||
2991 | recv_msg->msg.data_len = msg->rsp_size - 10; | 3096 | recv_msg->msg.data_len = msg->rsp_size - 10; |
2992 | memcpy(recv_msg->msg_data, | 3097 | memcpy(recv_msg->msg_data, |
2993 | &(msg->rsp[9]), | 3098 | &(msg->rsp[9]), |
@@ -3004,16 +3109,15 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3004 | { | 3109 | { |
3005 | struct ipmi_lan_addr lan_addr; | 3110 | struct ipmi_lan_addr lan_addr; |
3006 | struct ipmi_recv_msg *recv_msg; | 3111 | struct ipmi_recv_msg *recv_msg; |
3007 | unsigned long flags; | ||
3008 | 3112 | ||
3009 | 3113 | ||
3010 | /* This is 13, not 12, because the response must contain a | 3114 | /* |
3011 | * completion code. */ | 3115 | * This is 13, not 12, because the response must contain a |
3116 | * completion code. | ||
3117 | */ | ||
3012 | if (msg->rsp_size < 13) { | 3118 | if (msg->rsp_size < 13) { |
3013 | /* Message not big enough, just ignore it. */ | 3119 | /* Message not big enough, just ignore it. */ |
3014 | spin_lock_irqsave(&intf->counter_lock, flags); | 3120 | ipmi_inc_stat(intf, invalid_lan_responses); |
3015 | intf->invalid_lan_responses++; | ||
3016 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3017 | return 0; | 3121 | return 0; |
3018 | } | 3122 | } |
3019 | 3123 | ||
@@ -3030,37 +3134,38 @@ static int handle_lan_get_msg_rsp(ipmi_smi_t intf, | |||
3030 | lan_addr.privilege = msg->rsp[3] >> 4; | 3134 | lan_addr.privilege = msg->rsp[3] >> 4; |
3031 | lan_addr.lun = msg->rsp[9] & 3; | 3135 | lan_addr.lun = msg->rsp[9] & 3; |
3032 | 3136 | ||
3033 | /* It's a response from a remote entity. Look up the sequence | 3137 | /* |
3034 | number and handle the response. */ | 3138 | * It's a response from a remote entity. Look up the sequence |
3139 | * number and handle the response. | ||
3140 | */ | ||
3035 | if (intf_find_seq(intf, | 3141 | if (intf_find_seq(intf, |
3036 | msg->rsp[9] >> 2, | 3142 | msg->rsp[9] >> 2, |
3037 | msg->rsp[3] & 0x0f, | 3143 | msg->rsp[3] & 0x0f, |
3038 | msg->rsp[10], | 3144 | msg->rsp[10], |
3039 | (msg->rsp[6] >> 2) & (~1), | 3145 | (msg->rsp[6] >> 2) & (~1), |
3040 | (struct ipmi_addr *) &(lan_addr), | 3146 | (struct ipmi_addr *) &(lan_addr), |
3041 | &recv_msg)) | 3147 | &recv_msg)) { |
3042 | { | 3148 | /* |
3043 | /* We were unable to find the sequence number, | 3149 | * We were unable to find the sequence number, |
3044 | so just nuke the message. */ | 3150 | * so just nuke the message. |
3045 | spin_lock_irqsave(&intf->counter_lock, flags); | 3151 | */ |
3046 | intf->unhandled_lan_responses++; | 3152 | ipmi_inc_stat(intf, unhandled_lan_responses); |
3047 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3048 | return 0; | 3153 | return 0; |
3049 | } | 3154 | } |
3050 | 3155 | ||
3051 | memcpy(recv_msg->msg_data, | 3156 | memcpy(recv_msg->msg_data, |
3052 | &(msg->rsp[11]), | 3157 | &(msg->rsp[11]), |
3053 | msg->rsp_size - 11); | 3158 | msg->rsp_size - 11); |
3054 | /* The other fields matched, so no need to set them, except | 3159 | /* |
3055 | for netfn, which needs to be the response that was | 3160 | * The other fields matched, so no need to set them, except |
3056 | returned, not the request value. */ | 3161 | * for netfn, which needs to be the response that was |
3162 | * returned, not the request value. | ||
3163 | */ | ||
3057 | recv_msg->msg.netfn = msg->rsp[6] >> 2; | 3164 | recv_msg->msg.netfn = msg->rsp[6] >> 2; |
3058 | recv_msg->msg.data = recv_msg->msg_data; | 3165 | recv_msg->msg.data = recv_msg->msg_data; |
3059 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3166 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3060 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3167 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3061 | spin_lock_irqsave(&intf->counter_lock, flags); | 3168 | ipmi_inc_stat(intf, handled_lan_responses); |
3062 | intf->handled_lan_responses++; | ||
3063 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3064 | deliver_response(recv_msg); | 3169 | deliver_response(recv_msg); |
3065 | 3170 | ||
3066 | return 0; | 3171 | return 0; |
@@ -3077,13 +3182,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3077 | ipmi_user_t user = NULL; | 3182 | ipmi_user_t user = NULL; |
3078 | struct ipmi_lan_addr *lan_addr; | 3183 | struct ipmi_lan_addr *lan_addr; |
3079 | struct ipmi_recv_msg *recv_msg; | 3184 | struct ipmi_recv_msg *recv_msg; |
3080 | unsigned long flags; | ||
3081 | 3185 | ||
3082 | if (msg->rsp_size < 12) { | 3186 | if (msg->rsp_size < 12) { |
3083 | /* Message not big enough, just ignore it. */ | 3187 | /* Message not big enough, just ignore it. */ |
3084 | spin_lock_irqsave(&intf->counter_lock, flags); | 3188 | ipmi_inc_stat(intf, invalid_commands); |
3085 | intf->invalid_commands++; | ||
3086 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3087 | return 0; | 3189 | return 0; |
3088 | } | 3190 | } |
3089 | 3191 | ||
@@ -3107,23 +3209,23 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3107 | 3209 | ||
3108 | if (user == NULL) { | 3210 | if (user == NULL) { |
3109 | /* We didn't find a user, just give up. */ | 3211 | /* We didn't find a user, just give up. */ |
3110 | spin_lock_irqsave(&intf->counter_lock, flags); | 3212 | ipmi_inc_stat(intf, unhandled_commands); |
3111 | intf->unhandled_commands++; | ||
3112 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3113 | 3213 | ||
3114 | rv = 0; /* Don't do anything with these messages, just | 3214 | /* |
3115 | allow them to be freed. */ | 3215 | * Don't do anything with these messages, just allow |
3216 | * them to be freed. | ||
3217 | */ | ||
3218 | rv = 0; | ||
3116 | } else { | 3219 | } else { |
3117 | /* Deliver the message to the user. */ | 3220 | /* Deliver the message to the user. */ |
3118 | spin_lock_irqsave(&intf->counter_lock, flags); | 3221 | ipmi_inc_stat(intf, handled_commands); |
3119 | intf->handled_commands++; | ||
3120 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3121 | 3222 | ||
3122 | recv_msg = ipmi_alloc_recv_msg(); | 3223 | recv_msg = ipmi_alloc_recv_msg(); |
3123 | if (!recv_msg) { | 3224 | if (!recv_msg) { |
3124 | /* We couldn't allocate memory for the | 3225 | /* |
3125 | message, so requeue it for handling | 3226 | * We couldn't allocate memory for the |
3126 | later. */ | 3227 | * message, so requeue it for handling later. |
3228 | */ | ||
3127 | rv = 1; | 3229 | rv = 1; |
3128 | kref_put(&user->refcount, free_user); | 3230 | kref_put(&user->refcount, free_user); |
3129 | } else { | 3231 | } else { |
@@ -3137,8 +3239,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3137 | lan_addr->channel = msg->rsp[3] & 0xf; | 3239 | lan_addr->channel = msg->rsp[3] & 0xf; |
3138 | lan_addr->privilege = msg->rsp[3] >> 4; | 3240 | lan_addr->privilege = msg->rsp[3] >> 4; |
3139 | 3241 | ||
3140 | /* Extract the rest of the message information | 3242 | /* |
3141 | from the IPMB header.*/ | 3243 | * Extract the rest of the message information |
3244 | * from the IPMB header. | ||
3245 | */ | ||
3142 | recv_msg->user = user; | 3246 | recv_msg->user = user; |
3143 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; | 3247 | recv_msg->recv_type = IPMI_CMD_RECV_TYPE; |
3144 | recv_msg->msgid = msg->rsp[9] >> 2; | 3248 | recv_msg->msgid = msg->rsp[9] >> 2; |
@@ -3146,8 +3250,10 @@ static int handle_lan_get_msg_cmd(ipmi_smi_t intf, | |||
3146 | recv_msg->msg.cmd = msg->rsp[10]; | 3250 | recv_msg->msg.cmd = msg->rsp[10]; |
3147 | recv_msg->msg.data = recv_msg->msg_data; | 3251 | recv_msg->msg.data = recv_msg->msg_data; |
3148 | 3252 | ||
3149 | /* We chop off 12, not 11 bytes because the checksum | 3253 | /* |
3150 | at the end also needs to be removed. */ | 3254 | * We chop off 12, not 11 bytes because the checksum |
3255 | * at the end also needs to be removed. | ||
3256 | */ | ||
3151 | recv_msg->msg.data_len = msg->rsp_size - 12; | 3257 | recv_msg->msg.data_len = msg->rsp_size - 12; |
3152 | memcpy(recv_msg->msg_data, | 3258 | memcpy(recv_msg->msg_data, |
3153 | &(msg->rsp[11]), | 3259 | &(msg->rsp[11]), |
@@ -3163,7 +3269,7 @@ static void copy_event_into_recv_msg(struct ipmi_recv_msg *recv_msg, | |||
3163 | struct ipmi_smi_msg *msg) | 3269 | struct ipmi_smi_msg *msg) |
3164 | { | 3270 | { |
3165 | struct ipmi_system_interface_addr *smi_addr; | 3271 | struct ipmi_system_interface_addr *smi_addr; |
3166 | 3272 | ||
3167 | recv_msg->msgid = 0; | 3273 | recv_msg->msgid = 0; |
3168 | smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); | 3274 | smi_addr = (struct ipmi_system_interface_addr *) &(recv_msg->addr); |
3169 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 3275 | smi_addr->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
@@ -3189,9 +3295,7 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3189 | 3295 | ||
3190 | if (msg->rsp_size < 19) { | 3296 | if (msg->rsp_size < 19) { |
3191 | /* Message is too small to be an IPMB event. */ | 3297 | /* Message is too small to be an IPMB event. */ |
3192 | spin_lock_irqsave(&intf->counter_lock, flags); | 3298 | ipmi_inc_stat(intf, invalid_events); |
3193 | intf->invalid_events++; | ||
3194 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3195 | return 0; | 3299 | return 0; |
3196 | } | 3300 | } |
3197 | 3301 | ||
@@ -3204,12 +3308,12 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3204 | 3308 | ||
3205 | spin_lock_irqsave(&intf->events_lock, flags); | 3309 | spin_lock_irqsave(&intf->events_lock, flags); |
3206 | 3310 | ||
3207 | spin_lock(&intf->counter_lock); | 3311 | ipmi_inc_stat(intf, events); |
3208 | intf->events++; | ||
3209 | spin_unlock(&intf->counter_lock); | ||
3210 | 3312 | ||
3211 | /* Allocate and fill in one message for every user that is getting | 3313 | /* |
3212 | events. */ | 3314 | * Allocate and fill in one message for every user that is |
3315 | * getting events. | ||
3316 | */ | ||
3213 | rcu_read_lock(); | 3317 | rcu_read_lock(); |
3214 | list_for_each_entry_rcu(user, &intf->users, link) { | 3318 | list_for_each_entry_rcu(user, &intf->users, link) { |
3215 | if (!user->gets_events) | 3319 | if (!user->gets_events) |
@@ -3223,9 +3327,11 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3223 | list_del(&recv_msg->link); | 3327 | list_del(&recv_msg->link); |
3224 | ipmi_free_recv_msg(recv_msg); | 3328 | ipmi_free_recv_msg(recv_msg); |
3225 | } | 3329 | } |
3226 | /* We couldn't allocate memory for the | 3330 | /* |
3227 | message, so requeue it for handling | 3331 | * We couldn't allocate memory for the |
3228 | later. */ | 3332 | * message, so requeue it for handling |
3333 | * later. | ||
3334 | */ | ||
3229 | rv = 1; | 3335 | rv = 1; |
3230 | goto out; | 3336 | goto out; |
3231 | } | 3337 | } |
@@ -3246,13 +3352,17 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3246 | deliver_response(recv_msg); | 3352 | deliver_response(recv_msg); |
3247 | } | 3353 | } |
3248 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { | 3354 | } else if (intf->waiting_events_count < MAX_EVENTS_IN_QUEUE) { |
3249 | /* No one to receive the message, put it in queue if there's | 3355 | /* |
3250 | not already too many things in the queue. */ | 3356 | * No one to receive the message, put it in queue if there's |
3357 | * not already too many things in the queue. | ||
3358 | */ | ||
3251 | recv_msg = ipmi_alloc_recv_msg(); | 3359 | recv_msg = ipmi_alloc_recv_msg(); |
3252 | if (!recv_msg) { | 3360 | if (!recv_msg) { |
3253 | /* We couldn't allocate memory for the | 3361 | /* |
3254 | message, so requeue it for handling | 3362 | * We couldn't allocate memory for the |
3255 | later. */ | 3363 | * message, so requeue it for handling |
3364 | * later. | ||
3365 | */ | ||
3256 | rv = 1; | 3366 | rv = 1; |
3257 | goto out; | 3367 | goto out; |
3258 | } | 3368 | } |
@@ -3260,11 +3370,14 @@ static int handle_read_event_rsp(ipmi_smi_t intf, | |||
3260 | copy_event_into_recv_msg(recv_msg, msg); | 3370 | copy_event_into_recv_msg(recv_msg, msg); |
3261 | list_add_tail(&(recv_msg->link), &(intf->waiting_events)); | 3371 | list_add_tail(&(recv_msg->link), &(intf->waiting_events)); |
3262 | intf->waiting_events_count++; | 3372 | intf->waiting_events_count++; |
3263 | } else { | 3373 | } else if (!intf->event_msg_printed) { |
3264 | /* There's too many things in the queue, discard this | 3374 | /* |
3265 | message. */ | 3375 | * There's too many things in the queue, discard this |
3266 | printk(KERN_WARNING PFX "Event queue full, discarding an" | 3376 | * message. |
3267 | " incoming event\n"); | 3377 | */ |
3378 | printk(KERN_WARNING PFX "Event queue full, discarding" | ||
3379 | " incoming events\n"); | ||
3380 | intf->event_msg_printed = 1; | ||
3268 | } | 3381 | } |
3269 | 3382 | ||
3270 | out: | 3383 | out: |
@@ -3277,16 +3390,15 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3277 | struct ipmi_smi_msg *msg) | 3390 | struct ipmi_smi_msg *msg) |
3278 | { | 3391 | { |
3279 | struct ipmi_recv_msg *recv_msg; | 3392 | struct ipmi_recv_msg *recv_msg; |
3280 | unsigned long flags; | ||
3281 | struct ipmi_user *user; | 3393 | struct ipmi_user *user; |
3282 | 3394 | ||
3283 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; | 3395 | recv_msg = (struct ipmi_recv_msg *) msg->user_data; |
3284 | if (recv_msg == NULL) | 3396 | if (recv_msg == NULL) { |
3285 | { | 3397 | printk(KERN_WARNING |
3286 | printk(KERN_WARNING"IPMI message received with no owner. This\n" | 3398 | "IPMI message received with no owner. This\n" |
3287 | "could be because of a malformed message, or\n" | 3399 | "could be because of a malformed message, or\n" |
3288 | "because of a hardware error. Contact your\n" | 3400 | "because of a hardware error. Contact your\n" |
3289 | "hardware vender for assistance\n"); | 3401 | "hardware vender for assistance\n"); |
3290 | return 0; | 3402 | return 0; |
3291 | } | 3403 | } |
3292 | 3404 | ||
@@ -3294,16 +3406,12 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3294 | /* Make sure the user still exists. */ | 3406 | /* Make sure the user still exists. */ |
3295 | if (user && !user->valid) { | 3407 | if (user && !user->valid) { |
3296 | /* The user for the message went away, so give up. */ | 3408 | /* The user for the message went away, so give up. */ |
3297 | spin_lock_irqsave(&intf->counter_lock, flags); | 3409 | ipmi_inc_stat(intf, unhandled_local_responses); |
3298 | intf->unhandled_local_responses++; | ||
3299 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3300 | ipmi_free_recv_msg(recv_msg); | 3410 | ipmi_free_recv_msg(recv_msg); |
3301 | } else { | 3411 | } else { |
3302 | struct ipmi_system_interface_addr *smi_addr; | 3412 | struct ipmi_system_interface_addr *smi_addr; |
3303 | 3413 | ||
3304 | spin_lock_irqsave(&intf->counter_lock, flags); | 3414 | ipmi_inc_stat(intf, handled_local_responses); |
3305 | intf->handled_local_responses++; | ||
3306 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3307 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; | 3415 | recv_msg->recv_type = IPMI_RESPONSE_RECV_TYPE; |
3308 | recv_msg->msgid = msg->msgid; | 3416 | recv_msg->msgid = msg->msgid; |
3309 | smi_addr = ((struct ipmi_system_interface_addr *) | 3417 | smi_addr = ((struct ipmi_system_interface_addr *) |
@@ -3324,9 +3432,11 @@ static int handle_bmc_rsp(ipmi_smi_t intf, | |||
3324 | return 0; | 3432 | return 0; |
3325 | } | 3433 | } |
3326 | 3434 | ||
3327 | /* Handle a new message. Return 1 if the message should be requeued, | 3435 | /* |
3328 | 0 if the message should be freed, or -1 if the message should not | 3436 | * Handle a new message. Return 1 if the message should be requeued, |
3329 | be freed or requeued. */ | 3437 | * 0 if the message should be freed, or -1 if the message should not |
3438 | * be freed or requeued. | ||
3439 | */ | ||
3330 | static int handle_new_recv_msg(ipmi_smi_t intf, | 3440 | static int handle_new_recv_msg(ipmi_smi_t intf, |
3331 | struct ipmi_smi_msg *msg) | 3441 | struct ipmi_smi_msg *msg) |
3332 | { | 3442 | { |
@@ -3351,10 +3461,12 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3351 | msg->rsp[1] = msg->data[1]; | 3461 | msg->rsp[1] = msg->data[1]; |
3352 | msg->rsp[2] = IPMI_ERR_UNSPECIFIED; | 3462 | msg->rsp[2] = IPMI_ERR_UNSPECIFIED; |
3353 | msg->rsp_size = 3; | 3463 | msg->rsp_size = 3; |
3354 | } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1))/* Netfn */ | 3464 | } else if (((msg->rsp[0] >> 2) != ((msg->data[0] >> 2) | 1)) |
3355 | || (msg->rsp[1] != msg->data[1])) /* Command */ | 3465 | || (msg->rsp[1] != msg->data[1])) { |
3356 | { | 3466 | /* |
3357 | /* The response is not even marginally correct. */ | 3467 | * The NetFN and Command in the response is not even |
3468 | * marginally correct. | ||
3469 | */ | ||
3358 | printk(KERN_WARNING PFX "BMC returned incorrect response," | 3470 | printk(KERN_WARNING PFX "BMC returned incorrect response," |
3359 | " expected netfn %x cmd %x, got netfn %x cmd %x\n", | 3471 | " expected netfn %x cmd %x, got netfn %x cmd %x\n", |
3360 | (msg->data[0] >> 2) | 1, msg->data[1], | 3472 | (msg->data[0] >> 2) | 1, msg->data[1], |
@@ -3369,10 +3481,11 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3369 | 3481 | ||
3370 | if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3482 | if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3371 | && (msg->rsp[1] == IPMI_SEND_MSG_CMD) | 3483 | && (msg->rsp[1] == IPMI_SEND_MSG_CMD) |
3372 | && (msg->user_data != NULL)) | 3484 | && (msg->user_data != NULL)) { |
3373 | { | 3485 | /* |
3374 | /* It's a response to a response we sent. For this we | 3486 | * It's a response to a response we sent. For this we |
3375 | deliver a send message response to the user. */ | 3487 | * deliver a send message response to the user. |
3488 | */ | ||
3376 | struct ipmi_recv_msg *recv_msg = msg->user_data; | 3489 | struct ipmi_recv_msg *recv_msg = msg->user_data; |
3377 | 3490 | ||
3378 | requeue = 0; | 3491 | requeue = 0; |
@@ -3398,8 +3511,7 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3398 | recv_msg->msg_data[0] = msg->rsp[2]; | 3511 | recv_msg->msg_data[0] = msg->rsp[2]; |
3399 | deliver_response(recv_msg); | 3512 | deliver_response(recv_msg); |
3400 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3513 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3401 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) | 3514 | && (msg->rsp[1] == IPMI_GET_MSG_CMD)) { |
3402 | { | ||
3403 | /* It's from the receive queue. */ | 3515 | /* It's from the receive queue. */ |
3404 | chan = msg->rsp[3] & 0xf; | 3516 | chan = msg->rsp[3] & 0xf; |
3405 | if (chan >= IPMI_MAX_CHANNELS) { | 3517 | if (chan >= IPMI_MAX_CHANNELS) { |
@@ -3411,12 +3523,16 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3411 | switch (intf->channels[chan].medium) { | 3523 | switch (intf->channels[chan].medium) { |
3412 | case IPMI_CHANNEL_MEDIUM_IPMB: | 3524 | case IPMI_CHANNEL_MEDIUM_IPMB: |
3413 | if (msg->rsp[4] & 0x04) { | 3525 | if (msg->rsp[4] & 0x04) { |
3414 | /* It's a response, so find the | 3526 | /* |
3415 | requesting message and send it up. */ | 3527 | * It's a response, so find the |
3528 | * requesting message and send it up. | ||
3529 | */ | ||
3416 | requeue = handle_ipmb_get_msg_rsp(intf, msg); | 3530 | requeue = handle_ipmb_get_msg_rsp(intf, msg); |
3417 | } else { | 3531 | } else { |
3418 | /* It's a command to the SMS from some other | 3532 | /* |
3419 | entity. Handle that. */ | 3533 | * It's a command to the SMS from some other |
3534 | * entity. Handle that. | ||
3535 | */ | ||
3420 | requeue = handle_ipmb_get_msg_cmd(intf, msg); | 3536 | requeue = handle_ipmb_get_msg_cmd(intf, msg); |
3421 | } | 3537 | } |
3422 | break; | 3538 | break; |
@@ -3424,25 +3540,30 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3424 | case IPMI_CHANNEL_MEDIUM_8023LAN: | 3540 | case IPMI_CHANNEL_MEDIUM_8023LAN: |
3425 | case IPMI_CHANNEL_MEDIUM_ASYNC: | 3541 | case IPMI_CHANNEL_MEDIUM_ASYNC: |
3426 | if (msg->rsp[6] & 0x04) { | 3542 | if (msg->rsp[6] & 0x04) { |
3427 | /* It's a response, so find the | 3543 | /* |
3428 | requesting message and send it up. */ | 3544 | * It's a response, so find the |
3545 | * requesting message and send it up. | ||
3546 | */ | ||
3429 | requeue = handle_lan_get_msg_rsp(intf, msg); | 3547 | requeue = handle_lan_get_msg_rsp(intf, msg); |
3430 | } else { | 3548 | } else { |
3431 | /* It's a command to the SMS from some other | 3549 | /* |
3432 | entity. Handle that. */ | 3550 | * It's a command to the SMS from some other |
3551 | * entity. Handle that. | ||
3552 | */ | ||
3433 | requeue = handle_lan_get_msg_cmd(intf, msg); | 3553 | requeue = handle_lan_get_msg_cmd(intf, msg); |
3434 | } | 3554 | } |
3435 | break; | 3555 | break; |
3436 | 3556 | ||
3437 | default: | 3557 | default: |
3438 | /* We don't handle the channel type, so just | 3558 | /* |
3439 | * free the message. */ | 3559 | * We don't handle the channel type, so just |
3560 | * free the message. | ||
3561 | */ | ||
3440 | requeue = 0; | 3562 | requeue = 0; |
3441 | } | 3563 | } |
3442 | 3564 | ||
3443 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) | 3565 | } else if ((msg->rsp[0] == ((IPMI_NETFN_APP_REQUEST|1) << 2)) |
3444 | && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) | 3566 | && (msg->rsp[1] == IPMI_READ_EVENT_MSG_BUFFER_CMD)) { |
3445 | { | ||
3446 | /* It's an asyncronous event. */ | 3567 | /* It's an asyncronous event. */ |
3447 | requeue = handle_read_event_rsp(intf, msg); | 3568 | requeue = handle_read_event_rsp(intf, msg); |
3448 | } else { | 3569 | } else { |
@@ -3458,71 +3579,82 @@ static int handle_new_recv_msg(ipmi_smi_t intf, | |||
3458 | void ipmi_smi_msg_received(ipmi_smi_t intf, | 3579 | void ipmi_smi_msg_received(ipmi_smi_t intf, |
3459 | struct ipmi_smi_msg *msg) | 3580 | struct ipmi_smi_msg *msg) |
3460 | { | 3581 | { |
3461 | unsigned long flags; | 3582 | unsigned long flags = 0; /* keep us warning-free. */ |
3462 | int rv; | 3583 | int rv; |
3584 | int run_to_completion; | ||
3463 | 3585 | ||
3464 | 3586 | ||
3465 | if ((msg->data_size >= 2) | 3587 | if ((msg->data_size >= 2) |
3466 | && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) | 3588 | && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2)) |
3467 | && (msg->data[1] == IPMI_SEND_MSG_CMD) | 3589 | && (msg->data[1] == IPMI_SEND_MSG_CMD) |
3468 | && (msg->user_data == NULL)) | 3590 | && (msg->user_data == NULL)) { |
3469 | { | 3591 | /* |
3470 | /* This is the local response to a command send, start | 3592 | * This is the local response to a command send, start |
3471 | the timer for these. The user_data will not be | 3593 | * the timer for these. The user_data will not be |
3472 | NULL if this is a response send, and we will let | 3594 | * NULL if this is a response send, and we will let |
3473 | response sends just go through. */ | 3595 | * response sends just go through. |
3474 | 3596 | */ | |
3475 | /* Check for errors, if we get certain errors (ones | 3597 | |
3476 | that mean basically we can try again later), we | 3598 | /* |
3477 | ignore them and start the timer. Otherwise we | 3599 | * Check for errors, if we get certain errors (ones |
3478 | report the error immediately. */ | 3600 | * that mean basically we can try again later), we |
3601 | * ignore them and start the timer. Otherwise we | ||
3602 | * report the error immediately. | ||
3603 | */ | ||
3479 | if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) | 3604 | if ((msg->rsp_size >= 3) && (msg->rsp[2] != 0) |
3480 | && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) | 3605 | && (msg->rsp[2] != IPMI_NODE_BUSY_ERR) |
3481 | && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) | 3606 | && (msg->rsp[2] != IPMI_LOST_ARBITRATION_ERR) |
3482 | && (msg->rsp[2] != IPMI_BUS_ERR) | 3607 | && (msg->rsp[2] != IPMI_BUS_ERR) |
3483 | && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) | 3608 | && (msg->rsp[2] != IPMI_NAK_ON_WRITE_ERR)) { |
3484 | { | ||
3485 | int chan = msg->rsp[3] & 0xf; | 3609 | int chan = msg->rsp[3] & 0xf; |
3486 | 3610 | ||
3487 | /* Got an error sending the message, handle it. */ | 3611 | /* Got an error sending the message, handle it. */ |
3488 | spin_lock_irqsave(&intf->counter_lock, flags); | ||
3489 | if (chan >= IPMI_MAX_CHANNELS) | 3612 | if (chan >= IPMI_MAX_CHANNELS) |
3490 | ; /* This shouldn't happen */ | 3613 | ; /* This shouldn't happen */ |
3491 | else if ((intf->channels[chan].medium | 3614 | else if ((intf->channels[chan].medium |
3492 | == IPMI_CHANNEL_MEDIUM_8023LAN) | 3615 | == IPMI_CHANNEL_MEDIUM_8023LAN) |
3493 | || (intf->channels[chan].medium | 3616 | || (intf->channels[chan].medium |
3494 | == IPMI_CHANNEL_MEDIUM_ASYNC)) | 3617 | == IPMI_CHANNEL_MEDIUM_ASYNC)) |
3495 | intf->sent_lan_command_errs++; | 3618 | ipmi_inc_stat(intf, sent_lan_command_errs); |
3496 | else | 3619 | else |
3497 | intf->sent_ipmb_command_errs++; | 3620 | ipmi_inc_stat(intf, sent_ipmb_command_errs); |
3498 | spin_unlock_irqrestore(&intf->counter_lock, flags); | ||
3499 | intf_err_seq(intf, msg->msgid, msg->rsp[2]); | 3621 | intf_err_seq(intf, msg->msgid, msg->rsp[2]); |
3500 | } else { | 3622 | } else |
3501 | /* The message was sent, start the timer. */ | 3623 | /* The message was sent, start the timer. */ |
3502 | intf_start_seq_timer(intf, msg->msgid); | 3624 | intf_start_seq_timer(intf, msg->msgid); |
3503 | } | ||
3504 | 3625 | ||
3505 | ipmi_free_smi_msg(msg); | 3626 | ipmi_free_smi_msg(msg); |
3506 | goto out; | 3627 | goto out; |
3507 | } | 3628 | } |
3508 | 3629 | ||
3509 | /* To preserve message order, if the list is not empty, we | 3630 | /* |
3510 | tack this message onto the end of the list. */ | 3631 | * To preserve message order, if the list is not empty, we |
3511 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | 3632 | * tack this message onto the end of the list. |
3633 | */ | ||
3634 | run_to_completion = intf->run_to_completion; | ||
3635 | if (!run_to_completion) | ||
3636 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | ||
3512 | if (!list_empty(&intf->waiting_msgs)) { | 3637 | if (!list_empty(&intf->waiting_msgs)) { |
3513 | list_add_tail(&msg->link, &intf->waiting_msgs); | 3638 | list_add_tail(&msg->link, &intf->waiting_msgs); |
3514 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3639 | if (!run_to_completion) |
3640 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | ||
3515 | goto out; | 3641 | goto out; |
3516 | } | 3642 | } |
3517 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3643 | if (!run_to_completion) |
3518 | 3644 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | |
3645 | |||
3519 | rv = handle_new_recv_msg(intf, msg); | 3646 | rv = handle_new_recv_msg(intf, msg); |
3520 | if (rv > 0) { | 3647 | if (rv > 0) { |
3521 | /* Could not handle the message now, just add it to a | 3648 | /* |
3522 | list to handle later. */ | 3649 | * Could not handle the message now, just add it to a |
3523 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | 3650 | * list to handle later. |
3651 | */ | ||
3652 | run_to_completion = intf->run_to_completion; | ||
3653 | if (!run_to_completion) | ||
3654 | spin_lock_irqsave(&intf->waiting_msgs_lock, flags); | ||
3524 | list_add_tail(&msg->link, &intf->waiting_msgs); | 3655 | list_add_tail(&msg->link, &intf->waiting_msgs); |
3525 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3656 | if (!run_to_completion) |
3657 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | ||
3526 | } else if (rv == 0) { | 3658 | } else if (rv == 0) { |
3527 | ipmi_free_smi_msg(msg); | 3659 | ipmi_free_smi_msg(msg); |
3528 | } | 3660 | } |
@@ -3530,6 +3662,7 @@ void ipmi_smi_msg_received(ipmi_smi_t intf, | |||
3530 | out: | 3662 | out: |
3531 | return; | 3663 | return; |
3532 | } | 3664 | } |
3665 | EXPORT_SYMBOL(ipmi_smi_msg_received); | ||
3533 | 3666 | ||
3534 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | 3667 | void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) |
3535 | { | 3668 | { |
@@ -3544,7 +3677,7 @@ void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf) | |||
3544 | } | 3677 | } |
3545 | rcu_read_unlock(); | 3678 | rcu_read_unlock(); |
3546 | } | 3679 | } |
3547 | 3680 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); | |
3548 | 3681 | ||
3549 | static struct ipmi_smi_msg * | 3682 | static struct ipmi_smi_msg * |
3550 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | 3683 | smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, |
@@ -3552,14 +3685,16 @@ smi_from_recv_msg(ipmi_smi_t intf, struct ipmi_recv_msg *recv_msg, | |||
3552 | { | 3685 | { |
3553 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); | 3686 | struct ipmi_smi_msg *smi_msg = ipmi_alloc_smi_msg(); |
3554 | if (!smi_msg) | 3687 | if (!smi_msg) |
3555 | /* If we can't allocate the message, then just return, we | 3688 | /* |
3556 | get 4 retries, so this should be ok. */ | 3689 | * If we can't allocate the message, then just return, we |
3690 | * get 4 retries, so this should be ok. | ||
3691 | */ | ||
3557 | return NULL; | 3692 | return NULL; |
3558 | 3693 | ||
3559 | memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); | 3694 | memcpy(smi_msg->data, recv_msg->msg.data, recv_msg->msg.data_len); |
3560 | smi_msg->data_size = recv_msg->msg.data_len; | 3695 | smi_msg->data_size = recv_msg->msg.data_len; |
3561 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); | 3696 | smi_msg->msgid = STORE_SEQ_IN_MSGID(seq, seqid); |
3562 | 3697 | ||
3563 | #ifdef DEBUG_MSGING | 3698 | #ifdef DEBUG_MSGING |
3564 | { | 3699 | { |
3565 | int m; | 3700 | int m; |
@@ -3594,28 +3729,26 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3594 | ent->inuse = 0; | 3729 | ent->inuse = 0; |
3595 | msg = ent->recv_msg; | 3730 | msg = ent->recv_msg; |
3596 | list_add_tail(&msg->link, timeouts); | 3731 | list_add_tail(&msg->link, timeouts); |
3597 | spin_lock(&intf->counter_lock); | ||
3598 | if (ent->broadcast) | 3732 | if (ent->broadcast) |
3599 | intf->timed_out_ipmb_broadcasts++; | 3733 | ipmi_inc_stat(intf, timed_out_ipmb_broadcasts); |
3600 | else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) | 3734 | else if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) |
3601 | intf->timed_out_lan_commands++; | 3735 | ipmi_inc_stat(intf, timed_out_lan_commands); |
3602 | else | 3736 | else |
3603 | intf->timed_out_ipmb_commands++; | 3737 | ipmi_inc_stat(intf, timed_out_ipmb_commands); |
3604 | spin_unlock(&intf->counter_lock); | ||
3605 | } else { | 3738 | } else { |
3606 | struct ipmi_smi_msg *smi_msg; | 3739 | struct ipmi_smi_msg *smi_msg; |
3607 | /* More retries, send again. */ | 3740 | /* More retries, send again. */ |
3608 | 3741 | ||
3609 | /* Start with the max timer, set to normal | 3742 | /* |
3610 | timer after the message is sent. */ | 3743 | * Start with the max timer, set to normal timer after |
3744 | * the message is sent. | ||
3745 | */ | ||
3611 | ent->timeout = MAX_MSG_TIMEOUT; | 3746 | ent->timeout = MAX_MSG_TIMEOUT; |
3612 | ent->retries_left--; | 3747 | ent->retries_left--; |
3613 | spin_lock(&intf->counter_lock); | ||
3614 | if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) | 3748 | if (ent->recv_msg->addr.addr_type == IPMI_LAN_ADDR_TYPE) |
3615 | intf->retransmitted_lan_commands++; | 3749 | ipmi_inc_stat(intf, retransmitted_lan_commands); |
3616 | else | 3750 | else |
3617 | intf->retransmitted_ipmb_commands++; | 3751 | ipmi_inc_stat(intf, retransmitted_ipmb_commands); |
3618 | spin_unlock(&intf->counter_lock); | ||
3619 | 3752 | ||
3620 | smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, | 3753 | smi_msg = smi_from_recv_msg(intf, ent->recv_msg, slot, |
3621 | ent->seqid); | 3754 | ent->seqid); |
@@ -3624,11 +3757,13 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent, | |||
3624 | 3757 | ||
3625 | spin_unlock_irqrestore(&intf->seq_lock, *flags); | 3758 | spin_unlock_irqrestore(&intf->seq_lock, *flags); |
3626 | 3759 | ||
3627 | /* Send the new message. We send with a zero | 3760 | /* |
3628 | * priority. It timed out, I doubt time is | 3761 | * Send the new message. We send with a zero |
3629 | * that critical now, and high priority | 3762 | * priority. It timed out, I doubt time is that |
3630 | * messages are really only for messages to the | 3763 | * critical now, and high priority messages are really |
3631 | * local MC, which don't get resent. */ | 3764 | * only for messages to the local MC, which don't get |
3765 | * resent. | ||
3766 | */ | ||
3632 | handlers = intf->handlers; | 3767 | handlers = intf->handlers; |
3633 | if (handlers) | 3768 | if (handlers) |
3634 | intf->handlers->sender(intf->send_info, | 3769 | intf->handlers->sender(intf->send_info, |
@@ -3659,16 +3794,20 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3659 | list_del(&smi_msg->link); | 3794 | list_del(&smi_msg->link); |
3660 | ipmi_free_smi_msg(smi_msg); | 3795 | ipmi_free_smi_msg(smi_msg); |
3661 | } else { | 3796 | } else { |
3662 | /* To preserve message order, quit if we | 3797 | /* |
3663 | can't handle a message. */ | 3798 | * To preserve message order, quit if we |
3799 | * can't handle a message. | ||
3800 | */ | ||
3664 | break; | 3801 | break; |
3665 | } | 3802 | } |
3666 | } | 3803 | } |
3667 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); | 3804 | spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags); |
3668 | 3805 | ||
3669 | /* Go through the seq table and find any messages that | 3806 | /* |
3670 | have timed out, putting them in the timeouts | 3807 | * Go through the seq table and find any messages that |
3671 | list. */ | 3808 | * have timed out, putting them in the timeouts |
3809 | * list. | ||
3810 | */ | ||
3672 | INIT_LIST_HEAD(&timeouts); | 3811 | INIT_LIST_HEAD(&timeouts); |
3673 | spin_lock_irqsave(&intf->seq_lock, flags); | 3812 | spin_lock_irqsave(&intf->seq_lock, flags); |
3674 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) | 3813 | for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) |
@@ -3694,8 +3833,7 @@ static void ipmi_timeout_handler(long timeout_period) | |||
3694 | intf->auto_maintenance_timeout | 3833 | intf->auto_maintenance_timeout |
3695 | -= timeout_period; | 3834 | -= timeout_period; |
3696 | if (!intf->maintenance_mode | 3835 | if (!intf->maintenance_mode |
3697 | && (intf->auto_maintenance_timeout <= 0)) | 3836 | && (intf->auto_maintenance_timeout <= 0)) { |
3698 | { | ||
3699 | intf->maintenance_mode_enable = 0; | 3837 | intf->maintenance_mode_enable = 0; |
3700 | maintenance_mode_update(intf); | 3838 | maintenance_mode_update(intf); |
3701 | } | 3839 | } |
@@ -3713,8 +3851,10 @@ static void ipmi_request_event(void) | |||
3713 | struct ipmi_smi_handlers *handlers; | 3851 | struct ipmi_smi_handlers *handlers; |
3714 | 3852 | ||
3715 | rcu_read_lock(); | 3853 | rcu_read_lock(); |
3716 | /* Called from the timer, no need to check if handlers is | 3854 | /* |
3717 | * valid. */ | 3855 | * Called from the timer, no need to check if handlers is |
3856 | * valid. | ||
3857 | */ | ||
3718 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { | 3858 | list_for_each_entry_rcu(intf, &ipmi_interfaces, link) { |
3719 | /* No event requests when in maintenance mode. */ | 3859 | /* No event requests when in maintenance mode. */ |
3720 | if (intf->maintenance_mode_enable) | 3860 | if (intf->maintenance_mode_enable) |
@@ -3735,10 +3875,12 @@ static struct timer_list ipmi_timer; | |||
3735 | /* How many jiffies does it take to get to the timeout time. */ | 3875 | /* How many jiffies does it take to get to the timeout time. */ |
3736 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) | 3876 | #define IPMI_TIMEOUT_JIFFIES ((IPMI_TIMEOUT_TIME * HZ) / 1000) |
3737 | 3877 | ||
3738 | /* Request events from the queue every second (this is the number of | 3878 | /* |
3739 | IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the | 3879 | * Request events from the queue every second (this is the number of |
3740 | future, IPMI will add a way to know immediately if an event is in | 3880 | * IPMI_TIMEOUT_TIMES between event requests). Hopefully, in the |
3741 | the queue and this silliness can go away. */ | 3881 | * future, IPMI will add a way to know immediately if an event is in |
3882 | * the queue and this silliness can go away. | ||
3883 | */ | ||
3742 | #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) | 3884 | #define IPMI_REQUEST_EV_TIME (1000 / (IPMI_TIMEOUT_TIME)) |
3743 | 3885 | ||
3744 | static atomic_t stop_operation; | 3886 | static atomic_t stop_operation; |
@@ -3782,6 +3924,7 @@ struct ipmi_smi_msg *ipmi_alloc_smi_msg(void) | |||
3782 | } | 3924 | } |
3783 | return rv; | 3925 | return rv; |
3784 | } | 3926 | } |
3927 | EXPORT_SYMBOL(ipmi_alloc_smi_msg); | ||
3785 | 3928 | ||
3786 | static void free_recv_msg(struct ipmi_recv_msg *msg) | 3929 | static void free_recv_msg(struct ipmi_recv_msg *msg) |
3787 | { | 3930 | { |
@@ -3789,7 +3932,7 @@ static void free_recv_msg(struct ipmi_recv_msg *msg) | |||
3789 | kfree(msg); | 3932 | kfree(msg); |
3790 | } | 3933 | } |
3791 | 3934 | ||
3792 | struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) | 3935 | static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void) |
3793 | { | 3936 | { |
3794 | struct ipmi_recv_msg *rv; | 3937 | struct ipmi_recv_msg *rv; |
3795 | 3938 | ||
@@ -3808,6 +3951,7 @@ void ipmi_free_recv_msg(struct ipmi_recv_msg *msg) | |||
3808 | kref_put(&msg->user->refcount, free_user); | 3951 | kref_put(&msg->user->refcount, free_user); |
3809 | msg->done(msg); | 3952 | msg->done(msg); |
3810 | } | 3953 | } |
3954 | EXPORT_SYMBOL(ipmi_free_recv_msg); | ||
3811 | 3955 | ||
3812 | #ifdef CONFIG_IPMI_PANIC_EVENT | 3956 | #ifdef CONFIG_IPMI_PANIC_EVENT |
3813 | 3957 | ||
@@ -3825,8 +3969,7 @@ static void event_receiver_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3825 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 3969 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
3826 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) | 3970 | && (msg->msg.netfn == IPMI_NETFN_SENSOR_EVENT_RESPONSE) |
3827 | && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) | 3971 | && (msg->msg.cmd == IPMI_GET_EVENT_RECEIVER_CMD) |
3828 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) | 3972 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { |
3829 | { | ||
3830 | /* A get event receiver command, save it. */ | 3973 | /* A get event receiver command, save it. */ |
3831 | intf->event_receiver = msg->msg.data[1]; | 3974 | intf->event_receiver = msg->msg.data[1]; |
3832 | intf->event_receiver_lun = msg->msg.data[2] & 0x3; | 3975 | intf->event_receiver_lun = msg->msg.data[2] & 0x3; |
@@ -3838,10 +3981,11 @@ static void device_id_fetcher(ipmi_smi_t intf, struct ipmi_recv_msg *msg) | |||
3838 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) | 3981 | if ((msg->addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) |
3839 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) | 3982 | && (msg->msg.netfn == IPMI_NETFN_APP_RESPONSE) |
3840 | && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) | 3983 | && (msg->msg.cmd == IPMI_GET_DEVICE_ID_CMD) |
3841 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) | 3984 | && (msg->msg.data[0] == IPMI_CC_NO_ERROR)) { |
3842 | { | 3985 | /* |
3843 | /* A get device id command, save if we are an event | 3986 | * A get device id command, save if we are an event |
3844 | receiver or generator. */ | 3987 | * receiver or generator. |
3988 | */ | ||
3845 | intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; | 3989 | intf->local_sel_device = (msg->msg.data[6] >> 2) & 1; |
3846 | intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; | 3990 | intf->local_event_generator = (msg->msg.data[6] >> 5) & 1; |
3847 | } | 3991 | } |
@@ -3874,8 +4018,10 @@ static void send_panic_events(char *str) | |||
3874 | data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ | 4018 | data[4] = 0x6f; /* Sensor specific, IPMI table 36-1 */ |
3875 | data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ | 4019 | data[5] = 0xa1; /* Runtime stop OEM bytes 2 & 3. */ |
3876 | 4020 | ||
3877 | /* Put a few breadcrumbs in. Hopefully later we can add more things | 4021 | /* |
3878 | to make the panic events more useful. */ | 4022 | * Put a few breadcrumbs in. Hopefully later we can add more things |
4023 | * to make the panic events more useful. | ||
4024 | */ | ||
3879 | if (str) { | 4025 | if (str) { |
3880 | data[3] = str[0]; | 4026 | data[3] = str[0]; |
3881 | data[6] = str[1]; | 4027 | data[6] = str[1]; |
@@ -3891,6 +4037,7 @@ static void send_panic_events(char *str) | |||
3891 | /* Interface is not ready. */ | 4037 | /* Interface is not ready. */ |
3892 | continue; | 4038 | continue; |
3893 | 4039 | ||
4040 | intf->run_to_completion = 1; | ||
3894 | /* Send the event announcing the panic. */ | 4041 | /* Send the event announcing the panic. */ |
3895 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 4042 | intf->handlers->set_run_to_completion(intf->send_info, 1); |
3896 | i_ipmi_request(NULL, | 4043 | i_ipmi_request(NULL, |
@@ -3908,9 +4055,11 @@ static void send_panic_events(char *str) | |||
3908 | } | 4055 | } |
3909 | 4056 | ||
3910 | #ifdef CONFIG_IPMI_PANIC_STRING | 4057 | #ifdef CONFIG_IPMI_PANIC_STRING |
3911 | /* On every interface, dump a bunch of OEM event holding the | 4058 | /* |
3912 | string. */ | 4059 | * On every interface, dump a bunch of OEM event holding the |
3913 | if (!str) | 4060 | * string. |
4061 | */ | ||
4062 | if (!str) | ||
3914 | return; | 4063 | return; |
3915 | 4064 | ||
3916 | /* For every registered interface, send the event. */ | 4065 | /* For every registered interface, send the event. */ |
@@ -3931,11 +4080,13 @@ static void send_panic_events(char *str) | |||
3931 | */ | 4080 | */ |
3932 | smp_rmb(); | 4081 | smp_rmb(); |
3933 | 4082 | ||
3934 | /* First job here is to figure out where to send the | 4083 | /* |
3935 | OEM events. There's no way in IPMI to send OEM | 4084 | * First job here is to figure out where to send the |
3936 | events using an event send command, so we have to | 4085 | * OEM events. There's no way in IPMI to send OEM |
3937 | find the SEL to put them in and stick them in | 4086 | * events using an event send command, so we have to |
3938 | there. */ | 4087 | * find the SEL to put them in and stick them in |
4088 | * there. | ||
4089 | */ | ||
3939 | 4090 | ||
3940 | /* Get capabilities from the get device id. */ | 4091 | /* Get capabilities from the get device id. */ |
3941 | intf->local_sel_device = 0; | 4092 | intf->local_sel_device = 0; |
@@ -3983,24 +4134,29 @@ static void send_panic_events(char *str) | |||
3983 | } | 4134 | } |
3984 | intf->null_user_handler = NULL; | 4135 | intf->null_user_handler = NULL; |
3985 | 4136 | ||
3986 | /* Validate the event receiver. The low bit must not | 4137 | /* |
3987 | be 1 (it must be a valid IPMB address), it cannot | 4138 | * Validate the event receiver. The low bit must not |
3988 | be zero, and it must not be my address. */ | 4139 | * be 1 (it must be a valid IPMB address), it cannot |
3989 | if (((intf->event_receiver & 1) == 0) | 4140 | * be zero, and it must not be my address. |
4141 | */ | ||
4142 | if (((intf->event_receiver & 1) == 0) | ||
3990 | && (intf->event_receiver != 0) | 4143 | && (intf->event_receiver != 0) |
3991 | && (intf->event_receiver != intf->channels[0].address)) | 4144 | && (intf->event_receiver != intf->channels[0].address)) { |
3992 | { | 4145 | /* |
3993 | /* The event receiver is valid, send an IPMB | 4146 | * The event receiver is valid, send an IPMB |
3994 | message. */ | 4147 | * message. |
4148 | */ | ||
3995 | ipmb = (struct ipmi_ipmb_addr *) &addr; | 4149 | ipmb = (struct ipmi_ipmb_addr *) &addr; |
3996 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; | 4150 | ipmb->addr_type = IPMI_IPMB_ADDR_TYPE; |
3997 | ipmb->channel = 0; /* FIXME - is this right? */ | 4151 | ipmb->channel = 0; /* FIXME - is this right? */ |
3998 | ipmb->lun = intf->event_receiver_lun; | 4152 | ipmb->lun = intf->event_receiver_lun; |
3999 | ipmb->slave_addr = intf->event_receiver; | 4153 | ipmb->slave_addr = intf->event_receiver; |
4000 | } else if (intf->local_sel_device) { | 4154 | } else if (intf->local_sel_device) { |
4001 | /* The event receiver was not valid (or was | 4155 | /* |
4002 | me), but I am an SEL device, just dump it | 4156 | * The event receiver was not valid (or was |
4003 | in my SEL. */ | 4157 | * me), but I am an SEL device, just dump it |
4158 | * in my SEL. | ||
4159 | */ | ||
4004 | si = (struct ipmi_system_interface_addr *) &addr; | 4160 | si = (struct ipmi_system_interface_addr *) &addr; |
4005 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 4161 | si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
4006 | si->channel = IPMI_BMC_CHANNEL; | 4162 | si->channel = IPMI_BMC_CHANNEL; |
@@ -4008,7 +4164,6 @@ static void send_panic_events(char *str) | |||
4008 | } else | 4164 | } else |
4009 | continue; /* No where to send the event. */ | 4165 | continue; /* No where to send the event. */ |
4010 | 4166 | ||
4011 | |||
4012 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ | 4167 | msg.netfn = IPMI_NETFN_STORAGE_REQUEST; /* Storage. */ |
4013 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; | 4168 | msg.cmd = IPMI_ADD_SEL_ENTRY_CMD; |
4014 | msg.data = data; | 4169 | msg.data = data; |
@@ -4025,8 +4180,10 @@ static void send_panic_events(char *str) | |||
4025 | data[2] = 0xf0; /* OEM event without timestamp. */ | 4180 | data[2] = 0xf0; /* OEM event without timestamp. */ |
4026 | data[3] = intf->channels[0].address; | 4181 | data[3] = intf->channels[0].address; |
4027 | data[4] = j++; /* sequence # */ | 4182 | data[4] = j++; /* sequence # */ |
4028 | /* Always give 11 bytes, so strncpy will fill | 4183 | /* |
4029 | it with zeroes for me. */ | 4184 | * Always give 11 bytes, so strncpy will fill |
4185 | * it with zeroes for me. | ||
4186 | */ | ||
4030 | strncpy(data+5, p, 11); | 4187 | strncpy(data+5, p, 11); |
4031 | p += size; | 4188 | p += size; |
4032 | 4189 | ||
@@ -4043,7 +4200,7 @@ static void send_panic_events(char *str) | |||
4043 | intf->channels[0].lun, | 4200 | intf->channels[0].lun, |
4044 | 0, 1); /* no retry, and no wait. */ | 4201 | 0, 1); /* no retry, and no wait. */ |
4045 | } | 4202 | } |
4046 | } | 4203 | } |
4047 | #endif /* CONFIG_IPMI_PANIC_STRING */ | 4204 | #endif /* CONFIG_IPMI_PANIC_STRING */ |
4048 | } | 4205 | } |
4049 | #endif /* CONFIG_IPMI_PANIC_EVENT */ | 4206 | #endif /* CONFIG_IPMI_PANIC_EVENT */ |
@@ -4052,7 +4209,7 @@ static int has_panicked; | |||
4052 | 4209 | ||
4053 | static int panic_event(struct notifier_block *this, | 4210 | static int panic_event(struct notifier_block *this, |
4054 | unsigned long event, | 4211 | unsigned long event, |
4055 | void *ptr) | 4212 | void *ptr) |
4056 | { | 4213 | { |
4057 | ipmi_smi_t intf; | 4214 | ipmi_smi_t intf; |
4058 | 4215 | ||
@@ -4066,6 +4223,7 @@ static int panic_event(struct notifier_block *this, | |||
4066 | /* Interface is not ready. */ | 4223 | /* Interface is not ready. */ |
4067 | continue; | 4224 | continue; |
4068 | 4225 | ||
4226 | intf->run_to_completion = 1; | ||
4069 | intf->handlers->set_run_to_completion(intf->send_info, 1); | 4227 | intf->handlers->set_run_to_completion(intf->send_info, 1); |
4070 | } | 4228 | } |
4071 | 4229 | ||
@@ -4133,11 +4291,16 @@ static __exit void cleanup_ipmi(void) | |||
4133 | 4291 | ||
4134 | atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); | 4292 | atomic_notifier_chain_unregister(&panic_notifier_list, &panic_block); |
4135 | 4293 | ||
4136 | /* This can't be called if any interfaces exist, so no worry about | 4294 | /* |
4137 | shutting down the interfaces. */ | 4295 | * This can't be called if any interfaces exist, so no worry |
4296 | * about shutting down the interfaces. | ||
4297 | */ | ||
4138 | 4298 | ||
4139 | /* Tell the timer to stop, then wait for it to stop. This avoids | 4299 | /* |
4140 | problems with race conditions removing the timer here. */ | 4300 | * Tell the timer to stop, then wait for it to stop. This |
4301 | * avoids problems with race conditions removing the timer | ||
4302 | * here. | ||
4303 | */ | ||
4141 | atomic_inc(&stop_operation); | 4304 | atomic_inc(&stop_operation); |
4142 | del_timer_sync(&ipmi_timer); | 4305 | del_timer_sync(&ipmi_timer); |
4143 | 4306 | ||
@@ -4164,31 +4327,6 @@ module_exit(cleanup_ipmi); | |||
4164 | module_init(ipmi_init_msghandler_mod); | 4327 | module_init(ipmi_init_msghandler_mod); |
4165 | MODULE_LICENSE("GPL"); | 4328 | MODULE_LICENSE("GPL"); |
4166 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); | 4329 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); |
4167 | MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI interface."); | 4330 | MODULE_DESCRIPTION("Incoming and outgoing message routing for an IPMI" |
4331 | " interface."); | ||
4168 | MODULE_VERSION(IPMI_DRIVER_VERSION); | 4332 | MODULE_VERSION(IPMI_DRIVER_VERSION); |
4169 | |||
4170 | EXPORT_SYMBOL(ipmi_create_user); | ||
4171 | EXPORT_SYMBOL(ipmi_destroy_user); | ||
4172 | EXPORT_SYMBOL(ipmi_get_version); | ||
4173 | EXPORT_SYMBOL(ipmi_request_settime); | ||
4174 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | ||
4175 | EXPORT_SYMBOL(ipmi_poll_interface); | ||
4176 | EXPORT_SYMBOL(ipmi_register_smi); | ||
4177 | EXPORT_SYMBOL(ipmi_unregister_smi); | ||
4178 | EXPORT_SYMBOL(ipmi_register_for_cmd); | ||
4179 | EXPORT_SYMBOL(ipmi_unregister_for_cmd); | ||
4180 | EXPORT_SYMBOL(ipmi_smi_msg_received); | ||
4181 | EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout); | ||
4182 | EXPORT_SYMBOL(ipmi_alloc_smi_msg); | ||
4183 | EXPORT_SYMBOL(ipmi_addr_length); | ||
4184 | EXPORT_SYMBOL(ipmi_validate_addr); | ||
4185 | EXPORT_SYMBOL(ipmi_set_gets_events); | ||
4186 | EXPORT_SYMBOL(ipmi_smi_watcher_register); | ||
4187 | EXPORT_SYMBOL(ipmi_smi_watcher_unregister); | ||
4188 | EXPORT_SYMBOL(ipmi_set_my_address); | ||
4189 | EXPORT_SYMBOL(ipmi_get_my_address); | ||
4190 | EXPORT_SYMBOL(ipmi_set_my_LUN); | ||
4191 | EXPORT_SYMBOL(ipmi_get_my_LUN); | ||
4192 | EXPORT_SYMBOL(ipmi_smi_add_proc_entry); | ||
4193 | EXPORT_SYMBOL(ipmi_user_set_run_to_completion); | ||
4194 | EXPORT_SYMBOL(ipmi_free_recv_msg); | ||
diff --git a/drivers/char/ipmi/ipmi_poweroff.c b/drivers/char/ipmi/ipmi_poweroff.c index b86186de7f07..a261bd735dfb 100644 --- a/drivers/char/ipmi/ipmi_poweroff.c +++ b/drivers/char/ipmi/ipmi_poweroff.c | |||
@@ -87,7 +87,10 @@ MODULE_PARM_DESC(ifnum_to_use, "The interface number to use for the watchdog " | |||
87 | 87 | ||
88 | /* parameter definition to allow user to flag power cycle */ | 88 | /* parameter definition to allow user to flag power cycle */ |
89 | module_param(poweroff_powercycle, int, 0644); | 89 | module_param(poweroff_powercycle, int, 0644); |
90 | MODULE_PARM_DESC(poweroff_powercycle, " Set to non-zero to enable power cycle instead of power down. Power cycle is contingent on hardware support, otherwise it defaults back to power down."); | 90 | MODULE_PARM_DESC(poweroff_powercycle, |
91 | " Set to non-zero to enable power cycle instead of power" | ||
92 | " down. Power cycle is contingent on hardware support," | ||
93 | " otherwise it defaults back to power down."); | ||
91 | 94 | ||
92 | /* Stuff from the get device id command. */ | 95 | /* Stuff from the get device id command. */ |
93 | static unsigned int mfg_id; | 96 | static unsigned int mfg_id; |
@@ -95,22 +98,25 @@ static unsigned int prod_id; | |||
95 | static unsigned char capabilities; | 98 | static unsigned char capabilities; |
96 | static unsigned char ipmi_version; | 99 | static unsigned char ipmi_version; |
97 | 100 | ||
98 | /* We use our own messages for this operation, we don't let the system | 101 | /* |
99 | allocate them, since we may be in a panic situation. The whole | 102 | * We use our own messages for this operation, we don't let the system |
100 | thing is single-threaded, anyway, so multiple messages are not | 103 | * allocate them, since we may be in a panic situation. The whole |
101 | required. */ | 104 | * thing is single-threaded, anyway, so multiple messages are not |
105 | * required. | ||
106 | */ | ||
107 | static atomic_t dummy_count = ATOMIC_INIT(0); | ||
102 | static void dummy_smi_free(struct ipmi_smi_msg *msg) | 108 | static void dummy_smi_free(struct ipmi_smi_msg *msg) |
103 | { | 109 | { |
110 | atomic_dec(&dummy_count); | ||
104 | } | 111 | } |
105 | static void dummy_recv_free(struct ipmi_recv_msg *msg) | 112 | static void dummy_recv_free(struct ipmi_recv_msg *msg) |
106 | { | 113 | { |
114 | atomic_dec(&dummy_count); | ||
107 | } | 115 | } |
108 | static struct ipmi_smi_msg halt_smi_msg = | 116 | static struct ipmi_smi_msg halt_smi_msg = { |
109 | { | ||
110 | .done = dummy_smi_free | 117 | .done = dummy_smi_free |
111 | }; | 118 | }; |
112 | static struct ipmi_recv_msg halt_recv_msg = | 119 | static struct ipmi_recv_msg halt_recv_msg = { |
113 | { | ||
114 | .done = dummy_recv_free | 120 | .done = dummy_recv_free |
115 | }; | 121 | }; |
116 | 122 | ||
@@ -127,8 +133,7 @@ static void receive_handler(struct ipmi_recv_msg *recv_msg, void *handler_data) | |||
127 | complete(comp); | 133 | complete(comp); |
128 | } | 134 | } |
129 | 135 | ||
130 | static struct ipmi_user_hndl ipmi_poweroff_handler = | 136 | static struct ipmi_user_hndl ipmi_poweroff_handler = { |
131 | { | ||
132 | .ipmi_recv_hndl = receive_handler | 137 | .ipmi_recv_hndl = receive_handler |
133 | }; | 138 | }; |
134 | 139 | ||
@@ -152,17 +157,28 @@ static int ipmi_request_wait_for_response(ipmi_user_t user, | |||
152 | return halt_recv_msg.msg.data[0]; | 157 | return halt_recv_msg.msg.data[0]; |
153 | } | 158 | } |
154 | 159 | ||
155 | /* We are in run-to-completion mode, no completion is desired. */ | 160 | /* Wait for message to complete, spinning. */ |
156 | static int ipmi_request_in_rc_mode(ipmi_user_t user, | 161 | static int ipmi_request_in_rc_mode(ipmi_user_t user, |
157 | struct ipmi_addr *addr, | 162 | struct ipmi_addr *addr, |
158 | struct kernel_ipmi_msg *send_msg) | 163 | struct kernel_ipmi_msg *send_msg) |
159 | { | 164 | { |
160 | int rv; | 165 | int rv; |
161 | 166 | ||
167 | atomic_set(&dummy_count, 2); | ||
162 | rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, | 168 | rv = ipmi_request_supply_msgs(user, addr, 0, send_msg, NULL, |
163 | &halt_smi_msg, &halt_recv_msg, 0); | 169 | &halt_smi_msg, &halt_recv_msg, 0); |
164 | if (rv) | 170 | if (rv) { |
171 | atomic_set(&dummy_count, 0); | ||
165 | return rv; | 172 | return rv; |
173 | } | ||
174 | |||
175 | /* | ||
176 | * Spin until our message is done. | ||
177 | */ | ||
178 | while (atomic_read(&dummy_count) > 0) { | ||
179 | ipmi_poll_interface(user); | ||
180 | cpu_relax(); | ||
181 | } | ||
166 | 182 | ||
167 | return halt_recv_msg.msg.data[0]; | 183 | return halt_recv_msg.msg.data[0]; |
168 | } | 184 | } |
@@ -184,47 +200,47 @@ static int ipmi_request_in_rc_mode(ipmi_user_t user, | |||
184 | 200 | ||
185 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user); | 201 | static void (*atca_oem_poweroff_hook)(ipmi_user_t user); |
186 | 202 | ||
187 | static void pps_poweroff_atca (ipmi_user_t user) | 203 | static void pps_poweroff_atca(ipmi_user_t user) |
188 | { | 204 | { |
189 | struct ipmi_system_interface_addr smi_addr; | 205 | struct ipmi_system_interface_addr smi_addr; |
190 | struct kernel_ipmi_msg send_msg; | 206 | struct kernel_ipmi_msg send_msg; |
191 | int rv; | 207 | int rv; |
192 | /* | 208 | /* |
193 | * Configure IPMI address for local access | 209 | * Configure IPMI address for local access |
194 | */ | 210 | */ |
195 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 211 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
196 | smi_addr.channel = IPMI_BMC_CHANNEL; | 212 | smi_addr.channel = IPMI_BMC_CHANNEL; |
197 | smi_addr.lun = 0; | 213 | smi_addr.lun = 0; |
198 | 214 | ||
199 | printk(KERN_INFO PFX "PPS powerdown hook used"); | 215 | printk(KERN_INFO PFX "PPS powerdown hook used"); |
200 | 216 | ||
201 | send_msg.netfn = IPMI_NETFN_OEM; | 217 | send_msg.netfn = IPMI_NETFN_OEM; |
202 | send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; | 218 | send_msg.cmd = IPMI_ATCA_PPS_GRACEFUL_RESTART; |
203 | send_msg.data = IPMI_ATCA_PPS_IANA; | 219 | send_msg.data = IPMI_ATCA_PPS_IANA; |
204 | send_msg.data_len = 3; | 220 | send_msg.data_len = 3; |
205 | rv = ipmi_request_in_rc_mode(user, | 221 | rv = ipmi_request_in_rc_mode(user, |
206 | (struct ipmi_addr *) &smi_addr, | 222 | (struct ipmi_addr *) &smi_addr, |
207 | &send_msg); | 223 | &send_msg); |
208 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | 224 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { |
209 | printk(KERN_ERR PFX "Unable to send ATCA ," | 225 | printk(KERN_ERR PFX "Unable to send ATCA ," |
210 | " IPMI error 0x%x\n", rv); | 226 | " IPMI error 0x%x\n", rv); |
211 | } | 227 | } |
212 | return; | 228 | return; |
213 | } | 229 | } |
214 | 230 | ||
215 | static int ipmi_atca_detect (ipmi_user_t user) | 231 | static int ipmi_atca_detect(ipmi_user_t user) |
216 | { | 232 | { |
217 | struct ipmi_system_interface_addr smi_addr; | 233 | struct ipmi_system_interface_addr smi_addr; |
218 | struct kernel_ipmi_msg send_msg; | 234 | struct kernel_ipmi_msg send_msg; |
219 | int rv; | 235 | int rv; |
220 | unsigned char data[1]; | 236 | unsigned char data[1]; |
221 | 237 | ||
222 | /* | 238 | /* |
223 | * Configure IPMI address for local access | 239 | * Configure IPMI address for local access |
224 | */ | 240 | */ |
225 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 241 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
226 | smi_addr.channel = IPMI_BMC_CHANNEL; | 242 | smi_addr.channel = IPMI_BMC_CHANNEL; |
227 | smi_addr.lun = 0; | 243 | smi_addr.lun = 0; |
228 | 244 | ||
229 | /* | 245 | /* |
230 | * Use get address info to check and see if we are ATCA | 246 | * Use get address info to check and see if we are ATCA |
@@ -238,28 +254,30 @@ static int ipmi_atca_detect (ipmi_user_t user) | |||
238 | (struct ipmi_addr *) &smi_addr, | 254 | (struct ipmi_addr *) &smi_addr, |
239 | &send_msg); | 255 | &send_msg); |
240 | 256 | ||
241 | printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", mfg_id, prod_id); | 257 | printk(KERN_INFO PFX "ATCA Detect mfg 0x%X prod 0x%X\n", |
242 | if((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) | 258 | mfg_id, prod_id); |
243 | && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { | 259 | if ((mfg_id == IPMI_MOTOROLA_MANUFACTURER_ID) |
244 | printk(KERN_INFO PFX "Installing Pigeon Point Systems Poweroff Hook\n"); | 260 | && (prod_id == IPMI_MOTOROLA_PPS_IPMC_PRODUCT_ID)) { |
261 | printk(KERN_INFO PFX | ||
262 | "Installing Pigeon Point Systems Poweroff Hook\n"); | ||
245 | atca_oem_poweroff_hook = pps_poweroff_atca; | 263 | atca_oem_poweroff_hook = pps_poweroff_atca; |
246 | } | 264 | } |
247 | return !rv; | 265 | return !rv; |
248 | } | 266 | } |
249 | 267 | ||
250 | static void ipmi_poweroff_atca (ipmi_user_t user) | 268 | static void ipmi_poweroff_atca(ipmi_user_t user) |
251 | { | 269 | { |
252 | struct ipmi_system_interface_addr smi_addr; | 270 | struct ipmi_system_interface_addr smi_addr; |
253 | struct kernel_ipmi_msg send_msg; | 271 | struct kernel_ipmi_msg send_msg; |
254 | int rv; | 272 | int rv; |
255 | unsigned char data[4]; | 273 | unsigned char data[4]; |
256 | 274 | ||
257 | /* | 275 | /* |
258 | * Configure IPMI address for local access | 276 | * Configure IPMI address for local access |
259 | */ | 277 | */ |
260 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 278 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
261 | smi_addr.channel = IPMI_BMC_CHANNEL; | 279 | smi_addr.channel = IPMI_BMC_CHANNEL; |
262 | smi_addr.lun = 0; | 280 | smi_addr.lun = 0; |
263 | 281 | ||
264 | printk(KERN_INFO PFX "Powering down via ATCA power command\n"); | 282 | printk(KERN_INFO PFX "Powering down via ATCA power command\n"); |
265 | 283 | ||
@@ -273,23 +291,24 @@ static void ipmi_poweroff_atca (ipmi_user_t user) | |||
273 | data[2] = 0; /* Power Level */ | 291 | data[2] = 0; /* Power Level */ |
274 | data[3] = 0; /* Don't change saved presets */ | 292 | data[3] = 0; /* Don't change saved presets */ |
275 | send_msg.data = data; | 293 | send_msg.data = data; |
276 | send_msg.data_len = sizeof (data); | 294 | send_msg.data_len = sizeof(data); |
277 | rv = ipmi_request_in_rc_mode(user, | 295 | rv = ipmi_request_in_rc_mode(user, |
278 | (struct ipmi_addr *) &smi_addr, | 296 | (struct ipmi_addr *) &smi_addr, |
279 | &send_msg); | 297 | &send_msg); |
280 | /** At this point, the system may be shutting down, and most | 298 | /* |
281 | ** serial drivers (if used) will have interrupts turned off | 299 | * At this point, the system may be shutting down, and most |
282 | ** it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE | 300 | * serial drivers (if used) will have interrupts turned off |
283 | ** return code | 301 | * it may be better to ignore IPMI_UNKNOWN_ERR_COMPLETION_CODE |
284 | **/ | 302 | * return code |
285 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | 303 | */ |
304 | if (rv && rv != IPMI_UNKNOWN_ERR_COMPLETION_CODE) { | ||
286 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," | 305 | printk(KERN_ERR PFX "Unable to send ATCA powerdown message," |
287 | " IPMI error 0x%x\n", rv); | 306 | " IPMI error 0x%x\n", rv); |
288 | goto out; | 307 | goto out; |
289 | } | 308 | } |
290 | 309 | ||
291 | if(atca_oem_poweroff_hook) | 310 | if (atca_oem_poweroff_hook) |
292 | return atca_oem_poweroff_hook(user); | 311 | atca_oem_poweroff_hook(user); |
293 | out: | 312 | out: |
294 | return; | 313 | return; |
295 | } | 314 | } |
@@ -310,13 +329,13 @@ static void ipmi_poweroff_atca (ipmi_user_t user) | |||
310 | #define IPMI_CPI1_PRODUCT_ID 0x000157 | 329 | #define IPMI_CPI1_PRODUCT_ID 0x000157 |
311 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 | 330 | #define IPMI_CPI1_MANUFACTURER_ID 0x0108 |
312 | 331 | ||
313 | static int ipmi_cpi1_detect (ipmi_user_t user) | 332 | static int ipmi_cpi1_detect(ipmi_user_t user) |
314 | { | 333 | { |
315 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) | 334 | return ((mfg_id == IPMI_CPI1_MANUFACTURER_ID) |
316 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); | 335 | && (prod_id == IPMI_CPI1_PRODUCT_ID)); |
317 | } | 336 | } |
318 | 337 | ||
319 | static void ipmi_poweroff_cpi1 (ipmi_user_t user) | 338 | static void ipmi_poweroff_cpi1(ipmi_user_t user) |
320 | { | 339 | { |
321 | struct ipmi_system_interface_addr smi_addr; | 340 | struct ipmi_system_interface_addr smi_addr; |
322 | struct ipmi_ipmb_addr ipmb_addr; | 341 | struct ipmi_ipmb_addr ipmb_addr; |
@@ -328,12 +347,12 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user) | |||
328 | unsigned char aer_addr; | 347 | unsigned char aer_addr; |
329 | unsigned char aer_lun; | 348 | unsigned char aer_lun; |
330 | 349 | ||
331 | /* | 350 | /* |
332 | * Configure IPMI address for local access | 351 | * Configure IPMI address for local access |
333 | */ | 352 | */ |
334 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 353 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
335 | smi_addr.channel = IPMI_BMC_CHANNEL; | 354 | smi_addr.channel = IPMI_BMC_CHANNEL; |
336 | smi_addr.lun = 0; | 355 | smi_addr.lun = 0; |
337 | 356 | ||
338 | printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); | 357 | printk(KERN_INFO PFX "Powering down via CPI1 power command\n"); |
339 | 358 | ||
@@ -425,7 +444,7 @@ static void ipmi_poweroff_cpi1 (ipmi_user_t user) | |||
425 | */ | 444 | */ |
426 | 445 | ||
427 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} | 446 | #define DELL_IANA_MFR_ID {0xA2, 0x02, 0x00} |
428 | static int ipmi_dell_chassis_detect (ipmi_user_t user) | 447 | static int ipmi_dell_chassis_detect(ipmi_user_t user) |
429 | { | 448 | { |
430 | const char ipmi_version_major = ipmi_version & 0xF; | 449 | const char ipmi_version_major = ipmi_version & 0xF; |
431 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; | 450 | const char ipmi_version_minor = (ipmi_version >> 4) & 0xF; |
@@ -444,25 +463,25 @@ static int ipmi_dell_chassis_detect (ipmi_user_t user) | |||
444 | #define IPMI_NETFN_CHASSIS_REQUEST 0 | 463 | #define IPMI_NETFN_CHASSIS_REQUEST 0 |
445 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 | 464 | #define IPMI_CHASSIS_CONTROL_CMD 0x02 |
446 | 465 | ||
447 | static int ipmi_chassis_detect (ipmi_user_t user) | 466 | static int ipmi_chassis_detect(ipmi_user_t user) |
448 | { | 467 | { |
449 | /* Chassis support, use it. */ | 468 | /* Chassis support, use it. */ |
450 | return (capabilities & 0x80); | 469 | return (capabilities & 0x80); |
451 | } | 470 | } |
452 | 471 | ||
453 | static void ipmi_poweroff_chassis (ipmi_user_t user) | 472 | static void ipmi_poweroff_chassis(ipmi_user_t user) |
454 | { | 473 | { |
455 | struct ipmi_system_interface_addr smi_addr; | 474 | struct ipmi_system_interface_addr smi_addr; |
456 | struct kernel_ipmi_msg send_msg; | 475 | struct kernel_ipmi_msg send_msg; |
457 | int rv; | 476 | int rv; |
458 | unsigned char data[1]; | 477 | unsigned char data[1]; |
459 | 478 | ||
460 | /* | 479 | /* |
461 | * Configure IPMI address for local access | 480 | * Configure IPMI address for local access |
462 | */ | 481 | */ |
463 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 482 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
464 | smi_addr.channel = IPMI_BMC_CHANNEL; | 483 | smi_addr.channel = IPMI_BMC_CHANNEL; |
465 | smi_addr.lun = 0; | 484 | smi_addr.lun = 0; |
466 | 485 | ||
467 | powercyclefailed: | 486 | powercyclefailed: |
468 | printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", | 487 | printk(KERN_INFO PFX "Powering %s via IPMI chassis control command\n", |
@@ -525,15 +544,13 @@ static struct poweroff_function poweroff_functions[] = { | |||
525 | 544 | ||
526 | 545 | ||
527 | /* Called on a powerdown request. */ | 546 | /* Called on a powerdown request. */ |
528 | static void ipmi_poweroff_function (void) | 547 | static void ipmi_poweroff_function(void) |
529 | { | 548 | { |
530 | if (!ready) | 549 | if (!ready) |
531 | return; | 550 | return; |
532 | 551 | ||
533 | /* Use run-to-completion mode, since interrupts may be off. */ | 552 | /* Use run-to-completion mode, since interrupts may be off. */ |
534 | ipmi_user_set_run_to_completion(ipmi_user, 1); | ||
535 | specific_poweroff_func(ipmi_user); | 553 | specific_poweroff_func(ipmi_user); |
536 | ipmi_user_set_run_to_completion(ipmi_user, 0); | ||
537 | } | 554 | } |
538 | 555 | ||
539 | /* Wait for an IPMI interface to be installed, the first one installed | 556 | /* Wait for an IPMI interface to be installed, the first one installed |
@@ -561,13 +578,13 @@ static void ipmi_po_new_smi(int if_num, struct device *device) | |||
561 | 578 | ||
562 | ipmi_ifnum = if_num; | 579 | ipmi_ifnum = if_num; |
563 | 580 | ||
564 | /* | 581 | /* |
565 | * Do a get device ide and store some results, since this is | 582 | * Do a get device ide and store some results, since this is |
566 | * used by several functions. | 583 | * used by several functions. |
567 | */ | 584 | */ |
568 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; | 585 | smi_addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; |
569 | smi_addr.channel = IPMI_BMC_CHANNEL; | 586 | smi_addr.channel = IPMI_BMC_CHANNEL; |
570 | smi_addr.lun = 0; | 587 | smi_addr.lun = 0; |
571 | 588 | ||
572 | send_msg.netfn = IPMI_NETFN_APP_REQUEST; | 589 | send_msg.netfn = IPMI_NETFN_APP_REQUEST; |
573 | send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; | 590 | send_msg.cmd = IPMI_GET_DEVICE_ID_CMD; |
@@ -632,8 +649,7 @@ static void ipmi_po_smi_gone(int if_num) | |||
632 | pm_power_off = old_poweroff_func; | 649 | pm_power_off = old_poweroff_func; |
633 | } | 650 | } |
634 | 651 | ||
635 | static struct ipmi_smi_watcher smi_watcher = | 652 | static struct ipmi_smi_watcher smi_watcher = { |
636 | { | ||
637 | .owner = THIS_MODULE, | 653 | .owner = THIS_MODULE, |
638 | .new_smi = ipmi_po_new_smi, | 654 | .new_smi = ipmi_po_new_smi, |
639 | .smi_gone = ipmi_po_smi_gone | 655 | .smi_gone = ipmi_po_smi_gone |
@@ -675,12 +691,12 @@ static struct ctl_table_header *ipmi_table_header; | |||
675 | /* | 691 | /* |
676 | * Startup and shutdown functions. | 692 | * Startup and shutdown functions. |
677 | */ | 693 | */ |
678 | static int ipmi_poweroff_init (void) | 694 | static int ipmi_poweroff_init(void) |
679 | { | 695 | { |
680 | int rv; | 696 | int rv; |
681 | 697 | ||
682 | printk (KERN_INFO "Copyright (C) 2004 MontaVista Software -" | 698 | printk(KERN_INFO "Copyright (C) 2004 MontaVista Software -" |
683 | " IPMI Powerdown via sys_reboot.\n"); | 699 | " IPMI Powerdown via sys_reboot.\n"); |
684 | 700 | ||
685 | if (poweroff_powercycle) | 701 | if (poweroff_powercycle) |
686 | printk(KERN_INFO PFX "Power cycle is enabled.\n"); | 702 | printk(KERN_INFO PFX "Power cycle is enabled.\n"); |
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 4f560d0bb808..5a5455585c1d 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -80,7 +80,7 @@ | |||
80 | #define SI_USEC_PER_JIFFY (1000000/HZ) | 80 | #define SI_USEC_PER_JIFFY (1000000/HZ) |
81 | #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) | 81 | #define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY) |
82 | #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a | 82 | #define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a |
83 | short timeout */ | 83 | short timeout */ |
84 | 84 | ||
85 | /* Bit for BMC global enables. */ | 85 | /* Bit for BMC global enables. */ |
86 | #define IPMI_BMC_RCV_MSG_INTR 0x01 | 86 | #define IPMI_BMC_RCV_MSG_INTR 0x01 |
@@ -114,14 +114,61 @@ static char *si_to_str[] = { "kcs", "smic", "bt" }; | |||
114 | 114 | ||
115 | #define DEVICE_NAME "ipmi_si" | 115 | #define DEVICE_NAME "ipmi_si" |
116 | 116 | ||
117 | static struct device_driver ipmi_driver = | 117 | static struct device_driver ipmi_driver = { |
118 | { | ||
119 | .name = DEVICE_NAME, | 118 | .name = DEVICE_NAME, |
120 | .bus = &platform_bus_type | 119 | .bus = &platform_bus_type |
121 | }; | 120 | }; |
122 | 121 | ||
123 | struct smi_info | 122 | |
124 | { | 123 | /* |
124 | * Indexes into stats[] in smi_info below. | ||
125 | */ | ||
126 | enum si_stat_indexes { | ||
127 | /* | ||
128 | * Number of times the driver requested a timer while an operation | ||
129 | * was in progress. | ||
130 | */ | ||
131 | SI_STAT_short_timeouts = 0, | ||
132 | |||
133 | /* | ||
134 | * Number of times the driver requested a timer while nothing was in | ||
135 | * progress. | ||
136 | */ | ||
137 | SI_STAT_long_timeouts, | ||
138 | |||
139 | /* Number of times the interface was idle while being polled. */ | ||
140 | SI_STAT_idles, | ||
141 | |||
142 | /* Number of interrupts the driver handled. */ | ||
143 | SI_STAT_interrupts, | ||
144 | |||
145 | /* Number of time the driver got an ATTN from the hardware. */ | ||
146 | SI_STAT_attentions, | ||
147 | |||
148 | /* Number of times the driver requested flags from the hardware. */ | ||
149 | SI_STAT_flag_fetches, | ||
150 | |||
151 | /* Number of times the hardware didn't follow the state machine. */ | ||
152 | SI_STAT_hosed_count, | ||
153 | |||
154 | /* Number of completed messages. */ | ||
155 | SI_STAT_complete_transactions, | ||
156 | |||
157 | /* Number of IPMI events received from the hardware. */ | ||
158 | SI_STAT_events, | ||
159 | |||
160 | /* Number of watchdog pretimeouts. */ | ||
161 | SI_STAT_watchdog_pretimeouts, | ||
162 | |||
163 | /* Number of asyncronous messages received. */ | ||
164 | SI_STAT_incoming_messages, | ||
165 | |||
166 | |||
167 | /* This *must* remain last, add new values above this. */ | ||
168 | SI_NUM_STATS | ||
169 | }; | ||
170 | |||
171 | struct smi_info { | ||
125 | int intf_num; | 172 | int intf_num; |
126 | ipmi_smi_t intf; | 173 | ipmi_smi_t intf; |
127 | struct si_sm_data *si_sm; | 174 | struct si_sm_data *si_sm; |
@@ -134,8 +181,10 @@ struct smi_info | |||
134 | struct ipmi_smi_msg *curr_msg; | 181 | struct ipmi_smi_msg *curr_msg; |
135 | enum si_intf_state si_state; | 182 | enum si_intf_state si_state; |
136 | 183 | ||
137 | /* Used to handle the various types of I/O that can occur with | 184 | /* |
138 | IPMI */ | 185 | * Used to handle the various types of I/O that can occur with |
186 | * IPMI | ||
187 | */ | ||
139 | struct si_sm_io io; | 188 | struct si_sm_io io; |
140 | int (*io_setup)(struct smi_info *info); | 189 | int (*io_setup)(struct smi_info *info); |
141 | void (*io_cleanup)(struct smi_info *info); | 190 | void (*io_cleanup)(struct smi_info *info); |
@@ -146,15 +195,18 @@ struct smi_info | |||
146 | void (*addr_source_cleanup)(struct smi_info *info); | 195 | void (*addr_source_cleanup)(struct smi_info *info); |
147 | void *addr_source_data; | 196 | void *addr_source_data; |
148 | 197 | ||
149 | /* Per-OEM handler, called from handle_flags(). | 198 | /* |
150 | Returns 1 when handle_flags() needs to be re-run | 199 | * Per-OEM handler, called from handle_flags(). Returns 1 |
151 | or 0 indicating it set si_state itself. | 200 | * when handle_flags() needs to be re-run or 0 indicating it |
152 | */ | 201 | * set si_state itself. |
202 | */ | ||
153 | int (*oem_data_avail_handler)(struct smi_info *smi_info); | 203 | int (*oem_data_avail_handler)(struct smi_info *smi_info); |
154 | 204 | ||
155 | /* Flags from the last GET_MSG_FLAGS command, used when an ATTN | 205 | /* |
156 | is set to hold the flags until we are done handling everything | 206 | * Flags from the last GET_MSG_FLAGS command, used when an ATTN |
157 | from the flags. */ | 207 | * is set to hold the flags until we are done handling everything |
208 | * from the flags. | ||
209 | */ | ||
158 | #define RECEIVE_MSG_AVAIL 0x01 | 210 | #define RECEIVE_MSG_AVAIL 0x01 |
159 | #define EVENT_MSG_BUFFER_FULL 0x02 | 211 | #define EVENT_MSG_BUFFER_FULL 0x02 |
160 | #define WDT_PRE_TIMEOUT_INT 0x08 | 212 | #define WDT_PRE_TIMEOUT_INT 0x08 |
@@ -162,25 +214,31 @@ struct smi_info | |||
162 | #define OEM1_DATA_AVAIL 0x40 | 214 | #define OEM1_DATA_AVAIL 0x40 |
163 | #define OEM2_DATA_AVAIL 0x80 | 215 | #define OEM2_DATA_AVAIL 0x80 |
164 | #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ | 216 | #define OEM_DATA_AVAIL (OEM0_DATA_AVAIL | \ |
165 | OEM1_DATA_AVAIL | \ | 217 | OEM1_DATA_AVAIL | \ |
166 | OEM2_DATA_AVAIL) | 218 | OEM2_DATA_AVAIL) |
167 | unsigned char msg_flags; | 219 | unsigned char msg_flags; |
168 | 220 | ||
169 | /* If set to true, this will request events the next time the | 221 | /* |
170 | state machine is idle. */ | 222 | * If set to true, this will request events the next time the |
223 | * state machine is idle. | ||
224 | */ | ||
171 | atomic_t req_events; | 225 | atomic_t req_events; |
172 | 226 | ||
173 | /* If true, run the state machine to completion on every send | 227 | /* |
174 | call. Generally used after a panic to make sure stuff goes | 228 | * If true, run the state machine to completion on every send |
175 | out. */ | 229 | * call. Generally used after a panic to make sure stuff goes |
230 | * out. | ||
231 | */ | ||
176 | int run_to_completion; | 232 | int run_to_completion; |
177 | 233 | ||
178 | /* The I/O port of an SI interface. */ | 234 | /* The I/O port of an SI interface. */ |
179 | int port; | 235 | int port; |
180 | 236 | ||
181 | /* The space between start addresses of the two ports. For | 237 | /* |
182 | instance, if the first port is 0xca2 and the spacing is 4, then | 238 | * The space between start addresses of the two ports. For |
183 | the second port is 0xca6. */ | 239 | * instance, if the first port is 0xca2 and the spacing is 4, then |
240 | * the second port is 0xca6. | ||
241 | */ | ||
184 | unsigned int spacing; | 242 | unsigned int spacing; |
185 | 243 | ||
186 | /* zero if no irq; */ | 244 | /* zero if no irq; */ |
@@ -195,10 +253,12 @@ struct smi_info | |||
195 | /* Used to gracefully stop the timer without race conditions. */ | 253 | /* Used to gracefully stop the timer without race conditions. */ |
196 | atomic_t stop_operation; | 254 | atomic_t stop_operation; |
197 | 255 | ||
198 | /* The driver will disable interrupts when it gets into a | 256 | /* |
199 | situation where it cannot handle messages due to lack of | 257 | * The driver will disable interrupts when it gets into a |
200 | memory. Once that situation clears up, it will re-enable | 258 | * situation where it cannot handle messages due to lack of |
201 | interrupts. */ | 259 | * memory. Once that situation clears up, it will re-enable |
260 | * interrupts. | ||
261 | */ | ||
202 | int interrupt_disabled; | 262 | int interrupt_disabled; |
203 | 263 | ||
204 | /* From the get device id response... */ | 264 | /* From the get device id response... */ |
@@ -208,33 +268,28 @@ struct smi_info | |||
208 | struct device *dev; | 268 | struct device *dev; |
209 | struct platform_device *pdev; | 269 | struct platform_device *pdev; |
210 | 270 | ||
211 | /* True if we allocated the device, false if it came from | 271 | /* |
212 | * someplace else (like PCI). */ | 272 | * True if we allocated the device, false if it came from |
273 | * someplace else (like PCI). | ||
274 | */ | ||
213 | int dev_registered; | 275 | int dev_registered; |
214 | 276 | ||
215 | /* Slave address, could be reported from DMI. */ | 277 | /* Slave address, could be reported from DMI. */ |
216 | unsigned char slave_addr; | 278 | unsigned char slave_addr; |
217 | 279 | ||
218 | /* Counters and things for the proc filesystem. */ | 280 | /* Counters and things for the proc filesystem. */ |
219 | spinlock_t count_lock; | 281 | atomic_t stats[SI_NUM_STATS]; |
220 | unsigned long short_timeouts; | 282 | |
221 | unsigned long long_timeouts; | 283 | struct task_struct *thread; |
222 | unsigned long timeout_restarts; | ||
223 | unsigned long idles; | ||
224 | unsigned long interrupts; | ||
225 | unsigned long attentions; | ||
226 | unsigned long flag_fetches; | ||
227 | unsigned long hosed_count; | ||
228 | unsigned long complete_transactions; | ||
229 | unsigned long events; | ||
230 | unsigned long watchdog_pretimeouts; | ||
231 | unsigned long incoming_messages; | ||
232 | |||
233 | struct task_struct *thread; | ||
234 | 284 | ||
235 | struct list_head link; | 285 | struct list_head link; |
236 | }; | 286 | }; |
237 | 287 | ||
288 | #define smi_inc_stat(smi, stat) \ | ||
289 | atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) | ||
290 | #define smi_get_stat(smi, stat) \ | ||
291 | ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) | ||
292 | |||
238 | #define SI_MAX_PARMS 4 | 293 | #define SI_MAX_PARMS 4 |
239 | 294 | ||
240 | static int force_kipmid[SI_MAX_PARMS]; | 295 | static int force_kipmid[SI_MAX_PARMS]; |
@@ -246,7 +301,7 @@ static int try_smi_init(struct smi_info *smi); | |||
246 | static void cleanup_one_si(struct smi_info *to_clean); | 301 | static void cleanup_one_si(struct smi_info *to_clean); |
247 | 302 | ||
248 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); | 303 | static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list); |
249 | static int register_xaction_notifier(struct notifier_block * nb) | 304 | static int register_xaction_notifier(struct notifier_block *nb) |
250 | { | 305 | { |
251 | return atomic_notifier_chain_register(&xaction_notifier_list, nb); | 306 | return atomic_notifier_chain_register(&xaction_notifier_list, nb); |
252 | } | 307 | } |
@@ -255,7 +310,7 @@ static void deliver_recv_msg(struct smi_info *smi_info, | |||
255 | struct ipmi_smi_msg *msg) | 310 | struct ipmi_smi_msg *msg) |
256 | { | 311 | { |
257 | /* Deliver the message to the upper layer with the lock | 312 | /* Deliver the message to the upper layer with the lock |
258 | released. */ | 313 | released. */ |
259 | spin_unlock(&(smi_info->si_lock)); | 314 | spin_unlock(&(smi_info->si_lock)); |
260 | ipmi_smi_msg_received(smi_info->intf, msg); | 315 | ipmi_smi_msg_received(smi_info->intf, msg); |
261 | spin_lock(&(smi_info->si_lock)); | 316 | spin_lock(&(smi_info->si_lock)); |
@@ -287,9 +342,12 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
287 | struct timeval t; | 342 | struct timeval t; |
288 | #endif | 343 | #endif |
289 | 344 | ||
290 | /* No need to save flags, we aleady have interrupts off and we | 345 | /* |
291 | already hold the SMI lock. */ | 346 | * No need to save flags, we aleady have interrupts off and we |
292 | spin_lock(&(smi_info->msg_lock)); | 347 | * already hold the SMI lock. |
348 | */ | ||
349 | if (!smi_info->run_to_completion) | ||
350 | spin_lock(&(smi_info->msg_lock)); | ||
293 | 351 | ||
294 | /* Pick the high priority queue first. */ | 352 | /* Pick the high priority queue first. */ |
295 | if (!list_empty(&(smi_info->hp_xmit_msgs))) { | 353 | if (!list_empty(&(smi_info->hp_xmit_msgs))) { |
@@ -310,7 +368,7 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
310 | link); | 368 | link); |
311 | #ifdef DEBUG_TIMING | 369 | #ifdef DEBUG_TIMING |
312 | do_gettimeofday(&t); | 370 | do_gettimeofday(&t); |
313 | printk("**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 371 | printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
314 | #endif | 372 | #endif |
315 | err = atomic_notifier_call_chain(&xaction_notifier_list, | 373 | err = atomic_notifier_call_chain(&xaction_notifier_list, |
316 | 0, smi_info); | 374 | 0, smi_info); |
@@ -322,14 +380,14 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info) | |||
322 | smi_info->si_sm, | 380 | smi_info->si_sm, |
323 | smi_info->curr_msg->data, | 381 | smi_info->curr_msg->data, |
324 | smi_info->curr_msg->data_size); | 382 | smi_info->curr_msg->data_size); |
325 | if (err) { | 383 | if (err) |
326 | return_hosed_msg(smi_info, err); | 384 | return_hosed_msg(smi_info, err); |
327 | } | ||
328 | 385 | ||
329 | rv = SI_SM_CALL_WITHOUT_DELAY; | 386 | rv = SI_SM_CALL_WITHOUT_DELAY; |
330 | } | 387 | } |
331 | out: | 388 | out: |
332 | spin_unlock(&(smi_info->msg_lock)); | 389 | if (!smi_info->run_to_completion) |
390 | spin_unlock(&(smi_info->msg_lock)); | ||
333 | 391 | ||
334 | return rv; | 392 | return rv; |
335 | } | 393 | } |
@@ -338,8 +396,10 @@ static void start_enable_irq(struct smi_info *smi_info) | |||
338 | { | 396 | { |
339 | unsigned char msg[2]; | 397 | unsigned char msg[2]; |
340 | 398 | ||
341 | /* If we are enabling interrupts, we have to tell the | 399 | /* |
342 | BMC to use them. */ | 400 | * If we are enabling interrupts, we have to tell the |
401 | * BMC to use them. | ||
402 | */ | ||
343 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 403 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
344 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; | 404 | msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD; |
345 | 405 | ||
@@ -371,10 +431,12 @@ static void start_clear_flags(struct smi_info *smi_info) | |||
371 | smi_info->si_state = SI_CLEARING_FLAGS; | 431 | smi_info->si_state = SI_CLEARING_FLAGS; |
372 | } | 432 | } |
373 | 433 | ||
374 | /* When we have a situtaion where we run out of memory and cannot | 434 | /* |
375 | allocate messages, we just leave them in the BMC and run the system | 435 | * When we have a situtaion where we run out of memory and cannot |
376 | polled until we can allocate some memory. Once we have some | 436 | * allocate messages, we just leave them in the BMC and run the system |
377 | memory, we will re-enable the interrupt. */ | 437 | * polled until we can allocate some memory. Once we have some |
438 | * memory, we will re-enable the interrupt. | ||
439 | */ | ||
378 | static inline void disable_si_irq(struct smi_info *smi_info) | 440 | static inline void disable_si_irq(struct smi_info *smi_info) |
379 | { | 441 | { |
380 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 442 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
@@ -396,9 +458,7 @@ static void handle_flags(struct smi_info *smi_info) | |||
396 | retry: | 458 | retry: |
397 | if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { | 459 | if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) { |
398 | /* Watchdog pre-timeout */ | 460 | /* Watchdog pre-timeout */ |
399 | spin_lock(&smi_info->count_lock); | 461 | smi_inc_stat(smi_info, watchdog_pretimeouts); |
400 | smi_info->watchdog_pretimeouts++; | ||
401 | spin_unlock(&smi_info->count_lock); | ||
402 | 462 | ||
403 | start_clear_flags(smi_info); | 463 | start_clear_flags(smi_info); |
404 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; | 464 | smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT; |
@@ -444,12 +504,11 @@ static void handle_flags(struct smi_info *smi_info) | |||
444 | smi_info->curr_msg->data_size); | 504 | smi_info->curr_msg->data_size); |
445 | smi_info->si_state = SI_GETTING_EVENTS; | 505 | smi_info->si_state = SI_GETTING_EVENTS; |
446 | } else if (smi_info->msg_flags & OEM_DATA_AVAIL && | 506 | } else if (smi_info->msg_flags & OEM_DATA_AVAIL && |
447 | smi_info->oem_data_avail_handler) { | 507 | smi_info->oem_data_avail_handler) { |
448 | if (smi_info->oem_data_avail_handler(smi_info)) | 508 | if (smi_info->oem_data_avail_handler(smi_info)) |
449 | goto retry; | 509 | goto retry; |
450 | } else { | 510 | } else |
451 | smi_info->si_state = SI_NORMAL; | 511 | smi_info->si_state = SI_NORMAL; |
452 | } | ||
453 | } | 512 | } |
454 | 513 | ||
455 | static void handle_transaction_done(struct smi_info *smi_info) | 514 | static void handle_transaction_done(struct smi_info *smi_info) |
@@ -459,7 +518,7 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
459 | struct timeval t; | 518 | struct timeval t; |
460 | 519 | ||
461 | do_gettimeofday(&t); | 520 | do_gettimeofday(&t); |
462 | printk("**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 521 | printk(KERN_DEBUG "**Done: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
463 | #endif | 522 | #endif |
464 | switch (smi_info->si_state) { | 523 | switch (smi_info->si_state) { |
465 | case SI_NORMAL: | 524 | case SI_NORMAL: |
@@ -472,9 +531,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
472 | smi_info->curr_msg->rsp, | 531 | smi_info->curr_msg->rsp, |
473 | IPMI_MAX_MSG_LENGTH); | 532 | IPMI_MAX_MSG_LENGTH); |
474 | 533 | ||
475 | /* Do this here becase deliver_recv_msg() releases the | 534 | /* |
476 | lock, and a new message can be put in during the | 535 | * Do this here becase deliver_recv_msg() releases the |
477 | time the lock is released. */ | 536 | * lock, and a new message can be put in during the |
537 | * time the lock is released. | ||
538 | */ | ||
478 | msg = smi_info->curr_msg; | 539 | msg = smi_info->curr_msg; |
479 | smi_info->curr_msg = NULL; | 540 | smi_info->curr_msg = NULL; |
480 | deliver_recv_msg(smi_info, msg); | 541 | deliver_recv_msg(smi_info, msg); |
@@ -488,12 +549,13 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
488 | /* We got the flags from the SMI, now handle them. */ | 549 | /* We got the flags from the SMI, now handle them. */ |
489 | len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); | 550 | len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4); |
490 | if (msg[2] != 0) { | 551 | if (msg[2] != 0) { |
491 | /* Error fetching flags, just give up for | 552 | /* Error fetching flags, just give up for now. */ |
492 | now. */ | ||
493 | smi_info->si_state = SI_NORMAL; | 553 | smi_info->si_state = SI_NORMAL; |
494 | } else if (len < 4) { | 554 | } else if (len < 4) { |
495 | /* Hmm, no flags. That's technically illegal, but | 555 | /* |
496 | don't use uninitialized data. */ | 556 | * Hmm, no flags. That's technically illegal, but |
557 | * don't use uninitialized data. | ||
558 | */ | ||
497 | smi_info->si_state = SI_NORMAL; | 559 | smi_info->si_state = SI_NORMAL; |
498 | } else { | 560 | } else { |
499 | smi_info->msg_flags = msg[3]; | 561 | smi_info->msg_flags = msg[3]; |
@@ -530,9 +592,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
530 | smi_info->curr_msg->rsp, | 592 | smi_info->curr_msg->rsp, |
531 | IPMI_MAX_MSG_LENGTH); | 593 | IPMI_MAX_MSG_LENGTH); |
532 | 594 | ||
533 | /* Do this here becase deliver_recv_msg() releases the | 595 | /* |
534 | lock, and a new message can be put in during the | 596 | * Do this here becase deliver_recv_msg() releases the |
535 | time the lock is released. */ | 597 | * lock, and a new message can be put in during the |
598 | * time the lock is released. | ||
599 | */ | ||
536 | msg = smi_info->curr_msg; | 600 | msg = smi_info->curr_msg; |
537 | smi_info->curr_msg = NULL; | 601 | smi_info->curr_msg = NULL; |
538 | if (msg->rsp[2] != 0) { | 602 | if (msg->rsp[2] != 0) { |
@@ -543,14 +607,14 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
543 | smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; | 607 | smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL; |
544 | handle_flags(smi_info); | 608 | handle_flags(smi_info); |
545 | } else { | 609 | } else { |
546 | spin_lock(&smi_info->count_lock); | 610 | smi_inc_stat(smi_info, events); |
547 | smi_info->events++; | 611 | |
548 | spin_unlock(&smi_info->count_lock); | 612 | /* |
549 | 613 | * Do this before we deliver the message | |
550 | /* Do this before we deliver the message | 614 | * because delivering the message releases the |
551 | because delivering the message releases the | 615 | * lock and something else can mess with the |
552 | lock and something else can mess with the | 616 | * state. |
553 | state. */ | 617 | */ |
554 | handle_flags(smi_info); | 618 | handle_flags(smi_info); |
555 | 619 | ||
556 | deliver_recv_msg(smi_info, msg); | 620 | deliver_recv_msg(smi_info, msg); |
@@ -566,9 +630,11 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
566 | smi_info->curr_msg->rsp, | 630 | smi_info->curr_msg->rsp, |
567 | IPMI_MAX_MSG_LENGTH); | 631 | IPMI_MAX_MSG_LENGTH); |
568 | 632 | ||
569 | /* Do this here becase deliver_recv_msg() releases the | 633 | /* |
570 | lock, and a new message can be put in during the | 634 | * Do this here becase deliver_recv_msg() releases the |
571 | time the lock is released. */ | 635 | * lock, and a new message can be put in during the |
636 | * time the lock is released. | ||
637 | */ | ||
572 | msg = smi_info->curr_msg; | 638 | msg = smi_info->curr_msg; |
573 | smi_info->curr_msg = NULL; | 639 | smi_info->curr_msg = NULL; |
574 | if (msg->rsp[2] != 0) { | 640 | if (msg->rsp[2] != 0) { |
@@ -579,14 +645,14 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
579 | smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; | 645 | smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL; |
580 | handle_flags(smi_info); | 646 | handle_flags(smi_info); |
581 | } else { | 647 | } else { |
582 | spin_lock(&smi_info->count_lock); | 648 | smi_inc_stat(smi_info, incoming_messages); |
583 | smi_info->incoming_messages++; | 649 | |
584 | spin_unlock(&smi_info->count_lock); | 650 | /* |
585 | 651 | * Do this before we deliver the message | |
586 | /* Do this before we deliver the message | 652 | * because delivering the message releases the |
587 | because delivering the message releases the | 653 | * lock and something else can mess with the |
588 | lock and something else can mess with the | 654 | * state. |
589 | state. */ | 655 | */ |
590 | handle_flags(smi_info); | 656 | handle_flags(smi_info); |
591 | 657 | ||
592 | deliver_recv_msg(smi_info, msg); | 658 | deliver_recv_msg(smi_info, msg); |
@@ -674,69 +740,70 @@ static void handle_transaction_done(struct smi_info *smi_info) | |||
674 | } | 740 | } |
675 | } | 741 | } |
676 | 742 | ||
677 | /* Called on timeouts and events. Timeouts should pass the elapsed | 743 | /* |
678 | time, interrupts should pass in zero. Must be called with | 744 | * Called on timeouts and events. Timeouts should pass the elapsed |
679 | si_lock held and interrupts disabled. */ | 745 | * time, interrupts should pass in zero. Must be called with |
746 | * si_lock held and interrupts disabled. | ||
747 | */ | ||
680 | static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | 748 | static enum si_sm_result smi_event_handler(struct smi_info *smi_info, |
681 | int time) | 749 | int time) |
682 | { | 750 | { |
683 | enum si_sm_result si_sm_result; | 751 | enum si_sm_result si_sm_result; |
684 | 752 | ||
685 | restart: | 753 | restart: |
686 | /* There used to be a loop here that waited a little while | 754 | /* |
687 | (around 25us) before giving up. That turned out to be | 755 | * There used to be a loop here that waited a little while |
688 | pointless, the minimum delays I was seeing were in the 300us | 756 | * (around 25us) before giving up. That turned out to be |
689 | range, which is far too long to wait in an interrupt. So | 757 | * pointless, the minimum delays I was seeing were in the 300us |
690 | we just run until the state machine tells us something | 758 | * range, which is far too long to wait in an interrupt. So |
691 | happened or it needs a delay. */ | 759 | * we just run until the state machine tells us something |
760 | * happened or it needs a delay. | ||
761 | */ | ||
692 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); | 762 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, time); |
693 | time = 0; | 763 | time = 0; |
694 | while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) | 764 | while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY) |
695 | { | ||
696 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 765 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
697 | } | ||
698 | 766 | ||
699 | if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) | 767 | if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) { |
700 | { | 768 | smi_inc_stat(smi_info, complete_transactions); |
701 | spin_lock(&smi_info->count_lock); | ||
702 | smi_info->complete_transactions++; | ||
703 | spin_unlock(&smi_info->count_lock); | ||
704 | 769 | ||
705 | handle_transaction_done(smi_info); | 770 | handle_transaction_done(smi_info); |
706 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 771 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
707 | } | 772 | } else if (si_sm_result == SI_SM_HOSED) { |
708 | else if (si_sm_result == SI_SM_HOSED) | 773 | smi_inc_stat(smi_info, hosed_count); |
709 | { | ||
710 | spin_lock(&smi_info->count_lock); | ||
711 | smi_info->hosed_count++; | ||
712 | spin_unlock(&smi_info->count_lock); | ||
713 | 774 | ||
714 | /* Do the before return_hosed_msg, because that | 775 | /* |
715 | releases the lock. */ | 776 | * Do the before return_hosed_msg, because that |
777 | * releases the lock. | ||
778 | */ | ||
716 | smi_info->si_state = SI_NORMAL; | 779 | smi_info->si_state = SI_NORMAL; |
717 | if (smi_info->curr_msg != NULL) { | 780 | if (smi_info->curr_msg != NULL) { |
718 | /* If we were handling a user message, format | 781 | /* |
719 | a response to send to the upper layer to | 782 | * If we were handling a user message, format |
720 | tell it about the error. */ | 783 | * a response to send to the upper layer to |
784 | * tell it about the error. | ||
785 | */ | ||
721 | return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); | 786 | return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED); |
722 | } | 787 | } |
723 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); | 788 | si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0); |
724 | } | 789 | } |
725 | 790 | ||
726 | /* We prefer handling attn over new messages. */ | 791 | /* |
727 | if (si_sm_result == SI_SM_ATTN) | 792 | * We prefer handling attn over new messages. But don't do |
728 | { | 793 | * this if there is not yet an upper layer to handle anything. |
794 | */ | ||
795 | if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) { | ||
729 | unsigned char msg[2]; | 796 | unsigned char msg[2]; |
730 | 797 | ||
731 | spin_lock(&smi_info->count_lock); | 798 | smi_inc_stat(smi_info, attentions); |
732 | smi_info->attentions++; | ||
733 | spin_unlock(&smi_info->count_lock); | ||
734 | 799 | ||
735 | /* Got a attn, send down a get message flags to see | 800 | /* |
736 | what's causing it. It would be better to handle | 801 | * Got a attn, send down a get message flags to see |
737 | this in the upper layer, but due to the way | 802 | * what's causing it. It would be better to handle |
738 | interrupts work with the SMI, that's not really | 803 | * this in the upper layer, but due to the way |
739 | possible. */ | 804 | * interrupts work with the SMI, that's not really |
805 | * possible. | ||
806 | */ | ||
740 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); | 807 | msg[0] = (IPMI_NETFN_APP_REQUEST << 2); |
741 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; | 808 | msg[1] = IPMI_GET_MSG_FLAGS_CMD; |
742 | 809 | ||
@@ -748,20 +815,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info, | |||
748 | 815 | ||
749 | /* If we are currently idle, try to start the next message. */ | 816 | /* If we are currently idle, try to start the next message. */ |
750 | if (si_sm_result == SI_SM_IDLE) { | 817 | if (si_sm_result == SI_SM_IDLE) { |
751 | spin_lock(&smi_info->count_lock); | 818 | smi_inc_stat(smi_info, idles); |
752 | smi_info->idles++; | ||
753 | spin_unlock(&smi_info->count_lock); | ||
754 | 819 | ||
755 | si_sm_result = start_next_msg(smi_info); | 820 | si_sm_result = start_next_msg(smi_info); |
756 | if (si_sm_result != SI_SM_IDLE) | 821 | if (si_sm_result != SI_SM_IDLE) |
757 | goto restart; | 822 | goto restart; |
758 | } | 823 | } |
759 | 824 | ||
760 | if ((si_sm_result == SI_SM_IDLE) | 825 | if ((si_sm_result == SI_SM_IDLE) |
761 | && (atomic_read(&smi_info->req_events))) | 826 | && (atomic_read(&smi_info->req_events))) { |
762 | { | 827 | /* |
763 | /* We are idle and the upper layer requested that I fetch | 828 | * We are idle and the upper layer requested that I fetch |
764 | events, so do so. */ | 829 | * events, so do so. |
830 | */ | ||
765 | atomic_set(&smi_info->req_events, 0); | 831 | atomic_set(&smi_info->req_events, 0); |
766 | 832 | ||
767 | smi_info->curr_msg = ipmi_alloc_smi_msg(); | 833 | smi_info->curr_msg = ipmi_alloc_smi_msg(); |
@@ -803,56 +869,50 @@ static void sender(void *send_info, | |||
803 | return; | 869 | return; |
804 | } | 870 | } |
805 | 871 | ||
806 | spin_lock_irqsave(&(smi_info->msg_lock), flags); | ||
807 | #ifdef DEBUG_TIMING | 872 | #ifdef DEBUG_TIMING |
808 | do_gettimeofday(&t); | 873 | do_gettimeofday(&t); |
809 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 874 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
810 | #endif | 875 | #endif |
811 | 876 | ||
812 | if (smi_info->run_to_completion) { | 877 | if (smi_info->run_to_completion) { |
813 | /* If we are running to completion, then throw it in | 878 | /* |
814 | the list and run transactions until everything is | 879 | * If we are running to completion, then throw it in |
815 | clear. Priority doesn't matter here. */ | 880 | * the list and run transactions until everything is |
881 | * clear. Priority doesn't matter here. | ||
882 | */ | ||
883 | |||
884 | /* | ||
885 | * Run to completion means we are single-threaded, no | ||
886 | * need for locks. | ||
887 | */ | ||
816 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); | 888 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); |
817 | 889 | ||
818 | /* We have to release the msg lock and claim the smi | ||
819 | lock in this case, because of race conditions. */ | ||
820 | spin_unlock_irqrestore(&(smi_info->msg_lock), flags); | ||
821 | |||
822 | spin_lock_irqsave(&(smi_info->si_lock), flags); | ||
823 | result = smi_event_handler(smi_info, 0); | 890 | result = smi_event_handler(smi_info, 0); |
824 | while (result != SI_SM_IDLE) { | 891 | while (result != SI_SM_IDLE) { |
825 | udelay(SI_SHORT_TIMEOUT_USEC); | 892 | udelay(SI_SHORT_TIMEOUT_USEC); |
826 | result = smi_event_handler(smi_info, | 893 | result = smi_event_handler(smi_info, |
827 | SI_SHORT_TIMEOUT_USEC); | 894 | SI_SHORT_TIMEOUT_USEC); |
828 | } | 895 | } |
829 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
830 | return; | 896 | return; |
831 | } else { | ||
832 | if (priority > 0) { | ||
833 | list_add_tail(&(msg->link), &(smi_info->hp_xmit_msgs)); | ||
834 | } else { | ||
835 | list_add_tail(&(msg->link), &(smi_info->xmit_msgs)); | ||
836 | } | ||
837 | } | 897 | } |
838 | spin_unlock_irqrestore(&(smi_info->msg_lock), flags); | ||
839 | 898 | ||
840 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 899 | spin_lock_irqsave(&smi_info->msg_lock, flags); |
841 | if ((smi_info->si_state == SI_NORMAL) | 900 | if (priority > 0) |
842 | && (smi_info->curr_msg == NULL)) | 901 | list_add_tail(&msg->link, &smi_info->hp_xmit_msgs); |
843 | { | 902 | else |
903 | list_add_tail(&msg->link, &smi_info->xmit_msgs); | ||
904 | spin_unlock_irqrestore(&smi_info->msg_lock, flags); | ||
905 | |||
906 | spin_lock_irqsave(&smi_info->si_lock, flags); | ||
907 | if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) | ||
844 | start_next_msg(smi_info); | 908 | start_next_msg(smi_info); |
845 | } | 909 | spin_unlock_irqrestore(&smi_info->si_lock, flags); |
846 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
847 | } | 910 | } |
848 | 911 | ||
849 | static void set_run_to_completion(void *send_info, int i_run_to_completion) | 912 | static void set_run_to_completion(void *send_info, int i_run_to_completion) |
850 | { | 913 | { |
851 | struct smi_info *smi_info = send_info; | 914 | struct smi_info *smi_info = send_info; |
852 | enum si_sm_result result; | 915 | enum si_sm_result result; |
853 | unsigned long flags; | ||
854 | |||
855 | spin_lock_irqsave(&(smi_info->si_lock), flags); | ||
856 | 916 | ||
857 | smi_info->run_to_completion = i_run_to_completion; | 917 | smi_info->run_to_completion = i_run_to_completion; |
858 | if (i_run_to_completion) { | 918 | if (i_run_to_completion) { |
@@ -863,8 +923,6 @@ static void set_run_to_completion(void *send_info, int i_run_to_completion) | |||
863 | SI_SHORT_TIMEOUT_USEC); | 923 | SI_SHORT_TIMEOUT_USEC); |
864 | } | 924 | } |
865 | } | 925 | } |
866 | |||
867 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | ||
868 | } | 926 | } |
869 | 927 | ||
870 | static int ipmi_thread(void *data) | 928 | static int ipmi_thread(void *data) |
@@ -878,9 +936,8 @@ static int ipmi_thread(void *data) | |||
878 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 936 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
879 | smi_result = smi_event_handler(smi_info, 0); | 937 | smi_result = smi_event_handler(smi_info, 0); |
880 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 938 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
881 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { | 939 | if (smi_result == SI_SM_CALL_WITHOUT_DELAY) |
882 | /* do nothing */ | 940 | ; /* do nothing */ |
883 | } | ||
884 | else if (smi_result == SI_SM_CALL_WITH_DELAY) | 941 | else if (smi_result == SI_SM_CALL_WITH_DELAY) |
885 | schedule(); | 942 | schedule(); |
886 | else | 943 | else |
@@ -931,7 +988,7 @@ static void smi_timeout(unsigned long data) | |||
931 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 988 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
932 | #ifdef DEBUG_TIMING | 989 | #ifdef DEBUG_TIMING |
933 | do_gettimeofday(&t); | 990 | do_gettimeofday(&t); |
934 | printk("**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 991 | printk(KERN_DEBUG "**Timer: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
935 | #endif | 992 | #endif |
936 | jiffies_now = jiffies; | 993 | jiffies_now = jiffies; |
937 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) | 994 | time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies) |
@@ -945,23 +1002,19 @@ static void smi_timeout(unsigned long data) | |||
945 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { | 1002 | if ((smi_info->irq) && (!smi_info->interrupt_disabled)) { |
946 | /* Running with interrupts, only do long timeouts. */ | 1003 | /* Running with interrupts, only do long timeouts. */ |
947 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1004 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
948 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1005 | smi_inc_stat(smi_info, long_timeouts); |
949 | smi_info->long_timeouts++; | ||
950 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
951 | goto do_add_timer; | 1006 | goto do_add_timer; |
952 | } | 1007 | } |
953 | 1008 | ||
954 | /* If the state machine asks for a short delay, then shorten | 1009 | /* |
955 | the timer timeout. */ | 1010 | * If the state machine asks for a short delay, then shorten |
1011 | * the timer timeout. | ||
1012 | */ | ||
956 | if (smi_result == SI_SM_CALL_WITH_DELAY) { | 1013 | if (smi_result == SI_SM_CALL_WITH_DELAY) { |
957 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1014 | smi_inc_stat(smi_info, short_timeouts); |
958 | smi_info->short_timeouts++; | ||
959 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
960 | smi_info->si_timer.expires = jiffies + 1; | 1015 | smi_info->si_timer.expires = jiffies + 1; |
961 | } else { | 1016 | } else { |
962 | spin_lock_irqsave(&smi_info->count_lock, flags); | 1017 | smi_inc_stat(smi_info, long_timeouts); |
963 | smi_info->long_timeouts++; | ||
964 | spin_unlock_irqrestore(&smi_info->count_lock, flags); | ||
965 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; | 1018 | smi_info->si_timer.expires = jiffies + SI_TIMEOUT_JIFFIES; |
966 | } | 1019 | } |
967 | 1020 | ||
@@ -979,13 +1032,11 @@ static irqreturn_t si_irq_handler(int irq, void *data) | |||
979 | 1032 | ||
980 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1033 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
981 | 1034 | ||
982 | spin_lock(&smi_info->count_lock); | 1035 | smi_inc_stat(smi_info, interrupts); |
983 | smi_info->interrupts++; | ||
984 | spin_unlock(&smi_info->count_lock); | ||
985 | 1036 | ||
986 | #ifdef DEBUG_TIMING | 1037 | #ifdef DEBUG_TIMING |
987 | do_gettimeofday(&t); | 1038 | do_gettimeofday(&t); |
988 | printk("**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 1039 | printk(KERN_DEBUG "**Interrupt: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
989 | #endif | 1040 | #endif |
990 | smi_event_handler(smi_info, 0); | 1041 | smi_event_handler(smi_info, 0); |
991 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); | 1042 | spin_unlock_irqrestore(&(smi_info->si_lock), flags); |
@@ -1028,7 +1079,7 @@ static int smi_start_processing(void *send_info, | |||
1028 | * The BT interface is efficient enough to not need a thread, | 1079 | * The BT interface is efficient enough to not need a thread, |
1029 | * and there is no need for a thread if we have interrupts. | 1080 | * and there is no need for a thread if we have interrupts. |
1030 | */ | 1081 | */ |
1031 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) | 1082 | else if ((new_smi->si_type != SI_BT) && (!new_smi->irq)) |
1032 | enable = 1; | 1083 | enable = 1; |
1033 | 1084 | ||
1034 | if (enable) { | 1085 | if (enable) { |
@@ -1054,8 +1105,7 @@ static void set_maintenance_mode(void *send_info, int enable) | |||
1054 | atomic_set(&smi_info->req_events, 0); | 1105 | atomic_set(&smi_info->req_events, 0); |
1055 | } | 1106 | } |
1056 | 1107 | ||
1057 | static struct ipmi_smi_handlers handlers = | 1108 | static struct ipmi_smi_handlers handlers = { |
1058 | { | ||
1059 | .owner = THIS_MODULE, | 1109 | .owner = THIS_MODULE, |
1060 | .start_processing = smi_start_processing, | 1110 | .start_processing = smi_start_processing, |
1061 | .sender = sender, | 1111 | .sender = sender, |
@@ -1065,8 +1115,10 @@ static struct ipmi_smi_handlers handlers = | |||
1065 | .poll = poll, | 1115 | .poll = poll, |
1066 | }; | 1116 | }; |
1067 | 1117 | ||
1068 | /* There can be 4 IO ports passed in (with or without IRQs), 4 addresses, | 1118 | /* |
1069 | a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS */ | 1119 | * There can be 4 IO ports passed in (with or without IRQs), 4 addresses, |
1120 | * a default IO port, and 1 ACPI/SPMI address. That sets SI_MAX_DRIVERS. | ||
1121 | */ | ||
1070 | 1122 | ||
1071 | static LIST_HEAD(smi_infos); | 1123 | static LIST_HEAD(smi_infos); |
1072 | static DEFINE_MUTEX(smi_infos_lock); | 1124 | static DEFINE_MUTEX(smi_infos_lock); |
@@ -1257,10 +1309,9 @@ static void port_cleanup(struct smi_info *info) | |||
1257 | int idx; | 1309 | int idx; |
1258 | 1310 | ||
1259 | if (addr) { | 1311 | if (addr) { |
1260 | for (idx = 0; idx < info->io_size; idx++) { | 1312 | for (idx = 0; idx < info->io_size; idx++) |
1261 | release_region(addr + idx * info->io.regspacing, | 1313 | release_region(addr + idx * info->io.regspacing, |
1262 | info->io.regsize); | 1314 | info->io.regsize); |
1263 | } | ||
1264 | } | 1315 | } |
1265 | } | 1316 | } |
1266 | 1317 | ||
@@ -1274,8 +1325,10 @@ static int port_setup(struct smi_info *info) | |||
1274 | 1325 | ||
1275 | info->io_cleanup = port_cleanup; | 1326 | info->io_cleanup = port_cleanup; |
1276 | 1327 | ||
1277 | /* Figure out the actual inb/inw/inl/etc routine to use based | 1328 | /* |
1278 | upon the register size. */ | 1329 | * Figure out the actual inb/inw/inl/etc routine to use based |
1330 | * upon the register size. | ||
1331 | */ | ||
1279 | switch (info->io.regsize) { | 1332 | switch (info->io.regsize) { |
1280 | case 1: | 1333 | case 1: |
1281 | info->io.inputb = port_inb; | 1334 | info->io.inputb = port_inb; |
@@ -1290,17 +1343,18 @@ static int port_setup(struct smi_info *info) | |||
1290 | info->io.outputb = port_outl; | 1343 | info->io.outputb = port_outl; |
1291 | break; | 1344 | break; |
1292 | default: | 1345 | default: |
1293 | printk("ipmi_si: Invalid register size: %d\n", | 1346 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", |
1294 | info->io.regsize); | 1347 | info->io.regsize); |
1295 | return -EINVAL; | 1348 | return -EINVAL; |
1296 | } | 1349 | } |
1297 | 1350 | ||
1298 | /* Some BIOSes reserve disjoint I/O regions in their ACPI | 1351 | /* |
1352 | * Some BIOSes reserve disjoint I/O regions in their ACPI | ||
1299 | * tables. This causes problems when trying to register the | 1353 | * tables. This causes problems when trying to register the |
1300 | * entire I/O region. Therefore we must register each I/O | 1354 | * entire I/O region. Therefore we must register each I/O |
1301 | * port separately. | 1355 | * port separately. |
1302 | */ | 1356 | */ |
1303 | for (idx = 0; idx < info->io_size; idx++) { | 1357 | for (idx = 0; idx < info->io_size; idx++) { |
1304 | if (request_region(addr + idx * info->io.regspacing, | 1358 | if (request_region(addr + idx * info->io.regspacing, |
1305 | info->io.regsize, DEVICE_NAME) == NULL) { | 1359 | info->io.regsize, DEVICE_NAME) == NULL) { |
1306 | /* Undo allocations */ | 1360 | /* Undo allocations */ |
@@ -1388,8 +1442,10 @@ static int mem_setup(struct smi_info *info) | |||
1388 | 1442 | ||
1389 | info->io_cleanup = mem_cleanup; | 1443 | info->io_cleanup = mem_cleanup; |
1390 | 1444 | ||
1391 | /* Figure out the actual readb/readw/readl/etc routine to use based | 1445 | /* |
1392 | upon the register size. */ | 1446 | * Figure out the actual readb/readw/readl/etc routine to use based |
1447 | * upon the register size. | ||
1448 | */ | ||
1393 | switch (info->io.regsize) { | 1449 | switch (info->io.regsize) { |
1394 | case 1: | 1450 | case 1: |
1395 | info->io.inputb = intf_mem_inb; | 1451 | info->io.inputb = intf_mem_inb; |
@@ -1410,16 +1466,18 @@ static int mem_setup(struct smi_info *info) | |||
1410 | break; | 1466 | break; |
1411 | #endif | 1467 | #endif |
1412 | default: | 1468 | default: |
1413 | printk("ipmi_si: Invalid register size: %d\n", | 1469 | printk(KERN_WARNING "ipmi_si: Invalid register size: %d\n", |
1414 | info->io.regsize); | 1470 | info->io.regsize); |
1415 | return -EINVAL; | 1471 | return -EINVAL; |
1416 | } | 1472 | } |
1417 | 1473 | ||
1418 | /* Calculate the total amount of memory to claim. This is an | 1474 | /* |
1475 | * Calculate the total amount of memory to claim. This is an | ||
1419 | * unusual looking calculation, but it avoids claiming any | 1476 | * unusual looking calculation, but it avoids claiming any |
1420 | * more memory than it has to. It will claim everything | 1477 | * more memory than it has to. It will claim everything |
1421 | * between the first address to the end of the last full | 1478 | * between the first address to the end of the last full |
1422 | * register. */ | 1479 | * register. |
1480 | */ | ||
1423 | mapsize = ((info->io_size * info->io.regspacing) | 1481 | mapsize = ((info->io_size * info->io.regspacing) |
1424 | - (info->io.regspacing - info->io.regsize)); | 1482 | - (info->io.regspacing - info->io.regsize)); |
1425 | 1483 | ||
@@ -1749,9 +1807,11 @@ static __devinit void hardcode_find_bmc(void) | |||
1749 | 1807 | ||
1750 | #include <linux/acpi.h> | 1808 | #include <linux/acpi.h> |
1751 | 1809 | ||
1752 | /* Once we get an ACPI failure, we don't try any more, because we go | 1810 | /* |
1753 | through the tables sequentially. Once we don't find a table, there | 1811 | * Once we get an ACPI failure, we don't try any more, because we go |
1754 | are no more. */ | 1812 | * through the tables sequentially. Once we don't find a table, there |
1813 | * are no more. | ||
1814 | */ | ||
1755 | static int acpi_failure; | 1815 | static int acpi_failure; |
1756 | 1816 | ||
1757 | /* For GPE-type interrupts. */ | 1817 | /* For GPE-type interrupts. */ |
@@ -1765,9 +1825,7 @@ static u32 ipmi_acpi_gpe(void *context) | |||
1765 | 1825 | ||
1766 | spin_lock_irqsave(&(smi_info->si_lock), flags); | 1826 | spin_lock_irqsave(&(smi_info->si_lock), flags); |
1767 | 1827 | ||
1768 | spin_lock(&smi_info->count_lock); | 1828 | smi_inc_stat(smi_info, interrupts); |
1769 | smi_info->interrupts++; | ||
1770 | spin_unlock(&smi_info->count_lock); | ||
1771 | 1829 | ||
1772 | #ifdef DEBUG_TIMING | 1830 | #ifdef DEBUG_TIMING |
1773 | do_gettimeofday(&t); | 1831 | do_gettimeofday(&t); |
@@ -1816,7 +1874,8 @@ static int acpi_gpe_irq_setup(struct smi_info *info) | |||
1816 | 1874 | ||
1817 | /* | 1875 | /* |
1818 | * Defined at | 1876 | * Defined at |
1819 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/Docs/TechPapers/IA64/hpspmi.pdf | 1877 | * http://h21007.www2.hp.com/dspp/files/unprotected/devresource/ |
1878 | * Docs/TechPapers/IA64/hpspmi.pdf | ||
1820 | */ | 1879 | */ |
1821 | struct SPMITable { | 1880 | struct SPMITable { |
1822 | s8 Signature[4]; | 1881 | s8 Signature[4]; |
@@ -1838,14 +1897,18 @@ struct SPMITable { | |||
1838 | */ | 1897 | */ |
1839 | u8 InterruptType; | 1898 | u8 InterruptType; |
1840 | 1899 | ||
1841 | /* If bit 0 of InterruptType is set, then this is the SCI | 1900 | /* |
1842 | interrupt in the GPEx_STS register. */ | 1901 | * If bit 0 of InterruptType is set, then this is the SCI |
1902 | * interrupt in the GPEx_STS register. | ||
1903 | */ | ||
1843 | u8 GPE; | 1904 | u8 GPE; |
1844 | 1905 | ||
1845 | s16 Reserved; | 1906 | s16 Reserved; |
1846 | 1907 | ||
1847 | /* If bit 1 of InterruptType is set, then this is the I/O | 1908 | /* |
1848 | APIC/SAPIC interrupt. */ | 1909 | * If bit 1 of InterruptType is set, then this is the I/O |
1910 | * APIC/SAPIC interrupt. | ||
1911 | */ | ||
1849 | u32 GlobalSystemInterrupt; | 1912 | u32 GlobalSystemInterrupt; |
1850 | 1913 | ||
1851 | /* The actual register address. */ | 1914 | /* The actual register address. */ |
@@ -1863,7 +1926,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1863 | 1926 | ||
1864 | if (spmi->IPMIlegacy != 1) { | 1927 | if (spmi->IPMIlegacy != 1) { |
1865 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); | 1928 | printk(KERN_INFO "IPMI: Bad SPMI legacy %d\n", spmi->IPMIlegacy); |
1866 | return -ENODEV; | 1929 | return -ENODEV; |
1867 | } | 1930 | } |
1868 | 1931 | ||
1869 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) | 1932 | if (spmi->addr.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) |
@@ -1880,8 +1943,7 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1880 | info->addr_source = "ACPI"; | 1943 | info->addr_source = "ACPI"; |
1881 | 1944 | ||
1882 | /* Figure out the interface type. */ | 1945 | /* Figure out the interface type. */ |
1883 | switch (spmi->InterfaceType) | 1946 | switch (spmi->InterfaceType) { |
1884 | { | ||
1885 | case 1: /* KCS */ | 1947 | case 1: /* KCS */ |
1886 | info->si_type = SI_KCS; | 1948 | info->si_type = SI_KCS; |
1887 | break; | 1949 | break; |
@@ -1929,7 +1991,8 @@ static __devinit int try_init_acpi(struct SPMITable *spmi) | |||
1929 | info->io.addr_type = IPMI_IO_ADDR_SPACE; | 1991 | info->io.addr_type = IPMI_IO_ADDR_SPACE; |
1930 | } else { | 1992 | } else { |
1931 | kfree(info); | 1993 | kfree(info); |
1932 | printk("ipmi_si: Unknown ACPI I/O Address type\n"); | 1994 | printk(KERN_WARNING |
1995 | "ipmi_si: Unknown ACPI I/O Address type\n"); | ||
1933 | return -EIO; | 1996 | return -EIO; |
1934 | } | 1997 | } |
1935 | info->io.addr_data = spmi->addr.address; | 1998 | info->io.addr_data = spmi->addr.address; |
@@ -1963,8 +2026,7 @@ static __devinit void acpi_find_bmc(void) | |||
1963 | #endif | 2026 | #endif |
1964 | 2027 | ||
1965 | #ifdef CONFIG_DMI | 2028 | #ifdef CONFIG_DMI |
1966 | struct dmi_ipmi_data | 2029 | struct dmi_ipmi_data { |
1967 | { | ||
1968 | u8 type; | 2030 | u8 type; |
1969 | u8 addr_space; | 2031 | u8 addr_space; |
1970 | unsigned long base_addr; | 2032 | unsigned long base_addr; |
@@ -1989,11 +2051,10 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
1989 | /* I/O */ | 2051 | /* I/O */ |
1990 | base_addr &= 0xFFFE; | 2052 | base_addr &= 0xFFFE; |
1991 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2053 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
1992 | } | 2054 | } else |
1993 | else { | ||
1994 | /* Memory */ | 2055 | /* Memory */ |
1995 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; | 2056 | dmi->addr_space = IPMI_MEM_ADDR_SPACE; |
1996 | } | 2057 | |
1997 | /* If bit 4 of byte 0x10 is set, then the lsb for the address | 2058 | /* If bit 4 of byte 0x10 is set, then the lsb for the address |
1998 | is odd. */ | 2059 | is odd. */ |
1999 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); | 2060 | dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); |
@@ -2002,7 +2063,7 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
2002 | 2063 | ||
2003 | /* The top two bits of byte 0x10 hold the register spacing. */ | 2064 | /* The top two bits of byte 0x10 hold the register spacing. */ |
2004 | reg_spacing = (data[0x10] & 0xC0) >> 6; | 2065 | reg_spacing = (data[0x10] & 0xC0) >> 6; |
2005 | switch(reg_spacing){ | 2066 | switch (reg_spacing) { |
2006 | case 0x00: /* Byte boundaries */ | 2067 | case 0x00: /* Byte boundaries */ |
2007 | dmi->offset = 1; | 2068 | dmi->offset = 1; |
2008 | break; | 2069 | break; |
@@ -2018,12 +2079,14 @@ static int __devinit decode_dmi(const struct dmi_header *dm, | |||
2018 | } | 2079 | } |
2019 | } else { | 2080 | } else { |
2020 | /* Old DMI spec. */ | 2081 | /* Old DMI spec. */ |
2021 | /* Note that technically, the lower bit of the base | 2082 | /* |
2083 | * Note that technically, the lower bit of the base | ||
2022 | * address should be 1 if the address is I/O and 0 if | 2084 | * address should be 1 if the address is I/O and 0 if |
2023 | * the address is in memory. So many systems get that | 2085 | * the address is in memory. So many systems get that |
2024 | * wrong (and all that I have seen are I/O) so we just | 2086 | * wrong (and all that I have seen are I/O) so we just |
2025 | * ignore that bit and assume I/O. Systems that use | 2087 | * ignore that bit and assume I/O. Systems that use |
2026 | * memory should use the newer spec, anyway. */ | 2088 | * memory should use the newer spec, anyway. |
2089 | */ | ||
2027 | dmi->base_addr = base_addr & 0xfffe; | 2090 | dmi->base_addr = base_addr & 0xfffe; |
2028 | dmi->addr_space = IPMI_IO_ADDR_SPACE; | 2091 | dmi->addr_space = IPMI_IO_ADDR_SPACE; |
2029 | dmi->offset = 1; | 2092 | dmi->offset = 1; |
@@ -2230,13 +2293,13 @@ static struct pci_device_id ipmi_pci_devices[] = { | |||
2230 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); | 2293 | MODULE_DEVICE_TABLE(pci, ipmi_pci_devices); |
2231 | 2294 | ||
2232 | static struct pci_driver ipmi_pci_driver = { | 2295 | static struct pci_driver ipmi_pci_driver = { |
2233 | .name = DEVICE_NAME, | 2296 | .name = DEVICE_NAME, |
2234 | .id_table = ipmi_pci_devices, | 2297 | .id_table = ipmi_pci_devices, |
2235 | .probe = ipmi_pci_probe, | 2298 | .probe = ipmi_pci_probe, |
2236 | .remove = __devexit_p(ipmi_pci_remove), | 2299 | .remove = __devexit_p(ipmi_pci_remove), |
2237 | #ifdef CONFIG_PM | 2300 | #ifdef CONFIG_PM |
2238 | .suspend = ipmi_pci_suspend, | 2301 | .suspend = ipmi_pci_suspend, |
2239 | .resume = ipmi_pci_resume, | 2302 | .resume = ipmi_pci_resume, |
2240 | #endif | 2303 | #endif |
2241 | }; | 2304 | }; |
2242 | #endif /* CONFIG_PCI */ | 2305 | #endif /* CONFIG_PCI */ |
@@ -2306,7 +2369,7 @@ static int __devinit ipmi_of_probe(struct of_device *dev, | |||
2306 | info->io.addr_data, info->io.regsize, info->io.regspacing, | 2369 | info->io.addr_data, info->io.regsize, info->io.regspacing, |
2307 | info->irq); | 2370 | info->irq); |
2308 | 2371 | ||
2309 | dev->dev.driver_data = (void*) info; | 2372 | dev->dev.driver_data = (void *) info; |
2310 | 2373 | ||
2311 | return try_smi_init(info); | 2374 | return try_smi_init(info); |
2312 | } | 2375 | } |
@@ -2319,14 +2382,16 @@ static int __devexit ipmi_of_remove(struct of_device *dev) | |||
2319 | 2382 | ||
2320 | static struct of_device_id ipmi_match[] = | 2383 | static struct of_device_id ipmi_match[] = |
2321 | { | 2384 | { |
2322 | { .type = "ipmi", .compatible = "ipmi-kcs", .data = (void *)(unsigned long) SI_KCS }, | 2385 | { .type = "ipmi", .compatible = "ipmi-kcs", |
2323 | { .type = "ipmi", .compatible = "ipmi-smic", .data = (void *)(unsigned long) SI_SMIC }, | 2386 | .data = (void *)(unsigned long) SI_KCS }, |
2324 | { .type = "ipmi", .compatible = "ipmi-bt", .data = (void *)(unsigned long) SI_BT }, | 2387 | { .type = "ipmi", .compatible = "ipmi-smic", |
2388 | .data = (void *)(unsigned long) SI_SMIC }, | ||
2389 | { .type = "ipmi", .compatible = "ipmi-bt", | ||
2390 | .data = (void *)(unsigned long) SI_BT }, | ||
2325 | {}, | 2391 | {}, |
2326 | }; | 2392 | }; |
2327 | 2393 | ||
2328 | static struct of_platform_driver ipmi_of_platform_driver = | 2394 | static struct of_platform_driver ipmi_of_platform_driver = { |
2329 | { | ||
2330 | .name = "ipmi", | 2395 | .name = "ipmi", |
2331 | .match_table = ipmi_match, | 2396 | .match_table = ipmi_match, |
2332 | .probe = ipmi_of_probe, | 2397 | .probe = ipmi_of_probe, |
@@ -2347,32 +2412,32 @@ static int try_get_dev_id(struct smi_info *smi_info) | |||
2347 | if (!resp) | 2412 | if (!resp) |
2348 | return -ENOMEM; | 2413 | return -ENOMEM; |
2349 | 2414 | ||
2350 | /* Do a Get Device ID command, since it comes back with some | 2415 | /* |
2351 | useful info. */ | 2416 | * Do a Get Device ID command, since it comes back with some |
2417 | * useful info. | ||
2418 | */ | ||
2352 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; | 2419 | msg[0] = IPMI_NETFN_APP_REQUEST << 2; |
2353 | msg[1] = IPMI_GET_DEVICE_ID_CMD; | 2420 | msg[1] = IPMI_GET_DEVICE_ID_CMD; |
2354 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); | 2421 | smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2); |
2355 | 2422 | ||
2356 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); | 2423 | smi_result = smi_info->handlers->event(smi_info->si_sm, 0); |
2357 | for (;;) | 2424 | for (;;) { |
2358 | { | ||
2359 | if (smi_result == SI_SM_CALL_WITH_DELAY || | 2425 | if (smi_result == SI_SM_CALL_WITH_DELAY || |
2360 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { | 2426 | smi_result == SI_SM_CALL_WITH_TICK_DELAY) { |
2361 | schedule_timeout_uninterruptible(1); | 2427 | schedule_timeout_uninterruptible(1); |
2362 | smi_result = smi_info->handlers->event( | 2428 | smi_result = smi_info->handlers->event( |
2363 | smi_info->si_sm, 100); | 2429 | smi_info->si_sm, 100); |
2364 | } | 2430 | } else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) { |
2365 | else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) | ||
2366 | { | ||
2367 | smi_result = smi_info->handlers->event( | 2431 | smi_result = smi_info->handlers->event( |
2368 | smi_info->si_sm, 0); | 2432 | smi_info->si_sm, 0); |
2369 | } | 2433 | } else |
2370 | else | ||
2371 | break; | 2434 | break; |
2372 | } | 2435 | } |
2373 | if (smi_result == SI_SM_HOSED) { | 2436 | if (smi_result == SI_SM_HOSED) { |
2374 | /* We couldn't get the state machine to run, so whatever's at | 2437 | /* |
2375 | the port is probably not an IPMI SMI interface. */ | 2438 | * We couldn't get the state machine to run, so whatever's at |
2439 | * the port is probably not an IPMI SMI interface. | ||
2440 | */ | ||
2376 | rv = -ENODEV; | 2441 | rv = -ENODEV; |
2377 | goto out; | 2442 | goto out; |
2378 | } | 2443 | } |
@@ -2405,30 +2470,28 @@ static int stat_file_read_proc(char *page, char **start, off_t off, | |||
2405 | 2470 | ||
2406 | out += sprintf(out, "interrupts_enabled: %d\n", | 2471 | out += sprintf(out, "interrupts_enabled: %d\n", |
2407 | smi->irq && !smi->interrupt_disabled); | 2472 | smi->irq && !smi->interrupt_disabled); |
2408 | out += sprintf(out, "short_timeouts: %ld\n", | 2473 | out += sprintf(out, "short_timeouts: %u\n", |
2409 | smi->short_timeouts); | 2474 | smi_get_stat(smi, short_timeouts)); |
2410 | out += sprintf(out, "long_timeouts: %ld\n", | 2475 | out += sprintf(out, "long_timeouts: %u\n", |
2411 | smi->long_timeouts); | 2476 | smi_get_stat(smi, long_timeouts)); |
2412 | out += sprintf(out, "timeout_restarts: %ld\n", | 2477 | out += sprintf(out, "idles: %u\n", |
2413 | smi->timeout_restarts); | 2478 | smi_get_stat(smi, idles)); |
2414 | out += sprintf(out, "idles: %ld\n", | 2479 | out += sprintf(out, "interrupts: %u\n", |
2415 | smi->idles); | 2480 | smi_get_stat(smi, interrupts)); |
2416 | out += sprintf(out, "interrupts: %ld\n", | 2481 | out += sprintf(out, "attentions: %u\n", |
2417 | smi->interrupts); | 2482 | smi_get_stat(smi, attentions)); |
2418 | out += sprintf(out, "attentions: %ld\n", | 2483 | out += sprintf(out, "flag_fetches: %u\n", |
2419 | smi->attentions); | 2484 | smi_get_stat(smi, flag_fetches)); |
2420 | out += sprintf(out, "flag_fetches: %ld\n", | 2485 | out += sprintf(out, "hosed_count: %u\n", |
2421 | smi->flag_fetches); | 2486 | smi_get_stat(smi, hosed_count)); |
2422 | out += sprintf(out, "hosed_count: %ld\n", | 2487 | out += sprintf(out, "complete_transactions: %u\n", |
2423 | smi->hosed_count); | 2488 | smi_get_stat(smi, complete_transactions)); |
2424 | out += sprintf(out, "complete_transactions: %ld\n", | 2489 | out += sprintf(out, "events: %u\n", |
2425 | smi->complete_transactions); | 2490 | smi_get_stat(smi, events)); |
2426 | out += sprintf(out, "events: %ld\n", | 2491 | out += sprintf(out, "watchdog_pretimeouts: %u\n", |
2427 | smi->events); | 2492 | smi_get_stat(smi, watchdog_pretimeouts)); |
2428 | out += sprintf(out, "watchdog_pretimeouts: %ld\n", | 2493 | out += sprintf(out, "incoming_messages: %u\n", |
2429 | smi->watchdog_pretimeouts); | 2494 | smi_get_stat(smi, incoming_messages)); |
2430 | out += sprintf(out, "incoming_messages: %ld\n", | ||
2431 | smi->incoming_messages); | ||
2432 | 2495 | ||
2433 | return out - page; | 2496 | return out - page; |
2434 | } | 2497 | } |
@@ -2460,7 +2523,7 @@ static int param_read_proc(char *page, char **start, off_t off, | |||
2460 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) | 2523 | static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info) |
2461 | { | 2524 | { |
2462 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | | 2525 | smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) | |
2463 | RECEIVE_MSG_AVAIL); | 2526 | RECEIVE_MSG_AVAIL); |
2464 | return 1; | 2527 | return 1; |
2465 | } | 2528 | } |
2466 | 2529 | ||
@@ -2502,10 +2565,9 @@ static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info) | |||
2502 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { | 2565 | id->ipmi_version == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) { |
2503 | smi_info->oem_data_avail_handler = | 2566 | smi_info->oem_data_avail_handler = |
2504 | oem_data_avail_to_receive_msg_avail; | 2567 | oem_data_avail_to_receive_msg_avail; |
2505 | } | 2568 | } else if (ipmi_version_major(id) < 1 || |
2506 | else if (ipmi_version_major(id) < 1 || | 2569 | (ipmi_version_major(id) == 1 && |
2507 | (ipmi_version_major(id) == 1 && | 2570 | ipmi_version_minor(id) < 5)) { |
2508 | ipmi_version_minor(id) < 5)) { | ||
2509 | smi_info->oem_data_avail_handler = | 2571 | smi_info->oem_data_avail_handler = |
2510 | oem_data_avail_to_receive_msg_avail; | 2572 | oem_data_avail_to_receive_msg_avail; |
2511 | } | 2573 | } |
@@ -2597,8 +2659,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info) | |||
2597 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) | 2659 | static inline void wait_for_timer_and_thread(struct smi_info *smi_info) |
2598 | { | 2660 | { |
2599 | if (smi_info->intf) { | 2661 | if (smi_info->intf) { |
2600 | /* The timer and thread are only running if the | 2662 | /* |
2601 | interface has been started up and registered. */ | 2663 | * The timer and thread are only running if the |
2664 | * interface has been started up and registered. | ||
2665 | */ | ||
2602 | if (smi_info->thread != NULL) | 2666 | if (smi_info->thread != NULL) |
2603 | kthread_stop(smi_info->thread); | 2667 | kthread_stop(smi_info->thread); |
2604 | del_timer_sync(&smi_info->si_timer); | 2668 | del_timer_sync(&smi_info->si_timer); |
@@ -2676,6 +2740,7 @@ static int is_new_interface(struct smi_info *info) | |||
2676 | static int try_smi_init(struct smi_info *new_smi) | 2740 | static int try_smi_init(struct smi_info *new_smi) |
2677 | { | 2741 | { |
2678 | int rv; | 2742 | int rv; |
2743 | int i; | ||
2679 | 2744 | ||
2680 | if (new_smi->addr_source) { | 2745 | if (new_smi->addr_source) { |
2681 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" | 2746 | printk(KERN_INFO "ipmi_si: Trying %s-specified %s state" |
@@ -2722,7 +2787,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2722 | /* Allocate the state machine's data and initialize it. */ | 2787 | /* Allocate the state machine's data and initialize it. */ |
2723 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); | 2788 | new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL); |
2724 | if (!new_smi->si_sm) { | 2789 | if (!new_smi->si_sm) { |
2725 | printk(" Could not allocate state machine memory\n"); | 2790 | printk(KERN_ERR "Could not allocate state machine memory\n"); |
2726 | rv = -ENOMEM; | 2791 | rv = -ENOMEM; |
2727 | goto out_err; | 2792 | goto out_err; |
2728 | } | 2793 | } |
@@ -2732,13 +2797,12 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2732 | /* Now that we know the I/O size, we can set up the I/O. */ | 2797 | /* Now that we know the I/O size, we can set up the I/O. */ |
2733 | rv = new_smi->io_setup(new_smi); | 2798 | rv = new_smi->io_setup(new_smi); |
2734 | if (rv) { | 2799 | if (rv) { |
2735 | printk(" Could not set up I/O space\n"); | 2800 | printk(KERN_ERR "Could not set up I/O space\n"); |
2736 | goto out_err; | 2801 | goto out_err; |
2737 | } | 2802 | } |
2738 | 2803 | ||
2739 | spin_lock_init(&(new_smi->si_lock)); | 2804 | spin_lock_init(&(new_smi->si_lock)); |
2740 | spin_lock_init(&(new_smi->msg_lock)); | 2805 | spin_lock_init(&(new_smi->msg_lock)); |
2741 | spin_lock_init(&(new_smi->count_lock)); | ||
2742 | 2806 | ||
2743 | /* Do low-level detection first. */ | 2807 | /* Do low-level detection first. */ |
2744 | if (new_smi->handlers->detect(new_smi->si_sm)) { | 2808 | if (new_smi->handlers->detect(new_smi->si_sm)) { |
@@ -2749,8 +2813,10 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2749 | goto out_err; | 2813 | goto out_err; |
2750 | } | 2814 | } |
2751 | 2815 | ||
2752 | /* Attempt a get device id command. If it fails, we probably | 2816 | /* |
2753 | don't have a BMC here. */ | 2817 | * Attempt a get device id command. If it fails, we probably |
2818 | * don't have a BMC here. | ||
2819 | */ | ||
2754 | rv = try_get_dev_id(new_smi); | 2820 | rv = try_get_dev_id(new_smi); |
2755 | if (rv) { | 2821 | if (rv) { |
2756 | if (new_smi->addr_source) | 2822 | if (new_smi->addr_source) |
@@ -2767,22 +2833,28 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2767 | new_smi->curr_msg = NULL; | 2833 | new_smi->curr_msg = NULL; |
2768 | atomic_set(&new_smi->req_events, 0); | 2834 | atomic_set(&new_smi->req_events, 0); |
2769 | new_smi->run_to_completion = 0; | 2835 | new_smi->run_to_completion = 0; |
2836 | for (i = 0; i < SI_NUM_STATS; i++) | ||
2837 | atomic_set(&new_smi->stats[i], 0); | ||
2770 | 2838 | ||
2771 | new_smi->interrupt_disabled = 0; | 2839 | new_smi->interrupt_disabled = 0; |
2772 | atomic_set(&new_smi->stop_operation, 0); | 2840 | atomic_set(&new_smi->stop_operation, 0); |
2773 | new_smi->intf_num = smi_num; | 2841 | new_smi->intf_num = smi_num; |
2774 | smi_num++; | 2842 | smi_num++; |
2775 | 2843 | ||
2776 | /* Start clearing the flags before we enable interrupts or the | 2844 | /* |
2777 | timer to avoid racing with the timer. */ | 2845 | * Start clearing the flags before we enable interrupts or the |
2846 | * timer to avoid racing with the timer. | ||
2847 | */ | ||
2778 | start_clear_flags(new_smi); | 2848 | start_clear_flags(new_smi); |
2779 | /* IRQ is defined to be set when non-zero. */ | 2849 | /* IRQ is defined to be set when non-zero. */ |
2780 | if (new_smi->irq) | 2850 | if (new_smi->irq) |
2781 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; | 2851 | new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ; |
2782 | 2852 | ||
2783 | if (!new_smi->dev) { | 2853 | if (!new_smi->dev) { |
2784 | /* If we don't already have a device from something | 2854 | /* |
2785 | * else (like PCI), then register a new one. */ | 2855 | * If we don't already have a device from something |
2856 | * else (like PCI), then register a new one. | ||
2857 | */ | ||
2786 | new_smi->pdev = platform_device_alloc("ipmi_si", | 2858 | new_smi->pdev = platform_device_alloc("ipmi_si", |
2787 | new_smi->intf_num); | 2859 | new_smi->intf_num); |
2788 | if (rv) { | 2860 | if (rv) { |
@@ -2820,7 +2892,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2820 | } | 2892 | } |
2821 | 2893 | ||
2822 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", | 2894 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", |
2823 | type_file_read_proc, NULL, | 2895 | type_file_read_proc, |
2824 | new_smi, THIS_MODULE); | 2896 | new_smi, THIS_MODULE); |
2825 | if (rv) { | 2897 | if (rv) { |
2826 | printk(KERN_ERR | 2898 | printk(KERN_ERR |
@@ -2830,7 +2902,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2830 | } | 2902 | } |
2831 | 2903 | ||
2832 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", | 2904 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", |
2833 | stat_file_read_proc, NULL, | 2905 | stat_file_read_proc, |
2834 | new_smi, THIS_MODULE); | 2906 | new_smi, THIS_MODULE); |
2835 | if (rv) { | 2907 | if (rv) { |
2836 | printk(KERN_ERR | 2908 | printk(KERN_ERR |
@@ -2840,7 +2912,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2840 | } | 2912 | } |
2841 | 2913 | ||
2842 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | 2914 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", |
2843 | param_read_proc, NULL, | 2915 | param_read_proc, |
2844 | new_smi, THIS_MODULE); | 2916 | new_smi, THIS_MODULE); |
2845 | if (rv) { | 2917 | if (rv) { |
2846 | printk(KERN_ERR | 2918 | printk(KERN_ERR |
@@ -2853,7 +2925,8 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2853 | 2925 | ||
2854 | mutex_unlock(&smi_infos_lock); | 2926 | mutex_unlock(&smi_infos_lock); |
2855 | 2927 | ||
2856 | printk(KERN_INFO "IPMI %s interface initialized\n",si_to_str[new_smi->si_type]); | 2928 | printk(KERN_INFO "IPMI %s interface initialized\n", |
2929 | si_to_str[new_smi->si_type]); | ||
2857 | 2930 | ||
2858 | return 0; | 2931 | return 0; |
2859 | 2932 | ||
@@ -2868,9 +2941,11 @@ static int try_smi_init(struct smi_info *new_smi) | |||
2868 | if (new_smi->irq_cleanup) | 2941 | if (new_smi->irq_cleanup) |
2869 | new_smi->irq_cleanup(new_smi); | 2942 | new_smi->irq_cleanup(new_smi); |
2870 | 2943 | ||
2871 | /* Wait until we know that we are out of any interrupt | 2944 | /* |
2872 | handlers might have been running before we freed the | 2945 | * Wait until we know that we are out of any interrupt |
2873 | interrupt. */ | 2946 | * handlers might have been running before we freed the |
2947 | * interrupt. | ||
2948 | */ | ||
2874 | synchronize_sched(); | 2949 | synchronize_sched(); |
2875 | 2950 | ||
2876 | if (new_smi->si_sm) { | 2951 | if (new_smi->si_sm) { |
@@ -2942,11 +3017,10 @@ static __devinit int init_ipmi_si(void) | |||
2942 | 3017 | ||
2943 | #ifdef CONFIG_PCI | 3018 | #ifdef CONFIG_PCI |
2944 | rv = pci_register_driver(&ipmi_pci_driver); | 3019 | rv = pci_register_driver(&ipmi_pci_driver); |
2945 | if (rv){ | 3020 | if (rv) |
2946 | printk(KERN_ERR | 3021 | printk(KERN_ERR |
2947 | "init_ipmi_si: Unable to register PCI driver: %d\n", | 3022 | "init_ipmi_si: Unable to register PCI driver: %d\n", |
2948 | rv); | 3023 | rv); |
2949 | } | ||
2950 | #endif | 3024 | #endif |
2951 | 3025 | ||
2952 | #ifdef CONFIG_PPC_OF | 3026 | #ifdef CONFIG_PPC_OF |
@@ -2975,7 +3049,8 @@ static __devinit int init_ipmi_si(void) | |||
2975 | of_unregister_platform_driver(&ipmi_of_platform_driver); | 3049 | of_unregister_platform_driver(&ipmi_of_platform_driver); |
2976 | #endif | 3050 | #endif |
2977 | driver_unregister(&ipmi_driver); | 3051 | driver_unregister(&ipmi_driver); |
2978 | printk("ipmi_si: Unable to find any System Interface(s)\n"); | 3052 | printk(KERN_WARNING |
3053 | "ipmi_si: Unable to find any System Interface(s)\n"); | ||
2979 | return -ENODEV; | 3054 | return -ENODEV; |
2980 | } else { | 3055 | } else { |
2981 | mutex_unlock(&smi_infos_lock); | 3056 | mutex_unlock(&smi_infos_lock); |
@@ -2997,13 +3072,17 @@ static void cleanup_one_si(struct smi_info *to_clean) | |||
2997 | /* Tell the driver that we are shutting down. */ | 3072 | /* Tell the driver that we are shutting down. */ |
2998 | atomic_inc(&to_clean->stop_operation); | 3073 | atomic_inc(&to_clean->stop_operation); |
2999 | 3074 | ||
3000 | /* Make sure the timer and thread are stopped and will not run | 3075 | /* |
3001 | again. */ | 3076 | * Make sure the timer and thread are stopped and will not run |
3077 | * again. | ||
3078 | */ | ||
3002 | wait_for_timer_and_thread(to_clean); | 3079 | wait_for_timer_and_thread(to_clean); |
3003 | 3080 | ||
3004 | /* Timeouts are stopped, now make sure the interrupts are off | 3081 | /* |
3005 | for the device. A little tricky with locks to make sure | 3082 | * Timeouts are stopped, now make sure the interrupts are off |
3006 | there are no races. */ | 3083 | * for the device. A little tricky with locks to make sure |
3084 | * there are no races. | ||
3085 | */ | ||
3007 | spin_lock_irqsave(&to_clean->si_lock, flags); | 3086 | spin_lock_irqsave(&to_clean->si_lock, flags); |
3008 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { | 3087 | while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { |
3009 | spin_unlock_irqrestore(&to_clean->si_lock, flags); | 3088 | spin_unlock_irqrestore(&to_clean->si_lock, flags); |
@@ -3074,4 +3153,5 @@ module_exit(cleanup_ipmi_si); | |||
3074 | 3153 | ||
3075 | MODULE_LICENSE("GPL"); | 3154 | MODULE_LICENSE("GPL"); |
3076 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); | 3155 | MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); |
3077 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT system interfaces."); | 3156 | MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" |
3157 | " system interfaces."); | ||
diff --git a/drivers/char/ipmi/ipmi_si_sm.h b/drivers/char/ipmi/ipmi_si_sm.h index 4b731b24dc16..df89f73475fb 100644 --- a/drivers/char/ipmi/ipmi_si_sm.h +++ b/drivers/char/ipmi/ipmi_si_sm.h | |||
@@ -34,22 +34,27 @@ | |||
34 | * 675 Mass Ave, Cambridge, MA 02139, USA. | 34 | * 675 Mass Ave, Cambridge, MA 02139, USA. |
35 | */ | 35 | */ |
36 | 36 | ||
37 | /* This is defined by the state machines themselves, it is an opaque | 37 | /* |
38 | data type for them to use. */ | 38 | * This is defined by the state machines themselves, it is an opaque |
39 | * data type for them to use. | ||
40 | */ | ||
39 | struct si_sm_data; | 41 | struct si_sm_data; |
40 | 42 | ||
41 | /* The structure for doing I/O in the state machine. The state | 43 | /* |
42 | machine doesn't have the actual I/O routines, they are done through | 44 | * The structure for doing I/O in the state machine. The state |
43 | this interface. */ | 45 | * machine doesn't have the actual I/O routines, they are done through |
44 | struct si_sm_io | 46 | * this interface. |
45 | { | 47 | */ |
48 | struct si_sm_io { | ||
46 | unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); | 49 | unsigned char (*inputb)(struct si_sm_io *io, unsigned int offset); |
47 | void (*outputb)(struct si_sm_io *io, | 50 | void (*outputb)(struct si_sm_io *io, |
48 | unsigned int offset, | 51 | unsigned int offset, |
49 | unsigned char b); | 52 | unsigned char b); |
50 | 53 | ||
51 | /* Generic info used by the actual handling routines, the | 54 | /* |
52 | state machine shouldn't touch these. */ | 55 | * Generic info used by the actual handling routines, the |
56 | * state machine shouldn't touch these. | ||
57 | */ | ||
53 | void __iomem *addr; | 58 | void __iomem *addr; |
54 | int regspacing; | 59 | int regspacing; |
55 | int regsize; | 60 | int regsize; |
@@ -59,53 +64,67 @@ struct si_sm_io | |||
59 | }; | 64 | }; |
60 | 65 | ||
61 | /* Results of SMI events. */ | 66 | /* Results of SMI events. */ |
62 | enum si_sm_result | 67 | enum si_sm_result { |
63 | { | ||
64 | SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ | 68 | SI_SM_CALL_WITHOUT_DELAY, /* Call the driver again immediately */ |
65 | SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ | 69 | SI_SM_CALL_WITH_DELAY, /* Delay some before calling again. */ |
66 | SI_SM_CALL_WITH_TICK_DELAY, /* Delay at least 1 tick before calling again. */ | 70 | SI_SM_CALL_WITH_TICK_DELAY,/* Delay >=1 tick before calling again. */ |
67 | SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ | 71 | SI_SM_TRANSACTION_COMPLETE, /* A transaction is finished. */ |
68 | SI_SM_IDLE, /* The SM is in idle state. */ | 72 | SI_SM_IDLE, /* The SM is in idle state. */ |
69 | SI_SM_HOSED, /* The hardware violated the state machine. */ | 73 | SI_SM_HOSED, /* The hardware violated the state machine. */ |
70 | SI_SM_ATTN /* The hardware is asserting attn and the | 74 | |
71 | state machine is idle. */ | 75 | /* |
76 | * The hardware is asserting attn and the state machine is | ||
77 | * idle. | ||
78 | */ | ||
79 | SI_SM_ATTN | ||
72 | }; | 80 | }; |
73 | 81 | ||
74 | /* Handlers for the SMI state machine. */ | 82 | /* Handlers for the SMI state machine. */ |
75 | struct si_sm_handlers | 83 | struct si_sm_handlers { |
76 | { | 84 | /* |
77 | /* Put the version number of the state machine here so the | 85 | * Put the version number of the state machine here so the |
78 | upper layer can print it. */ | 86 | * upper layer can print it. |
87 | */ | ||
79 | char *version; | 88 | char *version; |
80 | 89 | ||
81 | /* Initialize the data and return the amount of I/O space to | 90 | /* |
82 | reserve for the space. */ | 91 | * Initialize the data and return the amount of I/O space to |
92 | * reserve for the space. | ||
93 | */ | ||
83 | unsigned int (*init_data)(struct si_sm_data *smi, | 94 | unsigned int (*init_data)(struct si_sm_data *smi, |
84 | struct si_sm_io *io); | 95 | struct si_sm_io *io); |
85 | 96 | ||
86 | /* Start a new transaction in the state machine. This will | 97 | /* |
87 | return -2 if the state machine is not idle, -1 if the size | 98 | * Start a new transaction in the state machine. This will |
88 | is invalid (to large or too small), or 0 if the transaction | 99 | * return -2 if the state machine is not idle, -1 if the size |
89 | is successfully completed. */ | 100 | * is invalid (to large or too small), or 0 if the transaction |
101 | * is successfully completed. | ||
102 | */ | ||
90 | int (*start_transaction)(struct si_sm_data *smi, | 103 | int (*start_transaction)(struct si_sm_data *smi, |
91 | unsigned char *data, unsigned int size); | 104 | unsigned char *data, unsigned int size); |
92 | 105 | ||
93 | /* Return the results after the transaction. This will return | 106 | /* |
94 | -1 if the buffer is too small, zero if no transaction is | 107 | * Return the results after the transaction. This will return |
95 | present, or the actual length of the result data. */ | 108 | * -1 if the buffer is too small, zero if no transaction is |
109 | * present, or the actual length of the result data. | ||
110 | */ | ||
96 | int (*get_result)(struct si_sm_data *smi, | 111 | int (*get_result)(struct si_sm_data *smi, |
97 | unsigned char *data, unsigned int length); | 112 | unsigned char *data, unsigned int length); |
98 | 113 | ||
99 | /* Call this periodically (for a polled interface) or upon | 114 | /* |
100 | receiving an interrupt (for a interrupt-driven interface). | 115 | * Call this periodically (for a polled interface) or upon |
101 | If interrupt driven, you should probably poll this | 116 | * receiving an interrupt (for a interrupt-driven interface). |
102 | periodically when not in idle state. This should be called | 117 | * If interrupt driven, you should probably poll this |
103 | with the time that passed since the last call, if it is | 118 | * periodically when not in idle state. This should be called |
104 | significant. Time is in microseconds. */ | 119 | * with the time that passed since the last call, if it is |
120 | * significant. Time is in microseconds. | ||
121 | */ | ||
105 | enum si_sm_result (*event)(struct si_sm_data *smi, long time); | 122 | enum si_sm_result (*event)(struct si_sm_data *smi, long time); |
106 | 123 | ||
107 | /* Attempt to detect an SMI. Returns 0 on success or nonzero | 124 | /* |
108 | on failure. */ | 125 | * Attempt to detect an SMI. Returns 0 on success or nonzero |
126 | * on failure. | ||
127 | */ | ||
109 | int (*detect)(struct si_sm_data *smi); | 128 | int (*detect)(struct si_sm_data *smi); |
110 | 129 | ||
111 | /* The interface is shutting down, so clean it up. */ | 130 | /* The interface is shutting down, so clean it up. */ |
diff --git a/drivers/char/ipmi/ipmi_smic_sm.c b/drivers/char/ipmi/ipmi_smic_sm.c index e64ea7d25d24..faed92971907 100644 --- a/drivers/char/ipmi/ipmi_smic_sm.c +++ b/drivers/char/ipmi/ipmi_smic_sm.c | |||
@@ -85,6 +85,7 @@ enum smic_states { | |||
85 | /* SMIC Flags Register Bits */ | 85 | /* SMIC Flags Register Bits */ |
86 | #define SMIC_RX_DATA_READY 0x80 | 86 | #define SMIC_RX_DATA_READY 0x80 |
87 | #define SMIC_TX_DATA_READY 0x40 | 87 | #define SMIC_TX_DATA_READY 0x40 |
88 | |||
88 | /* | 89 | /* |
89 | * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by | 90 | * SMIC_SMI and SMIC_EVM_DATA_AVAIL are only used by |
90 | * a few systems, and then only by Systems Management | 91 | * a few systems, and then only by Systems Management |
@@ -104,23 +105,22 @@ enum smic_states { | |||
104 | #define EC_ILLEGAL_COMMAND 0x04 | 105 | #define EC_ILLEGAL_COMMAND 0x04 |
105 | #define EC_BUFFER_FULL 0x05 | 106 | #define EC_BUFFER_FULL 0x05 |
106 | 107 | ||
107 | struct si_sm_data | 108 | struct si_sm_data { |
108 | { | ||
109 | enum smic_states state; | 109 | enum smic_states state; |
110 | struct si_sm_io *io; | 110 | struct si_sm_io *io; |
111 | unsigned char write_data[MAX_SMIC_WRITE_SIZE]; | 111 | unsigned char write_data[MAX_SMIC_WRITE_SIZE]; |
112 | int write_pos; | 112 | int write_pos; |
113 | int write_count; | 113 | int write_count; |
114 | int orig_write_count; | 114 | int orig_write_count; |
115 | unsigned char read_data[MAX_SMIC_READ_SIZE]; | 115 | unsigned char read_data[MAX_SMIC_READ_SIZE]; |
116 | int read_pos; | 116 | int read_pos; |
117 | int truncated; | 117 | int truncated; |
118 | unsigned int error_retries; | 118 | unsigned int error_retries; |
119 | long smic_timeout; | 119 | long smic_timeout; |
120 | }; | 120 | }; |
121 | 121 | ||
122 | static unsigned int init_smic_data (struct si_sm_data *smic, | 122 | static unsigned int init_smic_data(struct si_sm_data *smic, |
123 | struct si_sm_io *io) | 123 | struct si_sm_io *io) |
124 | { | 124 | { |
125 | smic->state = SMIC_IDLE; | 125 | smic->state = SMIC_IDLE; |
126 | smic->io = io; | 126 | smic->io = io; |
@@ -150,11 +150,10 @@ static int start_smic_transaction(struct si_sm_data *smic, | |||
150 | return IPMI_NOT_IN_MY_STATE_ERR; | 150 | return IPMI_NOT_IN_MY_STATE_ERR; |
151 | 151 | ||
152 | if (smic_debug & SMIC_DEBUG_MSG) { | 152 | if (smic_debug & SMIC_DEBUG_MSG) { |
153 | printk(KERN_INFO "start_smic_transaction -"); | 153 | printk(KERN_DEBUG "start_smic_transaction -"); |
154 | for (i = 0; i < size; i ++) { | 154 | for (i = 0; i < size; i++) |
155 | printk (" %02x", (unsigned char) (data [i])); | 155 | printk(" %02x", (unsigned char) data[i]); |
156 | } | 156 | printk("\n"); |
157 | printk ("\n"); | ||
158 | } | 157 | } |
159 | smic->error_retries = 0; | 158 | smic->error_retries = 0; |
160 | memcpy(smic->write_data, data, size); | 159 | memcpy(smic->write_data, data, size); |
@@ -173,11 +172,10 @@ static int smic_get_result(struct si_sm_data *smic, | |||
173 | int i; | 172 | int i; |
174 | 173 | ||
175 | if (smic_debug & SMIC_DEBUG_MSG) { | 174 | if (smic_debug & SMIC_DEBUG_MSG) { |
176 | printk (KERN_INFO "smic_get result -"); | 175 | printk(KERN_DEBUG "smic_get result -"); |
177 | for (i = 0; i < smic->read_pos; i ++) { | 176 | for (i = 0; i < smic->read_pos; i++) |
178 | printk (" %02x", (smic->read_data [i])); | 177 | printk(" %02x", smic->read_data[i]); |
179 | } | 178 | printk("\n"); |
180 | printk ("\n"); | ||
181 | } | 179 | } |
182 | if (length < smic->read_pos) { | 180 | if (length < smic->read_pos) { |
183 | smic->read_pos = length; | 181 | smic->read_pos = length; |
@@ -223,8 +221,8 @@ static inline void write_smic_control(struct si_sm_data *smic, | |||
223 | smic->io->outputb(smic->io, 1, control); | 221 | smic->io->outputb(smic->io, 1, control); |
224 | } | 222 | } |
225 | 223 | ||
226 | static inline void write_si_sm_data (struct si_sm_data *smic, | 224 | static inline void write_si_sm_data(struct si_sm_data *smic, |
227 | unsigned char data) | 225 | unsigned char data) |
228 | { | 226 | { |
229 | smic->io->outputb(smic->io, 0, data); | 227 | smic->io->outputb(smic->io, 0, data); |
230 | } | 228 | } |
@@ -233,10 +231,9 @@ static inline void start_error_recovery(struct si_sm_data *smic, char *reason) | |||
233 | { | 231 | { |
234 | (smic->error_retries)++; | 232 | (smic->error_retries)++; |
235 | if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { | 233 | if (smic->error_retries > SMIC_MAX_ERROR_RETRIES) { |
236 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 234 | if (smic_debug & SMIC_DEBUG_ENABLE) |
237 | printk(KERN_WARNING | 235 | printk(KERN_WARNING |
238 | "ipmi_smic_drv: smic hosed: %s\n", reason); | 236 | "ipmi_smic_drv: smic hosed: %s\n", reason); |
239 | } | ||
240 | smic->state = SMIC_HOSED; | 237 | smic->state = SMIC_HOSED; |
241 | } else { | 238 | } else { |
242 | smic->write_count = smic->orig_write_count; | 239 | smic->write_count = smic->orig_write_count; |
@@ -254,14 +251,14 @@ static inline void write_next_byte(struct si_sm_data *smic) | |||
254 | (smic->write_count)--; | 251 | (smic->write_count)--; |
255 | } | 252 | } |
256 | 253 | ||
257 | static inline void read_next_byte (struct si_sm_data *smic) | 254 | static inline void read_next_byte(struct si_sm_data *smic) |
258 | { | 255 | { |
259 | if (smic->read_pos >= MAX_SMIC_READ_SIZE) { | 256 | if (smic->read_pos >= MAX_SMIC_READ_SIZE) { |
260 | read_smic_data (smic); | 257 | read_smic_data(smic); |
261 | smic->truncated = 1; | 258 | smic->truncated = 1; |
262 | } else { | 259 | } else { |
263 | smic->read_data[smic->read_pos] = read_smic_data(smic); | 260 | smic->read_data[smic->read_pos] = read_smic_data(smic); |
264 | (smic->read_pos)++; | 261 | smic->read_pos++; |
265 | } | 262 | } |
266 | } | 263 | } |
267 | 264 | ||
@@ -336,7 +333,7 @@ static inline void read_next_byte (struct si_sm_data *smic) | |||
336 | SMIC_SC_SMS_RD_END 0xC6 | 333 | SMIC_SC_SMS_RD_END 0xC6 |
337 | */ | 334 | */ |
338 | 335 | ||
339 | static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | 336 | static enum si_sm_result smic_event(struct si_sm_data *smic, long time) |
340 | { | 337 | { |
341 | unsigned char status; | 338 | unsigned char status; |
342 | unsigned char flags; | 339 | unsigned char flags; |
@@ -347,13 +344,15 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
347 | return SI_SM_HOSED; | 344 | return SI_SM_HOSED; |
348 | } | 345 | } |
349 | if (smic->state != SMIC_IDLE) { | 346 | if (smic->state != SMIC_IDLE) { |
350 | if (smic_debug & SMIC_DEBUG_STATES) { | 347 | if (smic_debug & SMIC_DEBUG_STATES) |
351 | printk(KERN_INFO | 348 | printk(KERN_DEBUG |
352 | "smic_event - smic->smic_timeout = %ld," | 349 | "smic_event - smic->smic_timeout = %ld," |
353 | " time = %ld\n", | 350 | " time = %ld\n", |
354 | smic->smic_timeout, time); | 351 | smic->smic_timeout, time); |
355 | } | 352 | /* |
356 | /* FIXME: smic_event is sometimes called with time > SMIC_RETRY_TIMEOUT */ | 353 | * FIXME: smic_event is sometimes called with time > |
354 | * SMIC_RETRY_TIMEOUT | ||
355 | */ | ||
357 | if (time < SMIC_RETRY_TIMEOUT) { | 356 | if (time < SMIC_RETRY_TIMEOUT) { |
358 | smic->smic_timeout -= time; | 357 | smic->smic_timeout -= time; |
359 | if (smic->smic_timeout < 0) { | 358 | if (smic->smic_timeout < 0) { |
@@ -366,9 +365,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
366 | if (flags & SMIC_FLAG_BSY) | 365 | if (flags & SMIC_FLAG_BSY) |
367 | return SI_SM_CALL_WITH_DELAY; | 366 | return SI_SM_CALL_WITH_DELAY; |
368 | 367 | ||
369 | status = read_smic_status (smic); | 368 | status = read_smic_status(smic); |
370 | if (smic_debug & SMIC_DEBUG_STATES) | 369 | if (smic_debug & SMIC_DEBUG_STATES) |
371 | printk(KERN_INFO | 370 | printk(KERN_DEBUG |
372 | "smic_event - state = %d, flags = 0x%02x," | 371 | "smic_event - state = %d, flags = 0x%02x," |
373 | " status = 0x%02x\n", | 372 | " status = 0x%02x\n", |
374 | smic->state, flags, status); | 373 | smic->state, flags, status); |
@@ -377,9 +376,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
377 | case SMIC_IDLE: | 376 | case SMIC_IDLE: |
378 | /* in IDLE we check for available messages */ | 377 | /* in IDLE we check for available messages */ |
379 | if (flags & SMIC_SMS_DATA_AVAIL) | 378 | if (flags & SMIC_SMS_DATA_AVAIL) |
380 | { | ||
381 | return SI_SM_ATTN; | 379 | return SI_SM_ATTN; |
382 | } | ||
383 | return SI_SM_IDLE; | 380 | return SI_SM_IDLE; |
384 | 381 | ||
385 | case SMIC_START_OP: | 382 | case SMIC_START_OP: |
@@ -391,7 +388,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
391 | 388 | ||
392 | case SMIC_OP_OK: | 389 | case SMIC_OP_OK: |
393 | if (status != SMIC_SC_SMS_READY) { | 390 | if (status != SMIC_SC_SMS_READY) { |
394 | /* this should not happen */ | 391 | /* this should not happen */ |
395 | start_error_recovery(smic, | 392 | start_error_recovery(smic, |
396 | "state = SMIC_OP_OK," | 393 | "state = SMIC_OP_OK," |
397 | " status != SMIC_SC_SMS_READY"); | 394 | " status != SMIC_SC_SMS_READY"); |
@@ -411,8 +408,10 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
411 | "status != SMIC_SC_SMS_WR_START"); | 408 | "status != SMIC_SC_SMS_WR_START"); |
412 | return SI_SM_CALL_WITH_DELAY; | 409 | return SI_SM_CALL_WITH_DELAY; |
413 | } | 410 | } |
414 | /* we must not issue WR_(NEXT|END) unless | 411 | /* |
415 | TX_DATA_READY is set */ | 412 | * we must not issue WR_(NEXT|END) unless |
413 | * TX_DATA_READY is set | ||
414 | * */ | ||
416 | if (flags & SMIC_TX_DATA_READY) { | 415 | if (flags & SMIC_TX_DATA_READY) { |
417 | if (smic->write_count == 1) { | 416 | if (smic->write_count == 1) { |
418 | /* last byte */ | 417 | /* last byte */ |
@@ -424,10 +423,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
424 | } | 423 | } |
425 | write_next_byte(smic); | 424 | write_next_byte(smic); |
426 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 425 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
427 | } | 426 | } else |
428 | else { | ||
429 | return SI_SM_CALL_WITH_DELAY; | 427 | return SI_SM_CALL_WITH_DELAY; |
430 | } | ||
431 | break; | 428 | break; |
432 | 429 | ||
433 | case SMIC_WRITE_NEXT: | 430 | case SMIC_WRITE_NEXT: |
@@ -442,52 +439,48 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
442 | if (smic->write_count == 1) { | 439 | if (smic->write_count == 1) { |
443 | write_smic_control(smic, SMIC_CC_SMS_WR_END); | 440 | write_smic_control(smic, SMIC_CC_SMS_WR_END); |
444 | smic->state = SMIC_WRITE_END; | 441 | smic->state = SMIC_WRITE_END; |
445 | } | 442 | } else { |
446 | else { | ||
447 | write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); | 443 | write_smic_control(smic, SMIC_CC_SMS_WR_NEXT); |
448 | smic->state = SMIC_WRITE_NEXT; | 444 | smic->state = SMIC_WRITE_NEXT; |
449 | } | 445 | } |
450 | write_next_byte(smic); | 446 | write_next_byte(smic); |
451 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 447 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
452 | } | 448 | } else |
453 | else { | ||
454 | return SI_SM_CALL_WITH_DELAY; | 449 | return SI_SM_CALL_WITH_DELAY; |
455 | } | ||
456 | break; | 450 | break; |
457 | 451 | ||
458 | case SMIC_WRITE_END: | 452 | case SMIC_WRITE_END: |
459 | if (status != SMIC_SC_SMS_WR_END) { | 453 | if (status != SMIC_SC_SMS_WR_END) { |
460 | start_error_recovery (smic, | 454 | start_error_recovery(smic, |
461 | "state = SMIC_WRITE_END, " | 455 | "state = SMIC_WRITE_END, " |
462 | "status != SMIC_SC_SMS_WR_END"); | 456 | "status != SMIC_SC_SMS_WR_END"); |
463 | return SI_SM_CALL_WITH_DELAY; | 457 | return SI_SM_CALL_WITH_DELAY; |
464 | } | 458 | } |
465 | /* data register holds an error code */ | 459 | /* data register holds an error code */ |
466 | data = read_smic_data(smic); | 460 | data = read_smic_data(smic); |
467 | if (data != 0) { | 461 | if (data != 0) { |
468 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 462 | if (smic_debug & SMIC_DEBUG_ENABLE) |
469 | printk(KERN_INFO | 463 | printk(KERN_DEBUG |
470 | "SMIC_WRITE_END: data = %02x\n", data); | 464 | "SMIC_WRITE_END: data = %02x\n", data); |
471 | } | ||
472 | start_error_recovery(smic, | 465 | start_error_recovery(smic, |
473 | "state = SMIC_WRITE_END, " | 466 | "state = SMIC_WRITE_END, " |
474 | "data != SUCCESS"); | 467 | "data != SUCCESS"); |
475 | return SI_SM_CALL_WITH_DELAY; | 468 | return SI_SM_CALL_WITH_DELAY; |
476 | } else { | 469 | } else |
477 | smic->state = SMIC_WRITE2READ; | 470 | smic->state = SMIC_WRITE2READ; |
478 | } | ||
479 | break; | 471 | break; |
480 | 472 | ||
481 | case SMIC_WRITE2READ: | 473 | case SMIC_WRITE2READ: |
482 | /* we must wait for RX_DATA_READY to be set before we | 474 | /* |
483 | can continue */ | 475 | * we must wait for RX_DATA_READY to be set before we |
476 | * can continue | ||
477 | */ | ||
484 | if (flags & SMIC_RX_DATA_READY) { | 478 | if (flags & SMIC_RX_DATA_READY) { |
485 | write_smic_control(smic, SMIC_CC_SMS_RD_START); | 479 | write_smic_control(smic, SMIC_CC_SMS_RD_START); |
486 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 480 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
487 | smic->state = SMIC_READ_START; | 481 | smic->state = SMIC_READ_START; |
488 | } else { | 482 | } else |
489 | return SI_SM_CALL_WITH_DELAY; | 483 | return SI_SM_CALL_WITH_DELAY; |
490 | } | ||
491 | break; | 484 | break; |
492 | 485 | ||
493 | case SMIC_READ_START: | 486 | case SMIC_READ_START: |
@@ -502,15 +495,16 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
502 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); | 495 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); |
503 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 496 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
504 | smic->state = SMIC_READ_NEXT; | 497 | smic->state = SMIC_READ_NEXT; |
505 | } else { | 498 | } else |
506 | return SI_SM_CALL_WITH_DELAY; | 499 | return SI_SM_CALL_WITH_DELAY; |
507 | } | ||
508 | break; | 500 | break; |
509 | 501 | ||
510 | case SMIC_READ_NEXT: | 502 | case SMIC_READ_NEXT: |
511 | switch (status) { | 503 | switch (status) { |
512 | /* smic tells us that this is the last byte to be read | 504 | /* |
513 | --> clean up */ | 505 | * smic tells us that this is the last byte to be read |
506 | * --> clean up | ||
507 | */ | ||
514 | case SMIC_SC_SMS_RD_END: | 508 | case SMIC_SC_SMS_RD_END: |
515 | read_next_byte(smic); | 509 | read_next_byte(smic); |
516 | write_smic_control(smic, SMIC_CC_SMS_RD_END); | 510 | write_smic_control(smic, SMIC_CC_SMS_RD_END); |
@@ -523,9 +517,8 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
523 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); | 517 | write_smic_control(smic, SMIC_CC_SMS_RD_NEXT); |
524 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); | 518 | write_smic_flags(smic, flags | SMIC_FLAG_BSY); |
525 | smic->state = SMIC_READ_NEXT; | 519 | smic->state = SMIC_READ_NEXT; |
526 | } else { | 520 | } else |
527 | return SI_SM_CALL_WITH_DELAY; | 521 | return SI_SM_CALL_WITH_DELAY; |
528 | } | ||
529 | break; | 522 | break; |
530 | default: | 523 | default: |
531 | start_error_recovery( | 524 | start_error_recovery( |
@@ -546,10 +539,9 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
546 | data = read_smic_data(smic); | 539 | data = read_smic_data(smic); |
547 | /* data register holds an error code */ | 540 | /* data register holds an error code */ |
548 | if (data != 0) { | 541 | if (data != 0) { |
549 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 542 | if (smic_debug & SMIC_DEBUG_ENABLE) |
550 | printk(KERN_INFO | 543 | printk(KERN_DEBUG |
551 | "SMIC_READ_END: data = %02x\n", data); | 544 | "SMIC_READ_END: data = %02x\n", data); |
552 | } | ||
553 | start_error_recovery(smic, | 545 | start_error_recovery(smic, |
554 | "state = SMIC_READ_END, " | 546 | "state = SMIC_READ_END, " |
555 | "data != SUCCESS"); | 547 | "data != SUCCESS"); |
@@ -565,7 +557,7 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
565 | 557 | ||
566 | default: | 558 | default: |
567 | if (smic_debug & SMIC_DEBUG_ENABLE) { | 559 | if (smic_debug & SMIC_DEBUG_ENABLE) { |
568 | printk(KERN_WARNING "smic->state = %d\n", smic->state); | 560 | printk(KERN_DEBUG "smic->state = %d\n", smic->state); |
569 | start_error_recovery(smic, "state = UNKNOWN"); | 561 | start_error_recovery(smic, "state = UNKNOWN"); |
570 | return SI_SM_CALL_WITH_DELAY; | 562 | return SI_SM_CALL_WITH_DELAY; |
571 | } | 563 | } |
@@ -576,10 +568,12 @@ static enum si_sm_result smic_event (struct si_sm_data *smic, long time) | |||
576 | 568 | ||
577 | static int smic_detect(struct si_sm_data *smic) | 569 | static int smic_detect(struct si_sm_data *smic) |
578 | { | 570 | { |
579 | /* It's impossible for the SMIC fnags register to be all 1's, | 571 | /* |
580 | (assuming a properly functioning, self-initialized BMC) | 572 | * It's impossible for the SMIC fnags register to be all 1's, |
581 | but that's what you get from reading a bogus address, so we | 573 | * (assuming a properly functioning, self-initialized BMC) |
582 | test that first. */ | 574 | * but that's what you get from reading a bogus address, so we |
575 | * test that first. | ||
576 | */ | ||
583 | if (read_smic_flags(smic) == 0xff) | 577 | if (read_smic_flags(smic) == 0xff) |
584 | return 1; | 578 | return 1; |
585 | 579 | ||
@@ -595,8 +589,7 @@ static int smic_size(void) | |||
595 | return sizeof(struct si_sm_data); | 589 | return sizeof(struct si_sm_data); |
596 | } | 590 | } |
597 | 591 | ||
598 | struct si_sm_handlers smic_smi_handlers = | 592 | struct si_sm_handlers smic_smi_handlers = { |
599 | { | ||
600 | .init_data = init_smic_data, | 593 | .init_data = init_smic_data, |
601 | .start_transaction = start_smic_transaction, | 594 | .start_transaction = start_smic_transaction, |
602 | .get_result = smic_get_result, | 595 | .get_result = smic_get_result, |
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 8f45ca9235ad..1b9a87047817 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -54,13 +54,15 @@ | |||
54 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
55 | 55 | ||
56 | #ifdef CONFIG_X86 | 56 | #ifdef CONFIG_X86 |
57 | /* This is ugly, but I've determined that x86 is the only architecture | 57 | /* |
58 | that can reasonably support the IPMI NMI watchdog timeout at this | 58 | * This is ugly, but I've determined that x86 is the only architecture |
59 | time. If another architecture adds this capability somehow, it | 59 | * that can reasonably support the IPMI NMI watchdog timeout at this |
60 | will have to be a somewhat different mechanism and I have no idea | 60 | * time. If another architecture adds this capability somehow, it |
61 | how it will work. So in the unlikely event that another | 61 | * will have to be a somewhat different mechanism and I have no idea |
62 | architecture supports this, we can figure out a good generic | 62 | * how it will work. So in the unlikely event that another |
63 | mechanism for it at that time. */ | 63 | * architecture supports this, we can figure out a good generic |
64 | * mechanism for it at that time. | ||
65 | */ | ||
64 | #include <asm/kdebug.h> | 66 | #include <asm/kdebug.h> |
65 | #define HAVE_DIE_NMI | 67 | #define HAVE_DIE_NMI |
66 | #endif | 68 | #endif |
@@ -95,9 +97,8 @@ | |||
95 | /* Operations that can be performed on a pretimout. */ | 97 | /* Operations that can be performed on a pretimout. */ |
96 | #define WDOG_PREOP_NONE 0 | 98 | #define WDOG_PREOP_NONE 0 |
97 | #define WDOG_PREOP_PANIC 1 | 99 | #define WDOG_PREOP_PANIC 1 |
98 | #define WDOG_PREOP_GIVE_DATA 2 /* Cause data to be available to | 100 | /* Cause data to be available to read. Doesn't work in NMI mode. */ |
99 | read. Doesn't work in NMI | 101 | #define WDOG_PREOP_GIVE_DATA 2 |
100 | mode. */ | ||
101 | 102 | ||
102 | /* Actions to perform on a full timeout. */ | 103 | /* Actions to perform on a full timeout. */ |
103 | #define WDOG_SET_TIMEOUT_ACT(byte, use) \ | 104 | #define WDOG_SET_TIMEOUT_ACT(byte, use) \ |
@@ -108,8 +109,10 @@ | |||
108 | #define WDOG_TIMEOUT_POWER_DOWN 2 | 109 | #define WDOG_TIMEOUT_POWER_DOWN 2 |
109 | #define WDOG_TIMEOUT_POWER_CYCLE 3 | 110 | #define WDOG_TIMEOUT_POWER_CYCLE 3 |
110 | 111 | ||
111 | /* Byte 3 of the get command, byte 4 of the get response is the | 112 | /* |
112 | pre-timeout in seconds. */ | 113 | * Byte 3 of the get command, byte 4 of the get response is the |
114 | * pre-timeout in seconds. | ||
115 | */ | ||
113 | 116 | ||
114 | /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ | 117 | /* Bits for setting byte 4 of the set command, byte 5 of the get response. */ |
115 | #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) | 118 | #define WDOG_EXPIRE_CLEAR_BIOS_FRB2 (1 << 1) |
@@ -118,11 +121,13 @@ | |||
118 | #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) | 121 | #define WDOG_EXPIRE_CLEAR_SMS_OS (1 << 4) |
119 | #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) | 122 | #define WDOG_EXPIRE_CLEAR_OEM (1 << 5) |
120 | 123 | ||
121 | /* Setting/getting the watchdog timer value. This is for bytes 5 and | 124 | /* |
122 | 6 (the timeout time) of the set command, and bytes 6 and 7 (the | 125 | * Setting/getting the watchdog timer value. This is for bytes 5 and |
123 | timeout time) and 8 and 9 (the current countdown value) of the | 126 | * 6 (the timeout time) of the set command, and bytes 6 and 7 (the |
124 | response. The timeout value is given in seconds (in the command it | 127 | * timeout time) and 8 and 9 (the current countdown value) of the |
125 | is 100ms intervals). */ | 128 | * response. The timeout value is given in seconds (in the command it |
129 | * is 100ms intervals). | ||
130 | */ | ||
126 | #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ | 131 | #define WDOG_SET_TIMEOUT(byte1, byte2, val) \ |
127 | (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) | 132 | (byte1) = (((val) * 10) & 0xff), (byte2) = (((val) * 10) >> 8) |
128 | #define WDOG_GET_TIMEOUT(byte1, byte2) \ | 133 | #define WDOG_GET_TIMEOUT(byte1, byte2) \ |
@@ -184,8 +189,10 @@ static int ipmi_set_timeout(int do_heartbeat); | |||
184 | static void ipmi_register_watchdog(int ipmi_intf); | 189 | static void ipmi_register_watchdog(int ipmi_intf); |
185 | static void ipmi_unregister_watchdog(int ipmi_intf); | 190 | static void ipmi_unregister_watchdog(int ipmi_intf); |
186 | 191 | ||
187 | /* If true, the driver will start running as soon as it is configured | 192 | /* |
188 | and ready. */ | 193 | * If true, the driver will start running as soon as it is configured |
194 | * and ready. | ||
195 | */ | ||
189 | static int start_now; | 196 | static int start_now; |
190 | 197 | ||
191 | static int set_param_int(const char *val, struct kernel_param *kp) | 198 | static int set_param_int(const char *val, struct kernel_param *kp) |
@@ -309,10 +316,12 @@ static int ipmi_ignore_heartbeat; | |||
309 | /* Is someone using the watchdog? Only one user is allowed. */ | 316 | /* Is someone using the watchdog? Only one user is allowed. */ |
310 | static unsigned long ipmi_wdog_open; | 317 | static unsigned long ipmi_wdog_open; |
311 | 318 | ||
312 | /* If set to 1, the heartbeat command will set the state to reset and | 319 | /* |
313 | start the timer. The timer doesn't normally run when the driver is | 320 | * If set to 1, the heartbeat command will set the state to reset and |
314 | first opened until the heartbeat is set the first time, this | 321 | * start the timer. The timer doesn't normally run when the driver is |
315 | variable is used to accomplish this. */ | 322 | * first opened until the heartbeat is set the first time, this |
323 | * variable is used to accomplish this. | ||
324 | */ | ||
316 | static int ipmi_start_timer_on_heartbeat; | 325 | static int ipmi_start_timer_on_heartbeat; |
317 | 326 | ||
318 | /* IPMI version of the BMC. */ | 327 | /* IPMI version of the BMC. */ |
@@ -329,10 +338,12 @@ static int nmi_handler_registered; | |||
329 | 338 | ||
330 | static int ipmi_heartbeat(void); | 339 | static int ipmi_heartbeat(void); |
331 | 340 | ||
332 | /* We use a mutex to make sure that only one thing can send a set | 341 | /* |
333 | timeout at one time, because we only have one copy of the data. | 342 | * We use a mutex to make sure that only one thing can send a set |
334 | The mutex is claimed when the set_timeout is sent and freed | 343 | * timeout at one time, because we only have one copy of the data. |
335 | when both messages are free. */ | 344 | * The mutex is claimed when the set_timeout is sent and freed |
345 | * when both messages are free. | ||
346 | */ | ||
336 | static atomic_t set_timeout_tofree = ATOMIC_INIT(0); | 347 | static atomic_t set_timeout_tofree = ATOMIC_INIT(0); |
337 | static DEFINE_MUTEX(set_timeout_lock); | 348 | static DEFINE_MUTEX(set_timeout_lock); |
338 | static DECLARE_COMPLETION(set_timeout_wait); | 349 | static DECLARE_COMPLETION(set_timeout_wait); |
@@ -346,15 +357,13 @@ static void set_timeout_free_recv(struct ipmi_recv_msg *msg) | |||
346 | if (atomic_dec_and_test(&set_timeout_tofree)) | 357 | if (atomic_dec_and_test(&set_timeout_tofree)) |
347 | complete(&set_timeout_wait); | 358 | complete(&set_timeout_wait); |
348 | } | 359 | } |
349 | static struct ipmi_smi_msg set_timeout_smi_msg = | 360 | static struct ipmi_smi_msg set_timeout_smi_msg = { |
350 | { | ||
351 | .done = set_timeout_free_smi | 361 | .done = set_timeout_free_smi |
352 | }; | 362 | }; |
353 | static struct ipmi_recv_msg set_timeout_recv_msg = | 363 | static struct ipmi_recv_msg set_timeout_recv_msg = { |
354 | { | ||
355 | .done = set_timeout_free_recv | 364 | .done = set_timeout_free_recv |
356 | }; | 365 | }; |
357 | 366 | ||
358 | static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | 367 | static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, |
359 | struct ipmi_recv_msg *recv_msg, | 368 | struct ipmi_recv_msg *recv_msg, |
360 | int *send_heartbeat_now) | 369 | int *send_heartbeat_now) |
@@ -373,13 +382,14 @@ static int i_ipmi_set_timeout(struct ipmi_smi_msg *smi_msg, | |||
373 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); | 382 | WDOG_SET_TIMER_USE(data[0], WDOG_TIMER_USE_SMS_OS); |
374 | 383 | ||
375 | if ((ipmi_version_major > 1) | 384 | if ((ipmi_version_major > 1) |
376 | || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) | 385 | || ((ipmi_version_major == 1) && (ipmi_version_minor >= 5))) { |
377 | { | ||
378 | /* This is an IPMI 1.5-only feature. */ | 386 | /* This is an IPMI 1.5-only feature. */ |
379 | data[0] |= WDOG_DONT_STOP_ON_SET; | 387 | data[0] |= WDOG_DONT_STOP_ON_SET; |
380 | } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | 388 | } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { |
381 | /* In ipmi 1.0, setting the timer stops the watchdog, we | 389 | /* |
382 | need to start it back up again. */ | 390 | * In ipmi 1.0, setting the timer stops the watchdog, we |
391 | * need to start it back up again. | ||
392 | */ | ||
383 | hbnow = 1; | 393 | hbnow = 1; |
384 | } | 394 | } |
385 | 395 | ||
@@ -465,12 +475,10 @@ static void panic_recv_free(struct ipmi_recv_msg *msg) | |||
465 | atomic_dec(&panic_done_count); | 475 | atomic_dec(&panic_done_count); |
466 | } | 476 | } |
467 | 477 | ||
468 | static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = | 478 | static struct ipmi_smi_msg panic_halt_heartbeat_smi_msg = { |
469 | { | ||
470 | .done = panic_smi_free | 479 | .done = panic_smi_free |
471 | }; | 480 | }; |
472 | static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = | 481 | static struct ipmi_recv_msg panic_halt_heartbeat_recv_msg = { |
473 | { | ||
474 | .done = panic_recv_free | 482 | .done = panic_recv_free |
475 | }; | 483 | }; |
476 | 484 | ||
@@ -480,8 +488,10 @@ static void panic_halt_ipmi_heartbeat(void) | |||
480 | struct ipmi_system_interface_addr addr; | 488 | struct ipmi_system_interface_addr addr; |
481 | int rv; | 489 | int rv; |
482 | 490 | ||
483 | /* Don't reset the timer if we have the timer turned off, that | 491 | /* |
484 | re-enables the watchdog. */ | 492 | * Don't reset the timer if we have the timer turned off, that |
493 | * re-enables the watchdog. | ||
494 | */ | ||
485 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) | 495 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
486 | return; | 496 | return; |
487 | 497 | ||
@@ -505,19 +515,19 @@ static void panic_halt_ipmi_heartbeat(void) | |||
505 | atomic_add(2, &panic_done_count); | 515 | atomic_add(2, &panic_done_count); |
506 | } | 516 | } |
507 | 517 | ||
508 | static struct ipmi_smi_msg panic_halt_smi_msg = | 518 | static struct ipmi_smi_msg panic_halt_smi_msg = { |
509 | { | ||
510 | .done = panic_smi_free | 519 | .done = panic_smi_free |
511 | }; | 520 | }; |
512 | static struct ipmi_recv_msg panic_halt_recv_msg = | 521 | static struct ipmi_recv_msg panic_halt_recv_msg = { |
513 | { | ||
514 | .done = panic_recv_free | 522 | .done = panic_recv_free |
515 | }; | 523 | }; |
516 | 524 | ||
517 | /* Special call, doesn't claim any locks. This is only to be called | 525 | /* |
518 | at panic or halt time, in run-to-completion mode, when the caller | 526 | * Special call, doesn't claim any locks. This is only to be called |
519 | is the only CPU and the only thing that will be going is these IPMI | 527 | * at panic or halt time, in run-to-completion mode, when the caller |
520 | calls. */ | 528 | * is the only CPU and the only thing that will be going is these IPMI |
529 | * calls. | ||
530 | */ | ||
521 | static void panic_halt_ipmi_set_timeout(void) | 531 | static void panic_halt_ipmi_set_timeout(void) |
522 | { | 532 | { |
523 | int send_heartbeat_now; | 533 | int send_heartbeat_now; |
@@ -540,10 +550,12 @@ static void panic_halt_ipmi_set_timeout(void) | |||
540 | ipmi_poll_interface(watchdog_user); | 550 | ipmi_poll_interface(watchdog_user); |
541 | } | 551 | } |
542 | 552 | ||
543 | /* We use a semaphore to make sure that only one thing can send a | 553 | /* |
544 | heartbeat at one time, because we only have one copy of the data. | 554 | * We use a mutex to make sure that only one thing can send a |
545 | The semaphore is claimed when the set_timeout is sent and freed | 555 | * heartbeat at one time, because we only have one copy of the data. |
546 | when both messages are free. */ | 556 | * The semaphore is claimed when the set_timeout is sent and freed |
557 | * when both messages are free. | ||
558 | */ | ||
547 | static atomic_t heartbeat_tofree = ATOMIC_INIT(0); | 559 | static atomic_t heartbeat_tofree = ATOMIC_INIT(0); |
548 | static DEFINE_MUTEX(heartbeat_lock); | 560 | static DEFINE_MUTEX(heartbeat_lock); |
549 | static DECLARE_COMPLETION(heartbeat_wait); | 561 | static DECLARE_COMPLETION(heartbeat_wait); |
@@ -557,15 +569,13 @@ static void heartbeat_free_recv(struct ipmi_recv_msg *msg) | |||
557 | if (atomic_dec_and_test(&heartbeat_tofree)) | 569 | if (atomic_dec_and_test(&heartbeat_tofree)) |
558 | complete(&heartbeat_wait); | 570 | complete(&heartbeat_wait); |
559 | } | 571 | } |
560 | static struct ipmi_smi_msg heartbeat_smi_msg = | 572 | static struct ipmi_smi_msg heartbeat_smi_msg = { |
561 | { | ||
562 | .done = heartbeat_free_smi | 573 | .done = heartbeat_free_smi |
563 | }; | 574 | }; |
564 | static struct ipmi_recv_msg heartbeat_recv_msg = | 575 | static struct ipmi_recv_msg heartbeat_recv_msg = { |
565 | { | ||
566 | .done = heartbeat_free_recv | 576 | .done = heartbeat_free_recv |
567 | }; | 577 | }; |
568 | 578 | ||
569 | static int ipmi_heartbeat(void) | 579 | static int ipmi_heartbeat(void) |
570 | { | 580 | { |
571 | struct kernel_ipmi_msg msg; | 581 | struct kernel_ipmi_msg msg; |
@@ -580,10 +590,12 @@ static int ipmi_heartbeat(void) | |||
580 | ipmi_watchdog_state = action_val; | 590 | ipmi_watchdog_state = action_val; |
581 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 591 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
582 | } else if (pretimeout_since_last_heartbeat) { | 592 | } else if (pretimeout_since_last_heartbeat) { |
583 | /* A pretimeout occurred, make sure we set the timeout. | 593 | /* |
584 | We don't want to set the action, though, we want to | 594 | * A pretimeout occurred, make sure we set the timeout. |
585 | leave that alone (thus it can't be combined with the | 595 | * We don't want to set the action, though, we want to |
586 | above operation. */ | 596 | * leave that alone (thus it can't be combined with the |
597 | * above operation. | ||
598 | */ | ||
587 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); | 599 | return ipmi_set_timeout(IPMI_SET_TIMEOUT_HB_IF_NECESSARY); |
588 | } | 600 | } |
589 | 601 | ||
@@ -591,8 +603,10 @@ static int ipmi_heartbeat(void) | |||
591 | 603 | ||
592 | atomic_set(&heartbeat_tofree, 2); | 604 | atomic_set(&heartbeat_tofree, 2); |
593 | 605 | ||
594 | /* Don't reset the timer if we have the timer turned off, that | 606 | /* |
595 | re-enables the watchdog. */ | 607 | * Don't reset the timer if we have the timer turned off, that |
608 | * re-enables the watchdog. | ||
609 | */ | ||
596 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { | 610 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) { |
597 | mutex_unlock(&heartbeat_lock); | 611 | mutex_unlock(&heartbeat_lock); |
598 | return 0; | 612 | return 0; |
@@ -625,10 +639,12 @@ static int ipmi_heartbeat(void) | |||
625 | wait_for_completion(&heartbeat_wait); | 639 | wait_for_completion(&heartbeat_wait); |
626 | 640 | ||
627 | if (heartbeat_recv_msg.msg.data[0] != 0) { | 641 | if (heartbeat_recv_msg.msg.data[0] != 0) { |
628 | /* Got an error in the heartbeat response. It was already | 642 | /* |
629 | reported in ipmi_wdog_msg_handler, but we should return | 643 | * Got an error in the heartbeat response. It was already |
630 | an error here. */ | 644 | * reported in ipmi_wdog_msg_handler, but we should return |
631 | rv = -EINVAL; | 645 | * an error here. |
646 | */ | ||
647 | rv = -EINVAL; | ||
632 | } | 648 | } |
633 | 649 | ||
634 | mutex_unlock(&heartbeat_lock); | 650 | mutex_unlock(&heartbeat_lock); |
@@ -636,8 +652,7 @@ static int ipmi_heartbeat(void) | |||
636 | return rv; | 652 | return rv; |
637 | } | 653 | } |
638 | 654 | ||
639 | static struct watchdog_info ident = | 655 | static struct watchdog_info ident = { |
640 | { | ||
641 | .options = 0, /* WDIOF_SETTIMEOUT, */ | 656 | .options = 0, /* WDIOF_SETTIMEOUT, */ |
642 | .firmware_version = 1, | 657 | .firmware_version = 1, |
643 | .identity = "IPMI" | 658 | .identity = "IPMI" |
@@ -650,7 +665,7 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, | |||
650 | int i; | 665 | int i; |
651 | int val; | 666 | int val; |
652 | 667 | ||
653 | switch(cmd) { | 668 | switch (cmd) { |
654 | case WDIOC_GETSUPPORT: | 669 | case WDIOC_GETSUPPORT: |
655 | i = copy_to_user(argp, &ident, sizeof(ident)); | 670 | i = copy_to_user(argp, &ident, sizeof(ident)); |
656 | return i ? -EFAULT : 0; | 671 | return i ? -EFAULT : 0; |
@@ -690,15 +705,13 @@ static int ipmi_ioctl(struct inode *inode, struct file *file, | |||
690 | i = copy_from_user(&val, argp, sizeof(int)); | 705 | i = copy_from_user(&val, argp, sizeof(int)); |
691 | if (i) | 706 | if (i) |
692 | return -EFAULT; | 707 | return -EFAULT; |
693 | if (val & WDIOS_DISABLECARD) | 708 | if (val & WDIOS_DISABLECARD) { |
694 | { | ||
695 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; | 709 | ipmi_watchdog_state = WDOG_TIMEOUT_NONE; |
696 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); | 710 | ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); |
697 | ipmi_start_timer_on_heartbeat = 0; | 711 | ipmi_start_timer_on_heartbeat = 0; |
698 | } | 712 | } |
699 | 713 | ||
700 | if (val & WDIOS_ENABLECARD) | 714 | if (val & WDIOS_ENABLECARD) { |
701 | { | ||
702 | ipmi_watchdog_state = action_val; | 715 | ipmi_watchdog_state = action_val; |
703 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); | 716 | ipmi_set_timeout(IPMI_SET_TIMEOUT_FORCE_HB); |
704 | } | 717 | } |
@@ -724,13 +737,13 @@ static ssize_t ipmi_write(struct file *file, | |||
724 | int rv; | 737 | int rv; |
725 | 738 | ||
726 | if (len) { | 739 | if (len) { |
727 | if (!nowayout) { | 740 | if (!nowayout) { |
728 | size_t i; | 741 | size_t i; |
729 | 742 | ||
730 | /* In case it was set long ago */ | 743 | /* In case it was set long ago */ |
731 | expect_close = 0; | 744 | expect_close = 0; |
732 | 745 | ||
733 | for (i = 0; i != len; i++) { | 746 | for (i = 0; i != len; i++) { |
734 | char c; | 747 | char c; |
735 | 748 | ||
736 | if (get_user(c, buf + i)) | 749 | if (get_user(c, buf + i)) |
@@ -758,15 +771,17 @@ static ssize_t ipmi_read(struct file *file, | |||
758 | if (count <= 0) | 771 | if (count <= 0) |
759 | return 0; | 772 | return 0; |
760 | 773 | ||
761 | /* Reading returns if the pretimeout has gone off, and it only does | 774 | /* |
762 | it once per pretimeout. */ | 775 | * Reading returns if the pretimeout has gone off, and it only does |
776 | * it once per pretimeout. | ||
777 | */ | ||
763 | spin_lock(&ipmi_read_lock); | 778 | spin_lock(&ipmi_read_lock); |
764 | if (!data_to_read) { | 779 | if (!data_to_read) { |
765 | if (file->f_flags & O_NONBLOCK) { | 780 | if (file->f_flags & O_NONBLOCK) { |
766 | rv = -EAGAIN; | 781 | rv = -EAGAIN; |
767 | goto out; | 782 | goto out; |
768 | } | 783 | } |
769 | 784 | ||
770 | init_waitqueue_entry(&wait, current); | 785 | init_waitqueue_entry(&wait, current); |
771 | add_wait_queue(&read_q, &wait); | 786 | add_wait_queue(&read_q, &wait); |
772 | while (!data_to_read) { | 787 | while (!data_to_read) { |
@@ -776,7 +791,7 @@ static ssize_t ipmi_read(struct file *file, | |||
776 | spin_lock(&ipmi_read_lock); | 791 | spin_lock(&ipmi_read_lock); |
777 | } | 792 | } |
778 | remove_wait_queue(&read_q, &wait); | 793 | remove_wait_queue(&read_q, &wait); |
779 | 794 | ||
780 | if (signal_pending(current)) { | 795 | if (signal_pending(current)) { |
781 | rv = -ERESTARTSYS; | 796 | rv = -ERESTARTSYS; |
782 | goto out; | 797 | goto out; |
@@ -799,25 +814,27 @@ static ssize_t ipmi_read(struct file *file, | |||
799 | 814 | ||
800 | static int ipmi_open(struct inode *ino, struct file *filep) | 815 | static int ipmi_open(struct inode *ino, struct file *filep) |
801 | { | 816 | { |
802 | switch (iminor(ino)) { | 817 | switch (iminor(ino)) { |
803 | case WATCHDOG_MINOR: | 818 | case WATCHDOG_MINOR: |
804 | if (test_and_set_bit(0, &ipmi_wdog_open)) | 819 | if (test_and_set_bit(0, &ipmi_wdog_open)) |
805 | return -EBUSY; | 820 | return -EBUSY; |
806 | 821 | ||
807 | /* Don't start the timer now, let it start on the | 822 | /* |
808 | first heartbeat. */ | 823 | * Don't start the timer now, let it start on the |
824 | * first heartbeat. | ||
825 | */ | ||
809 | ipmi_start_timer_on_heartbeat = 1; | 826 | ipmi_start_timer_on_heartbeat = 1; |
810 | return nonseekable_open(ino, filep); | 827 | return nonseekable_open(ino, filep); |
811 | 828 | ||
812 | default: | 829 | default: |
813 | return (-ENODEV); | 830 | return (-ENODEV); |
814 | } | 831 | } |
815 | } | 832 | } |
816 | 833 | ||
817 | static unsigned int ipmi_poll(struct file *file, poll_table *wait) | 834 | static unsigned int ipmi_poll(struct file *file, poll_table *wait) |
818 | { | 835 | { |
819 | unsigned int mask = 0; | 836 | unsigned int mask = 0; |
820 | 837 | ||
821 | poll_wait(file, &read_q, wait); | 838 | poll_wait(file, &read_q, wait); |
822 | 839 | ||
823 | spin_lock(&ipmi_read_lock); | 840 | spin_lock(&ipmi_read_lock); |
@@ -851,7 +868,7 @@ static int ipmi_close(struct inode *ino, struct file *filep) | |||
851 | clear_bit(0, &ipmi_wdog_open); | 868 | clear_bit(0, &ipmi_wdog_open); |
852 | } | 869 | } |
853 | 870 | ||
854 | ipmi_fasync (-1, filep, 0); | 871 | ipmi_fasync(-1, filep, 0); |
855 | expect_close = 0; | 872 | expect_close = 0; |
856 | 873 | ||
857 | return 0; | 874 | return 0; |
@@ -882,7 +899,7 @@ static void ipmi_wdog_msg_handler(struct ipmi_recv_msg *msg, | |||
882 | msg->msg.data[0], | 899 | msg->msg.data[0], |
883 | msg->msg.cmd); | 900 | msg->msg.cmd); |
884 | } | 901 | } |
885 | 902 | ||
886 | ipmi_free_recv_msg(msg); | 903 | ipmi_free_recv_msg(msg); |
887 | } | 904 | } |
888 | 905 | ||
@@ -902,14 +919,14 @@ static void ipmi_wdog_pretimeout_handler(void *handler_data) | |||
902 | } | 919 | } |
903 | } | 920 | } |
904 | 921 | ||
905 | /* On some machines, the heartbeat will give | 922 | /* |
906 | an error and not work unless we re-enable | 923 | * On some machines, the heartbeat will give an error and not |
907 | the timer. So do so. */ | 924 | * work unless we re-enable the timer. So do so. |
925 | */ | ||
908 | pretimeout_since_last_heartbeat = 1; | 926 | pretimeout_since_last_heartbeat = 1; |
909 | } | 927 | } |
910 | 928 | ||
911 | static struct ipmi_user_hndl ipmi_hndlrs = | 929 | static struct ipmi_user_hndl ipmi_hndlrs = { |
912 | { | ||
913 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, | 930 | .ipmi_recv_hndl = ipmi_wdog_msg_handler, |
914 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler | 931 | .ipmi_watchdog_pretimeout = ipmi_wdog_pretimeout_handler |
915 | }; | 932 | }; |
@@ -949,8 +966,10 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
949 | int old_timeout = timeout; | 966 | int old_timeout = timeout; |
950 | int old_preop_val = preop_val; | 967 | int old_preop_val = preop_val; |
951 | 968 | ||
952 | /* Set the pretimeout to go off in a second and give | 969 | /* |
953 | ourselves plenty of time to stop the timer. */ | 970 | * Set the pretimeout to go off in a second and give |
971 | * ourselves plenty of time to stop the timer. | ||
972 | */ | ||
954 | ipmi_watchdog_state = WDOG_TIMEOUT_RESET; | 973 | ipmi_watchdog_state = WDOG_TIMEOUT_RESET; |
955 | preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ | 974 | preop_val = WDOG_PREOP_NONE; /* Make sure nothing happens */ |
956 | pretimeout = 99; | 975 | pretimeout = 99; |
@@ -974,7 +993,7 @@ static void ipmi_register_watchdog(int ipmi_intf) | |||
974 | " occur. The NMI pretimeout will" | 993 | " occur. The NMI pretimeout will" |
975 | " likely not work\n"); | 994 | " likely not work\n"); |
976 | } | 995 | } |
977 | out_restore: | 996 | out_restore: |
978 | testing_nmi = 0; | 997 | testing_nmi = 0; |
979 | preop_val = old_preop_val; | 998 | preop_val = old_preop_val; |
980 | pretimeout = old_pretimeout; | 999 | pretimeout = old_pretimeout; |
@@ -1009,9 +1028,11 @@ static void ipmi_unregister_watchdog(int ipmi_intf) | |||
1009 | /* Make sure no one can call us any more. */ | 1028 | /* Make sure no one can call us any more. */ |
1010 | misc_deregister(&ipmi_wdog_miscdev); | 1029 | misc_deregister(&ipmi_wdog_miscdev); |
1011 | 1030 | ||
1012 | /* Wait to make sure the message makes it out. The lower layer has | 1031 | /* |
1013 | pointers to our buffers, we want to make sure they are done before | 1032 | * Wait to make sure the message makes it out. The lower layer has |
1014 | we release our memory. */ | 1033 | * pointers to our buffers, we want to make sure they are done before |
1034 | * we release our memory. | ||
1035 | */ | ||
1015 | while (atomic_read(&set_timeout_tofree)) | 1036 | while (atomic_read(&set_timeout_tofree)) |
1016 | schedule_timeout_uninterruptible(1); | 1037 | schedule_timeout_uninterruptible(1); |
1017 | 1038 | ||
@@ -1052,15 +1073,17 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data) | |||
1052 | return NOTIFY_STOP; | 1073 | return NOTIFY_STOP; |
1053 | } | 1074 | } |
1054 | 1075 | ||
1055 | /* If we are not expecting a timeout, ignore it. */ | 1076 | /* If we are not expecting a timeout, ignore it. */ |
1056 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) | 1077 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
1057 | return NOTIFY_OK; | 1078 | return NOTIFY_OK; |
1058 | 1079 | ||
1059 | if (preaction_val != WDOG_PRETIMEOUT_NMI) | 1080 | if (preaction_val != WDOG_PRETIMEOUT_NMI) |
1060 | return NOTIFY_OK; | 1081 | return NOTIFY_OK; |
1061 | 1082 | ||
1062 | /* If no one else handled the NMI, we assume it was the IPMI | 1083 | /* |
1063 | watchdog. */ | 1084 | * If no one else handled the NMI, we assume it was the IPMI |
1085 | * watchdog. | ||
1086 | */ | ||
1064 | if (preop_val == WDOG_PREOP_PANIC) { | 1087 | if (preop_val == WDOG_PREOP_PANIC) { |
1065 | /* On some machines, the heartbeat will give | 1088 | /* On some machines, the heartbeat will give |
1066 | an error and not work unless we re-enable | 1089 | an error and not work unless we re-enable |
@@ -1082,7 +1105,7 @@ static int wdog_reboot_handler(struct notifier_block *this, | |||
1082 | unsigned long code, | 1105 | unsigned long code, |
1083 | void *unused) | 1106 | void *unused) |
1084 | { | 1107 | { |
1085 | static int reboot_event_handled = 0; | 1108 | static int reboot_event_handled; |
1086 | 1109 | ||
1087 | if ((watchdog_user) && (!reboot_event_handled)) { | 1110 | if ((watchdog_user) && (!reboot_event_handled)) { |
1088 | /* Make sure we only do this once. */ | 1111 | /* Make sure we only do this once. */ |
@@ -1115,7 +1138,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
1115 | unsigned long event, | 1138 | unsigned long event, |
1116 | void *unused) | 1139 | void *unused) |
1117 | { | 1140 | { |
1118 | static int panic_event_handled = 0; | 1141 | static int panic_event_handled; |
1119 | 1142 | ||
1120 | /* On a panic, if we have a panic timeout, make sure to extend | 1143 | /* On a panic, if we have a panic timeout, make sure to extend |
1121 | the watchdog timer to a reasonable value to complete the | 1144 | the watchdog timer to a reasonable value to complete the |
@@ -1125,7 +1148,7 @@ static int wdog_panic_handler(struct notifier_block *this, | |||
1125 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { | 1148 | ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { |
1126 | /* Make sure we do this only once. */ | 1149 | /* Make sure we do this only once. */ |
1127 | panic_event_handled = 1; | 1150 | panic_event_handled = 1; |
1128 | 1151 | ||
1129 | timeout = 255; | 1152 | timeout = 255; |
1130 | pretimeout = 0; | 1153 | pretimeout = 0; |
1131 | panic_halt_ipmi_set_timeout(); | 1154 | panic_halt_ipmi_set_timeout(); |
@@ -1151,8 +1174,7 @@ static void ipmi_smi_gone(int if_num) | |||
1151 | ipmi_unregister_watchdog(if_num); | 1174 | ipmi_unregister_watchdog(if_num); |
1152 | } | 1175 | } |
1153 | 1176 | ||
1154 | static struct ipmi_smi_watcher smi_watcher = | 1177 | static struct ipmi_smi_watcher smi_watcher = { |
1155 | { | ||
1156 | .owner = THIS_MODULE, | 1178 | .owner = THIS_MODULE, |
1157 | .new_smi = ipmi_new_smi, | 1179 | .new_smi = ipmi_new_smi, |
1158 | .smi_gone = ipmi_smi_gone | 1180 | .smi_gone = ipmi_smi_gone |
diff --git a/drivers/char/mem.c b/drivers/char/mem.c index e83623ead441..934ffafedaea 100644 --- a/drivers/char/mem.c +++ b/drivers/char/mem.c | |||
@@ -364,6 +364,7 @@ static int mmap_mem(struct file * file, struct vm_area_struct * vma) | |||
364 | return 0; | 364 | return 0; |
365 | } | 365 | } |
366 | 366 | ||
367 | #ifdef CONFIG_DEVKMEM | ||
367 | static int mmap_kmem(struct file * file, struct vm_area_struct * vma) | 368 | static int mmap_kmem(struct file * file, struct vm_area_struct * vma) |
368 | { | 369 | { |
369 | unsigned long pfn; | 370 | unsigned long pfn; |
@@ -384,6 +385,7 @@ static int mmap_kmem(struct file * file, struct vm_area_struct * vma) | |||
384 | vma->vm_pgoff = pfn; | 385 | vma->vm_pgoff = pfn; |
385 | return mmap_mem(file, vma); | 386 | return mmap_mem(file, vma); |
386 | } | 387 | } |
388 | #endif | ||
387 | 389 | ||
388 | #ifdef CONFIG_CRASH_DUMP | 390 | #ifdef CONFIG_CRASH_DUMP |
389 | /* | 391 | /* |
@@ -422,6 +424,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf, | |||
422 | extern long vread(char *buf, char *addr, unsigned long count); | 424 | extern long vread(char *buf, char *addr, unsigned long count); |
423 | extern long vwrite(char *buf, char *addr, unsigned long count); | 425 | extern long vwrite(char *buf, char *addr, unsigned long count); |
424 | 426 | ||
427 | #ifdef CONFIG_DEVKMEM | ||
425 | /* | 428 | /* |
426 | * This function reads the *virtual* memory as seen by the kernel. | 429 | * This function reads the *virtual* memory as seen by the kernel. |
427 | */ | 430 | */ |
@@ -626,6 +629,7 @@ static ssize_t write_kmem(struct file * file, const char __user * buf, | |||
626 | *ppos = p; | 629 | *ppos = p; |
627 | return virtr + wrote; | 630 | return virtr + wrote; |
628 | } | 631 | } |
632 | #endif | ||
629 | 633 | ||
630 | #ifdef CONFIG_DEVPORT | 634 | #ifdef CONFIG_DEVPORT |
631 | static ssize_t read_port(struct file * file, char __user * buf, | 635 | static ssize_t read_port(struct file * file, char __user * buf, |
@@ -803,6 +807,7 @@ static const struct file_operations mem_fops = { | |||
803 | .get_unmapped_area = get_unmapped_area_mem, | 807 | .get_unmapped_area = get_unmapped_area_mem, |
804 | }; | 808 | }; |
805 | 809 | ||
810 | #ifdef CONFIG_DEVKMEM | ||
806 | static const struct file_operations kmem_fops = { | 811 | static const struct file_operations kmem_fops = { |
807 | .llseek = memory_lseek, | 812 | .llseek = memory_lseek, |
808 | .read = read_kmem, | 813 | .read = read_kmem, |
@@ -811,6 +816,7 @@ static const struct file_operations kmem_fops = { | |||
811 | .open = open_kmem, | 816 | .open = open_kmem, |
812 | .get_unmapped_area = get_unmapped_area_mem, | 817 | .get_unmapped_area = get_unmapped_area_mem, |
813 | }; | 818 | }; |
819 | #endif | ||
814 | 820 | ||
815 | static const struct file_operations null_fops = { | 821 | static const struct file_operations null_fops = { |
816 | .llseek = null_lseek, | 822 | .llseek = null_lseek, |
@@ -889,11 +895,13 @@ static int memory_open(struct inode * inode, struct file * filp) | |||
889 | filp->f_mapping->backing_dev_info = | 895 | filp->f_mapping->backing_dev_info = |
890 | &directly_mappable_cdev_bdi; | 896 | &directly_mappable_cdev_bdi; |
891 | break; | 897 | break; |
898 | #ifdef CONFIG_DEVKMEM | ||
892 | case 2: | 899 | case 2: |
893 | filp->f_op = &kmem_fops; | 900 | filp->f_op = &kmem_fops; |
894 | filp->f_mapping->backing_dev_info = | 901 | filp->f_mapping->backing_dev_info = |
895 | &directly_mappable_cdev_bdi; | 902 | &directly_mappable_cdev_bdi; |
896 | break; | 903 | break; |
904 | #endif | ||
897 | case 3: | 905 | case 3: |
898 | filp->f_op = &null_fops; | 906 | filp->f_op = &null_fops; |
899 | break; | 907 | break; |
@@ -942,7 +950,9 @@ static const struct { | |||
942 | const struct file_operations *fops; | 950 | const struct file_operations *fops; |
943 | } devlist[] = { /* list of minor devices */ | 951 | } devlist[] = { /* list of minor devices */ |
944 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, | 952 | {1, "mem", S_IRUSR | S_IWUSR | S_IRGRP, &mem_fops}, |
953 | #ifdef CONFIG_DEVKMEM | ||
945 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, | 954 | {2, "kmem", S_IRUSR | S_IWUSR | S_IRGRP, &kmem_fops}, |
955 | #endif | ||
946 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, | 956 | {3, "null", S_IRUGO | S_IWUGO, &null_fops}, |
947 | #ifdef CONFIG_DEVPORT | 957 | #ifdef CONFIG_DEVPORT |
948 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, | 958 | {4, "port", S_IRUSR | S_IWUSR | S_IRGRP, &port_fops}, |
diff --git a/drivers/char/misc.c b/drivers/char/misc.c index 4d058dadbfcc..eaace0db0ff4 100644 --- a/drivers/char/misc.c +++ b/drivers/char/misc.c | |||
@@ -263,23 +263,26 @@ EXPORT_SYMBOL(misc_deregister); | |||
263 | 263 | ||
264 | static int __init misc_init(void) | 264 | static int __init misc_init(void) |
265 | { | 265 | { |
266 | #ifdef CONFIG_PROC_FS | 266 | int err; |
267 | struct proc_dir_entry *ent; | ||
268 | 267 | ||
269 | ent = create_proc_entry("misc", 0, NULL); | 268 | #ifdef CONFIG_PROC_FS |
270 | if (ent) | 269 | proc_create("misc", 0, NULL, &misc_proc_fops); |
271 | ent->proc_fops = &misc_proc_fops; | ||
272 | #endif | 270 | #endif |
273 | misc_class = class_create(THIS_MODULE, "misc"); | 271 | misc_class = class_create(THIS_MODULE, "misc"); |
272 | err = PTR_ERR(misc_class); | ||
274 | if (IS_ERR(misc_class)) | 273 | if (IS_ERR(misc_class)) |
275 | return PTR_ERR(misc_class); | 274 | goto fail_remove; |
276 | 275 | ||
277 | if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) { | 276 | err = -EIO; |
278 | printk("unable to get major %d for misc devices\n", | 277 | if (register_chrdev(MISC_MAJOR,"misc",&misc_fops)) |
279 | MISC_MAJOR); | 278 | goto fail_printk; |
280 | class_destroy(misc_class); | ||
281 | return -EIO; | ||
282 | } | ||
283 | return 0; | 279 | return 0; |
280 | |||
281 | fail_printk: | ||
282 | printk("unable to get major %d for misc devices\n", MISC_MAJOR); | ||
283 | class_destroy(misc_class); | ||
284 | fail_remove: | ||
285 | remove_proc_entry("misc", NULL); | ||
286 | return err; | ||
284 | } | 287 | } |
285 | subsys_initcall(misc_init); | 288 | subsys_initcall(misc_init); |
diff --git a/drivers/char/pcmcia/ipwireless/hardware.c b/drivers/char/pcmcia/ipwireless/hardware.c index 1f978ff87fa8..fa9d3c945f31 100644 --- a/drivers/char/pcmcia/ipwireless/hardware.c +++ b/drivers/char/pcmcia/ipwireless/hardware.c | |||
@@ -354,32 +354,6 @@ struct ipw_rx_packet { | |||
354 | unsigned int channel_idx; | 354 | unsigned int channel_idx; |
355 | }; | 355 | }; |
356 | 356 | ||
357 | #ifdef IPWIRELESS_STATE_DEBUG | ||
358 | int ipwireless_dump_hardware_state(char *p, size_t limit, | ||
359 | struct ipw_hardware *hw) | ||
360 | { | ||
361 | return snprintf(p, limit, | ||
362 | "debug: initializing=%d\n" | ||
363 | "debug: tx_ready=%d\n" | ||
364 | "debug: tx_queued=%d\n" | ||
365 | "debug: rx_ready=%d\n" | ||
366 | "debug: rx_bytes_queued=%d\n" | ||
367 | "debug: blocking_rx=%d\n" | ||
368 | "debug: removed=%d\n" | ||
369 | "debug: hardware.shutting_down=%d\n" | ||
370 | "debug: to_setup=%d\n", | ||
371 | hw->initializing, | ||
372 | hw->tx_ready, | ||
373 | hw->tx_queued, | ||
374 | hw->rx_ready, | ||
375 | hw->rx_bytes_queued, | ||
376 | hw->blocking_rx, | ||
377 | hw->removed, | ||
378 | hw->shutting_down, | ||
379 | hw->to_setup); | ||
380 | } | ||
381 | #endif | ||
382 | |||
383 | static char *data_type(const unsigned char *buf, unsigned length) | 357 | static char *data_type(const unsigned char *buf, unsigned length) |
384 | { | 358 | { |
385 | struct nl_packet_header *hdr = (struct nl_packet_header *) buf; | 359 | struct nl_packet_header *hdr = (struct nl_packet_header *) buf; |
diff --git a/drivers/char/pcmcia/ipwireless/hardware.h b/drivers/char/pcmcia/ipwireless/hardware.h index c83190ffb0e7..19ce5eb266b1 100644 --- a/drivers/char/pcmcia/ipwireless/hardware.h +++ b/drivers/char/pcmcia/ipwireless/hardware.h | |||
@@ -58,7 +58,5 @@ void ipwireless_init_hardware_v1(struct ipw_hardware *hw, | |||
58 | void *reboot_cb_data); | 58 | void *reboot_cb_data); |
59 | void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); | 59 | void ipwireless_init_hardware_v2_v3(struct ipw_hardware *hw); |
60 | void ipwireless_sleep(unsigned int tenths); | 60 | void ipwireless_sleep(unsigned int tenths); |
61 | int ipwireless_dump_hardware_state(char *p, size_t limit, | ||
62 | struct ipw_hardware *hw); | ||
63 | 61 | ||
64 | #endif | 62 | #endif |
diff --git a/drivers/char/pcmcia/ipwireless/network.c b/drivers/char/pcmcia/ipwireless/network.c index d793e68b3e0d..fe914d34f7f6 100644 --- a/drivers/char/pcmcia/ipwireless/network.c +++ b/drivers/char/pcmcia/ipwireless/network.c | |||
@@ -63,21 +63,6 @@ struct ipw_network { | |||
63 | struct work_struct work_go_offline; | 63 | struct work_struct work_go_offline; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | |||
67 | #ifdef IPWIRELESS_STATE_DEBUG | ||
68 | int ipwireless_dump_network_state(char *p, size_t limit, | ||
69 | struct ipw_network *network) | ||
70 | { | ||
71 | return snprintf(p, limit, | ||
72 | "debug: ppp_blocked=%d\n" | ||
73 | "debug: outgoing_packets_queued=%d\n" | ||
74 | "debug: network.shutting_down=%d\n", | ||
75 | network->ppp_blocked, | ||
76 | network->outgoing_packets_queued, | ||
77 | network->shutting_down); | ||
78 | } | ||
79 | #endif | ||
80 | |||
81 | static void notify_packet_sent(void *callback_data, unsigned int packet_length) | 66 | static void notify_packet_sent(void *callback_data, unsigned int packet_length) |
82 | { | 67 | { |
83 | struct ipw_network *network = callback_data; | 68 | struct ipw_network *network = callback_data; |
diff --git a/drivers/char/pcmcia/ipwireless/network.h b/drivers/char/pcmcia/ipwireless/network.h index b0e1e952fd14..ccacd26fc7ef 100644 --- a/drivers/char/pcmcia/ipwireless/network.h +++ b/drivers/char/pcmcia/ipwireless/network.h | |||
@@ -49,7 +49,4 @@ void ipwireless_ppp_close(struct ipw_network *net); | |||
49 | int ipwireless_ppp_channel_index(struct ipw_network *net); | 49 | int ipwireless_ppp_channel_index(struct ipw_network *net); |
50 | int ipwireless_ppp_unit_number(struct ipw_network *net); | 50 | int ipwireless_ppp_unit_number(struct ipw_network *net); |
51 | 51 | ||
52 | int ipwireless_dump_network_state(char *p, size_t limit, | ||
53 | struct ipw_network *net); | ||
54 | |||
55 | #endif | 52 | #endif |
diff --git a/drivers/char/random.c b/drivers/char/random.c index f43c89f7c449..0cf98bd4f2d2 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c | |||
@@ -272,7 +272,7 @@ static int random_write_wakeup_thresh = 128; | |||
272 | 272 | ||
273 | static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; | 273 | static int trickle_thresh __read_mostly = INPUT_POOL_WORDS * 28; |
274 | 274 | ||
275 | static DEFINE_PER_CPU(int, trickle_count) = 0; | 275 | static DEFINE_PER_CPU(int, trickle_count); |
276 | 276 | ||
277 | /* | 277 | /* |
278 | * A pool of size .poolwords is stirred with a primitive polynomial | 278 | * A pool of size .poolwords is stirred with a primitive polynomial |
@@ -370,17 +370,19 @@ static struct poolinfo { | |||
370 | */ | 370 | */ |
371 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); | 371 | static DECLARE_WAIT_QUEUE_HEAD(random_read_wait); |
372 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); | 372 | static DECLARE_WAIT_QUEUE_HEAD(random_write_wait); |
373 | static struct fasync_struct *fasync; | ||
373 | 374 | ||
374 | #if 0 | 375 | #if 0 |
375 | static int debug = 0; | 376 | static int debug; |
376 | module_param(debug, bool, 0644); | 377 | module_param(debug, bool, 0644); |
377 | #define DEBUG_ENT(fmt, arg...) do { if (debug) \ | 378 | #define DEBUG_ENT(fmt, arg...) do { \ |
378 | printk(KERN_DEBUG "random %04d %04d %04d: " \ | 379 | if (debug) \ |
379 | fmt,\ | 380 | printk(KERN_DEBUG "random %04d %04d %04d: " \ |
380 | input_pool.entropy_count,\ | 381 | fmt,\ |
381 | blocking_pool.entropy_count,\ | 382 | input_pool.entropy_count,\ |
382 | nonblocking_pool.entropy_count,\ | 383 | blocking_pool.entropy_count,\ |
383 | ## arg); } while (0) | 384 | nonblocking_pool.entropy_count,\ |
385 | ## arg); } while (0) | ||
384 | #else | 386 | #else |
385 | #define DEBUG_ENT(fmt, arg...) do {} while (0) | 387 | #define DEBUG_ENT(fmt, arg...) do {} while (0) |
386 | #endif | 388 | #endif |
@@ -394,7 +396,7 @@ module_param(debug, bool, 0644); | |||
394 | 396 | ||
395 | struct entropy_store; | 397 | struct entropy_store; |
396 | struct entropy_store { | 398 | struct entropy_store { |
397 | /* mostly-read data: */ | 399 | /* read-only data: */ |
398 | struct poolinfo *poolinfo; | 400 | struct poolinfo *poolinfo; |
399 | __u32 *pool; | 401 | __u32 *pool; |
400 | const char *name; | 402 | const char *name; |
@@ -402,7 +404,7 @@ struct entropy_store { | |||
402 | struct entropy_store *pull; | 404 | struct entropy_store *pull; |
403 | 405 | ||
404 | /* read-write data: */ | 406 | /* read-write data: */ |
405 | spinlock_t lock ____cacheline_aligned_in_smp; | 407 | spinlock_t lock; |
406 | unsigned add_ptr; | 408 | unsigned add_ptr; |
407 | int entropy_count; | 409 | int entropy_count; |
408 | int input_rotate; | 410 | int input_rotate; |
@@ -438,25 +440,26 @@ static struct entropy_store nonblocking_pool = { | |||
438 | }; | 440 | }; |
439 | 441 | ||
440 | /* | 442 | /* |
441 | * This function adds a byte into the entropy "pool". It does not | 443 | * This function adds bytes into the entropy "pool". It does not |
442 | * update the entropy estimate. The caller should call | 444 | * update the entropy estimate. The caller should call |
443 | * credit_entropy_store if this is appropriate. | 445 | * credit_entropy_bits if this is appropriate. |
444 | * | 446 | * |
445 | * The pool is stirred with a primitive polynomial of the appropriate | 447 | * The pool is stirred with a primitive polynomial of the appropriate |
446 | * degree, and then twisted. We twist by three bits at a time because | 448 | * degree, and then twisted. We twist by three bits at a time because |
447 | * it's cheap to do so and helps slightly in the expected case where | 449 | * it's cheap to do so and helps slightly in the expected case where |
448 | * the entropy is concentrated in the low-order bits. | 450 | * the entropy is concentrated in the low-order bits. |
449 | */ | 451 | */ |
450 | static void __add_entropy_words(struct entropy_store *r, const __u32 *in, | 452 | static void mix_pool_bytes_extract(struct entropy_store *r, const void *in, |
451 | int nwords, __u32 out[16]) | 453 | int nbytes, __u8 out[64]) |
452 | { | 454 | { |
453 | static __u32 const twist_table[8] = { | 455 | static __u32 const twist_table[8] = { |
454 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, | 456 | 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158, |
455 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; | 457 | 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 }; |
456 | unsigned long i, add_ptr, tap1, tap2, tap3, tap4, tap5; | 458 | unsigned long i, j, tap1, tap2, tap3, tap4, tap5; |
457 | int new_rotate, input_rotate; | 459 | int input_rotate; |
458 | int wordmask = r->poolinfo->poolwords - 1; | 460 | int wordmask = r->poolinfo->poolwords - 1; |
459 | __u32 w, next_w; | 461 | const char *bytes = in; |
462 | __u32 w; | ||
460 | unsigned long flags; | 463 | unsigned long flags; |
461 | 464 | ||
462 | /* Taps are constant, so we can load them without holding r->lock. */ | 465 | /* Taps are constant, so we can load them without holding r->lock. */ |
@@ -465,78 +468,76 @@ static void __add_entropy_words(struct entropy_store *r, const __u32 *in, | |||
465 | tap3 = r->poolinfo->tap3; | 468 | tap3 = r->poolinfo->tap3; |
466 | tap4 = r->poolinfo->tap4; | 469 | tap4 = r->poolinfo->tap4; |
467 | tap5 = r->poolinfo->tap5; | 470 | tap5 = r->poolinfo->tap5; |
468 | next_w = *in++; | ||
469 | 471 | ||
470 | spin_lock_irqsave(&r->lock, flags); | 472 | spin_lock_irqsave(&r->lock, flags); |
471 | prefetch_range(r->pool, wordmask); | ||
472 | input_rotate = r->input_rotate; | 473 | input_rotate = r->input_rotate; |
473 | add_ptr = r->add_ptr; | 474 | i = r->add_ptr; |
474 | 475 | ||
475 | while (nwords--) { | 476 | /* mix one byte at a time to simplify size handling and churn faster */ |
476 | w = rol32(next_w, input_rotate); | 477 | while (nbytes--) { |
477 | if (nwords > 0) | 478 | w = rol32(*bytes++, input_rotate & 31); |
478 | next_w = *in++; | 479 | i = (i - 1) & wordmask; |
479 | i = add_ptr = (add_ptr - 1) & wordmask; | ||
480 | /* | ||
481 | * Normally, we add 7 bits of rotation to the pool. | ||
482 | * At the beginning of the pool, add an extra 7 bits | ||
483 | * rotation, so that successive passes spread the | ||
484 | * input bits across the pool evenly. | ||
485 | */ | ||
486 | new_rotate = input_rotate + 14; | ||
487 | if (i) | ||
488 | new_rotate = input_rotate + 7; | ||
489 | input_rotate = new_rotate & 31; | ||
490 | 480 | ||
491 | /* XOR in the various taps */ | 481 | /* XOR in the various taps */ |
482 | w ^= r->pool[i]; | ||
492 | w ^= r->pool[(i + tap1) & wordmask]; | 483 | w ^= r->pool[(i + tap1) & wordmask]; |
493 | w ^= r->pool[(i + tap2) & wordmask]; | 484 | w ^= r->pool[(i + tap2) & wordmask]; |
494 | w ^= r->pool[(i + tap3) & wordmask]; | 485 | w ^= r->pool[(i + tap3) & wordmask]; |
495 | w ^= r->pool[(i + tap4) & wordmask]; | 486 | w ^= r->pool[(i + tap4) & wordmask]; |
496 | w ^= r->pool[(i + tap5) & wordmask]; | 487 | w ^= r->pool[(i + tap5) & wordmask]; |
497 | w ^= r->pool[i]; | 488 | |
489 | /* Mix the result back in with a twist */ | ||
498 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; | 490 | r->pool[i] = (w >> 3) ^ twist_table[w & 7]; |
491 | |||
492 | /* | ||
493 | * Normally, we add 7 bits of rotation to the pool. | ||
494 | * At the beginning of the pool, add an extra 7 bits | ||
495 | * rotation, so that successive passes spread the | ||
496 | * input bits across the pool evenly. | ||
497 | */ | ||
498 | input_rotate += i ? 7 : 14; | ||
499 | } | 499 | } |
500 | 500 | ||
501 | r->input_rotate = input_rotate; | 501 | r->input_rotate = input_rotate; |
502 | r->add_ptr = add_ptr; | 502 | r->add_ptr = i; |
503 | 503 | ||
504 | if (out) { | 504 | if (out) |
505 | for (i = 0; i < 16; i++) { | 505 | for (j = 0; j < 16; j++) |
506 | out[i] = r->pool[add_ptr]; | 506 | ((__u32 *)out)[j] = r->pool[(i - j) & wordmask]; |
507 | add_ptr = (add_ptr - 1) & wordmask; | ||
508 | } | ||
509 | } | ||
510 | 507 | ||
511 | spin_unlock_irqrestore(&r->lock, flags); | 508 | spin_unlock_irqrestore(&r->lock, flags); |
512 | } | 509 | } |
513 | 510 | ||
514 | static inline void add_entropy_words(struct entropy_store *r, const __u32 *in, | 511 | static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes) |
515 | int nwords) | ||
516 | { | 512 | { |
517 | __add_entropy_words(r, in, nwords, NULL); | 513 | mix_pool_bytes_extract(r, in, bytes, NULL); |
518 | } | 514 | } |
519 | 515 | ||
520 | /* | 516 | /* |
521 | * Credit (or debit) the entropy store with n bits of entropy | 517 | * Credit (or debit) the entropy store with n bits of entropy |
522 | */ | 518 | */ |
523 | static void credit_entropy_store(struct entropy_store *r, int nbits) | 519 | static void credit_entropy_bits(struct entropy_store *r, int nbits) |
524 | { | 520 | { |
525 | unsigned long flags; | 521 | unsigned long flags; |
526 | 522 | ||
523 | if (!nbits) | ||
524 | return; | ||
525 | |||
527 | spin_lock_irqsave(&r->lock, flags); | 526 | spin_lock_irqsave(&r->lock, flags); |
528 | 527 | ||
529 | if (r->entropy_count + nbits < 0) { | 528 | DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name); |
530 | DEBUG_ENT("negative entropy/overflow (%d+%d)\n", | 529 | r->entropy_count += nbits; |
531 | r->entropy_count, nbits); | 530 | if (r->entropy_count < 0) { |
531 | DEBUG_ENT("negative entropy/overflow\n"); | ||
532 | r->entropy_count = 0; | 532 | r->entropy_count = 0; |
533 | } else if (r->entropy_count + nbits > r->poolinfo->POOLBITS) { | 533 | } else if (r->entropy_count > r->poolinfo->POOLBITS) |
534 | r->entropy_count = r->poolinfo->POOLBITS; | 534 | r->entropy_count = r->poolinfo->POOLBITS; |
535 | } else { | 535 | |
536 | r->entropy_count += nbits; | 536 | /* should we wake readers? */ |
537 | if (nbits) | 537 | if (r == &input_pool && |
538 | DEBUG_ENT("added %d entropy credits to %s\n", | 538 | r->entropy_count >= random_read_wakeup_thresh) { |
539 | nbits, r->name); | 539 | wake_up_interruptible(&random_read_wait); |
540 | kill_fasync(&fasync, SIGIO, POLL_IN); | ||
540 | } | 541 | } |
541 | 542 | ||
542 | spin_unlock_irqrestore(&r->lock, flags); | 543 | spin_unlock_irqrestore(&r->lock, flags); |
@@ -551,7 +552,7 @@ static void credit_entropy_store(struct entropy_store *r, int nbits) | |||
551 | /* There is one of these per entropy source */ | 552 | /* There is one of these per entropy source */ |
552 | struct timer_rand_state { | 553 | struct timer_rand_state { |
553 | cycles_t last_time; | 554 | cycles_t last_time; |
554 | long last_delta,last_delta2; | 555 | long last_delta, last_delta2; |
555 | unsigned dont_count_entropy:1; | 556 | unsigned dont_count_entropy:1; |
556 | }; | 557 | }; |
557 | 558 | ||
@@ -586,7 +587,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
586 | sample.jiffies = jiffies; | 587 | sample.jiffies = jiffies; |
587 | sample.cycles = get_cycles(); | 588 | sample.cycles = get_cycles(); |
588 | sample.num = num; | 589 | sample.num = num; |
589 | add_entropy_words(&input_pool, (u32 *)&sample, sizeof(sample)/4); | 590 | mix_pool_bytes(&input_pool, &sample, sizeof(sample)); |
590 | 591 | ||
591 | /* | 592 | /* |
592 | * Calculate number of bits of randomness we probably added. | 593 | * Calculate number of bits of randomness we probably added. |
@@ -620,13 +621,9 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num) | |||
620 | * Round down by 1 bit on general principles, | 621 | * Round down by 1 bit on general principles, |
621 | * and limit entropy entimate to 12 bits. | 622 | * and limit entropy entimate to 12 bits. |
622 | */ | 623 | */ |
623 | credit_entropy_store(&input_pool, | 624 | credit_entropy_bits(&input_pool, |
624 | min_t(int, fls(delta>>1), 11)); | 625 | min_t(int, fls(delta>>1), 11)); |
625 | } | 626 | } |
626 | |||
627 | if(input_pool.entropy_count >= random_read_wakeup_thresh) | ||
628 | wake_up_interruptible(&random_read_wait); | ||
629 | |||
630 | out: | 627 | out: |
631 | preempt_enable(); | 628 | preempt_enable(); |
632 | } | 629 | } |
@@ -677,7 +674,7 @@ void add_disk_randomness(struct gendisk *disk) | |||
677 | * | 674 | * |
678 | *********************************************************************/ | 675 | *********************************************************************/ |
679 | 676 | ||
680 | static ssize_t extract_entropy(struct entropy_store *r, void * buf, | 677 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
681 | size_t nbytes, int min, int rsvd); | 678 | size_t nbytes, int min, int rsvd); |
682 | 679 | ||
683 | /* | 680 | /* |
@@ -704,10 +701,10 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes) | |||
704 | "(%d of %d requested)\n", | 701 | "(%d of %d requested)\n", |
705 | r->name, bytes * 8, nbytes * 8, r->entropy_count); | 702 | r->name, bytes * 8, nbytes * 8, r->entropy_count); |
706 | 703 | ||
707 | bytes=extract_entropy(r->pull, tmp, bytes, | 704 | bytes = extract_entropy(r->pull, tmp, bytes, |
708 | random_read_wakeup_thresh / 8, rsvd); | 705 | random_read_wakeup_thresh / 8, rsvd); |
709 | add_entropy_words(r, tmp, (bytes + 3) / 4); | 706 | mix_pool_bytes(r, tmp, bytes); |
710 | credit_entropy_store(r, bytes*8); | 707 | credit_entropy_bits(r, bytes*8); |
711 | } | 708 | } |
712 | } | 709 | } |
713 | 710 | ||
@@ -744,13 +741,15 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
744 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) | 741 | if (r->limit && nbytes + reserved >= r->entropy_count / 8) |
745 | nbytes = r->entropy_count/8 - reserved; | 742 | nbytes = r->entropy_count/8 - reserved; |
746 | 743 | ||
747 | if(r->entropy_count / 8 >= nbytes + reserved) | 744 | if (r->entropy_count / 8 >= nbytes + reserved) |
748 | r->entropy_count -= nbytes*8; | 745 | r->entropy_count -= nbytes*8; |
749 | else | 746 | else |
750 | r->entropy_count = reserved; | 747 | r->entropy_count = reserved; |
751 | 748 | ||
752 | if (r->entropy_count < random_write_wakeup_thresh) | 749 | if (r->entropy_count < random_write_wakeup_thresh) { |
753 | wake_up_interruptible(&random_write_wait); | 750 | wake_up_interruptible(&random_write_wait); |
751 | kill_fasync(&fasync, SIGIO, POLL_OUT); | ||
752 | } | ||
754 | } | 753 | } |
755 | 754 | ||
756 | DEBUG_ENT("debiting %d entropy credits from %s%s\n", | 755 | DEBUG_ENT("debiting %d entropy credits from %s%s\n", |
@@ -764,45 +763,46 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, | |||
764 | static void extract_buf(struct entropy_store *r, __u8 *out) | 763 | static void extract_buf(struct entropy_store *r, __u8 *out) |
765 | { | 764 | { |
766 | int i; | 765 | int i; |
767 | __u32 data[16], buf[5 + SHA_WORKSPACE_WORDS]; | 766 | __u32 hash[5], workspace[SHA_WORKSPACE_WORDS]; |
767 | __u8 extract[64]; | ||
768 | |||
769 | /* Generate a hash across the pool, 16 words (512 bits) at a time */ | ||
770 | sha_init(hash); | ||
771 | for (i = 0; i < r->poolinfo->poolwords; i += 16) | ||
772 | sha_transform(hash, (__u8 *)(r->pool + i), workspace); | ||
768 | 773 | ||
769 | sha_init(buf); | ||
770 | /* | 774 | /* |
771 | * As we hash the pool, we mix intermediate values of | 775 | * We mix the hash back into the pool to prevent backtracking |
772 | * the hash back into the pool. This eliminates | 776 | * attacks (where the attacker knows the state of the pool |
773 | * backtracking attacks (where the attacker knows | 777 | * plus the current outputs, and attempts to find previous |
774 | * the state of the pool plus the current outputs, and | 778 | * ouputs), unless the hash function can be inverted. By |
775 | * attempts to find previous ouputs), unless the hash | 779 | * mixing at least a SHA1 worth of hash data back, we make |
776 | * function can be inverted. | 780 | * brute-forcing the feedback as hard as brute-forcing the |
781 | * hash. | ||
777 | */ | 782 | */ |
778 | for (i = 0; i < r->poolinfo->poolwords; i += 16) { | 783 | mix_pool_bytes_extract(r, hash, sizeof(hash), extract); |
779 | /* hash blocks of 16 words = 512 bits */ | ||
780 | sha_transform(buf, (__u8 *)(r->pool + i), buf + 5); | ||
781 | /* feed back portion of the resulting hash */ | ||
782 | add_entropy_words(r, &buf[i % 5], 1); | ||
783 | } | ||
784 | 784 | ||
785 | /* | 785 | /* |
786 | * To avoid duplicates, we atomically extract a | 786 | * To avoid duplicates, we atomically extract a portion of the |
787 | * portion of the pool while mixing, and hash one | 787 | * pool while mixing, and hash one final time. |
788 | * final time. | ||
789 | */ | 788 | */ |
790 | __add_entropy_words(r, &buf[i % 5], 1, data); | 789 | sha_transform(hash, extract, workspace); |
791 | sha_transform(buf, (__u8 *)data, buf + 5); | 790 | memset(extract, 0, sizeof(extract)); |
791 | memset(workspace, 0, sizeof(workspace)); | ||
792 | 792 | ||
793 | /* | 793 | /* |
794 | * In case the hash function has some recognizable | 794 | * In case the hash function has some recognizable output |
795 | * output pattern, we fold it in half. | 795 | * pattern, we fold it in half. Thus, we always feed back |
796 | * twice as much data as we output. | ||
796 | */ | 797 | */ |
797 | 798 | hash[0] ^= hash[3]; | |
798 | buf[0] ^= buf[3]; | 799 | hash[1] ^= hash[4]; |
799 | buf[1] ^= buf[4]; | 800 | hash[2] ^= rol32(hash[2], 16); |
800 | buf[2] ^= rol32(buf[2], 16); | 801 | memcpy(out, hash, EXTRACT_SIZE); |
801 | memcpy(out, buf, EXTRACT_SIZE); | 802 | memset(hash, 0, sizeof(hash)); |
802 | memset(buf, 0, sizeof(buf)); | ||
803 | } | 803 | } |
804 | 804 | ||
805 | static ssize_t extract_entropy(struct entropy_store *r, void * buf, | 805 | static ssize_t extract_entropy(struct entropy_store *r, void *buf, |
806 | size_t nbytes, int min, int reserved) | 806 | size_t nbytes, int min, int reserved) |
807 | { | 807 | { |
808 | ssize_t ret = 0, i; | 808 | ssize_t ret = 0, i; |
@@ -872,7 +872,6 @@ void get_random_bytes(void *buf, int nbytes) | |||
872 | { | 872 | { |
873 | extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); | 873 | extract_entropy(&nonblocking_pool, buf, nbytes, 0, 0); |
874 | } | 874 | } |
875 | |||
876 | EXPORT_SYMBOL(get_random_bytes); | 875 | EXPORT_SYMBOL(get_random_bytes); |
877 | 876 | ||
878 | /* | 877 | /* |
@@ -894,12 +893,11 @@ static void init_std_data(struct entropy_store *r) | |||
894 | spin_unlock_irqrestore(&r->lock, flags); | 893 | spin_unlock_irqrestore(&r->lock, flags); |
895 | 894 | ||
896 | now = ktime_get_real(); | 895 | now = ktime_get_real(); |
897 | add_entropy_words(r, (__u32 *)&now, sizeof(now)/4); | 896 | mix_pool_bytes(r, &now, sizeof(now)); |
898 | add_entropy_words(r, (__u32 *)utsname(), | 897 | mix_pool_bytes(r, utsname(), sizeof(*(utsname()))); |
899 | sizeof(*(utsname()))/4); | ||
900 | } | 898 | } |
901 | 899 | ||
902 | static int __init rand_initialize(void) | 900 | static int rand_initialize(void) |
903 | { | 901 | { |
904 | init_std_data(&input_pool); | 902 | init_std_data(&input_pool); |
905 | init_std_data(&blocking_pool); | 903 | init_std_data(&blocking_pool); |
@@ -940,7 +938,7 @@ void rand_initialize_disk(struct gendisk *disk) | |||
940 | #endif | 938 | #endif |
941 | 939 | ||
942 | static ssize_t | 940 | static ssize_t |
943 | random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) | 941 | random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
944 | { | 942 | { |
945 | ssize_t n, retval = 0, count = 0; | 943 | ssize_t n, retval = 0, count = 0; |
946 | 944 | ||
@@ -1002,8 +1000,7 @@ random_read(struct file * file, char __user * buf, size_t nbytes, loff_t *ppos) | |||
1002 | } | 1000 | } |
1003 | 1001 | ||
1004 | static ssize_t | 1002 | static ssize_t |
1005 | urandom_read(struct file * file, char __user * buf, | 1003 | urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos) |
1006 | size_t nbytes, loff_t *ppos) | ||
1007 | { | 1004 | { |
1008 | return extract_entropy_user(&nonblocking_pool, buf, nbytes); | 1005 | return extract_entropy_user(&nonblocking_pool, buf, nbytes); |
1009 | } | 1006 | } |
@@ -1038,16 +1035,15 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count) | |||
1038 | count -= bytes; | 1035 | count -= bytes; |
1039 | p += bytes; | 1036 | p += bytes; |
1040 | 1037 | ||
1041 | add_entropy_words(r, buf, (bytes + 3) / 4); | 1038 | mix_pool_bytes(r, buf, bytes); |
1042 | cond_resched(); | 1039 | cond_resched(); |
1043 | } | 1040 | } |
1044 | 1041 | ||
1045 | return 0; | 1042 | return 0; |
1046 | } | 1043 | } |
1047 | 1044 | ||
1048 | static ssize_t | 1045 | static ssize_t random_write(struct file *file, const char __user *buffer, |
1049 | random_write(struct file * file, const char __user * buffer, | 1046 | size_t count, loff_t *ppos) |
1050 | size_t count, loff_t *ppos) | ||
1051 | { | 1047 | { |
1052 | size_t ret; | 1048 | size_t ret; |
1053 | struct inode *inode = file->f_path.dentry->d_inode; | 1049 | struct inode *inode = file->f_path.dentry->d_inode; |
@@ -1064,9 +1060,7 @@ random_write(struct file * file, const char __user * buffer, | |||
1064 | return (ssize_t)count; | 1060 | return (ssize_t)count; |
1065 | } | 1061 | } |
1066 | 1062 | ||
1067 | static int | 1063 | static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) |
1068 | random_ioctl(struct inode * inode, struct file * file, | ||
1069 | unsigned int cmd, unsigned long arg) | ||
1070 | { | 1064 | { |
1071 | int size, ent_count; | 1065 | int size, ent_count; |
1072 | int __user *p = (int __user *)arg; | 1066 | int __user *p = (int __user *)arg; |
@@ -1074,8 +1068,8 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1074 | 1068 | ||
1075 | switch (cmd) { | 1069 | switch (cmd) { |
1076 | case RNDGETENTCNT: | 1070 | case RNDGETENTCNT: |
1077 | ent_count = input_pool.entropy_count; | 1071 | /* inherently racy, no point locking */ |
1078 | if (put_user(ent_count, p)) | 1072 | if (put_user(input_pool.entropy_count, p)) |
1079 | return -EFAULT; | 1073 | return -EFAULT; |
1080 | return 0; | 1074 | return 0; |
1081 | case RNDADDTOENTCNT: | 1075 | case RNDADDTOENTCNT: |
@@ -1083,13 +1077,7 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1083 | return -EPERM; | 1077 | return -EPERM; |
1084 | if (get_user(ent_count, p)) | 1078 | if (get_user(ent_count, p)) |
1085 | return -EFAULT; | 1079 | return -EFAULT; |
1086 | credit_entropy_store(&input_pool, ent_count); | 1080 | credit_entropy_bits(&input_pool, ent_count); |
1087 | /* | ||
1088 | * Wake up waiting processes if we have enough | ||
1089 | * entropy. | ||
1090 | */ | ||
1091 | if (input_pool.entropy_count >= random_read_wakeup_thresh) | ||
1092 | wake_up_interruptible(&random_read_wait); | ||
1093 | return 0; | 1081 | return 0; |
1094 | case RNDADDENTROPY: | 1082 | case RNDADDENTROPY: |
1095 | if (!capable(CAP_SYS_ADMIN)) | 1083 | if (!capable(CAP_SYS_ADMIN)) |
@@ -1104,39 +1092,45 @@ random_ioctl(struct inode * inode, struct file * file, | |||
1104 | size); | 1092 | size); |
1105 | if (retval < 0) | 1093 | if (retval < 0) |
1106 | return retval; | 1094 | return retval; |
1107 | credit_entropy_store(&input_pool, ent_count); | 1095 | credit_entropy_bits(&input_pool, ent_count); |
1108 | /* | ||
1109 | * Wake up waiting processes if we have enough | ||
1110 | * entropy. | ||
1111 | */ | ||
1112 | if (input_pool.entropy_count >= random_read_wakeup_thresh) | ||
1113 | wake_up_interruptible(&random_read_wait); | ||
1114 | return 0; | 1096 | return 0; |
1115 | case RNDZAPENTCNT: | 1097 | case RNDZAPENTCNT: |
1116 | case RNDCLEARPOOL: | 1098 | case RNDCLEARPOOL: |
1117 | /* Clear the entropy pool counters. */ | 1099 | /* Clear the entropy pool counters. */ |
1118 | if (!capable(CAP_SYS_ADMIN)) | 1100 | if (!capable(CAP_SYS_ADMIN)) |
1119 | return -EPERM; | 1101 | return -EPERM; |
1120 | init_std_data(&input_pool); | 1102 | rand_initialize(); |
1121 | init_std_data(&blocking_pool); | ||
1122 | init_std_data(&nonblocking_pool); | ||
1123 | return 0; | 1103 | return 0; |
1124 | default: | 1104 | default: |
1125 | return -EINVAL; | 1105 | return -EINVAL; |
1126 | } | 1106 | } |
1127 | } | 1107 | } |
1128 | 1108 | ||
1109 | static int random_fasync(int fd, struct file *filp, int on) | ||
1110 | { | ||
1111 | return fasync_helper(fd, filp, on, &fasync); | ||
1112 | } | ||
1113 | |||
1114 | static int random_release(struct inode *inode, struct file *filp) | ||
1115 | { | ||
1116 | return fasync_helper(-1, filp, 0, &fasync); | ||
1117 | } | ||
1118 | |||
1129 | const struct file_operations random_fops = { | 1119 | const struct file_operations random_fops = { |
1130 | .read = random_read, | 1120 | .read = random_read, |
1131 | .write = random_write, | 1121 | .write = random_write, |
1132 | .poll = random_poll, | 1122 | .poll = random_poll, |
1133 | .ioctl = random_ioctl, | 1123 | .unlocked_ioctl = random_ioctl, |
1124 | .fasync = random_fasync, | ||
1125 | .release = random_release, | ||
1134 | }; | 1126 | }; |
1135 | 1127 | ||
1136 | const struct file_operations urandom_fops = { | 1128 | const struct file_operations urandom_fops = { |
1137 | .read = urandom_read, | 1129 | .read = urandom_read, |
1138 | .write = random_write, | 1130 | .write = random_write, |
1139 | .ioctl = random_ioctl, | 1131 | .unlocked_ioctl = random_ioctl, |
1132 | .fasync = random_fasync, | ||
1133 | .release = random_release, | ||
1140 | }; | 1134 | }; |
1141 | 1135 | ||
1142 | /*************************************************************** | 1136 | /*************************************************************** |
@@ -1157,7 +1151,6 @@ void generate_random_uuid(unsigned char uuid_out[16]) | |||
1157 | /* Set the UUID variant to DCE */ | 1151 | /* Set the UUID variant to DCE */ |
1158 | uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; | 1152 | uuid_out[8] = (uuid_out[8] & 0x3F) | 0x80; |
1159 | } | 1153 | } |
1160 | |||
1161 | EXPORT_SYMBOL(generate_random_uuid); | 1154 | EXPORT_SYMBOL(generate_random_uuid); |
1162 | 1155 | ||
1163 | /******************************************************************** | 1156 | /******************************************************************** |
@@ -1339,7 +1332,7 @@ ctl_table random_table[] = { | |||
1339 | 1332 | ||
1340 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1333 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1341 | 1334 | ||
1342 | static __u32 twothirdsMD4Transform (__u32 const buf[4], __u32 const in[12]) | 1335 | static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12]) |
1343 | { | 1336 | { |
1344 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; | 1337 | __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3]; |
1345 | 1338 | ||
@@ -1487,8 +1480,8 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr, | |||
1487 | */ | 1480 | */ |
1488 | 1481 | ||
1489 | memcpy(hash, saddr, 16); | 1482 | memcpy(hash, saddr, 16); |
1490 | hash[4]=((__force u16)sport << 16) + (__force u16)dport; | 1483 | hash[4] = ((__force u16)sport << 16) + (__force u16)dport; |
1491 | memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); | 1484 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); |
1492 | 1485 | ||
1493 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; | 1486 | seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK; |
1494 | seq += keyptr->count; | 1487 | seq += keyptr->count; |
@@ -1538,10 +1531,10 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
1538 | * Note that the words are placed into the starting vector, which is | 1531 | * Note that the words are placed into the starting vector, which is |
1539 | * then mixed with a partial MD4 over random data. | 1532 | * then mixed with a partial MD4 over random data. |
1540 | */ | 1533 | */ |
1541 | hash[0]=(__force u32)saddr; | 1534 | hash[0] = (__force u32)saddr; |
1542 | hash[1]=(__force u32)daddr; | 1535 | hash[1] = (__force u32)daddr; |
1543 | hash[2]=((__force u16)sport << 16) + (__force u16)dport; | 1536 | hash[2] = ((__force u16)sport << 16) + (__force u16)dport; |
1544 | hash[3]=keyptr->secret[11]; | 1537 | hash[3] = keyptr->secret[11]; |
1545 | 1538 | ||
1546 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; | 1539 | seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK; |
1547 | seq += keyptr->count; | 1540 | seq += keyptr->count; |
@@ -1556,10 +1549,7 @@ __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr, | |||
1556 | * Choosing a clock of 64 ns period is OK. (period of 274 s) | 1549 | * Choosing a clock of 64 ns period is OK. (period of 274 s) |
1557 | */ | 1550 | */ |
1558 | seq += ktime_to_ns(ktime_get_real()) >> 6; | 1551 | seq += ktime_to_ns(ktime_get_real()) >> 6; |
1559 | #if 0 | 1552 | |
1560 | printk("init_seq(%lx, %lx, %d, %d) = %d\n", | ||
1561 | saddr, daddr, sport, dport, seq); | ||
1562 | #endif | ||
1563 | return seq; | 1553 | return seq; |
1564 | } | 1554 | } |
1565 | 1555 | ||
@@ -1582,14 +1572,15 @@ u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport) | |||
1582 | } | 1572 | } |
1583 | 1573 | ||
1584 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 1574 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
1585 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, __be16 dport) | 1575 | u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr, |
1576 | __be16 dport) | ||
1586 | { | 1577 | { |
1587 | struct keydata *keyptr = get_keyptr(); | 1578 | struct keydata *keyptr = get_keyptr(); |
1588 | u32 hash[12]; | 1579 | u32 hash[12]; |
1589 | 1580 | ||
1590 | memcpy(hash, saddr, 16); | 1581 | memcpy(hash, saddr, 16); |
1591 | hash[4] = (__force u32)dport; | 1582 | hash[4] = (__force u32)dport; |
1592 | memcpy(&hash[5],keyptr->secret,sizeof(__u32) * 7); | 1583 | memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7); |
1593 | 1584 | ||
1594 | return twothirdsMD4Transform((const __u32 *)daddr, hash); | 1585 | return twothirdsMD4Transform((const __u32 *)daddr, hash); |
1595 | } | 1586 | } |
@@ -1617,13 +1608,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr, | |||
1617 | 1608 | ||
1618 | seq += ktime_to_ns(ktime_get_real()); | 1609 | seq += ktime_to_ns(ktime_get_real()); |
1619 | seq &= (1ull << 48) - 1; | 1610 | seq &= (1ull << 48) - 1; |
1620 | #if 0 | 1611 | |
1621 | printk("dccp init_seq(%lx, %lx, %d, %d) = %d\n", | ||
1622 | saddr, daddr, sport, dport, seq); | ||
1623 | #endif | ||
1624 | return seq; | 1612 | return seq; |
1625 | } | 1613 | } |
1626 | |||
1627 | EXPORT_SYMBOL(secure_dccp_sequence_number); | 1614 | EXPORT_SYMBOL(secure_dccp_sequence_number); |
1628 | #endif | 1615 | #endif |
1629 | 1616 | ||
diff --git a/drivers/char/rocket_int.h b/drivers/char/rocket_int.h index b01d38125a8f..143cc432fdb2 100644 --- a/drivers/char/rocket_int.h +++ b/drivers/char/rocket_int.h | |||
@@ -55,7 +55,7 @@ static inline void sOutW(unsigned short port, unsigned short value) | |||
55 | 55 | ||
56 | static inline void out32(unsigned short port, Byte_t *p) | 56 | static inline void out32(unsigned short port, Byte_t *p) |
57 | { | 57 | { |
58 | u32 value = le32_to_cpu(get_unaligned((__le32 *)p)); | 58 | u32 value = get_unaligned_le32(p); |
59 | #ifdef ROCKET_DEBUG_IO | 59 | #ifdef ROCKET_DEBUG_IO |
60 | printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); | 60 | printk(KERN_DEBUG "out32(%x, %lx)...\n", port, value); |
61 | #endif | 61 | #endif |
diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index e2ec2ee4cf79..5f80a9dff573 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c | |||
@@ -1069,10 +1069,8 @@ no_irq: | |||
1069 | } | 1069 | } |
1070 | 1070 | ||
1071 | #ifdef CONFIG_PROC_FS | 1071 | #ifdef CONFIG_PROC_FS |
1072 | ent = create_proc_entry("driver/rtc", 0, NULL); | 1072 | ent = proc_create("driver/rtc", 0, NULL, &rtc_proc_fops); |
1073 | if (ent) | 1073 | if (!ent) |
1074 | ent->proc_fops = &rtc_proc_fops; | ||
1075 | else | ||
1076 | printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); | 1074 | printk(KERN_WARNING "rtc: Failed to register with procfs.\n"); |
1077 | #endif | 1075 | #endif |
1078 | 1076 | ||
diff --git a/drivers/char/snsc_event.c b/drivers/char/snsc_event.c index 1b75b0b7d542..31a7765eaf73 100644 --- a/drivers/char/snsc_event.c +++ b/drivers/char/snsc_event.c | |||
@@ -63,16 +63,13 @@ static int | |||
63 | scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) | 63 | scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) |
64 | { | 64 | { |
65 | char *desc_end; | 65 | char *desc_end; |
66 | __be32 from_buf; | ||
67 | 66 | ||
68 | /* record event source address */ | 67 | /* record event source address */ |
69 | from_buf = get_unaligned((__be32 *)event); | 68 | *src = get_unaligned_be32(event); |
70 | *src = be32_to_cpup(&from_buf); | ||
71 | event += 4; /* move on to event code */ | 69 | event += 4; /* move on to event code */ |
72 | 70 | ||
73 | /* record the system controller's event code */ | 71 | /* record the system controller's event code */ |
74 | from_buf = get_unaligned((__be32 *)event); | 72 | *code = get_unaligned_be32(event); |
75 | *code = be32_to_cpup(&from_buf); | ||
76 | event += 4; /* move on to event arguments */ | 73 | event += 4; /* move on to event arguments */ |
77 | 74 | ||
78 | /* how many arguments are in the packet? */ | 75 | /* how many arguments are in the packet? */ |
@@ -86,8 +83,7 @@ scdrv_parse_event(char *event, int *src, int *code, int *esp_code, char *desc) | |||
86 | /* not an integer argument, so give up */ | 83 | /* not an integer argument, so give up */ |
87 | return -1; | 84 | return -1; |
88 | } | 85 | } |
89 | from_buf = get_unaligned((__be32 *)event); | 86 | *esp_code = get_unaligned_be32(event); |
90 | *esp_code = be32_to_cpup(&from_buf); | ||
91 | event += 4; | 87 | event += 4; |
92 | 88 | ||
93 | /* parse out the event description */ | 89 | /* parse out the event description */ |
diff --git a/drivers/char/sysrq.c b/drivers/char/sysrq.c index 1ade193c9128..9e9bad8bdcf4 100644 --- a/drivers/char/sysrq.c +++ b/drivers/char/sysrq.c | |||
@@ -196,6 +196,48 @@ static struct sysrq_key_op sysrq_showlocks_op = { | |||
196 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)0) | 196 | #define sysrq_showlocks_op (*(struct sysrq_key_op *)0) |
197 | #endif | 197 | #endif |
198 | 198 | ||
199 | #ifdef CONFIG_SMP | ||
200 | static DEFINE_SPINLOCK(show_lock); | ||
201 | |||
202 | static void showacpu(void *dummy) | ||
203 | { | ||
204 | unsigned long flags; | ||
205 | |||
206 | /* Idle CPUs have no interesting backtrace. */ | ||
207 | if (idle_cpu(smp_processor_id())) | ||
208 | return; | ||
209 | |||
210 | spin_lock_irqsave(&show_lock, flags); | ||
211 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | ||
212 | show_stack(NULL, NULL); | ||
213 | spin_unlock_irqrestore(&show_lock, flags); | ||
214 | } | ||
215 | |||
216 | static void sysrq_showregs_othercpus(struct work_struct *dummy) | ||
217 | { | ||
218 | smp_call_function(showacpu, NULL, 0, 0); | ||
219 | } | ||
220 | |||
221 | static DECLARE_WORK(sysrq_showallcpus, sysrq_showregs_othercpus); | ||
222 | |||
223 | static void sysrq_handle_showallcpus(int key, struct tty_struct *tty) | ||
224 | { | ||
225 | struct pt_regs *regs = get_irq_regs(); | ||
226 | if (regs) { | ||
227 | printk(KERN_INFO "CPU%d:\n", smp_processor_id()); | ||
228 | show_regs(regs); | ||
229 | } | ||
230 | schedule_work(&sysrq_showallcpus); | ||
231 | } | ||
232 | |||
233 | static struct sysrq_key_op sysrq_showallcpus_op = { | ||
234 | .handler = sysrq_handle_showallcpus, | ||
235 | .help_msg = "aLlcpus", | ||
236 | .action_msg = "Show backtrace of all active CPUs", | ||
237 | .enable_mask = SYSRQ_ENABLE_DUMP, | ||
238 | }; | ||
239 | #endif | ||
240 | |||
199 | static void sysrq_handle_showregs(int key, struct tty_struct *tty) | 241 | static void sysrq_handle_showregs(int key, struct tty_struct *tty) |
200 | { | 242 | { |
201 | struct pt_regs *regs = get_irq_regs(); | 243 | struct pt_regs *regs = get_irq_regs(); |
@@ -340,7 +382,11 @@ static struct sysrq_key_op *sysrq_key_table[36] = { | |||
340 | &sysrq_kill_op, /* i */ | 382 | &sysrq_kill_op, /* i */ |
341 | NULL, /* j */ | 383 | NULL, /* j */ |
342 | &sysrq_SAK_op, /* k */ | 384 | &sysrq_SAK_op, /* k */ |
385 | #ifdef CONFIG_SMP | ||
386 | &sysrq_showallcpus_op, /* l */ | ||
387 | #else | ||
343 | NULL, /* l */ | 388 | NULL, /* l */ |
389 | #endif | ||
344 | &sysrq_showmem_op, /* m */ | 390 | &sysrq_showmem_op, /* m */ |
345 | &sysrq_unrt_op, /* n */ | 391 | &sysrq_unrt_op, /* n */ |
346 | /* o: This will often be registered as 'Off' at init time */ | 392 | /* o: This will often be registered as 'Off' at init time */ |
diff --git a/drivers/char/toshiba.c b/drivers/char/toshiba.c index ce5ebe3b168f..64f1ceed0b2c 100644 --- a/drivers/char/toshiba.c +++ b/drivers/char/toshiba.c | |||
@@ -520,12 +520,11 @@ static int __init toshiba_init(void) | |||
520 | { | 520 | { |
521 | struct proc_dir_entry *pde; | 521 | struct proc_dir_entry *pde; |
522 | 522 | ||
523 | pde = create_proc_entry("toshiba", 0, NULL); | 523 | pde = proc_create("toshiba", 0, NULL, &proc_toshiba_fops); |
524 | if (!pde) { | 524 | if (!pde) { |
525 | misc_deregister(&tosh_device); | 525 | misc_deregister(&tosh_device); |
526 | return -ENOMEM; | 526 | return -ENOMEM; |
527 | } | 527 | } |
528 | pde->proc_fops = &proc_toshiba_fops; | ||
529 | } | 528 | } |
530 | #endif | 529 | #endif |
531 | 530 | ||
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index 8f3f7620f95a..3738cfa209ff 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig | |||
@@ -23,7 +23,7 @@ if TCG_TPM | |||
23 | 23 | ||
24 | config TCG_TIS | 24 | config TCG_TIS |
25 | tristate "TPM Interface Specification 1.2 Interface" | 25 | tristate "TPM Interface Specification 1.2 Interface" |
26 | depends on PNPACPI | 26 | depends on PNP |
27 | ---help--- | 27 | ---help--- |
28 | If you have a TPM security chip that is compliant with the | 28 | If you have a TPM security chip that is compliant with the |
29 | TCG TIS 1.2 TPM specification say Yes and it will be accessible | 29 | TCG TIS 1.2 TPM specification say Yes and it will be accessible |
@@ -32,7 +32,6 @@ config TCG_TIS | |||
32 | 32 | ||
33 | config TCG_NSC | 33 | config TCG_NSC |
34 | tristate "National Semiconductor TPM Interface" | 34 | tristate "National Semiconductor TPM Interface" |
35 | depends on PNPACPI | ||
36 | ---help--- | 35 | ---help--- |
37 | If you have a TPM security chip from National Semiconductor | 36 | If you have a TPM security chip from National Semiconductor |
38 | say Yes and it will be accessible from within Linux. To | 37 | say Yes and it will be accessible from within Linux. To |
@@ -48,7 +47,7 @@ config TCG_ATMEL | |||
48 | 47 | ||
49 | config TCG_INFINEON | 48 | config TCG_INFINEON |
50 | tristate "Infineon Technologies TPM Interface" | 49 | tristate "Infineon Technologies TPM Interface" |
51 | depends on PNPACPI | 50 | depends on PNP |
52 | ---help--- | 51 | ---help--- |
53 | If you have a TPM security chip from Infineon Technologies | 52 | If you have a TPM security chip from Infineon Technologies |
54 | (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it | 53 | (either SLD 9630 TT 1.1 or SLB 9635 TT 1.2) say Yes and it |
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 6313326bc41f..ab18c1e7b115 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -264,7 +264,7 @@ static const struct tpm_vendor_specific tpm_nsc = { | |||
264 | 264 | ||
265 | static struct platform_device *pdev = NULL; | 265 | static struct platform_device *pdev = NULL; |
266 | 266 | ||
267 | static void __devexit tpm_nsc_remove(struct device *dev) | 267 | static void tpm_nsc_remove(struct device *dev) |
268 | { | 268 | { |
269 | struct tpm_chip *chip = dev_get_drvdata(dev); | 269 | struct tpm_chip *chip = dev_get_drvdata(dev); |
270 | if ( chip ) { | 270 | if ( chip ) { |
diff --git a/drivers/char/viotape.c b/drivers/char/viotape.c index db7a731e2362..58aad63831f4 100644 --- a/drivers/char/viotape.c +++ b/drivers/char/viotape.c | |||
@@ -249,6 +249,7 @@ static int proc_viotape_open(struct inode *inode, struct file *file) | |||
249 | } | 249 | } |
250 | 250 | ||
251 | static const struct file_operations proc_viotape_operations = { | 251 | static const struct file_operations proc_viotape_operations = { |
252 | .owner = THIS_MODULE, | ||
252 | .open = proc_viotape_open, | 253 | .open = proc_viotape_open, |
253 | .read = seq_read, | 254 | .read = seq_read, |
254 | .llseek = seq_lseek, | 255 | .llseek = seq_lseek, |
@@ -915,7 +916,6 @@ static struct vio_driver viotape_driver = { | |||
915 | int __init viotap_init(void) | 916 | int __init viotap_init(void) |
916 | { | 917 | { |
917 | int ret; | 918 | int ret; |
918 | struct proc_dir_entry *e; | ||
919 | 919 | ||
920 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 920 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
921 | return -ENODEV; | 921 | return -ENODEV; |
@@ -968,11 +968,8 @@ int __init viotap_init(void) | |||
968 | if (ret) | 968 | if (ret) |
969 | goto unreg_class; | 969 | goto unreg_class; |
970 | 970 | ||
971 | e = create_proc_entry("iSeries/viotape", S_IFREG|S_IRUGO, NULL); | 971 | proc_create("iSeries/viotape", S_IFREG|S_IRUGO, NULL, |
972 | if (e) { | 972 | &proc_viotape_operations); |
973 | e->owner = THIS_MODULE; | ||
974 | e->proc_fops = &proc_viotape_operations; | ||
975 | } | ||
976 | 973 | ||
977 | return 0; | 974 | return 0; |
978 | 975 | ||
diff --git a/drivers/char/vt.c b/drivers/char/vt.c index df4c3ead9e2b..1c2660477135 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c | |||
@@ -301,7 +301,7 @@ static void scrup(struct vc_data *vc, unsigned int t, unsigned int b, int nr) | |||
301 | d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); | 301 | d = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); |
302 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); | 302 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * (t + nr)); |
303 | scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); | 303 | scr_memmovew(d, s, (b - t - nr) * vc->vc_size_row); |
304 | scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_video_erase_char, | 304 | scr_memsetw(d + (b - t - nr) * vc->vc_cols, vc->vc_scrl_erase_char, |
305 | vc->vc_size_row * nr); | 305 | vc->vc_size_row * nr); |
306 | } | 306 | } |
307 | 307 | ||
@@ -319,7 +319,7 @@ static void scrdown(struct vc_data *vc, unsigned int t, unsigned int b, int nr) | |||
319 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); | 319 | s = (unsigned short *)(vc->vc_origin + vc->vc_size_row * t); |
320 | step = vc->vc_cols * nr; | 320 | step = vc->vc_cols * nr; |
321 | scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); | 321 | scr_memmovew(s + step, s, (b - t - nr) * vc->vc_size_row); |
322 | scr_memsetw(s, vc->vc_video_erase_char, 2 * step); | 322 | scr_memsetw(s, vc->vc_scrl_erase_char, 2 * step); |
323 | } | 323 | } |
324 | 324 | ||
325 | static void do_update_region(struct vc_data *vc, unsigned long start, int count) | 325 | static void do_update_region(struct vc_data *vc, unsigned long start, int count) |
@@ -400,7 +400,7 @@ static u8 build_attr(struct vc_data *vc, u8 _color, u8 _intensity, u8 _blink, | |||
400 | * Bit 7 : blink | 400 | * Bit 7 : blink |
401 | */ | 401 | */ |
402 | { | 402 | { |
403 | u8 a = vc->vc_color; | 403 | u8 a = _color; |
404 | if (!vc->vc_can_do_color) | 404 | if (!vc->vc_can_do_color) |
405 | return _intensity | | 405 | return _intensity | |
406 | (_italic ? 2 : 0) | | 406 | (_italic ? 2 : 0) | |
@@ -434,6 +434,7 @@ static void update_attr(struct vc_data *vc) | |||
434 | vc->vc_blink, vc->vc_underline, | 434 | vc->vc_blink, vc->vc_underline, |
435 | vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); | 435 | vc->vc_reverse ^ vc->vc_decscnm, vc->vc_italic); |
436 | vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; | 436 | vc->vc_video_erase_char = (build_attr(vc, vc->vc_color, 1, vc->vc_blink, 0, vc->vc_decscnm, 0) << 8) | ' '; |
437 | vc->vc_scrl_erase_char = (build_attr(vc, vc->vc_def_color, 1, false, false, false, false) << 8) | ' '; | ||
437 | } | 438 | } |
438 | 439 | ||
439 | /* Note: inverting the screen twice should revert to the original state */ | 440 | /* Note: inverting the screen twice should revert to the original state */ |
diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 2b382990fe58..6e6c3c4aea6b 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig | |||
@@ -67,7 +67,7 @@ config EDAC_E7XXX | |||
67 | E7205, E7500, E7501 and E7505 server chipsets. | 67 | E7205, E7500, E7501 and E7505 server chipsets. |
68 | 68 | ||
69 | config EDAC_E752X | 69 | config EDAC_E752X |
70 | tristate "Intel e752x (e7520, e7525, e7320)" | 70 | tristate "Intel e752x (e7520, e7525, e7320) and 3100" |
71 | depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG | 71 | depends on EDAC_MM_EDAC && PCI && X86 && HOTPLUG |
72 | help | 72 | help |
73 | Support for error detection and correction on the Intel | 73 | Support for error detection and correction on the Intel |
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index f22075410591..2b95f1a3edfc 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/pci.h> | 17 | #include <linux/pci.h> |
18 | #include <linux/pci_ids.h> | 18 | #include <linux/pci_ids.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include <linux/edac.h> | ||
20 | #include "edac_core.h" | 21 | #include "edac_core.h" |
21 | 22 | ||
22 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ | 23 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -344,6 +345,9 @@ static struct pci_driver amd76x_driver = { | |||
344 | 345 | ||
345 | static int __init amd76x_init(void) | 346 | static int __init amd76x_init(void) |
346 | { | 347 | { |
348 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
349 | opstate_init(); | ||
350 | |||
347 | return pci_register_driver(&amd76x_driver); | 351 | return pci_register_driver(&amd76x_driver); |
348 | } | 352 | } |
349 | 353 | ||
@@ -358,3 +362,6 @@ module_exit(amd76x_exit); | |||
358 | MODULE_LICENSE("GPL"); | 362 | MODULE_LICENSE("GPL"); |
359 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); | 363 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); |
360 | MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); | 364 | MODULE_DESCRIPTION("MC support for AMD 76x memory controllers"); |
365 | |||
366 | module_param(edac_op_state, int, 0444); | ||
367 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index 6eb434749cd5..c94a0eb492cb 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #define EDAC_MOD_STR "e752x_edac" | 29 | #define EDAC_MOD_STR "e752x_edac" |
30 | 30 | ||
31 | static int force_function_unhide; | 31 | static int force_function_unhide; |
32 | static int sysbus_parity = -1; | ||
32 | 33 | ||
33 | static struct edac_pci_ctl_info *e752x_pci; | 34 | static struct edac_pci_ctl_info *e752x_pci; |
34 | 35 | ||
@@ -62,6 +63,14 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
62 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 | 63 | #define PCI_DEVICE_ID_INTEL_7320_1_ERR 0x3593 |
63 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ | 64 | #endif /* PCI_DEVICE_ID_INTEL_7320_1_ERR */ |
64 | 65 | ||
66 | #ifndef PCI_DEVICE_ID_INTEL_3100_0 | ||
67 | #define PCI_DEVICE_ID_INTEL_3100_0 0x35B0 | ||
68 | #endif /* PCI_DEVICE_ID_INTEL_3100_0 */ | ||
69 | |||
70 | #ifndef PCI_DEVICE_ID_INTEL_3100_1_ERR | ||
71 | #define PCI_DEVICE_ID_INTEL_3100_1_ERR 0x35B1 | ||
72 | #endif /* PCI_DEVICE_ID_INTEL_3100_1_ERR */ | ||
73 | |||
65 | #define E752X_NR_CSROWS 8 /* number of csrows */ | 74 | #define E752X_NR_CSROWS 8 /* number of csrows */ |
66 | 75 | ||
67 | /* E752X register addresses - device 0 function 0 */ | 76 | /* E752X register addresses - device 0 function 0 */ |
@@ -152,6 +161,12 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
152 | /* error syndrome register (16b) */ | 161 | /* error syndrome register (16b) */ |
153 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ | 162 | #define E752X_DEVPRES1 0xF4 /* Device Present 1 register (8b) */ |
154 | 163 | ||
164 | /* 3100 IMCH specific register addresses - device 0 function 1 */ | ||
165 | #define I3100_NSI_FERR 0x48 /* NSI first error reg (32b) */ | ||
166 | #define I3100_NSI_NERR 0x4C /* NSI next error reg (32b) */ | ||
167 | #define I3100_NSI_SMICMD 0x54 /* NSI SMI command register (32b) */ | ||
168 | #define I3100_NSI_EMASK 0x90 /* NSI error mask register (32b) */ | ||
169 | |||
155 | /* ICH5R register addresses - device 30 function 0 */ | 170 | /* ICH5R register addresses - device 30 function 0 */ |
156 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ | 171 | #define ICH5R_PCI_STAT 0x06 /* PCI status register (16b) */ |
157 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ | 172 | #define ICH5R_PCI_2ND_STAT 0x1E /* PCI status secondary reg (16b) */ |
@@ -160,7 +175,8 @@ static struct edac_pci_ctl_info *e752x_pci; | |||
160 | enum e752x_chips { | 175 | enum e752x_chips { |
161 | E7520 = 0, | 176 | E7520 = 0, |
162 | E7525 = 1, | 177 | E7525 = 1, |
163 | E7320 = 2 | 178 | E7320 = 2, |
179 | I3100 = 3 | ||
164 | }; | 180 | }; |
165 | 181 | ||
166 | struct e752x_pvt { | 182 | struct e752x_pvt { |
@@ -185,8 +201,10 @@ struct e752x_dev_info { | |||
185 | struct e752x_error_info { | 201 | struct e752x_error_info { |
186 | u32 ferr_global; | 202 | u32 ferr_global; |
187 | u32 nerr_global; | 203 | u32 nerr_global; |
188 | u8 hi_ferr; | 204 | u32 nsi_ferr; /* 3100 only */ |
189 | u8 hi_nerr; | 205 | u32 nsi_nerr; /* 3100 only */ |
206 | u8 hi_ferr; /* all but 3100 */ | ||
207 | u8 hi_nerr; /* all but 3100 */ | ||
190 | u16 sysbus_ferr; | 208 | u16 sysbus_ferr; |
191 | u16 sysbus_nerr; | 209 | u16 sysbus_nerr; |
192 | u8 buf_ferr; | 210 | u8 buf_ferr; |
@@ -215,6 +233,10 @@ static const struct e752x_dev_info e752x_devs[] = { | |||
215 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, | 233 | .err_dev = PCI_DEVICE_ID_INTEL_7320_1_ERR, |
216 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, | 234 | .ctl_dev = PCI_DEVICE_ID_INTEL_7320_0, |
217 | .ctl_name = "E7320"}, | 235 | .ctl_name = "E7320"}, |
236 | [I3100] = { | ||
237 | .err_dev = PCI_DEVICE_ID_INTEL_3100_1_ERR, | ||
238 | .ctl_dev = PCI_DEVICE_ID_INTEL_3100_0, | ||
239 | .ctl_name = "3100"}, | ||
218 | }; | 240 | }; |
219 | 241 | ||
220 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, | 242 | static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, |
@@ -402,7 +424,7 @@ static inline void process_threshold_ce(struct mem_ctl_info *mci, u16 error, | |||
402 | static char *global_message[11] = { | 424 | static char *global_message[11] = { |
403 | "PCI Express C1", "PCI Express C", "PCI Express B1", | 425 | "PCI Express C1", "PCI Express C", "PCI Express B1", |
404 | "PCI Express B", "PCI Express A1", "PCI Express A", | 426 | "PCI Express B", "PCI Express A1", "PCI Express A", |
405 | "DMA Controler", "HUB Interface", "System Bus", | 427 | "DMA Controler", "HUB or NS Interface", "System Bus", |
406 | "DRAM Controler", "Internal Buffer" | 428 | "DRAM Controler", "Internal Buffer" |
407 | }; | 429 | }; |
408 | 430 | ||
@@ -455,6 +477,63 @@ static inline void hub_error(int fatal, u8 errors, int *error_found, | |||
455 | do_hub_error(fatal, errors); | 477 | do_hub_error(fatal, errors); |
456 | } | 478 | } |
457 | 479 | ||
480 | #define NSI_FATAL_MASK 0x0c080081 | ||
481 | #define NSI_NON_FATAL_MASK 0x23a0ba64 | ||
482 | #define NSI_ERR_MASK (NSI_FATAL_MASK | NSI_NON_FATAL_MASK) | ||
483 | |||
484 | static char *nsi_message[30] = { | ||
485 | "NSI Link Down", /* NSI_FERR/NSI_NERR bit 0, fatal error */ | ||
486 | "", /* reserved */ | ||
487 | "NSI Parity Error", /* bit 2, non-fatal */ | ||
488 | "", /* reserved */ | ||
489 | "", /* reserved */ | ||
490 | "Correctable Error Message", /* bit 5, non-fatal */ | ||
491 | "Non-Fatal Error Message", /* bit 6, non-fatal */ | ||
492 | "Fatal Error Message", /* bit 7, fatal */ | ||
493 | "", /* reserved */ | ||
494 | "Receiver Error", /* bit 9, non-fatal */ | ||
495 | "", /* reserved */ | ||
496 | "Bad TLP", /* bit 11, non-fatal */ | ||
497 | "Bad DLLP", /* bit 12, non-fatal */ | ||
498 | "REPLAY_NUM Rollover", /* bit 13, non-fatal */ | ||
499 | "", /* reserved */ | ||
500 | "Replay Timer Timeout", /* bit 15, non-fatal */ | ||
501 | "", /* reserved */ | ||
502 | "", /* reserved */ | ||
503 | "", /* reserved */ | ||
504 | "Data Link Protocol Error", /* bit 19, fatal */ | ||
505 | "", /* reserved */ | ||
506 | "Poisoned TLP", /* bit 21, non-fatal */ | ||
507 | "", /* reserved */ | ||
508 | "Completion Timeout", /* bit 23, non-fatal */ | ||
509 | "Completer Abort", /* bit 24, non-fatal */ | ||
510 | "Unexpected Completion", /* bit 25, non-fatal */ | ||
511 | "Receiver Overflow", /* bit 26, fatal */ | ||
512 | "Malformed TLP", /* bit 27, fatal */ | ||
513 | "", /* reserved */ | ||
514 | "Unsupported Request" /* bit 29, non-fatal */ | ||
515 | }; | ||
516 | |||
517 | static void do_nsi_error(int fatal, u32 errors) | ||
518 | { | ||
519 | int i; | ||
520 | |||
521 | for (i = 0; i < 30; i++) { | ||
522 | if (errors & (1 << i)) | ||
523 | printk(KERN_WARNING "%sError %s\n", | ||
524 | fatal_message[fatal], nsi_message[i]); | ||
525 | } | ||
526 | } | ||
527 | |||
528 | static inline void nsi_error(int fatal, u32 errors, int *error_found, | ||
529 | int handle_error) | ||
530 | { | ||
531 | *error_found = 1; | ||
532 | |||
533 | if (handle_error) | ||
534 | do_nsi_error(fatal, errors); | ||
535 | } | ||
536 | |||
458 | static char *membuf_message[4] = { | 537 | static char *membuf_message[4] = { |
459 | "Internal PMWB to DRAM parity", | 538 | "Internal PMWB to DRAM parity", |
460 | "Internal PMWB to System Bus Parity", | 539 | "Internal PMWB to System Bus Parity", |
@@ -546,6 +625,31 @@ static void e752x_check_hub_interface(struct e752x_error_info *info, | |||
546 | } | 625 | } |
547 | } | 626 | } |
548 | 627 | ||
628 | static void e752x_check_ns_interface(struct e752x_error_info *info, | ||
629 | int *error_found, int handle_error) | ||
630 | { | ||
631 | u32 stat32; | ||
632 | |||
633 | stat32 = info->nsi_ferr; | ||
634 | if (stat32 & NSI_ERR_MASK) { /* Error, so process */ | ||
635 | if (stat32 & NSI_FATAL_MASK) /* check for fatal errors */ | ||
636 | nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, | ||
637 | handle_error); | ||
638 | if (stat32 & NSI_NON_FATAL_MASK) /* check for non-fatal ones */ | ||
639 | nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, | ||
640 | handle_error); | ||
641 | } | ||
642 | stat32 = info->nsi_nerr; | ||
643 | if (stat32 & NSI_ERR_MASK) { | ||
644 | if (stat32 & NSI_FATAL_MASK) | ||
645 | nsi_error(1, stat32 & NSI_FATAL_MASK, error_found, | ||
646 | handle_error); | ||
647 | if (stat32 & NSI_NON_FATAL_MASK) | ||
648 | nsi_error(0, stat32 & NSI_NON_FATAL_MASK, error_found, | ||
649 | handle_error); | ||
650 | } | ||
651 | } | ||
652 | |||
549 | static void e752x_check_sysbus(struct e752x_error_info *info, | 653 | static void e752x_check_sysbus(struct e752x_error_info *info, |
550 | int *error_found, int handle_error) | 654 | int *error_found, int handle_error) |
551 | { | 655 | { |
@@ -653,7 +757,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
653 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); | 757 | pci_read_config_dword(dev, E752X_FERR_GLOBAL, &info->ferr_global); |
654 | 758 | ||
655 | if (info->ferr_global) { | 759 | if (info->ferr_global) { |
656 | pci_read_config_byte(dev, E752X_HI_FERR, &info->hi_ferr); | 760 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
761 | pci_read_config_dword(dev, I3100_NSI_FERR, | ||
762 | &info->nsi_ferr); | ||
763 | info->hi_ferr = 0; | ||
764 | } else { | ||
765 | pci_read_config_byte(dev, E752X_HI_FERR, | ||
766 | &info->hi_ferr); | ||
767 | info->nsi_ferr = 0; | ||
768 | } | ||
657 | pci_read_config_word(dev, E752X_SYSBUS_FERR, | 769 | pci_read_config_word(dev, E752X_SYSBUS_FERR, |
658 | &info->sysbus_ferr); | 770 | &info->sysbus_ferr); |
659 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); | 771 | pci_read_config_byte(dev, E752X_BUF_FERR, &info->buf_ferr); |
@@ -669,10 +781,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
669 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, | 781 | pci_read_config_dword(dev, E752X_DRAM_RETR_ADD, |
670 | &info->dram_retr_add); | 782 | &info->dram_retr_add); |
671 | 783 | ||
784 | /* ignore the reserved bits just in case */ | ||
672 | if (info->hi_ferr & 0x7f) | 785 | if (info->hi_ferr & 0x7f) |
673 | pci_write_config_byte(dev, E752X_HI_FERR, | 786 | pci_write_config_byte(dev, E752X_HI_FERR, |
674 | info->hi_ferr); | 787 | info->hi_ferr); |
675 | 788 | ||
789 | if (info->nsi_ferr & NSI_ERR_MASK) | ||
790 | pci_write_config_dword(dev, I3100_NSI_FERR, | ||
791 | info->nsi_ferr); | ||
792 | |||
676 | if (info->sysbus_ferr) | 793 | if (info->sysbus_ferr) |
677 | pci_write_config_word(dev, E752X_SYSBUS_FERR, | 794 | pci_write_config_word(dev, E752X_SYSBUS_FERR, |
678 | info->sysbus_ferr); | 795 | info->sysbus_ferr); |
@@ -692,7 +809,15 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
692 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); | 809 | pci_read_config_dword(dev, E752X_NERR_GLOBAL, &info->nerr_global); |
693 | 810 | ||
694 | if (info->nerr_global) { | 811 | if (info->nerr_global) { |
695 | pci_read_config_byte(dev, E752X_HI_NERR, &info->hi_nerr); | 812 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
813 | pci_read_config_dword(dev, I3100_NSI_NERR, | ||
814 | &info->nsi_nerr); | ||
815 | info->hi_nerr = 0; | ||
816 | } else { | ||
817 | pci_read_config_byte(dev, E752X_HI_NERR, | ||
818 | &info->hi_nerr); | ||
819 | info->nsi_nerr = 0; | ||
820 | } | ||
696 | pci_read_config_word(dev, E752X_SYSBUS_NERR, | 821 | pci_read_config_word(dev, E752X_SYSBUS_NERR, |
697 | &info->sysbus_nerr); | 822 | &info->sysbus_nerr); |
698 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); | 823 | pci_read_config_byte(dev, E752X_BUF_NERR, &info->buf_nerr); |
@@ -706,6 +831,10 @@ static void e752x_get_error_info(struct mem_ctl_info *mci, | |||
706 | pci_write_config_byte(dev, E752X_HI_NERR, | 831 | pci_write_config_byte(dev, E752X_HI_NERR, |
707 | info->hi_nerr); | 832 | info->hi_nerr); |
708 | 833 | ||
834 | if (info->nsi_nerr & NSI_ERR_MASK) | ||
835 | pci_write_config_dword(dev, I3100_NSI_NERR, | ||
836 | info->nsi_nerr); | ||
837 | |||
709 | if (info->sysbus_nerr) | 838 | if (info->sysbus_nerr) |
710 | pci_write_config_word(dev, E752X_SYSBUS_NERR, | 839 | pci_write_config_word(dev, E752X_SYSBUS_NERR, |
711 | info->sysbus_nerr); | 840 | info->sysbus_nerr); |
@@ -750,6 +879,7 @@ static int e752x_process_error_info(struct mem_ctl_info *mci, | |||
750 | global_error(0, stat32, &error_found, handle_errors); | 879 | global_error(0, stat32, &error_found, handle_errors); |
751 | 880 | ||
752 | e752x_check_hub_interface(info, &error_found, handle_errors); | 881 | e752x_check_hub_interface(info, &error_found, handle_errors); |
882 | e752x_check_ns_interface(info, &error_found, handle_errors); | ||
753 | e752x_check_sysbus(info, &error_found, handle_errors); | 883 | e752x_check_sysbus(info, &error_found, handle_errors); |
754 | e752x_check_membuf(info, &error_found, handle_errors); | 884 | e752x_check_membuf(info, &error_found, handle_errors); |
755 | e752x_check_dram(mci, info, &error_found, handle_errors); | 885 | e752x_check_dram(mci, info, &error_found, handle_errors); |
@@ -920,15 +1050,53 @@ fail: | |||
920 | return 1; | 1050 | return 1; |
921 | } | 1051 | } |
922 | 1052 | ||
1053 | /* Setup system bus parity mask register. | ||
1054 | * Sysbus parity supported on: | ||
1055 | * e7320/e7520/e7525 + Xeon | ||
1056 | * i3100 + Xeon/Celeron | ||
1057 | * Sysbus parity not supported on: | ||
1058 | * i3100 + Pentium M/Celeron M/Core Duo/Core2 Duo | ||
1059 | */ | ||
1060 | static void e752x_init_sysbus_parity_mask(struct e752x_pvt *pvt) | ||
1061 | { | ||
1062 | char *cpu_id = cpu_data(0).x86_model_id; | ||
1063 | struct pci_dev *dev = pvt->dev_d0f1; | ||
1064 | int enable = 1; | ||
1065 | |||
1066 | /* Allow module paramter override, else see if CPU supports parity */ | ||
1067 | if (sysbus_parity != -1) { | ||
1068 | enable = sysbus_parity; | ||
1069 | } else if (cpu_id[0] && | ||
1070 | ((strstr(cpu_id, "Pentium") && strstr(cpu_id, " M ")) || | ||
1071 | (strstr(cpu_id, "Celeron") && strstr(cpu_id, " M ")) || | ||
1072 | (strstr(cpu_id, "Core") && strstr(cpu_id, "Duo")))) { | ||
1073 | e752x_printk(KERN_INFO, "System Bus Parity not " | ||
1074 | "supported by CPU, disabling\n"); | ||
1075 | enable = 0; | ||
1076 | } | ||
1077 | |||
1078 | if (enable) | ||
1079 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0000); | ||
1080 | else | ||
1081 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x0309); | ||
1082 | } | ||
1083 | |||
923 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) | 1084 | static void e752x_init_error_reporting_regs(struct e752x_pvt *pvt) |
924 | { | 1085 | { |
925 | struct pci_dev *dev; | 1086 | struct pci_dev *dev; |
926 | 1087 | ||
927 | dev = pvt->dev_d0f1; | 1088 | dev = pvt->dev_d0f1; |
928 | /* Turn off error disable & SMI in case the BIOS turned it on */ | 1089 | /* Turn off error disable & SMI in case the BIOS turned it on */ |
929 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | 1090 | if (pvt->dev_info->err_dev == PCI_DEVICE_ID_INTEL_3100_1_ERR) { |
930 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | 1091 | pci_write_config_dword(dev, I3100_NSI_EMASK, 0); |
931 | pci_write_config_word(dev, E752X_SYSBUS_ERRMASK, 0x00); | 1092 | pci_write_config_dword(dev, I3100_NSI_SMICMD, 0); |
1093 | } else { | ||
1094 | pci_write_config_byte(dev, E752X_HI_ERRMASK, 0x00); | ||
1095 | pci_write_config_byte(dev, E752X_HI_SMICMD, 0x00); | ||
1096 | } | ||
1097 | |||
1098 | e752x_init_sysbus_parity_mask(pvt); | ||
1099 | |||
932 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); | 1100 | pci_write_config_word(dev, E752X_SYSBUS_SMICMD, 0x00); |
933 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); | 1101 | pci_write_config_byte(dev, E752X_BUF_ERRMASK, 0x00); |
934 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); | 1102 | pci_write_config_byte(dev, E752X_BUF_SMICMD, 0x00); |
@@ -949,16 +1117,6 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
949 | debugf0("%s(): mci\n", __func__); | 1117 | debugf0("%s(): mci\n", __func__); |
950 | debugf0("Starting Probe1\n"); | 1118 | debugf0("Starting Probe1\n"); |
951 | 1119 | ||
952 | /* make sure error reporting method is sane */ | ||
953 | switch (edac_op_state) { | ||
954 | case EDAC_OPSTATE_POLL: | ||
955 | case EDAC_OPSTATE_NMI: | ||
956 | break; | ||
957 | default: | ||
958 | edac_op_state = EDAC_OPSTATE_POLL; | ||
959 | break; | ||
960 | } | ||
961 | |||
962 | /* check to see if device 0 function 1 is enabled; if it isn't, we | 1120 | /* check to see if device 0 function 1 is enabled; if it isn't, we |
963 | * assume the BIOS has reserved it for a reason and is expecting | 1121 | * assume the BIOS has reserved it for a reason and is expecting |
964 | * exclusive access, we take care not to violate that assumption and | 1122 | * exclusive access, we take care not to violate that assumption and |
@@ -985,8 +1143,9 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
985 | 1143 | ||
986 | debugf3("%s(): init mci\n", __func__); | 1144 | debugf3("%s(): init mci\n", __func__); |
987 | mci->mtype_cap = MEM_FLAG_RDDR; | 1145 | mci->mtype_cap = MEM_FLAG_RDDR; |
988 | mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | | 1146 | /* 3100 IMCH supports SECDEC only */ |
989 | EDAC_FLAG_S4ECD4ED; | 1147 | mci->edac_ctl_cap = (dev_idx == I3100) ? EDAC_FLAG_SECDED : |
1148 | (EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED); | ||
990 | /* FIXME - what if different memory types are in different csrows? */ | 1149 | /* FIXME - what if different memory types are in different csrows? */ |
991 | mci->mod_name = EDAC_MOD_STR; | 1150 | mci->mod_name = EDAC_MOD_STR; |
992 | mci->mod_ver = E752X_REVISION; | 1151 | mci->mod_ver = E752X_REVISION; |
@@ -1018,7 +1177,10 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx) | |||
1018 | e752x_init_csrows(mci, pdev, ddrcsr); | 1177 | e752x_init_csrows(mci, pdev, ddrcsr); |
1019 | e752x_init_mem_map_table(pdev, pvt); | 1178 | e752x_init_mem_map_table(pdev, pvt); |
1020 | 1179 | ||
1021 | mci->edac_cap |= EDAC_FLAG_NONE; | 1180 | if (dev_idx == I3100) |
1181 | mci->edac_cap = EDAC_FLAG_SECDED; /* the only mode supported */ | ||
1182 | else | ||
1183 | mci->edac_cap |= EDAC_FLAG_NONE; | ||
1022 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); | 1184 | debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); |
1023 | 1185 | ||
1024 | /* load the top of low memory, remap base, and remap limit vars */ | 1186 | /* load the top of low memory, remap base, and remap limit vars */ |
@@ -1110,6 +1272,9 @@ static const struct pci_device_id e752x_pci_tbl[] __devinitdata = { | |||
1110 | PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 1272 | PCI_VEND_DEV(INTEL, 7320_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
1111 | E7320}, | 1273 | E7320}, |
1112 | { | 1274 | { |
1275 | PCI_VEND_DEV(INTEL, 3100_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, | ||
1276 | I3100}, | ||
1277 | { | ||
1113 | 0, | 1278 | 0, |
1114 | } /* 0 terminated list. */ | 1279 | } /* 0 terminated list. */ |
1115 | }; | 1280 | }; |
@@ -1128,6 +1293,10 @@ static int __init e752x_init(void) | |||
1128 | int pci_rc; | 1293 | int pci_rc; |
1129 | 1294 | ||
1130 | debugf3("%s()\n", __func__); | 1295 | debugf3("%s()\n", __func__); |
1296 | |||
1297 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1298 | opstate_init(); | ||
1299 | |||
1131 | pci_rc = pci_register_driver(&e752x_driver); | 1300 | pci_rc = pci_register_driver(&e752x_driver); |
1132 | return (pci_rc < 0) ? pci_rc : 0; | 1301 | return (pci_rc < 0) ? pci_rc : 0; |
1133 | } | 1302 | } |
@@ -1143,10 +1312,15 @@ module_exit(e752x_exit); | |||
1143 | 1312 | ||
1144 | MODULE_LICENSE("GPL"); | 1313 | MODULE_LICENSE("GPL"); |
1145 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); | 1314 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Tom Zimmerman\n"); |
1146 | MODULE_DESCRIPTION("MC support for Intel e752x memory controllers"); | 1315 | MODULE_DESCRIPTION("MC support for Intel e752x/3100 memory controllers"); |
1147 | 1316 | ||
1148 | module_param(force_function_unhide, int, 0444); | 1317 | module_param(force_function_unhide, int, 0444); |
1149 | MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" | 1318 | MODULE_PARM_DESC(force_function_unhide, "if BIOS sets Dev0:Fun1 up as hidden:" |
1150 | " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); | 1319 | " 1=force unhide and hope BIOS doesn't fight driver for Dev0:Fun1 access"); |
1320 | |||
1151 | module_param(edac_op_state, int, 0444); | 1321 | module_param(edac_op_state, int, 0444); |
1152 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | 1322 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
1323 | |||
1324 | module_param(sysbus_parity, int, 0444); | ||
1325 | MODULE_PARM_DESC(sysbus_parity, "0=disable system bus parity checking," | ||
1326 | " 1=enable system bus parity checking, default=auto-detect"); | ||
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 96ecc4926641..c7d11cc4e21a 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -414,16 +414,6 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) | |||
414 | 414 | ||
415 | debugf0("%s(): mci\n", __func__); | 415 | debugf0("%s(): mci\n", __func__); |
416 | 416 | ||
417 | /* make sure error reporting method is sane */ | ||
418 | switch (edac_op_state) { | ||
419 | case EDAC_OPSTATE_POLL: | ||
420 | case EDAC_OPSTATE_NMI: | ||
421 | break; | ||
422 | default: | ||
423 | edac_op_state = EDAC_OPSTATE_POLL; | ||
424 | break; | ||
425 | } | ||
426 | |||
427 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); | 417 | pci_read_config_dword(pdev, E7XXX_DRC, &drc); |
428 | 418 | ||
429 | drc_chan = dual_channel_active(drc, dev_idx); | 419 | drc_chan = dual_channel_active(drc, dev_idx); |
@@ -565,6 +555,9 @@ static struct pci_driver e7xxx_driver = { | |||
565 | 555 | ||
566 | static int __init e7xxx_init(void) | 556 | static int __init e7xxx_init(void) |
567 | { | 557 | { |
558 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
559 | opstate_init(); | ||
560 | |||
568 | return pci_register_driver(&e7xxx_driver); | 561 | return pci_register_driver(&e7xxx_driver); |
569 | } | 562 | } |
570 | 563 | ||
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index b9552bc03dea..63372fa7ecfe 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -36,7 +36,7 @@ | |||
36 | * is protected by the 'device_ctls_mutex' lock | 36 | * is protected by the 'device_ctls_mutex' lock |
37 | */ | 37 | */ |
38 | static DEFINE_MUTEX(device_ctls_mutex); | 38 | static DEFINE_MUTEX(device_ctls_mutex); |
39 | static struct list_head edac_device_list = LIST_HEAD_INIT(edac_device_list); | 39 | static LIST_HEAD(edac_device_list); |
40 | 40 | ||
41 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
42 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) | 42 | static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev) |
@@ -375,37 +375,6 @@ static void del_edac_device_from_global_list(struct edac_device_ctl_info | |||
375 | wait_for_completion(&edac_device->removal_complete); | 375 | wait_for_completion(&edac_device->removal_complete); |
376 | } | 376 | } |
377 | 377 | ||
378 | /** | ||
379 | * edac_device_find | ||
380 | * Search for a edac_device_ctl_info structure whose index is 'idx'. | ||
381 | * | ||
382 | * If found, return a pointer to the structure. | ||
383 | * Else return NULL. | ||
384 | * | ||
385 | * Caller must hold device_ctls_mutex. | ||
386 | */ | ||
387 | struct edac_device_ctl_info *edac_device_find(int idx) | ||
388 | { | ||
389 | struct list_head *item; | ||
390 | struct edac_device_ctl_info *edac_dev; | ||
391 | |||
392 | /* Iterate over list, looking for exact match of ID */ | ||
393 | list_for_each(item, &edac_device_list) { | ||
394 | edac_dev = list_entry(item, struct edac_device_ctl_info, link); | ||
395 | |||
396 | if (edac_dev->dev_idx >= idx) { | ||
397 | if (edac_dev->dev_idx == idx) | ||
398 | return edac_dev; | ||
399 | |||
400 | /* not on list, so terminate early */ | ||
401 | break; | ||
402 | } | ||
403 | } | ||
404 | |||
405 | return NULL; | ||
406 | } | ||
407 | EXPORT_SYMBOL_GPL(edac_device_find); | ||
408 | |||
409 | /* | 378 | /* |
410 | * edac_device_workq_function | 379 | * edac_device_workq_function |
411 | * performs the operation scheduled by a workq request | 380 | * performs the operation scheduled by a workq request |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 063a1bffe38b..a4cf1645f588 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | /* lock to memory controller's control array */ | 37 | /* lock to memory controller's control array */ |
38 | static DEFINE_MUTEX(mem_ctls_mutex); | 38 | static DEFINE_MUTEX(mem_ctls_mutex); |
39 | static struct list_head mc_devices = LIST_HEAD_INIT(mc_devices); | 39 | static LIST_HEAD(mc_devices); |
40 | 40 | ||
41 | #ifdef CONFIG_EDAC_DEBUG | 41 | #ifdef CONFIG_EDAC_DEBUG |
42 | 42 | ||
@@ -886,24 +886,3 @@ void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, | |||
886 | mci->csrows[csrow].channels[channel].ce_count++; | 886 | mci->csrows[csrow].channels[channel].ce_count++; |
887 | } | 887 | } |
888 | EXPORT_SYMBOL(edac_mc_handle_fbd_ce); | 888 | EXPORT_SYMBOL(edac_mc_handle_fbd_ce); |
889 | |||
890 | /* | ||
891 | * Iterate over all MC instances and check for ECC, et al, errors | ||
892 | */ | ||
893 | void edac_check_mc_devices(void) | ||
894 | { | ||
895 | struct list_head *item; | ||
896 | struct mem_ctl_info *mci; | ||
897 | |||
898 | debugf3("%s()\n", __func__); | ||
899 | mutex_lock(&mem_ctls_mutex); | ||
900 | |||
901 | list_for_each(item, &mc_devices) { | ||
902 | mci = list_entry(item, struct mem_ctl_info, link); | ||
903 | |||
904 | if (mci->edac_check != NULL) | ||
905 | mci->edac_check(mci); | ||
906 | } | ||
907 | |||
908 | mutex_unlock(&mem_ctls_mutex); | ||
909 | } | ||
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h index cbc419c8ebc1..233d4798c3aa 100644 --- a/drivers/edac/edac_module.h +++ b/drivers/edac/edac_module.h | |||
@@ -27,7 +27,6 @@ extern int edac_mc_register_sysfs_main_kobj(struct mem_ctl_info *mci); | |||
27 | extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); | 27 | extern void edac_mc_unregister_sysfs_main_kobj(struct mem_ctl_info *mci); |
28 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); | 28 | extern int edac_create_sysfs_mci_device(struct mem_ctl_info *mci); |
29 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); | 29 | extern void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci); |
30 | extern void edac_check_mc_devices(void); | ||
31 | extern int edac_get_log_ue(void); | 30 | extern int edac_get_log_ue(void); |
32 | extern int edac_get_log_ce(void); | 31 | extern int edac_get_log_ce(void); |
33 | extern int edac_get_panic_on_ue(void); | 32 | extern int edac_get_panic_on_ue(void); |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index 32be43576a8e..9b24340b52e1 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include "edac_module.h" | 29 | #include "edac_module.h" |
30 | 30 | ||
31 | static DEFINE_MUTEX(edac_pci_ctls_mutex); | 31 | static DEFINE_MUTEX(edac_pci_ctls_mutex); |
32 | static struct list_head edac_pci_list = LIST_HEAD_INIT(edac_pci_list); | 32 | static LIST_HEAD(edac_pci_list); |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * edac_pci_alloc_ctl_info | 35 | * edac_pci_alloc_ctl_info |
@@ -189,6 +189,9 @@ static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) | |||
189 | wait_for_completion(&pci->complete); | 189 | wait_for_completion(&pci->complete); |
190 | } | 190 | } |
191 | 191 | ||
192 | #if 0 | ||
193 | /* Older code, but might use in the future */ | ||
194 | |||
192 | /* | 195 | /* |
193 | * edac_pci_find() | 196 | * edac_pci_find() |
194 | * Search for an edac_pci_ctl_info structure whose index is 'idx' | 197 | * Search for an edac_pci_ctl_info structure whose index is 'idx' |
@@ -219,6 +222,7 @@ struct edac_pci_ctl_info *edac_pci_find(int idx) | |||
219 | return NULL; | 222 | return NULL; |
220 | } | 223 | } |
221 | EXPORT_SYMBOL_GPL(edac_pci_find); | 224 | EXPORT_SYMBOL_GPL(edac_pci_find); |
225 | #endif | ||
222 | 226 | ||
223 | /* | 227 | /* |
224 | * edac_pci_workq_function() | 228 | * edac_pci_workq_function() |
@@ -422,7 +426,7 @@ EXPORT_SYMBOL_GPL(edac_pci_del_device); | |||
422 | * | 426 | * |
423 | * a Generic parity check API | 427 | * a Generic parity check API |
424 | */ | 428 | */ |
425 | void edac_pci_generic_check(struct edac_pci_ctl_info *pci) | 429 | static void edac_pci_generic_check(struct edac_pci_ctl_info *pci) |
426 | { | 430 | { |
427 | debugf4("%s()\n", __func__); | 431 | debugf4("%s()\n", __func__); |
428 | edac_pci_do_parity_check(); | 432 | edac_pci_do_parity_check(); |
diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c index 71c3195d3704..2c1fa1bb6df2 100644 --- a/drivers/edac/edac_pci_sysfs.c +++ b/drivers/edac/edac_pci_sysfs.c | |||
@@ -37,17 +37,17 @@ int edac_pci_get_check_errors(void) | |||
37 | return check_pci_errors; | 37 | return check_pci_errors; |
38 | } | 38 | } |
39 | 39 | ||
40 | int edac_pci_get_log_pe(void) | 40 | static int edac_pci_get_log_pe(void) |
41 | { | 41 | { |
42 | return edac_pci_log_pe; | 42 | return edac_pci_log_pe; |
43 | } | 43 | } |
44 | 44 | ||
45 | int edac_pci_get_log_npe(void) | 45 | static int edac_pci_get_log_npe(void) |
46 | { | 46 | { |
47 | return edac_pci_log_npe; | 47 | return edac_pci_log_npe; |
48 | } | 48 | } |
49 | 49 | ||
50 | int edac_pci_get_panic_on_pe(void) | 50 | static int edac_pci_get_panic_on_pe(void) |
51 | { | 51 | { |
52 | return edac_pci_panic_on_pe; | 52 | return edac_pci_panic_on_pe; |
53 | } | 53 | } |
@@ -197,7 +197,8 @@ error_out: | |||
197 | * | 197 | * |
198 | * unregister the kobj for the EDAC PCI instance | 198 | * unregister the kobj for the EDAC PCI instance |
199 | */ | 199 | */ |
200 | void edac_pci_unregister_sysfs_instance_kobj(struct edac_pci_ctl_info *pci) | 200 | static void edac_pci_unregister_sysfs_instance_kobj( |
201 | struct edac_pci_ctl_info *pci) | ||
201 | { | 202 | { |
202 | debugf0("%s()\n", __func__); | 203 | debugf0("%s()\n", __func__); |
203 | 204 | ||
@@ -337,7 +338,7 @@ static struct kobj_type ktype_edac_pci_main_kobj = { | |||
337 | * setup the sysfs for EDAC PCI attributes | 338 | * setup the sysfs for EDAC PCI attributes |
338 | * assumes edac_class has already been initialized | 339 | * assumes edac_class has already been initialized |
339 | */ | 340 | */ |
340 | int edac_pci_main_kobj_setup(void) | 341 | static int edac_pci_main_kobj_setup(void) |
341 | { | 342 | { |
342 | int err; | 343 | int err; |
343 | struct sysdev_class *edac_class; | 344 | struct sysdev_class *edac_class; |
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c index 5d4292811c14..6c9a0f2a593c 100644 --- a/drivers/edac/i3000_edac.c +++ b/drivers/edac/i3000_edac.c | |||
@@ -326,15 +326,6 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx) | |||
326 | return -ENODEV; | 326 | return -ENODEV; |
327 | } | 327 | } |
328 | 328 | ||
329 | switch (edac_op_state) { | ||
330 | case EDAC_OPSTATE_POLL: | ||
331 | case EDAC_OPSTATE_NMI: | ||
332 | break; | ||
333 | default: | ||
334 | edac_op_state = EDAC_OPSTATE_POLL; | ||
335 | break; | ||
336 | } | ||
337 | |||
338 | c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ | 329 | c0dra[0] = readb(window + I3000_C0DRA + 0); /* ranks 0,1 */ |
339 | c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ | 330 | c0dra[1] = readb(window + I3000_C0DRA + 1); /* ranks 2,3 */ |
340 | c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ | 331 | c1dra[0] = readb(window + I3000_C1DRA + 0); /* ranks 0,1 */ |
@@ -503,6 +494,10 @@ static int __init i3000_init(void) | |||
503 | int pci_rc; | 494 | int pci_rc; |
504 | 495 | ||
505 | debugf3("MC: %s()\n", __func__); | 496 | debugf3("MC: %s()\n", __func__); |
497 | |||
498 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
499 | opstate_init(); | ||
500 | |||
506 | pci_rc = pci_register_driver(&i3000_driver); | 501 | pci_rc = pci_register_driver(&i3000_driver); |
507 | if (pci_rc < 0) | 502 | if (pci_rc < 0) |
508 | goto fail0; | 503 | goto fail0; |
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 5a852017c17a..4a16b5b61cfb 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
@@ -1286,16 +1286,6 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx) | |||
1286 | if (PCI_FUNC(pdev->devfn) != 0) | 1286 | if (PCI_FUNC(pdev->devfn) != 0) |
1287 | return -ENODEV; | 1287 | return -ENODEV; |
1288 | 1288 | ||
1289 | /* make sure error reporting method is sane */ | ||
1290 | switch (edac_op_state) { | ||
1291 | case EDAC_OPSTATE_POLL: | ||
1292 | case EDAC_OPSTATE_NMI: | ||
1293 | break; | ||
1294 | default: | ||
1295 | edac_op_state = EDAC_OPSTATE_POLL; | ||
1296 | break; | ||
1297 | } | ||
1298 | |||
1299 | /* Ask the devices for the number of CSROWS and CHANNELS so | 1289 | /* Ask the devices for the number of CSROWS and CHANNELS so |
1300 | * that we can calculate the memory resources, etc | 1290 | * that we can calculate the memory resources, etc |
1301 | * | 1291 | * |
@@ -1478,6 +1468,9 @@ static int __init i5000_init(void) | |||
1478 | 1468 | ||
1479 | debugf2("MC: " __FILE__ ": %s()\n", __func__); | 1469 | debugf2("MC: " __FILE__ ": %s()\n", __func__); |
1480 | 1470 | ||
1471 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
1472 | opstate_init(); | ||
1473 | |||
1481 | pci_rc = pci_register_driver(&i5000_driver); | 1474 | pci_rc = pci_register_driver(&i5000_driver); |
1482 | 1475 | ||
1483 | return (pci_rc < 0) ? pci_rc : 0; | 1476 | return (pci_rc < 0) ? pci_rc : 0; |
@@ -1501,5 +1494,6 @@ MODULE_AUTHOR | |||
1501 | ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); | 1494 | ("Linux Networx (http://lnxi.com) Doug Thompson <norsk5@xmission.com>"); |
1502 | MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " | 1495 | MODULE_DESCRIPTION("MC Driver for Intel I5000 memory controllers - " |
1503 | I5000_REVISION); | 1496 | I5000_REVISION); |
1497 | |||
1504 | module_param(edac_op_state, int, 0444); | 1498 | module_param(edac_op_state, int, 0444); |
1505 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | 1499 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); |
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c index 83bfe37c4bbb..c5305e3ee434 100644 --- a/drivers/edac/i82443bxgx_edac.c +++ b/drivers/edac/i82443bxgx_edac.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | 31 | ||
32 | #include <linux/edac.h> | ||
32 | #include "edac_core.h" | 33 | #include "edac_core.h" |
33 | 34 | ||
34 | #define I82443_REVISION "0.1" | 35 | #define I82443_REVISION "0.1" |
@@ -386,6 +387,9 @@ static struct pci_driver i82443bxgx_edacmc_driver = { | |||
386 | 387 | ||
387 | static int __init i82443bxgx_edacmc_init(void) | 388 | static int __init i82443bxgx_edacmc_init(void) |
388 | { | 389 | { |
390 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
391 | opstate_init(); | ||
392 | |||
389 | return pci_register_driver(&i82443bxgx_edacmc_driver); | 393 | return pci_register_driver(&i82443bxgx_edacmc_driver); |
390 | } | 394 | } |
391 | 395 | ||
@@ -400,3 +404,6 @@ module_exit(i82443bxgx_edacmc_exit); | |||
400 | MODULE_LICENSE("GPL"); | 404 | MODULE_LICENSE("GPL"); |
401 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); | 405 | MODULE_AUTHOR("Tim Small <tim@buttersideup.com> - WPAD"); |
402 | MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); | 406 | MODULE_DESCRIPTION("EDAC MC support for Intel 82443BX/GX memory controllers"); |
407 | |||
408 | module_param(edac_op_state, int, 0444); | ||
409 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index f5ecd2c4d813..c0088ba9672b 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/pci_ids.h> | 15 | #include <linux/pci_ids.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | #include <linux/edac.h> | ||
17 | #include "edac_core.h" | 18 | #include "edac_core.h" |
18 | 19 | ||
19 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ | 20 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -294,6 +295,9 @@ static int __init i82860_init(void) | |||
294 | 295 | ||
295 | debugf3("%s()\n", __func__); | 296 | debugf3("%s()\n", __func__); |
296 | 297 | ||
298 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
299 | opstate_init(); | ||
300 | |||
297 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) | 301 | if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) |
298 | goto fail0; | 302 | goto fail0; |
299 | 303 | ||
@@ -345,3 +349,6 @@ MODULE_LICENSE("GPL"); | |||
345 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " | 349 | MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " |
346 | "Ben Woodard <woodard@redhat.com>"); | 350 | "Ben Woodard <woodard@redhat.com>"); |
347 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); | 351 | MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); |
352 | |||
353 | module_param(edac_op_state, int, 0444); | ||
354 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index 031abadc439a..e43bdc43a1bf 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/pci.h> | 18 | #include <linux/pci.h> |
19 | #include <linux/pci_ids.h> | 19 | #include <linux/pci_ids.h> |
20 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
21 | #include <linux/edac.h> | ||
21 | #include "edac_core.h" | 22 | #include "edac_core.h" |
22 | 23 | ||
23 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ | 24 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -393,6 +394,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx) | |||
393 | struct i82875p_error_info discard; | 394 | struct i82875p_error_info discard; |
394 | 395 | ||
395 | debugf0("%s()\n", __func__); | 396 | debugf0("%s()\n", __func__); |
397 | |||
396 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); | 398 | ovrfl_pdev = pci_get_device(PCI_VEND_DEV(INTEL, 82875_6), NULL); |
397 | 399 | ||
398 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) | 400 | if (i82875p_setup_overfl_dev(pdev, &ovrfl_pdev, &ovrfl_window)) |
@@ -532,6 +534,10 @@ static int __init i82875p_init(void) | |||
532 | int pci_rc; | 534 | int pci_rc; |
533 | 535 | ||
534 | debugf3("%s()\n", __func__); | 536 | debugf3("%s()\n", __func__); |
537 | |||
538 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
539 | opstate_init(); | ||
540 | |||
535 | pci_rc = pci_register_driver(&i82875p_driver); | 541 | pci_rc = pci_register_driver(&i82875p_driver); |
536 | 542 | ||
537 | if (pci_rc < 0) | 543 | if (pci_rc < 0) |
@@ -586,3 +592,6 @@ module_exit(i82875p_exit); | |||
586 | MODULE_LICENSE("GPL"); | 592 | MODULE_LICENSE("GPL"); |
587 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); | 593 | MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh"); |
588 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); | 594 | MODULE_DESCRIPTION("MC support for Intel 82875 memory hub controllers"); |
595 | |||
596 | module_param(edac_op_state, int, 0444); | ||
597 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 0ee888456932..2eed3ea2cf62 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/pci.h> | 14 | #include <linux/pci.h> |
15 | #include <linux/pci_ids.h> | 15 | #include <linux/pci_ids.h> |
16 | #include <linux/slab.h> | 16 | #include <linux/slab.h> |
17 | 17 | #include <linux/edac.h> | |
18 | #include "edac_core.h" | 18 | #include "edac_core.h" |
19 | 19 | ||
20 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ | 20 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ |
@@ -611,6 +611,9 @@ static int __init i82975x_init(void) | |||
611 | 611 | ||
612 | debugf3("%s()\n", __func__); | 612 | debugf3("%s()\n", __func__); |
613 | 613 | ||
614 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
615 | opstate_init(); | ||
616 | |||
614 | pci_rc = pci_register_driver(&i82975x_driver); | 617 | pci_rc = pci_register_driver(&i82975x_driver); |
615 | if (pci_rc < 0) | 618 | if (pci_rc < 0) |
616 | goto fail0; | 619 | goto fail0; |
@@ -664,3 +667,6 @@ module_exit(i82975x_exit); | |||
664 | MODULE_LICENSE("GPL"); | 667 | MODULE_LICENSE("GPL"); |
665 | MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); | 668 | MODULE_AUTHOR("Arvind R. <arvind@acarlab.com>"); |
666 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); | 669 | MODULE_DESCRIPTION("MC support for Intel 82975 memory hub controllers"); |
670 | |||
671 | module_param(edac_op_state, int, 0444); | ||
672 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c index 90320917be28..3fd65a563848 100644 --- a/drivers/edac/pasemi_edac.c +++ b/drivers/edac/pasemi_edac.c | |||
@@ -284,6 +284,9 @@ static struct pci_driver pasemi_edac_driver = { | |||
284 | 284 | ||
285 | static int __init pasemi_edac_init(void) | 285 | static int __init pasemi_edac_init(void) |
286 | { | 286 | { |
287 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
288 | opstate_init(); | ||
289 | |||
287 | return pci_register_driver(&pasemi_edac_driver); | 290 | return pci_register_driver(&pasemi_edac_driver); |
288 | } | 291 | } |
289 | 292 | ||
@@ -298,3 +301,6 @@ module_exit(pasemi_edac_exit); | |||
298 | MODULE_LICENSE("GPL"); | 301 | MODULE_LICENSE("GPL"); |
299 | MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); | 302 | MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); |
300 | MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); | 303 | MODULE_DESCRIPTION("MC support for PA Semi PWRficient memory controller"); |
304 | module_param(edac_op_state, int, 0444); | ||
305 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
306 | |||
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index e25f712f2dc3..9900675e9598 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/pci.h> | 20 | #include <linux/pci.h> |
21 | #include <linux/pci_ids.h> | 21 | #include <linux/pci_ids.h> |
22 | #include <linux/slab.h> | 22 | #include <linux/slab.h> |
23 | #include <linux/edac.h> | ||
23 | #include "edac_core.h" | 24 | #include "edac_core.h" |
24 | 25 | ||
25 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ | 26 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ |
@@ -393,6 +394,9 @@ static struct pci_driver r82600_driver = { | |||
393 | 394 | ||
394 | static int __init r82600_init(void) | 395 | static int __init r82600_init(void) |
395 | { | 396 | { |
397 | /* Ensure that the OPSTATE is set correctly for POLL or NMI */ | ||
398 | opstate_init(); | ||
399 | |||
396 | return pci_register_driver(&r82600_driver); | 400 | return pci_register_driver(&r82600_driver); |
397 | } | 401 | } |
398 | 402 | ||
@@ -412,3 +416,6 @@ MODULE_DESCRIPTION("MC support for Radisys 82600 memory controllers"); | |||
412 | module_param(disable_hardware_scrub, bool, 0644); | 416 | module_param(disable_hardware_scrub, bool, 0644); |
413 | MODULE_PARM_DESC(disable_hardware_scrub, | 417 | MODULE_PARM_DESC(disable_hardware_scrub, |
414 | "If set, disable the chipset's automatic scrub for CEs"); | 418 | "If set, disable the chipset's automatic scrub for CEs"); |
419 | |||
420 | module_param(edac_op_state, int, 0444); | ||
421 | MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI"); | ||
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 40ffd767647d..dc2cec6127d1 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig | |||
@@ -17,6 +17,15 @@ config EDD | |||
17 | obscure configurations. Most disk controller BIOS vendors do | 17 | obscure configurations. Most disk controller BIOS vendors do |
18 | not yet implement this feature. | 18 | not yet implement this feature. |
19 | 19 | ||
20 | config EDD_OFF | ||
21 | bool "Sets default behavior for EDD detection to off" | ||
22 | depends on EDD | ||
23 | default n | ||
24 | help | ||
25 | Say Y if you want EDD disabled by default, even though it is compiled into the | ||
26 | kernel. Say N if you want EDD enabled by default. EDD can be dynamically set | ||
27 | using the kernel parameter 'edd={on|skipmbr|off}'. | ||
28 | |||
20 | config EFI_VARS | 29 | config EFI_VARS |
21 | tristate "EFI Variable Support via sysfs" | 30 | tristate "EFI Variable Support via sysfs" |
22 | depends on EFI | 31 | depends on EFI |
diff --git a/drivers/firmware/dcdbas.c b/drivers/firmware/dcdbas.c index f235940719e7..25918f7dfd0f 100644 --- a/drivers/firmware/dcdbas.c +++ b/drivers/firmware/dcdbas.c | |||
@@ -63,7 +63,7 @@ static void smi_data_buf_free(void) | |||
63 | return; | 63 | return; |
64 | 64 | ||
65 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", | 65 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", |
66 | __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); | 66 | __func__, smi_data_buf_phys_addr, smi_data_buf_size); |
67 | 67 | ||
68 | dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf, | 68 | dma_free_coherent(&dcdbas_pdev->dev, smi_data_buf_size, smi_data_buf, |
69 | smi_data_buf_handle); | 69 | smi_data_buf_handle); |
@@ -92,7 +92,7 @@ static int smi_data_buf_realloc(unsigned long size) | |||
92 | if (!buf) { | 92 | if (!buf) { |
93 | dev_dbg(&dcdbas_pdev->dev, | 93 | dev_dbg(&dcdbas_pdev->dev, |
94 | "%s: failed to allocate memory size %lu\n", | 94 | "%s: failed to allocate memory size %lu\n", |
95 | __FUNCTION__, size); | 95 | __func__, size); |
96 | return -ENOMEM; | 96 | return -ENOMEM; |
97 | } | 97 | } |
98 | /* memory zeroed by dma_alloc_coherent */ | 98 | /* memory zeroed by dma_alloc_coherent */ |
@@ -110,7 +110,7 @@ static int smi_data_buf_realloc(unsigned long size) | |||
110 | smi_data_buf_size = size; | 110 | smi_data_buf_size = size; |
111 | 111 | ||
112 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", | 112 | dev_dbg(&dcdbas_pdev->dev, "%s: phys: %x size: %lu\n", |
113 | __FUNCTION__, smi_data_buf_phys_addr, smi_data_buf_size); | 113 | __func__, smi_data_buf_phys_addr, smi_data_buf_size); |
114 | 114 | ||
115 | return 0; | 115 | return 0; |
116 | } | 116 | } |
@@ -258,7 +258,7 @@ static int smi_request(struct smi_cmd *smi_cmd) | |||
258 | 258 | ||
259 | if (smi_cmd->magic != SMI_CMD_MAGIC) { | 259 | if (smi_cmd->magic != SMI_CMD_MAGIC) { |
260 | dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", | 260 | dev_info(&dcdbas_pdev->dev, "%s: invalid magic value\n", |
261 | __FUNCTION__); | 261 | __func__); |
262 | return -EBADR; | 262 | return -EBADR; |
263 | } | 263 | } |
264 | 264 | ||
@@ -267,7 +267,7 @@ static int smi_request(struct smi_cmd *smi_cmd) | |||
267 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); | 267 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(0)); |
268 | if (smp_processor_id() != 0) { | 268 | if (smp_processor_id() != 0) { |
269 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", | 269 | dev_dbg(&dcdbas_pdev->dev, "%s: failed to get CPU 0\n", |
270 | __FUNCTION__); | 270 | __func__); |
271 | ret = -EBUSY; | 271 | ret = -EBUSY; |
272 | goto out; | 272 | goto out; |
273 | } | 273 | } |
@@ -428,7 +428,7 @@ static int host_control_smi(void) | |||
428 | 428 | ||
429 | default: | 429 | default: |
430 | dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", | 430 | dev_dbg(&dcdbas_pdev->dev, "%s: invalid SMI type %u\n", |
431 | __FUNCTION__, host_control_smi_type); | 431 | __func__, host_control_smi_type); |
432 | return -ENOSYS; | 432 | return -ENOSYS; |
433 | } | 433 | } |
434 | 434 | ||
@@ -456,13 +456,13 @@ static void dcdbas_host_control(void) | |||
456 | host_control_action = HC_ACTION_NONE; | 456 | host_control_action = HC_ACTION_NONE; |
457 | 457 | ||
458 | if (!smi_data_buf) { | 458 | if (!smi_data_buf) { |
459 | dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __FUNCTION__); | 459 | dev_dbg(&dcdbas_pdev->dev, "%s: no SMI buffer\n", __func__); |
460 | return; | 460 | return; |
461 | } | 461 | } |
462 | 462 | ||
463 | if (smi_data_buf_size < sizeof(struct apm_cmd)) { | 463 | if (smi_data_buf_size < sizeof(struct apm_cmd)) { |
464 | dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", | 464 | dev_dbg(&dcdbas_pdev->dev, "%s: SMI buffer too small\n", |
465 | __FUNCTION__); | 465 | __func__); |
466 | return; | 466 | return; |
467 | } | 467 | } |
468 | 468 | ||
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c index 477a3d0e3caf..6a8b1e037e07 100644 --- a/drivers/firmware/dell_rbu.c +++ b/drivers/firmware/dell_rbu.c | |||
@@ -123,7 +123,7 @@ static int create_packet(void *data, size_t length) | |||
123 | if (!newpacket) { | 123 | if (!newpacket) { |
124 | printk(KERN_WARNING | 124 | printk(KERN_WARNING |
125 | "dell_rbu:%s: failed to allocate new " | 125 | "dell_rbu:%s: failed to allocate new " |
126 | "packet\n", __FUNCTION__); | 126 | "packet\n", __func__); |
127 | retval = -ENOMEM; | 127 | retval = -ENOMEM; |
128 | spin_lock(&rbu_data.lock); | 128 | spin_lock(&rbu_data.lock); |
129 | goto out_noalloc; | 129 | goto out_noalloc; |
@@ -152,7 +152,7 @@ static int create_packet(void *data, size_t length) | |||
152 | printk(KERN_WARNING | 152 | printk(KERN_WARNING |
153 | "dell_rbu:%s: failed to allocate " | 153 | "dell_rbu:%s: failed to allocate " |
154 | "invalid_addr_packet_array \n", | 154 | "invalid_addr_packet_array \n", |
155 | __FUNCTION__); | 155 | __func__); |
156 | retval = -ENOMEM; | 156 | retval = -ENOMEM; |
157 | spin_lock(&rbu_data.lock); | 157 | spin_lock(&rbu_data.lock); |
158 | goto out_alloc_packet; | 158 | goto out_alloc_packet; |
@@ -164,7 +164,7 @@ static int create_packet(void *data, size_t length) | |||
164 | if (!packet_data_temp_buf) { | 164 | if (!packet_data_temp_buf) { |
165 | printk(KERN_WARNING | 165 | printk(KERN_WARNING |
166 | "dell_rbu:%s: failed to allocate new " | 166 | "dell_rbu:%s: failed to allocate new " |
167 | "packet\n", __FUNCTION__); | 167 | "packet\n", __func__); |
168 | retval = -ENOMEM; | 168 | retval = -ENOMEM; |
169 | spin_lock(&rbu_data.lock); | 169 | spin_lock(&rbu_data.lock); |
170 | goto out_alloc_packet_array; | 170 | goto out_alloc_packet_array; |
@@ -416,7 +416,7 @@ static int img_update_realloc(unsigned long size) | |||
416 | */ | 416 | */ |
417 | if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { | 417 | if ((size != 0) && (rbu_data.image_update_buffer == NULL)) { |
418 | printk(KERN_ERR "dell_rbu:%s: corruption " | 418 | printk(KERN_ERR "dell_rbu:%s: corruption " |
419 | "check failed\n", __FUNCTION__); | 419 | "check failed\n", __func__); |
420 | return -EINVAL; | 420 | return -EINVAL; |
421 | } | 421 | } |
422 | /* | 422 | /* |
@@ -642,7 +642,7 @@ static ssize_t write_rbu_image_type(struct kobject *kobj, | |||
642 | if (req_firm_rc) { | 642 | if (req_firm_rc) { |
643 | printk(KERN_ERR | 643 | printk(KERN_ERR |
644 | "dell_rbu:%s request_firmware_nowait" | 644 | "dell_rbu:%s request_firmware_nowait" |
645 | " failed %d\n", __FUNCTION__, rc); | 645 | " failed %d\n", __func__, rc); |
646 | rc = -EIO; | 646 | rc = -EIO; |
647 | } else | 647 | } else |
648 | rbu_data.entry_created = 1; | 648 | rbu_data.entry_created = 1; |
@@ -718,7 +718,7 @@ static int __init dcdrbu_init(void) | |||
718 | if (IS_ERR(rbu_device)) { | 718 | if (IS_ERR(rbu_device)) { |
719 | printk(KERN_ERR | 719 | printk(KERN_ERR |
720 | "dell_rbu:%s:platform_device_register_simple " | 720 | "dell_rbu:%s:platform_device_register_simple " |
721 | "failed\n", __FUNCTION__); | 721 | "failed\n", __func__); |
722 | return PTR_ERR(rbu_device); | 722 | return PTR_ERR(rbu_device); |
723 | } | 723 | } |
724 | 724 | ||
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index e03c67dd3e63..f43d6d3cf2fa 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c | |||
@@ -606,7 +606,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) | |||
606 | case 2: | 606 | case 2: |
607 | if ((end - start) < 2) | 607 | if ((end - start) < 2) |
608 | return NULL; | 608 | return NULL; |
609 | item->data.u16 = le16_to_cpu(get_unaligned((__le16*)start)); | 609 | item->data.u16 = get_unaligned_le16(start); |
610 | start = (__u8 *)((__le16 *)start + 1); | 610 | start = (__u8 *)((__le16 *)start + 1); |
611 | return start; | 611 | return start; |
612 | 612 | ||
@@ -614,7 +614,7 @@ static u8 *fetch_item(__u8 *start, __u8 *end, struct hid_item *item) | |||
614 | item->size++; | 614 | item->size++; |
615 | if ((end - start) < 4) | 615 | if ((end - start) < 4) |
616 | return NULL; | 616 | return NULL; |
617 | item->data.u32 = le32_to_cpu(get_unaligned((__le32*)start)); | 617 | item->data.u32 = get_unaligned_le32(start); |
618 | start = (__u8 *)((__le32 *)start + 1); | 618 | start = (__u8 *)((__le32 *)start + 1); |
619 | return start; | 619 | return start; |
620 | } | 620 | } |
@@ -765,7 +765,7 @@ static __inline__ __u32 extract(__u8 *report, unsigned offset, unsigned n) | |||
765 | 765 | ||
766 | report += offset >> 3; /* adjust byte index */ | 766 | report += offset >> 3; /* adjust byte index */ |
767 | offset &= 7; /* now only need bit offset into one byte */ | 767 | offset &= 7; /* now only need bit offset into one byte */ |
768 | x = le64_to_cpu(get_unaligned((__le64 *) report)); | 768 | x = get_unaligned_le64(report); |
769 | x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ | 769 | x = (x >> offset) & ((1ULL << n) - 1); /* extract bit field */ |
770 | return (u32) x; | 770 | return (u32) x; |
771 | } | 771 | } |
diff --git a/drivers/ide/ide-proc.c b/drivers/ide/ide-proc.c index 7b2f3815a838..8d6ad812a014 100644 --- a/drivers/ide/ide-proc.c +++ b/drivers/ide/ide-proc.c | |||
@@ -822,6 +822,7 @@ static int ide_drivers_open(struct inode *inode, struct file *file) | |||
822 | } | 822 | } |
823 | 823 | ||
824 | static const struct file_operations ide_drivers_operations = { | 824 | static const struct file_operations ide_drivers_operations = { |
825 | .owner = THIS_MODULE, | ||
825 | .open = ide_drivers_open, | 826 | .open = ide_drivers_open, |
826 | .read = seq_read, | 827 | .read = seq_read, |
827 | .llseek = seq_lseek, | 828 | .llseek = seq_lseek, |
@@ -830,16 +831,12 @@ static const struct file_operations ide_drivers_operations = { | |||
830 | 831 | ||
831 | void proc_ide_create(void) | 832 | void proc_ide_create(void) |
832 | { | 833 | { |
833 | struct proc_dir_entry *entry; | ||
834 | |||
835 | proc_ide_root = proc_mkdir("ide", NULL); | 834 | proc_ide_root = proc_mkdir("ide", NULL); |
836 | 835 | ||
837 | if (!proc_ide_root) | 836 | if (!proc_ide_root) |
838 | return; | 837 | return; |
839 | 838 | ||
840 | entry = create_proc_entry("drivers", 0, proc_ide_root); | 839 | proc_create("drivers", 0, proc_ide_root, &ide_drivers_operations); |
841 | if (entry) | ||
842 | entry->proc_fops = &ide_drivers_operations; | ||
843 | } | 840 | } |
844 | 841 | ||
845 | void proc_ide_destroy(void) | 842 | void proc_ide_destroy(void) |
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 4e3128ff73c1..fe78f7d25099 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/dma-mapping.h> | 38 | #include <linux/dma-mapping.h> |
39 | #include <linux/sched.h> | 39 | #include <linux/sched.h> |
40 | #include <linux/hugetlb.h> | 40 | #include <linux/hugetlb.h> |
41 | #include <linux/dma-attrs.h> | ||
41 | 42 | ||
42 | #include "uverbs.h" | 43 | #include "uverbs.h" |
43 | 44 | ||
@@ -72,9 +73,10 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d | |||
72 | * @addr: userspace virtual address to start at | 73 | * @addr: userspace virtual address to start at |
73 | * @size: length of region to pin | 74 | * @size: length of region to pin |
74 | * @access: IB_ACCESS_xxx flags for memory being pinned | 75 | * @access: IB_ACCESS_xxx flags for memory being pinned |
76 | * @dmasync: flush in-flight DMA when the memory region is written | ||
75 | */ | 77 | */ |
76 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | 78 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, |
77 | size_t size, int access) | 79 | size_t size, int access, int dmasync) |
78 | { | 80 | { |
79 | struct ib_umem *umem; | 81 | struct ib_umem *umem; |
80 | struct page **page_list; | 82 | struct page **page_list; |
@@ -87,6 +89,10 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
87 | int ret; | 89 | int ret; |
88 | int off; | 90 | int off; |
89 | int i; | 91 | int i; |
92 | DEFINE_DMA_ATTRS(attrs); | ||
93 | |||
94 | if (dmasync) | ||
95 | dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs); | ||
90 | 96 | ||
91 | if (!can_do_mlock()) | 97 | if (!can_do_mlock()) |
92 | return ERR_PTR(-EPERM); | 98 | return ERR_PTR(-EPERM); |
@@ -174,10 +180,11 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | |||
174 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); | 180 | sg_set_page(&chunk->page_list[i], page_list[i + off], PAGE_SIZE, 0); |
175 | } | 181 | } |
176 | 182 | ||
177 | chunk->nmap = ib_dma_map_sg(context->device, | 183 | chunk->nmap = ib_dma_map_sg_attrs(context->device, |
178 | &chunk->page_list[0], | 184 | &chunk->page_list[0], |
179 | chunk->nents, | 185 | chunk->nents, |
180 | DMA_BIDIRECTIONAL); | 186 | DMA_BIDIRECTIONAL, |
187 | &attrs); | ||
181 | if (chunk->nmap <= 0) { | 188 | if (chunk->nmap <= 0) { |
182 | for (i = 0; i < chunk->nents; ++i) | 189 | for (i = 0; i < chunk->nents; ++i) |
183 | put_page(sg_page(&chunk->page_list[i])); | 190 | put_page(sg_page(&chunk->page_list[i])); |
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 6af2c0f79a67..2acf9b62cf99 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c | |||
@@ -452,7 +452,7 @@ static struct ib_mr *c2_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
452 | return ERR_PTR(-ENOMEM); | 452 | return ERR_PTR(-ENOMEM); |
453 | c2mr->pd = c2pd; | 453 | c2mr->pd = c2pd; |
454 | 454 | ||
455 | c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 455 | c2mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
456 | if (IS_ERR(c2mr->umem)) { | 456 | if (IS_ERR(c2mr->umem)) { |
457 | err = PTR_ERR(c2mr->umem); | 457 | err = PTR_ERR(c2mr->umem); |
458 | kfree(c2mr); | 458 | kfree(c2mr); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index ab4695c1dd56..e343e9e64844 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c | |||
@@ -602,7 +602,7 @@ static struct ib_mr *iwch_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
602 | if (!mhp) | 602 | if (!mhp) |
603 | return ERR_PTR(-ENOMEM); | 603 | return ERR_PTR(-ENOMEM); |
604 | 604 | ||
605 | mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 605 | mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
606 | if (IS_ERR(mhp->umem)) { | 606 | if (IS_ERR(mhp->umem)) { |
607 | err = PTR_ERR(mhp->umem); | 607 | err = PTR_ERR(mhp->umem); |
608 | kfree(mhp); | 608 | kfree(mhp); |
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 46ae4eb2c4e1..f974367cad40 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c | |||
@@ -323,7 +323,7 @@ struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
323 | } | 323 | } |
324 | 324 | ||
325 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, | 325 | e_mr->umem = ib_umem_get(pd->uobject->context, start, length, |
326 | mr_access_flags); | 326 | mr_access_flags, 0); |
327 | if (IS_ERR(e_mr->umem)) { | 327 | if (IS_ERR(e_mr->umem)) { |
328 | ib_mr = (void *)e_mr->umem; | 328 | ib_mr = (void *)e_mr->umem; |
329 | goto reg_user_mr_exit1; | 329 | goto reg_user_mr_exit1; |
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c index db4ba92f79fc..9d343b7c2f3b 100644 --- a/drivers/infiniband/hw/ipath/ipath_mr.c +++ b/drivers/infiniband/hw/ipath/ipath_mr.c | |||
@@ -195,7 +195,8 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
195 | goto bail; | 195 | goto bail; |
196 | } | 196 | } |
197 | 197 | ||
198 | umem = ib_umem_get(pd->uobject->context, start, length, mr_access_flags); | 198 | umem = ib_umem_get(pd->uobject->context, start, length, |
199 | mr_access_flags, 0); | ||
199 | if (IS_ERR(umem)) | 200 | if (IS_ERR(umem)) |
200 | return (void *) umem; | 201 | return (void *) umem; |
201 | 202 | ||
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 5e570bb0bb6f..e3dddfc687f9 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -137,7 +137,7 @@ static int mlx4_ib_get_cq_umem(struct mlx4_ib_dev *dev, struct ib_ucontext *cont | |||
137 | int err; | 137 | int err; |
138 | 138 | ||
139 | *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), | 139 | *umem = ib_umem_get(context, buf_addr, cqe * sizeof (struct mlx4_cqe), |
140 | IB_ACCESS_LOCAL_WRITE); | 140 | IB_ACCESS_LOCAL_WRITE, 1); |
141 | if (IS_ERR(*umem)) | 141 | if (IS_ERR(*umem)) |
142 | return PTR_ERR(*umem); | 142 | return PTR_ERR(*umem); |
143 | 143 | ||
diff --git a/drivers/infiniband/hw/mlx4/doorbell.c b/drivers/infiniband/hw/mlx4/doorbell.c index 8e342cc9baec..8aee4233b388 100644 --- a/drivers/infiniband/hw/mlx4/doorbell.c +++ b/drivers/infiniband/hw/mlx4/doorbell.c | |||
@@ -63,7 +63,7 @@ int mlx4_ib_db_map_user(struct mlx4_ib_ucontext *context, unsigned long virt, | |||
63 | page->user_virt = (virt & PAGE_MASK); | 63 | page->user_virt = (virt & PAGE_MASK); |
64 | page->refcnt = 0; | 64 | page->refcnt = 0; |
65 | page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, | 65 | page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK, |
66 | PAGE_SIZE, 0); | 66 | PAGE_SIZE, 0, 0); |
67 | if (IS_ERR(page->umem)) { | 67 | if (IS_ERR(page->umem)) { |
68 | err = PTR_ERR(page->umem); | 68 | err = PTR_ERR(page->umem); |
69 | kfree(page); | 69 | kfree(page); |
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index fe2c2e94a5f8..68e92485fc76 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c | |||
@@ -132,7 +132,8 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
132 | if (!mr) | 132 | if (!mr) |
133 | return ERR_PTR(-ENOMEM); | 133 | return ERR_PTR(-ENOMEM); |
134 | 134 | ||
135 | mr->umem = ib_umem_get(pd->uobject->context, start, length, access_flags); | 135 | mr->umem = ib_umem_get(pd->uobject->context, start, length, |
136 | access_flags, 0); | ||
136 | if (IS_ERR(mr->umem)) { | 137 | if (IS_ERR(mr->umem)) { |
137 | err = PTR_ERR(mr->umem); | 138 | err = PTR_ERR(mr->umem); |
138 | goto err_free; | 139 | goto err_free; |
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 80ea8b9e7761..8e02ecfec188 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -482,7 +482,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
482 | goto err; | 482 | goto err; |
483 | 483 | ||
484 | qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, | 484 | qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, |
485 | qp->buf_size, 0); | 485 | qp->buf_size, 0, 0); |
486 | if (IS_ERR(qp->umem)) { | 486 | if (IS_ERR(qp->umem)) { |
487 | err = PTR_ERR(qp->umem); | 487 | err = PTR_ERR(qp->umem); |
488 | goto err; | 488 | goto err; |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index 204619702f9d..12d6bc6f8007 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -109,7 +109,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, | |||
109 | } | 109 | } |
110 | 110 | ||
111 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, | 111 | srq->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr, |
112 | buf_size, 0); | 112 | buf_size, 0, 0); |
113 | if (IS_ERR(srq->umem)) { | 113 | if (IS_ERR(srq->umem)) { |
114 | err = PTR_ERR(srq->umem); | 114 | err = PTR_ERR(srq->umem); |
115 | goto err_srq; | 115 | goto err_srq; |
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 696e1f302332..2a9f460cf061 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c | |||
@@ -1006,17 +1006,23 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
1006 | struct mthca_dev *dev = to_mdev(pd->device); | 1006 | struct mthca_dev *dev = to_mdev(pd->device); |
1007 | struct ib_umem_chunk *chunk; | 1007 | struct ib_umem_chunk *chunk; |
1008 | struct mthca_mr *mr; | 1008 | struct mthca_mr *mr; |
1009 | struct mthca_reg_mr ucmd; | ||
1009 | u64 *pages; | 1010 | u64 *pages; |
1010 | int shift, n, len; | 1011 | int shift, n, len; |
1011 | int i, j, k; | 1012 | int i, j, k; |
1012 | int err = 0; | 1013 | int err = 0; |
1013 | int write_mtt_size; | 1014 | int write_mtt_size; |
1014 | 1015 | ||
1016 | if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd)) | ||
1017 | return ERR_PTR(-EFAULT); | ||
1018 | |||
1015 | mr = kmalloc(sizeof *mr, GFP_KERNEL); | 1019 | mr = kmalloc(sizeof *mr, GFP_KERNEL); |
1016 | if (!mr) | 1020 | if (!mr) |
1017 | return ERR_PTR(-ENOMEM); | 1021 | return ERR_PTR(-ENOMEM); |
1018 | 1022 | ||
1019 | mr->umem = ib_umem_get(pd->uobject->context, start, length, acc); | 1023 | mr->umem = ib_umem_get(pd->uobject->context, start, length, acc, |
1024 | ucmd.mr_attrs & MTHCA_MR_DMASYNC); | ||
1025 | |||
1020 | if (IS_ERR(mr->umem)) { | 1026 | if (IS_ERR(mr->umem)) { |
1021 | err = PTR_ERR(mr->umem); | 1027 | err = PTR_ERR(mr->umem); |
1022 | goto err; | 1028 | goto err; |
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h index 02cc0a766f3a..f8cb3b664d37 100644 --- a/drivers/infiniband/hw/mthca/mthca_user.h +++ b/drivers/infiniband/hw/mthca/mthca_user.h | |||
@@ -41,7 +41,7 @@ | |||
41 | * Increment this value if any changes that break userspace ABI | 41 | * Increment this value if any changes that break userspace ABI |
42 | * compatibility are made. | 42 | * compatibility are made. |
43 | */ | 43 | */ |
44 | #define MTHCA_UVERBS_ABI_VERSION 1 | 44 | #define MTHCA_UVERBS_ABI_VERSION 2 |
45 | 45 | ||
46 | /* | 46 | /* |
47 | * Make sure that all structs defined in this file remain laid out so | 47 | * Make sure that all structs defined in this file remain laid out so |
@@ -61,6 +61,14 @@ struct mthca_alloc_pd_resp { | |||
61 | __u32 reserved; | 61 | __u32 reserved; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | struct mthca_reg_mr { | ||
65 | __u32 mr_attrs; | ||
66 | #define MTHCA_MR_DMASYNC 0x1 | ||
67 | /* mark the memory region with a DMA attribute that causes | ||
68 | * in-flight DMA to be flushed when the region is written to */ | ||
69 | __u32 reserved; | ||
70 | }; | ||
71 | |||
64 | struct mthca_create_cq { | 72 | struct mthca_create_cq { |
65 | __u32 lkey; | 73 | __u32 lkey; |
66 | __u32 pdn; | 74 | __u32 pdn; |
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index ee74f7c7a6da..9ae397a0ff7e 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c | |||
@@ -2377,7 +2377,7 @@ static struct ib_mr *nes_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
2377 | u8 single_page = 1; | 2377 | u8 single_page = 1; |
2378 | u8 stag_key; | 2378 | u8 stag_key; |
2379 | 2379 | ||
2380 | region = ib_umem_get(pd->uobject->context, start, length, acc); | 2380 | region = ib_umem_get(pd->uobject->context, start, length, acc, 0); |
2381 | if (IS_ERR(region)) { | 2381 | if (IS_ERR(region)) { |
2382 | return (struct ib_mr *)region; | 2382 | return (struct ib_mr *)region; |
2383 | } | 2383 | } |
diff --git a/drivers/input/input.c b/drivers/input/input.c index f02c242c3114..27006fc18305 100644 --- a/drivers/input/input.c +++ b/drivers/input/input.c | |||
@@ -898,30 +898,26 @@ static int __init input_proc_init(void) | |||
898 | { | 898 | { |
899 | struct proc_dir_entry *entry; | 899 | struct proc_dir_entry *entry; |
900 | 900 | ||
901 | proc_bus_input_dir = proc_mkdir("input", proc_bus); | 901 | proc_bus_input_dir = proc_mkdir("bus/input", NULL); |
902 | if (!proc_bus_input_dir) | 902 | if (!proc_bus_input_dir) |
903 | return -ENOMEM; | 903 | return -ENOMEM; |
904 | 904 | ||
905 | proc_bus_input_dir->owner = THIS_MODULE; | 905 | proc_bus_input_dir->owner = THIS_MODULE; |
906 | 906 | ||
907 | entry = create_proc_entry("devices", 0, proc_bus_input_dir); | 907 | entry = proc_create("devices", 0, proc_bus_input_dir, |
908 | &input_devices_fileops); | ||
908 | if (!entry) | 909 | if (!entry) |
909 | goto fail1; | 910 | goto fail1; |
910 | 911 | ||
911 | entry->owner = THIS_MODULE; | 912 | entry = proc_create("handlers", 0, proc_bus_input_dir, |
912 | entry->proc_fops = &input_devices_fileops; | 913 | &input_handlers_fileops); |
913 | |||
914 | entry = create_proc_entry("handlers", 0, proc_bus_input_dir); | ||
915 | if (!entry) | 914 | if (!entry) |
916 | goto fail2; | 915 | goto fail2; |
917 | 916 | ||
918 | entry->owner = THIS_MODULE; | ||
919 | entry->proc_fops = &input_handlers_fileops; | ||
920 | |||
921 | return 0; | 917 | return 0; |
922 | 918 | ||
923 | fail2: remove_proc_entry("devices", proc_bus_input_dir); | 919 | fail2: remove_proc_entry("devices", proc_bus_input_dir); |
924 | fail1: remove_proc_entry("input", proc_bus); | 920 | fail1: remove_proc_entry("bus/input", NULL); |
925 | return -ENOMEM; | 921 | return -ENOMEM; |
926 | } | 922 | } |
927 | 923 | ||
@@ -929,7 +925,7 @@ static void input_proc_exit(void) | |||
929 | { | 925 | { |
930 | remove_proc_entry("devices", proc_bus_input_dir); | 926 | remove_proc_entry("devices", proc_bus_input_dir); |
931 | remove_proc_entry("handlers", proc_bus_input_dir); | 927 | remove_proc_entry("handlers", proc_bus_input_dir); |
932 | remove_proc_entry("input", proc_bus); | 928 | remove_proc_entry("bus/input", NULL); |
933 | } | 929 | } |
934 | 930 | ||
935 | #else /* !CONFIG_PROC_FS */ | 931 | #else /* !CONFIG_PROC_FS */ |
diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 1d759f6f8076..55c1134d6137 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c | |||
@@ -528,9 +528,9 @@ static void aiptek_irq(struct urb *urb) | |||
528 | (aiptek->curSetting.pointerMode)) { | 528 | (aiptek->curSetting.pointerMode)) { |
529 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; | 529 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; |
530 | } else { | 530 | } else { |
531 | x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 531 | x = get_unaligned_le16(data + 1); |
532 | y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); | 532 | y = get_unaligned_le16(data + 3); |
533 | z = le16_to_cpu(get_unaligned((__le16 *) (data + 6))); | 533 | z = get_unaligned_le16(data + 6); |
534 | 534 | ||
535 | dv = (data[5] & 0x01) != 0 ? 1 : 0; | 535 | dv = (data[5] & 0x01) != 0 ? 1 : 0; |
536 | p = (data[5] & 0x02) != 0 ? 1 : 0; | 536 | p = (data[5] & 0x02) != 0 ? 1 : 0; |
@@ -613,8 +613,8 @@ static void aiptek_irq(struct urb *urb) | |||
613 | (aiptek->curSetting.pointerMode)) { | 613 | (aiptek->curSetting.pointerMode)) { |
614 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; | 614 | aiptek->diagnostic = AIPTEK_DIAGNOSTIC_TOOL_DISALLOWED; |
615 | } else { | 615 | } else { |
616 | x = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 616 | x = get_unaligned_le16(data + 1); |
617 | y = le16_to_cpu(get_unaligned((__le16 *) (data + 3))); | 617 | y = get_unaligned_le16(data + 3); |
618 | 618 | ||
619 | jitterable = data[5] & 0x1c; | 619 | jitterable = data[5] & 0x1c; |
620 | 620 | ||
@@ -679,7 +679,7 @@ static void aiptek_irq(struct urb *urb) | |||
679 | pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0; | 679 | pck = (data[1] & aiptek->curSetting.stylusButtonUpper) != 0 ? 1 : 0; |
680 | 680 | ||
681 | macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1; | 681 | macro = dv && p && tip && !(data[3] & 1) ? (data[3] >> 1) : -1; |
682 | z = le16_to_cpu(get_unaligned((__le16 *) (data + 4))); | 682 | z = get_unaligned_le16(data + 4); |
683 | 683 | ||
684 | if (dv) { | 684 | if (dv) { |
685 | /* If the selected tool changed, reset the old | 685 | /* If the selected tool changed, reset the old |
@@ -757,7 +757,7 @@ static void aiptek_irq(struct urb *urb) | |||
757 | * hat switches (which just so happen to be the macroKeys.) | 757 | * hat switches (which just so happen to be the macroKeys.) |
758 | */ | 758 | */ |
759 | else if (data[0] == 6) { | 759 | else if (data[0] == 6) { |
760 | macro = le16_to_cpu(get_unaligned((__le16 *) (data + 1))); | 760 | macro = get_unaligned_le16(data + 1); |
761 | if (macro > 0) { | 761 | if (macro > 0) { |
762 | input_report_key(inputdev, macroKeyEvents[macro - 1], | 762 | input_report_key(inputdev, macroKeyEvents[macro - 1], |
763 | 0); | 763 | 0); |
@@ -952,7 +952,7 @@ aiptek_query(struct aiptek *aiptek, unsigned char command, unsigned char data) | |||
952 | buf[0], buf[1], buf[2]); | 952 | buf[0], buf[1], buf[2]); |
953 | ret = -EIO; | 953 | ret = -EIO; |
954 | } else { | 954 | } else { |
955 | ret = le16_to_cpu(get_unaligned((__le16 *) (buf + 1))); | 955 | ret = get_unaligned_le16(buf + 1); |
956 | } | 956 | } |
957 | kfree(buf); | 957 | kfree(buf); |
958 | return ret; | 958 | return ret; |
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index f66ca215cdec..c5a8661a1baa 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c | |||
@@ -245,11 +245,11 @@ static void parse_hid_report_descriptor(struct gtco *device, char * report, | |||
245 | data = report[i]; | 245 | data = report[i]; |
246 | break; | 246 | break; |
247 | case 2: | 247 | case 2: |
248 | data16 = le16_to_cpu(get_unaligned((__le16 *)&report[i])); | 248 | data16 = get_unaligned_le16(&report[i]); |
249 | break; | 249 | break; |
250 | case 3: | 250 | case 3: |
251 | size = 4; | 251 | size = 4; |
252 | data32 = le32_to_cpu(get_unaligned((__le32 *)&report[i])); | 252 | data32 = get_unaligned_le32(&report[i]); |
253 | break; | 253 | break; |
254 | } | 254 | } |
255 | 255 | ||
@@ -695,10 +695,10 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
695 | /* Fall thru */ | 695 | /* Fall thru */ |
696 | case 1: | 696 | case 1: |
697 | /* All reports have X and Y coords in the same place */ | 697 | /* All reports have X and Y coords in the same place */ |
698 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); | 698 | val = get_unaligned_le16(&device->buffer[1]); |
699 | input_report_abs(inputdev, ABS_X, val); | 699 | input_report_abs(inputdev, ABS_X, val); |
700 | 700 | ||
701 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); | 701 | val = get_unaligned_le16(&device->buffer[3]); |
702 | input_report_abs(inputdev, ABS_Y, val); | 702 | input_report_abs(inputdev, ABS_Y, val); |
703 | 703 | ||
704 | /* Ditto for proximity bit */ | 704 | /* Ditto for proximity bit */ |
@@ -762,7 +762,7 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
762 | le_buffer[1] = (u8)(device->buffer[4] >> 1); | 762 | le_buffer[1] = (u8)(device->buffer[4] >> 1); |
763 | le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); | 763 | le_buffer[1] |= (u8)((device->buffer[5] & 0x1) << 7); |
764 | 764 | ||
765 | val = le16_to_cpu(get_unaligned((__le16 *)le_buffer)); | 765 | val = get_unaligned_le16(le_buffer); |
766 | input_report_abs(inputdev, ABS_Y, val); | 766 | input_report_abs(inputdev, ABS_Y, val); |
767 | 767 | ||
768 | /* | 768 | /* |
@@ -772,10 +772,10 @@ static void gtco_urb_callback(struct urb *urbinfo) | |||
772 | buttonbyte = device->buffer[5] >> 1; | 772 | buttonbyte = device->buffer[5] >> 1; |
773 | } else { | 773 | } else { |
774 | 774 | ||
775 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[1])); | 775 | val = get_unaligned_le16(&device->buffer[1]); |
776 | input_report_abs(inputdev, ABS_X, val); | 776 | input_report_abs(inputdev, ABS_X, val); |
777 | 777 | ||
778 | val = le16_to_cpu(get_unaligned((__le16 *)&device->buffer[3])); | 778 | val = get_unaligned_le16(&device->buffer[3]); |
779 | input_report_abs(inputdev, ABS_Y, val); | 779 | input_report_abs(inputdev, ABS_Y, val); |
780 | 780 | ||
781 | buttonbyte = device->buffer[5]; | 781 | buttonbyte = device->buffer[5]; |
diff --git a/drivers/input/tablet/kbtab.c b/drivers/input/tablet/kbtab.c index 1182fc133167..f23f5a97fb38 100644 --- a/drivers/input/tablet/kbtab.c +++ b/drivers/input/tablet/kbtab.c | |||
@@ -63,8 +63,8 @@ static void kbtab_irq(struct urb *urb) | |||
63 | goto exit; | 63 | goto exit; |
64 | } | 64 | } |
65 | 65 | ||
66 | kbtab->x = le16_to_cpu(get_unaligned((__le16 *) &data[1])); | 66 | kbtab->x = get_unaligned_le16(&data[1]); |
67 | kbtab->y = le16_to_cpu(get_unaligned((__le16 *) &data[3])); | 67 | kbtab->y = get_unaligned_le16(&data[3]); |
68 | 68 | ||
69 | kbtab->pressure = (data[5]); | 69 | kbtab->pressure = (data[5]); |
70 | 70 | ||
diff --git a/drivers/isdn/capi/kcapi_proc.c b/drivers/isdn/capi/kcapi_proc.c index 845a797b0030..c29208bd7521 100644 --- a/drivers/isdn/capi/kcapi_proc.c +++ b/drivers/isdn/capi/kcapi_proc.c | |||
@@ -114,6 +114,7 @@ static int seq_contrstats_open(struct inode *inode, struct file *file) | |||
114 | } | 114 | } |
115 | 115 | ||
116 | static const struct file_operations proc_controller_ops = { | 116 | static const struct file_operations proc_controller_ops = { |
117 | .owner = THIS_MODULE, | ||
117 | .open = seq_controller_open, | 118 | .open = seq_controller_open, |
118 | .read = seq_read, | 119 | .read = seq_read, |
119 | .llseek = seq_lseek, | 120 | .llseek = seq_lseek, |
@@ -121,6 +122,7 @@ static const struct file_operations proc_controller_ops = { | |||
121 | }; | 122 | }; |
122 | 123 | ||
123 | static const struct file_operations proc_contrstats_ops = { | 124 | static const struct file_operations proc_contrstats_ops = { |
125 | .owner = THIS_MODULE, | ||
124 | .open = seq_contrstats_open, | 126 | .open = seq_contrstats_open, |
125 | .read = seq_read, | 127 | .read = seq_read, |
126 | .llseek = seq_lseek, | 128 | .llseek = seq_lseek, |
@@ -219,6 +221,7 @@ seq_applstats_open(struct inode *inode, struct file *file) | |||
219 | } | 221 | } |
220 | 222 | ||
221 | static const struct file_operations proc_applications_ops = { | 223 | static const struct file_operations proc_applications_ops = { |
224 | .owner = THIS_MODULE, | ||
222 | .open = seq_applications_open, | 225 | .open = seq_applications_open, |
223 | .read = seq_read, | 226 | .read = seq_read, |
224 | .llseek = seq_lseek, | 227 | .llseek = seq_lseek, |
@@ -226,21 +229,13 @@ static const struct file_operations proc_applications_ops = { | |||
226 | }; | 229 | }; |
227 | 230 | ||
228 | static const struct file_operations proc_applstats_ops = { | 231 | static const struct file_operations proc_applstats_ops = { |
232 | .owner = THIS_MODULE, | ||
229 | .open = seq_applstats_open, | 233 | .open = seq_applstats_open, |
230 | .read = seq_read, | 234 | .read = seq_read, |
231 | .llseek = seq_lseek, | 235 | .llseek = seq_lseek, |
232 | .release = seq_release, | 236 | .release = seq_release, |
233 | }; | 237 | }; |
234 | 238 | ||
235 | static void | ||
236 | create_seq_entry(char *name, mode_t mode, const struct file_operations *f) | ||
237 | { | ||
238 | struct proc_dir_entry *entry; | ||
239 | entry = create_proc_entry(name, mode, NULL); | ||
240 | if (entry) | ||
241 | entry->proc_fops = f; | ||
242 | } | ||
243 | |||
244 | // --------------------------------------------------------------------------- | 239 | // --------------------------------------------------------------------------- |
245 | 240 | ||
246 | static void *capi_driver_start(struct seq_file *seq, loff_t *pos) | 241 | static void *capi_driver_start(struct seq_file *seq, loff_t *pos) |
@@ -283,6 +278,7 @@ seq_capi_driver_open(struct inode *inode, struct file *file) | |||
283 | } | 278 | } |
284 | 279 | ||
285 | static const struct file_operations proc_driver_ops = { | 280 | static const struct file_operations proc_driver_ops = { |
281 | .owner = THIS_MODULE, | ||
286 | .open = seq_capi_driver_open, | 282 | .open = seq_capi_driver_open, |
287 | .read = seq_read, | 283 | .read = seq_read, |
288 | .llseek = seq_lseek, | 284 | .llseek = seq_lseek, |
@@ -296,11 +292,11 @@ kcapi_proc_init(void) | |||
296 | { | 292 | { |
297 | proc_mkdir("capi", NULL); | 293 | proc_mkdir("capi", NULL); |
298 | proc_mkdir("capi/controllers", NULL); | 294 | proc_mkdir("capi/controllers", NULL); |
299 | create_seq_entry("capi/controller", 0, &proc_controller_ops); | 295 | proc_create("capi/controller", 0, NULL, &proc_controller_ops); |
300 | create_seq_entry("capi/contrstats", 0, &proc_contrstats_ops); | 296 | proc_create("capi/contrstats", 0, NULL, &proc_contrstats_ops); |
301 | create_seq_entry("capi/applications", 0, &proc_applications_ops); | 297 | proc_create("capi/applications", 0, NULL, &proc_applications_ops); |
302 | create_seq_entry("capi/applstats", 0, &proc_applstats_ops); | 298 | proc_create("capi/applstats", 0, NULL, &proc_applstats_ops); |
303 | create_seq_entry("capi/driver", 0, &proc_driver_ops); | 299 | proc_create("capi/driver", 0, NULL, &proc_driver_ops); |
304 | } | 300 | } |
305 | 301 | ||
306 | void __exit | 302 | void __exit |
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c index 4fd4c46892e3..8b256a617c8a 100644 --- a/drivers/isdn/divert/divert_procfs.c +++ b/drivers/isdn/divert/divert_procfs.c | |||
@@ -288,13 +288,12 @@ divert_dev_init(void) | |||
288 | isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); | 288 | isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); |
289 | if (!isdn_proc_entry) | 289 | if (!isdn_proc_entry) |
290 | return (-1); | 290 | return (-1); |
291 | isdn_divert_entry = create_proc_entry("divert", S_IFREG | S_IRUGO, isdn_proc_entry); | 291 | isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO, |
292 | isdn_proc_entry, &isdn_fops); | ||
292 | if (!isdn_divert_entry) { | 293 | if (!isdn_divert_entry) { |
293 | remove_proc_entry("isdn", init_net.proc_net); | 294 | remove_proc_entry("isdn", init_net.proc_net); |
294 | return (-1); | 295 | return (-1); |
295 | } | 296 | } |
296 | isdn_divert_entry->proc_fops = &isdn_fops; | ||
297 | isdn_divert_entry->owner = THIS_MODULE; | ||
298 | #endif /* CONFIG_PROC_FS */ | 297 | #endif /* CONFIG_PROC_FS */ |
299 | 298 | ||
300 | return (0); | 299 | return (0); |
diff --git a/drivers/isdn/hardware/eicon/divasproc.c b/drivers/isdn/hardware/eicon/divasproc.c index 0632a2606998..fae895828a17 100644 --- a/drivers/isdn/hardware/eicon/divasproc.c +++ b/drivers/isdn/hardware/eicon/divasproc.c | |||
@@ -125,15 +125,11 @@ static const struct file_operations divas_fops = { | |||
125 | 125 | ||
126 | int create_divas_proc(void) | 126 | int create_divas_proc(void) |
127 | { | 127 | { |
128 | divas_proc_entry = create_proc_entry(divas_proc_name, | 128 | proc_create(divas_proc_name, S_IFREG | S_IRUGO, proc_net_eicon, |
129 | S_IFREG | S_IRUGO, | 129 | &divas_fops); |
130 | proc_net_eicon); | ||
131 | if (!divas_proc_entry) | 130 | if (!divas_proc_entry) |
132 | return (0); | 131 | return (0); |
133 | 132 | ||
134 | divas_proc_entry->proc_fops = &divas_fops; | ||
135 | divas_proc_entry->owner = THIS_MODULE; | ||
136 | |||
137 | return (1); | 133 | return (1); |
138 | } | 134 | } |
139 | 135 | ||
diff --git a/drivers/isdn/hysdn/hysdn_procconf.c b/drivers/isdn/hysdn/hysdn_procconf.c index 27d890b48f88..877be9922c3d 100644 --- a/drivers/isdn/hysdn/hysdn_procconf.c +++ b/drivers/isdn/hysdn/hysdn_procconf.c | |||
@@ -370,6 +370,7 @@ hysdn_conf_close(struct inode *ino, struct file *filep) | |||
370 | /******************************************************/ | 370 | /******************************************************/ |
371 | static const struct file_operations conf_fops = | 371 | static const struct file_operations conf_fops = |
372 | { | 372 | { |
373 | .owner = THIS_MODULE, | ||
373 | .llseek = no_llseek, | 374 | .llseek = no_llseek, |
374 | .read = hysdn_conf_read, | 375 | .read = hysdn_conf_read, |
375 | .write = hysdn_conf_write, | 376 | .write = hysdn_conf_write, |
@@ -402,11 +403,9 @@ hysdn_procconf_init(void) | |||
402 | while (card) { | 403 | while (card) { |
403 | 404 | ||
404 | sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); | 405 | sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid); |
405 | if ((card->procconf = (void *) create_proc_entry(conf_name, | 406 | if ((card->procconf = (void *) proc_create(conf_name, |
406 | S_IFREG | S_IRUGO | S_IWUSR, | 407 | S_IFREG | S_IRUGO | S_IWUSR, |
407 | hysdn_proc_entry)) != NULL) { | 408 | hysdn_proc_entry)) != NULL) { |
408 | ((struct proc_dir_entry *) card->procconf)->proc_fops = &conf_fops; | ||
409 | ((struct proc_dir_entry *) card->procconf)->owner = THIS_MODULE; | ||
410 | hysdn_proclog_init(card); /* init the log file entry */ | 409 | hysdn_proclog_init(card); /* init the log file entry */ |
411 | } | 410 | } |
412 | card = card->next; /* next entry */ | 411 | card = card->next; /* next entry */ |
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c index 27b3991fb0ec..8991d2c8ee4a 100644 --- a/drivers/isdn/hysdn/hysdn_proclog.c +++ b/drivers/isdn/hysdn/hysdn_proclog.c | |||
@@ -380,6 +380,7 @@ hysdn_log_poll(struct file *file, poll_table * wait) | |||
380 | /**************************************************/ | 380 | /**************************************************/ |
381 | static const struct file_operations log_fops = | 381 | static const struct file_operations log_fops = |
382 | { | 382 | { |
383 | .owner = THIS_MODULE, | ||
383 | .llseek = no_llseek, | 384 | .llseek = no_llseek, |
384 | .read = hysdn_log_read, | 385 | .read = hysdn_log_read, |
385 | .write = hysdn_log_write, | 386 | .write = hysdn_log_write, |
@@ -402,10 +403,9 @@ hysdn_proclog_init(hysdn_card * card) | |||
402 | 403 | ||
403 | if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { | 404 | if ((pd = kzalloc(sizeof(struct procdata), GFP_KERNEL)) != NULL) { |
404 | sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); | 405 | sprintf(pd->log_name, "%s%d", PROC_LOG_BASENAME, card->myid); |
405 | if ((pd->log = create_proc_entry(pd->log_name, S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry)) != NULL) { | 406 | pd->log = proc_create(pd->log_name, |
406 | pd->log->proc_fops = &log_fops; | 407 | S_IFREG | S_IRUGO | S_IWUSR, hysdn_proc_entry, |
407 | pd->log->owner = THIS_MODULE; | 408 | &log_fops); |
408 | } | ||
409 | 409 | ||
410 | init_waitqueue_head(&(pd->rd_queue)); | 410 | init_waitqueue_head(&(pd->rd_queue)); |
411 | 411 | ||
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c index ac05a928f764..b3c54be74556 100644 --- a/drivers/leds/led-class.c +++ b/drivers/leds/led-class.c | |||
@@ -105,7 +105,7 @@ int led_classdev_register(struct device *parent, struct led_classdev *led_cdev) | |||
105 | 105 | ||
106 | led_cdev->dev = device_create(leds_class, parent, 0, "%s", | 106 | led_cdev->dev = device_create(leds_class, parent, 0, "%s", |
107 | led_cdev->name); | 107 | led_cdev->name); |
108 | if (unlikely(IS_ERR(led_cdev->dev))) | 108 | if (IS_ERR(led_cdev->dev)) |
109 | return PTR_ERR(led_cdev->dev); | 109 | return PTR_ERR(led_cdev->dev); |
110 | 110 | ||
111 | dev_set_drvdata(led_cdev->dev, led_cdev); | 111 | dev_set_drvdata(led_cdev->dev, led_cdev); |
diff --git a/drivers/mca/mca-legacy.c b/drivers/mca/mca-legacy.c index 0c7bfa74c8ef..494f0c2001f5 100644 --- a/drivers/mca/mca-legacy.c +++ b/drivers/mca/mca-legacy.c | |||
@@ -282,24 +282,6 @@ void mca_set_adapter_name(int slot, char* name) | |||
282 | EXPORT_SYMBOL(mca_set_adapter_name); | 282 | EXPORT_SYMBOL(mca_set_adapter_name); |
283 | 283 | ||
284 | /** | 284 | /** |
285 | * mca_is_adapter_used - check if claimed by driver | ||
286 | * @slot: slot to check | ||
287 | * | ||
288 | * Returns 1 if the slot has been claimed by a driver | ||
289 | */ | ||
290 | |||
291 | int mca_is_adapter_used(int slot) | ||
292 | { | ||
293 | struct mca_device *mca_dev = mca_find_device_by_slot(slot); | ||
294 | |||
295 | if(!mca_dev) | ||
296 | return 0; | ||
297 | |||
298 | return mca_device_claimed(mca_dev); | ||
299 | } | ||
300 | EXPORT_SYMBOL(mca_is_adapter_used); | ||
301 | |||
302 | /** | ||
303 | * mca_mark_as_used - claim an MCA device | 285 | * mca_mark_as_used - claim an MCA device |
304 | * @slot: slot to claim | 286 | * @slot: slot to claim |
305 | * FIXME: should we make this threadsafe | 287 | * FIXME: should we make this threadsafe |
diff --git a/drivers/mca/mca-proc.c b/drivers/mca/mca-proc.c index 33d5e0820cc5..81ea0d377bf4 100644 --- a/drivers/mca/mca-proc.c +++ b/drivers/mca/mca-proc.c | |||
@@ -183,7 +183,7 @@ void __init mca_do_proc_init(void) | |||
183 | struct proc_dir_entry* node = NULL; | 183 | struct proc_dir_entry* node = NULL; |
184 | struct mca_device *mca_dev; | 184 | struct mca_device *mca_dev; |
185 | 185 | ||
186 | proc_mca = proc_mkdir("mca", &proc_root); | 186 | proc_mca = proc_mkdir("mca", NULL); |
187 | create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); | 187 | create_proc_read_entry("pos",0,proc_mca,get_mca_info,NULL); |
188 | create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); | 188 | create_proc_read_entry("machine",0,proc_mca,get_mca_machine_info,NULL); |
189 | 189 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index acd716b657b8..bb3e4b1cb773 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -5948,13 +5948,9 @@ static struct notifier_block md_notifier = { | |||
5948 | 5948 | ||
5949 | static void md_geninit(void) | 5949 | static void md_geninit(void) |
5950 | { | 5950 | { |
5951 | struct proc_dir_entry *p; | ||
5952 | |||
5953 | dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); | 5951 | dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); |
5954 | 5952 | ||
5955 | p = create_proc_entry("mdstat", S_IRUGO, NULL); | 5953 | proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); |
5956 | if (p) | ||
5957 | p->proc_fops = &md_seq_fops; | ||
5958 | } | 5954 | } |
5959 | 5955 | ||
5960 | static int __init md_init(void) | 5956 | static int __init md_init(void) |
diff --git a/drivers/media/video/zoran_procfs.c b/drivers/media/video/zoran_procfs.c index 328ed6e7ac6a..870bc5a70e3f 100644 --- a/drivers/media/video/zoran_procfs.c +++ b/drivers/media/video/zoran_procfs.c | |||
@@ -180,6 +180,7 @@ static ssize_t zoran_write(struct file *file, const char __user *buffer, | |||
180 | } | 180 | } |
181 | 181 | ||
182 | static const struct file_operations zoran_operations = { | 182 | static const struct file_operations zoran_operations = { |
183 | .owner = THIS_MODULE, | ||
183 | .open = zoran_open, | 184 | .open = zoran_open, |
184 | .read = seq_read, | 185 | .read = seq_read, |
185 | .write = zoran_write, | 186 | .write = zoran_write, |
@@ -195,10 +196,8 @@ zoran_proc_init (struct zoran *zr) | |||
195 | char name[8]; | 196 | char name[8]; |
196 | 197 | ||
197 | snprintf(name, 7, "zoran%d", zr->id); | 198 | snprintf(name, 7, "zoran%d", zr->id); |
198 | if ((zr->zoran_proc = create_proc_entry(name, 0, NULL))) { | 199 | zr->zoran_proc = proc_create_data(name, 0, NULL, &zoran_operations, zr); |
199 | zr->zoran_proc->data = zr; | 200 | if (zr->zoran_proc != NULL) { |
200 | zr->zoran_proc->owner = THIS_MODULE; | ||
201 | zr->zoran_proc->proc_fops = &zoran_operations; | ||
202 | dprintk(2, | 201 | dprintk(2, |
203 | KERN_INFO | 202 | KERN_INFO |
204 | "%s: procfs entry /proc/%s allocated. data=%p\n", | 203 | "%s: procfs entry /proc/%s allocated. data=%p\n", |
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index a95314897402..81483de8c0fd 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
@@ -371,7 +371,7 @@ static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req) | |||
371 | /* connect the i2o_block_request to the request */ | 371 | /* connect the i2o_block_request to the request */ |
372 | if (!req->special) { | 372 | if (!req->special) { |
373 | ireq = i2o_block_request_alloc(); | 373 | ireq = i2o_block_request_alloc(); |
374 | if (unlikely(IS_ERR(ireq))) { | 374 | if (IS_ERR(ireq)) { |
375 | osm_debug("unable to allocate i2o_block_request!\n"); | 375 | osm_debug("unable to allocate i2o_block_request!\n"); |
376 | return BLKPREP_DEFER; | 376 | return BLKPREP_DEFER; |
377 | } | 377 | } |
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c index 6fdd072201f9..54a3016ff45d 100644 --- a/drivers/message/i2o/i2o_proc.c +++ b/drivers/message/i2o/i2o_proc.c | |||
@@ -1893,13 +1893,11 @@ static int i2o_proc_create_entries(struct proc_dir_entry *dir, | |||
1893 | struct proc_dir_entry *tmp; | 1893 | struct proc_dir_entry *tmp; |
1894 | 1894 | ||
1895 | while (i2o_pe->name) { | 1895 | while (i2o_pe->name) { |
1896 | tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir); | 1896 | tmp = proc_create_data(i2o_pe->name, i2o_pe->mode, dir, |
1897 | i2o_pe->fops, data); | ||
1897 | if (!tmp) | 1898 | if (!tmp) |
1898 | return -1; | 1899 | return -1; |
1899 | 1900 | ||
1900 | tmp->data = data; | ||
1901 | tmp->proc_fops = i2o_pe->fops; | ||
1902 | |||
1903 | i2o_pe++; | 1901 | i2o_pe++; |
1904 | } | 1902 | } |
1905 | 1903 | ||
diff --git a/drivers/misc/hdpuftrs/hdpu_cpustate.c b/drivers/misc/hdpuftrs/hdpu_cpustate.c index 302e92418bbe..ff51ab67231c 100644 --- a/drivers/misc/hdpuftrs/hdpu_cpustate.c +++ b/drivers/misc/hdpuftrs/hdpu_cpustate.c | |||
@@ -210,13 +210,10 @@ static int hdpu_cpustate_probe(struct platform_device *pdev) | |||
210 | return ret; | 210 | return ret; |
211 | } | 211 | } |
212 | 212 | ||
213 | proc_de = create_proc_entry("sky_cpustate", 0666, &proc_root); | 213 | proc_de = proc_create("sky_cpustate", 0666, NULL, &proc_cpustate); |
214 | if (!proc_de) { | 214 | if (!proc_de) { |
215 | printk(KERN_WARNING "sky_cpustate: " | 215 | printk(KERN_WARNING "sky_cpustate: " |
216 | "Unable to create proc entry\n"); | 216 | "Unable to create proc entry\n"); |
217 | } else { | ||
218 | proc_de->proc_fops = &proc_cpustate; | ||
219 | proc_de->owner = THIS_MODULE; | ||
220 | } | 217 | } |
221 | 218 | ||
222 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); | 219 | printk(KERN_INFO "Sky CPU State Driver v" SKY_CPUSTATE_VERSION "\n"); |
diff --git a/drivers/misc/hdpuftrs/hdpu_nexus.c b/drivers/misc/hdpuftrs/hdpu_nexus.c index 2fa36f7a6eb3..08e26beefe64 100644 --- a/drivers/misc/hdpuftrs/hdpu_nexus.c +++ b/drivers/misc/hdpuftrs/hdpu_nexus.c | |||
@@ -102,22 +102,17 @@ static int hdpu_nexus_probe(struct platform_device *pdev) | |||
102 | printk(KERN_ERR "sky_nexus: Could not map slot id\n"); | 102 | printk(KERN_ERR "sky_nexus: Could not map slot id\n"); |
103 | } | 103 | } |
104 | 104 | ||
105 | hdpu_slot_id = create_proc_entry("sky_slot_id", 0666, &proc_root); | 105 | hdpu_slot_id = proc_create("sky_slot_id", 0666, NULL, &proc_slot_id); |
106 | if (!hdpu_slot_id) { | 106 | if (!hdpu_slot_id) { |
107 | printk(KERN_WARNING "sky_nexus: " | 107 | printk(KERN_WARNING "sky_nexus: " |
108 | "Unable to create proc dir entry: sky_slot_id\n"); | 108 | "Unable to create proc dir entry: sky_slot_id\n"); |
109 | } else { | ||
110 | hdpu_slot_id->proc_fops = &proc_slot_id; | ||
111 | hdpu_slot_id->owner = THIS_MODULE; | ||
112 | } | 109 | } |
113 | 110 | ||
114 | hdpu_chassis_id = create_proc_entry("sky_chassis_id", 0666, &proc_root); | 111 | hdpu_chassis_id = proc_create("sky_chassis_id", 0666, NULL, |
115 | if (!hdpu_chassis_id) { | 112 | &proc_chassis_id); |
113 | if (!hdpu_chassis_id) | ||
116 | printk(KERN_WARNING "sky_nexus: " | 114 | printk(KERN_WARNING "sky_nexus: " |
117 | "Unable to create proc dir entry: sky_chassis_id\n"); | 115 | "Unable to create proc dir entry: sky_chassis_id\n"); |
118 | } else { | ||
119 | hdpu_chassis_id->proc_fops = &proc_chassis_id; | ||
120 | hdpu_chassis_id->owner = THIS_MODULE; | ||
121 | } | 116 | } |
122 | 117 | ||
123 | return 0; | 118 | return 0; |
@@ -128,8 +123,8 @@ static int hdpu_nexus_remove(struct platform_device *pdev) | |||
128 | slot_id = -1; | 123 | slot_id = -1; |
129 | chassis_id = -1; | 124 | chassis_id = -1; |
130 | 125 | ||
131 | remove_proc_entry("sky_slot_id", &proc_root); | 126 | remove_proc_entry("sky_slot_id", NULL); |
132 | remove_proc_entry("sky_chassis_id", &proc_root); | 127 | remove_proc_entry("sky_chassis_id", NULL); |
133 | 128 | ||
134 | hdpu_slot_id = 0; | 129 | hdpu_slot_id = 0; |
135 | hdpu_chassis_id = 0; | 130 | hdpu_chassis_id = 0; |
diff --git a/drivers/misc/ibmasm/command.c b/drivers/misc/ibmasm/command.c index 1a0e7978226a..276d3fb68094 100644 --- a/drivers/misc/ibmasm/command.c +++ b/drivers/misc/ibmasm/command.c | |||
@@ -96,7 +96,7 @@ static inline void do_exec_command(struct service_processor *sp) | |||
96 | { | 96 | { |
97 | char tsbuf[32]; | 97 | char tsbuf[32]; |
98 | 98 | ||
99 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 99 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
100 | 100 | ||
101 | if (ibmasm_send_i2o_message(sp)) { | 101 | if (ibmasm_send_i2o_message(sp)) { |
102 | sp->current_command->status = IBMASM_CMD_FAILED; | 102 | sp->current_command->status = IBMASM_CMD_FAILED; |
@@ -119,7 +119,7 @@ void ibmasm_exec_command(struct service_processor *sp, struct command *cmd) | |||
119 | unsigned long flags; | 119 | unsigned long flags; |
120 | char tsbuf[32]; | 120 | char tsbuf[32]; |
121 | 121 | ||
122 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 122 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
123 | 123 | ||
124 | spin_lock_irqsave(&sp->lock, flags); | 124 | spin_lock_irqsave(&sp->lock, flags); |
125 | 125 | ||
@@ -139,7 +139,7 @@ static void exec_next_command(struct service_processor *sp) | |||
139 | unsigned long flags; | 139 | unsigned long flags; |
140 | char tsbuf[32]; | 140 | char tsbuf[32]; |
141 | 141 | ||
142 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 142 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
143 | 143 | ||
144 | spin_lock_irqsave(&sp->lock, flags); | 144 | spin_lock_irqsave(&sp->lock, flags); |
145 | sp->current_command = dequeue_command(sp); | 145 | sp->current_command = dequeue_command(sp); |
diff --git a/drivers/misc/ibmasm/heartbeat.c b/drivers/misc/ibmasm/heartbeat.c index 3036e785b3e4..1bc4306572a4 100644 --- a/drivers/misc/ibmasm/heartbeat.c +++ b/drivers/misc/ibmasm/heartbeat.c | |||
@@ -75,9 +75,9 @@ void ibmasm_heartbeat_exit(struct service_processor *sp) | |||
75 | { | 75 | { |
76 | char tsbuf[32]; | 76 | char tsbuf[32]; |
77 | 77 | ||
78 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 78 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
79 | ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); | 79 | ibmasm_wait_for_response(sp->heartbeat, IBMASM_CMD_TIMEOUT_NORMAL); |
80 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 80 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
81 | suspend_heartbeats = 1; | 81 | suspend_heartbeats = 1; |
82 | command_put(sp->heartbeat); | 82 | command_put(sp->heartbeat); |
83 | } | 83 | } |
@@ -88,7 +88,7 @@ void ibmasm_receive_heartbeat(struct service_processor *sp, void *message, size | |||
88 | struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; | 88 | struct dot_command_header *header = (struct dot_command_header *)cmd->buffer; |
89 | char tsbuf[32]; | 89 | char tsbuf[32]; |
90 | 90 | ||
91 | dbg("%s:%d at %s\n", __FUNCTION__, __LINE__, get_timestamp(tsbuf)); | 91 | dbg("%s:%d at %s\n", __func__, __LINE__, get_timestamp(tsbuf)); |
92 | if (suspend_heartbeats) | 92 | if (suspend_heartbeats) |
93 | return; | 93 | return; |
94 | 94 | ||
diff --git a/drivers/misc/intel_menlow.c b/drivers/misc/intel_menlow.c index 0c0bb3093e07..80a136352408 100644 --- a/drivers/misc/intel_menlow.c +++ b/drivers/misc/intel_menlow.c | |||
@@ -175,19 +175,17 @@ static int intel_menlow_memory_add(struct acpi_device *device) | |||
175 | goto end; | 175 | goto end; |
176 | } | 176 | } |
177 | 177 | ||
178 | if (cdev) { | 178 | acpi_driver_data(device) = cdev; |
179 | acpi_driver_data(device) = cdev; | 179 | result = sysfs_create_link(&device->dev.kobj, |
180 | result = sysfs_create_link(&device->dev.kobj, | 180 | &cdev->device.kobj, "thermal_cooling"); |
181 | &cdev->device.kobj, "thermal_cooling"); | 181 | if (result) |
182 | if (result) | 182 | goto unregister; |
183 | goto unregister; | 183 | |
184 | 184 | result = sysfs_create_link(&cdev->device.kobj, | |
185 | result = sysfs_create_link(&cdev->device.kobj, | 185 | &device->dev.kobj, "device"); |
186 | &device->dev.kobj, "device"); | 186 | if (result) { |
187 | if (result) { | 187 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); |
188 | sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); | 188 | goto unregister; |
189 | goto unregister; | ||
190 | } | ||
191 | } | 189 | } |
192 | 190 | ||
193 | end: | 191 | end: |
diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 05172d2613d6..6f76573e7c8a 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c | |||
@@ -75,7 +75,7 @@ ioc4_register_submodule(struct ioc4_submodule *is) | |||
75 | printk(KERN_WARNING | 75 | printk(KERN_WARNING |
76 | "%s: IOC4 submodule %s probe failed " | 76 | "%s: IOC4 submodule %s probe failed " |
77 | "for pci_dev %s", | 77 | "for pci_dev %s", |
78 | __FUNCTION__, module_name(is->is_owner), | 78 | __func__, module_name(is->is_owner), |
79 | pci_name(idd->idd_pdev)); | 79 | pci_name(idd->idd_pdev)); |
80 | } | 80 | } |
81 | } | 81 | } |
@@ -102,7 +102,7 @@ ioc4_unregister_submodule(struct ioc4_submodule *is) | |||
102 | printk(KERN_WARNING | 102 | printk(KERN_WARNING |
103 | "%s: IOC4 submodule %s remove failed " | 103 | "%s: IOC4 submodule %s remove failed " |
104 | "for pci_dev %s.\n", | 104 | "for pci_dev %s.\n", |
105 | __FUNCTION__, module_name(is->is_owner), | 105 | __func__, module_name(is->is_owner), |
106 | pci_name(idd->idd_pdev)); | 106 | pci_name(idd->idd_pdev)); |
107 | } | 107 | } |
108 | } | 108 | } |
@@ -282,7 +282,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
282 | if ((ret = pci_enable_device(pdev))) { | 282 | if ((ret = pci_enable_device(pdev))) { |
283 | printk(KERN_WARNING | 283 | printk(KERN_WARNING |
284 | "%s: Failed to enable IOC4 device for pci_dev %s.\n", | 284 | "%s: Failed to enable IOC4 device for pci_dev %s.\n", |
285 | __FUNCTION__, pci_name(pdev)); | 285 | __func__, pci_name(pdev)); |
286 | goto out; | 286 | goto out; |
287 | } | 287 | } |
288 | pci_set_master(pdev); | 288 | pci_set_master(pdev); |
@@ -292,7 +292,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
292 | if (!idd) { | 292 | if (!idd) { |
293 | printk(KERN_WARNING | 293 | printk(KERN_WARNING |
294 | "%s: Failed to allocate IOC4 data for pci_dev %s.\n", | 294 | "%s: Failed to allocate IOC4 data for pci_dev %s.\n", |
295 | __FUNCTION__, pci_name(pdev)); | 295 | __func__, pci_name(pdev)); |
296 | ret = -ENODEV; | 296 | ret = -ENODEV; |
297 | goto out_idd; | 297 | goto out_idd; |
298 | } | 298 | } |
@@ -307,7 +307,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
307 | printk(KERN_WARNING | 307 | printk(KERN_WARNING |
308 | "%s: Unable to find IOC4 misc resource " | 308 | "%s: Unable to find IOC4 misc resource " |
309 | "for pci_dev %s.\n", | 309 | "for pci_dev %s.\n", |
310 | __FUNCTION__, pci_name(idd->idd_pdev)); | 310 | __func__, pci_name(idd->idd_pdev)); |
311 | ret = -ENODEV; | 311 | ret = -ENODEV; |
312 | goto out_pci; | 312 | goto out_pci; |
313 | } | 313 | } |
@@ -316,7 +316,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
316 | printk(KERN_WARNING | 316 | printk(KERN_WARNING |
317 | "%s: Unable to request IOC4 misc region " | 317 | "%s: Unable to request IOC4 misc region " |
318 | "for pci_dev %s.\n", | 318 | "for pci_dev %s.\n", |
319 | __FUNCTION__, pci_name(idd->idd_pdev)); | 319 | __func__, pci_name(idd->idd_pdev)); |
320 | ret = -ENODEV; | 320 | ret = -ENODEV; |
321 | goto out_pci; | 321 | goto out_pci; |
322 | } | 322 | } |
@@ -326,7 +326,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
326 | printk(KERN_WARNING | 326 | printk(KERN_WARNING |
327 | "%s: Unable to remap IOC4 misc region " | 327 | "%s: Unable to remap IOC4 misc region " |
328 | "for pci_dev %s.\n", | 328 | "for pci_dev %s.\n", |
329 | __FUNCTION__, pci_name(idd->idd_pdev)); | 329 | __func__, pci_name(idd->idd_pdev)); |
330 | ret = -ENODEV; | 330 | ret = -ENODEV; |
331 | goto out_misc_region; | 331 | goto out_misc_region; |
332 | } | 332 | } |
@@ -372,7 +372,7 @@ ioc4_probe(struct pci_dev *pdev, const struct pci_device_id *pci_id) | |||
372 | printk(KERN_WARNING | 372 | printk(KERN_WARNING |
373 | "%s: IOC4 submodule 0x%s probe failed " | 373 | "%s: IOC4 submodule 0x%s probe failed " |
374 | "for pci_dev %s.\n", | 374 | "for pci_dev %s.\n", |
375 | __FUNCTION__, module_name(is->is_owner), | 375 | __func__, module_name(is->is_owner), |
376 | pci_name(idd->idd_pdev)); | 376 | pci_name(idd->idd_pdev)); |
377 | } | 377 | } |
378 | } | 378 | } |
@@ -406,7 +406,7 @@ ioc4_remove(struct pci_dev *pdev) | |||
406 | printk(KERN_WARNING | 406 | printk(KERN_WARNING |
407 | "%s: IOC4 submodule 0x%s remove failed " | 407 | "%s: IOC4 submodule 0x%s remove failed " |
408 | "for pci_dev %s.\n", | 408 | "for pci_dev %s.\n", |
409 | __FUNCTION__, module_name(is->is_owner), | 409 | __func__, module_name(is->is_owner), |
410 | pci_name(idd->idd_pdev)); | 410 | pci_name(idd->idd_pdev)); |
411 | } | 411 | } |
412 | } | 412 | } |
@@ -418,7 +418,7 @@ ioc4_remove(struct pci_dev *pdev) | |||
418 | printk(KERN_WARNING | 418 | printk(KERN_WARNING |
419 | "%s: Unable to get IOC4 misc mapping for pci_dev %s. " | 419 | "%s: Unable to get IOC4 misc mapping for pci_dev %s. " |
420 | "Device removal may be incomplete.\n", | 420 | "Device removal may be incomplete.\n", |
421 | __FUNCTION__, pci_name(idd->idd_pdev)); | 421 | __func__, pci_name(idd->idd_pdev)); |
422 | } | 422 | } |
423 | release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); | 423 | release_mem_region(idd->idd_bar0, sizeof(struct ioc4_misc_regs)); |
424 | 424 | ||
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c index 7fa61e907e1c..71d1c84e2fa8 100644 --- a/drivers/misc/phantom.c +++ b/drivers/misc/phantom.c | |||
@@ -12,6 +12,7 @@ | |||
12 | * or alternatively, you might use OpenHaptics provided by Sensable. | 12 | * or alternatively, you might use OpenHaptics provided by Sensable. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <linux/compat.h> | ||
15 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
16 | #include <linux/module.h> | 17 | #include <linux/module.h> |
17 | #include <linux/device.h> | 18 | #include <linux/device.h> |
@@ -91,11 +92,8 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
91 | unsigned long flags; | 92 | unsigned long flags; |
92 | unsigned int i; | 93 | unsigned int i; |
93 | 94 | ||
94 | if (_IOC_TYPE(cmd) != PH_IOC_MAGIC || | ||
95 | _IOC_NR(cmd) > PH_IOC_MAXNR) | ||
96 | return -ENOTTY; | ||
97 | |||
98 | switch (cmd) { | 95 | switch (cmd) { |
96 | case PHN_SETREG: | ||
99 | case PHN_SET_REG: | 97 | case PHN_SET_REG: |
100 | if (copy_from_user(&r, argp, sizeof(r))) | 98 | if (copy_from_user(&r, argp, sizeof(r))) |
101 | return -EFAULT; | 99 | return -EFAULT; |
@@ -126,6 +124,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
126 | phantom_status(dev, dev->status & ~PHB_RUNNING); | 124 | phantom_status(dev, dev->status & ~PHB_RUNNING); |
127 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 125 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
128 | break; | 126 | break; |
127 | case PHN_SETREGS: | ||
129 | case PHN_SET_REGS: | 128 | case PHN_SET_REGS: |
130 | if (copy_from_user(&rs, argp, sizeof(rs))) | 129 | if (copy_from_user(&rs, argp, sizeof(rs))) |
131 | return -EFAULT; | 130 | return -EFAULT; |
@@ -143,6 +142,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
143 | } | 142 | } |
144 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 143 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
145 | break; | 144 | break; |
145 | case PHN_GETREG: | ||
146 | case PHN_GET_REG: | 146 | case PHN_GET_REG: |
147 | if (copy_from_user(&r, argp, sizeof(r))) | 147 | if (copy_from_user(&r, argp, sizeof(r))) |
148 | return -EFAULT; | 148 | return -EFAULT; |
@@ -155,6 +155,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
155 | if (copy_to_user(argp, &r, sizeof(r))) | 155 | if (copy_to_user(argp, &r, sizeof(r))) |
156 | return -EFAULT; | 156 | return -EFAULT; |
157 | break; | 157 | break; |
158 | case PHN_GETREGS: | ||
158 | case PHN_GET_REGS: { | 159 | case PHN_GET_REGS: { |
159 | u32 m; | 160 | u32 m; |
160 | 161 | ||
@@ -168,6 +169,7 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
168 | for (i = 0; i < m; i++) | 169 | for (i = 0; i < m; i++) |
169 | if (rs.mask & BIT(i)) | 170 | if (rs.mask & BIT(i)) |
170 | rs.values[i] = ioread32(dev->iaddr + i); | 171 | rs.values[i] = ioread32(dev->iaddr + i); |
172 | atomic_set(&dev->counter, 0); | ||
171 | spin_unlock_irqrestore(&dev->regs_lock, flags); | 173 | spin_unlock_irqrestore(&dev->regs_lock, flags); |
172 | 174 | ||
173 | if (copy_to_user(argp, &rs, sizeof(rs))) | 175 | if (copy_to_user(argp, &rs, sizeof(rs))) |
@@ -191,6 +193,20 @@ static long phantom_ioctl(struct file *file, unsigned int cmd, | |||
191 | return 0; | 193 | return 0; |
192 | } | 194 | } |
193 | 195 | ||
196 | #ifdef CONFIG_COMPAT | ||
197 | static long phantom_compat_ioctl(struct file *filp, unsigned int cmd, | ||
198 | unsigned long arg) | ||
199 | { | ||
200 | if (_IOC_NR(cmd) <= 3 && _IOC_SIZE(cmd) == sizeof(compat_uptr_t)) { | ||
201 | cmd &= ~(_IOC_SIZEMASK << _IOC_SIZESHIFT); | ||
202 | cmd |= sizeof(void *) << _IOC_SIZESHIFT; | ||
203 | } | ||
204 | return phantom_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); | ||
205 | } | ||
206 | #else | ||
207 | #define phantom_compat_ioctl NULL | ||
208 | #endif | ||
209 | |||
194 | static int phantom_open(struct inode *inode, struct file *file) | 210 | static int phantom_open(struct inode *inode, struct file *file) |
195 | { | 211 | { |
196 | struct phantom_device *dev = container_of(inode->i_cdev, | 212 | struct phantom_device *dev = container_of(inode->i_cdev, |
@@ -239,11 +255,12 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait) | |||
239 | 255 | ||
240 | pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); | 256 | pr_debug("phantom_poll: %d\n", atomic_read(&dev->counter)); |
241 | poll_wait(file, &dev->wait, wait); | 257 | poll_wait(file, &dev->wait, wait); |
242 | if (atomic_read(&dev->counter)) { | 258 | |
259 | if (!(dev->status & PHB_RUNNING)) | ||
260 | mask = POLLERR; | ||
261 | else if (atomic_read(&dev->counter)) | ||
243 | mask = POLLIN | POLLRDNORM; | 262 | mask = POLLIN | POLLRDNORM; |
244 | atomic_dec(&dev->counter); | 263 | |
245 | } else if ((dev->status & PHB_RUNNING) == 0) | ||
246 | mask = POLLIN | POLLRDNORM | POLLERR; | ||
247 | pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); | 264 | pr_debug("phantom_poll end: %x/%d\n", mask, atomic_read(&dev->counter)); |
248 | 265 | ||
249 | return mask; | 266 | return mask; |
@@ -253,6 +270,7 @@ static struct file_operations phantom_file_ops = { | |||
253 | .open = phantom_open, | 270 | .open = phantom_open, |
254 | .release = phantom_release, | 271 | .release = phantom_release, |
255 | .unlocked_ioctl = phantom_ioctl, | 272 | .unlocked_ioctl = phantom_ioctl, |
273 | .compat_ioctl = phantom_compat_ioctl, | ||
256 | .poll = phantom_poll, | 274 | .poll = phantom_poll, |
257 | }; | 275 | }; |
258 | 276 | ||
diff --git a/drivers/misc/sony-laptop.c b/drivers/misc/sony-laptop.c index 02ff3d19b1cc..00e48e2a9c11 100644 --- a/drivers/misc/sony-laptop.c +++ b/drivers/misc/sony-laptop.c | |||
@@ -961,7 +961,7 @@ static int sony_nc_resume(struct acpi_device *device) | |||
961 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, | 961 | ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, |
962 | item->value, NULL); | 962 | item->value, NULL); |
963 | if (ret < 0) { | 963 | if (ret < 0) { |
964 | printk("%s: %d\n", __FUNCTION__, ret); | 964 | printk("%s: %d\n", __func__, ret); |
965 | break; | 965 | break; |
966 | } | 966 | } |
967 | } | 967 | } |
@@ -1453,7 +1453,7 @@ static struct sonypi_eventtypes type4_events[] = { | |||
1453 | udelay(1); \ | 1453 | udelay(1); \ |
1454 | if (!n) \ | 1454 | if (!n) \ |
1455 | dprintk("command failed at %s : %s (line %d)\n", \ | 1455 | dprintk("command failed at %s : %s (line %d)\n", \ |
1456 | __FILE__, __FUNCTION__, __LINE__); \ | 1456 | __FILE__, __func__, __LINE__); \ |
1457 | } | 1457 | } |
1458 | 1458 | ||
1459 | static u8 sony_pic_call1(u8 dev) | 1459 | static u8 sony_pic_call1(u8 dev) |
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 365024b83d3d..35508584ac2a 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c | |||
@@ -340,7 +340,7 @@ checkstatus: | |||
340 | 340 | ||
341 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ | 341 | /* SPI R3, R4, or R7 == R1 + 4 bytes */ |
342 | case MMC_RSP_SPI_R3: | 342 | case MMC_RSP_SPI_R3: |
343 | cmd->resp[1] = be32_to_cpu(get_unaligned((u32 *)cp)); | 343 | cmd->resp[1] = get_unaligned_be32(cp); |
344 | break; | 344 | break; |
345 | 345 | ||
346 | /* SPI R1 == just one status byte */ | 346 | /* SPI R1 == just one status byte */ |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6e91b4b7aabb..6425603bc379 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -3282,17 +3282,14 @@ static int bond_create_proc_entry(struct bonding *bond) | |||
3282 | struct net_device *bond_dev = bond->dev; | 3282 | struct net_device *bond_dev = bond->dev; |
3283 | 3283 | ||
3284 | if (bond_proc_dir) { | 3284 | if (bond_proc_dir) { |
3285 | bond->proc_entry = create_proc_entry(bond_dev->name, | 3285 | bond->proc_entry = proc_create_data(bond_dev->name, |
3286 | S_IRUGO, | 3286 | S_IRUGO, bond_proc_dir, |
3287 | bond_proc_dir); | 3287 | &bond_info_fops, bond); |
3288 | if (bond->proc_entry == NULL) { | 3288 | if (bond->proc_entry == NULL) { |
3289 | printk(KERN_WARNING DRV_NAME | 3289 | printk(KERN_WARNING DRV_NAME |
3290 | ": Warning: Cannot create /proc/net/%s/%s\n", | 3290 | ": Warning: Cannot create /proc/net/%s/%s\n", |
3291 | DRV_NAME, bond_dev->name); | 3291 | DRV_NAME, bond_dev->name); |
3292 | } else { | 3292 | } else { |
3293 | bond->proc_entry->data = bond; | ||
3294 | bond->proc_entry->proc_fops = &bond_info_fops; | ||
3295 | bond->proc_entry->owner = THIS_MODULE; | ||
3296 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); | 3293 | memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); |
3297 | } | 3294 | } |
3298 | } | 3295 | } |
diff --git a/drivers/net/e100.c b/drivers/net/e100.c index 2d139ec79777..f3cba5e24ec5 100644 --- a/drivers/net/e100.c +++ b/drivers/net/e100.c | |||
@@ -1802,7 +1802,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) | |||
1802 | * it is protected by the before last buffer's el bit being set */ | 1802 | * it is protected by the before last buffer's el bit being set */ |
1803 | if (rx->prev->skb) { | 1803 | if (rx->prev->skb) { |
1804 | struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; | 1804 | struct rfd *prev_rfd = (struct rfd *)rx->prev->skb->data; |
1805 | put_unaligned(cpu_to_le32(rx->dma_addr), &prev_rfd->link); | 1805 | put_unaligned_le32(rx->dma_addr, &prev_rfd->link); |
1806 | } | 1806 | } |
1807 | 1807 | ||
1808 | return 0; | 1808 | return 0; |
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c index b53f6b6491b3..e5c2380f50ca 100644 --- a/drivers/net/hamachi.c +++ b/drivers/net/hamachi.c | |||
@@ -1508,7 +1508,7 @@ static int hamachi_rx(struct net_device *dev) | |||
1508 | hmp->rx_buf_sz, | 1508 | hmp->rx_buf_sz, |
1509 | PCI_DMA_FROMDEVICE); | 1509 | PCI_DMA_FROMDEVICE); |
1510 | buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; | 1510 | buf_addr = (u8 *) hmp->rx_skbuff[entry]->data; |
1511 | frame_status = le32_to_cpu(get_unaligned((__le32*)&(buf_addr[data_size - 12]))); | 1511 | frame_status = get_unaligned_le32(&(buf_addr[data_size - 12])); |
1512 | if (hamachi_debug > 4) | 1512 | if (hamachi_debug > 4) |
1513 | printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", | 1513 | printk(KERN_DEBUG " hamachi_rx() status was %8.8x.\n", |
1514 | frame_status); | 1514 | frame_status); |
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index ce4fc2ec2fe4..00527805e4f1 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -1302,13 +1302,10 @@ static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter) | |||
1302 | if (ibmveth_proc_dir) { | 1302 | if (ibmveth_proc_dir) { |
1303 | char u_addr[10]; | 1303 | char u_addr[10]; |
1304 | sprintf(u_addr, "%x", adapter->vdev->unit_address); | 1304 | sprintf(u_addr, "%x", adapter->vdev->unit_address); |
1305 | entry = create_proc_entry(u_addr, S_IFREG, ibmveth_proc_dir); | 1305 | entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir, |
1306 | if (!entry) { | 1306 | &ibmveth_proc_fops, adapter); |
1307 | if (!entry) | ||
1307 | ibmveth_error_printk("Cannot create adapter proc entry"); | 1308 | ibmveth_error_printk("Cannot create adapter proc entry"); |
1308 | } else { | ||
1309 | entry->data = (void *) adapter; | ||
1310 | entry->proc_fops = &ibmveth_proc_fops; | ||
1311 | } | ||
1312 | } | 1309 | } |
1313 | return; | 1310 | return; |
1314 | } | 1311 | } |
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c index 93916cf33f29..ad92d3ff1c40 100644 --- a/drivers/net/irda/mcs7780.c +++ b/drivers/net/irda/mcs7780.c | |||
@@ -464,7 +464,7 @@ static void mcs_unwrap_fir(struct mcs_cb *mcs, __u8 *buf, int len) | |||
464 | } | 464 | } |
465 | 465 | ||
466 | fcs = ~(crc32_le(~0, buf, new_len)); | 466 | fcs = ~(crc32_le(~0, buf, new_len)); |
467 | if(fcs != le32_to_cpu(get_unaligned((__le32 *)(buf+new_len)))) { | 467 | if(fcs != get_unaligned_le32(buf + new_len)) { |
468 | IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); | 468 | IRDA_ERROR("crc error calc 0x%x len %d\n", fcs, new_len); |
469 | mcs->stats.rx_errors++; | 469 | mcs->stats.rx_errors++; |
470 | mcs->stats.rx_crc_errors++; | 470 | mcs->stats.rx_crc_errors++; |
diff --git a/drivers/net/irda/stir4200.c b/drivers/net/irda/stir4200.c index e59c485bc497..051963782749 100644 --- a/drivers/net/irda/stir4200.c +++ b/drivers/net/irda/stir4200.c | |||
@@ -329,7 +329,7 @@ static void fir_eof(struct stir_cb *stir) | |||
329 | } | 329 | } |
330 | 330 | ||
331 | fcs = ~(crc32_le(~0, rx_buff->data, len)); | 331 | fcs = ~(crc32_le(~0, rx_buff->data, len)); |
332 | if (fcs != le32_to_cpu(get_unaligned((__le32 *)(rx_buff->data+len)))) { | 332 | if (fcs != get_unaligned_le32(rx_buff->data + len)) { |
333 | pr_debug("crc error calc 0x%x len %d\n", fcs, len); | 333 | pr_debug("crc error calc 0x%x len %d\n", fcs, len); |
334 | stir->stats.rx_errors++; | 334 | stir->stats.rx_errors++; |
335 | stir->stats.rx_crc_errors++; | 335 | stir->stats.rx_crc_errors++; |
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c index acd082a96a4f..d15e00b8591e 100644 --- a/drivers/net/irda/vlsi_ir.c +++ b/drivers/net/irda/vlsi_ir.c | |||
@@ -1674,13 +1674,12 @@ vlsi_irda_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
1674 | if (vlsi_proc_root != NULL) { | 1674 | if (vlsi_proc_root != NULL) { |
1675 | struct proc_dir_entry *ent; | 1675 | struct proc_dir_entry *ent; |
1676 | 1676 | ||
1677 | ent = create_proc_entry(ndev->name, S_IFREG|S_IRUGO, vlsi_proc_root); | 1677 | ent = proc_create_data(ndev->name, S_IFREG|S_IRUGO, |
1678 | vlsi_proc_root, VLSI_PROC_FOPS, ndev); | ||
1678 | if (!ent) { | 1679 | if (!ent) { |
1679 | IRDA_WARNING("%s: failed to create proc entry\n", | 1680 | IRDA_WARNING("%s: failed to create proc entry\n", |
1680 | __FUNCTION__); | 1681 | __FUNCTION__); |
1681 | } else { | 1682 | } else { |
1682 | ent->data = ndev; | ||
1683 | ent->proc_fops = VLSI_PROC_FOPS; | ||
1684 | ent->size = 0; | 1683 | ent->size = 0; |
1685 | } | 1684 | } |
1686 | idev->proc_entry = ent; | 1685 | idev->proc_entry = ent; |
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index cead81e80f0c..ef63c8d2bd7e 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c | |||
@@ -2437,7 +2437,7 @@ static int myri10ge_sw_tso(struct sk_buff *skb, struct net_device *dev) | |||
2437 | int status; | 2437 | int status; |
2438 | 2438 | ||
2439 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); | 2439 | segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO6); |
2440 | if (unlikely(IS_ERR(segs))) | 2440 | if (IS_ERR(segs)) |
2441 | goto drop; | 2441 | goto drop; |
2442 | 2442 | ||
2443 | while (segs) { | 2443 | while (segs) { |
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c index 4fad4ddb3504..58a26a47af29 100644 --- a/drivers/net/pppoe.c +++ b/drivers/net/pppoe.c | |||
@@ -1052,11 +1052,9 @@ static int __init pppoe_proc_init(void) | |||
1052 | { | 1052 | { |
1053 | struct proc_dir_entry *p; | 1053 | struct proc_dir_entry *p; |
1054 | 1054 | ||
1055 | p = create_proc_entry("pppoe", S_IRUGO, init_net.proc_net); | 1055 | p = proc_net_fops_create(&init_net, "pppoe", S_IRUGO, &pppoe_seq_fops); |
1056 | if (!p) | 1056 | if (!p) |
1057 | return -ENOMEM; | 1057 | return -ENOMEM; |
1058 | |||
1059 | p->proc_fops = &pppoe_seq_fops; | ||
1060 | return 0; | 1058 | return 0; |
1061 | } | 1059 | } |
1062 | #else /* CONFIG_PROC_FS */ | 1060 | #else /* CONFIG_PROC_FS */ |
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c index 3d10ca050b79..244d7830c92a 100644 --- a/drivers/net/pppol2tp.c +++ b/drivers/net/pppol2tp.c | |||
@@ -2469,12 +2469,12 @@ static int __init pppol2tp_init(void) | |||
2469 | goto out_unregister_pppol2tp_proto; | 2469 | goto out_unregister_pppol2tp_proto; |
2470 | 2470 | ||
2471 | #ifdef CONFIG_PROC_FS | 2471 | #ifdef CONFIG_PROC_FS |
2472 | pppol2tp_proc = create_proc_entry("pppol2tp", 0, init_net.proc_net); | 2472 | pppol2tp_proc = proc_net_fops_create(&init_net, "pppol2tp", 0, |
2473 | &pppol2tp_proc_fops); | ||
2473 | if (!pppol2tp_proc) { | 2474 | if (!pppol2tp_proc) { |
2474 | err = -ENOMEM; | 2475 | err = -ENOMEM; |
2475 | goto out_unregister_pppox_proto; | 2476 | goto out_unregister_pppox_proto; |
2476 | } | 2477 | } |
2477 | pppol2tp_proc->proc_fops = &pppol2tp_proc_fops; | ||
2478 | #endif /* CONFIG_PROC_FS */ | 2478 | #endif /* CONFIG_PROC_FS */ |
2479 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", | 2479 | printk(KERN_INFO "PPPoL2TP kernel driver, %s\n", |
2480 | PPPOL2TP_DRV_VERSION); | 2480 | PPPOL2TP_DRV_VERSION); |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index e3f74c9f78bd..b66c75e3b8a1 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -4361,7 +4361,7 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb) | |||
4361 | } | 4361 | } |
4362 | 4362 | ||
4363 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); | 4363 | segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); |
4364 | if (unlikely(IS_ERR(segs))) | 4364 | if (IS_ERR(segs)) |
4365 | goto tg3_tso_bug_end; | 4365 | goto tg3_tso_bug_end; |
4366 | 4366 | ||
4367 | do { | 4367 | do { |
diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c index 6c6fc325c8f9..bc30c6e8fea2 100644 --- a/drivers/net/tulip/de4x5.c +++ b/drivers/net/tulip/de4x5.c | |||
@@ -482,7 +482,6 @@ | |||
482 | static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; | 482 | static char version[] __devinitdata = "de4x5.c:V0.546 2001/02/22 davies@maniac.ultranet.com\n"; |
483 | 483 | ||
484 | #define c_char const char | 484 | #define c_char const char |
485 | #define TWIDDLE(a) (u_short)le16_to_cpu(get_unaligned((__le16 *)(a))) | ||
486 | 485 | ||
487 | /* | 486 | /* |
488 | ** MII Information | 487 | ** MII Information |
@@ -4405,7 +4404,7 @@ srom_infoleaf_info(struct net_device *dev) | |||
4405 | } | 4404 | } |
4406 | } | 4405 | } |
4407 | 4406 | ||
4408 | lp->infoleaf_offset = TWIDDLE(p+1); | 4407 | lp->infoleaf_offset = get_unaligned_le16(p + 1); |
4409 | 4408 | ||
4410 | return 0; | 4409 | return 0; |
4411 | } | 4410 | } |
@@ -4476,7 +4475,7 @@ srom_exec(struct net_device *dev, u_char *p) | |||
4476 | 4475 | ||
4477 | while (count--) { | 4476 | while (count--) { |
4478 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? | 4477 | gep_wr(((lp->chipset==DC21140) && (lp->ibn!=5) ? |
4479 | *p++ : TWIDDLE(w++)), dev); | 4478 | *p++ : get_unaligned_le16(w++)), dev); |
4480 | mdelay(2); /* 2ms per action */ | 4479 | mdelay(2); /* 2ms per action */ |
4481 | } | 4480 | } |
4482 | 4481 | ||
@@ -4711,10 +4710,10 @@ type1_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4711 | lp->active = *p++; | 4710 | lp->active = *p++; |
4712 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); | 4711 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (*p + 1); |
4713 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); | 4712 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (*p + 1); |
4714 | lp->phy[lp->active].mc = TWIDDLE(p); p += 2; | 4713 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; |
4715 | lp->phy[lp->active].ana = TWIDDLE(p); p += 2; | 4714 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; |
4716 | lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; | 4715 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; |
4717 | lp->phy[lp->active].ttm = TWIDDLE(p); | 4716 | lp->phy[lp->active].ttm = get_unaligned_le16(p); |
4718 | return 0; | 4717 | return 0; |
4719 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | 4718 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { |
4720 | lp->ibn = 1; | 4719 | lp->ibn = 1; |
@@ -4751,16 +4750,16 @@ type2_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4751 | lp->infoblock_media = (*p) & MEDIA_CODE; | 4750 | lp->infoblock_media = (*p) & MEDIA_CODE; |
4752 | 4751 | ||
4753 | if ((*p++) & EXT_FIELD) { | 4752 | if ((*p++) & EXT_FIELD) { |
4754 | lp->cache.csr13 = TWIDDLE(p); p += 2; | 4753 | lp->cache.csr13 = get_unaligned_le16(p); p += 2; |
4755 | lp->cache.csr14 = TWIDDLE(p); p += 2; | 4754 | lp->cache.csr14 = get_unaligned_le16(p); p += 2; |
4756 | lp->cache.csr15 = TWIDDLE(p); p += 2; | 4755 | lp->cache.csr15 = get_unaligned_le16(p); p += 2; |
4757 | } else { | 4756 | } else { |
4758 | lp->cache.csr13 = CSR13; | 4757 | lp->cache.csr13 = CSR13; |
4759 | lp->cache.csr14 = CSR14; | 4758 | lp->cache.csr14 = CSR14; |
4760 | lp->cache.csr15 = CSR15; | 4759 | lp->cache.csr15 = CSR15; |
4761 | } | 4760 | } |
4762 | lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4761 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4763 | lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); | 4762 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); |
4764 | lp->infoblock_csr6 = OMR_SIA; | 4763 | lp->infoblock_csr6 = OMR_SIA; |
4765 | lp->useMII = false; | 4764 | lp->useMII = false; |
4766 | 4765 | ||
@@ -4792,10 +4791,10 @@ type3_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4792 | if (MOTO_SROM_BUG) lp->active = 0; | 4791 | if (MOTO_SROM_BUG) lp->active = 0; |
4793 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); | 4792 | lp->phy[lp->active].gep = (*p ? p : NULL); p += (2 * (*p) + 1); |
4794 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); | 4793 | lp->phy[lp->active].rst = (*p ? p : NULL); p += (2 * (*p) + 1); |
4795 | lp->phy[lp->active].mc = TWIDDLE(p); p += 2; | 4794 | lp->phy[lp->active].mc = get_unaligned_le16(p); p += 2; |
4796 | lp->phy[lp->active].ana = TWIDDLE(p); p += 2; | 4795 | lp->phy[lp->active].ana = get_unaligned_le16(p); p += 2; |
4797 | lp->phy[lp->active].fdx = TWIDDLE(p); p += 2; | 4796 | lp->phy[lp->active].fdx = get_unaligned_le16(p); p += 2; |
4798 | lp->phy[lp->active].ttm = TWIDDLE(p); p += 2; | 4797 | lp->phy[lp->active].ttm = get_unaligned_le16(p); p += 2; |
4799 | lp->phy[lp->active].mci = *p; | 4798 | lp->phy[lp->active].mci = *p; |
4800 | return 0; | 4799 | return 0; |
4801 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { | 4800 | } else if ((lp->media == INIT) && (lp->timeout < 0)) { |
@@ -4835,8 +4834,8 @@ type4_infoblock(struct net_device *dev, u_char count, u_char *p) | |||
4835 | lp->cache.csr13 = CSR13; /* Hard coded defaults */ | 4834 | lp->cache.csr13 = CSR13; /* Hard coded defaults */ |
4836 | lp->cache.csr14 = CSR14; | 4835 | lp->cache.csr14 = CSR14; |
4837 | lp->cache.csr15 = CSR15; | 4836 | lp->cache.csr15 = CSR15; |
4838 | lp->cache.gepc = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4837 | lp->cache.gepc = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4839 | lp->cache.gep = ((s32)(TWIDDLE(p)) << 16); p += 2; | 4838 | lp->cache.gep = ((s32)(get_unaligned_le16(p)) << 16); p += 2; |
4840 | csr6 = *p++; | 4839 | csr6 = *p++; |
4841 | flags = *p++; | 4840 | flags = *p++; |
4842 | 4841 | ||
diff --git a/drivers/net/tulip/de4x5.h b/drivers/net/tulip/de4x5.h index 9fb8d7f07994..f5f33b3eb067 100644 --- a/drivers/net/tulip/de4x5.h +++ b/drivers/net/tulip/de4x5.h | |||
@@ -1017,4 +1017,4 @@ struct de4x5_ioctl { | |||
1017 | #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ | 1017 | #define DE4X5_SET_OMR 0x0d /* Set the OMR Register contents */ |
1018 | #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ | 1018 | #define DE4X5_GET_REG 0x0e /* Get the DE4X5 Registers */ |
1019 | 1019 | ||
1020 | #define MOTO_SROM_BUG ((lp->active == 8) && (((le32_to_cpu(get_unaligned(((__le32 *)dev->dev_addr))))&0x00ffffff)==0x3e0008)) | 1020 | #define MOTO_SROM_BUG (lp->active == 8 && (get_unaligned_le32(dev->dev_addr) & 0x00ffffff) == 0x3e0008) |
diff --git a/drivers/net/tulip/tulip.h b/drivers/net/tulip/tulip.h index 908422f2f320..92c68a22f16b 100644 --- a/drivers/net/tulip/tulip.h +++ b/drivers/net/tulip/tulip.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/irq.h> | 27 | #include <asm/irq.h> |
28 | #include <asm/unaligned.h> | ||
28 | 29 | ||
29 | 30 | ||
30 | 31 | ||
@@ -304,11 +305,7 @@ enum t21143_csr6_bits { | |||
304 | 305 | ||
305 | #define RUN_AT(x) (jiffies + (x)) | 306 | #define RUN_AT(x) (jiffies + (x)) |
306 | 307 | ||
307 | #if defined(__i386__) /* AKA get_unaligned() */ | 308 | #define get_u16(ptr) get_unaligned_le16((ptr)) |
308 | #define get_u16(ptr) (*(u16 *)(ptr)) | ||
309 | #else | ||
310 | #define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8)) | ||
311 | #endif | ||
312 | 309 | ||
313 | struct medialeaf { | 310 | struct medialeaf { |
314 | u8 type; | 311 | u8 type; |
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c index fa1c1c329a2d..f9d13fa05d64 100644 --- a/drivers/net/tulip/tulip_core.c +++ b/drivers/net/tulip/tulip_core.c | |||
@@ -327,8 +327,8 @@ static void tulip_up(struct net_device *dev) | |||
327 | tp->dirty_rx = tp->dirty_tx = 0; | 327 | tp->dirty_rx = tp->dirty_tx = 0; |
328 | 328 | ||
329 | if (tp->flags & MC_HASH_ONLY) { | 329 | if (tp->flags & MC_HASH_ONLY) { |
330 | u32 addr_low = le32_to_cpu(get_unaligned((__le32 *)dev->dev_addr)); | 330 | u32 addr_low = get_unaligned_le32(dev->dev_addr); |
331 | u32 addr_high = le16_to_cpu(get_unaligned((__le16 *)(dev->dev_addr+4))); | 331 | u32 addr_high = get_unaligned_le16(dev->dev_addr + 4); |
332 | if (tp->chip_id == AX88140) { | 332 | if (tp->chip_id == AX88140) { |
333 | iowrite32(0, ioaddr + CSR13); | 333 | iowrite32(0, ioaddr + CSR13); |
334 | iowrite32(addr_low, ioaddr + CSR14); | 334 | iowrite32(addr_low, ioaddr + CSR14); |
@@ -1437,13 +1437,13 @@ static int __devinit tulip_init_one (struct pci_dev *pdev, | |||
1437 | do | 1437 | do |
1438 | value = ioread32(ioaddr + CSR9); | 1438 | value = ioread32(ioaddr + CSR9); |
1439 | while (value < 0 && --boguscnt > 0); | 1439 | while (value < 0 && --boguscnt > 0); |
1440 | put_unaligned(cpu_to_le16(value), ((__le16*)dev->dev_addr) + i); | 1440 | put_unaligned_le16(value, ((__le16 *)dev->dev_addr) + i); |
1441 | sum += value & 0xffff; | 1441 | sum += value & 0xffff; |
1442 | } | 1442 | } |
1443 | } else if (chip_idx == COMET) { | 1443 | } else if (chip_idx == COMET) { |
1444 | /* No need to read the EEPROM. */ | 1444 | /* No need to read the EEPROM. */ |
1445 | put_unaligned(cpu_to_le32(ioread32(ioaddr + 0xA4)), (__le32 *)dev->dev_addr); | 1445 | put_unaligned_le32(ioread32(ioaddr + 0xA4), dev->dev_addr); |
1446 | put_unaligned(cpu_to_le16(ioread32(ioaddr + 0xA8)), (__le16 *)(dev->dev_addr + 4)); | 1446 | put_unaligned_le16(ioread32(ioaddr + 0xA8), dev->dev_addr + 4); |
1447 | for (i = 0; i < 6; i ++) | 1447 | for (i = 0; i < 6; i ++) |
1448 | sum += dev->dev_addr[i]; | 1448 | sum += dev->dev_addr[i]; |
1449 | } else { | 1449 | } else { |
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c index 932d6b1c9d0b..45f47c1c0a35 100644 --- a/drivers/net/wireless/airo.c +++ b/drivers/net/wireless/airo.c | |||
@@ -3657,7 +3657,7 @@ void mpi_receive_802_11 (struct airo_info *ai) | |||
3657 | ptr += hdrlen; | 3657 | ptr += hdrlen; |
3658 | if (hdrlen == 24) | 3658 | if (hdrlen == 24) |
3659 | ptr += 6; | 3659 | ptr += 6; |
3660 | gap = le16_to_cpu(get_unaligned((__le16 *)ptr)); | 3660 | gap = get_unaligned_le16(ptr); |
3661 | ptr += sizeof(__le16); | 3661 | ptr += sizeof(__le16); |
3662 | if (gap) { | 3662 | if (gap) { |
3663 | if (gap <= 8) | 3663 | if (gap <= 8) |
@@ -4347,24 +4347,28 @@ static int proc_config_open( struct inode *inode, struct file *file ); | |||
4347 | static int proc_wepkey_open( struct inode *inode, struct file *file ); | 4347 | static int proc_wepkey_open( struct inode *inode, struct file *file ); |
4348 | 4348 | ||
4349 | static const struct file_operations proc_statsdelta_ops = { | 4349 | static const struct file_operations proc_statsdelta_ops = { |
4350 | .owner = THIS_MODULE, | ||
4350 | .read = proc_read, | 4351 | .read = proc_read, |
4351 | .open = proc_statsdelta_open, | 4352 | .open = proc_statsdelta_open, |
4352 | .release = proc_close | 4353 | .release = proc_close |
4353 | }; | 4354 | }; |
4354 | 4355 | ||
4355 | static const struct file_operations proc_stats_ops = { | 4356 | static const struct file_operations proc_stats_ops = { |
4357 | .owner = THIS_MODULE, | ||
4356 | .read = proc_read, | 4358 | .read = proc_read, |
4357 | .open = proc_stats_open, | 4359 | .open = proc_stats_open, |
4358 | .release = proc_close | 4360 | .release = proc_close |
4359 | }; | 4361 | }; |
4360 | 4362 | ||
4361 | static const struct file_operations proc_status_ops = { | 4363 | static const struct file_operations proc_status_ops = { |
4364 | .owner = THIS_MODULE, | ||
4362 | .read = proc_read, | 4365 | .read = proc_read, |
4363 | .open = proc_status_open, | 4366 | .open = proc_status_open, |
4364 | .release = proc_close | 4367 | .release = proc_close |
4365 | }; | 4368 | }; |
4366 | 4369 | ||
4367 | static const struct file_operations proc_SSID_ops = { | 4370 | static const struct file_operations proc_SSID_ops = { |
4371 | .owner = THIS_MODULE, | ||
4368 | .read = proc_read, | 4372 | .read = proc_read, |
4369 | .write = proc_write, | 4373 | .write = proc_write, |
4370 | .open = proc_SSID_open, | 4374 | .open = proc_SSID_open, |
@@ -4372,6 +4376,7 @@ static const struct file_operations proc_SSID_ops = { | |||
4372 | }; | 4376 | }; |
4373 | 4377 | ||
4374 | static const struct file_operations proc_BSSList_ops = { | 4378 | static const struct file_operations proc_BSSList_ops = { |
4379 | .owner = THIS_MODULE, | ||
4375 | .read = proc_read, | 4380 | .read = proc_read, |
4376 | .write = proc_write, | 4381 | .write = proc_write, |
4377 | .open = proc_BSSList_open, | 4382 | .open = proc_BSSList_open, |
@@ -4379,6 +4384,7 @@ static const struct file_operations proc_BSSList_ops = { | |||
4379 | }; | 4384 | }; |
4380 | 4385 | ||
4381 | static const struct file_operations proc_APList_ops = { | 4386 | static const struct file_operations proc_APList_ops = { |
4387 | .owner = THIS_MODULE, | ||
4382 | .read = proc_read, | 4388 | .read = proc_read, |
4383 | .write = proc_write, | 4389 | .write = proc_write, |
4384 | .open = proc_APList_open, | 4390 | .open = proc_APList_open, |
@@ -4386,6 +4392,7 @@ static const struct file_operations proc_APList_ops = { | |||
4386 | }; | 4392 | }; |
4387 | 4393 | ||
4388 | static const struct file_operations proc_config_ops = { | 4394 | static const struct file_operations proc_config_ops = { |
4395 | .owner = THIS_MODULE, | ||
4389 | .read = proc_read, | 4396 | .read = proc_read, |
4390 | .write = proc_write, | 4397 | .write = proc_write, |
4391 | .open = proc_config_open, | 4398 | .open = proc_config_open, |
@@ -4393,6 +4400,7 @@ static const struct file_operations proc_config_ops = { | |||
4393 | }; | 4400 | }; |
4394 | 4401 | ||
4395 | static const struct file_operations proc_wepkey_ops = { | 4402 | static const struct file_operations proc_wepkey_ops = { |
4403 | .owner = THIS_MODULE, | ||
4396 | .read = proc_read, | 4404 | .read = proc_read, |
4397 | .write = proc_write, | 4405 | .write = proc_write, |
4398 | .open = proc_wepkey_open, | 4406 | .open = proc_wepkey_open, |
@@ -4411,10 +4419,6 @@ struct proc_data { | |||
4411 | void (*on_close) (struct inode *, struct file *); | 4419 | void (*on_close) (struct inode *, struct file *); |
4412 | }; | 4420 | }; |
4413 | 4421 | ||
4414 | #ifndef SETPROC_OPS | ||
4415 | #define SETPROC_OPS(entry, ops) (entry)->proc_fops = &(ops) | ||
4416 | #endif | ||
4417 | |||
4418 | static int setup_proc_entry( struct net_device *dev, | 4422 | static int setup_proc_entry( struct net_device *dev, |
4419 | struct airo_info *apriv ) { | 4423 | struct airo_info *apriv ) { |
4420 | struct proc_dir_entry *entry; | 4424 | struct proc_dir_entry *entry; |
@@ -4430,100 +4434,76 @@ static int setup_proc_entry( struct net_device *dev, | |||
4430 | apriv->proc_entry->owner = THIS_MODULE; | 4434 | apriv->proc_entry->owner = THIS_MODULE; |
4431 | 4435 | ||
4432 | /* Setup the StatsDelta */ | 4436 | /* Setup the StatsDelta */ |
4433 | entry = create_proc_entry("StatsDelta", | 4437 | entry = proc_create_data("StatsDelta", |
4434 | S_IFREG | (S_IRUGO&proc_perm), | 4438 | S_IFREG | (S_IRUGO&proc_perm), |
4435 | apriv->proc_entry); | 4439 | apriv->proc_entry, &proc_statsdelta_ops, dev); |
4436 | if (!entry) | 4440 | if (!entry) |
4437 | goto fail_stats_delta; | 4441 | goto fail_stats_delta; |
4438 | entry->uid = proc_uid; | 4442 | entry->uid = proc_uid; |
4439 | entry->gid = proc_gid; | 4443 | entry->gid = proc_gid; |
4440 | entry->data = dev; | ||
4441 | entry->owner = THIS_MODULE; | ||
4442 | SETPROC_OPS(entry, proc_statsdelta_ops); | ||
4443 | 4444 | ||
4444 | /* Setup the Stats */ | 4445 | /* Setup the Stats */ |
4445 | entry = create_proc_entry("Stats", | 4446 | entry = proc_create_data("Stats", |
4446 | S_IFREG | (S_IRUGO&proc_perm), | 4447 | S_IFREG | (S_IRUGO&proc_perm), |
4447 | apriv->proc_entry); | 4448 | apriv->proc_entry, &proc_stats_ops, dev); |
4448 | if (!entry) | 4449 | if (!entry) |
4449 | goto fail_stats; | 4450 | goto fail_stats; |
4450 | entry->uid = proc_uid; | 4451 | entry->uid = proc_uid; |
4451 | entry->gid = proc_gid; | 4452 | entry->gid = proc_gid; |
4452 | entry->data = dev; | ||
4453 | entry->owner = THIS_MODULE; | ||
4454 | SETPROC_OPS(entry, proc_stats_ops); | ||
4455 | 4453 | ||
4456 | /* Setup the Status */ | 4454 | /* Setup the Status */ |
4457 | entry = create_proc_entry("Status", | 4455 | entry = proc_create_data("Status", |
4458 | S_IFREG | (S_IRUGO&proc_perm), | 4456 | S_IFREG | (S_IRUGO&proc_perm), |
4459 | apriv->proc_entry); | 4457 | apriv->proc_entry, &proc_status_ops, dev); |
4460 | if (!entry) | 4458 | if (!entry) |
4461 | goto fail_status; | 4459 | goto fail_status; |
4462 | entry->uid = proc_uid; | 4460 | entry->uid = proc_uid; |
4463 | entry->gid = proc_gid; | 4461 | entry->gid = proc_gid; |
4464 | entry->data = dev; | ||
4465 | entry->owner = THIS_MODULE; | ||
4466 | SETPROC_OPS(entry, proc_status_ops); | ||
4467 | 4462 | ||
4468 | /* Setup the Config */ | 4463 | /* Setup the Config */ |
4469 | entry = create_proc_entry("Config", | 4464 | entry = proc_create_data("Config", |
4470 | S_IFREG | proc_perm, | 4465 | S_IFREG | proc_perm, |
4471 | apriv->proc_entry); | 4466 | apriv->proc_entry, &proc_config_ops, dev); |
4472 | if (!entry) | 4467 | if (!entry) |
4473 | goto fail_config; | 4468 | goto fail_config; |
4474 | entry->uid = proc_uid; | 4469 | entry->uid = proc_uid; |
4475 | entry->gid = proc_gid; | 4470 | entry->gid = proc_gid; |
4476 | entry->data = dev; | ||
4477 | entry->owner = THIS_MODULE; | ||
4478 | SETPROC_OPS(entry, proc_config_ops); | ||
4479 | 4471 | ||
4480 | /* Setup the SSID */ | 4472 | /* Setup the SSID */ |
4481 | entry = create_proc_entry("SSID", | 4473 | entry = proc_create_data("SSID", |
4482 | S_IFREG | proc_perm, | 4474 | S_IFREG | proc_perm, |
4483 | apriv->proc_entry); | 4475 | apriv->proc_entry, &proc_SSID_ops, dev); |
4484 | if (!entry) | 4476 | if (!entry) |
4485 | goto fail_ssid; | 4477 | goto fail_ssid; |
4486 | entry->uid = proc_uid; | 4478 | entry->uid = proc_uid; |
4487 | entry->gid = proc_gid; | 4479 | entry->gid = proc_gid; |
4488 | entry->data = dev; | ||
4489 | entry->owner = THIS_MODULE; | ||
4490 | SETPROC_OPS(entry, proc_SSID_ops); | ||
4491 | 4480 | ||
4492 | /* Setup the APList */ | 4481 | /* Setup the APList */ |
4493 | entry = create_proc_entry("APList", | 4482 | entry = proc_create_data("APList", |
4494 | S_IFREG | proc_perm, | 4483 | S_IFREG | proc_perm, |
4495 | apriv->proc_entry); | 4484 | apriv->proc_entry, &proc_APList_ops, dev); |
4496 | if (!entry) | 4485 | if (!entry) |
4497 | goto fail_aplist; | 4486 | goto fail_aplist; |
4498 | entry->uid = proc_uid; | 4487 | entry->uid = proc_uid; |
4499 | entry->gid = proc_gid; | 4488 | entry->gid = proc_gid; |
4500 | entry->data = dev; | ||
4501 | entry->owner = THIS_MODULE; | ||
4502 | SETPROC_OPS(entry, proc_APList_ops); | ||
4503 | 4489 | ||
4504 | /* Setup the BSSList */ | 4490 | /* Setup the BSSList */ |
4505 | entry = create_proc_entry("BSSList", | 4491 | entry = proc_create_data("BSSList", |
4506 | S_IFREG | proc_perm, | 4492 | S_IFREG | proc_perm, |
4507 | apriv->proc_entry); | 4493 | apriv->proc_entry, &proc_BSSList_ops, dev); |
4508 | if (!entry) | 4494 | if (!entry) |
4509 | goto fail_bsslist; | 4495 | goto fail_bsslist; |
4510 | entry->uid = proc_uid; | 4496 | entry->uid = proc_uid; |
4511 | entry->gid = proc_gid; | 4497 | entry->gid = proc_gid; |
4512 | entry->data = dev; | ||
4513 | entry->owner = THIS_MODULE; | ||
4514 | SETPROC_OPS(entry, proc_BSSList_ops); | ||
4515 | 4498 | ||
4516 | /* Setup the WepKey */ | 4499 | /* Setup the WepKey */ |
4517 | entry = create_proc_entry("WepKey", | 4500 | entry = proc_create_data("WepKey", |
4518 | S_IFREG | proc_perm, | 4501 | S_IFREG | proc_perm, |
4519 | apriv->proc_entry); | 4502 | apriv->proc_entry, &proc_wepkey_ops, dev); |
4520 | if (!entry) | 4503 | if (!entry) |
4521 | goto fail_wepkey; | 4504 | goto fail_wepkey; |
4522 | entry->uid = proc_uid; | 4505 | entry->uid = proc_uid; |
4523 | entry->gid = proc_gid; | 4506 | entry->gid = proc_gid; |
4524 | entry->data = dev; | ||
4525 | entry->owner = THIS_MODULE; | ||
4526 | SETPROC_OPS(entry, proc_wepkey_ops); | ||
4527 | 4507 | ||
4528 | return 0; | 4508 | return 0; |
4529 | 4509 | ||
@@ -5625,9 +5605,9 @@ static int __init airo_init_module( void ) | |||
5625 | int have_isa_dev = 0; | 5605 | int have_isa_dev = 0; |
5626 | #endif | 5606 | #endif |
5627 | 5607 | ||
5628 | airo_entry = create_proc_entry("aironet", | 5608 | airo_entry = create_proc_entry("driver/aironet", |
5629 | S_IFDIR | airo_perm, | 5609 | S_IFDIR | airo_perm, |
5630 | proc_root_driver); | 5610 | NULL); |
5631 | 5611 | ||
5632 | if (airo_entry) { | 5612 | if (airo_entry) { |
5633 | airo_entry->uid = proc_uid; | 5613 | airo_entry->uid = proc_uid; |
@@ -5651,7 +5631,7 @@ static int __init airo_init_module( void ) | |||
5651 | airo_print_info("", "Finished probing for PCI adapters"); | 5631 | airo_print_info("", "Finished probing for PCI adapters"); |
5652 | 5632 | ||
5653 | if (i) { | 5633 | if (i) { |
5654 | remove_proc_entry("aironet", proc_root_driver); | 5634 | remove_proc_entry("driver/aironet", NULL); |
5655 | return i; | 5635 | return i; |
5656 | } | 5636 | } |
5657 | #endif | 5637 | #endif |
@@ -5673,7 +5653,7 @@ static void __exit airo_cleanup_module( void ) | |||
5673 | #ifdef CONFIG_PCI | 5653 | #ifdef CONFIG_PCI |
5674 | pci_unregister_driver(&airo_driver); | 5654 | pci_unregister_driver(&airo_driver); |
5675 | #endif | 5655 | #endif |
5676 | remove_proc_entry("aironet", proc_root_driver); | 5656 | remove_proc_entry("driver/aironet", NULL); |
5677 | } | 5657 | } |
5678 | 5658 | ||
5679 | /* | 5659 | /* |
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c index e18305b781c9..4e5c8fc35200 100644 --- a/drivers/net/wireless/ath5k/base.c +++ b/drivers/net/wireless/ath5k/base.c | |||
@@ -58,10 +58,6 @@ | |||
58 | #include "reg.h" | 58 | #include "reg.h" |
59 | #include "debug.h" | 59 | #include "debug.h" |
60 | 60 | ||
61 | /* unaligned little endian access */ | ||
62 | #define LE_READ_2(_p) (le16_to_cpu(get_unaligned((__le16 *)(_p)))) | ||
63 | #define LE_READ_4(_p) (le32_to_cpu(get_unaligned((__le32 *)(_p)))) | ||
64 | |||
65 | enum { | 61 | enum { |
66 | ATH_LED_TX, | 62 | ATH_LED_TX, |
67 | ATH_LED_RX, | 63 | ATH_LED_RX, |
@@ -2909,9 +2905,9 @@ static void ath5k_configure_filter(struct ieee80211_hw *hw, | |||
2909 | if (!mclist) | 2905 | if (!mclist) |
2910 | break; | 2906 | break; |
2911 | /* calculate XOR of eight 6-bit values */ | 2907 | /* calculate XOR of eight 6-bit values */ |
2912 | val = LE_READ_4(mclist->dmi_addr + 0); | 2908 | val = get_unaligned_le32(mclist->dmi_addr + 0); |
2913 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | 2909 | pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2914 | val = LE_READ_4(mclist->dmi_addr + 3); | 2910 | val = get_unaligned_le32(mclist->dmi_addr + 3); |
2915 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; | 2911 | pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val; |
2916 | pos &= 0x3f; | 2912 | pos &= 0x3f; |
2917 | mfilt[pos / 32] |= (1 << (pos % 32)); | 2913 | mfilt[pos / 32] |= (1 << (pos % 32)); |
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 4bf8a99099fe..8c24cd72aaca 100644 --- a/drivers/net/wireless/b43/main.c +++ b/drivers/net/wireless/b43/main.c | |||
@@ -2171,7 +2171,7 @@ static int b43_write_initvals(struct b43_wldev *dev, | |||
2171 | goto err_format; | 2171 | goto err_format; |
2172 | array_size -= sizeof(iv->data.d32); | 2172 | array_size -= sizeof(iv->data.d32); |
2173 | 2173 | ||
2174 | value = be32_to_cpu(get_unaligned(&iv->data.d32)); | 2174 | value = get_unaligned_be32(&iv->data.d32); |
2175 | b43_write32(dev, offset, value); | 2175 | b43_write32(dev, offset, value); |
2176 | 2176 | ||
2177 | iv = (const struct b43_iv *)((const uint8_t *)iv + | 2177 | iv = (const struct b43_iv *)((const uint8_t *)iv + |
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c index ef829ee8ffd4..14a5eea2573e 100644 --- a/drivers/net/wireless/b43legacy/main.c +++ b/drivers/net/wireless/b43legacy/main.c | |||
@@ -1720,7 +1720,7 @@ static int b43legacy_write_initvals(struct b43legacy_wldev *dev, | |||
1720 | goto err_format; | 1720 | goto err_format; |
1721 | array_size -= sizeof(iv->data.d32); | 1721 | array_size -= sizeof(iv->data.d32); |
1722 | 1722 | ||
1723 | value = be32_to_cpu(get_unaligned(&iv->data.d32)); | 1723 | value = get_unaligned_be32(&iv->data.d32); |
1724 | b43legacy_write32(dev, offset, value); | 1724 | b43legacy_write32(dev, offset, value); |
1725 | 1725 | ||
1726 | iv = (const struct b43legacy_iv *)((const uint8_t *)iv + | 1726 | iv = (const struct b43legacy_iv *)((const uint8_t *)iv + |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index 598e4eef4f40..d3406830c8e3 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -554,40 +554,36 @@ static void iwl3945_add_radiotap(struct iwl3945_priv *priv, | |||
554 | iwl3945_rt->rt_hdr.it_pad = 0; | 554 | iwl3945_rt->rt_hdr.it_pad = 0; |
555 | 555 | ||
556 | /* total header + data */ | 556 | /* total header + data */ |
557 | put_unaligned(cpu_to_le16(sizeof(*iwl3945_rt)), | 557 | put_unaligned_le16(sizeof(*iwl3945_rt), &iwl3945_rt->rt_hdr.it_len); |
558 | &iwl3945_rt->rt_hdr.it_len); | ||
559 | 558 | ||
560 | /* Indicate all the fields we add to the radiotap header */ | 559 | /* Indicate all the fields we add to the radiotap header */ |
561 | put_unaligned(cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | | 560 | put_unaligned_le32((1 << IEEE80211_RADIOTAP_TSFT) | |
562 | (1 << IEEE80211_RADIOTAP_FLAGS) | | 561 | (1 << IEEE80211_RADIOTAP_FLAGS) | |
563 | (1 << IEEE80211_RADIOTAP_RATE) | | 562 | (1 << IEEE80211_RADIOTAP_RATE) | |
564 | (1 << IEEE80211_RADIOTAP_CHANNEL) | | 563 | (1 << IEEE80211_RADIOTAP_CHANNEL) | |
565 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | | 564 | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | |
566 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | | 565 | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE) | |
567 | (1 << IEEE80211_RADIOTAP_ANTENNA)), | 566 | (1 << IEEE80211_RADIOTAP_ANTENNA), |
568 | &iwl3945_rt->rt_hdr.it_present); | 567 | &iwl3945_rt->rt_hdr.it_present); |
569 | 568 | ||
570 | /* Zero the flags, we'll add to them as we go */ | 569 | /* Zero the flags, we'll add to them as we go */ |
571 | iwl3945_rt->rt_flags = 0; | 570 | iwl3945_rt->rt_flags = 0; |
572 | 571 | ||
573 | put_unaligned(cpu_to_le64(tsf), &iwl3945_rt->rt_tsf); | 572 | put_unaligned_le64(tsf, &iwl3945_rt->rt_tsf); |
574 | 573 | ||
575 | iwl3945_rt->rt_dbmsignal = signal; | 574 | iwl3945_rt->rt_dbmsignal = signal; |
576 | iwl3945_rt->rt_dbmnoise = noise; | 575 | iwl3945_rt->rt_dbmnoise = noise; |
577 | 576 | ||
578 | /* Convert the channel frequency and set the flags */ | 577 | /* Convert the channel frequency and set the flags */ |
579 | put_unaligned(cpu_to_le16(stats->freq), &iwl3945_rt->rt_channelMHz); | 578 | put_unaligned_le16(stats->freq, &iwl3945_rt->rt_channelMHz); |
580 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) | 579 | if (!(phy_flags_hw & RX_RES_PHY_FLAGS_BAND_24_MSK)) |
581 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 580 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ, |
582 | IEEE80211_CHAN_5GHZ), | ||
583 | &iwl3945_rt->rt_chbitmask); | 581 | &iwl3945_rt->rt_chbitmask); |
584 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) | 582 | else if (phy_flags_hw & RX_RES_PHY_FLAGS_MOD_CCK_MSK) |
585 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_CCK | | 583 | put_unaligned_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ, |
586 | IEEE80211_CHAN_2GHZ), | ||
587 | &iwl3945_rt->rt_chbitmask); | 584 | &iwl3945_rt->rt_chbitmask); |
588 | else /* 802.11g */ | 585 | else /* 802.11g */ |
589 | put_unaligned(cpu_to_le16(IEEE80211_CHAN_OFDM | | 586 | put_unaligned_le16(IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ, |
590 | IEEE80211_CHAN_2GHZ), | ||
591 | &iwl3945_rt->rt_chbitmask); | 587 | &iwl3945_rt->rt_chbitmask); |
592 | 588 | ||
593 | if (rate == -1) | 589 | if (rate == -1) |
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c index e72c97a0d6c1..1a409fcc80d3 100644 --- a/drivers/net/wireless/libertas/scan.c +++ b/drivers/net/wireless/libertas/scan.c | |||
@@ -522,7 +522,7 @@ static int lbs_process_bss(struct bss_descriptor *bss, | |||
522 | 522 | ||
523 | if (*bytesleft >= sizeof(beaconsize)) { | 523 | if (*bytesleft >= sizeof(beaconsize)) { |
524 | /* Extract & convert beacon size from the command buffer */ | 524 | /* Extract & convert beacon size from the command buffer */ |
525 | beaconsize = le16_to_cpu(get_unaligned((__le16 *)*pbeaconinfo)); | 525 | beaconsize = get_unaligned_le16(*pbeaconinfo); |
526 | *bytesleft -= sizeof(beaconsize); | 526 | *bytesleft -= sizeof(beaconsize); |
527 | *pbeaconinfo += sizeof(beaconsize); | 527 | *pbeaconinfo += sizeof(beaconsize); |
528 | } | 528 | } |
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c index e34675c2f8fc..5316074f39f0 100644 --- a/drivers/net/wireless/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zd1211rw/zd_usb.c | |||
@@ -545,11 +545,11 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer, | |||
545 | * be padded. Unaligned access might also happen if the length_info | 545 | * be padded. Unaligned access might also happen if the length_info |
546 | * structure is not present. | 546 | * structure is not present. |
547 | */ | 547 | */ |
548 | if (get_unaligned(&length_info->tag) == cpu_to_le16(RX_LENGTH_INFO_TAG)) | 548 | if (get_unaligned_le16(&length_info->tag) == RX_LENGTH_INFO_TAG) |
549 | { | 549 | { |
550 | unsigned int l, k, n; | 550 | unsigned int l, k, n; |
551 | for (i = 0, l = 0;; i++) { | 551 | for (i = 0, l = 0;; i++) { |
552 | k = le16_to_cpu(get_unaligned(&length_info->length[i])); | 552 | k = get_unaligned_le16(&length_info->length[i]); |
553 | if (k == 0) | 553 | if (k == 0) |
554 | return; | 554 | return; |
555 | n = l+k; | 555 | n = l+k; |
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c index 24640726f8bb..57e1f495b9fc 100644 --- a/drivers/net/yellowfin.c +++ b/drivers/net/yellowfin.c | |||
@@ -1062,7 +1062,7 @@ static int yellowfin_rx(struct net_device *dev) | |||
1062 | buf_addr = rx_skb->data; | 1062 | buf_addr = rx_skb->data; |
1063 | data_size = (le32_to_cpu(desc->dbdma_cmd) - | 1063 | data_size = (le32_to_cpu(desc->dbdma_cmd) - |
1064 | le32_to_cpu(desc->result_status)) & 0xffff; | 1064 | le32_to_cpu(desc->result_status)) & 0xffff; |
1065 | frame_status = le16_to_cpu(get_unaligned((__le16*)&(buf_addr[data_size - 2]))); | 1065 | frame_status = get_unaligned_le16(&(buf_addr[data_size - 2])); |
1066 | if (yellowfin_debug > 4) | 1066 | if (yellowfin_debug > 4) |
1067 | printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", | 1067 | printk(KERN_DEBUG " yellowfin_rx() status was %4.4x.\n", |
1068 | frame_status); | 1068 | frame_status); |
diff --git a/drivers/nubus/proc.c b/drivers/nubus/proc.c index e07492be1f4a..208dd12825bc 100644 --- a/drivers/nubus/proc.c +++ b/drivers/nubus/proc.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/nubus.h> | 22 | #include <linux/nubus.h> |
23 | #include <linux/proc_fs.h> | 23 | #include <linux/proc_fs.h> |
24 | #include <linux/seq_file.h> | ||
24 | #include <linux/init.h> | 25 | #include <linux/init.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
26 | 27 | ||
@@ -28,38 +29,36 @@ | |||
28 | #include <asm/byteorder.h> | 29 | #include <asm/byteorder.h> |
29 | 30 | ||
30 | static int | 31 | static int |
31 | get_nubus_dev_info(char *buf, char **start, off_t pos, int count) | 32 | nubus_devices_proc_show(struct seq_file *m, void *v) |
32 | { | 33 | { |
33 | struct nubus_dev *dev = nubus_devices; | 34 | struct nubus_dev *dev = nubus_devices; |
34 | off_t at = 0; | ||
35 | int len, cnt; | ||
36 | 35 | ||
37 | cnt = 0; | 36 | while (dev) { |
38 | while (dev && count > cnt) { | 37 | seq_printf(m, "%x\t%04x %04x %04x %04x", |
39 | len = sprintf(buf, "%x\t%04x %04x %04x %04x", | ||
40 | dev->board->slot, | 38 | dev->board->slot, |
41 | dev->category, | 39 | dev->category, |
42 | dev->type, | 40 | dev->type, |
43 | dev->dr_sw, | 41 | dev->dr_sw, |
44 | dev->dr_hw); | 42 | dev->dr_hw); |
45 | len += sprintf(buf+len, | 43 | seq_printf(m, "\t%08lx\n", dev->board->slot_addr); |
46 | "\t%08lx", | ||
47 | dev->board->slot_addr); | ||
48 | buf[len++] = '\n'; | ||
49 | at += len; | ||
50 | if (at >= pos) { | ||
51 | if (!*start) { | ||
52 | *start = buf + (pos - (at - len)); | ||
53 | cnt = at - pos; | ||
54 | } else | ||
55 | cnt += len; | ||
56 | buf += len; | ||
57 | } | ||
58 | dev = dev->next; | 44 | dev = dev->next; |
59 | } | 45 | } |
60 | return (count > cnt) ? cnt : count; | 46 | return 0; |
47 | } | ||
48 | |||
49 | static int nubus_devices_proc_open(struct inode *inode, struct file *file) | ||
50 | { | ||
51 | return single_open(file, nubus_devices_proc_show, NULL); | ||
61 | } | 52 | } |
62 | 53 | ||
54 | static const struct file_operations nubus_devices_proc_fops = { | ||
55 | .owner = THIS_MODULE, | ||
56 | .open = nubus_devices_proc_open, | ||
57 | .read = seq_read, | ||
58 | .llseek = seq_lseek, | ||
59 | .release = single_release, | ||
60 | }; | ||
61 | |||
63 | static struct proc_dir_entry *proc_bus_nubus_dir; | 62 | static struct proc_dir_entry *proc_bus_nubus_dir; |
64 | 63 | ||
65 | static void nubus_proc_subdir(struct nubus_dev* dev, | 64 | static void nubus_proc_subdir(struct nubus_dev* dev, |
@@ -171,8 +170,7 @@ void __init nubus_proc_init(void) | |||
171 | { | 170 | { |
172 | if (!MACH_IS_MAC) | 171 | if (!MACH_IS_MAC) |
173 | return; | 172 | return; |
174 | proc_bus_nubus_dir = proc_mkdir("nubus", proc_bus); | 173 | proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL); |
175 | create_proc_info_entry("devices", 0, proc_bus_nubus_dir, | 174 | proc_create("devices", 0, proc_bus_nubus_dir, &nubus_devices_proc_fops); |
176 | get_nubus_dev_info); | ||
177 | proc_bus_nubus_add_devices(); | 175 | proc_bus_nubus_add_devices(); |
178 | } | 176 | } |
diff --git a/drivers/parisc/ccio-dma.c b/drivers/parisc/ccio-dma.c index 62db3c3fe4dc..07d2a8d4498f 100644 --- a/drivers/parisc/ccio-dma.c +++ b/drivers/parisc/ccio-dma.c | |||
@@ -1551,8 +1551,7 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1551 | { | 1551 | { |
1552 | int i; | 1552 | int i; |
1553 | struct ioc *ioc, **ioc_p = &ioc_list; | 1553 | struct ioc *ioc, **ioc_p = &ioc_list; |
1554 | struct proc_dir_entry *info_entry, *bitmap_entry; | 1554 | |
1555 | |||
1556 | ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); | 1555 | ioc = kzalloc(sizeof(struct ioc), GFP_KERNEL); |
1557 | if (ioc == NULL) { | 1556 | if (ioc == NULL) { |
1558 | printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); | 1557 | printk(KERN_ERR MODULE_NAME ": memory allocation failure\n"); |
@@ -1580,13 +1579,10 @@ static int __init ccio_probe(struct parisc_device *dev) | |||
1580 | HBA_DATA(dev->dev.platform_data)->iommu = ioc; | 1579 | HBA_DATA(dev->dev.platform_data)->iommu = ioc; |
1581 | 1580 | ||
1582 | if (ioc_count == 0) { | 1581 | if (ioc_count == 0) { |
1583 | info_entry = create_proc_entry(MODULE_NAME, 0, proc_runway_root); | 1582 | proc_create(MODULE_NAME, 0, proc_runway_root, |
1584 | if (info_entry) | 1583 | &ccio_proc_info_fops); |
1585 | info_entry->proc_fops = &ccio_proc_info_fops; | 1584 | proc_create(MODULE_NAME"-bitmap", 0, proc_runway_root, |
1586 | 1585 | &ccio_proc_bitmap_fops); | |
1587 | bitmap_entry = create_proc_entry(MODULE_NAME"-bitmap", 0, proc_runway_root); | ||
1588 | if (bitmap_entry) | ||
1589 | bitmap_entry->proc_fops = &ccio_proc_bitmap_fops; | ||
1590 | } | 1586 | } |
1591 | 1587 | ||
1592 | ioc_count++; | 1588 | ioc_count++; |
diff --git a/drivers/parisc/sba_iommu.c b/drivers/parisc/sba_iommu.c index 8c4d2c13d5f2..afc849bd3f58 100644 --- a/drivers/parisc/sba_iommu.c +++ b/drivers/parisc/sba_iommu.c | |||
@@ -1895,7 +1895,9 @@ sba_driver_callback(struct parisc_device *dev) | |||
1895 | int i; | 1895 | int i; |
1896 | char *version; | 1896 | char *version; |
1897 | void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); | 1897 | void __iomem *sba_addr = ioremap_nocache(dev->hpa.start, SBA_FUNC_SIZE); |
1898 | struct proc_dir_entry *info_entry, *bitmap_entry, *root; | 1898 | #ifdef CONFIG_PROC_FS |
1899 | struct proc_dir_entry *root; | ||
1900 | #endif | ||
1899 | 1901 | ||
1900 | sba_dump_ranges(sba_addr); | 1902 | sba_dump_ranges(sba_addr); |
1901 | 1903 | ||
@@ -1973,14 +1975,8 @@ sba_driver_callback(struct parisc_device *dev) | |||
1973 | break; | 1975 | break; |
1974 | } | 1976 | } |
1975 | 1977 | ||
1976 | info_entry = create_proc_entry("sba_iommu", 0, root); | 1978 | proc_create("sba_iommu", 0, root, &sba_proc_fops); |
1977 | bitmap_entry = create_proc_entry("sba_iommu-bitmap", 0, root); | 1979 | proc_create("sba_iommu-bitmap", 0, root, &sba_proc_bitmap_fops); |
1978 | |||
1979 | if (info_entry) | ||
1980 | info_entry->proc_fops = &sba_proc_fops; | ||
1981 | |||
1982 | if (bitmap_entry) | ||
1983 | bitmap_entry->proc_fops = &sba_proc_bitmap_fops; | ||
1984 | #endif | 1980 | #endif |
1985 | 1981 | ||
1986 | parisc_vmerge_boundary = IOVP_SIZE; | 1982 | parisc_vmerge_boundary = IOVP_SIZE; |
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index a85808938205..e71092e80288 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c | |||
@@ -3082,6 +3082,7 @@ static struct pci_driver parport_pc_pci_driver; | |||
3082 | static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} | 3082 | static int __init parport_pc_init_superio(int autoirq, int autodma) {return 0;} |
3083 | #endif /* CONFIG_PCI */ | 3083 | #endif /* CONFIG_PCI */ |
3084 | 3084 | ||
3085 | #ifdef CONFIG_PNP | ||
3085 | 3086 | ||
3086 | static const struct pnp_device_id parport_pc_pnp_tbl[] = { | 3087 | static const struct pnp_device_id parport_pc_pnp_tbl[] = { |
3087 | /* Standard LPT Printer Port */ | 3088 | /* Standard LPT Printer Port */ |
@@ -3148,6 +3149,9 @@ static struct pnp_driver parport_pc_pnp_driver = { | |||
3148 | .remove = parport_pc_pnp_remove, | 3149 | .remove = parport_pc_pnp_remove, |
3149 | }; | 3150 | }; |
3150 | 3151 | ||
3152 | #else | ||
3153 | static struct pnp_driver parport_pc_pnp_driver; | ||
3154 | #endif /* CONFIG_PNP */ | ||
3151 | 3155 | ||
3152 | static int __devinit parport_pc_platform_probe(struct platform_device *pdev) | 3156 | static int __devinit parport_pc_platform_probe(struct platform_device *pdev) |
3153 | { | 3157 | { |
diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index ef18fcd641e2..963a97642ae9 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c | |||
@@ -293,6 +293,7 @@ static int proc_bus_pci_release(struct inode *inode, struct file *file) | |||
293 | #endif /* HAVE_PCI_MMAP */ | 293 | #endif /* HAVE_PCI_MMAP */ |
294 | 294 | ||
295 | static const struct file_operations proc_bus_pci_operations = { | 295 | static const struct file_operations proc_bus_pci_operations = { |
296 | .owner = THIS_MODULE, | ||
296 | .llseek = proc_bus_pci_lseek, | 297 | .llseek = proc_bus_pci_lseek, |
297 | .read = proc_bus_pci_read, | 298 | .read = proc_bus_pci_read, |
298 | .write = proc_bus_pci_write, | 299 | .write = proc_bus_pci_write, |
@@ -406,11 +407,10 @@ int pci_proc_attach_device(struct pci_dev *dev) | |||
406 | } | 407 | } |
407 | 408 | ||
408 | sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); | 409 | sprintf(name, "%02x.%x", PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn)); |
409 | e = create_proc_entry(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir); | 410 | e = proc_create_data(name, S_IFREG | S_IRUGO | S_IWUSR, bus->procdir, |
411 | &proc_bus_pci_operations, dev); | ||
410 | if (!e) | 412 | if (!e) |
411 | return -ENOMEM; | 413 | return -ENOMEM; |
412 | e->proc_fops = &proc_bus_pci_operations; | ||
413 | e->data = dev; | ||
414 | e->size = dev->cfg_size; | 414 | e->size = dev->cfg_size; |
415 | dev->procent = e; | 415 | dev->procent = e; |
416 | 416 | ||
@@ -462,6 +462,7 @@ static int proc_bus_pci_dev_open(struct inode *inode, struct file *file) | |||
462 | return seq_open(file, &proc_bus_pci_devices_op); | 462 | return seq_open(file, &proc_bus_pci_devices_op); |
463 | } | 463 | } |
464 | static const struct file_operations proc_bus_pci_dev_operations = { | 464 | static const struct file_operations proc_bus_pci_dev_operations = { |
465 | .owner = THIS_MODULE, | ||
465 | .open = proc_bus_pci_dev_open, | 466 | .open = proc_bus_pci_dev_open, |
466 | .read = seq_read, | 467 | .read = seq_read, |
467 | .llseek = seq_lseek, | 468 | .llseek = seq_lseek, |
@@ -470,12 +471,10 @@ static const struct file_operations proc_bus_pci_dev_operations = { | |||
470 | 471 | ||
471 | static int __init pci_proc_init(void) | 472 | static int __init pci_proc_init(void) |
472 | { | 473 | { |
473 | struct proc_dir_entry *entry; | ||
474 | struct pci_dev *dev = NULL; | 474 | struct pci_dev *dev = NULL; |
475 | proc_bus_pci_dir = proc_mkdir("pci", proc_bus); | 475 | proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); |
476 | entry = create_proc_entry("devices", 0, proc_bus_pci_dir); | 476 | proc_create("devices", 0, proc_bus_pci_dir, |
477 | if (entry) | 477 | &proc_bus_pci_dev_operations); |
478 | entry->proc_fops = &proc_bus_pci_dev_operations; | ||
479 | proc_initialized = 1; | 478 | proc_initialized = 1; |
480 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { | 479 | while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { |
481 | pci_proc_attach_device(dev); | 480 | pci_proc_attach_device(dev); |
diff --git a/drivers/pcmcia/cistpl.c b/drivers/pcmcia/cistpl.c index 06a85d7d5aa2..36379535f9da 100644 --- a/drivers/pcmcia/cistpl.c +++ b/drivers/pcmcia/cistpl.c | |||
@@ -402,15 +402,6 @@ EXPORT_SYMBOL(pcmcia_replace_cis); | |||
402 | 402 | ||
403 | ======================================================================*/ | 403 | ======================================================================*/ |
404 | 404 | ||
405 | static inline u16 cis_get_u16(void *ptr) | ||
406 | { | ||
407 | return le16_to_cpu(get_unaligned((__le16 *) ptr)); | ||
408 | } | ||
409 | static inline u32 cis_get_u32(void *ptr) | ||
410 | { | ||
411 | return le32_to_cpu(get_unaligned((__le32 *) ptr)); | ||
412 | } | ||
413 | |||
414 | typedef struct tuple_flags { | 405 | typedef struct tuple_flags { |
415 | u_int link_space:4; | 406 | u_int link_space:4; |
416 | u_int has_link:1; | 407 | u_int has_link:1; |
@@ -471,7 +462,7 @@ static int follow_link(struct pcmcia_socket *s, tuple_t *tuple) | |||
471 | /* Get indirect link from the MFC tuple */ | 462 | /* Get indirect link from the MFC tuple */ |
472 | read_cis_cache(s, LINK_SPACE(tuple->Flags), | 463 | read_cis_cache(s, LINK_SPACE(tuple->Flags), |
473 | tuple->LinkOffset, 5, link); | 464 | tuple->LinkOffset, 5, link); |
474 | ofs = cis_get_u32(link + 1); | 465 | ofs = get_unaligned_le32(link + 1); |
475 | SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); | 466 | SPACE(tuple->Flags) = (link[0] == CISTPL_MFC_ATTR); |
476 | /* Move to the next indirect link */ | 467 | /* Move to the next indirect link */ |
477 | tuple->LinkOffset += 5; | 468 | tuple->LinkOffset += 5; |
@@ -679,8 +670,8 @@ static int parse_checksum(tuple_t *tuple, cistpl_checksum_t *csum) | |||
679 | if (tuple->TupleDataLen < 5) | 670 | if (tuple->TupleDataLen < 5) |
680 | return CS_BAD_TUPLE; | 671 | return CS_BAD_TUPLE; |
681 | p = (u_char *) tuple->TupleData; | 672 | p = (u_char *) tuple->TupleData; |
682 | csum->addr = tuple->CISOffset + cis_get_u16(p) - 2; | 673 | csum->addr = tuple->CISOffset + get_unaligned_le16(p) - 2; |
683 | csum->len = cis_get_u16(p + 2); | 674 | csum->len = get_unaligned_le16(p + 2); |
684 | csum->sum = *(p + 4); | 675 | csum->sum = *(p + 4); |
685 | return CS_SUCCESS; | 676 | return CS_SUCCESS; |
686 | } | 677 | } |
@@ -691,7 +682,7 @@ static int parse_longlink(tuple_t *tuple, cistpl_longlink_t *link) | |||
691 | { | 682 | { |
692 | if (tuple->TupleDataLen < 4) | 683 | if (tuple->TupleDataLen < 4) |
693 | return CS_BAD_TUPLE; | 684 | return CS_BAD_TUPLE; |
694 | link->addr = cis_get_u32(tuple->TupleData); | 685 | link->addr = get_unaligned_le32(tuple->TupleData); |
695 | return CS_SUCCESS; | 686 | return CS_SUCCESS; |
696 | } | 687 | } |
697 | 688 | ||
@@ -710,7 +701,7 @@ static int parse_longlink_mfc(tuple_t *tuple, | |||
710 | return CS_BAD_TUPLE; | 701 | return CS_BAD_TUPLE; |
711 | for (i = 0; i < link->nfn; i++) { | 702 | for (i = 0; i < link->nfn; i++) { |
712 | link->fn[i].space = *p; p++; | 703 | link->fn[i].space = *p; p++; |
713 | link->fn[i].addr = cis_get_u32(p); | 704 | link->fn[i].addr = get_unaligned_le32(p); |
714 | p += 4; | 705 | p += 4; |
715 | } | 706 | } |
716 | return CS_SUCCESS; | 707 | return CS_SUCCESS; |
@@ -800,8 +791,8 @@ static int parse_manfid(tuple_t *tuple, cistpl_manfid_t *m) | |||
800 | { | 791 | { |
801 | if (tuple->TupleDataLen < 4) | 792 | if (tuple->TupleDataLen < 4) |
802 | return CS_BAD_TUPLE; | 793 | return CS_BAD_TUPLE; |
803 | m->manf = cis_get_u16(tuple->TupleData); | 794 | m->manf = get_unaligned_le16(tuple->TupleData); |
804 | m->card = cis_get_u16(tuple->TupleData + 2); | 795 | m->card = get_unaligned_le16(tuple->TupleData + 2); |
805 | return CS_SUCCESS; | 796 | return CS_SUCCESS; |
806 | } | 797 | } |
807 | 798 | ||
@@ -1100,7 +1091,7 @@ static int parse_cftable_entry(tuple_t *tuple, | |||
1100 | break; | 1091 | break; |
1101 | case 0x20: | 1092 | case 0x20: |
1102 | entry->mem.nwin = 1; | 1093 | entry->mem.nwin = 1; |
1103 | entry->mem.win[0].len = cis_get_u16(p) << 8; | 1094 | entry->mem.win[0].len = get_unaligned_le16(p) << 8; |
1104 | entry->mem.win[0].card_addr = 0; | 1095 | entry->mem.win[0].card_addr = 0; |
1105 | entry->mem.win[0].host_addr = 0; | 1096 | entry->mem.win[0].host_addr = 0; |
1106 | p += 2; | 1097 | p += 2; |
@@ -1108,8 +1099,8 @@ static int parse_cftable_entry(tuple_t *tuple, | |||
1108 | break; | 1099 | break; |
1109 | case 0x40: | 1100 | case 0x40: |
1110 | entry->mem.nwin = 1; | 1101 | entry->mem.nwin = 1; |
1111 | entry->mem.win[0].len = cis_get_u16(p) << 8; | 1102 | entry->mem.win[0].len = get_unaligned_le16(p) << 8; |
1112 | entry->mem.win[0].card_addr = cis_get_u16(p + 2) << 8; | 1103 | entry->mem.win[0].card_addr = get_unaligned_le16(p + 2) << 8; |
1113 | entry->mem.win[0].host_addr = 0; | 1104 | entry->mem.win[0].host_addr = 0; |
1114 | p += 4; | 1105 | p += 4; |
1115 | if (p > q) return CS_BAD_TUPLE; | 1106 | if (p > q) return CS_BAD_TUPLE; |
@@ -1146,7 +1137,7 @@ static int parse_bar(tuple_t *tuple, cistpl_bar_t *bar) | |||
1146 | p = (u_char *)tuple->TupleData; | 1137 | p = (u_char *)tuple->TupleData; |
1147 | bar->attr = *p; | 1138 | bar->attr = *p; |
1148 | p += 2; | 1139 | p += 2; |
1149 | bar->size = cis_get_u32(p); | 1140 | bar->size = get_unaligned_le32(p); |
1150 | return CS_SUCCESS; | 1141 | return CS_SUCCESS; |
1151 | } | 1142 | } |
1152 | 1143 | ||
@@ -1159,7 +1150,7 @@ static int parse_config_cb(tuple_t *tuple, cistpl_config_t *config) | |||
1159 | return CS_BAD_TUPLE; | 1150 | return CS_BAD_TUPLE; |
1160 | config->last_idx = *(++p); | 1151 | config->last_idx = *(++p); |
1161 | p++; | 1152 | p++; |
1162 | config->base = cis_get_u32(p); | 1153 | config->base = get_unaligned_le32(p); |
1163 | config->subtuples = tuple->TupleDataLen - 6; | 1154 | config->subtuples = tuple->TupleDataLen - 6; |
1164 | return CS_SUCCESS; | 1155 | return CS_SUCCESS; |
1165 | } | 1156 | } |
@@ -1275,7 +1266,7 @@ static int parse_vers_2(tuple_t *tuple, cistpl_vers_2_t *v2) | |||
1275 | 1266 | ||
1276 | v2->vers = p[0]; | 1267 | v2->vers = p[0]; |
1277 | v2->comply = p[1]; | 1268 | v2->comply = p[1]; |
1278 | v2->dindex = cis_get_u16(p +2 ); | 1269 | v2->dindex = get_unaligned_le16(p +2 ); |
1279 | v2->vspec8 = p[6]; | 1270 | v2->vspec8 = p[6]; |
1280 | v2->vspec9 = p[7]; | 1271 | v2->vspec9 = p[7]; |
1281 | v2->nhdr = p[8]; | 1272 | v2->nhdr = p[8]; |
@@ -1316,8 +1307,8 @@ static int parse_format(tuple_t *tuple, cistpl_format_t *fmt) | |||
1316 | 1307 | ||
1317 | fmt->type = p[0]; | 1308 | fmt->type = p[0]; |
1318 | fmt->edc = p[1]; | 1309 | fmt->edc = p[1]; |
1319 | fmt->offset = cis_get_u32(p + 2); | 1310 | fmt->offset = get_unaligned_le32(p + 2); |
1320 | fmt->length = cis_get_u32(p + 6); | 1311 | fmt->length = get_unaligned_le32(p + 6); |
1321 | 1312 | ||
1322 | return CS_SUCCESS; | 1313 | return CS_SUCCESS; |
1323 | } | 1314 | } |
diff --git a/drivers/pnp/isapnp/proc.c b/drivers/pnp/isapnp/proc.c index 2b8266c3d40f..3f94edab25fa 100644 --- a/drivers/pnp/isapnp/proc.c +++ b/drivers/pnp/isapnp/proc.c | |||
@@ -85,6 +85,7 @@ static ssize_t isapnp_proc_bus_read(struct file *file, char __user * buf, | |||
85 | } | 85 | } |
86 | 86 | ||
87 | static const struct file_operations isapnp_proc_bus_file_operations = { | 87 | static const struct file_operations isapnp_proc_bus_file_operations = { |
88 | .owner = THIS_MODULE, | ||
88 | .llseek = isapnp_proc_bus_lseek, | 89 | .llseek = isapnp_proc_bus_lseek, |
89 | .read = isapnp_proc_bus_read, | 90 | .read = isapnp_proc_bus_read, |
90 | }; | 91 | }; |
@@ -102,12 +103,10 @@ static int isapnp_proc_attach_device(struct pnp_dev *dev) | |||
102 | return -ENOMEM; | 103 | return -ENOMEM; |
103 | } | 104 | } |
104 | sprintf(name, "%02x", dev->number); | 105 | sprintf(name, "%02x", dev->number); |
105 | e = dev->procent = create_proc_entry(name, S_IFREG | S_IRUGO, de); | 106 | e = dev->procent = proc_create_data(name, S_IFREG | S_IRUGO, de, |
107 | &isapnp_proc_bus_file_operations, dev); | ||
106 | if (!e) | 108 | if (!e) |
107 | return -ENOMEM; | 109 | return -ENOMEM; |
108 | e->proc_fops = &isapnp_proc_bus_file_operations; | ||
109 | e->owner = THIS_MODULE; | ||
110 | e->data = dev; | ||
111 | e->size = 256; | 110 | e->size = 256; |
112 | return 0; | 111 | return 0; |
113 | } | 112 | } |
@@ -116,7 +115,7 @@ int __init isapnp_proc_init(void) | |||
116 | { | 115 | { |
117 | struct pnp_dev *dev; | 116 | struct pnp_dev *dev; |
118 | 117 | ||
119 | isapnp_proc_bus_dir = proc_mkdir("isapnp", proc_bus); | 118 | isapnp_proc_bus_dir = proc_mkdir("bus/isapnp", NULL); |
120 | protocol_for_each_dev(&isapnp_protocol, dev) { | 119 | protocol_for_each_dev(&isapnp_protocol, dev) { |
121 | isapnp_proc_attach_device(dev); | 120 | isapnp_proc_attach_device(dev); |
122 | } | 121 | } |
diff --git a/drivers/pnp/pnpbios/proc.c b/drivers/pnp/pnpbios/proc.c index bb19bc957bad..46d506f66259 100644 --- a/drivers/pnp/pnpbios/proc.c +++ b/drivers/pnp/pnpbios/proc.c | |||
@@ -256,7 +256,7 @@ int pnpbios_interface_attach_device(struct pnp_bios_node *node) | |||
256 | */ | 256 | */ |
257 | int __init pnpbios_proc_init(void) | 257 | int __init pnpbios_proc_init(void) |
258 | { | 258 | { |
259 | proc_pnp = proc_mkdir("pnp", proc_bus); | 259 | proc_pnp = proc_mkdir("bus/pnp", NULL); |
260 | if (!proc_pnp) | 260 | if (!proc_pnp) |
261 | return -EIO; | 261 | return -EIO; |
262 | proc_pnp_boot = proc_mkdir("boot", proc_pnp); | 262 | proc_pnp_boot = proc_mkdir("boot", proc_pnp); |
@@ -294,5 +294,5 @@ void __exit pnpbios_proc_exit(void) | |||
294 | remove_proc_entry("configuration_info", proc_pnp); | 294 | remove_proc_entry("configuration_info", proc_pnp); |
295 | remove_proc_entry("devices", proc_pnp); | 295 | remove_proc_entry("devices", proc_pnp); |
296 | remove_proc_entry("boot", proc_pnp); | 296 | remove_proc_entry("boot", proc_pnp); |
297 | remove_proc_entry("pnp", proc_bus); | 297 | remove_proc_entry("bus/pnp", NULL); |
298 | } | 298 | } |
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index bdb9b7285b3d..71be36f18709 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c | |||
@@ -262,7 +262,7 @@ static void ds2760_battery_work(struct work_struct *work) | |||
262 | struct ds2760_device_info, monitor_work.work); | 262 | struct ds2760_device_info, monitor_work.work); |
263 | const int interval = HZ * 60; | 263 | const int interval = HZ * 60; |
264 | 264 | ||
265 | dev_dbg(di->dev, "%s\n", __FUNCTION__); | 265 | dev_dbg(di->dev, "%s\n", __func__); |
266 | 266 | ||
267 | ds2760_battery_update_status(di); | 267 | ds2760_battery_update_status(di); |
268 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval); | 268 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, interval); |
@@ -275,7 +275,7 @@ static void ds2760_battery_external_power_changed(struct power_supply *psy) | |||
275 | { | 275 | { |
276 | struct ds2760_device_info *di = to_ds2760_device_info(psy); | 276 | struct ds2760_device_info *di = to_ds2760_device_info(psy); |
277 | 277 | ||
278 | dev_dbg(di->dev, "%s\n", __FUNCTION__); | 278 | dev_dbg(di->dev, "%s\n", __func__); |
279 | 279 | ||
280 | cancel_delayed_work(&di->monitor_work); | 280 | cancel_delayed_work(&di->monitor_work); |
281 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); | 281 | queue_delayed_work(di->monitor_wqueue, &di->monitor_work, HZ/10); |
diff --git a/drivers/power/olpc_battery.c b/drivers/power/olpc_battery.c index af7a231092a4..ab1e8289f07f 100644 --- a/drivers/power/olpc_battery.c +++ b/drivers/power/olpc_battery.c | |||
@@ -315,7 +315,6 @@ static int __init olpc_bat_init(void) | |||
315 | if (ret) | 315 | if (ret) |
316 | goto battery_failed; | 316 | goto battery_failed; |
317 | 317 | ||
318 | olpc_register_battery_callback(&olpc_battery_trigger_uevent); | ||
319 | goto success; | 318 | goto success; |
320 | 319 | ||
321 | battery_failed: | 320 | battery_failed: |
@@ -328,7 +327,6 @@ success: | |||
328 | 327 | ||
329 | static void __exit olpc_bat_exit(void) | 328 | static void __exit olpc_bat_exit(void) |
330 | { | 329 | { |
331 | olpc_deregister_battery_callback(); | ||
332 | power_supply_unregister(&olpc_bat); | 330 | power_supply_unregister(&olpc_bat); |
333 | power_supply_unregister(&olpc_ac); | 331 | power_supply_unregister(&olpc_ac); |
334 | platform_device_unregister(bat_pdev); | 332 | platform_device_unregister(bat_pdev); |
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c index 03d6a38464ef..138dd76ee347 100644 --- a/drivers/power/power_supply_core.c +++ b/drivers/power/power_supply_core.c | |||
@@ -39,7 +39,7 @@ static void power_supply_changed_work(struct work_struct *work) | |||
39 | struct power_supply *psy = container_of(work, struct power_supply, | 39 | struct power_supply *psy = container_of(work, struct power_supply, |
40 | changed_work); | 40 | changed_work); |
41 | 41 | ||
42 | dev_dbg(psy->dev, "%s\n", __FUNCTION__); | 42 | dev_dbg(psy->dev, "%s\n", __func__); |
43 | 43 | ||
44 | class_for_each_device(power_supply_class, psy, | 44 | class_for_each_device(power_supply_class, psy, |
45 | __power_supply_changed_work); | 45 | __power_supply_changed_work); |
@@ -51,7 +51,7 @@ static void power_supply_changed_work(struct work_struct *work) | |||
51 | 51 | ||
52 | void power_supply_changed(struct power_supply *psy) | 52 | void power_supply_changed(struct power_supply *psy) |
53 | { | 53 | { |
54 | dev_dbg(psy->dev, "%s\n", __FUNCTION__); | 54 | dev_dbg(psy->dev, "%s\n", __func__); |
55 | 55 | ||
56 | schedule_work(&psy->changed_work); | 56 | schedule_work(&psy->changed_work); |
57 | } | 57 | } |
@@ -82,7 +82,7 @@ int power_supply_am_i_supplied(struct power_supply *psy) | |||
82 | error = class_for_each_device(power_supply_class, psy, | 82 | error = class_for_each_device(power_supply_class, psy, |
83 | __power_supply_am_i_supplied); | 83 | __power_supply_am_i_supplied); |
84 | 84 | ||
85 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, error); | 85 | dev_dbg(psy->dev, "%s %d\n", __func__, error); |
86 | 86 | ||
87 | return error; | 87 | return error; |
88 | } | 88 | } |
diff --git a/drivers/power/power_supply_leds.c b/drivers/power/power_supply_leds.c index fa3034f85c38..2dece40c544f 100644 --- a/drivers/power/power_supply_leds.c +++ b/drivers/power/power_supply_leds.c | |||
@@ -24,7 +24,7 @@ static void power_supply_update_bat_leds(struct power_supply *psy) | |||
24 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) | 24 | if (psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) |
25 | return; | 25 | return; |
26 | 26 | ||
27 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, status.intval); | 27 | dev_dbg(psy->dev, "%s %d\n", __func__, status.intval); |
28 | 28 | ||
29 | switch (status.intval) { | 29 | switch (status.intval) { |
30 | case POWER_SUPPLY_STATUS_FULL: | 30 | case POWER_SUPPLY_STATUS_FULL: |
@@ -101,7 +101,7 @@ static void power_supply_update_gen_leds(struct power_supply *psy) | |||
101 | if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) | 101 | if (psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) |
102 | return; | 102 | return; |
103 | 103 | ||
104 | dev_dbg(psy->dev, "%s %d\n", __FUNCTION__, online.intval); | 104 | dev_dbg(psy->dev, "%s %d\n", __func__, online.intval); |
105 | 105 | ||
106 | if (online.intval) | 106 | if (online.intval) |
107 | led_trigger_event(psy->online_trig, LED_FULL); | 107 | led_trigger_event(psy->online_trig, LED_FULL); |
diff --git a/drivers/rtc/rtc-bfin.c b/drivers/rtc/rtc-bfin.c index 4f28045d9ef2..8624f55d0560 100644 --- a/drivers/rtc/rtc-bfin.c +++ b/drivers/rtc/rtc-bfin.c | |||
@@ -419,7 +419,7 @@ static int __devinit bfin_rtc_probe(struct platform_device *pdev) | |||
419 | return -ENOMEM; | 419 | return -ENOMEM; |
420 | 420 | ||
421 | rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); | 421 | rtc->rtc_dev = rtc_device_register(pdev->name, &pdev->dev, &bfin_rtc_ops, THIS_MODULE); |
422 | if (unlikely(IS_ERR(rtc))) { | 422 | if (IS_ERR(rtc)) { |
423 | ret = PTR_ERR(rtc->rtc_dev); | 423 | ret = PTR_ERR(rtc->rtc_dev); |
424 | goto err; | 424 | goto err; |
425 | } | 425 | } |
diff --git a/drivers/rtc/rtc-proc.c b/drivers/rtc/rtc-proc.c index 8d300e6d0d9e..0c6257a034ff 100644 --- a/drivers/rtc/rtc-proc.c +++ b/drivers/rtc/rtc-proc.c | |||
@@ -108,12 +108,10 @@ void rtc_proc_add_device(struct rtc_device *rtc) | |||
108 | if (rtc->id == 0) { | 108 | if (rtc->id == 0) { |
109 | struct proc_dir_entry *ent; | 109 | struct proc_dir_entry *ent; |
110 | 110 | ||
111 | ent = create_proc_entry("driver/rtc", 0, NULL); | 111 | ent = proc_create_data("driver/rtc", 0, NULL, |
112 | if (ent) { | 112 | &rtc_proc_fops, rtc); |
113 | ent->proc_fops = &rtc_proc_fops; | 113 | if (ent) |
114 | ent->owner = rtc->owner; | 114 | ent->owner = rtc->owner; |
115 | ent->data = rtc; | ||
116 | } | ||
117 | } | 115 | } |
118 | } | 116 | } |
119 | 117 | ||
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c index 556063e8f7a9..03c0e40a92ff 100644 --- a/drivers/s390/block/dasd_proc.c +++ b/drivers/s390/block/dasd_proc.c | |||
@@ -157,6 +157,7 @@ static int dasd_devices_open(struct inode *inode, struct file *file) | |||
157 | } | 157 | } |
158 | 158 | ||
159 | static const struct file_operations dasd_devices_file_ops = { | 159 | static const struct file_operations dasd_devices_file_ops = { |
160 | .owner = THIS_MODULE, | ||
160 | .open = dasd_devices_open, | 161 | .open = dasd_devices_open, |
161 | .read = seq_read, | 162 | .read = seq_read, |
162 | .llseek = seq_lseek, | 163 | .llseek = seq_lseek, |
@@ -311,17 +312,16 @@ out_error: | |||
311 | int | 312 | int |
312 | dasd_proc_init(void) | 313 | dasd_proc_init(void) |
313 | { | 314 | { |
314 | dasd_proc_root_entry = proc_mkdir("dasd", &proc_root); | 315 | dasd_proc_root_entry = proc_mkdir("dasd", NULL); |
315 | if (!dasd_proc_root_entry) | 316 | if (!dasd_proc_root_entry) |
316 | goto out_nodasd; | 317 | goto out_nodasd; |
317 | dasd_proc_root_entry->owner = THIS_MODULE; | 318 | dasd_proc_root_entry->owner = THIS_MODULE; |
318 | dasd_devices_entry = create_proc_entry("devices", | 319 | dasd_devices_entry = proc_create("devices", |
319 | S_IFREG | S_IRUGO | S_IWUSR, | 320 | S_IFREG | S_IRUGO | S_IWUSR, |
320 | dasd_proc_root_entry); | 321 | dasd_proc_root_entry, |
322 | &dasd_devices_file_ops); | ||
321 | if (!dasd_devices_entry) | 323 | if (!dasd_devices_entry) |
322 | goto out_nodevices; | 324 | goto out_nodevices; |
323 | dasd_devices_entry->proc_fops = &dasd_devices_file_ops; | ||
324 | dasd_devices_entry->owner = THIS_MODULE; | ||
325 | dasd_statistics_entry = create_proc_entry("statistics", | 325 | dasd_statistics_entry = create_proc_entry("statistics", |
326 | S_IFREG | S_IRUGO | S_IWUSR, | 326 | S_IFREG | S_IRUGO | S_IWUSR, |
327 | dasd_proc_root_entry); | 327 | dasd_proc_root_entry); |
@@ -335,7 +335,7 @@ dasd_proc_init(void) | |||
335 | out_nostatistics: | 335 | out_nostatistics: |
336 | remove_proc_entry("devices", dasd_proc_root_entry); | 336 | remove_proc_entry("devices", dasd_proc_root_entry); |
337 | out_nodevices: | 337 | out_nodevices: |
338 | remove_proc_entry("dasd", &proc_root); | 338 | remove_proc_entry("dasd", NULL); |
339 | out_nodasd: | 339 | out_nodasd: |
340 | return -ENOENT; | 340 | return -ENOENT; |
341 | } | 341 | } |
@@ -345,5 +345,5 @@ dasd_proc_exit(void) | |||
345 | { | 345 | { |
346 | remove_proc_entry("devices", dasd_proc_root_entry); | 346 | remove_proc_entry("devices", dasd_proc_root_entry); |
347 | remove_proc_entry("statistics", dasd_proc_root_entry); | 347 | remove_proc_entry("statistics", dasd_proc_root_entry); |
348 | remove_proc_entry("dasd", &proc_root); | 348 | remove_proc_entry("dasd", NULL); |
349 | } | 349 | } |
diff --git a/drivers/s390/char/tape_proc.c b/drivers/s390/char/tape_proc.c index c9b96d51b28f..e7c888c14e71 100644 --- a/drivers/s390/char/tape_proc.c +++ b/drivers/s390/char/tape_proc.c | |||
@@ -111,6 +111,7 @@ static int tape_proc_open(struct inode *inode, struct file *file) | |||
111 | 111 | ||
112 | static const struct file_operations tape_proc_ops = | 112 | static const struct file_operations tape_proc_ops = |
113 | { | 113 | { |
114 | .owner = THIS_MODULE, | ||
114 | .open = tape_proc_open, | 115 | .open = tape_proc_open, |
115 | .read = seq_read, | 116 | .read = seq_read, |
116 | .llseek = seq_lseek, | 117 | .llseek = seq_lseek, |
@@ -124,14 +125,12 @@ void | |||
124 | tape_proc_init(void) | 125 | tape_proc_init(void) |
125 | { | 126 | { |
126 | tape_proc_devices = | 127 | tape_proc_devices = |
127 | create_proc_entry ("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, | 128 | proc_create("tapedevices", S_IFREG | S_IRUGO | S_IWUSR, NULL, |
128 | &proc_root); | 129 | &tape_proc_ops); |
129 | if (tape_proc_devices == NULL) { | 130 | if (tape_proc_devices == NULL) { |
130 | PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); | 131 | PRINT_WARN("tape: Cannot register procfs entry tapedevices\n"); |
131 | return; | 132 | return; |
132 | } | 133 | } |
133 | tape_proc_devices->proc_fops = &tape_proc_ops; | ||
134 | tape_proc_devices->owner = THIS_MODULE; | ||
135 | } | 134 | } |
136 | 135 | ||
137 | /* | 136 | /* |
@@ -141,5 +140,5 @@ void | |||
141 | tape_proc_cleanup(void) | 140 | tape_proc_cleanup(void) |
142 | { | 141 | { |
143 | if (tape_proc_devices != NULL) | 142 | if (tape_proc_devices != NULL) |
144 | remove_proc_entry ("tapedevices", &proc_root); | 143 | remove_proc_entry ("tapedevices", NULL); |
145 | } | 144 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index e8597ec92247..40ef948fcb3a 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -374,13 +374,10 @@ cio_ignore_proc_init (void) | |||
374 | { | 374 | { |
375 | struct proc_dir_entry *entry; | 375 | struct proc_dir_entry *entry; |
376 | 376 | ||
377 | entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, | 377 | entry = proc_create("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, NULL, |
378 | &proc_root); | 378 | &cio_ignore_proc_fops); |
379 | if (!entry) | 379 | if (!entry) |
380 | return -ENOENT; | 380 | return -ENOENT; |
381 | |||
382 | entry->proc_fops = &cio_ignore_proc_fops; | ||
383 | |||
384 | return 0; | 381 | return 0; |
385 | } | 382 | } |
386 | 383 | ||
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index 10aa1e780801..43876e287370 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -3632,7 +3632,7 @@ qdio_add_procfs_entry(void) | |||
3632 | { | 3632 | { |
3633 | proc_perf_file_registration=0; | 3633 | proc_perf_file_registration=0; |
3634 | qdio_perf_proc_file=create_proc_entry(QDIO_PERF, | 3634 | qdio_perf_proc_file=create_proc_entry(QDIO_PERF, |
3635 | S_IFREG|0444,&proc_root); | 3635 | S_IFREG|0444,NULL); |
3636 | if (qdio_perf_proc_file) { | 3636 | if (qdio_perf_proc_file) { |
3637 | qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; | 3637 | qdio_perf_proc_file->read_proc=&qdio_perf_procfile_read; |
3638 | } else proc_perf_file_registration=-1; | 3638 | } else proc_perf_file_registration=-1; |
@@ -3647,7 +3647,7 @@ static void | |||
3647 | qdio_remove_procfs_entry(void) | 3647 | qdio_remove_procfs_entry(void) |
3648 | { | 3648 | { |
3649 | if (!proc_perf_file_registration) /* means if it went ok earlier */ | 3649 | if (!proc_perf_file_registration) /* means if it went ok earlier */ |
3650 | remove_proc_entry(QDIO_PERF,&proc_root); | 3650 | remove_proc_entry(QDIO_PERF,NULL); |
3651 | } | 3651 | } |
3652 | 3652 | ||
3653 | /** | 3653 | /** |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index b135a1ed4b2c..18551aaf5e09 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -4996,7 +4996,7 @@ static int __init megaraid_init(void) | |||
4996 | max_mbox_busy_wait = MBOX_BUSY_WAIT; | 4996 | max_mbox_busy_wait = MBOX_BUSY_WAIT; |
4997 | 4997 | ||
4998 | #ifdef CONFIG_PROC_FS | 4998 | #ifdef CONFIG_PROC_FS |
4999 | mega_proc_dir_entry = proc_mkdir("megaraid", &proc_root); | 4999 | mega_proc_dir_entry = proc_mkdir("megaraid", NULL); |
5000 | if (!mega_proc_dir_entry) { | 5000 | if (!mega_proc_dir_entry) { |
5001 | printk(KERN_WARNING | 5001 | printk(KERN_WARNING |
5002 | "megaraid: failed to create megaraid root\n"); | 5002 | "megaraid: failed to create megaraid root\n"); |
@@ -5005,7 +5005,7 @@ static int __init megaraid_init(void) | |||
5005 | error = pci_register_driver(&megaraid_pci_driver); | 5005 | error = pci_register_driver(&megaraid_pci_driver); |
5006 | if (error) { | 5006 | if (error) { |
5007 | #ifdef CONFIG_PROC_FS | 5007 | #ifdef CONFIG_PROC_FS |
5008 | remove_proc_entry("megaraid", &proc_root); | 5008 | remove_proc_entry("megaraid", NULL); |
5009 | #endif | 5009 | #endif |
5010 | return error; | 5010 | return error; |
5011 | } | 5011 | } |
@@ -5035,7 +5035,7 @@ static void __exit megaraid_exit(void) | |||
5035 | pci_unregister_driver(&megaraid_pci_driver); | 5035 | pci_unregister_driver(&megaraid_pci_driver); |
5036 | 5036 | ||
5037 | #ifdef CONFIG_PROC_FS | 5037 | #ifdef CONFIG_PROC_FS |
5038 | remove_proc_entry("megaraid", &proc_root); | 5038 | remove_proc_entry("megaraid", NULL); |
5039 | #endif | 5039 | #endif |
5040 | } | 5040 | } |
5041 | 5041 | ||
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index b8de041bc0ae..a235802f2981 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c | |||
@@ -449,37 +449,40 @@ int scsi_get_device_flags(struct scsi_device *sdev, | |||
449 | } | 449 | } |
450 | 450 | ||
451 | #ifdef CONFIG_SCSI_PROC_FS | 451 | #ifdef CONFIG_SCSI_PROC_FS |
452 | /* | 452 | static int devinfo_seq_show(struct seq_file *m, void *v) |
453 | * proc_scsi_dev_info_read: dump the scsi_dev_info_list via | ||
454 | * /proc/scsi/device_info | ||
455 | */ | ||
456 | static int proc_scsi_devinfo_read(char *buffer, char **start, | ||
457 | off_t offset, int length) | ||
458 | { | 453 | { |
459 | struct scsi_dev_info_list *devinfo; | 454 | struct scsi_dev_info_list *devinfo = |
460 | int size, len = 0; | 455 | list_entry(v, struct scsi_dev_info_list, dev_info_list); |
461 | off_t begin = 0; | ||
462 | off_t pos = 0; | ||
463 | 456 | ||
464 | list_for_each_entry(devinfo, &scsi_dev_info_list, dev_info_list) { | 457 | seq_printf(m, "'%.8s' '%.16s' 0x%x\n", |
465 | size = sprintf(buffer + len, "'%.8s' '%.16s' 0x%x\n", | ||
466 | devinfo->vendor, devinfo->model, devinfo->flags); | 458 | devinfo->vendor, devinfo->model, devinfo->flags); |
467 | len += size; | 459 | return 0; |
468 | pos = begin + len; | 460 | } |
469 | if (pos < offset) { | 461 | |
470 | len = 0; | 462 | static void * devinfo_seq_start(struct seq_file *m, loff_t *pos) |
471 | begin = pos; | 463 | { |
472 | } | 464 | return seq_list_start(&scsi_dev_info_list, *pos); |
473 | if (pos > offset + length) | 465 | } |
474 | goto stop_output; | ||
475 | } | ||
476 | 466 | ||
477 | stop_output: | 467 | static void * devinfo_seq_next(struct seq_file *m, void *v, loff_t *pos) |
478 | *start = buffer + (offset - begin); /* Start of wanted data */ | 468 | { |
479 | len -= (offset - begin); /* Start slop */ | 469 | return seq_list_next(v, &scsi_dev_info_list, pos); |
480 | if (len > length) | 470 | } |
481 | len = length; /* Ending slop */ | 471 | |
482 | return (len); | 472 | static void devinfo_seq_stop(struct seq_file *m, void *v) |
473 | { | ||
474 | } | ||
475 | |||
476 | static const struct seq_operations scsi_devinfo_seq_ops = { | ||
477 | .start = devinfo_seq_start, | ||
478 | .next = devinfo_seq_next, | ||
479 | .stop = devinfo_seq_stop, | ||
480 | .show = devinfo_seq_show, | ||
481 | }; | ||
482 | |||
483 | static int proc_scsi_devinfo_open(struct inode *inode, struct file *file) | ||
484 | { | ||
485 | return seq_open(file, &scsi_devinfo_seq_ops); | ||
483 | } | 486 | } |
484 | 487 | ||
485 | /* | 488 | /* |
@@ -489,11 +492,12 @@ stop_output: | |||
489 | * integer value of flag to the scsi device info list. | 492 | * integer value of flag to the scsi device info list. |
490 | * To use, echo "vendor:model:flag" > /proc/scsi/device_info | 493 | * To use, echo "vendor:model:flag" > /proc/scsi/device_info |
491 | */ | 494 | */ |
492 | static int proc_scsi_devinfo_write(struct file *file, const char __user *buf, | 495 | static ssize_t proc_scsi_devinfo_write(struct file *file, |
493 | unsigned long length, void *data) | 496 | const char __user *buf, |
497 | size_t length, loff_t *ppos) | ||
494 | { | 498 | { |
495 | char *buffer; | 499 | char *buffer; |
496 | int err = length; | 500 | ssize_t err = length; |
497 | 501 | ||
498 | if (!buf || length>PAGE_SIZE) | 502 | if (!buf || length>PAGE_SIZE) |
499 | return -EINVAL; | 503 | return -EINVAL; |
@@ -517,6 +521,15 @@ out: | |||
517 | free_page((unsigned long)buffer); | 521 | free_page((unsigned long)buffer); |
518 | return err; | 522 | return err; |
519 | } | 523 | } |
524 | |||
525 | static const struct file_operations scsi_devinfo_proc_fops = { | ||
526 | .owner = THIS_MODULE, | ||
527 | .open = proc_scsi_devinfo_open, | ||
528 | .read = seq_read, | ||
529 | .write = proc_scsi_devinfo_write, | ||
530 | .llseek = seq_lseek, | ||
531 | .release = seq_release, | ||
532 | }; | ||
520 | #endif /* CONFIG_SCSI_PROC_FS */ | 533 | #endif /* CONFIG_SCSI_PROC_FS */ |
521 | 534 | ||
522 | module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); | 535 | module_param_string(dev_flags, scsi_dev_flags, sizeof(scsi_dev_flags), 0); |
@@ -577,15 +590,13 @@ int __init scsi_init_devinfo(void) | |||
577 | } | 590 | } |
578 | 591 | ||
579 | #ifdef CONFIG_SCSI_PROC_FS | 592 | #ifdef CONFIG_SCSI_PROC_FS |
580 | p = create_proc_entry("scsi/device_info", 0, NULL); | 593 | p = proc_create("scsi/device_info", 0, NULL, &scsi_devinfo_proc_fops); |
581 | if (!p) { | 594 | if (!p) { |
582 | error = -ENOMEM; | 595 | error = -ENOMEM; |
583 | goto out; | 596 | goto out; |
584 | } | 597 | } |
585 | 598 | ||
586 | p->owner = THIS_MODULE; | 599 | p->owner = THIS_MODULE; |
587 | p->get_info = proc_scsi_devinfo_read; | ||
588 | p->write_proc = proc_scsi_devinfo_write; | ||
589 | #endif /* CONFIG_SCSI_PROC_FS */ | 600 | #endif /* CONFIG_SCSI_PROC_FS */ |
590 | 601 | ||
591 | out: | 602 | out: |
diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index 3a1c99d5c775..e4a0d2f9b357 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c | |||
@@ -413,6 +413,7 @@ static int proc_scsi_open(struct inode *inode, struct file *file) | |||
413 | } | 413 | } |
414 | 414 | ||
415 | static const struct file_operations proc_scsi_operations = { | 415 | static const struct file_operations proc_scsi_operations = { |
416 | .owner = THIS_MODULE, | ||
416 | .open = proc_scsi_open, | 417 | .open = proc_scsi_open, |
417 | .read = seq_read, | 418 | .read = seq_read, |
418 | .write = proc_scsi_write, | 419 | .write = proc_scsi_write, |
@@ -431,10 +432,9 @@ int __init scsi_init_procfs(void) | |||
431 | if (!proc_scsi) | 432 | if (!proc_scsi) |
432 | goto err1; | 433 | goto err1; |
433 | 434 | ||
434 | pde = create_proc_entry("scsi/scsi", 0, NULL); | 435 | pde = proc_create("scsi/scsi", 0, NULL, &proc_scsi_operations); |
435 | if (!pde) | 436 | if (!pde) |
436 | goto err2; | 437 | goto err2; |
437 | pde->proc_fops = &proc_scsi_operations; | ||
438 | 438 | ||
439 | return 0; | 439 | return 0; |
440 | 440 | ||
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index fcd7455ffc39..a00eee6f7be9 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
@@ -1828,7 +1828,7 @@ void scsi_scan_host(struct Scsi_Host *shost) | |||
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); | 1830 | p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); |
1831 | if (unlikely(IS_ERR(p))) | 1831 | if (IS_ERR(p)) |
1832 | do_scan_async(data); | 1832 | do_scan_async(data); |
1833 | } | 1833 | } |
1834 | EXPORT_SYMBOL(scsi_scan_host); | 1834 | EXPORT_SYMBOL(scsi_scan_host); |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 2029422bc04d..c9d7f721b9e2 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
@@ -2667,7 +2667,6 @@ sg_proc_init(void) | |||
2667 | { | 2667 | { |
2668 | int k, mask; | 2668 | int k, mask; |
2669 | int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); | 2669 | int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); |
2670 | struct proc_dir_entry *pdep; | ||
2671 | struct sg_proc_leaf * leaf; | 2670 | struct sg_proc_leaf * leaf; |
2672 | 2671 | ||
2673 | sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); | 2672 | sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); |
@@ -2676,13 +2675,10 @@ sg_proc_init(void) | |||
2676 | for (k = 0; k < num_leaves; ++k) { | 2675 | for (k = 0; k < num_leaves; ++k) { |
2677 | leaf = &sg_proc_leaf_arr[k]; | 2676 | leaf = &sg_proc_leaf_arr[k]; |
2678 | mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; | 2677 | mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; |
2679 | pdep = create_proc_entry(leaf->name, mask, sg_proc_sgp); | 2678 | leaf->fops->owner = THIS_MODULE; |
2680 | if (pdep) { | 2679 | leaf->fops->read = seq_read; |
2681 | leaf->fops->owner = THIS_MODULE, | 2680 | leaf->fops->llseek = seq_lseek; |
2682 | leaf->fops->read = seq_read, | 2681 | proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); |
2683 | leaf->fops->llseek = seq_lseek, | ||
2684 | pdep->proc_fops = leaf->fops; | ||
2685 | } | ||
2686 | } | 2682 | } |
2687 | return 0; | 2683 | return 0; |
2688 | } | 2684 | } |
diff --git a/drivers/usb/atm/ueagle-atm.c b/drivers/usb/atm/ueagle-atm.c index 4220f22b6660..5f71ff3aee35 100644 --- a/drivers/usb/atm/ueagle-atm.c +++ b/drivers/usb/atm/ueagle-atm.c | |||
@@ -305,8 +305,6 @@ enum { | |||
305 | */ | 305 | */ |
306 | 306 | ||
307 | #define FW_GET_BYTE(p) *((__u8 *) (p)) | 307 | #define FW_GET_BYTE(p) *((__u8 *) (p)) |
308 | #define FW_GET_WORD(p) le16_to_cpu(get_unaligned((__le16 *) (p))) | ||
309 | #define FW_GET_LONG(p) le32_to_cpu(get_unaligned((__le32 *) (p))) | ||
310 | 308 | ||
311 | #define FW_DIR "ueagle-atm/" | 309 | #define FW_DIR "ueagle-atm/" |
312 | #define NB_MODEM 4 | 310 | #define NB_MODEM 4 |
@@ -621,7 +619,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte | |||
621 | if (size < 4) | 619 | if (size < 4) |
622 | goto err_fw_corrupted; | 620 | goto err_fw_corrupted; |
623 | 621 | ||
624 | crc = FW_GET_LONG(pfw); | 622 | crc = get_unaligned_le32(pfw); |
625 | pfw += 4; | 623 | pfw += 4; |
626 | size -= 4; | 624 | size -= 4; |
627 | if (crc32_be(0, pfw, size) != crc) | 625 | if (crc32_be(0, pfw, size) != crc) |
@@ -640,7 +638,7 @@ static void uea_upload_pre_firmware(const struct firmware *fw_entry, void *conte | |||
640 | 638 | ||
641 | while (size > 3) { | 639 | while (size > 3) { |
642 | u8 len = FW_GET_BYTE(pfw); | 640 | u8 len = FW_GET_BYTE(pfw); |
643 | u16 add = FW_GET_WORD(pfw + 1); | 641 | u16 add = get_unaligned_le16(pfw + 1); |
644 | 642 | ||
645 | size -= len + 3; | 643 | size -= len + 3; |
646 | if (size < 0) | 644 | if (size < 0) |
@@ -738,7 +736,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len) | |||
738 | 736 | ||
739 | for (i = 0; i < pagecount; i++) { | 737 | for (i = 0; i < pagecount; i++) { |
740 | 738 | ||
741 | pageoffset = FW_GET_LONG(dsp + p); | 739 | pageoffset = get_unaligned_le32(dsp + p); |
742 | p += 4; | 740 | p += 4; |
743 | 741 | ||
744 | if (pageoffset == 0) | 742 | if (pageoffset == 0) |
@@ -759,7 +757,7 @@ static int check_dsp_e1(u8 *dsp, unsigned int len) | |||
759 | return 1; | 757 | return 1; |
760 | 758 | ||
761 | pp += 2; /* skip blockaddr */ | 759 | pp += 2; /* skip blockaddr */ |
762 | blocksize = FW_GET_WORD(dsp + pp); | 760 | blocksize = get_unaligned_le16(dsp + pp); |
763 | pp += 2; | 761 | pp += 2; |
764 | 762 | ||
765 | /* enough space for block data? */ | 763 | /* enough space for block data? */ |
@@ -928,7 +926,7 @@ static void uea_load_page_e1(struct work_struct *work) | |||
928 | goto bad1; | 926 | goto bad1; |
929 | 927 | ||
930 | p += 4 * pageno; | 928 | p += 4 * pageno; |
931 | pageoffset = FW_GET_LONG(p); | 929 | pageoffset = get_unaligned_le32(p); |
932 | 930 | ||
933 | if (pageoffset == 0) | 931 | if (pageoffset == 0) |
934 | goto bad1; | 932 | goto bad1; |
@@ -945,10 +943,10 @@ static void uea_load_page_e1(struct work_struct *work) | |||
945 | bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); | 943 | bi.wOvlOffset = cpu_to_le16(ovl | 0x8000); |
946 | 944 | ||
947 | for (i = 0; i < blockcount; i++) { | 945 | for (i = 0; i < blockcount; i++) { |
948 | blockaddr = FW_GET_WORD(p); | 946 | blockaddr = get_unaligned_le16(p); |
949 | p += 2; | 947 | p += 2; |
950 | 948 | ||
951 | blocksize = FW_GET_WORD(p); | 949 | blocksize = get_unaligned_le16(p); |
952 | p += 2; | 950 | p += 2; |
953 | 951 | ||
954 | bi.wSize = cpu_to_le16(blocksize); | 952 | bi.wSize = cpu_to_le16(blocksize); |
@@ -1152,9 +1150,9 @@ static int uea_cmv_e1(struct uea_softc *sc, | |||
1152 | cmv.bDirection = E1_HOSTTOMODEM; | 1150 | cmv.bDirection = E1_HOSTTOMODEM; |
1153 | cmv.bFunction = function; | 1151 | cmv.bFunction = function; |
1154 | cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); | 1152 | cmv.wIndex = cpu_to_le16(sc->cmv_dsc.e1.idx); |
1155 | put_unaligned(cpu_to_le32(address), &cmv.dwSymbolicAddress); | 1153 | put_unaligned_le32(address, &cmv.dwSymbolicAddress); |
1156 | cmv.wOffsetAddress = cpu_to_le16(offset); | 1154 | cmv.wOffsetAddress = cpu_to_le16(offset); |
1157 | put_unaligned(cpu_to_le32(data >> 16 | data << 16), &cmv.dwData); | 1155 | put_unaligned_le32(data >> 16 | data << 16, &cmv.dwData); |
1158 | 1156 | ||
1159 | ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); | 1157 | ret = uea_request(sc, UEA_E1_SET_BLOCK, UEA_MPTX_START, sizeof(cmv), &cmv); |
1160 | if (ret < 0) | 1158 | if (ret < 0) |
@@ -1646,7 +1644,7 @@ static int request_cmvs(struct uea_softc *sc, | |||
1646 | if (size < 5) | 1644 | if (size < 5) |
1647 | goto err_fw_corrupted; | 1645 | goto err_fw_corrupted; |
1648 | 1646 | ||
1649 | crc = FW_GET_LONG(data); | 1647 | crc = get_unaligned_le32(data); |
1650 | data += 4; | 1648 | data += 4; |
1651 | size -= 4; | 1649 | size -= 4; |
1652 | if (crc32_be(0, data, size) != crc) | 1650 | if (crc32_be(0, data, size) != crc) |
@@ -1696,9 +1694,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc) | |||
1696 | "please update your firmware\n"); | 1694 | "please update your firmware\n"); |
1697 | 1695 | ||
1698 | for (i = 0; i < len; i++) { | 1696 | for (i = 0; i < len; i++) { |
1699 | ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v1[i].address), | 1697 | ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v1[i].address), |
1700 | FW_GET_WORD(&cmvs_v1[i].offset), | 1698 | get_unaligned_le16(&cmvs_v1[i].offset), |
1701 | FW_GET_LONG(&cmvs_v1[i].data)); | 1699 | get_unaligned_le32(&cmvs_v1[i].data)); |
1702 | if (ret < 0) | 1700 | if (ret < 0) |
1703 | goto out; | 1701 | goto out; |
1704 | } | 1702 | } |
@@ -1706,9 +1704,9 @@ static int uea_send_cmvs_e1(struct uea_softc *sc) | |||
1706 | struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; | 1704 | struct uea_cmvs_v2 *cmvs_v2 = cmvs_ptr; |
1707 | 1705 | ||
1708 | for (i = 0; i < len; i++) { | 1706 | for (i = 0; i < len; i++) { |
1709 | ret = uea_write_cmv_e1(sc, FW_GET_LONG(&cmvs_v2[i].address), | 1707 | ret = uea_write_cmv_e1(sc, get_unaligned_le32(&cmvs_v2[i].address), |
1710 | (u16) FW_GET_LONG(&cmvs_v2[i].offset), | 1708 | (u16) get_unaligned_le32(&cmvs_v2[i].offset), |
1711 | FW_GET_LONG(&cmvs_v2[i].data)); | 1709 | get_unaligned_le32(&cmvs_v2[i].data)); |
1712 | if (ret < 0) | 1710 | if (ret < 0) |
1713 | goto out; | 1711 | goto out; |
1714 | } | 1712 | } |
@@ -1759,10 +1757,10 @@ static int uea_send_cmvs_e4(struct uea_softc *sc) | |||
1759 | 1757 | ||
1760 | for (i = 0; i < len; i++) { | 1758 | for (i = 0; i < len; i++) { |
1761 | ret = uea_write_cmv_e4(sc, 1, | 1759 | ret = uea_write_cmv_e4(sc, 1, |
1762 | FW_GET_LONG(&cmvs_v2[i].group), | 1760 | get_unaligned_le32(&cmvs_v2[i].group), |
1763 | FW_GET_LONG(&cmvs_v2[i].address), | 1761 | get_unaligned_le32(&cmvs_v2[i].address), |
1764 | FW_GET_LONG(&cmvs_v2[i].offset), | 1762 | get_unaligned_le32(&cmvs_v2[i].offset), |
1765 | FW_GET_LONG(&cmvs_v2[i].data)); | 1763 | get_unaligned_le32(&cmvs_v2[i].data)); |
1766 | if (ret < 0) | 1764 | if (ret < 0) |
1767 | goto out; | 1765 | goto out; |
1768 | } | 1766 | } |
@@ -1964,7 +1962,7 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr) | |||
1964 | if (UEA_CHIP_VERSION(sc) == ADI930 | 1962 | if (UEA_CHIP_VERSION(sc) == ADI930 |
1965 | && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) { | 1963 | && cmv->bFunction == E1_MAKEFUNCTION(2, 2)) { |
1966 | cmv->wIndex = cpu_to_le16(dsc->idx); | 1964 | cmv->wIndex = cpu_to_le16(dsc->idx); |
1967 | put_unaligned(cpu_to_le32(dsc->address), &cmv->dwSymbolicAddress); | 1965 | put_unaligned_le32(dsc->address, &cmv->dwSymbolicAddress); |
1968 | cmv->wOffsetAddress = cpu_to_le16(dsc->offset); | 1966 | cmv->wOffsetAddress = cpu_to_le16(dsc->offset); |
1969 | } else | 1967 | } else |
1970 | goto bad2; | 1968 | goto bad2; |
@@ -1978,11 +1976,11 @@ static void uea_dispatch_cmv_e1(struct uea_softc *sc, struct intr_pkt *intr) | |||
1978 | 1976 | ||
1979 | /* in case of MEMACCESS */ | 1977 | /* in case of MEMACCESS */ |
1980 | if (le16_to_cpu(cmv->wIndex) != dsc->idx || | 1978 | if (le16_to_cpu(cmv->wIndex) != dsc->idx || |
1981 | le32_to_cpu(get_unaligned(&cmv->dwSymbolicAddress)) != dsc->address || | 1979 | get_unaligned_le32(&cmv->dwSymbolicAddress) != dsc->address || |
1982 | le16_to_cpu(cmv->wOffsetAddress) != dsc->offset) | 1980 | le16_to_cpu(cmv->wOffsetAddress) != dsc->offset) |
1983 | goto bad2; | 1981 | goto bad2; |
1984 | 1982 | ||
1985 | sc->data = le32_to_cpu(get_unaligned(&cmv->dwData)); | 1983 | sc->data = get_unaligned_le32(&cmv->dwData); |
1986 | sc->data = sc->data << 16 | sc->data >> 16; | 1984 | sc->data = sc->data << 16 | sc->data >> 16; |
1987 | 1985 | ||
1988 | wake_up_cmv_ack(sc); | 1986 | wake_up_cmv_ack(sc); |
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7b572e75e73c..cefe7f2c6f75 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c | |||
@@ -280,7 +280,7 @@ static void acm_ctrl_irq(struct urb *urb) | |||
280 | 280 | ||
281 | case USB_CDC_NOTIFY_SERIAL_STATE: | 281 | case USB_CDC_NOTIFY_SERIAL_STATE: |
282 | 282 | ||
283 | newctrl = le16_to_cpu(get_unaligned((__le16 *) data)); | 283 | newctrl = get_unaligned_le16(data); |
284 | 284 | ||
285 | if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { | 285 | if (acm->tty && !acm->clocal && (acm->ctrlin & ~newctrl & ACM_CTRL_DCD)) { |
286 | dbg("calling hangup"); | 286 | dbg("calling hangup"); |
diff --git a/drivers/usb/core/inode.c b/drivers/usb/core/inode.c index 8607846e3c3f..1d253dd4ea81 100644 --- a/drivers/usb/core/inode.c +++ b/drivers/usb/core/inode.c | |||
@@ -773,7 +773,7 @@ int __init usbfs_init(void) | |||
773 | usb_register_notify(&usbfs_nb); | 773 | usb_register_notify(&usbfs_nb); |
774 | 774 | ||
775 | /* create mount point for usbfs */ | 775 | /* create mount point for usbfs */ |
776 | usbdir = proc_mkdir("usb", proc_bus); | 776 | usbdir = proc_mkdir("bus/usb", NULL); |
777 | 777 | ||
778 | return 0; | 778 | return 0; |
779 | } | 779 | } |
@@ -783,6 +783,6 @@ void usbfs_cleanup(void) | |||
783 | usb_unregister_notify(&usbfs_nb); | 783 | usb_unregister_notify(&usbfs_nb); |
784 | unregister_filesystem(&usb_fs_type); | 784 | unregister_filesystem(&usb_fs_type); |
785 | if (usbdir) | 785 | if (usbdir) |
786 | remove_proc_entry("usb", proc_bus); | 786 | remove_proc_entry("bus/usb", NULL); |
787 | } | 787 | } |
788 | 788 | ||
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c index 9b913afb2e6d..274c60a970cd 100644 --- a/drivers/usb/gadget/at91_udc.c +++ b/drivers/usb/gadget/at91_udc.c | |||
@@ -231,6 +231,7 @@ static int proc_udc_open(struct inode *inode, struct file *file) | |||
231 | } | 231 | } |
232 | 232 | ||
233 | static const struct file_operations proc_ops = { | 233 | static const struct file_operations proc_ops = { |
234 | .owner = THIS_MODULE, | ||
234 | .open = proc_udc_open, | 235 | .open = proc_udc_open, |
235 | .read = seq_read, | 236 | .read = seq_read, |
236 | .llseek = seq_lseek, | 237 | .llseek = seq_lseek, |
@@ -239,15 +240,7 @@ static const struct file_operations proc_ops = { | |||
239 | 240 | ||
240 | static void create_debug_file(struct at91_udc *udc) | 241 | static void create_debug_file(struct at91_udc *udc) |
241 | { | 242 | { |
242 | struct proc_dir_entry *pde; | 243 | udc->pde = proc_create_data(debug_filename, 0, NULL, &proc_ops, udc); |
243 | |||
244 | pde = create_proc_entry (debug_filename, 0, NULL); | ||
245 | udc->pde = pde; | ||
246 | if (pde == NULL) | ||
247 | return; | ||
248 | |||
249 | pde->proc_fops = &proc_ops; | ||
250 | pde->data = udc; | ||
251 | } | 244 | } |
252 | 245 | ||
253 | static void remove_debug_file(struct at91_udc *udc) | 246 | static void remove_debug_file(struct at91_udc *udc) |
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c index 64a592cbbe7b..be6613afedbf 100644 --- a/drivers/usb/gadget/goku_udc.c +++ b/drivers/usb/gadget/goku_udc.c | |||
@@ -127,7 +127,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) | |||
127 | 127 | ||
128 | /* enabling the no-toggle interrupt mode would need an api hook */ | 128 | /* enabling the no-toggle interrupt mode would need an api hook */ |
129 | mode = 0; | 129 | mode = 0; |
130 | max = le16_to_cpu(get_unaligned(&desc->wMaxPacketSize)); | 130 | max = get_unaligned_le16(&desc->wMaxPacketSize); |
131 | switch (max) { | 131 | switch (max) { |
132 | case 64: mode++; | 132 | case 64: mode++; |
133 | case 32: mode++; | 133 | case 32: mode++; |
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c index 95f7662376f1..881d74c3d964 100644 --- a/drivers/usb/gadget/omap_udc.c +++ b/drivers/usb/gadget/omap_udc.c | |||
@@ -2504,6 +2504,7 @@ static int proc_udc_open(struct inode *inode, struct file *file) | |||
2504 | } | 2504 | } |
2505 | 2505 | ||
2506 | static const struct file_operations proc_ops = { | 2506 | static const struct file_operations proc_ops = { |
2507 | .owner = THIS_MODULE, | ||
2507 | .open = proc_udc_open, | 2508 | .open = proc_udc_open, |
2508 | .read = seq_read, | 2509 | .read = seq_read, |
2509 | .llseek = seq_lseek, | 2510 | .llseek = seq_lseek, |
@@ -2512,11 +2513,7 @@ static const struct file_operations proc_ops = { | |||
2512 | 2513 | ||
2513 | static void create_proc_file(void) | 2514 | static void create_proc_file(void) |
2514 | { | 2515 | { |
2515 | struct proc_dir_entry *pde; | 2516 | proc_create(proc_filename, 0, NULL, &proc_ops); |
2516 | |||
2517 | pde = create_proc_entry (proc_filename, 0, NULL); | ||
2518 | if (pde) | ||
2519 | pde->proc_fops = &proc_ops; | ||
2520 | } | 2517 | } |
2521 | 2518 | ||
2522 | static void remove_proc_file(void) | 2519 | static void remove_proc_file(void) |
diff --git a/drivers/usb/gadget/rndis.c b/drivers/usb/gadget/rndis.c index bd58dd504f6f..d0677f5d3cd5 100644 --- a/drivers/usb/gadget/rndis.c +++ b/drivers/usb/gadget/rndis.c | |||
@@ -183,14 +183,10 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
183 | DBG("query OID %08x value, len %d:\n", OID, buf_len); | 183 | DBG("query OID %08x value, len %d:\n", OID, buf_len); |
184 | for (i = 0; i < buf_len; i += 16) { | 184 | for (i = 0; i < buf_len; i += 16) { |
185 | DBG("%03d: %08x %08x %08x %08x\n", i, | 185 | DBG("%03d: %08x %08x %08x %08x\n", i, |
186 | le32_to_cpu(get_unaligned((__le32 *) | 186 | get_unaligned_le32(&buf[i]), |
187 | &buf[i])), | 187 | get_unaligned_le32(&buf[i + 4]), |
188 | le32_to_cpu(get_unaligned((__le32 *) | 188 | get_unaligned_le32(&buf[i + 8]), |
189 | &buf[i + 4])), | 189 | get_unaligned_le32(&buf[i + 12])); |
190 | le32_to_cpu(get_unaligned((__le32 *) | ||
191 | &buf[i + 8])), | ||
192 | le32_to_cpu(get_unaligned((__le32 *) | ||
193 | &buf[i + 12]))); | ||
194 | } | 190 | } |
195 | } | 191 | } |
196 | 192 | ||
@@ -666,7 +662,7 @@ gen_ndis_query_resp (int configNr, u32 OID, u8 *buf, unsigned buf_len, | |||
666 | break; | 662 | break; |
667 | case OID_PNP_QUERY_POWER: | 663 | case OID_PNP_QUERY_POWER: |
668 | DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__, | 664 | DBG("%s: OID_PNP_QUERY_POWER D%d\n", __func__, |
669 | le32_to_cpu(get_unaligned((__le32 *)buf)) - 1); | 665 | get_unaligned_le32(buf) - 1); |
670 | /* only suspend is a real power state, and | 666 | /* only suspend is a real power state, and |
671 | * it can't be entered by OID_PNP_SET_POWER... | 667 | * it can't be entered by OID_PNP_SET_POWER... |
672 | */ | 668 | */ |
@@ -705,14 +701,10 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, | |||
705 | DBG("set OID %08x value, len %d:\n", OID, buf_len); | 701 | DBG("set OID %08x value, len %d:\n", OID, buf_len); |
706 | for (i = 0; i < buf_len; i += 16) { | 702 | for (i = 0; i < buf_len; i += 16) { |
707 | DBG("%03d: %08x %08x %08x %08x\n", i, | 703 | DBG("%03d: %08x %08x %08x %08x\n", i, |
708 | le32_to_cpu(get_unaligned((__le32 *) | 704 | get_unaligned_le32(&buf[i]), |
709 | &buf[i])), | 705 | get_unaligned_le32(&buf[i + 4]), |
710 | le32_to_cpu(get_unaligned((__le32 *) | 706 | get_unaligned_le32(&buf[i + 8]), |
711 | &buf[i + 4])), | 707 | get_unaligned_le32(&buf[i + 12])); |
712 | le32_to_cpu(get_unaligned((__le32 *) | ||
713 | &buf[i + 8])), | ||
714 | le32_to_cpu(get_unaligned((__le32 *) | ||
715 | &buf[i + 12]))); | ||
716 | } | 708 | } |
717 | } | 709 | } |
718 | 710 | ||
@@ -726,8 +718,7 @@ static int gen_ndis_set_resp (u8 configNr, u32 OID, u8 *buf, u32 buf_len, | |||
726 | * PROMISCUOUS, DIRECTED, | 718 | * PROMISCUOUS, DIRECTED, |
727 | * MULTICAST, ALL_MULTICAST, BROADCAST | 719 | * MULTICAST, ALL_MULTICAST, BROADCAST |
728 | */ | 720 | */ |
729 | *params->filter = (u16) le32_to_cpu(get_unaligned( | 721 | *params->filter = (u16)get_unaligned_le32(buf); |
730 | (__le32 *)buf)); | ||
731 | DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", | 722 | DBG("%s: OID_GEN_CURRENT_PACKET_FILTER %08x\n", |
732 | __func__, *params->filter); | 723 | __func__, *params->filter); |
733 | 724 | ||
@@ -777,7 +768,7 @@ update_linkstate: | |||
777 | * resuming, Windows forces a reset, and then SET_POWER D0. | 768 | * resuming, Windows forces a reset, and then SET_POWER D0. |
778 | * FIXME ... then things go batty; Windows wedges itself. | 769 | * FIXME ... then things go batty; Windows wedges itself. |
779 | */ | 770 | */ |
780 | i = le32_to_cpu(get_unaligned((__le32 *)buf)); | 771 | i = get_unaligned_le32(buf); |
781 | DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1); | 772 | DBG("%s: OID_PNP_SET_POWER D%d\n", __func__, i - 1); |
782 | switch (i) { | 773 | switch (i) { |
783 | case NdisDeviceStateD0: | 774 | case NdisDeviceStateD0: |
@@ -1064,8 +1055,8 @@ int rndis_msg_parser (u8 configNr, u8 *buf) | |||
1064 | return -ENOMEM; | 1055 | return -ENOMEM; |
1065 | 1056 | ||
1066 | tmp = (__le32 *) buf; | 1057 | tmp = (__le32 *) buf; |
1067 | MsgType = le32_to_cpu(get_unaligned(tmp++)); | 1058 | MsgType = get_unaligned_le32(tmp++); |
1068 | MsgLength = le32_to_cpu(get_unaligned(tmp++)); | 1059 | MsgLength = get_unaligned_le32(tmp++); |
1069 | 1060 | ||
1070 | if (configNr >= RNDIS_MAX_CONFIGS) | 1061 | if (configNr >= RNDIS_MAX_CONFIGS) |
1071 | return -ENOTSUPP; | 1062 | return -ENOTSUPP; |
@@ -1296,10 +1287,9 @@ int rndis_rm_hdr(struct sk_buff *skb) | |||
1296 | tmp++; | 1287 | tmp++; |
1297 | 1288 | ||
1298 | /* DataOffset, DataLength */ | 1289 | /* DataOffset, DataLength */ |
1299 | if (!skb_pull(skb, le32_to_cpu(get_unaligned(tmp++)) | 1290 | if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) |
1300 | + 8 /* offset of DataOffset */)) | ||
1301 | return -EOVERFLOW; | 1291 | return -EOVERFLOW; |
1302 | skb_trim(skb, le32_to_cpu(get_unaligned(tmp++))); | 1292 | skb_trim(skb, get_unaligned_le32(tmp++)); |
1303 | 1293 | ||
1304 | return 0; | 1294 | return 0; |
1305 | } | 1295 | } |
diff --git a/drivers/usb/gadget/usbstring.c b/drivers/usb/gadget/usbstring.c index 878e428a0ec1..4154be375c7a 100644 --- a/drivers/usb/gadget/usbstring.c +++ b/drivers/usb/gadget/usbstring.c | |||
@@ -74,7 +74,7 @@ static int utf8_to_utf16le(const char *s, __le16 *cp, unsigned len) | |||
74 | goto fail; | 74 | goto fail; |
75 | } else | 75 | } else |
76 | uchar = c; | 76 | uchar = c; |
77 | put_unaligned (cpu_to_le16 (uchar), cp++); | 77 | put_unaligned_le16(uchar, cp++); |
78 | count++; | 78 | count++; |
79 | len--; | 79 | len--; |
80 | } | 80 | } |
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index f13d1029aeb2..382587c4457c 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c | |||
@@ -770,7 +770,7 @@ static int ehci_hub_control ( | |||
770 | if (status & ~0xffff) /* only if wPortChange is interesting */ | 770 | if (status & ~0xffff) /* only if wPortChange is interesting */ |
771 | #endif | 771 | #endif |
772 | dbg_port (ehci, "GetStatus", wIndex + 1, temp); | 772 | dbg_port (ehci, "GetStatus", wIndex + 1, temp); |
773 | put_unaligned(cpu_to_le32 (status), (__le32 *) buf); | 773 | put_unaligned_le32(status, buf); |
774 | break; | 774 | break; |
775 | case SetHubFeature: | 775 | case SetHubFeature: |
776 | switch (wValue) { | 776 | switch (wValue) { |
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c index 5be3bb3e6a9d..17dc2eccda83 100644 --- a/drivers/usb/host/ohci-hub.c +++ b/drivers/usb/host/ohci-hub.c | |||
@@ -736,14 +736,14 @@ static int ohci_hub_control ( | |||
736 | break; | 736 | break; |
737 | case GetHubStatus: | 737 | case GetHubStatus: |
738 | temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE); | 738 | temp = roothub_status (ohci) & ~(RH_HS_CRWE | RH_HS_DRWE); |
739 | put_unaligned(cpu_to_le32 (temp), (__le32 *) buf); | 739 | put_unaligned_le32(temp, buf); |
740 | break; | 740 | break; |
741 | case GetPortStatus: | 741 | case GetPortStatus: |
742 | if (!wIndex || wIndex > ports) | 742 | if (!wIndex || wIndex > ports) |
743 | goto error; | 743 | goto error; |
744 | wIndex--; | 744 | wIndex--; |
745 | temp = roothub_portstatus (ohci, wIndex); | 745 | temp = roothub_portstatus (ohci, wIndex); |
746 | put_unaligned(cpu_to_le32 (temp), (__le32 *) buf); | 746 | put_unaligned_le32(temp, buf); |
747 | 747 | ||
748 | #ifndef OHCI_VERBOSE_DEBUG | 748 | #ifndef OHCI_VERBOSE_DEBUG |
749 | if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ | 749 | if (*(u16*)(buf+2)) /* only if wPortChange is interesting */ |
diff --git a/drivers/usb/host/sl811-hcd.c b/drivers/usb/host/sl811-hcd.c index 3fd7a0c12078..426575247b23 100644 --- a/drivers/usb/host/sl811-hcd.c +++ b/drivers/usb/host/sl811-hcd.c | |||
@@ -1506,15 +1506,7 @@ static const char proc_filename[] = "driver/sl811h"; | |||
1506 | 1506 | ||
1507 | static void create_debug_file(struct sl811 *sl811) | 1507 | static void create_debug_file(struct sl811 *sl811) |
1508 | { | 1508 | { |
1509 | struct proc_dir_entry *pde; | 1509 | sl811->pde = proc_create_data(proc_filename, 0, NULL, &proc_ops, sl811); |
1510 | |||
1511 | pde = create_proc_entry(proc_filename, 0, NULL); | ||
1512 | if (pde == NULL) | ||
1513 | return; | ||
1514 | |||
1515 | pde->proc_fops = &proc_ops; | ||
1516 | pde->data = sl811; | ||
1517 | sl811->pde = pde; | ||
1518 | } | 1510 | } |
1519 | 1511 | ||
1520 | static void remove_debug_file(struct sl811 *sl811) | 1512 | static void remove_debug_file(struct sl811 *sl811) |
diff --git a/drivers/video/clps711xfb.c b/drivers/video/clps711xfb.c index 17b5267f44d7..9f8a389dc7ae 100644 --- a/drivers/video/clps711xfb.c +++ b/drivers/video/clps711xfb.c | |||
@@ -381,7 +381,7 @@ int __init clps711xfb_init(void) | |||
381 | 381 | ||
382 | /* Register the /proc entries. */ | 382 | /* Register the /proc entries. */ |
383 | clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444, | 383 | clps7111fb_backlight_proc_entry = create_proc_entry("backlight", 0444, |
384 | &proc_root); | 384 | NULL); |
385 | if (clps7111fb_backlight_proc_entry == NULL) { | 385 | if (clps7111fb_backlight_proc_entry == NULL) { |
386 | printk("Couldn't create the /proc entry for the backlight.\n"); | 386 | printk("Couldn't create the /proc entry for the backlight.\n"); |
387 | return -EINVAL; | 387 | return -EINVAL; |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 8eda7b60df8f..ad31983b43eb 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -1881,7 +1881,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1881 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1881 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1882 | vc->vc_size_row * | 1882 | vc->vc_size_row * |
1883 | (b - count)), | 1883 | (b - count)), |
1884 | vc->vc_video_erase_char, | 1884 | vc->vc_scrl_erase_char, |
1885 | vc->vc_size_row * count); | 1885 | vc->vc_size_row * count); |
1886 | return 1; | 1886 | return 1; |
1887 | break; | 1887 | break; |
@@ -1953,7 +1953,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1953 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1953 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1954 | vc->vc_size_row * | 1954 | vc->vc_size_row * |
1955 | (b - count)), | 1955 | (b - count)), |
1956 | vc->vc_video_erase_char, | 1956 | vc->vc_scrl_erase_char, |
1957 | vc->vc_size_row * count); | 1957 | vc->vc_size_row * count); |
1958 | return 1; | 1958 | return 1; |
1959 | } | 1959 | } |
@@ -1972,7 +1972,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
1972 | scr_memsetw((unsigned short *) (vc->vc_origin + | 1972 | scr_memsetw((unsigned short *) (vc->vc_origin + |
1973 | vc->vc_size_row * | 1973 | vc->vc_size_row * |
1974 | t), | 1974 | t), |
1975 | vc->vc_video_erase_char, | 1975 | vc->vc_scrl_erase_char, |
1976 | vc->vc_size_row * count); | 1976 | vc->vc_size_row * count); |
1977 | return 1; | 1977 | return 1; |
1978 | break; | 1978 | break; |
@@ -2042,7 +2042,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir, | |||
2042 | scr_memsetw((unsigned short *) (vc->vc_origin + | 2042 | scr_memsetw((unsigned short *) (vc->vc_origin + |
2043 | vc->vc_size_row * | 2043 | vc->vc_size_row * |
2044 | t), | 2044 | t), |
2045 | vc->vc_video_erase_char, | 2045 | vc->vc_scrl_erase_char, |
2046 | vc->vc_size_row * count); | 2046 | vc->vc_size_row * count); |
2047 | return 1; | 2047 | return 1; |
2048 | } | 2048 | } |
diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index bd8d995fe25d..38a296bbdfc9 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c | |||
@@ -531,7 +531,7 @@ static void mdacon_cursor(struct vc_data *c, int mode) | |||
531 | 531 | ||
532 | static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) | 532 | static int mdacon_scroll(struct vc_data *c, int t, int b, int dir, int lines) |
533 | { | 533 | { |
534 | u16 eattr = mda_convert_attr(c->vc_video_erase_char); | 534 | u16 eattr = mda_convert_attr(c->vc_scrl_erase_char); |
535 | 535 | ||
536 | if (!lines) | 536 | if (!lines) |
537 | return 0; | 537 | return 0; |
diff --git a/drivers/video/console/sticon.c b/drivers/video/console/sticon.c index 67a682d6cc7b..a11cc2fdd4cd 100644 --- a/drivers/video/console/sticon.c +++ b/drivers/video/console/sticon.c | |||
@@ -170,12 +170,12 @@ static int sticon_scroll(struct vc_data *conp, int t, int b, int dir, int count) | |||
170 | switch (dir) { | 170 | switch (dir) { |
171 | case SM_UP: | 171 | case SM_UP: |
172 | sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols); | 172 | sti_bmove(sti, t + count, 0, t, 0, b - t - count, conp->vc_cols); |
173 | sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_video_erase_char); | 173 | sti_clear(sti, b - count, 0, count, conp->vc_cols, conp->vc_scrl_erase_char); |
174 | break; | 174 | break; |
175 | 175 | ||
176 | case SM_DOWN: | 176 | case SM_DOWN: |
177 | sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols); | 177 | sti_bmove(sti, t, 0, t + count, 0, b - t - count, conp->vc_cols); |
178 | sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_video_erase_char); | 178 | sti_clear(sti, t, 0, count, conp->vc_cols, conp->vc_scrl_erase_char); |
179 | break; | 179 | break; |
180 | } | 180 | } |
181 | 181 | ||
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c index 6df29a62d720..bd1f57b259d9 100644 --- a/drivers/video/console/vgacon.c +++ b/drivers/video/console/vgacon.c | |||
@@ -1350,7 +1350,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, | |||
1350 | } else | 1350 | } else |
1351 | c->vc_origin += delta; | 1351 | c->vc_origin += delta; |
1352 | scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - | 1352 | scr_memsetw((u16 *) (c->vc_origin + c->vc_screenbuf_size - |
1353 | delta), c->vc_video_erase_char, | 1353 | delta), c->vc_scrl_erase_char, |
1354 | delta); | 1354 | delta); |
1355 | } else { | 1355 | } else { |
1356 | if (oldo - delta < vga_vram_base) { | 1356 | if (oldo - delta < vga_vram_base) { |
@@ -1363,7 +1363,7 @@ static int vgacon_scroll(struct vc_data *c, int t, int b, int dir, | |||
1363 | } else | 1363 | } else |
1364 | c->vc_origin -= delta; | 1364 | c->vc_origin -= delta; |
1365 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; | 1365 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; |
1366 | scr_memsetw((u16 *) (c->vc_origin), c->vc_video_erase_char, | 1366 | scr_memsetw((u16 *) (c->vc_origin), c->vc_scrl_erase_char, |
1367 | delta); | 1367 | delta); |
1368 | } | 1368 | } |
1369 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; | 1369 | c->vc_scr_end = c->vc_origin + c->vc_screenbuf_size; |
diff --git a/drivers/video/matrox/matroxfb_misc.c b/drivers/video/matrox/matroxfb_misc.c index aaa3e538e5da..5b5f072fc1a8 100644 --- a/drivers/video/matrox/matroxfb_misc.c +++ b/drivers/video/matrox/matroxfb_misc.c | |||
@@ -522,8 +522,6 @@ static void parse_bios(unsigned char __iomem* vbios, struct matrox_bios* bd) { | |||
522 | #endif | 522 | #endif |
523 | } | 523 | } |
524 | 524 | ||
525 | #define get_u16(x) (le16_to_cpu(get_unaligned((__u16*)(x)))) | ||
526 | #define get_u32(x) (le32_to_cpu(get_unaligned((__u32*)(x)))) | ||
527 | static int parse_pins1(WPMINFO const struct matrox_bios* bd) { | 525 | static int parse_pins1(WPMINFO const struct matrox_bios* bd) { |
528 | unsigned int maxdac; | 526 | unsigned int maxdac; |
529 | 527 | ||
@@ -532,11 +530,12 @@ static int parse_pins1(WPMINFO const struct matrox_bios* bd) { | |||
532 | case 1: maxdac = 220000; break; | 530 | case 1: maxdac = 220000; break; |
533 | default: maxdac = 240000; break; | 531 | default: maxdac = 240000; break; |
534 | } | 532 | } |
535 | if (get_u16(bd->pins + 24)) { | 533 | if (get_unaligned_le16(bd->pins + 24)) { |
536 | maxdac = get_u16(bd->pins + 24) * 10; | 534 | maxdac = get_unaligned_le16(bd->pins + 24) * 10; |
537 | } | 535 | } |
538 | MINFO->limits.pixel.vcomax = maxdac; | 536 | MINFO->limits.pixel.vcomax = maxdac; |
539 | MINFO->values.pll.system = get_u16(bd->pins + 28) ? get_u16(bd->pins + 28) * 10 : 50000; | 537 | MINFO->values.pll.system = get_unaligned_le16(bd->pins + 28) ? |
538 | get_unaligned_le16(bd->pins + 28) * 10 : 50000; | ||
540 | /* ignore 4MB, 8MB, module clocks */ | 539 | /* ignore 4MB, 8MB, module clocks */ |
541 | MINFO->features.pll.ref_freq = 14318; | 540 | MINFO->features.pll.ref_freq = 14318; |
542 | MINFO->values.reg.mctlwtst = 0x00030101; | 541 | MINFO->values.reg.mctlwtst = 0x00030101; |
@@ -575,7 +574,8 @@ static void default_pins2(WPMINFO2) { | |||
575 | static int parse_pins3(WPMINFO const struct matrox_bios* bd) { | 574 | static int parse_pins3(WPMINFO const struct matrox_bios* bd) { |
576 | MINFO->limits.pixel.vcomax = | 575 | MINFO->limits.pixel.vcomax = |
577 | MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); | 576 | MINFO->limits.system.vcomax = (bd->pins[36] == 0xFF) ? 230000 : ((bd->pins[36] + 100) * 1000); |
578 | MINFO->values.reg.mctlwtst = get_u32(bd->pins + 48) == 0xFFFFFFFF ? 0x01250A21 : get_u32(bd->pins + 48); | 577 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 48) == 0xFFFFFFFF ? |
578 | 0x01250A21 : get_unaligned_le32(bd->pins + 48); | ||
579 | /* memory config */ | 579 | /* memory config */ |
580 | MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | | 580 | MINFO->values.reg.memrdbk = ((bd->pins[57] << 21) & 0x1E000000) | |
581 | ((bd->pins[57] << 22) & 0x00C00000) | | 581 | ((bd->pins[57] << 22) & 0x00C00000) | |
@@ -601,7 +601,7 @@ static void default_pins3(WPMINFO2) { | |||
601 | static int parse_pins4(WPMINFO const struct matrox_bios* bd) { | 601 | static int parse_pins4(WPMINFO const struct matrox_bios* bd) { |
602 | MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; | 602 | MINFO->limits.pixel.vcomax = (bd->pins[ 39] == 0xFF) ? 230000 : bd->pins[ 39] * 4000; |
603 | MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000; | 603 | MINFO->limits.system.vcomax = (bd->pins[ 38] == 0xFF) ? MINFO->limits.pixel.vcomax : bd->pins[ 38] * 4000; |
604 | MINFO->values.reg.mctlwtst = get_u32(bd->pins + 71); | 604 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 71); |
605 | MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | | 605 | MINFO->values.reg.memrdbk = ((bd->pins[87] << 21) & 0x1E000000) | |
606 | ((bd->pins[87] << 22) & 0x00C00000) | | 606 | ((bd->pins[87] << 22) & 0x00C00000) | |
607 | ((bd->pins[86] << 1) & 0x000001E0) | | 607 | ((bd->pins[86] << 1) & 0x000001E0) | |
@@ -609,7 +609,7 @@ static int parse_pins4(WPMINFO const struct matrox_bios* bd) { | |||
609 | MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | | 609 | MINFO->values.reg.opt = ((bd->pins[53] << 15) & 0x00400000) | |
610 | ((bd->pins[53] << 22) & 0x10000000) | | 610 | ((bd->pins[53] << 22) & 0x10000000) | |
611 | ((bd->pins[53] << 7) & 0x00001C00); | 611 | ((bd->pins[53] << 7) & 0x00001C00); |
612 | MINFO->values.reg.opt3 = get_u32(bd->pins + 67); | 612 | MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 67); |
613 | MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; | 613 | MINFO->values.pll.system = (bd->pins[ 65] == 0xFF) ? 200000 : bd->pins[ 65] * 4000; |
614 | MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; | 614 | MINFO->features.pll.ref_freq = (bd->pins[ 92] & 0x01) ? 14318 : 27000; |
615 | return 0; | 615 | return 0; |
@@ -640,12 +640,12 @@ static int parse_pins5(WPMINFO const struct matrox_bios* bd) { | |||
640 | MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult; | 640 | MINFO->limits.video.vcomin = (bd->pins[122] == 0xFF) ? MINFO->limits.system.vcomin : bd->pins[122] * mult; |
641 | MINFO->values.pll.system = | 641 | MINFO->values.pll.system = |
642 | MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000; | 642 | MINFO->values.pll.video = (bd->pins[ 92] == 0xFF) ? 284000 : bd->pins[ 92] * 4000; |
643 | MINFO->values.reg.opt = get_u32(bd->pins+ 48); | 643 | MINFO->values.reg.opt = get_unaligned_le32(bd->pins + 48); |
644 | MINFO->values.reg.opt2 = get_u32(bd->pins+ 52); | 644 | MINFO->values.reg.opt2 = get_unaligned_le32(bd->pins + 52); |
645 | MINFO->values.reg.opt3 = get_u32(bd->pins+ 94); | 645 | MINFO->values.reg.opt3 = get_unaligned_le32(bd->pins + 94); |
646 | MINFO->values.reg.mctlwtst = get_u32(bd->pins+ 98); | 646 | MINFO->values.reg.mctlwtst = get_unaligned_le32(bd->pins + 98); |
647 | MINFO->values.reg.memmisc = get_u32(bd->pins+102); | 647 | MINFO->values.reg.memmisc = get_unaligned_le32(bd->pins + 102); |
648 | MINFO->values.reg.memrdbk = get_u32(bd->pins+106); | 648 | MINFO->values.reg.memrdbk = get_unaligned_le32(bd->pins + 106); |
649 | MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; | 649 | MINFO->features.pll.ref_freq = (bd->pins[110] & 0x01) ? 14318 : 27000; |
650 | MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; | 650 | MINFO->values.memory.ddr = (bd->pins[114] & 0x60) == 0x20; |
651 | MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0; | 651 | MINFO->values.memory.dll = (bd->pins[115] & 0x02) != 0; |
diff --git a/drivers/video/metronomefb.c b/drivers/video/metronomefb.c index 249791286367..cc4c038a1b3f 100644 --- a/drivers/video/metronomefb.c +++ b/drivers/video/metronomefb.c | |||
@@ -206,8 +206,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
206 | } | 206 | } |
207 | 207 | ||
208 | /* check waveform mode table address checksum */ | 208 | /* check waveform mode table address checksum */ |
209 | wmta = le32_to_cpu(get_unaligned((__le32 *) wfm_hdr->wmta)); | 209 | wmta = get_unaligned_le32(wfm_hdr->wmta) & 0x00FFFFFF; |
210 | wmta &= 0x00FFFFFF; | ||
211 | cksum_idx = wmta + m*4 + 3; | 210 | cksum_idx = wmta + m*4 + 3; |
212 | if (cksum_idx > size) | 211 | if (cksum_idx > size) |
213 | return -EINVAL; | 212 | return -EINVAL; |
@@ -219,8 +218,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
219 | } | 218 | } |
220 | 219 | ||
221 | /* check waveform temperature table address checksum */ | 220 | /* check waveform temperature table address checksum */ |
222 | tta = le32_to_cpu(get_unaligned((int *) (mem + wmta + m*4))); | 221 | tta = get_unaligned_le32(mem + wmta + m * 4) & 0x00FFFFFF; |
223 | tta &= 0x00FFFFFF; | ||
224 | cksum_idx = tta + trn*4 + 3; | 222 | cksum_idx = tta + trn*4 + 3; |
225 | if (cksum_idx > size) | 223 | if (cksum_idx > size) |
226 | return -EINVAL; | 224 | return -EINVAL; |
@@ -233,8 +231,7 @@ static int load_waveform(u8 *mem, size_t size, u8 *metromem, int m, int t, | |||
233 | 231 | ||
234 | /* here we do the real work of putting the waveform into the | 232 | /* here we do the real work of putting the waveform into the |
235 | metromem buffer. this does runlength decoding of the waveform */ | 233 | metromem buffer. this does runlength decoding of the waveform */ |
236 | wfm_idx = le32_to_cpu(get_unaligned((__le32 *) (mem + tta + trn*4))); | 234 | wfm_idx = get_unaligned_le32(mem + tta + trn * 4) & 0x00FFFFFF; |
237 | wfm_idx &= 0x00FFFFFF; | ||
238 | owfm_idx = wfm_idx; | 235 | owfm_idx = wfm_idx; |
239 | if (wfm_idx > size) | 236 | if (wfm_idx > size) |
240 | return -EINVAL; | 237 | return -EINVAL; |
diff --git a/drivers/zorro/proc.c b/drivers/zorro/proc.c index 2ce4cebc31d9..099b6fb5b5cb 100644 --- a/drivers/zorro/proc.c +++ b/drivers/zorro/proc.c | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
14 | #include <linux/zorro.h> | 14 | #include <linux/zorro.h> |
15 | #include <linux/proc_fs.h> | 15 | #include <linux/proc_fs.h> |
16 | #include <linux/seq_file.h> | ||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/smp_lock.h> | 18 | #include <linux/smp_lock.h> |
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
@@ -76,36 +77,58 @@ proc_bus_zorro_read(struct file *file, char __user *buf, size_t nbytes, loff_t * | |||
76 | } | 77 | } |
77 | 78 | ||
78 | static const struct file_operations proc_bus_zorro_operations = { | 79 | static const struct file_operations proc_bus_zorro_operations = { |
80 | .owner = THIS_MODULE, | ||
79 | .llseek = proc_bus_zorro_lseek, | 81 | .llseek = proc_bus_zorro_lseek, |
80 | .read = proc_bus_zorro_read, | 82 | .read = proc_bus_zorro_read, |
81 | }; | 83 | }; |
82 | 84 | ||
83 | static int | 85 | static void * zorro_seq_start(struct seq_file *m, loff_t *pos) |
84 | get_zorro_dev_info(char *buf, char **start, off_t pos, int count) | ||
85 | { | 86 | { |
86 | u_int slot; | 87 | return (*pos < zorro_num_autocon) ? pos : NULL; |
87 | off_t at = 0; | 88 | } |
88 | int len, cnt; | 89 | |
89 | 90 | static void * zorro_seq_next(struct seq_file *m, void *v, loff_t *pos) | |
90 | for (slot = cnt = 0; slot < zorro_num_autocon && count > cnt; slot++) { | 91 | { |
91 | struct zorro_dev *z = &zorro_autocon[slot]; | 92 | (*pos)++; |
92 | len = sprintf(buf, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, | 93 | return (*pos < zorro_num_autocon) ? pos : NULL; |
93 | z->id, (unsigned long)zorro_resource_start(z), | 94 | } |
94 | (unsigned long)zorro_resource_len(z), | 95 | |
95 | z->rom.er_Type); | 96 | static void zorro_seq_stop(struct seq_file *m, void *v) |
96 | at += len; | 97 | { |
97 | if (at >= pos) { | 98 | } |
98 | if (!*start) { | 99 | |
99 | *start = buf + (pos - (at - len)); | 100 | static int zorro_seq_show(struct seq_file *m, void *v) |
100 | cnt = at - pos; | 101 | { |
101 | } else | 102 | u_int slot = *(loff_t *)v; |
102 | cnt += len; | 103 | struct zorro_dev *z = &zorro_autocon[slot]; |
103 | buf += len; | 104 | |
104 | } | 105 | seq_printf(m, "%02x\t%08x\t%08lx\t%08lx\t%02x\n", slot, z->id, |
105 | } | 106 | (unsigned long)zorro_resource_start(z), |
106 | return (count > cnt) ? cnt : count; | 107 | (unsigned long)zorro_resource_len(z), |
108 | z->rom.er_Type); | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | static const struct seq_operations zorro_devices_seq_ops = { | ||
113 | .start = zorro_seq_start, | ||
114 | .next = zorro_seq_next, | ||
115 | .stop = zorro_seq_stop, | ||
116 | .show = zorro_seq_show, | ||
117 | }; | ||
118 | |||
119 | static int zorro_devices_proc_open(struct inode *inode, struct file *file) | ||
120 | { | ||
121 | return seq_open(file, &zorro_devices_seq_ops); | ||
107 | } | 122 | } |
108 | 123 | ||
124 | static const struct file_operations zorro_devices_proc_fops = { | ||
125 | .owner = THIS_MODULE, | ||
126 | .open = zorro_devices_proc_open, | ||
127 | .read = seq_read, | ||
128 | .llseek = seq_lseek, | ||
129 | .release = seq_release, | ||
130 | }; | ||
131 | |||
109 | static struct proc_dir_entry *proc_bus_zorro_dir; | 132 | static struct proc_dir_entry *proc_bus_zorro_dir; |
110 | 133 | ||
111 | static int __init zorro_proc_attach_device(u_int slot) | 134 | static int __init zorro_proc_attach_device(u_int slot) |
@@ -114,11 +137,11 @@ static int __init zorro_proc_attach_device(u_int slot) | |||
114 | char name[4]; | 137 | char name[4]; |
115 | 138 | ||
116 | sprintf(name, "%02x", slot); | 139 | sprintf(name, "%02x", slot); |
117 | entry = create_proc_entry(name, 0, proc_bus_zorro_dir); | 140 | entry = proc_create_data(name, 0, proc_bus_zorro_dir, |
141 | &proc_bus_zorro_operations, | ||
142 | &zorro_autocon[slot]); | ||
118 | if (!entry) | 143 | if (!entry) |
119 | return -ENOMEM; | 144 | return -ENOMEM; |
120 | entry->proc_fops = &proc_bus_zorro_operations; | ||
121 | entry->data = &zorro_autocon[slot]; | ||
122 | entry->size = sizeof(struct zorro_dev); | 145 | entry->size = sizeof(struct zorro_dev); |
123 | return 0; | 146 | return 0; |
124 | } | 147 | } |
@@ -128,9 +151,9 @@ static int __init zorro_proc_init(void) | |||
128 | u_int slot; | 151 | u_int slot; |
129 | 152 | ||
130 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { | 153 | if (MACH_IS_AMIGA && AMIGAHW_PRESENT(ZORRO)) { |
131 | proc_bus_zorro_dir = proc_mkdir("zorro", proc_bus); | 154 | proc_bus_zorro_dir = proc_mkdir("bus/zorro", NULL); |
132 | create_proc_info_entry("devices", 0, proc_bus_zorro_dir, | 155 | proc_create("devices", 0, proc_bus_zorro_dir, |
133 | get_zorro_dev_info); | 156 | &zorro_devices_proc_fops); |
134 | for (slot = 0; slot < zorro_num_autocon; slot++) | 157 | for (slot = 0; slot < zorro_num_autocon; slot++) |
135 | zorro_proc_attach_device(slot); | 158 | zorro_proc_attach_device(slot); |
136 | } | 159 | } |
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt index 853845abcca6..55e8ee1900a5 100644 --- a/fs/Kconfig.binfmt +++ b/fs/Kconfig.binfmt | |||
@@ -41,7 +41,7 @@ config BINFMT_ELF_FDPIC | |||
41 | It is also possible to run FDPIC ELF binaries on MMU linux also. | 41 | It is also possible to run FDPIC ELF binaries on MMU linux also. |
42 | 42 | ||
43 | config BINFMT_FLAT | 43 | config BINFMT_FLAT |
44 | tristate "Kernel support for flat binaries" | 44 | bool "Kernel support for flat binaries" |
45 | depends on !MMU | 45 | depends on !MMU |
46 | help | 46 | help |
47 | Support uClinux FLAT format binaries. | 47 | Support uClinux FLAT format binaries. |
diff --git a/fs/adfs/dir_f.c b/fs/adfs/dir_f.c index b9b2b27b68c3..ea7df2146921 100644 --- a/fs/adfs/dir_f.c +++ b/fs/adfs/dir_f.c | |||
@@ -122,9 +122,9 @@ adfs_dir_checkbyte(const struct adfs_dir *dir) | |||
122 | ptr.ptr8 = bufoff(bh, i); | 122 | ptr.ptr8 = bufoff(bh, i); |
123 | end.ptr8 = ptr.ptr8 + last - i; | 123 | end.ptr8 = ptr.ptr8 + last - i; |
124 | 124 | ||
125 | do | 125 | do { |
126 | dircheck = *ptr.ptr8++ ^ ror13(dircheck); | 126 | dircheck = *ptr.ptr8++ ^ ror13(dircheck); |
127 | while (ptr.ptr8 < end.ptr8); | 127 | } while (ptr.ptr8 < end.ptr8); |
128 | } | 128 | } |
129 | 129 | ||
130 | /* | 130 | /* |
diff --git a/fs/affs/file.c b/fs/affs/file.c index 6e0c9399200e..e87ede608f77 100644 --- a/fs/affs/file.c +++ b/fs/affs/file.c | |||
@@ -325,8 +325,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul | |||
325 | pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block); | 325 | pr_debug("AFFS: get_block(%u, %lu)\n", (u32)inode->i_ino, (unsigned long)block); |
326 | 326 | ||
327 | 327 | ||
328 | if (block > (sector_t)0x7fffffffUL) | 328 | BUG_ON(block > (sector_t)0x7fffffffUL); |
329 | BUG(); | ||
330 | 329 | ||
331 | if (block >= AFFS_I(inode)->i_blkcnt) { | 330 | if (block >= AFFS_I(inode)->i_blkcnt) { |
332 | if (block > AFFS_I(inode)->i_blkcnt || !create) | 331 | if (block > AFFS_I(inode)->i_blkcnt || !create) |
@@ -493,8 +492,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign | |||
493 | u32 tmp; | 492 | u32 tmp; |
494 | 493 | ||
495 | pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to); | 494 | pr_debug("AFFS: read_page(%u, %ld, %d, %d)\n", (u32)inode->i_ino, page->index, from, to); |
496 | if (from > to || to > PAGE_CACHE_SIZE) | 495 | BUG_ON(from > to || to > PAGE_CACHE_SIZE); |
497 | BUG(); | ||
498 | kmap(page); | 496 | kmap(page); |
499 | data = page_address(page); | 497 | data = page_address(page); |
500 | bsize = AFFS_SB(sb)->s_data_blksize; | 498 | bsize = AFFS_SB(sb)->s_data_blksize; |
@@ -507,8 +505,7 @@ affs_do_readpage_ofs(struct file *file, struct page *page, unsigned from, unsign | |||
507 | if (IS_ERR(bh)) | 505 | if (IS_ERR(bh)) |
508 | return PTR_ERR(bh); | 506 | return PTR_ERR(bh); |
509 | tmp = min(bsize - boff, to - from); | 507 | tmp = min(bsize - boff, to - from); |
510 | if (from + tmp > to || tmp > bsize) | 508 | BUG_ON(from + tmp > to || tmp > bsize); |
511 | BUG(); | ||
512 | memcpy(data + from, AFFS_DATA(bh) + boff, tmp); | 509 | memcpy(data + from, AFFS_DATA(bh) + boff, tmp); |
513 | affs_brelse(bh); | 510 | affs_brelse(bh); |
514 | bidx++; | 511 | bidx++; |
@@ -540,8 +537,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) | |||
540 | if (IS_ERR(bh)) | 537 | if (IS_ERR(bh)) |
541 | return PTR_ERR(bh); | 538 | return PTR_ERR(bh); |
542 | tmp = min(bsize - boff, newsize - size); | 539 | tmp = min(bsize - boff, newsize - size); |
543 | if (boff + tmp > bsize || tmp > bsize) | 540 | BUG_ON(boff + tmp > bsize || tmp > bsize); |
544 | BUG(); | ||
545 | memset(AFFS_DATA(bh) + boff, 0, tmp); | 541 | memset(AFFS_DATA(bh) + boff, 0, tmp); |
546 | AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp); | 542 | AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp); |
547 | affs_fix_checksum(sb, bh); | 543 | affs_fix_checksum(sb, bh); |
@@ -560,8 +556,7 @@ affs_extent_file_ofs(struct inode *inode, u32 newsize) | |||
560 | if (IS_ERR(bh)) | 556 | if (IS_ERR(bh)) |
561 | goto out; | 557 | goto out; |
562 | tmp = min(bsize, newsize - size); | 558 | tmp = min(bsize, newsize - size); |
563 | if (tmp > bsize) | 559 | BUG_ON(tmp > bsize); |
564 | BUG(); | ||
565 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); | 560 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); |
566 | AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); | 561 | AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino); |
567 | AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); | 562 | AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx); |
@@ -683,8 +678,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
683 | if (IS_ERR(bh)) | 678 | if (IS_ERR(bh)) |
684 | return PTR_ERR(bh); | 679 | return PTR_ERR(bh); |
685 | tmp = min(bsize - boff, to - from); | 680 | tmp = min(bsize - boff, to - from); |
686 | if (boff + tmp > bsize || tmp > bsize) | 681 | BUG_ON(boff + tmp > bsize || tmp > bsize); |
687 | BUG(); | ||
688 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); | 682 | memcpy(AFFS_DATA(bh) + boff, data + from, tmp); |
689 | AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp); | 683 | AFFS_DATA_HEAD(bh)->size = cpu_to_be32(be32_to_cpu(AFFS_DATA_HEAD(bh)->size) + tmp); |
690 | affs_fix_checksum(sb, bh); | 684 | affs_fix_checksum(sb, bh); |
@@ -732,8 +726,7 @@ static int affs_write_end_ofs(struct file *file, struct address_space *mapping, | |||
732 | if (IS_ERR(bh)) | 726 | if (IS_ERR(bh)) |
733 | goto out; | 727 | goto out; |
734 | tmp = min(bsize, to - from); | 728 | tmp = min(bsize, to - from); |
735 | if (tmp > bsize) | 729 | BUG_ON(tmp > bsize); |
736 | BUG(); | ||
737 | memcpy(AFFS_DATA(bh), data + from, tmp); | 730 | memcpy(AFFS_DATA(bh), data + from, tmp); |
738 | if (buffer_new(bh)) { | 731 | if (buffer_new(bh)) { |
739 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); | 732 | AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA); |
diff --git a/fs/affs/super.c b/fs/affs/super.c index d2dc047cb479..01d25d532541 100644 --- a/fs/affs/super.c +++ b/fs/affs/super.c | |||
@@ -199,7 +199,6 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | |||
199 | case Opt_prefix: | 199 | case Opt_prefix: |
200 | /* Free any previous prefix */ | 200 | /* Free any previous prefix */ |
201 | kfree(*prefix); | 201 | kfree(*prefix); |
202 | *prefix = NULL; | ||
203 | *prefix = match_strdup(&args[0]); | 202 | *prefix = match_strdup(&args[0]); |
204 | if (!*prefix) | 203 | if (!*prefix) |
205 | return 0; | 204 | return 0; |
@@ -233,6 +232,8 @@ parse_options(char *options, uid_t *uid, gid_t *gid, int *mode, int *reserved, s | |||
233 | break; | 232 | break; |
234 | case Opt_volume: { | 233 | case Opt_volume: { |
235 | char *vol = match_strdup(&args[0]); | 234 | char *vol = match_strdup(&args[0]); |
235 | if (!vol) | ||
236 | return 0; | ||
236 | strlcpy(volume, vol, 32); | 237 | strlcpy(volume, vol, 32); |
237 | kfree(vol); | 238 | kfree(vol); |
238 | break; | 239 | break; |
diff --git a/fs/afs/afs_cm.h b/fs/afs/afs_cm.h index 7b4d4fab4c80..255f5dd6040c 100644 --- a/fs/afs/afs_cm.h +++ b/fs/afs/afs_cm.h | |||
@@ -24,7 +24,8 @@ enum AFS_CM_Operations { | |||
24 | CBGetXStatsVersion = 209, /* get version of extended statistics */ | 24 | CBGetXStatsVersion = 209, /* get version of extended statistics */ |
25 | CBGetXStats = 210, /* get contents of extended statistics data */ | 25 | CBGetXStats = 210, /* get contents of extended statistics data */ |
26 | CBInitCallBackState3 = 213, /* initialise callback state, version 3 */ | 26 | CBInitCallBackState3 = 213, /* initialise callback state, version 3 */ |
27 | CBGetCapabilities = 65538, /* get client capabilities */ | 27 | CBProbeUuid = 214, /* check the client hasn't rebooted */ |
28 | CBTellMeAboutYourself = 65538, /* get client capabilities */ | ||
28 | }; | 29 | }; |
29 | 30 | ||
30 | #define AFS_CAP_ERROR_TRANSLATION 0x1 | 31 | #define AFS_CAP_ERROR_TRANSLATION 0x1 |
diff --git a/fs/afs/cell.c b/fs/afs/cell.c index 584bb0f9c36a..5e1df14e16b1 100644 --- a/fs/afs/cell.c +++ b/fs/afs/cell.c | |||
@@ -20,7 +20,7 @@ | |||
20 | DECLARE_RWSEM(afs_proc_cells_sem); | 20 | DECLARE_RWSEM(afs_proc_cells_sem); |
21 | LIST_HEAD(afs_proc_cells); | 21 | LIST_HEAD(afs_proc_cells); |
22 | 22 | ||
23 | static struct list_head afs_cells = LIST_HEAD_INIT(afs_cells); | 23 | static LIST_HEAD(afs_cells); |
24 | static DEFINE_RWLOCK(afs_cells_lock); | 24 | static DEFINE_RWLOCK(afs_cells_lock); |
25 | static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ | 25 | static DECLARE_RWSEM(afs_cells_sem); /* add/remove serialisation */ |
26 | static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq); | 26 | static DECLARE_WAIT_QUEUE_HEAD(afs_cells_freeable_wq); |
diff --git a/fs/afs/cmservice.c b/fs/afs/cmservice.c index 47b71c8947f9..eb765489164f 100644 --- a/fs/afs/cmservice.c +++ b/fs/afs/cmservice.c | |||
@@ -26,8 +26,9 @@ static int afs_deliver_cb_init_call_back_state3(struct afs_call *, | |||
26 | struct sk_buff *, bool); | 26 | struct sk_buff *, bool); |
27 | static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool); | 27 | static int afs_deliver_cb_probe(struct afs_call *, struct sk_buff *, bool); |
28 | static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool); | 28 | static int afs_deliver_cb_callback(struct afs_call *, struct sk_buff *, bool); |
29 | static int afs_deliver_cb_get_capabilities(struct afs_call *, struct sk_buff *, | 29 | static int afs_deliver_cb_probe_uuid(struct afs_call *, struct sk_buff *, bool); |
30 | bool); | 30 | static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *, |
31 | struct sk_buff *, bool); | ||
31 | static void afs_cm_destructor(struct afs_call *); | 32 | static void afs_cm_destructor(struct afs_call *); |
32 | 33 | ||
33 | /* | 34 | /* |
@@ -71,11 +72,21 @@ static const struct afs_call_type afs_SRXCBProbe = { | |||
71 | }; | 72 | }; |
72 | 73 | ||
73 | /* | 74 | /* |
74 | * CB.GetCapabilities operation type | 75 | * CB.ProbeUuid operation type |
75 | */ | 76 | */ |
76 | static const struct afs_call_type afs_SRXCBGetCapabilites = { | 77 | static const struct afs_call_type afs_SRXCBProbeUuid = { |
77 | .name = "CB.GetCapabilities", | 78 | .name = "CB.ProbeUuid", |
78 | .deliver = afs_deliver_cb_get_capabilities, | 79 | .deliver = afs_deliver_cb_probe_uuid, |
80 | .abort_to_error = afs_abort_to_error, | ||
81 | .destructor = afs_cm_destructor, | ||
82 | }; | ||
83 | |||
84 | /* | ||
85 | * CB.TellMeAboutYourself operation type | ||
86 | */ | ||
87 | static const struct afs_call_type afs_SRXCBTellMeAboutYourself = { | ||
88 | .name = "CB.TellMeAboutYourself", | ||
89 | .deliver = afs_deliver_cb_tell_me_about_yourself, | ||
79 | .abort_to_error = afs_abort_to_error, | 90 | .abort_to_error = afs_abort_to_error, |
80 | .destructor = afs_cm_destructor, | 91 | .destructor = afs_cm_destructor, |
81 | }; | 92 | }; |
@@ -103,8 +114,8 @@ bool afs_cm_incoming_call(struct afs_call *call) | |||
103 | case CBProbe: | 114 | case CBProbe: |
104 | call->type = &afs_SRXCBProbe; | 115 | call->type = &afs_SRXCBProbe; |
105 | return true; | 116 | return true; |
106 | case CBGetCapabilities: | 117 | case CBTellMeAboutYourself: |
107 | call->type = &afs_SRXCBGetCapabilites; | 118 | call->type = &afs_SRXCBTellMeAboutYourself; |
108 | return true; | 119 | return true; |
109 | default: | 120 | default: |
110 | return false; | 121 | return false; |
@@ -393,9 +404,105 @@ static int afs_deliver_cb_probe(struct afs_call *call, struct sk_buff *skb, | |||
393 | } | 404 | } |
394 | 405 | ||
395 | /* | 406 | /* |
407 | * allow the fileserver to quickly find out if the fileserver has been rebooted | ||
408 | */ | ||
409 | static void SRXAFSCB_ProbeUuid(struct work_struct *work) | ||
410 | { | ||
411 | struct afs_call *call = container_of(work, struct afs_call, work); | ||
412 | struct afs_uuid *r = call->request; | ||
413 | |||
414 | struct { | ||
415 | __be32 match; | ||
416 | } reply; | ||
417 | |||
418 | _enter(""); | ||
419 | |||
420 | |||
421 | if (memcmp(r, &afs_uuid, sizeof(afs_uuid)) == 0) | ||
422 | reply.match = htonl(0); | ||
423 | else | ||
424 | reply.match = htonl(1); | ||
425 | |||
426 | afs_send_simple_reply(call, &reply, sizeof(reply)); | ||
427 | _leave(""); | ||
428 | } | ||
429 | |||
430 | /* | ||
431 | * deliver request data to a CB.ProbeUuid call | ||
432 | */ | ||
433 | static int afs_deliver_cb_probe_uuid(struct afs_call *call, struct sk_buff *skb, | ||
434 | bool last) | ||
435 | { | ||
436 | struct afs_uuid *r; | ||
437 | unsigned loop; | ||
438 | __be32 *b; | ||
439 | int ret; | ||
440 | |||
441 | _enter("{%u},{%u},%d", call->unmarshall, skb->len, last); | ||
442 | |||
443 | if (skb->len > 0) | ||
444 | return -EBADMSG; | ||
445 | if (!last) | ||
446 | return 0; | ||
447 | |||
448 | switch (call->unmarshall) { | ||
449 | case 0: | ||
450 | call->offset = 0; | ||
451 | call->buffer = kmalloc(11 * sizeof(__be32), GFP_KERNEL); | ||
452 | if (!call->buffer) | ||
453 | return -ENOMEM; | ||
454 | call->unmarshall++; | ||
455 | |||
456 | case 1: | ||
457 | _debug("extract UUID"); | ||
458 | ret = afs_extract_data(call, skb, last, call->buffer, | ||
459 | 11 * sizeof(__be32)); | ||
460 | switch (ret) { | ||
461 | case 0: break; | ||
462 | case -EAGAIN: return 0; | ||
463 | default: return ret; | ||
464 | } | ||
465 | |||
466 | _debug("unmarshall UUID"); | ||
467 | call->request = kmalloc(sizeof(struct afs_uuid), GFP_KERNEL); | ||
468 | if (!call->request) | ||
469 | return -ENOMEM; | ||
470 | |||
471 | b = call->buffer; | ||
472 | r = call->request; | ||
473 | r->time_low = ntohl(b[0]); | ||
474 | r->time_mid = ntohl(b[1]); | ||
475 | r->time_hi_and_version = ntohl(b[2]); | ||
476 | r->clock_seq_hi_and_reserved = ntohl(b[3]); | ||
477 | r->clock_seq_low = ntohl(b[4]); | ||
478 | |||
479 | for (loop = 0; loop < 6; loop++) | ||
480 | r->node[loop] = ntohl(b[loop + 5]); | ||
481 | |||
482 | call->offset = 0; | ||
483 | call->unmarshall++; | ||
484 | |||
485 | case 2: | ||
486 | _debug("trailer"); | ||
487 | if (skb->len != 0) | ||
488 | return -EBADMSG; | ||
489 | break; | ||
490 | } | ||
491 | |||
492 | if (!last) | ||
493 | return 0; | ||
494 | |||
495 | call->state = AFS_CALL_REPLYING; | ||
496 | |||
497 | INIT_WORK(&call->work, SRXAFSCB_ProbeUuid); | ||
498 | schedule_work(&call->work); | ||
499 | return 0; | ||
500 | } | ||
501 | |||
502 | /* | ||
396 | * allow the fileserver to ask about the cache manager's capabilities | 503 | * allow the fileserver to ask about the cache manager's capabilities |
397 | */ | 504 | */ |
398 | static void SRXAFSCB_GetCapabilities(struct work_struct *work) | 505 | static void SRXAFSCB_TellMeAboutYourself(struct work_struct *work) |
399 | { | 506 | { |
400 | struct afs_interface *ifs; | 507 | struct afs_interface *ifs; |
401 | struct afs_call *call = container_of(work, struct afs_call, work); | 508 | struct afs_call *call = container_of(work, struct afs_call, work); |
@@ -456,10 +563,10 @@ static void SRXAFSCB_GetCapabilities(struct work_struct *work) | |||
456 | } | 563 | } |
457 | 564 | ||
458 | /* | 565 | /* |
459 | * deliver request data to a CB.GetCapabilities call | 566 | * deliver request data to a CB.TellMeAboutYourself call |
460 | */ | 567 | */ |
461 | static int afs_deliver_cb_get_capabilities(struct afs_call *call, | 568 | static int afs_deliver_cb_tell_me_about_yourself(struct afs_call *call, |
462 | struct sk_buff *skb, bool last) | 569 | struct sk_buff *skb, bool last) |
463 | { | 570 | { |
464 | _enter(",{%u},%d", skb->len, last); | 571 | _enter(",{%u},%d", skb->len, last); |
465 | 572 | ||
@@ -471,7 +578,7 @@ static int afs_deliver_cb_get_capabilities(struct afs_call *call, | |||
471 | /* no unmarshalling required */ | 578 | /* no unmarshalling required */ |
472 | call->state = AFS_CALL_REPLYING; | 579 | call->state = AFS_CALL_REPLYING; |
473 | 580 | ||
474 | INIT_WORK(&call->work, SRXAFSCB_GetCapabilities); | 581 | INIT_WORK(&call->work, SRXAFSCB_TellMeAboutYourself); |
475 | schedule_work(&call->work); | 582 | schedule_work(&call->work); |
476 | return 0; | 583 | return 0; |
477 | } | 584 | } |
diff --git a/fs/afs/proc.c b/fs/afs/proc.c index 846c7615ac9e..9f7d1ae70269 100644 --- a/fs/afs/proc.c +++ b/fs/afs/proc.c | |||
@@ -41,6 +41,7 @@ static const struct file_operations afs_proc_cells_fops = { | |||
41 | .write = afs_proc_cells_write, | 41 | .write = afs_proc_cells_write, |
42 | .llseek = seq_lseek, | 42 | .llseek = seq_lseek, |
43 | .release = seq_release, | 43 | .release = seq_release, |
44 | .owner = THIS_MODULE, | ||
44 | }; | 45 | }; |
45 | 46 | ||
46 | static int afs_proc_rootcell_open(struct inode *inode, struct file *file); | 47 | static int afs_proc_rootcell_open(struct inode *inode, struct file *file); |
@@ -56,7 +57,8 @@ static const struct file_operations afs_proc_rootcell_fops = { | |||
56 | .read = afs_proc_rootcell_read, | 57 | .read = afs_proc_rootcell_read, |
57 | .write = afs_proc_rootcell_write, | 58 | .write = afs_proc_rootcell_write, |
58 | .llseek = no_llseek, | 59 | .llseek = no_llseek, |
59 | .release = afs_proc_rootcell_release | 60 | .release = afs_proc_rootcell_release, |
61 | .owner = THIS_MODULE, | ||
60 | }; | 62 | }; |
61 | 63 | ||
62 | static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file); | 64 | static int afs_proc_cell_volumes_open(struct inode *inode, struct file *file); |
@@ -80,6 +82,7 @@ static const struct file_operations afs_proc_cell_volumes_fops = { | |||
80 | .read = seq_read, | 82 | .read = seq_read, |
81 | .llseek = seq_lseek, | 83 | .llseek = seq_lseek, |
82 | .release = afs_proc_cell_volumes_release, | 84 | .release = afs_proc_cell_volumes_release, |
85 | .owner = THIS_MODULE, | ||
83 | }; | 86 | }; |
84 | 87 | ||
85 | static int afs_proc_cell_vlservers_open(struct inode *inode, | 88 | static int afs_proc_cell_vlservers_open(struct inode *inode, |
@@ -104,6 +107,7 @@ static const struct file_operations afs_proc_cell_vlservers_fops = { | |||
104 | .read = seq_read, | 107 | .read = seq_read, |
105 | .llseek = seq_lseek, | 108 | .llseek = seq_lseek, |
106 | .release = afs_proc_cell_vlservers_release, | 109 | .release = afs_proc_cell_vlservers_release, |
110 | .owner = THIS_MODULE, | ||
107 | }; | 111 | }; |
108 | 112 | ||
109 | static int afs_proc_cell_servers_open(struct inode *inode, struct file *file); | 113 | static int afs_proc_cell_servers_open(struct inode *inode, struct file *file); |
@@ -127,6 +131,7 @@ static const struct file_operations afs_proc_cell_servers_fops = { | |||
127 | .read = seq_read, | 131 | .read = seq_read, |
128 | .llseek = seq_lseek, | 132 | .llseek = seq_lseek, |
129 | .release = afs_proc_cell_servers_release, | 133 | .release = afs_proc_cell_servers_release, |
134 | .owner = THIS_MODULE, | ||
130 | }; | 135 | }; |
131 | 136 | ||
132 | /* | 137 | /* |
@@ -143,17 +148,13 @@ int afs_proc_init(void) | |||
143 | goto error_dir; | 148 | goto error_dir; |
144 | proc_afs->owner = THIS_MODULE; | 149 | proc_afs->owner = THIS_MODULE; |
145 | 150 | ||
146 | p = create_proc_entry("cells", 0, proc_afs); | 151 | p = proc_create("cells", 0, proc_afs, &afs_proc_cells_fops); |
147 | if (!p) | 152 | if (!p) |
148 | goto error_cells; | 153 | goto error_cells; |
149 | p->proc_fops = &afs_proc_cells_fops; | ||
150 | p->owner = THIS_MODULE; | ||
151 | 154 | ||
152 | p = create_proc_entry("rootcell", 0, proc_afs); | 155 | p = proc_create("rootcell", 0, proc_afs, &afs_proc_rootcell_fops); |
153 | if (!p) | 156 | if (!p) |
154 | goto error_rootcell; | 157 | goto error_rootcell; |
155 | p->proc_fops = &afs_proc_rootcell_fops; | ||
156 | p->owner = THIS_MODULE; | ||
157 | 158 | ||
158 | _leave(" = 0"); | 159 | _leave(" = 0"); |
159 | return 0; | 160 | return 0; |
@@ -395,26 +396,20 @@ int afs_proc_cell_setup(struct afs_cell *cell) | |||
395 | if (!cell->proc_dir) | 396 | if (!cell->proc_dir) |
396 | goto error_dir; | 397 | goto error_dir; |
397 | 398 | ||
398 | p = create_proc_entry("servers", 0, cell->proc_dir); | 399 | p = proc_create_data("servers", 0, cell->proc_dir, |
400 | &afs_proc_cell_servers_fops, cell); | ||
399 | if (!p) | 401 | if (!p) |
400 | goto error_servers; | 402 | goto error_servers; |
401 | p->proc_fops = &afs_proc_cell_servers_fops; | ||
402 | p->owner = THIS_MODULE; | ||
403 | p->data = cell; | ||
404 | 403 | ||
405 | p = create_proc_entry("vlservers", 0, cell->proc_dir); | 404 | p = proc_create_data("vlservers", 0, cell->proc_dir, |
405 | &afs_proc_cell_vlservers_fops, cell); | ||
406 | if (!p) | 406 | if (!p) |
407 | goto error_vlservers; | 407 | goto error_vlservers; |
408 | p->proc_fops = &afs_proc_cell_vlservers_fops; | ||
409 | p->owner = THIS_MODULE; | ||
410 | p->data = cell; | ||
411 | 408 | ||
412 | p = create_proc_entry("volumes", 0, cell->proc_dir); | 409 | p = proc_create_data("volumes", 0, cell->proc_dir, |
410 | &afs_proc_cell_volumes_fops, cell); | ||
413 | if (!p) | 411 | if (!p) |
414 | goto error_volumes; | 412 | goto error_volumes; |
415 | p->proc_fops = &afs_proc_cell_volumes_fops; | ||
416 | p->owner = THIS_MODULE; | ||
417 | p->data = cell; | ||
418 | 413 | ||
419 | _leave(" = 0"); | 414 | _leave(" = 0"); |
420 | return 0; | 415 | return 0; |
@@ -191,6 +191,43 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
191 | kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \ | 191 | kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \ |
192 | } while(0) | 192 | } while(0) |
193 | 193 | ||
194 | |||
195 | /* __put_ioctx | ||
196 | * Called when the last user of an aio context has gone away, | ||
197 | * and the struct needs to be freed. | ||
198 | */ | ||
199 | static void __put_ioctx(struct kioctx *ctx) | ||
200 | { | ||
201 | unsigned nr_events = ctx->max_reqs; | ||
202 | |||
203 | BUG_ON(ctx->reqs_active); | ||
204 | |||
205 | cancel_delayed_work(&ctx->wq); | ||
206 | cancel_work_sync(&ctx->wq.work); | ||
207 | aio_free_ring(ctx); | ||
208 | mmdrop(ctx->mm); | ||
209 | ctx->mm = NULL; | ||
210 | pr_debug("__put_ioctx: freeing %p\n", ctx); | ||
211 | kmem_cache_free(kioctx_cachep, ctx); | ||
212 | |||
213 | if (nr_events) { | ||
214 | spin_lock(&aio_nr_lock); | ||
215 | BUG_ON(aio_nr - nr_events > aio_nr); | ||
216 | aio_nr -= nr_events; | ||
217 | spin_unlock(&aio_nr_lock); | ||
218 | } | ||
219 | } | ||
220 | |||
221 | #define get_ioctx(kioctx) do { \ | ||
222 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | ||
223 | atomic_inc(&(kioctx)->users); \ | ||
224 | } while (0) | ||
225 | #define put_ioctx(kioctx) do { \ | ||
226 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | ||
227 | if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ | ||
228 | __put_ioctx(kioctx); \ | ||
229 | } while (0) | ||
230 | |||
194 | /* ioctx_alloc | 231 | /* ioctx_alloc |
195 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | 232 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
196 | */ | 233 | */ |
@@ -240,7 +277,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
240 | if (ctx->max_reqs == 0) | 277 | if (ctx->max_reqs == 0) |
241 | goto out_cleanup; | 278 | goto out_cleanup; |
242 | 279 | ||
243 | /* now link into global list. kludge. FIXME */ | 280 | /* now link into global list. */ |
244 | write_lock(&mm->ioctx_list_lock); | 281 | write_lock(&mm->ioctx_list_lock); |
245 | ctx->next = mm->ioctx_list; | 282 | ctx->next = mm->ioctx_list; |
246 | mm->ioctx_list = ctx; | 283 | mm->ioctx_list = ctx; |
@@ -361,32 +398,6 @@ void exit_aio(struct mm_struct *mm) | |||
361 | } | 398 | } |
362 | } | 399 | } |
363 | 400 | ||
364 | /* __put_ioctx | ||
365 | * Called when the last user of an aio context has gone away, | ||
366 | * and the struct needs to be freed. | ||
367 | */ | ||
368 | void __put_ioctx(struct kioctx *ctx) | ||
369 | { | ||
370 | unsigned nr_events = ctx->max_reqs; | ||
371 | |||
372 | BUG_ON(ctx->reqs_active); | ||
373 | |||
374 | cancel_delayed_work(&ctx->wq); | ||
375 | cancel_work_sync(&ctx->wq.work); | ||
376 | aio_free_ring(ctx); | ||
377 | mmdrop(ctx->mm); | ||
378 | ctx->mm = NULL; | ||
379 | pr_debug("__put_ioctx: freeing %p\n", ctx); | ||
380 | kmem_cache_free(kioctx_cachep, ctx); | ||
381 | |||
382 | if (nr_events) { | ||
383 | spin_lock(&aio_nr_lock); | ||
384 | BUG_ON(aio_nr - nr_events > aio_nr); | ||
385 | aio_nr -= nr_events; | ||
386 | spin_unlock(&aio_nr_lock); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | /* aio_get_req | 401 | /* aio_get_req |
391 | * Allocate a slot for an aio request. Increments the users count | 402 | * Allocate a slot for an aio request. Increments the users count |
392 | * of the kioctx so that the kioctx stays around until all requests are | 403 | * of the kioctx so that the kioctx stays around until all requests are |
@@ -542,10 +553,7 @@ int aio_put_req(struct kiocb *req) | |||
542 | return ret; | 553 | return ret; |
543 | } | 554 | } |
544 | 555 | ||
545 | /* Lookup an ioctx id. ioctx_list is lockless for reads. | 556 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
546 | * FIXME: this is O(n) and is only suitable for development. | ||
547 | */ | ||
548 | struct kioctx *lookup_ioctx(unsigned long ctx_id) | ||
549 | { | 557 | { |
550 | struct kioctx *ioctx; | 558 | struct kioctx *ioctx; |
551 | struct mm_struct *mm; | 559 | struct mm_struct *mm; |
@@ -1552,7 +1560,7 @@ static int aio_wake_function(wait_queue_t *wait, unsigned mode, | |||
1552 | return 1; | 1560 | return 1; |
1553 | } | 1561 | } |
1554 | 1562 | ||
1555 | int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | 1563 | static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, |
1556 | struct iocb *iocb) | 1564 | struct iocb *iocb) |
1557 | { | 1565 | { |
1558 | struct kiocb *req; | 1566 | struct kiocb *req; |
@@ -1593,7 +1601,7 @@ int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1593 | * event using the eventfd_signal() function. | 1601 | * event using the eventfd_signal() function. |
1594 | */ | 1602 | */ |
1595 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); | 1603 | req->ki_eventfd = eventfd_fget((int) iocb->aio_resfd); |
1596 | if (unlikely(IS_ERR(req->ki_eventfd))) { | 1604 | if (IS_ERR(req->ki_eventfd)) { |
1597 | ret = PTR_ERR(req->ki_eventfd); | 1605 | ret = PTR_ERR(req->ki_eventfd); |
1598 | goto out_put_req; | 1606 | goto out_put_req; |
1599 | } | 1607 | } |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index a54a946a50ae..aa4c5ff8a40d 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -533,9 +533,9 @@ static struct dentry *autofs4_lookup_unhashed(struct autofs_sb_info *sbi, struct | |||
533 | goto next; | 533 | goto next; |
534 | 534 | ||
535 | if (d_unhashed(dentry)) { | 535 | if (d_unhashed(dentry)) { |
536 | struct autofs_info *ino = autofs4_dentry_ino(dentry); | ||
537 | struct inode *inode = dentry->d_inode; | 536 | struct inode *inode = dentry->d_inode; |
538 | 537 | ||
538 | ino = autofs4_dentry_ino(dentry); | ||
539 | list_del_init(&ino->rehash); | 539 | list_del_init(&ino->rehash); |
540 | dget(dentry); | 540 | dget(dentry); |
541 | /* | 541 | /* |
diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index 82123ff3e1dd..e8717de3bab3 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c | |||
@@ -489,9 +489,9 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) | |||
489 | { | 489 | { |
490 | befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); | 490 | befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); |
491 | if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { | 491 | if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { |
492 | char *p = nd_get_link(nd); | 492 | char *link = nd_get_link(nd); |
493 | if (!IS_ERR(p)) | 493 | if (!IS_ERR(link)) |
494 | kfree(p); | 494 | kfree(link); |
495 | } | 495 | } |
496 | } | 496 | } |
497 | 497 | ||
diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c index a1bb2244cac7..ba4cddb92f1d 100644 --- a/fs/binfmt_aout.c +++ b/fs/binfmt_aout.c | |||
@@ -372,21 +372,17 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs) | |||
372 | 372 | ||
373 | flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data); | 373 | flush_icache_range(text_addr, text_addr+ex.a_text+ex.a_data); |
374 | } else { | 374 | } else { |
375 | static unsigned long error_time, error_time2; | ||
376 | if ((ex.a_text & 0xfff || ex.a_data & 0xfff) && | 375 | if ((ex.a_text & 0xfff || ex.a_data & 0xfff) && |
377 | (N_MAGIC(ex) != NMAGIC) && (jiffies-error_time2) > 5*HZ) | 376 | (N_MAGIC(ex) != NMAGIC) && printk_ratelimit()) |
378 | { | 377 | { |
379 | printk(KERN_NOTICE "executable not page aligned\n"); | 378 | printk(KERN_NOTICE "executable not page aligned\n"); |
380 | error_time2 = jiffies; | ||
381 | } | 379 | } |
382 | 380 | ||
383 | if ((fd_offset & ~PAGE_MASK) != 0 && | 381 | if ((fd_offset & ~PAGE_MASK) != 0 && printk_ratelimit()) |
384 | (jiffies-error_time) > 5*HZ) | ||
385 | { | 382 | { |
386 | printk(KERN_WARNING | 383 | printk(KERN_WARNING |
387 | "fd_offset is not page aligned. Please convert program: %s\n", | 384 | "fd_offset is not page aligned. Please convert program: %s\n", |
388 | bprm->file->f_path.dentry->d_name.name); | 385 | bprm->file->f_path.dentry->d_name.name); |
389 | error_time = jiffies; | ||
390 | } | 386 | } |
391 | 387 | ||
392 | if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { | 388 | if (!bprm->file->f_op->mmap||((fd_offset & ~PAGE_MASK) != 0)) { |
@@ -495,15 +491,13 @@ static int load_aout_library(struct file *file) | |||
495 | start_addr = ex.a_entry & 0xfffff000; | 491 | start_addr = ex.a_entry & 0xfffff000; |
496 | 492 | ||
497 | if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) { | 493 | if ((N_TXTOFF(ex) & ~PAGE_MASK) != 0) { |
498 | static unsigned long error_time; | ||
499 | loff_t pos = N_TXTOFF(ex); | 494 | loff_t pos = N_TXTOFF(ex); |
500 | 495 | ||
501 | if ((jiffies-error_time) > 5*HZ) | 496 | if (printk_ratelimit()) |
502 | { | 497 | { |
503 | printk(KERN_WARNING | 498 | printk(KERN_WARNING |
504 | "N_TXTOFF is not page aligned. Please convert library: %s\n", | 499 | "N_TXTOFF is not page aligned. Please convert library: %s\n", |
505 | file->f_path.dentry->d_name.name); | 500 | file->f_path.dentry->d_name.name); |
506 | error_time = jiffies; | ||
507 | } | 501 | } |
508 | down_write(¤t->mm->mmap_sem); | 502 | down_write(¤t->mm->mmap_sem); |
509 | do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); | 503 | do_brk(start_addr, ex.a_text + ex.a_data + ex.a_bss); |
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 9924581df6f6..b25707fee2cc 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c | |||
@@ -1255,26 +1255,23 @@ static int writenote(struct memelfnote *men, struct file *file, | |||
1255 | static void fill_elf_header(struct elfhdr *elf, int segs, | 1255 | static void fill_elf_header(struct elfhdr *elf, int segs, |
1256 | u16 machine, u32 flags, u8 osabi) | 1256 | u16 machine, u32 flags, u8 osabi) |
1257 | { | 1257 | { |
1258 | memset(elf, 0, sizeof(*elf)); | ||
1259 | |||
1258 | memcpy(elf->e_ident, ELFMAG, SELFMAG); | 1260 | memcpy(elf->e_ident, ELFMAG, SELFMAG); |
1259 | elf->e_ident[EI_CLASS] = ELF_CLASS; | 1261 | elf->e_ident[EI_CLASS] = ELF_CLASS; |
1260 | elf->e_ident[EI_DATA] = ELF_DATA; | 1262 | elf->e_ident[EI_DATA] = ELF_DATA; |
1261 | elf->e_ident[EI_VERSION] = EV_CURRENT; | 1263 | elf->e_ident[EI_VERSION] = EV_CURRENT; |
1262 | elf->e_ident[EI_OSABI] = ELF_OSABI; | 1264 | elf->e_ident[EI_OSABI] = ELF_OSABI; |
1263 | memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD); | ||
1264 | 1265 | ||
1265 | elf->e_type = ET_CORE; | 1266 | elf->e_type = ET_CORE; |
1266 | elf->e_machine = machine; | 1267 | elf->e_machine = machine; |
1267 | elf->e_version = EV_CURRENT; | 1268 | elf->e_version = EV_CURRENT; |
1268 | elf->e_entry = 0; | ||
1269 | elf->e_phoff = sizeof(struct elfhdr); | 1269 | elf->e_phoff = sizeof(struct elfhdr); |
1270 | elf->e_shoff = 0; | ||
1271 | elf->e_flags = flags; | 1270 | elf->e_flags = flags; |
1272 | elf->e_ehsize = sizeof(struct elfhdr); | 1271 | elf->e_ehsize = sizeof(struct elfhdr); |
1273 | elf->e_phentsize = sizeof(struct elf_phdr); | 1272 | elf->e_phentsize = sizeof(struct elf_phdr); |
1274 | elf->e_phnum = segs; | 1273 | elf->e_phnum = segs; |
1275 | elf->e_shentsize = 0; | 1274 | |
1276 | elf->e_shnum = 0; | ||
1277 | elf->e_shstrndx = 0; | ||
1278 | return; | 1275 | return; |
1279 | } | 1276 | } |
1280 | 1277 | ||
@@ -1725,26 +1722,25 @@ static int fill_note_info(struct elfhdr *elf, int phdrs, | |||
1725 | 1722 | ||
1726 | info->thread_status_size = 0; | 1723 | info->thread_status_size = 0; |
1727 | if (signr) { | 1724 | if (signr) { |
1728 | struct elf_thread_status *tmp; | 1725 | struct elf_thread_status *ets; |
1729 | rcu_read_lock(); | 1726 | rcu_read_lock(); |
1730 | do_each_thread(g, p) | 1727 | do_each_thread(g, p) |
1731 | if (current->mm == p->mm && current != p) { | 1728 | if (current->mm == p->mm && current != p) { |
1732 | tmp = kzalloc(sizeof(*tmp), GFP_ATOMIC); | 1729 | ets = kzalloc(sizeof(*ets), GFP_ATOMIC); |
1733 | if (!tmp) { | 1730 | if (!ets) { |
1734 | rcu_read_unlock(); | 1731 | rcu_read_unlock(); |
1735 | return 0; | 1732 | return 0; |
1736 | } | 1733 | } |
1737 | tmp->thread = p; | 1734 | ets->thread = p; |
1738 | list_add(&tmp->list, &info->thread_list); | 1735 | list_add(&ets->list, &info->thread_list); |
1739 | } | 1736 | } |
1740 | while_each_thread(g, p); | 1737 | while_each_thread(g, p); |
1741 | rcu_read_unlock(); | 1738 | rcu_read_unlock(); |
1742 | list_for_each(t, &info->thread_list) { | 1739 | list_for_each(t, &info->thread_list) { |
1743 | struct elf_thread_status *tmp; | ||
1744 | int sz; | 1740 | int sz; |
1745 | 1741 | ||
1746 | tmp = list_entry(t, struct elf_thread_status, list); | 1742 | ets = list_entry(t, struct elf_thread_status, list); |
1747 | sz = elf_dump_thread_status(signr, tmp); | 1743 | sz = elf_dump_thread_status(signr, ets); |
1748 | info->thread_status_size += sz; | 1744 | info->thread_status_size += sz; |
1749 | } | 1745 | } |
1750 | } | 1746 | } |
@@ -2000,10 +1996,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
2000 | 1996 | ||
2001 | for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { | 1997 | for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { |
2002 | struct page *page; | 1998 | struct page *page; |
2003 | struct vm_area_struct *vma; | 1999 | struct vm_area_struct *tmp_vma; |
2004 | 2000 | ||
2005 | if (get_user_pages(current, current->mm, addr, 1, 0, 1, | 2001 | if (get_user_pages(current, current->mm, addr, 1, 0, 1, |
2006 | &page, &vma) <= 0) { | 2002 | &page, &tmp_vma) <= 0) { |
2007 | DUMP_SEEK(PAGE_SIZE); | 2003 | DUMP_SEEK(PAGE_SIZE); |
2008 | } else { | 2004 | } else { |
2009 | if (page == ZERO_PAGE(0)) { | 2005 | if (page == ZERO_PAGE(0)) { |
@@ -2013,7 +2009,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un | |||
2013 | } | 2009 | } |
2014 | } else { | 2010 | } else { |
2015 | void *kaddr; | 2011 | void *kaddr; |
2016 | flush_cache_page(vma, addr, | 2012 | flush_cache_page(tmp_vma, addr, |
2017 | page_to_pfn(page)); | 2013 | page_to_pfn(page)); |
2018 | kaddr = kmap(page); | 2014 | kaddr = kmap(page); |
2019 | if ((size += PAGE_SIZE) > limit || | 2015 | if ((size += PAGE_SIZE) > limit || |
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c index 32649f2a1654..ddd35d873391 100644 --- a/fs/binfmt_elf_fdpic.c +++ b/fs/binfmt_elf_fdpic.c | |||
@@ -136,8 +136,8 @@ static int elf_fdpic_fetch_phdrs(struct elf_fdpic_params *params, | |||
136 | 136 | ||
137 | retval = kernel_read(file, params->hdr.e_phoff, | 137 | retval = kernel_read(file, params->hdr.e_phoff, |
138 | (char *) params->phdrs, size); | 138 | (char *) params->phdrs, size); |
139 | if (retval < 0) | 139 | if (unlikely(retval != size)) |
140 | return retval; | 140 | return retval < 0 ? retval : -ENOEXEC; |
141 | 141 | ||
142 | /* determine stack size for this binary */ | 142 | /* determine stack size for this binary */ |
143 | phdr = params->phdrs; | 143 | phdr = params->phdrs; |
@@ -218,8 +218,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | |||
218 | phdr->p_offset, | 218 | phdr->p_offset, |
219 | interpreter_name, | 219 | interpreter_name, |
220 | phdr->p_filesz); | 220 | phdr->p_filesz); |
221 | if (retval < 0) | 221 | if (unlikely(retval != phdr->p_filesz)) { |
222 | if (retval >= 0) | ||
223 | retval = -ENOEXEC; | ||
222 | goto error; | 224 | goto error; |
225 | } | ||
223 | 226 | ||
224 | retval = -ENOENT; | 227 | retval = -ENOENT; |
225 | if (interpreter_name[phdr->p_filesz - 1] != '\0') | 228 | if (interpreter_name[phdr->p_filesz - 1] != '\0') |
@@ -245,8 +248,11 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm, | |||
245 | 248 | ||
246 | retval = kernel_read(interpreter, 0, bprm->buf, | 249 | retval = kernel_read(interpreter, 0, bprm->buf, |
247 | BINPRM_BUF_SIZE); | 250 | BINPRM_BUF_SIZE); |
248 | if (retval < 0) | 251 | if (unlikely(retval != BINPRM_BUF_SIZE)) { |
252 | if (retval >= 0) | ||
253 | retval = -ENOEXEC; | ||
249 | goto error; | 254 | goto error; |
255 | } | ||
250 | 256 | ||
251 | interp_params.hdr = *((struct elfhdr *) bprm->buf); | 257 | interp_params.hdr = *((struct elfhdr *) bprm->buf); |
252 | break; | 258 | break; |
diff --git a/fs/binfmt_em86.c b/fs/binfmt_em86.c index f95ae9789c91..f9c88d0c8ced 100644 --- a/fs/binfmt_em86.c +++ b/fs/binfmt_em86.c | |||
@@ -43,7 +43,7 @@ static int load_em86(struct linux_binprm *bprm,struct pt_regs *regs) | |||
43 | return -ENOEXEC; | 43 | return -ENOEXEC; |
44 | } | 44 | } |
45 | 45 | ||
46 | bprm->sh_bang++; /* Well, the bang-shell is implicit... */ | 46 | bprm->sh_bang = 1; /* Well, the bang-shell is implicit... */ |
47 | allow_write_access(bprm->file); | 47 | allow_write_access(bprm->file); |
48 | fput(bprm->file); | 48 | fput(bprm->file); |
49 | bprm->file = NULL; | 49 | bprm->file = NULL; |
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c index 0498b181dd52..3b40d45a3a16 100644 --- a/fs/binfmt_flat.c +++ b/fs/binfmt_flat.c | |||
@@ -531,7 +531,8 @@ static int load_flat_file(struct linux_binprm * bprm, | |||
531 | DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); | 531 | DBG_FLT("BINFMT_FLAT: ROM mapping of file (we hope)\n"); |
532 | 532 | ||
533 | down_write(¤t->mm->mmap_sem); | 533 | down_write(¤t->mm->mmap_sem); |
534 | textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, MAP_PRIVATE, 0); | 534 | textpos = do_mmap(bprm->file, 0, text_len, PROT_READ|PROT_EXEC, |
535 | MAP_PRIVATE|MAP_EXECUTABLE, 0); | ||
535 | up_write(¤t->mm->mmap_sem); | 536 | up_write(¤t->mm->mmap_sem); |
536 | if (!textpos || textpos >= (unsigned long) -4096) { | 537 | if (!textpos || textpos >= (unsigned long) -4096) { |
537 | if (!textpos) | 538 | if (!textpos) |
@@ -932,14 +933,8 @@ static int __init init_flat_binfmt(void) | |||
932 | return register_binfmt(&flat_format); | 933 | return register_binfmt(&flat_format); |
933 | } | 934 | } |
934 | 935 | ||
935 | static void __exit exit_flat_binfmt(void) | ||
936 | { | ||
937 | unregister_binfmt(&flat_format); | ||
938 | } | ||
939 | |||
940 | /****************************************************************************/ | 936 | /****************************************************************************/ |
941 | 937 | ||
942 | core_initcall(init_flat_binfmt); | 938 | core_initcall(init_flat_binfmt); |
943 | module_exit(exit_flat_binfmt); | ||
944 | 939 | ||
945 | /****************************************************************************/ | 940 | /****************************************************************************/ |
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c index dbf0ac0523de..7191306367c5 100644 --- a/fs/binfmt_misc.c +++ b/fs/binfmt_misc.c | |||
@@ -115,6 +115,12 @@ static int load_misc_binary(struct linux_binprm *bprm, struct pt_regs *regs) | |||
115 | if (!enabled) | 115 | if (!enabled) |
116 | goto _ret; | 116 | goto _ret; |
117 | 117 | ||
118 | retval = -ENOEXEC; | ||
119 | if (bprm->misc_bang) | ||
120 | goto _ret; | ||
121 | |||
122 | bprm->misc_bang = 1; | ||
123 | |||
118 | /* to keep locking time low, we copy the interpreter string */ | 124 | /* to keep locking time low, we copy the interpreter string */ |
119 | read_lock(&entries_lock); | 125 | read_lock(&entries_lock); |
120 | fmt = check_file(bprm); | 126 | fmt = check_file(bprm); |
diff --git a/fs/binfmt_script.c b/fs/binfmt_script.c index ab33939b12a7..9e3963f7ebf1 100644 --- a/fs/binfmt_script.c +++ b/fs/binfmt_script.c | |||
@@ -29,7 +29,7 @@ static int load_script(struct linux_binprm *bprm,struct pt_regs *regs) | |||
29 | * Sorta complicated, but hopefully it will work. -TYT | 29 | * Sorta complicated, but hopefully it will work. -TYT |
30 | */ | 30 | */ |
31 | 31 | ||
32 | bprm->sh_bang++; | 32 | bprm->sh_bang = 1; |
33 | allow_write_access(bprm->file); | 33 | allow_write_access(bprm->file); |
34 | fput(bprm->file); | 34 | fput(bprm->file); |
35 | bprm->file = NULL; | 35 | bprm->file = NULL; |
diff --git a/fs/buffer.c b/fs/buffer.c index 3db4a26adc44..189efa4efc6e 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -2211,8 +2211,8 @@ out: | |||
2211 | return err; | 2211 | return err; |
2212 | } | 2212 | } |
2213 | 2213 | ||
2214 | int cont_expand_zero(struct file *file, struct address_space *mapping, | 2214 | static int cont_expand_zero(struct file *file, struct address_space *mapping, |
2215 | loff_t pos, loff_t *bytes) | 2215 | loff_t pos, loff_t *bytes) |
2216 | { | 2216 | { |
2217 | struct inode *inode = mapping->host; | 2217 | struct inode *inode = mapping->host; |
2218 | unsigned blocksize = 1 << inode->i_blkbits; | 2218 | unsigned blocksize = 1 << inode->i_blkbits; |
@@ -2328,23 +2328,6 @@ int block_commit_write(struct page *page, unsigned from, unsigned to) | |||
2328 | return 0; | 2328 | return 0; |
2329 | } | 2329 | } |
2330 | 2330 | ||
2331 | int generic_commit_write(struct file *file, struct page *page, | ||
2332 | unsigned from, unsigned to) | ||
2333 | { | ||
2334 | struct inode *inode = page->mapping->host; | ||
2335 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | ||
2336 | __block_commit_write(inode,page,from,to); | ||
2337 | /* | ||
2338 | * No need to use i_size_read() here, the i_size | ||
2339 | * cannot change under us because we hold i_mutex. | ||
2340 | */ | ||
2341 | if (pos > inode->i_size) { | ||
2342 | i_size_write(inode, pos); | ||
2343 | mark_inode_dirty(inode); | ||
2344 | } | ||
2345 | return 0; | ||
2346 | } | ||
2347 | |||
2348 | /* | 2331 | /* |
2349 | * block_page_mkwrite() is not allowed to change the file size as it gets | 2332 | * block_page_mkwrite() is not allowed to change the file size as it gets |
2350 | * called from a page fault handler when a page is first dirtied. Hence we must | 2333 | * called from a page fault handler when a page is first dirtied. Hence we must |
@@ -3315,7 +3298,6 @@ EXPORT_SYMBOL(end_buffer_write_sync); | |||
3315 | EXPORT_SYMBOL(file_fsync); | 3298 | EXPORT_SYMBOL(file_fsync); |
3316 | EXPORT_SYMBOL(fsync_bdev); | 3299 | EXPORT_SYMBOL(fsync_bdev); |
3317 | EXPORT_SYMBOL(generic_block_bmap); | 3300 | EXPORT_SYMBOL(generic_block_bmap); |
3318 | EXPORT_SYMBOL(generic_commit_write); | ||
3319 | EXPORT_SYMBOL(generic_cont_expand_simple); | 3301 | EXPORT_SYMBOL(generic_cont_expand_simple); |
3320 | EXPORT_SYMBOL(init_buffer); | 3302 | EXPORT_SYMBOL(init_buffer); |
3321 | EXPORT_SYMBOL(invalidate_bdev); | 3303 | EXPORT_SYMBOL(invalidate_bdev); |
diff --git a/fs/char_dev.c b/fs/char_dev.c index 038674aa88a7..68e510b88457 100644 --- a/fs/char_dev.c +++ b/fs/char_dev.c | |||
@@ -55,7 +55,6 @@ static struct char_device_struct { | |||
55 | unsigned int baseminor; | 55 | unsigned int baseminor; |
56 | int minorct; | 56 | int minorct; |
57 | char name[64]; | 57 | char name[64]; |
58 | struct file_operations *fops; | ||
59 | struct cdev *cdev; /* will die */ | 58 | struct cdev *cdev; /* will die */ |
60 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; | 59 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; |
61 | 60 | ||
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 0228ed06069e..cc950f69e51e 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -468,7 +468,7 @@ cifs_proc_init(void) | |||
468 | { | 468 | { |
469 | struct proc_dir_entry *pde; | 469 | struct proc_dir_entry *pde; |
470 | 470 | ||
471 | proc_fs_cifs = proc_mkdir("cifs", proc_root_fs); | 471 | proc_fs_cifs = proc_mkdir("fs/cifs", NULL); |
472 | if (proc_fs_cifs == NULL) | 472 | if (proc_fs_cifs == NULL) |
473 | return; | 473 | return; |
474 | 474 | ||
@@ -559,7 +559,7 @@ cifs_proc_clean(void) | |||
559 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); | 559 | remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); |
560 | remove_proc_entry("Experimental", proc_fs_cifs); | 560 | remove_proc_entry("Experimental", proc_fs_cifs); |
561 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); | 561 | remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); |
562 | remove_proc_entry("cifs", proc_root_fs); | 562 | remove_proc_entry("fs/cifs", NULL); |
563 | } | 563 | } |
564 | 564 | ||
565 | static int | 565 | static int |
diff --git a/fs/coda/coda_linux.c b/fs/coda/coda_linux.c index 95a54253c047..e1c854890f94 100644 --- a/fs/coda/coda_linux.c +++ b/fs/coda/coda_linux.c | |||
@@ -134,7 +134,7 @@ void coda_iattr_to_vattr(struct iattr *iattr, struct coda_vattr *vattr) | |||
134 | unsigned int valid; | 134 | unsigned int valid; |
135 | 135 | ||
136 | /* clean out */ | 136 | /* clean out */ |
137 | vattr->va_mode = (umode_t) -1; | 137 | vattr->va_mode = -1; |
138 | vattr->va_uid = (vuid_t) -1; | 138 | vattr->va_uid = (vuid_t) -1; |
139 | vattr->va_gid = (vgid_t) -1; | 139 | vattr->va_gid = (vgid_t) -1; |
140 | vattr->va_size = (off_t) -1; | 140 | vattr->va_size = (off_t) -1; |
diff --git a/fs/coda/dir.c b/fs/coda/dir.c index f89ff083079b..3d2580e00a3e 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c | |||
@@ -345,7 +345,7 @@ static int coda_symlink(struct inode *dir_inode, struct dentry *de, | |||
345 | } | 345 | } |
346 | 346 | ||
347 | /* destruction routines: unlink, rmdir */ | 347 | /* destruction routines: unlink, rmdir */ |
348 | int coda_unlink(struct inode *dir, struct dentry *de) | 348 | static int coda_unlink(struct inode *dir, struct dentry *de) |
349 | { | 349 | { |
350 | int error; | 350 | int error; |
351 | const char *name = de->d_name.name; | 351 | const char *name = de->d_name.name; |
@@ -365,7 +365,7 @@ int coda_unlink(struct inode *dir, struct dentry *de) | |||
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | int coda_rmdir(struct inode *dir, struct dentry *de) | 368 | static int coda_rmdir(struct inode *dir, struct dentry *de) |
369 | { | 369 | { |
370 | const char *name = de->d_name.name; | 370 | const char *name = de->d_name.name; |
371 | int len = de->d_name.len; | 371 | int len = de->d_name.len; |
@@ -424,7 +424,7 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
424 | 424 | ||
425 | 425 | ||
426 | /* file operations for directories */ | 426 | /* file operations for directories */ |
427 | int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir) | 427 | static int coda_readdir(struct file *coda_file, void *buf, filldir_t filldir) |
428 | { | 428 | { |
429 | struct coda_file_info *cfi; | 429 | struct coda_file_info *cfi; |
430 | struct file *host_file; | 430 | struct file *host_file; |
diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 59375efcf39d..3e5637fc3779 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c | |||
@@ -14,18 +14,26 @@ int sysctl_drop_caches; | |||
14 | 14 | ||
15 | static void drop_pagecache_sb(struct super_block *sb) | 15 | static void drop_pagecache_sb(struct super_block *sb) |
16 | { | 16 | { |
17 | struct inode *inode; | 17 | struct inode *inode, *toput_inode = NULL; |
18 | 18 | ||
19 | spin_lock(&inode_lock); | 19 | spin_lock(&inode_lock); |
20 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { | 20 | list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { |
21 | if (inode->i_state & (I_FREEING|I_WILL_FREE)) | 21 | if (inode->i_state & (I_FREEING|I_WILL_FREE)) |
22 | continue; | 22 | continue; |
23 | if (inode->i_mapping->nrpages == 0) | ||
24 | continue; | ||
25 | __iget(inode); | ||
26 | spin_unlock(&inode_lock); | ||
23 | __invalidate_mapping_pages(inode->i_mapping, 0, -1, true); | 27 | __invalidate_mapping_pages(inode->i_mapping, 0, -1, true); |
28 | iput(toput_inode); | ||
29 | toput_inode = inode; | ||
30 | spin_lock(&inode_lock); | ||
24 | } | 31 | } |
25 | spin_unlock(&inode_lock); | 32 | spin_unlock(&inode_lock); |
33 | iput(toput_inode); | ||
26 | } | 34 | } |
27 | 35 | ||
28 | void drop_pagecache(void) | 36 | static void drop_pagecache(void) |
29 | { | 37 | { |
30 | struct super_block *sb; | 38 | struct super_block *sb; |
31 | 39 | ||
@@ -45,7 +53,7 @@ restart: | |||
45 | spin_unlock(&sb_lock); | 53 | spin_unlock(&sb_lock); |
46 | } | 54 | } |
47 | 55 | ||
48 | void drop_slab(void) | 56 | static void drop_slab(void) |
49 | { | 57 | { |
50 | int nr_objects; | 58 | int nr_objects; |
51 | 59 | ||
diff --git a/fs/ecryptfs/Makefile b/fs/ecryptfs/Makefile index 768857015516..1e34a7fd4884 100644 --- a/fs/ecryptfs/Makefile +++ b/fs/ecryptfs/Makefile | |||
@@ -4,4 +4,4 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o | 5 | obj-$(CONFIG_ECRYPT_FS) += ecryptfs.o |
6 | 6 | ||
7 | ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o debug.o | 7 | ecryptfs-objs := dentry.o file.o inode.o main.o super.o mmap.o read_write.o crypto.o keystore.o messaging.o netlink.o miscdev.o debug.o |
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c index a066e109ad9c..cd62d75b2cc0 100644 --- a/fs/ecryptfs/crypto.c +++ b/fs/ecryptfs/crypto.c | |||
@@ -119,21 +119,21 @@ static int ecryptfs_calculate_md5(char *dst, | |||
119 | if (rc) { | 119 | if (rc) { |
120 | printk(KERN_ERR | 120 | printk(KERN_ERR |
121 | "%s: Error initializing crypto hash; rc = [%d]\n", | 121 | "%s: Error initializing crypto hash; rc = [%d]\n", |
122 | __FUNCTION__, rc); | 122 | __func__, rc); |
123 | goto out; | 123 | goto out; |
124 | } | 124 | } |
125 | rc = crypto_hash_update(&desc, &sg, len); | 125 | rc = crypto_hash_update(&desc, &sg, len); |
126 | if (rc) { | 126 | if (rc) { |
127 | printk(KERN_ERR | 127 | printk(KERN_ERR |
128 | "%s: Error updating crypto hash; rc = [%d]\n", | 128 | "%s: Error updating crypto hash; rc = [%d]\n", |
129 | __FUNCTION__, rc); | 129 | __func__, rc); |
130 | goto out; | 130 | goto out; |
131 | } | 131 | } |
132 | rc = crypto_hash_final(&desc, dst); | 132 | rc = crypto_hash_final(&desc, dst); |
133 | if (rc) { | 133 | if (rc) { |
134 | printk(KERN_ERR | 134 | printk(KERN_ERR |
135 | "%s: Error finalizing crypto hash; rc = [%d]\n", | 135 | "%s: Error finalizing crypto hash; rc = [%d]\n", |
136 | __FUNCTION__, rc); | 136 | __func__, rc); |
137 | goto out; | 137 | goto out; |
138 | } | 138 | } |
139 | out: | 139 | out: |
@@ -437,7 +437,7 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page, | |||
437 | if (rc < 0) { | 437 | if (rc < 0) { |
438 | printk(KERN_ERR "%s: Error attempting to encrypt page with " | 438 | printk(KERN_ERR "%s: Error attempting to encrypt page with " |
439 | "page->index = [%ld], extent_offset = [%ld]; " | 439 | "page->index = [%ld], extent_offset = [%ld]; " |
440 | "rc = [%d]\n", __FUNCTION__, page->index, extent_offset, | 440 | "rc = [%d]\n", __func__, page->index, extent_offset, |
441 | rc); | 441 | rc); |
442 | goto out; | 442 | goto out; |
443 | } | 443 | } |
@@ -487,7 +487,7 @@ int ecryptfs_encrypt_page(struct page *page) | |||
487 | 0, PAGE_CACHE_SIZE); | 487 | 0, PAGE_CACHE_SIZE); |
488 | if (rc) | 488 | if (rc) |
489 | printk(KERN_ERR "%s: Error attempting to copy " | 489 | printk(KERN_ERR "%s: Error attempting to copy " |
490 | "page at index [%ld]\n", __FUNCTION__, | 490 | "page at index [%ld]\n", __func__, |
491 | page->index); | 491 | page->index); |
492 | goto out; | 492 | goto out; |
493 | } | 493 | } |
@@ -508,7 +508,7 @@ int ecryptfs_encrypt_page(struct page *page) | |||
508 | extent_offset); | 508 | extent_offset); |
509 | if (rc) { | 509 | if (rc) { |
510 | printk(KERN_ERR "%s: Error encrypting extent; " | 510 | printk(KERN_ERR "%s: Error encrypting extent; " |
511 | "rc = [%d]\n", __FUNCTION__, rc); | 511 | "rc = [%d]\n", __func__, rc); |
512 | goto out; | 512 | goto out; |
513 | } | 513 | } |
514 | ecryptfs_lower_offset_for_extent( | 514 | ecryptfs_lower_offset_for_extent( |
@@ -569,7 +569,7 @@ static int ecryptfs_decrypt_extent(struct page *page, | |||
569 | if (rc < 0) { | 569 | if (rc < 0) { |
570 | printk(KERN_ERR "%s: Error attempting to decrypt to page with " | 570 | printk(KERN_ERR "%s: Error attempting to decrypt to page with " |
571 | "page->index = [%ld], extent_offset = [%ld]; " | 571 | "page->index = [%ld], extent_offset = [%ld]; " |
572 | "rc = [%d]\n", __FUNCTION__, page->index, extent_offset, | 572 | "rc = [%d]\n", __func__, page->index, extent_offset, |
573 | rc); | 573 | rc); |
574 | goto out; | 574 | goto out; |
575 | } | 575 | } |
@@ -622,7 +622,7 @@ int ecryptfs_decrypt_page(struct page *page) | |||
622 | ecryptfs_inode); | 622 | ecryptfs_inode); |
623 | if (rc) | 623 | if (rc) |
624 | printk(KERN_ERR "%s: Error attempting to copy " | 624 | printk(KERN_ERR "%s: Error attempting to copy " |
625 | "page at index [%ld]\n", __FUNCTION__, | 625 | "page at index [%ld]\n", __func__, |
626 | page->index); | 626 | page->index); |
627 | goto out; | 627 | goto out; |
628 | } | 628 | } |
@@ -656,7 +656,7 @@ int ecryptfs_decrypt_page(struct page *page) | |||
656 | extent_offset); | 656 | extent_offset); |
657 | if (rc) { | 657 | if (rc) { |
658 | printk(KERN_ERR "%s: Error encrypting extent; " | 658 | printk(KERN_ERR "%s: Error encrypting extent; " |
659 | "rc = [%d]\n", __FUNCTION__, rc); | 659 | "rc = [%d]\n", __func__, rc); |
660 | goto out; | 660 | goto out; |
661 | } | 661 | } |
662 | } | 662 | } |
@@ -1215,7 +1215,7 @@ int ecryptfs_read_and_validate_header_region(char *data, | |||
1215 | ecryptfs_inode); | 1215 | ecryptfs_inode); |
1216 | if (rc) { | 1216 | if (rc) { |
1217 | printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n", | 1217 | printk(KERN_ERR "%s: Error reading header region; rc = [%d]\n", |
1218 | __FUNCTION__, rc); | 1218 | __func__, rc); |
1219 | goto out; | 1219 | goto out; |
1220 | } | 1220 | } |
1221 | if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) { | 1221 | if (!contains_ecryptfs_marker(data + ECRYPTFS_FILE_SIZE_BYTES)) { |
@@ -1246,7 +1246,6 @@ ecryptfs_write_header_metadata(char *virt, | |||
1246 | (*written) = 6; | 1246 | (*written) = 6; |
1247 | } | 1247 | } |
1248 | 1248 | ||
1249 | struct kmem_cache *ecryptfs_header_cache_0; | ||
1250 | struct kmem_cache *ecryptfs_header_cache_1; | 1249 | struct kmem_cache *ecryptfs_header_cache_1; |
1251 | struct kmem_cache *ecryptfs_header_cache_2; | 1250 | struct kmem_cache *ecryptfs_header_cache_2; |
1252 | 1251 | ||
@@ -1320,7 +1319,7 @@ ecryptfs_write_metadata_to_contents(struct ecryptfs_crypt_stat *crypt_stat, | |||
1320 | 0, crypt_stat->num_header_bytes_at_front); | 1319 | 0, crypt_stat->num_header_bytes_at_front); |
1321 | if (rc) | 1320 | if (rc) |
1322 | printk(KERN_ERR "%s: Error attempting to write header " | 1321 | printk(KERN_ERR "%s: Error attempting to write header " |
1323 | "information to lower file; rc = [%d]\n", __FUNCTION__, | 1322 | "information to lower file; rc = [%d]\n", __func__, |
1324 | rc); | 1323 | rc); |
1325 | return rc; | 1324 | return rc; |
1326 | } | 1325 | } |
@@ -1365,14 +1364,14 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1365 | } | 1364 | } |
1366 | } else { | 1365 | } else { |
1367 | printk(KERN_WARNING "%s: Encrypted flag not set\n", | 1366 | printk(KERN_WARNING "%s: Encrypted flag not set\n", |
1368 | __FUNCTION__); | 1367 | __func__); |
1369 | rc = -EINVAL; | 1368 | rc = -EINVAL; |
1370 | goto out; | 1369 | goto out; |
1371 | } | 1370 | } |
1372 | /* Released in this function */ | 1371 | /* Released in this function */ |
1373 | virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL); | 1372 | virt = kzalloc(crypt_stat->num_header_bytes_at_front, GFP_KERNEL); |
1374 | if (!virt) { | 1373 | if (!virt) { |
1375 | printk(KERN_ERR "%s: Out of memory\n", __FUNCTION__); | 1374 | printk(KERN_ERR "%s: Out of memory\n", __func__); |
1376 | rc = -ENOMEM; | 1375 | rc = -ENOMEM; |
1377 | goto out; | 1376 | goto out; |
1378 | } | 1377 | } |
@@ -1380,7 +1379,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1380 | ecryptfs_dentry); | 1379 | ecryptfs_dentry); |
1381 | if (unlikely(rc)) { | 1380 | if (unlikely(rc)) { |
1382 | printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", | 1381 | printk(KERN_ERR "%s: Error whilst writing headers; rc = [%d]\n", |
1383 | __FUNCTION__, rc); | 1382 | __func__, rc); |
1384 | goto out_free; | 1383 | goto out_free; |
1385 | } | 1384 | } |
1386 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) | 1385 | if (crypt_stat->flags & ECRYPTFS_METADATA_IN_XATTR) |
@@ -1391,7 +1390,7 @@ int ecryptfs_write_metadata(struct dentry *ecryptfs_dentry) | |||
1391 | ecryptfs_dentry, virt); | 1390 | ecryptfs_dentry, virt); |
1392 | if (rc) { | 1391 | if (rc) { |
1393 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " | 1392 | printk(KERN_ERR "%s: Error writing metadata out to lower file; " |
1394 | "rc = [%d]\n", __FUNCTION__, rc); | 1393 | "rc = [%d]\n", __func__, rc); |
1395 | goto out_free; | 1394 | goto out_free; |
1396 | } | 1395 | } |
1397 | out_free: | 1396 | out_free: |
@@ -1585,7 +1584,7 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry) | |||
1585 | if (!page_virt) { | 1584 | if (!page_virt) { |
1586 | rc = -ENOMEM; | 1585 | rc = -ENOMEM; |
1587 | printk(KERN_ERR "%s: Unable to allocate page_virt\n", | 1586 | printk(KERN_ERR "%s: Unable to allocate page_virt\n", |
1588 | __FUNCTION__); | 1587 | __func__); |
1589 | goto out; | 1588 | goto out; |
1590 | } | 1589 | } |
1591 | rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size, | 1590 | rc = ecryptfs_read_lower(page_virt, 0, crypt_stat->extent_size, |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index 5007f788da01..951ee33a022d 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * | 4 | * |
5 | * Copyright (C) 1997-2003 Erez Zadok | 5 | * Copyright (C) 1997-2003 Erez Zadok |
6 | * Copyright (C) 2001-2003 Stony Brook University | 6 | * Copyright (C) 2001-2003 Stony Brook University |
7 | * Copyright (C) 2004-2007 International Business Machines Corp. | 7 | * Copyright (C) 2004-2008 International Business Machines Corp. |
8 | * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> | 8 | * Author(s): Michael A. Halcrow <mahalcro@us.ibm.com> |
9 | * Trevor S. Highland <trevor.highland@gmail.com> | 9 | * Trevor S. Highland <trevor.highland@gmail.com> |
10 | * Tyler Hicks <tyhicks@ou.edu> | 10 | * Tyler Hicks <tyhicks@ou.edu> |
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/namei.h> | 34 | #include <linux/namei.h> |
35 | #include <linux/scatterlist.h> | 35 | #include <linux/scatterlist.h> |
36 | #include <linux/hash.h> | 36 | #include <linux/hash.h> |
37 | #include <linux/nsproxy.h> | ||
37 | 38 | ||
38 | /* Version verification for shared data structures w/ userspace */ | 39 | /* Version verification for shared data structures w/ userspace */ |
39 | #define ECRYPTFS_VERSION_MAJOR 0x00 | 40 | #define ECRYPTFS_VERSION_MAJOR 0x00 |
@@ -49,11 +50,13 @@ | |||
49 | #define ECRYPTFS_VERSIONING_POLICY 0x00000008 | 50 | #define ECRYPTFS_VERSIONING_POLICY 0x00000008 |
50 | #define ECRYPTFS_VERSIONING_XATTR 0x00000010 | 51 | #define ECRYPTFS_VERSIONING_XATTR 0x00000010 |
51 | #define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 | 52 | #define ECRYPTFS_VERSIONING_MULTKEY 0x00000020 |
53 | #define ECRYPTFS_VERSIONING_DEVMISC 0x00000040 | ||
52 | #define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \ | 54 | #define ECRYPTFS_VERSIONING_MASK (ECRYPTFS_VERSIONING_PASSPHRASE \ |
53 | | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \ | 55 | | ECRYPTFS_VERSIONING_PLAINTEXT_PASSTHROUGH \ |
54 | | ECRYPTFS_VERSIONING_PUBKEY \ | 56 | | ECRYPTFS_VERSIONING_PUBKEY \ |
55 | | ECRYPTFS_VERSIONING_XATTR \ | 57 | | ECRYPTFS_VERSIONING_XATTR \ |
56 | | ECRYPTFS_VERSIONING_MULTKEY) | 58 | | ECRYPTFS_VERSIONING_MULTKEY \ |
59 | | ECRYPTFS_VERSIONING_DEVMISC) | ||
57 | #define ECRYPTFS_MAX_PASSWORD_LENGTH 64 | 60 | #define ECRYPTFS_MAX_PASSWORD_LENGTH 64 |
58 | #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH | 61 | #define ECRYPTFS_MAX_PASSPHRASE_BYTES ECRYPTFS_MAX_PASSWORD_LENGTH |
59 | #define ECRYPTFS_SALT_SIZE 8 | 62 | #define ECRYPTFS_SALT_SIZE 8 |
@@ -73,17 +76,14 @@ | |||
73 | #define ECRYPTFS_DEFAULT_MSG_CTX_ELEMS 32 | 76 | #define ECRYPTFS_DEFAULT_MSG_CTX_ELEMS 32 |
74 | #define ECRYPTFS_DEFAULT_SEND_TIMEOUT HZ | 77 | #define ECRYPTFS_DEFAULT_SEND_TIMEOUT HZ |
75 | #define ECRYPTFS_MAX_MSG_CTX_TTL (HZ*3) | 78 | #define ECRYPTFS_MAX_MSG_CTX_TTL (HZ*3) |
76 | #define ECRYPTFS_NLMSG_HELO 100 | ||
77 | #define ECRYPTFS_NLMSG_QUIT 101 | ||
78 | #define ECRYPTFS_NLMSG_REQUEST 102 | ||
79 | #define ECRYPTFS_NLMSG_RESPONSE 103 | ||
80 | #define ECRYPTFS_MAX_PKI_NAME_BYTES 16 | 79 | #define ECRYPTFS_MAX_PKI_NAME_BYTES 16 |
81 | #define ECRYPTFS_DEFAULT_NUM_USERS 4 | 80 | #define ECRYPTFS_DEFAULT_NUM_USERS 4 |
82 | #define ECRYPTFS_MAX_NUM_USERS 32768 | 81 | #define ECRYPTFS_MAX_NUM_USERS 32768 |
83 | #define ECRYPTFS_TRANSPORT_NETLINK 0 | 82 | #define ECRYPTFS_TRANSPORT_NETLINK 0 |
84 | #define ECRYPTFS_TRANSPORT_CONNECTOR 1 | 83 | #define ECRYPTFS_TRANSPORT_CONNECTOR 1 |
85 | #define ECRYPTFS_TRANSPORT_RELAYFS 2 | 84 | #define ECRYPTFS_TRANSPORT_RELAYFS 2 |
86 | #define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_NETLINK | 85 | #define ECRYPTFS_TRANSPORT_MISCDEV 3 |
86 | #define ECRYPTFS_DEFAULT_TRANSPORT ECRYPTFS_TRANSPORT_MISCDEV | ||
87 | #define ECRYPTFS_XATTR_NAME "user.ecryptfs" | 87 | #define ECRYPTFS_XATTR_NAME "user.ecryptfs" |
88 | 88 | ||
89 | #define RFC2440_CIPHER_DES3_EDE 0x02 | 89 | #define RFC2440_CIPHER_DES3_EDE 0x02 |
@@ -366,32 +366,63 @@ struct ecryptfs_auth_tok_list_item { | |||
366 | }; | 366 | }; |
367 | 367 | ||
368 | struct ecryptfs_message { | 368 | struct ecryptfs_message { |
369 | /* Can never be greater than ecryptfs_message_buf_len */ | ||
370 | /* Used to find the parent msg_ctx */ | ||
371 | /* Inherits from msg_ctx->index */ | ||
369 | u32 index; | 372 | u32 index; |
370 | u32 data_len; | 373 | u32 data_len; |
371 | u8 data[]; | 374 | u8 data[]; |
372 | }; | 375 | }; |
373 | 376 | ||
374 | struct ecryptfs_msg_ctx { | 377 | struct ecryptfs_msg_ctx { |
375 | #define ECRYPTFS_MSG_CTX_STATE_FREE 0x0001 | 378 | #define ECRYPTFS_MSG_CTX_STATE_FREE 0x01 |
376 | #define ECRYPTFS_MSG_CTX_STATE_PENDING 0x0002 | 379 | #define ECRYPTFS_MSG_CTX_STATE_PENDING 0x02 |
377 | #define ECRYPTFS_MSG_CTX_STATE_DONE 0x0003 | 380 | #define ECRYPTFS_MSG_CTX_STATE_DONE 0x03 |
378 | u32 state; | 381 | #define ECRYPTFS_MSG_CTX_STATE_NO_REPLY 0x04 |
379 | unsigned int index; | 382 | u8 state; |
380 | unsigned int counter; | 383 | #define ECRYPTFS_MSG_HELO 100 |
384 | #define ECRYPTFS_MSG_QUIT 101 | ||
385 | #define ECRYPTFS_MSG_REQUEST 102 | ||
386 | #define ECRYPTFS_MSG_RESPONSE 103 | ||
387 | u8 type; | ||
388 | u32 index; | ||
389 | /* Counter converts to a sequence number. Each message sent | ||
390 | * out for which we expect a response has an associated | ||
391 | * sequence number. The response must have the same sequence | ||
392 | * number as the counter for the msg_stc for the message to be | ||
393 | * valid. */ | ||
394 | u32 counter; | ||
395 | size_t msg_size; | ||
381 | struct ecryptfs_message *msg; | 396 | struct ecryptfs_message *msg; |
382 | struct task_struct *task; | 397 | struct task_struct *task; |
383 | struct list_head node; | 398 | struct list_head node; |
399 | struct list_head daemon_out_list; | ||
384 | struct mutex mux; | 400 | struct mutex mux; |
385 | }; | 401 | }; |
386 | 402 | ||
387 | extern unsigned int ecryptfs_transport; | 403 | extern unsigned int ecryptfs_transport; |
388 | 404 | ||
389 | struct ecryptfs_daemon_id { | 405 | struct ecryptfs_daemon; |
390 | pid_t pid; | 406 | |
391 | uid_t uid; | 407 | struct ecryptfs_daemon { |
392 | struct hlist_node id_chain; | 408 | #define ECRYPTFS_DAEMON_IN_READ 0x00000001 |
409 | #define ECRYPTFS_DAEMON_IN_POLL 0x00000002 | ||
410 | #define ECRYPTFS_DAEMON_ZOMBIE 0x00000004 | ||
411 | #define ECRYPTFS_DAEMON_MISCDEV_OPEN 0x00000008 | ||
412 | u32 flags; | ||
413 | u32 num_queued_msg_ctx; | ||
414 | struct pid *pid; | ||
415 | uid_t euid; | ||
416 | struct user_namespace *user_ns; | ||
417 | struct task_struct *task; | ||
418 | struct mutex mux; | ||
419 | struct list_head msg_ctx_out_queue; | ||
420 | wait_queue_head_t wait; | ||
421 | struct hlist_node euid_chain; | ||
393 | }; | 422 | }; |
394 | 423 | ||
424 | extern struct mutex ecryptfs_daemon_hash_mux; | ||
425 | |||
395 | static inline struct ecryptfs_file_info * | 426 | static inline struct ecryptfs_file_info * |
396 | ecryptfs_file_to_private(struct file *file) | 427 | ecryptfs_file_to_private(struct file *file) |
397 | { | 428 | { |
@@ -500,7 +531,7 @@ ecryptfs_set_dentry_lower_mnt(struct dentry *dentry, struct vfsmount *lower_mnt) | |||
500 | } | 531 | } |
501 | 532 | ||
502 | #define ecryptfs_printk(type, fmt, arg...) \ | 533 | #define ecryptfs_printk(type, fmt, arg...) \ |
503 | __ecryptfs_printk(type "%s: " fmt, __FUNCTION__, ## arg); | 534 | __ecryptfs_printk(type "%s: " fmt, __func__, ## arg); |
504 | void __ecryptfs_printk(const char *fmt, ...); | 535 | void __ecryptfs_printk(const char *fmt, ...); |
505 | 536 | ||
506 | extern const struct file_operations ecryptfs_main_fops; | 537 | extern const struct file_operations ecryptfs_main_fops; |
@@ -581,10 +612,13 @@ int | |||
581 | ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, | 612 | ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
582 | size_t size, int flags); | 613 | size_t size, int flags); |
583 | int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); | 614 | int ecryptfs_read_xattr_region(char *page_virt, struct inode *ecryptfs_inode); |
584 | int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid); | 615 | int ecryptfs_process_helo(unsigned int transport, uid_t euid, |
585 | int ecryptfs_process_quit(uid_t uid, pid_t pid); | 616 | struct user_namespace *user_ns, struct pid *pid); |
586 | int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid, | 617 | int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, |
587 | pid_t pid, u32 seq); | 618 | struct pid *pid); |
619 | int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, | ||
620 | struct user_namespace *user_ns, struct pid *pid, | ||
621 | u32 seq); | ||
588 | int ecryptfs_send_message(unsigned int transport, char *data, int data_len, | 622 | int ecryptfs_send_message(unsigned int transport, char *data, int data_len, |
589 | struct ecryptfs_msg_ctx **msg_ctx); | 623 | struct ecryptfs_msg_ctx **msg_ctx); |
590 | int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, | 624 | int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, |
@@ -593,14 +627,14 @@ int ecryptfs_init_messaging(unsigned int transport); | |||
593 | void ecryptfs_release_messaging(unsigned int transport); | 627 | void ecryptfs_release_messaging(unsigned int transport); |
594 | 628 | ||
595 | int ecryptfs_send_netlink(char *data, int data_len, | 629 | int ecryptfs_send_netlink(char *data, int data_len, |
596 | struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type, | 630 | struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, |
597 | u16 msg_flags, pid_t daemon_pid); | 631 | u16 msg_flags, struct pid *daemon_pid); |
598 | int ecryptfs_init_netlink(void); | 632 | int ecryptfs_init_netlink(void); |
599 | void ecryptfs_release_netlink(void); | 633 | void ecryptfs_release_netlink(void); |
600 | 634 | ||
601 | int ecryptfs_send_connector(char *data, int data_len, | 635 | int ecryptfs_send_connector(char *data, int data_len, |
602 | struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type, | 636 | struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, |
603 | u16 msg_flags, pid_t daemon_pid); | 637 | u16 msg_flags, struct pid *daemon_pid); |
604 | int ecryptfs_init_connector(void); | 638 | int ecryptfs_init_connector(void); |
605 | void ecryptfs_release_connector(void); | 639 | void ecryptfs_release_connector(void); |
606 | void | 640 | void |
@@ -642,5 +676,21 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs, | |||
642 | size_t offset_in_page, size_t size, | 676 | size_t offset_in_page, size_t size, |
643 | struct inode *ecryptfs_inode); | 677 | struct inode *ecryptfs_inode); |
644 | struct page *ecryptfs_get_locked_page(struct file *file, loff_t index); | 678 | struct page *ecryptfs_get_locked_page(struct file *file, loff_t index); |
679 | int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon); | ||
680 | int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, | ||
681 | struct user_namespace *user_ns); | ||
682 | int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, | ||
683 | size_t *length_size); | ||
684 | int ecryptfs_write_packet_length(char *dest, size_t size, | ||
685 | size_t *packet_size_length); | ||
686 | int ecryptfs_init_ecryptfs_miscdev(void); | ||
687 | void ecryptfs_destroy_ecryptfs_miscdev(void); | ||
688 | int ecryptfs_send_miscdev(char *data, size_t data_size, | ||
689 | struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, | ||
690 | u16 msg_flags, struct ecryptfs_daemon *daemon); | ||
691 | void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx); | ||
692 | int | ||
693 | ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, | ||
694 | struct user_namespace *user_ns, struct pid *pid); | ||
645 | 695 | ||
646 | #endif /* #ifndef ECRYPTFS_KERNEL_H */ | 696 | #endif /* #ifndef ECRYPTFS_KERNEL_H */ |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 2b8f5ed4adea..2258b8f654a6 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -195,7 +195,9 @@ static int ecryptfs_open(struct inode *inode, struct file *file) | |||
195 | file, ecryptfs_inode_to_private(inode)->lower_file); | 195 | file, ecryptfs_inode_to_private(inode)->lower_file); |
196 | if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { | 196 | if (S_ISDIR(ecryptfs_dentry->d_inode->i_mode)) { |
197 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); | 197 | ecryptfs_printk(KERN_DEBUG, "This is a directory\n"); |
198 | mutex_lock(&crypt_stat->cs_mutex); | ||
198 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); | 199 | crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED); |
200 | mutex_unlock(&crypt_stat->cs_mutex); | ||
199 | rc = 0; | 201 | rc = 0; |
200 | goto out; | 202 | goto out; |
201 | } | 203 | } |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index e23861152101..0a1397335a8e 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -111,7 +111,7 @@ ecryptfs_do_create(struct inode *directory_inode, | |||
111 | 111 | ||
112 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); | 112 | lower_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry); |
113 | lower_dir_dentry = lock_parent(lower_dentry); | 113 | lower_dir_dentry = lock_parent(lower_dentry); |
114 | if (unlikely(IS_ERR(lower_dir_dentry))) { | 114 | if (IS_ERR(lower_dir_dentry)) { |
115 | ecryptfs_printk(KERN_ERR, "Error locking directory of " | 115 | ecryptfs_printk(KERN_ERR, "Error locking directory of " |
116 | "dentry\n"); | 116 | "dentry\n"); |
117 | rc = PTR_ERR(lower_dir_dentry); | 117 | rc = PTR_ERR(lower_dir_dentry); |
@@ -121,7 +121,7 @@ ecryptfs_do_create(struct inode *directory_inode, | |||
121 | ecryptfs_dentry, mode, nd); | 121 | ecryptfs_dentry, mode, nd); |
122 | if (rc) { | 122 | if (rc) { |
123 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " | 123 | printk(KERN_ERR "%s: Failure to create dentry in lower fs; " |
124 | "rc = [%d]\n", __FUNCTION__, rc); | 124 | "rc = [%d]\n", __func__, rc); |
125 | goto out_lock; | 125 | goto out_lock; |
126 | } | 126 | } |
127 | rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, | 127 | rc = ecryptfs_interpose(lower_dentry, ecryptfs_dentry, |
@@ -908,7 +908,9 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia) | |||
908 | if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) | 908 | if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) |
909 | ia->ia_valid &= ~ATTR_MODE; | 909 | ia->ia_valid &= ~ATTR_MODE; |
910 | 910 | ||
911 | mutex_lock(&lower_dentry->d_inode->i_mutex); | ||
911 | rc = notify_change(lower_dentry, ia); | 912 | rc = notify_change(lower_dentry, ia); |
913 | mutex_unlock(&lower_dentry->d_inode->i_mutex); | ||
912 | out: | 914 | out: |
913 | fsstack_copy_attr_all(inode, lower_inode, NULL); | 915 | fsstack_copy_attr_all(inode, lower_inode, NULL); |
914 | return rc; | 916 | return rc; |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 682b1b2482c2..e82b457180be 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -65,7 +65,7 @@ static int process_request_key_err(long err_code) | |||
65 | } | 65 | } |
66 | 66 | ||
67 | /** | 67 | /** |
68 | * parse_packet_length | 68 | * ecryptfs_parse_packet_length |
69 | * @data: Pointer to memory containing length at offset | 69 | * @data: Pointer to memory containing length at offset |
70 | * @size: This function writes the decoded size to this memory | 70 | * @size: This function writes the decoded size to this memory |
71 | * address; zero on error | 71 | * address; zero on error |
@@ -73,8 +73,8 @@ static int process_request_key_err(long err_code) | |||
73 | * | 73 | * |
74 | * Returns zero on success; non-zero on error | 74 | * Returns zero on success; non-zero on error |
75 | */ | 75 | */ |
76 | static int parse_packet_length(unsigned char *data, size_t *size, | 76 | int ecryptfs_parse_packet_length(unsigned char *data, size_t *size, |
77 | size_t *length_size) | 77 | size_t *length_size) |
78 | { | 78 | { |
79 | int rc = 0; | 79 | int rc = 0; |
80 | 80 | ||
@@ -105,7 +105,7 @@ out: | |||
105 | } | 105 | } |
106 | 106 | ||
107 | /** | 107 | /** |
108 | * write_packet_length | 108 | * ecryptfs_write_packet_length |
109 | * @dest: The byte array target into which to write the length. Must | 109 | * @dest: The byte array target into which to write the length. Must |
110 | * have at least 5 bytes allocated. | 110 | * have at least 5 bytes allocated. |
111 | * @size: The length to write. | 111 | * @size: The length to write. |
@@ -114,8 +114,8 @@ out: | |||
114 | * | 114 | * |
115 | * Returns zero on success; non-zero on error. | 115 | * Returns zero on success; non-zero on error. |
116 | */ | 116 | */ |
117 | static int write_packet_length(char *dest, size_t size, | 117 | int ecryptfs_write_packet_length(char *dest, size_t size, |
118 | size_t *packet_size_length) | 118 | size_t *packet_size_length) |
119 | { | 119 | { |
120 | int rc = 0; | 120 | int rc = 0; |
121 | 121 | ||
@@ -162,8 +162,8 @@ write_tag_64_packet(char *signature, struct ecryptfs_session_key *session_key, | |||
162 | goto out; | 162 | goto out; |
163 | } | 163 | } |
164 | message[i++] = ECRYPTFS_TAG_64_PACKET_TYPE; | 164 | message[i++] = ECRYPTFS_TAG_64_PACKET_TYPE; |
165 | rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX, | 165 | rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX, |
166 | &packet_size_len); | 166 | &packet_size_len); |
167 | if (rc) { | 167 | if (rc) { |
168 | ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet " | 168 | ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet " |
169 | "header; cannot generate packet length\n"); | 169 | "header; cannot generate packet length\n"); |
@@ -172,8 +172,9 @@ write_tag_64_packet(char *signature, struct ecryptfs_session_key *session_key, | |||
172 | i += packet_size_len; | 172 | i += packet_size_len; |
173 | memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX); | 173 | memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX); |
174 | i += ECRYPTFS_SIG_SIZE_HEX; | 174 | i += ECRYPTFS_SIG_SIZE_HEX; |
175 | rc = write_packet_length(&message[i], session_key->encrypted_key_size, | 175 | rc = ecryptfs_write_packet_length(&message[i], |
176 | &packet_size_len); | 176 | session_key->encrypted_key_size, |
177 | &packet_size_len); | ||
177 | if (rc) { | 178 | if (rc) { |
178 | ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet " | 179 | ecryptfs_printk(KERN_ERR, "Error generating tag 64 packet " |
179 | "header; cannot generate packet length\n"); | 180 | "header; cannot generate packet length\n"); |
@@ -225,7 +226,7 @@ parse_tag_65_packet(struct ecryptfs_session_key *session_key, u8 *cipher_code, | |||
225 | rc = -EIO; | 226 | rc = -EIO; |
226 | goto out; | 227 | goto out; |
227 | } | 228 | } |
228 | rc = parse_packet_length(&data[i], &m_size, &data_len); | 229 | rc = ecryptfs_parse_packet_length(&data[i], &m_size, &data_len); |
229 | if (rc) { | 230 | if (rc) { |
230 | ecryptfs_printk(KERN_WARNING, "Error parsing packet length; " | 231 | ecryptfs_printk(KERN_WARNING, "Error parsing packet length; " |
231 | "rc = [%d]\n", rc); | 232 | "rc = [%d]\n", rc); |
@@ -304,8 +305,8 @@ write_tag_66_packet(char *signature, u8 cipher_code, | |||
304 | goto out; | 305 | goto out; |
305 | } | 306 | } |
306 | message[i++] = ECRYPTFS_TAG_66_PACKET_TYPE; | 307 | message[i++] = ECRYPTFS_TAG_66_PACKET_TYPE; |
307 | rc = write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX, | 308 | rc = ecryptfs_write_packet_length(&message[i], ECRYPTFS_SIG_SIZE_HEX, |
308 | &packet_size_len); | 309 | &packet_size_len); |
309 | if (rc) { | 310 | if (rc) { |
310 | ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet " | 311 | ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet " |
311 | "header; cannot generate packet length\n"); | 312 | "header; cannot generate packet length\n"); |
@@ -315,8 +316,8 @@ write_tag_66_packet(char *signature, u8 cipher_code, | |||
315 | memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX); | 316 | memcpy(&message[i], signature, ECRYPTFS_SIG_SIZE_HEX); |
316 | i += ECRYPTFS_SIG_SIZE_HEX; | 317 | i += ECRYPTFS_SIG_SIZE_HEX; |
317 | /* The encrypted key includes 1 byte cipher code and 2 byte checksum */ | 318 | /* The encrypted key includes 1 byte cipher code and 2 byte checksum */ |
318 | rc = write_packet_length(&message[i], crypt_stat->key_size + 3, | 319 | rc = ecryptfs_write_packet_length(&message[i], crypt_stat->key_size + 3, |
319 | &packet_size_len); | 320 | &packet_size_len); |
320 | if (rc) { | 321 | if (rc) { |
321 | ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet " | 322 | ecryptfs_printk(KERN_ERR, "Error generating tag 66 packet " |
322 | "header; cannot generate packet length\n"); | 323 | "header; cannot generate packet length\n"); |
@@ -357,20 +358,25 @@ parse_tag_67_packet(struct ecryptfs_key_record *key_rec, | |||
357 | /* verify that everything through the encrypted FEK size is present */ | 358 | /* verify that everything through the encrypted FEK size is present */ |
358 | if (message_len < 4) { | 359 | if (message_len < 4) { |
359 | rc = -EIO; | 360 | rc = -EIO; |
361 | printk(KERN_ERR "%s: message_len is [%Zd]; minimum acceptable " | ||
362 | "message length is [%d]\n", __func__, message_len, 4); | ||
360 | goto out; | 363 | goto out; |
361 | } | 364 | } |
362 | if (data[i++] != ECRYPTFS_TAG_67_PACKET_TYPE) { | 365 | if (data[i++] != ECRYPTFS_TAG_67_PACKET_TYPE) { |
363 | ecryptfs_printk(KERN_ERR, "Type should be ECRYPTFS_TAG_67\n"); | ||
364 | rc = -EIO; | 366 | rc = -EIO; |
367 | printk(KERN_ERR "%s: Type should be ECRYPTFS_TAG_67\n", | ||
368 | __func__); | ||
365 | goto out; | 369 | goto out; |
366 | } | 370 | } |
367 | if (data[i++]) { | 371 | if (data[i++]) { |
368 | ecryptfs_printk(KERN_ERR, "Status indicator has non zero value" | ||
369 | " [%d]\n", data[i-1]); | ||
370 | rc = -EIO; | 372 | rc = -EIO; |
373 | printk(KERN_ERR "%s: Status indicator has non zero " | ||
374 | "value [%d]\n", __func__, data[i-1]); | ||
375 | |||
371 | goto out; | 376 | goto out; |
372 | } | 377 | } |
373 | rc = parse_packet_length(&data[i], &key_rec->enc_key_size, &data_len); | 378 | rc = ecryptfs_parse_packet_length(&data[i], &key_rec->enc_key_size, |
379 | &data_len); | ||
374 | if (rc) { | 380 | if (rc) { |
375 | ecryptfs_printk(KERN_WARNING, "Error parsing packet length; " | 381 | ecryptfs_printk(KERN_WARNING, "Error parsing packet length; " |
376 | "rc = [%d]\n", rc); | 382 | "rc = [%d]\n", rc); |
@@ -378,17 +384,17 @@ parse_tag_67_packet(struct ecryptfs_key_record *key_rec, | |||
378 | } | 384 | } |
379 | i += data_len; | 385 | i += data_len; |
380 | if (message_len < (i + key_rec->enc_key_size)) { | 386 | if (message_len < (i + key_rec->enc_key_size)) { |
381 | ecryptfs_printk(KERN_ERR, "message_len [%d]; max len is [%d]\n", | ||
382 | message_len, (i + key_rec->enc_key_size)); | ||
383 | rc = -EIO; | 387 | rc = -EIO; |
388 | printk(KERN_ERR "%s: message_len [%Zd]; max len is [%Zd]\n", | ||
389 | __func__, message_len, (i + key_rec->enc_key_size)); | ||
384 | goto out; | 390 | goto out; |
385 | } | 391 | } |
386 | if (key_rec->enc_key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) { | 392 | if (key_rec->enc_key_size > ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES) { |
387 | ecryptfs_printk(KERN_ERR, "Encrypted key_size [%d] larger than " | ||
388 | "the maximum key size [%d]\n", | ||
389 | key_rec->enc_key_size, | ||
390 | ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES); | ||
391 | rc = -EIO; | 393 | rc = -EIO; |
394 | printk(KERN_ERR "%s: Encrypted key_size [%Zd] larger than " | ||
395 | "the maximum key size [%d]\n", __func__, | ||
396 | key_rec->enc_key_size, | ||
397 | ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES); | ||
392 | goto out; | 398 | goto out; |
393 | } | 399 | } |
394 | memcpy(key_rec->enc_key, &data[i], key_rec->enc_key_size); | 400 | memcpy(key_rec->enc_key, &data[i], key_rec->enc_key_size); |
@@ -445,7 +451,7 @@ decrypt_pki_encrypted_session_key(struct ecryptfs_auth_tok *auth_tok, | |||
445 | rc = write_tag_64_packet(auth_tok_sig, &(auth_tok->session_key), | 451 | rc = write_tag_64_packet(auth_tok_sig, &(auth_tok->session_key), |
446 | &netlink_message, &netlink_message_length); | 452 | &netlink_message, &netlink_message_length); |
447 | if (rc) { | 453 | if (rc) { |
448 | ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet"); | 454 | ecryptfs_printk(KERN_ERR, "Failed to write tag 64 packet\n"); |
449 | goto out; | 455 | goto out; |
450 | } | 456 | } |
451 | rc = ecryptfs_send_message(ecryptfs_transport, netlink_message, | 457 | rc = ecryptfs_send_message(ecryptfs_transport, netlink_message, |
@@ -570,8 +576,8 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat, | |||
570 | goto out; | 576 | goto out; |
571 | } | 577 | } |
572 | (*new_auth_tok) = &auth_tok_list_item->auth_tok; | 578 | (*new_auth_tok) = &auth_tok_list_item->auth_tok; |
573 | rc = parse_packet_length(&data[(*packet_size)], &body_size, | 579 | rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size, |
574 | &length_size); | 580 | &length_size); |
575 | if (rc) { | 581 | if (rc) { |
576 | printk(KERN_WARNING "Error parsing packet length; " | 582 | printk(KERN_WARNING "Error parsing packet length; " |
577 | "rc = [%d]\n", rc); | 583 | "rc = [%d]\n", rc); |
@@ -704,8 +710,8 @@ parse_tag_3_packet(struct ecryptfs_crypt_stat *crypt_stat, | |||
704 | goto out; | 710 | goto out; |
705 | } | 711 | } |
706 | (*new_auth_tok) = &auth_tok_list_item->auth_tok; | 712 | (*new_auth_tok) = &auth_tok_list_item->auth_tok; |
707 | rc = parse_packet_length(&data[(*packet_size)], &body_size, | 713 | rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size, |
708 | &length_size); | 714 | &length_size); |
709 | if (rc) { | 715 | if (rc) { |
710 | printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n", | 716 | printk(KERN_WARNING "Error parsing packet length; rc = [%d]\n", |
711 | rc); | 717 | rc); |
@@ -852,8 +858,8 @@ parse_tag_11_packet(unsigned char *data, unsigned char *contents, | |||
852 | rc = -EINVAL; | 858 | rc = -EINVAL; |
853 | goto out; | 859 | goto out; |
854 | } | 860 | } |
855 | rc = parse_packet_length(&data[(*packet_size)], &body_size, | 861 | rc = ecryptfs_parse_packet_length(&data[(*packet_size)], &body_size, |
856 | &length_size); | 862 | &length_size); |
857 | if (rc) { | 863 | if (rc) { |
858 | printk(KERN_WARNING "Invalid tag 11 packet format\n"); | 864 | printk(KERN_WARNING "Invalid tag 11 packet format\n"); |
859 | goto out; | 865 | goto out; |
@@ -1405,8 +1411,8 @@ write_tag_1_packet(char *dest, size_t *remaining_bytes, | |||
1405 | auth_tok->token.private_key.key_size; | 1411 | auth_tok->token.private_key.key_size; |
1406 | rc = pki_encrypt_session_key(auth_tok, crypt_stat, key_rec); | 1412 | rc = pki_encrypt_session_key(auth_tok, crypt_stat, key_rec); |
1407 | if (rc) { | 1413 | if (rc) { |
1408 | ecryptfs_printk(KERN_ERR, "Failed to encrypt session key " | 1414 | printk(KERN_ERR "Failed to encrypt session key via a key " |
1409 | "via a pki"); | 1415 | "module; rc = [%d]\n", rc); |
1410 | goto out; | 1416 | goto out; |
1411 | } | 1417 | } |
1412 | if (ecryptfs_verbosity > 0) { | 1418 | if (ecryptfs_verbosity > 0) { |
@@ -1430,8 +1436,9 @@ encrypted_session_key_set: | |||
1430 | goto out; | 1436 | goto out; |
1431 | } | 1437 | } |
1432 | dest[(*packet_size)++] = ECRYPTFS_TAG_1_PACKET_TYPE; | 1438 | dest[(*packet_size)++] = ECRYPTFS_TAG_1_PACKET_TYPE; |
1433 | rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4), | 1439 | rc = ecryptfs_write_packet_length(&dest[(*packet_size)], |
1434 | &packet_size_length); | 1440 | (max_packet_size - 4), |
1441 | &packet_size_length); | ||
1435 | if (rc) { | 1442 | if (rc) { |
1436 | ecryptfs_printk(KERN_ERR, "Error generating tag 1 packet " | 1443 | ecryptfs_printk(KERN_ERR, "Error generating tag 1 packet " |
1437 | "header; cannot generate packet length\n"); | 1444 | "header; cannot generate packet length\n"); |
@@ -1489,8 +1496,9 @@ write_tag_11_packet(char *dest, size_t *remaining_bytes, char *contents, | |||
1489 | goto out; | 1496 | goto out; |
1490 | } | 1497 | } |
1491 | dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE; | 1498 | dest[(*packet_length)++] = ECRYPTFS_TAG_11_PACKET_TYPE; |
1492 | rc = write_packet_length(&dest[(*packet_length)], | 1499 | rc = ecryptfs_write_packet_length(&dest[(*packet_length)], |
1493 | (max_packet_size - 4), &packet_size_length); | 1500 | (max_packet_size - 4), |
1501 | &packet_size_length); | ||
1494 | if (rc) { | 1502 | if (rc) { |
1495 | printk(KERN_ERR "Error generating tag 11 packet header; cannot " | 1503 | printk(KERN_ERR "Error generating tag 11 packet header; cannot " |
1496 | "generate packet length. rc = [%d]\n", rc); | 1504 | "generate packet length. rc = [%d]\n", rc); |
@@ -1682,8 +1690,9 @@ encrypted_session_key_set: | |||
1682 | dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE; | 1690 | dest[(*packet_size)++] = ECRYPTFS_TAG_3_PACKET_TYPE; |
1683 | /* Chop off the Tag 3 identifier(1) and Tag 3 packet size(3) | 1691 | /* Chop off the Tag 3 identifier(1) and Tag 3 packet size(3) |
1684 | * to get the number of octets in the actual Tag 3 packet */ | 1692 | * to get the number of octets in the actual Tag 3 packet */ |
1685 | rc = write_packet_length(&dest[(*packet_size)], (max_packet_size - 4), | 1693 | rc = ecryptfs_write_packet_length(&dest[(*packet_size)], |
1686 | &packet_size_length); | 1694 | (max_packet_size - 4), |
1695 | &packet_size_length); | ||
1687 | if (rc) { | 1696 | if (rc) { |
1688 | printk(KERN_ERR "Error generating tag 3 packet header; cannot " | 1697 | printk(KERN_ERR "Error generating tag 3 packet header; cannot " |
1689 | "generate packet length. rc = [%d]\n", rc); | 1698 | "generate packet length. rc = [%d]\n", rc); |
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c index d25ac9500a92..d603631601eb 100644 --- a/fs/ecryptfs/main.c +++ b/fs/ecryptfs/main.c | |||
@@ -219,7 +219,7 @@ int ecryptfs_interpose(struct dentry *lower_dentry, struct dentry *dentry, | |||
219 | if (rc) { | 219 | if (rc) { |
220 | printk(KERN_ERR "%s: Error attempting to initialize the " | 220 | printk(KERN_ERR "%s: Error attempting to initialize the " |
221 | "persistent file for the dentry with name [%s]; " | 221 | "persistent file for the dentry with name [%s]; " |
222 | "rc = [%d]\n", __FUNCTION__, dentry->d_name.name, rc); | 222 | "rc = [%d]\n", __func__, dentry->d_name.name, rc); |
223 | goto out; | 223 | goto out; |
224 | } | 224 | } |
225 | out: | 225 | out: |
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c index 9cc2aec27b0d..1b5c20058acb 100644 --- a/fs/ecryptfs/messaging.c +++ b/fs/ecryptfs/messaging.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /** | 1 | /** |
2 | * eCryptfs: Linux filesystem encryption layer | 2 | * eCryptfs: Linux filesystem encryption layer |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2006 International Business Machines Corp. | 4 | * Copyright (C) 2004-2008 International Business Machines Corp. |
5 | * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> | 5 | * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> |
6 | * Tyler Hicks <tyhicks@ou.edu> | 6 | * Tyler Hicks <tyhicks@ou.edu> |
7 | * | 7 | * |
@@ -20,19 +20,21 @@ | |||
20 | * 02111-1307, USA. | 20 | * 02111-1307, USA. |
21 | */ | 21 | */ |
22 | #include <linux/sched.h> | 22 | #include <linux/sched.h> |
23 | #include <linux/user_namespace.h> | ||
24 | #include <linux/nsproxy.h> | ||
23 | #include "ecryptfs_kernel.h" | 25 | #include "ecryptfs_kernel.h" |
24 | 26 | ||
25 | static LIST_HEAD(ecryptfs_msg_ctx_free_list); | 27 | static LIST_HEAD(ecryptfs_msg_ctx_free_list); |
26 | static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); | 28 | static LIST_HEAD(ecryptfs_msg_ctx_alloc_list); |
27 | static struct mutex ecryptfs_msg_ctx_lists_mux; | 29 | static struct mutex ecryptfs_msg_ctx_lists_mux; |
28 | 30 | ||
29 | static struct hlist_head *ecryptfs_daemon_id_hash; | 31 | static struct hlist_head *ecryptfs_daemon_hash; |
30 | static struct mutex ecryptfs_daemon_id_hash_mux; | 32 | struct mutex ecryptfs_daemon_hash_mux; |
31 | static int ecryptfs_hash_buckets; | 33 | static int ecryptfs_hash_buckets; |
32 | #define ecryptfs_uid_hash(uid) \ | 34 | #define ecryptfs_uid_hash(uid) \ |
33 | hash_long((unsigned long)uid, ecryptfs_hash_buckets) | 35 | hash_long((unsigned long)uid, ecryptfs_hash_buckets) |
34 | 36 | ||
35 | static unsigned int ecryptfs_msg_counter; | 37 | static u32 ecryptfs_msg_counter; |
36 | static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; | 38 | static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; |
37 | 39 | ||
38 | /** | 40 | /** |
@@ -40,9 +42,10 @@ static struct ecryptfs_msg_ctx *ecryptfs_msg_ctx_arr; | |||
40 | * @msg_ctx: The context that was acquired from the free list | 42 | * @msg_ctx: The context that was acquired from the free list |
41 | * | 43 | * |
42 | * Acquires a context element from the free list and locks the mutex | 44 | * Acquires a context element from the free list and locks the mutex |
43 | * on the context. Returns zero on success; non-zero on error or upon | 45 | * on the context. Sets the msg_ctx task to current. Returns zero on |
44 | * failure to acquire a free context element. Be sure to lock the | 46 | * success; non-zero on error or upon failure to acquire a free |
45 | * list mutex before calling. | 47 | * context element. Must be called with ecryptfs_msg_ctx_lists_mux |
48 | * held. | ||
46 | */ | 49 | */ |
47 | static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) | 50 | static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) |
48 | { | 51 | { |
@@ -50,11 +53,11 @@ static int ecryptfs_acquire_free_msg_ctx(struct ecryptfs_msg_ctx **msg_ctx) | |||
50 | int rc; | 53 | int rc; |
51 | 54 | ||
52 | if (list_empty(&ecryptfs_msg_ctx_free_list)) { | 55 | if (list_empty(&ecryptfs_msg_ctx_free_list)) { |
53 | ecryptfs_printk(KERN_WARNING, "The eCryptfs free " | 56 | printk(KERN_WARNING "%s: The eCryptfs free " |
54 | "context list is empty. It may be helpful to " | 57 | "context list is empty. It may be helpful to " |
55 | "specify the ecryptfs_message_buf_len " | 58 | "specify the ecryptfs_message_buf_len " |
56 | "parameter to be greater than the current " | 59 | "parameter to be greater than the current " |
57 | "value of [%d]\n", ecryptfs_message_buf_len); | 60 | "value of [%d]\n", __func__, ecryptfs_message_buf_len); |
58 | rc = -ENOMEM; | 61 | rc = -ENOMEM; |
59 | goto out; | 62 | goto out; |
60 | } | 63 | } |
@@ -75,8 +78,7 @@ out: | |||
75 | * ecryptfs_msg_ctx_free_to_alloc | 78 | * ecryptfs_msg_ctx_free_to_alloc |
76 | * @msg_ctx: The context to move from the free list to the alloc list | 79 | * @msg_ctx: The context to move from the free list to the alloc list |
77 | * | 80 | * |
78 | * Be sure to lock the list mutex and the context mutex before | 81 | * Must be called with ecryptfs_msg_ctx_lists_mux held. |
79 | * calling. | ||
80 | */ | 82 | */ |
81 | static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) | 83 | static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) |
82 | { | 84 | { |
@@ -89,36 +91,39 @@ static void ecryptfs_msg_ctx_free_to_alloc(struct ecryptfs_msg_ctx *msg_ctx) | |||
89 | * ecryptfs_msg_ctx_alloc_to_free | 91 | * ecryptfs_msg_ctx_alloc_to_free |
90 | * @msg_ctx: The context to move from the alloc list to the free list | 92 | * @msg_ctx: The context to move from the alloc list to the free list |
91 | * | 93 | * |
92 | * Be sure to lock the list mutex and the context mutex before | 94 | * Must be called with ecryptfs_msg_ctx_lists_mux held. |
93 | * calling. | ||
94 | */ | 95 | */ |
95 | static void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) | 96 | void ecryptfs_msg_ctx_alloc_to_free(struct ecryptfs_msg_ctx *msg_ctx) |
96 | { | 97 | { |
97 | list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); | 98 | list_move(&(msg_ctx->node), &ecryptfs_msg_ctx_free_list); |
98 | if (msg_ctx->msg) | 99 | if (msg_ctx->msg) |
99 | kfree(msg_ctx->msg); | 100 | kfree(msg_ctx->msg); |
101 | msg_ctx->msg = NULL; | ||
100 | msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; | 102 | msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_FREE; |
101 | } | 103 | } |
102 | 104 | ||
103 | /** | 105 | /** |
104 | * ecryptfs_find_daemon_id | 106 | * ecryptfs_find_daemon_by_euid |
105 | * @uid: The user id which maps to the desired daemon id | 107 | * @euid: The effective user id which maps to the desired daemon id |
106 | * @id: If return value is zero, points to the desired daemon id | 108 | * @user_ns: The namespace in which @euid applies |
107 | * pointer | 109 | * @daemon: If return value is zero, points to the desired daemon pointer |
108 | * | 110 | * |
109 | * Search the hash list for the given user id. Returns zero if the | 111 | * Must be called with ecryptfs_daemon_hash_mux held. |
110 | * user id exists in the list; non-zero otherwise. The daemon id hash | 112 | * |
111 | * mutex should be held before calling this function. | 113 | * Search the hash list for the given user id. |
114 | * | ||
115 | * Returns zero if the user id exists in the list; non-zero otherwise. | ||
112 | */ | 116 | */ |
113 | static int ecryptfs_find_daemon_id(uid_t uid, struct ecryptfs_daemon_id **id) | 117 | int ecryptfs_find_daemon_by_euid(struct ecryptfs_daemon **daemon, uid_t euid, |
118 | struct user_namespace *user_ns) | ||
114 | { | 119 | { |
115 | struct hlist_node *elem; | 120 | struct hlist_node *elem; |
116 | int rc; | 121 | int rc; |
117 | 122 | ||
118 | hlist_for_each_entry(*id, elem, | 123 | hlist_for_each_entry(*daemon, elem, |
119 | &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)], | 124 | &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)], |
120 | id_chain) { | 125 | euid_chain) { |
121 | if ((*id)->uid == uid) { | 126 | if ((*daemon)->euid == euid && (*daemon)->user_ns == user_ns) { |
122 | rc = 0; | 127 | rc = 0; |
123 | goto out; | 128 | goto out; |
124 | } | 129 | } |
@@ -128,181 +133,325 @@ out: | |||
128 | return rc; | 133 | return rc; |
129 | } | 134 | } |
130 | 135 | ||
131 | static int ecryptfs_send_raw_message(unsigned int transport, u16 msg_type, | 136 | static int |
132 | pid_t pid) | 137 | ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len, |
138 | u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx); | ||
139 | |||
140 | /** | ||
141 | * ecryptfs_send_raw_message | ||
142 | * @transport: Transport type | ||
143 | * @msg_type: Message type | ||
144 | * @daemon: Daemon struct for recipient of message | ||
145 | * | ||
146 | * A raw message is one that does not include an ecryptfs_message | ||
147 | * struct. It simply has a type. | ||
148 | * | ||
149 | * Must be called with ecryptfs_daemon_hash_mux held. | ||
150 | * | ||
151 | * Returns zero on success; non-zero otherwise | ||
152 | */ | ||
153 | static int ecryptfs_send_raw_message(unsigned int transport, u8 msg_type, | ||
154 | struct ecryptfs_daemon *daemon) | ||
133 | { | 155 | { |
156 | struct ecryptfs_msg_ctx *msg_ctx; | ||
134 | int rc; | 157 | int rc; |
135 | 158 | ||
136 | switch(transport) { | 159 | switch(transport) { |
137 | case ECRYPTFS_TRANSPORT_NETLINK: | 160 | case ECRYPTFS_TRANSPORT_NETLINK: |
138 | rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0, pid); | 161 | rc = ecryptfs_send_netlink(NULL, 0, NULL, msg_type, 0, |
162 | daemon->pid); | ||
163 | break; | ||
164 | case ECRYPTFS_TRANSPORT_MISCDEV: | ||
165 | rc = ecryptfs_send_message_locked(transport, NULL, 0, msg_type, | ||
166 | &msg_ctx); | ||
167 | if (rc) { | ||
168 | printk(KERN_ERR "%s: Error whilst attempting to send " | ||
169 | "message via procfs; rc = [%d]\n", __func__, rc); | ||
170 | goto out; | ||
171 | } | ||
172 | /* Raw messages are logically context-free (e.g., no | ||
173 | * reply is expected), so we set the state of the | ||
174 | * ecryptfs_msg_ctx object to indicate that it should | ||
175 | * be freed as soon as the transport sends out the message. */ | ||
176 | mutex_lock(&msg_ctx->mux); | ||
177 | msg_ctx->state = ECRYPTFS_MSG_CTX_STATE_NO_REPLY; | ||
178 | mutex_unlock(&msg_ctx->mux); | ||
139 | break; | 179 | break; |
140 | case ECRYPTFS_TRANSPORT_CONNECTOR: | 180 | case ECRYPTFS_TRANSPORT_CONNECTOR: |
141 | case ECRYPTFS_TRANSPORT_RELAYFS: | 181 | case ECRYPTFS_TRANSPORT_RELAYFS: |
142 | default: | 182 | default: |
143 | rc = -ENOSYS; | 183 | rc = -ENOSYS; |
144 | } | 184 | } |
185 | out: | ||
186 | return rc; | ||
187 | } | ||
188 | |||
189 | /** | ||
190 | * ecryptfs_spawn_daemon - Create and initialize a new daemon struct | ||
191 | * @daemon: Pointer to set to newly allocated daemon struct | ||
192 | * @euid: Effective user id for the daemon | ||
193 | * @user_ns: The namespace in which @euid applies | ||
194 | * @pid: Process id for the daemon | ||
195 | * | ||
196 | * Must be called ceremoniously while in possession of | ||
197 | * ecryptfs_sacred_daemon_hash_mux | ||
198 | * | ||
199 | * Returns zero on success; non-zero otherwise | ||
200 | */ | ||
201 | int | ||
202 | ecryptfs_spawn_daemon(struct ecryptfs_daemon **daemon, uid_t euid, | ||
203 | struct user_namespace *user_ns, struct pid *pid) | ||
204 | { | ||
205 | int rc = 0; | ||
206 | |||
207 | (*daemon) = kzalloc(sizeof(**daemon), GFP_KERNEL); | ||
208 | if (!(*daemon)) { | ||
209 | rc = -ENOMEM; | ||
210 | printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of " | ||
211 | "GFP_KERNEL memory\n", __func__, sizeof(**daemon)); | ||
212 | goto out; | ||
213 | } | ||
214 | (*daemon)->euid = euid; | ||
215 | (*daemon)->user_ns = get_user_ns(user_ns); | ||
216 | (*daemon)->pid = get_pid(pid); | ||
217 | (*daemon)->task = current; | ||
218 | mutex_init(&(*daemon)->mux); | ||
219 | INIT_LIST_HEAD(&(*daemon)->msg_ctx_out_queue); | ||
220 | init_waitqueue_head(&(*daemon)->wait); | ||
221 | (*daemon)->num_queued_msg_ctx = 0; | ||
222 | hlist_add_head(&(*daemon)->euid_chain, | ||
223 | &ecryptfs_daemon_hash[ecryptfs_uid_hash(euid)]); | ||
224 | out: | ||
145 | return rc; | 225 | return rc; |
146 | } | 226 | } |
147 | 227 | ||
148 | /** | 228 | /** |
149 | * ecryptfs_process_helo | 229 | * ecryptfs_process_helo |
150 | * @transport: The underlying transport (netlink, etc.) | 230 | * @transport: The underlying transport (netlink, etc.) |
151 | * @uid: The user ID owner of the message | 231 | * @euid: The user ID owner of the message |
232 | * @user_ns: The namespace in which @euid applies | ||
152 | * @pid: The process ID for the userspace program that sent the | 233 | * @pid: The process ID for the userspace program that sent the |
153 | * message | 234 | * message |
154 | * | 235 | * |
155 | * Adds the uid and pid values to the daemon id hash. If a uid | 236 | * Adds the euid and pid values to the daemon euid hash. If an euid |
156 | * already has a daemon pid registered, the daemon will be | 237 | * already has a daemon pid registered, the daemon will be |
157 | * unregistered before the new daemon id is put into the hash list. | 238 | * unregistered before the new daemon is put into the hash list. |
158 | * Returns zero after adding a new daemon id to the hash list; | 239 | * Returns zero after adding a new daemon to the hash list; |
159 | * non-zero otherwise. | 240 | * non-zero otherwise. |
160 | */ | 241 | */ |
161 | int ecryptfs_process_helo(unsigned int transport, uid_t uid, pid_t pid) | 242 | int ecryptfs_process_helo(unsigned int transport, uid_t euid, |
243 | struct user_namespace *user_ns, struct pid *pid) | ||
162 | { | 244 | { |
163 | struct ecryptfs_daemon_id *new_id; | 245 | struct ecryptfs_daemon *new_daemon; |
164 | struct ecryptfs_daemon_id *old_id; | 246 | struct ecryptfs_daemon *old_daemon; |
165 | int rc; | 247 | int rc; |
166 | 248 | ||
167 | mutex_lock(&ecryptfs_daemon_id_hash_mux); | 249 | mutex_lock(&ecryptfs_daemon_hash_mux); |
168 | new_id = kmalloc(sizeof(*new_id), GFP_KERNEL); | 250 | rc = ecryptfs_find_daemon_by_euid(&old_daemon, euid, user_ns); |
169 | if (!new_id) { | 251 | if (rc != 0) { |
170 | rc = -ENOMEM; | ||
171 | ecryptfs_printk(KERN_ERR, "Failed to allocate memory; unable " | ||
172 | "to register daemon [%d] for user [%d]\n", | ||
173 | pid, uid); | ||
174 | goto unlock; | ||
175 | } | ||
176 | if (!ecryptfs_find_daemon_id(uid, &old_id)) { | ||
177 | printk(KERN_WARNING "Received request from user [%d] " | 252 | printk(KERN_WARNING "Received request from user [%d] " |
178 | "to register daemon [%d]; unregistering daemon " | 253 | "to register daemon [0x%p]; unregistering daemon " |
179 | "[%d]\n", uid, pid, old_id->pid); | 254 | "[0x%p]\n", euid, pid, old_daemon->pid); |
180 | hlist_del(&old_id->id_chain); | 255 | rc = ecryptfs_send_raw_message(transport, ECRYPTFS_MSG_QUIT, |
181 | rc = ecryptfs_send_raw_message(transport, ECRYPTFS_NLMSG_QUIT, | 256 | old_daemon); |
182 | old_id->pid); | ||
183 | if (rc) | 257 | if (rc) |
184 | printk(KERN_WARNING "Failed to send QUIT " | 258 | printk(KERN_WARNING "Failed to send QUIT " |
185 | "message to daemon [%d]; rc = [%d]\n", | 259 | "message to daemon [0x%p]; rc = [%d]\n", |
186 | old_id->pid, rc); | 260 | old_daemon->pid, rc); |
187 | kfree(old_id); | 261 | hlist_del(&old_daemon->euid_chain); |
262 | kfree(old_daemon); | ||
188 | } | 263 | } |
189 | new_id->uid = uid; | 264 | rc = ecryptfs_spawn_daemon(&new_daemon, euid, user_ns, pid); |
190 | new_id->pid = pid; | 265 | if (rc) |
191 | hlist_add_head(&new_id->id_chain, | 266 | printk(KERN_ERR "%s: The gods are displeased with this attempt " |
192 | &ecryptfs_daemon_id_hash[ecryptfs_uid_hash(uid)]); | 267 | "to create a new daemon object for euid [%d]; pid " |
193 | rc = 0; | 268 | "[0x%p]; rc = [%d]\n", __func__, euid, pid, rc); |
194 | unlock: | 269 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
195 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | 270 | return rc; |
271 | } | ||
272 | |||
273 | /** | ||
274 | * ecryptfs_exorcise_daemon - Destroy the daemon struct | ||
275 | * | ||
276 | * Must be called ceremoniously while in possession of | ||
277 | * ecryptfs_daemon_hash_mux and the daemon's own mux. | ||
278 | */ | ||
279 | int ecryptfs_exorcise_daemon(struct ecryptfs_daemon *daemon) | ||
280 | { | ||
281 | struct ecryptfs_msg_ctx *msg_ctx, *msg_ctx_tmp; | ||
282 | int rc = 0; | ||
283 | |||
284 | mutex_lock(&daemon->mux); | ||
285 | if ((daemon->flags & ECRYPTFS_DAEMON_IN_READ) | ||
286 | || (daemon->flags & ECRYPTFS_DAEMON_IN_POLL)) { | ||
287 | rc = -EBUSY; | ||
288 | printk(KERN_WARNING "%s: Attempt to destroy daemon with pid " | ||
289 | "[0x%p], but it is in the midst of a read or a poll\n", | ||
290 | __func__, daemon->pid); | ||
291 | mutex_unlock(&daemon->mux); | ||
292 | goto out; | ||
293 | } | ||
294 | list_for_each_entry_safe(msg_ctx, msg_ctx_tmp, | ||
295 | &daemon->msg_ctx_out_queue, daemon_out_list) { | ||
296 | list_del(&msg_ctx->daemon_out_list); | ||
297 | daemon->num_queued_msg_ctx--; | ||
298 | printk(KERN_WARNING "%s: Warning: dropping message that is in " | ||
299 | "the out queue of a dying daemon\n", __func__); | ||
300 | ecryptfs_msg_ctx_alloc_to_free(msg_ctx); | ||
301 | } | ||
302 | hlist_del(&daemon->euid_chain); | ||
303 | if (daemon->task) | ||
304 | wake_up_process(daemon->task); | ||
305 | if (daemon->pid) | ||
306 | put_pid(daemon->pid); | ||
307 | if (daemon->user_ns) | ||
308 | put_user_ns(daemon->user_ns); | ||
309 | mutex_unlock(&daemon->mux); | ||
310 | memset(daemon, 0, sizeof(*daemon)); | ||
311 | kfree(daemon); | ||
312 | out: | ||
196 | return rc; | 313 | return rc; |
197 | } | 314 | } |
198 | 315 | ||
199 | /** | 316 | /** |
200 | * ecryptfs_process_quit | 317 | * ecryptfs_process_quit |
201 | * @uid: The user ID owner of the message | 318 | * @euid: The user ID owner of the message |
319 | * @user_ns: The namespace in which @euid applies | ||
202 | * @pid: The process ID for the userspace program that sent the | 320 | * @pid: The process ID for the userspace program that sent the |
203 | * message | 321 | * message |
204 | * | 322 | * |
205 | * Deletes the corresponding daemon id for the given uid and pid, if | 323 | * Deletes the corresponding daemon for the given euid and pid, if |
206 | * it is the registered that is requesting the deletion. Returns zero | 324 | * it is the registered that is requesting the deletion. Returns zero |
207 | * after deleting the desired daemon id; non-zero otherwise. | 325 | * after deleting the desired daemon; non-zero otherwise. |
208 | */ | 326 | */ |
209 | int ecryptfs_process_quit(uid_t uid, pid_t pid) | 327 | int ecryptfs_process_quit(uid_t euid, struct user_namespace *user_ns, |
328 | struct pid *pid) | ||
210 | { | 329 | { |
211 | struct ecryptfs_daemon_id *id; | 330 | struct ecryptfs_daemon *daemon; |
212 | int rc; | 331 | int rc; |
213 | 332 | ||
214 | mutex_lock(&ecryptfs_daemon_id_hash_mux); | 333 | mutex_lock(&ecryptfs_daemon_hash_mux); |
215 | if (ecryptfs_find_daemon_id(uid, &id)) { | 334 | rc = ecryptfs_find_daemon_by_euid(&daemon, euid, user_ns); |
335 | if (rc || !daemon) { | ||
216 | rc = -EINVAL; | 336 | rc = -EINVAL; |
217 | ecryptfs_printk(KERN_ERR, "Received request from user [%d] to " | 337 | printk(KERN_ERR "Received request from user [%d] to " |
218 | "unregister unrecognized daemon [%d]\n", uid, | 338 | "unregister unrecognized daemon [0x%p]\n", euid, pid); |
219 | pid); | 339 | goto out_unlock; |
220 | goto unlock; | ||
221 | } | 340 | } |
222 | if (id->pid != pid) { | 341 | rc = ecryptfs_exorcise_daemon(daemon); |
223 | rc = -EINVAL; | 342 | out_unlock: |
224 | ecryptfs_printk(KERN_WARNING, "Received request from user [%d] " | 343 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
225 | "with pid [%d] to unregister daemon [%d]\n", | ||
226 | uid, pid, id->pid); | ||
227 | goto unlock; | ||
228 | } | ||
229 | hlist_del(&id->id_chain); | ||
230 | kfree(id); | ||
231 | rc = 0; | ||
232 | unlock: | ||
233 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | ||
234 | return rc; | 344 | return rc; |
235 | } | 345 | } |
236 | 346 | ||
237 | /** | 347 | /** |
238 | * ecryptfs_process_reponse | 348 | * ecryptfs_process_reponse |
239 | * @msg: The ecryptfs message received; the caller should sanity check | 349 | * @msg: The ecryptfs message received; the caller should sanity check |
240 | * msg->data_len | 350 | * msg->data_len and free the memory |
241 | * @pid: The process ID of the userspace application that sent the | 351 | * @pid: The process ID of the userspace application that sent the |
242 | * message | 352 | * message |
243 | * @seq: The sequence number of the message | 353 | * @seq: The sequence number of the message; must match the sequence |
354 | * number for the existing message context waiting for this | ||
355 | * response | ||
356 | * | ||
357 | * Processes a response message after sending an operation request to | ||
358 | * userspace. Some other process is awaiting this response. Before | ||
359 | * sending out its first communications, the other process allocated a | ||
360 | * msg_ctx from the ecryptfs_msg_ctx_arr at a particular index. The | ||
361 | * response message contains this index so that we can copy over the | ||
362 | * response message into the msg_ctx that the process holds a | ||
363 | * reference to. The other process is going to wake up, check to see | ||
364 | * that msg_ctx->state == ECRYPTFS_MSG_CTX_STATE_DONE, and then | ||
365 | * proceed to read off and process the response message. Returns zero | ||
366 | * upon delivery to desired context element; non-zero upon delivery | ||
367 | * failure or error. | ||
244 | * | 368 | * |
245 | * Processes a response message after sending a operation request to | 369 | * Returns zero on success; non-zero otherwise |
246 | * userspace. Returns zero upon delivery to desired context element; | ||
247 | * non-zero upon delivery failure or error. | ||
248 | */ | 370 | */ |
249 | int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t uid, | 371 | int ecryptfs_process_response(struct ecryptfs_message *msg, uid_t euid, |
250 | pid_t pid, u32 seq) | 372 | struct user_namespace *user_ns, struct pid *pid, |
373 | u32 seq) | ||
251 | { | 374 | { |
252 | struct ecryptfs_daemon_id *id; | 375 | struct ecryptfs_daemon *daemon; |
253 | struct ecryptfs_msg_ctx *msg_ctx; | 376 | struct ecryptfs_msg_ctx *msg_ctx; |
254 | int msg_size; | 377 | size_t msg_size; |
378 | struct nsproxy *nsproxy; | ||
379 | struct user_namespace *current_user_ns; | ||
255 | int rc; | 380 | int rc; |
256 | 381 | ||
257 | if (msg->index >= ecryptfs_message_buf_len) { | 382 | if (msg->index >= ecryptfs_message_buf_len) { |
258 | rc = -EINVAL; | 383 | rc = -EINVAL; |
259 | ecryptfs_printk(KERN_ERR, "Attempt to reference " | 384 | printk(KERN_ERR "%s: Attempt to reference " |
260 | "context buffer at index [%d]; maximum " | 385 | "context buffer at index [%d]; maximum " |
261 | "allowable is [%d]\n", msg->index, | 386 | "allowable is [%d]\n", __func__, msg->index, |
262 | (ecryptfs_message_buf_len - 1)); | 387 | (ecryptfs_message_buf_len - 1)); |
263 | goto out; | 388 | goto out; |
264 | } | 389 | } |
265 | msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; | 390 | msg_ctx = &ecryptfs_msg_ctx_arr[msg->index]; |
266 | mutex_lock(&msg_ctx->mux); | 391 | mutex_lock(&msg_ctx->mux); |
267 | if (ecryptfs_find_daemon_id(msg_ctx->task->euid, &id)) { | 392 | mutex_lock(&ecryptfs_daemon_hash_mux); |
393 | rcu_read_lock(); | ||
394 | nsproxy = task_nsproxy(msg_ctx->task); | ||
395 | if (nsproxy == NULL) { | ||
268 | rc = -EBADMSG; | 396 | rc = -EBADMSG; |
269 | ecryptfs_printk(KERN_WARNING, "User [%d] received a " | 397 | printk(KERN_ERR "%s: Receiving process is a zombie. Dropping " |
270 | "message response from process [%d] but does " | 398 | "message.\n", __func__); |
271 | "not have a registered daemon\n", | 399 | rcu_read_unlock(); |
272 | msg_ctx->task->euid, pid); | 400 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
273 | goto wake_up; | 401 | goto wake_up; |
274 | } | 402 | } |
275 | if (msg_ctx->task->euid != uid) { | 403 | current_user_ns = nsproxy->user_ns; |
404 | rc = ecryptfs_find_daemon_by_euid(&daemon, msg_ctx->task->euid, | ||
405 | current_user_ns); | ||
406 | rcu_read_unlock(); | ||
407 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
408 | if (rc) { | ||
409 | rc = -EBADMSG; | ||
410 | printk(KERN_WARNING "%s: User [%d] received a " | ||
411 | "message response from process [0x%p] but does " | ||
412 | "not have a registered daemon\n", __func__, | ||
413 | msg_ctx->task->euid, pid); | ||
414 | goto wake_up; | ||
415 | } | ||
416 | if (msg_ctx->task->euid != euid) { | ||
276 | rc = -EBADMSG; | 417 | rc = -EBADMSG; |
277 | ecryptfs_printk(KERN_WARNING, "Received message from user " | 418 | printk(KERN_WARNING "%s: Received message from user " |
278 | "[%d]; expected message from user [%d]\n", | 419 | "[%d]; expected message from user [%d]\n", __func__, |
279 | uid, msg_ctx->task->euid); | 420 | euid, msg_ctx->task->euid); |
280 | goto unlock; | 421 | goto unlock; |
281 | } | 422 | } |
282 | if (id->pid != pid) { | 423 | if (current_user_ns != user_ns) { |
283 | rc = -EBADMSG; | 424 | rc = -EBADMSG; |
284 | ecryptfs_printk(KERN_ERR, "User [%d] received a " | 425 | printk(KERN_WARNING "%s: Received message from user_ns " |
285 | "message response from an unrecognized " | 426 | "[0x%p]; expected message from user_ns [0x%p]\n", |
286 | "process [%d]\n", msg_ctx->task->euid, pid); | 427 | __func__, user_ns, nsproxy->user_ns); |
428 | goto unlock; | ||
429 | } | ||
430 | if (daemon->pid != pid) { | ||
431 | rc = -EBADMSG; | ||
432 | printk(KERN_ERR "%s: User [%d] sent a message response " | ||
433 | "from an unrecognized process [0x%p]\n", | ||
434 | __func__, msg_ctx->task->euid, pid); | ||
287 | goto unlock; | 435 | goto unlock; |
288 | } | 436 | } |
289 | if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { | 437 | if (msg_ctx->state != ECRYPTFS_MSG_CTX_STATE_PENDING) { |
290 | rc = -EINVAL; | 438 | rc = -EINVAL; |
291 | ecryptfs_printk(KERN_WARNING, "Desired context element is not " | 439 | printk(KERN_WARNING "%s: Desired context element is not " |
292 | "pending a response\n"); | 440 | "pending a response\n", __func__); |
293 | goto unlock; | 441 | goto unlock; |
294 | } else if (msg_ctx->counter != seq) { | 442 | } else if (msg_ctx->counter != seq) { |
295 | rc = -EINVAL; | 443 | rc = -EINVAL; |
296 | ecryptfs_printk(KERN_WARNING, "Invalid message sequence; " | 444 | printk(KERN_WARNING "%s: Invalid message sequence; " |
297 | "expected [%d]; received [%d]\n", | 445 | "expected [%d]; received [%d]\n", __func__, |
298 | msg_ctx->counter, seq); | 446 | msg_ctx->counter, seq); |
299 | goto unlock; | 447 | goto unlock; |
300 | } | 448 | } |
301 | msg_size = sizeof(*msg) + msg->data_len; | 449 | msg_size = (sizeof(*msg) + msg->data_len); |
302 | msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL); | 450 | msg_ctx->msg = kmalloc(msg_size, GFP_KERNEL); |
303 | if (!msg_ctx->msg) { | 451 | if (!msg_ctx->msg) { |
304 | rc = -ENOMEM; | 452 | rc = -ENOMEM; |
305 | ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n"); | 453 | printk(KERN_ERR "%s: Failed to allocate [%Zd] bytes of " |
454 | "GFP_KERNEL memory\n", __func__, msg_size); | ||
306 | goto unlock; | 455 | goto unlock; |
307 | } | 456 | } |
308 | memcpy(msg_ctx->msg, msg, msg_size); | 457 | memcpy(msg_ctx->msg, msg, msg_size); |
@@ -317,34 +466,38 @@ out: | |||
317 | } | 466 | } |
318 | 467 | ||
319 | /** | 468 | /** |
320 | * ecryptfs_send_message | 469 | * ecryptfs_send_message_locked |
321 | * @transport: The transport over which to send the message (i.e., | 470 | * @transport: The transport over which to send the message (i.e., |
322 | * netlink) | 471 | * netlink) |
323 | * @data: The data to send | 472 | * @data: The data to send |
324 | * @data_len: The length of data | 473 | * @data_len: The length of data |
325 | * @msg_ctx: The message context allocated for the send | 474 | * @msg_ctx: The message context allocated for the send |
475 | * | ||
476 | * Must be called with ecryptfs_daemon_hash_mux held. | ||
477 | * | ||
478 | * Returns zero on success; non-zero otherwise | ||
326 | */ | 479 | */ |
327 | int ecryptfs_send_message(unsigned int transport, char *data, int data_len, | 480 | static int |
328 | struct ecryptfs_msg_ctx **msg_ctx) | 481 | ecryptfs_send_message_locked(unsigned int transport, char *data, int data_len, |
482 | u8 msg_type, struct ecryptfs_msg_ctx **msg_ctx) | ||
329 | { | 483 | { |
330 | struct ecryptfs_daemon_id *id; | 484 | struct ecryptfs_daemon *daemon; |
331 | int rc; | 485 | int rc; |
332 | 486 | ||
333 | mutex_lock(&ecryptfs_daemon_id_hash_mux); | 487 | rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, |
334 | if (ecryptfs_find_daemon_id(current->euid, &id)) { | 488 | current->nsproxy->user_ns); |
335 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | 489 | if (rc || !daemon) { |
336 | rc = -ENOTCONN; | 490 | rc = -ENOTCONN; |
337 | ecryptfs_printk(KERN_ERR, "User [%d] does not have a daemon " | 491 | printk(KERN_ERR "%s: User [%d] does not have a daemon " |
338 | "registered\n", current->euid); | 492 | "registered\n", __func__, current->euid); |
339 | goto out; | 493 | goto out; |
340 | } | 494 | } |
341 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | ||
342 | mutex_lock(&ecryptfs_msg_ctx_lists_mux); | 495 | mutex_lock(&ecryptfs_msg_ctx_lists_mux); |
343 | rc = ecryptfs_acquire_free_msg_ctx(msg_ctx); | 496 | rc = ecryptfs_acquire_free_msg_ctx(msg_ctx); |
344 | if (rc) { | 497 | if (rc) { |
345 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); | 498 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); |
346 | ecryptfs_printk(KERN_WARNING, "Could not claim a free " | 499 | printk(KERN_WARNING "%s: Could not claim a free " |
347 | "context element\n"); | 500 | "context element\n", __func__); |
348 | goto out; | 501 | goto out; |
349 | } | 502 | } |
350 | ecryptfs_msg_ctx_free_to_alloc(*msg_ctx); | 503 | ecryptfs_msg_ctx_free_to_alloc(*msg_ctx); |
@@ -352,23 +505,50 @@ int ecryptfs_send_message(unsigned int transport, char *data, int data_len, | |||
352 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); | 505 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); |
353 | switch (transport) { | 506 | switch (transport) { |
354 | case ECRYPTFS_TRANSPORT_NETLINK: | 507 | case ECRYPTFS_TRANSPORT_NETLINK: |
355 | rc = ecryptfs_send_netlink(data, data_len, *msg_ctx, | 508 | rc = ecryptfs_send_netlink(data, data_len, *msg_ctx, msg_type, |
356 | ECRYPTFS_NLMSG_REQUEST, 0, id->pid); | 509 | 0, daemon->pid); |
510 | break; | ||
511 | case ECRYPTFS_TRANSPORT_MISCDEV: | ||
512 | rc = ecryptfs_send_miscdev(data, data_len, *msg_ctx, msg_type, | ||
513 | 0, daemon); | ||
357 | break; | 514 | break; |
358 | case ECRYPTFS_TRANSPORT_CONNECTOR: | 515 | case ECRYPTFS_TRANSPORT_CONNECTOR: |
359 | case ECRYPTFS_TRANSPORT_RELAYFS: | 516 | case ECRYPTFS_TRANSPORT_RELAYFS: |
360 | default: | 517 | default: |
361 | rc = -ENOSYS; | 518 | rc = -ENOSYS; |
362 | } | 519 | } |
363 | if (rc) { | 520 | if (rc) |
364 | printk(KERN_ERR "Error attempting to send message to userspace " | 521 | printk(KERN_ERR "%s: Error attempting to send message to " |
365 | "daemon; rc = [%d]\n", rc); | 522 | "userspace daemon; rc = [%d]\n", __func__, rc); |
366 | } | ||
367 | out: | 523 | out: |
368 | return rc; | 524 | return rc; |
369 | } | 525 | } |
370 | 526 | ||
371 | /** | 527 | /** |
528 | * ecryptfs_send_message | ||
529 | * @transport: The transport over which to send the message (i.e., | ||
530 | * netlink) | ||
531 | * @data: The data to send | ||
532 | * @data_len: The length of data | ||
533 | * @msg_ctx: The message context allocated for the send | ||
534 | * | ||
535 | * Grabs ecryptfs_daemon_hash_mux. | ||
536 | * | ||
537 | * Returns zero on success; non-zero otherwise | ||
538 | */ | ||
539 | int ecryptfs_send_message(unsigned int transport, char *data, int data_len, | ||
540 | struct ecryptfs_msg_ctx **msg_ctx) | ||
541 | { | ||
542 | int rc; | ||
543 | |||
544 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
545 | rc = ecryptfs_send_message_locked(transport, data, data_len, | ||
546 | ECRYPTFS_MSG_REQUEST, msg_ctx); | ||
547 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
548 | return rc; | ||
549 | } | ||
550 | |||
551 | /** | ||
372 | * ecryptfs_wait_for_response | 552 | * ecryptfs_wait_for_response |
373 | * @msg_ctx: The context that was assigned when sending a message | 553 | * @msg_ctx: The context that was assigned when sending a message |
374 | * @msg: The incoming message from userspace; not set if rc != 0 | 554 | * @msg: The incoming message from userspace; not set if rc != 0 |
@@ -377,7 +557,7 @@ out: | |||
377 | * of time exceeds ecryptfs_message_wait_timeout. If zero is | 557 | * of time exceeds ecryptfs_message_wait_timeout. If zero is |
378 | * returned, msg will point to a valid message from userspace; a | 558 | * returned, msg will point to a valid message from userspace; a |
379 | * non-zero value is returned upon failure to receive a message or an | 559 | * non-zero value is returned upon failure to receive a message or an |
380 | * error occurs. | 560 | * error occurs. Callee must free @msg on success. |
381 | */ | 561 | */ |
382 | int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, | 562 | int ecryptfs_wait_for_response(struct ecryptfs_msg_ctx *msg_ctx, |
383 | struct ecryptfs_message **msg) | 563 | struct ecryptfs_message **msg) |
@@ -413,32 +593,32 @@ int ecryptfs_init_messaging(unsigned int transport) | |||
413 | 593 | ||
414 | if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) { | 594 | if (ecryptfs_number_of_users > ECRYPTFS_MAX_NUM_USERS) { |
415 | ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS; | 595 | ecryptfs_number_of_users = ECRYPTFS_MAX_NUM_USERS; |
416 | ecryptfs_printk(KERN_WARNING, "Specified number of users is " | 596 | printk(KERN_WARNING "%s: Specified number of users is " |
417 | "too large, defaulting to [%d] users\n", | 597 | "too large, defaulting to [%d] users\n", __func__, |
418 | ecryptfs_number_of_users); | 598 | ecryptfs_number_of_users); |
419 | } | 599 | } |
420 | mutex_init(&ecryptfs_daemon_id_hash_mux); | 600 | mutex_init(&ecryptfs_daemon_hash_mux); |
421 | mutex_lock(&ecryptfs_daemon_id_hash_mux); | 601 | mutex_lock(&ecryptfs_daemon_hash_mux); |
422 | ecryptfs_hash_buckets = 1; | 602 | ecryptfs_hash_buckets = 1; |
423 | while (ecryptfs_number_of_users >> ecryptfs_hash_buckets) | 603 | while (ecryptfs_number_of_users >> ecryptfs_hash_buckets) |
424 | ecryptfs_hash_buckets++; | 604 | ecryptfs_hash_buckets++; |
425 | ecryptfs_daemon_id_hash = kmalloc(sizeof(struct hlist_head) | 605 | ecryptfs_daemon_hash = kmalloc((sizeof(struct hlist_head) |
426 | * ecryptfs_hash_buckets, GFP_KERNEL); | 606 | * ecryptfs_hash_buckets), GFP_KERNEL); |
427 | if (!ecryptfs_daemon_id_hash) { | 607 | if (!ecryptfs_daemon_hash) { |
428 | rc = -ENOMEM; | 608 | rc = -ENOMEM; |
429 | ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n"); | 609 | printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); |
430 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | 610 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
431 | goto out; | 611 | goto out; |
432 | } | 612 | } |
433 | for (i = 0; i < ecryptfs_hash_buckets; i++) | 613 | for (i = 0; i < ecryptfs_hash_buckets; i++) |
434 | INIT_HLIST_HEAD(&ecryptfs_daemon_id_hash[i]); | 614 | INIT_HLIST_HEAD(&ecryptfs_daemon_hash[i]); |
435 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | 615 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
436 | |||
437 | ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) | 616 | ecryptfs_msg_ctx_arr = kmalloc((sizeof(struct ecryptfs_msg_ctx) |
438 | * ecryptfs_message_buf_len), GFP_KERNEL); | 617 | * ecryptfs_message_buf_len), |
618 | GFP_KERNEL); | ||
439 | if (!ecryptfs_msg_ctx_arr) { | 619 | if (!ecryptfs_msg_ctx_arr) { |
440 | rc = -ENOMEM; | 620 | rc = -ENOMEM; |
441 | ecryptfs_printk(KERN_ERR, "Failed to allocate memory\n"); | 621 | printk(KERN_ERR "%s: Failed to allocate memory\n", __func__); |
442 | goto out; | 622 | goto out; |
443 | } | 623 | } |
444 | mutex_init(&ecryptfs_msg_ctx_lists_mux); | 624 | mutex_init(&ecryptfs_msg_ctx_lists_mux); |
@@ -446,6 +626,7 @@ int ecryptfs_init_messaging(unsigned int transport) | |||
446 | ecryptfs_msg_counter = 0; | 626 | ecryptfs_msg_counter = 0; |
447 | for (i = 0; i < ecryptfs_message_buf_len; i++) { | 627 | for (i = 0; i < ecryptfs_message_buf_len; i++) { |
448 | INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node); | 628 | INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].node); |
629 | INIT_LIST_HEAD(&ecryptfs_msg_ctx_arr[i].daemon_out_list); | ||
449 | mutex_init(&ecryptfs_msg_ctx_arr[i].mux); | 630 | mutex_init(&ecryptfs_msg_ctx_arr[i].mux); |
450 | mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); | 631 | mutex_lock(&ecryptfs_msg_ctx_arr[i].mux); |
451 | ecryptfs_msg_ctx_arr[i].index = i; | 632 | ecryptfs_msg_ctx_arr[i].index = i; |
@@ -464,6 +645,11 @@ int ecryptfs_init_messaging(unsigned int transport) | |||
464 | if (rc) | 645 | if (rc) |
465 | ecryptfs_release_messaging(transport); | 646 | ecryptfs_release_messaging(transport); |
466 | break; | 647 | break; |
648 | case ECRYPTFS_TRANSPORT_MISCDEV: | ||
649 | rc = ecryptfs_init_ecryptfs_miscdev(); | ||
650 | if (rc) | ||
651 | ecryptfs_release_messaging(transport); | ||
652 | break; | ||
467 | case ECRYPTFS_TRANSPORT_CONNECTOR: | 653 | case ECRYPTFS_TRANSPORT_CONNECTOR: |
468 | case ECRYPTFS_TRANSPORT_RELAYFS: | 654 | case ECRYPTFS_TRANSPORT_RELAYFS: |
469 | default: | 655 | default: |
@@ -488,27 +674,37 @@ void ecryptfs_release_messaging(unsigned int transport) | |||
488 | kfree(ecryptfs_msg_ctx_arr); | 674 | kfree(ecryptfs_msg_ctx_arr); |
489 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); | 675 | mutex_unlock(&ecryptfs_msg_ctx_lists_mux); |
490 | } | 676 | } |
491 | if (ecryptfs_daemon_id_hash) { | 677 | if (ecryptfs_daemon_hash) { |
492 | struct hlist_node *elem; | 678 | struct hlist_node *elem; |
493 | struct ecryptfs_daemon_id *id; | 679 | struct ecryptfs_daemon *daemon; |
494 | int i; | 680 | int i; |
495 | 681 | ||
496 | mutex_lock(&ecryptfs_daemon_id_hash_mux); | 682 | mutex_lock(&ecryptfs_daemon_hash_mux); |
497 | for (i = 0; i < ecryptfs_hash_buckets; i++) { | 683 | for (i = 0; i < ecryptfs_hash_buckets; i++) { |
498 | hlist_for_each_entry(id, elem, | 684 | int rc; |
499 | &ecryptfs_daemon_id_hash[i], | 685 | |
500 | id_chain) { | 686 | hlist_for_each_entry(daemon, elem, |
501 | hlist_del(elem); | 687 | &ecryptfs_daemon_hash[i], |
502 | kfree(id); | 688 | euid_chain) { |
689 | rc = ecryptfs_exorcise_daemon(daemon); | ||
690 | if (rc) | ||
691 | printk(KERN_ERR "%s: Error whilst " | ||
692 | "attempting to destroy daemon; " | ||
693 | "rc = [%d]. Dazed and confused, " | ||
694 | "but trying to continue.\n", | ||
695 | __func__, rc); | ||
503 | } | 696 | } |
504 | } | 697 | } |
505 | kfree(ecryptfs_daemon_id_hash); | 698 | kfree(ecryptfs_daemon_hash); |
506 | mutex_unlock(&ecryptfs_daemon_id_hash_mux); | 699 | mutex_unlock(&ecryptfs_daemon_hash_mux); |
507 | } | 700 | } |
508 | switch(transport) { | 701 | switch(transport) { |
509 | case ECRYPTFS_TRANSPORT_NETLINK: | 702 | case ECRYPTFS_TRANSPORT_NETLINK: |
510 | ecryptfs_release_netlink(); | 703 | ecryptfs_release_netlink(); |
511 | break; | 704 | break; |
705 | case ECRYPTFS_TRANSPORT_MISCDEV: | ||
706 | ecryptfs_destroy_ecryptfs_miscdev(); | ||
707 | break; | ||
512 | case ECRYPTFS_TRANSPORT_CONNECTOR: | 708 | case ECRYPTFS_TRANSPORT_CONNECTOR: |
513 | case ECRYPTFS_TRANSPORT_RELAYFS: | 709 | case ECRYPTFS_TRANSPORT_RELAYFS: |
514 | default: | 710 | default: |
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c new file mode 100644 index 000000000000..788995efd1d3 --- /dev/null +++ b/fs/ecryptfs/miscdev.c | |||
@@ -0,0 +1,598 @@ | |||
1 | /** | ||
2 | * eCryptfs: Linux filesystem encryption layer | ||
3 | * | ||
4 | * Copyright (C) 2008 International Business Machines Corp. | ||
5 | * Author(s): Michael A. Halcrow <mhalcrow@us.ibm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA | ||
19 | * 02111-1307, USA. | ||
20 | */ | ||
21 | |||
22 | #include <linux/fs.h> | ||
23 | #include <linux/hash.h> | ||
24 | #include <linux/random.h> | ||
25 | #include <linux/miscdevice.h> | ||
26 | #include <linux/poll.h> | ||
27 | #include <linux/wait.h> | ||
28 | #include <linux/module.h> | ||
29 | #include "ecryptfs_kernel.h" | ||
30 | |||
31 | static atomic_t ecryptfs_num_miscdev_opens; | ||
32 | |||
33 | /** | ||
34 | * ecryptfs_miscdev_poll | ||
35 | * @file: dev file (ignored) | ||
36 | * @pt: dev poll table (ignored) | ||
37 | * | ||
38 | * Returns the poll mask | ||
39 | */ | ||
40 | static unsigned int | ||
41 | ecryptfs_miscdev_poll(struct file *file, poll_table *pt) | ||
42 | { | ||
43 | struct ecryptfs_daemon *daemon; | ||
44 | unsigned int mask = 0; | ||
45 | int rc; | ||
46 | |||
47 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
48 | /* TODO: Just use file->private_data? */ | ||
49 | rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, | ||
50 | current->nsproxy->user_ns); | ||
51 | BUG_ON(rc || !daemon); | ||
52 | mutex_lock(&daemon->mux); | ||
53 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
54 | if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { | ||
55 | printk(KERN_WARNING "%s: Attempt to poll on zombified " | ||
56 | "daemon\n", __func__); | ||
57 | goto out_unlock_daemon; | ||
58 | } | ||
59 | if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) | ||
60 | goto out_unlock_daemon; | ||
61 | if (daemon->flags & ECRYPTFS_DAEMON_IN_POLL) | ||
62 | goto out_unlock_daemon; | ||
63 | daemon->flags |= ECRYPTFS_DAEMON_IN_POLL; | ||
64 | mutex_unlock(&daemon->mux); | ||
65 | poll_wait(file, &daemon->wait, pt); | ||
66 | mutex_lock(&daemon->mux); | ||
67 | if (!list_empty(&daemon->msg_ctx_out_queue)) | ||
68 | mask |= POLLIN | POLLRDNORM; | ||
69 | out_unlock_daemon: | ||
70 | daemon->flags &= ~ECRYPTFS_DAEMON_IN_POLL; | ||
71 | mutex_unlock(&daemon->mux); | ||
72 | return mask; | ||
73 | } | ||
74 | |||
75 | /** | ||
76 | * ecryptfs_miscdev_open | ||
77 | * @inode: inode of miscdev handle (ignored) | ||
78 | * @file: file for miscdev handle (ignored) | ||
79 | * | ||
80 | * Returns zero on success; non-zero otherwise | ||
81 | */ | ||
82 | static int | ||
83 | ecryptfs_miscdev_open(struct inode *inode, struct file *file) | ||
84 | { | ||
85 | struct ecryptfs_daemon *daemon = NULL; | ||
86 | int rc; | ||
87 | |||
88 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
89 | rc = try_module_get(THIS_MODULE); | ||
90 | if (rc == 0) { | ||
91 | rc = -EIO; | ||
92 | printk(KERN_ERR "%s: Error attempting to increment module use " | ||
93 | "count; rc = [%d]\n", __func__, rc); | ||
94 | goto out_unlock_daemon_list; | ||
95 | } | ||
96 | rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, | ||
97 | current->nsproxy->user_ns); | ||
98 | if (rc || !daemon) { | ||
99 | rc = ecryptfs_spawn_daemon(&daemon, current->euid, | ||
100 | current->nsproxy->user_ns, | ||
101 | task_pid(current)); | ||
102 | if (rc) { | ||
103 | printk(KERN_ERR "%s: Error attempting to spawn daemon; " | ||
104 | "rc = [%d]\n", __func__, rc); | ||
105 | goto out_module_put_unlock_daemon_list; | ||
106 | } | ||
107 | } | ||
108 | mutex_lock(&daemon->mux); | ||
109 | if (daemon->pid != task_pid(current)) { | ||
110 | rc = -EINVAL; | ||
111 | printk(KERN_ERR "%s: pid [0x%p] has registered with euid [%d], " | ||
112 | "but pid [0x%p] has attempted to open the handle " | ||
113 | "instead\n", __func__, daemon->pid, daemon->euid, | ||
114 | task_pid(current)); | ||
115 | goto out_unlock_daemon; | ||
116 | } | ||
117 | if (daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN) { | ||
118 | rc = -EBUSY; | ||
119 | printk(KERN_ERR "%s: Miscellaneous device handle may only be " | ||
120 | "opened once per daemon; pid [0x%p] already has this " | ||
121 | "handle open\n", __func__, daemon->pid); | ||
122 | goto out_unlock_daemon; | ||
123 | } | ||
124 | daemon->flags |= ECRYPTFS_DAEMON_MISCDEV_OPEN; | ||
125 | atomic_inc(&ecryptfs_num_miscdev_opens); | ||
126 | out_unlock_daemon: | ||
127 | mutex_unlock(&daemon->mux); | ||
128 | out_module_put_unlock_daemon_list: | ||
129 | if (rc) | ||
130 | module_put(THIS_MODULE); | ||
131 | out_unlock_daemon_list: | ||
132 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
133 | return rc; | ||
134 | } | ||
135 | |||
136 | /** | ||
137 | * ecryptfs_miscdev_release | ||
138 | * @inode: inode of fs/ecryptfs/euid handle (ignored) | ||
139 | * @file: file for fs/ecryptfs/euid handle (ignored) | ||
140 | * | ||
141 | * This keeps the daemon registered until the daemon sends another | ||
142 | * ioctl to fs/ecryptfs/ctl or until the kernel module unregisters. | ||
143 | * | ||
144 | * Returns zero on success; non-zero otherwise | ||
145 | */ | ||
146 | static int | ||
147 | ecryptfs_miscdev_release(struct inode *inode, struct file *file) | ||
148 | { | ||
149 | struct ecryptfs_daemon *daemon = NULL; | ||
150 | int rc; | ||
151 | |||
152 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
153 | rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, | ||
154 | current->nsproxy->user_ns); | ||
155 | BUG_ON(rc || !daemon); | ||
156 | mutex_lock(&daemon->mux); | ||
157 | BUG_ON(daemon->pid != task_pid(current)); | ||
158 | BUG_ON(!(daemon->flags & ECRYPTFS_DAEMON_MISCDEV_OPEN)); | ||
159 | daemon->flags &= ~ECRYPTFS_DAEMON_MISCDEV_OPEN; | ||
160 | atomic_dec(&ecryptfs_num_miscdev_opens); | ||
161 | mutex_unlock(&daemon->mux); | ||
162 | rc = ecryptfs_exorcise_daemon(daemon); | ||
163 | if (rc) { | ||
164 | printk(KERN_CRIT "%s: Fatal error whilst attempting to " | ||
165 | "shut down daemon; rc = [%d]. Please report this " | ||
166 | "bug.\n", __func__, rc); | ||
167 | BUG(); | ||
168 | } | ||
169 | module_put(THIS_MODULE); | ||
170 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
171 | return rc; | ||
172 | } | ||
173 | |||
174 | /** | ||
175 | * ecryptfs_send_miscdev | ||
176 | * @data: Data to send to daemon; may be NULL | ||
177 | * @data_size: Amount of data to send to daemon | ||
178 | * @msg_ctx: Message context, which is used to handle the reply. If | ||
179 | * this is NULL, then we do not expect a reply. | ||
180 | * @msg_type: Type of message | ||
181 | * @msg_flags: Flags for message | ||
182 | * @daemon: eCryptfs daemon object | ||
183 | * | ||
184 | * Add msg_ctx to queue and then, if it exists, notify the blocked | ||
185 | * miscdevess about the data being available. Must be called with | ||
186 | * ecryptfs_daemon_hash_mux held. | ||
187 | * | ||
188 | * Returns zero on success; non-zero otherwise | ||
189 | */ | ||
190 | int ecryptfs_send_miscdev(char *data, size_t data_size, | ||
191 | struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, | ||
192 | u16 msg_flags, struct ecryptfs_daemon *daemon) | ||
193 | { | ||
194 | int rc = 0; | ||
195 | |||
196 | mutex_lock(&msg_ctx->mux); | ||
197 | if (data) { | ||
198 | msg_ctx->msg = kmalloc((sizeof(*msg_ctx->msg) + data_size), | ||
199 | GFP_KERNEL); | ||
200 | if (!msg_ctx->msg) { | ||
201 | rc = -ENOMEM; | ||
202 | printk(KERN_ERR "%s: Out of memory whilst attempting " | ||
203 | "to kmalloc(%Zd, GFP_KERNEL)\n", __func__, | ||
204 | (sizeof(*msg_ctx->msg) + data_size)); | ||
205 | goto out_unlock; | ||
206 | } | ||
207 | } else | ||
208 | msg_ctx->msg = NULL; | ||
209 | msg_ctx->msg->index = msg_ctx->index; | ||
210 | msg_ctx->msg->data_len = data_size; | ||
211 | msg_ctx->type = msg_type; | ||
212 | if (data) { | ||
213 | memcpy(msg_ctx->msg->data, data, data_size); | ||
214 | msg_ctx->msg_size = (sizeof(*msg_ctx->msg) + data_size); | ||
215 | } else | ||
216 | msg_ctx->msg_size = 0; | ||
217 | mutex_lock(&daemon->mux); | ||
218 | list_add_tail(&msg_ctx->daemon_out_list, &daemon->msg_ctx_out_queue); | ||
219 | daemon->num_queued_msg_ctx++; | ||
220 | wake_up_interruptible(&daemon->wait); | ||
221 | mutex_unlock(&daemon->mux); | ||
222 | out_unlock: | ||
223 | mutex_unlock(&msg_ctx->mux); | ||
224 | return rc; | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * ecryptfs_miscdev_read - format and send message from queue | ||
229 | * @file: fs/ecryptfs/euid miscdevfs handle (ignored) | ||
230 | * @buf: User buffer into which to copy the next message on the daemon queue | ||
231 | * @count: Amount of space available in @buf | ||
232 | * @ppos: Offset in file (ignored) | ||
233 | * | ||
234 | * Pulls the most recent message from the daemon queue, formats it for | ||
235 | * being sent via a miscdevfs handle, and copies it into @buf | ||
236 | * | ||
237 | * Returns the number of bytes copied into the user buffer | ||
238 | */ | ||
239 | static ssize_t | ||
240 | ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count, | ||
241 | loff_t *ppos) | ||
242 | { | ||
243 | struct ecryptfs_daemon *daemon; | ||
244 | struct ecryptfs_msg_ctx *msg_ctx; | ||
245 | size_t packet_length_size; | ||
246 | u32 counter_nbo; | ||
247 | char packet_length[3]; | ||
248 | size_t i; | ||
249 | size_t total_length; | ||
250 | int rc; | ||
251 | |||
252 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
253 | /* TODO: Just use file->private_data? */ | ||
254 | rc = ecryptfs_find_daemon_by_euid(&daemon, current->euid, | ||
255 | current->nsproxy->user_ns); | ||
256 | BUG_ON(rc || !daemon); | ||
257 | mutex_lock(&daemon->mux); | ||
258 | if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { | ||
259 | rc = 0; | ||
260 | printk(KERN_WARNING "%s: Attempt to read from zombified " | ||
261 | "daemon\n", __func__); | ||
262 | goto out_unlock_daemon; | ||
263 | } | ||
264 | if (daemon->flags & ECRYPTFS_DAEMON_IN_READ) { | ||
265 | rc = 0; | ||
266 | goto out_unlock_daemon; | ||
267 | } | ||
268 | /* This daemon will not go away so long as this flag is set */ | ||
269 | daemon->flags |= ECRYPTFS_DAEMON_IN_READ; | ||
270 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
271 | check_list: | ||
272 | if (list_empty(&daemon->msg_ctx_out_queue)) { | ||
273 | mutex_unlock(&daemon->mux); | ||
274 | rc = wait_event_interruptible( | ||
275 | daemon->wait, !list_empty(&daemon->msg_ctx_out_queue)); | ||
276 | mutex_lock(&daemon->mux); | ||
277 | if (rc < 0) { | ||
278 | rc = 0; | ||
279 | goto out_unlock_daemon; | ||
280 | } | ||
281 | } | ||
282 | if (daemon->flags & ECRYPTFS_DAEMON_ZOMBIE) { | ||
283 | rc = 0; | ||
284 | goto out_unlock_daemon; | ||
285 | } | ||
286 | if (list_empty(&daemon->msg_ctx_out_queue)) { | ||
287 | /* Something else jumped in since the | ||
288 | * wait_event_interruptable() and removed the | ||
289 | * message from the queue; try again */ | ||
290 | goto check_list; | ||
291 | } | ||
292 | BUG_ON(current->euid != daemon->euid); | ||
293 | BUG_ON(current->nsproxy->user_ns != daemon->user_ns); | ||
294 | BUG_ON(task_pid(current) != daemon->pid); | ||
295 | msg_ctx = list_first_entry(&daemon->msg_ctx_out_queue, | ||
296 | struct ecryptfs_msg_ctx, daemon_out_list); | ||
297 | BUG_ON(!msg_ctx); | ||
298 | mutex_lock(&msg_ctx->mux); | ||
299 | if (msg_ctx->msg) { | ||
300 | rc = ecryptfs_write_packet_length(packet_length, | ||
301 | msg_ctx->msg_size, | ||
302 | &packet_length_size); | ||
303 | if (rc) { | ||
304 | rc = 0; | ||
305 | printk(KERN_WARNING "%s: Error writing packet length; " | ||
306 | "rc = [%d]\n", __func__, rc); | ||
307 | goto out_unlock_msg_ctx; | ||
308 | } | ||
309 | } else { | ||
310 | packet_length_size = 0; | ||
311 | msg_ctx->msg_size = 0; | ||
312 | } | ||
313 | /* miscdevfs packet format: | ||
314 | * Octet 0: Type | ||
315 | * Octets 1-4: network byte order msg_ctx->counter | ||
316 | * Octets 5-N0: Size of struct ecryptfs_message to follow | ||
317 | * Octets N0-N1: struct ecryptfs_message (including data) | ||
318 | * | ||
319 | * Octets 5-N1 not written if the packet type does not | ||
320 | * include a message */ | ||
321 | total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size); | ||
322 | if (count < total_length) { | ||
323 | rc = 0; | ||
324 | printk(KERN_WARNING "%s: Only given user buffer of " | ||
325 | "size [%Zd], but we need [%Zd] to read the " | ||
326 | "pending message\n", __func__, count, total_length); | ||
327 | goto out_unlock_msg_ctx; | ||
328 | } | ||
329 | i = 0; | ||
330 | buf[i++] = msg_ctx->type; | ||
331 | counter_nbo = cpu_to_be32(msg_ctx->counter); | ||
332 | memcpy(&buf[i], (char *)&counter_nbo, 4); | ||
333 | i += 4; | ||
334 | if (msg_ctx->msg) { | ||
335 | memcpy(&buf[i], packet_length, packet_length_size); | ||
336 | i += packet_length_size; | ||
337 | rc = copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size); | ||
338 | if (rc) { | ||
339 | printk(KERN_ERR "%s: copy_to_user returned error " | ||
340 | "[%d]\n", __func__, rc); | ||
341 | goto out_unlock_msg_ctx; | ||
342 | } | ||
343 | i += msg_ctx->msg_size; | ||
344 | } | ||
345 | rc = i; | ||
346 | list_del(&msg_ctx->daemon_out_list); | ||
347 | kfree(msg_ctx->msg); | ||
348 | msg_ctx->msg = NULL; | ||
349 | /* We do not expect a reply from the userspace daemon for any | ||
350 | * message type other than ECRYPTFS_MSG_REQUEST */ | ||
351 | if (msg_ctx->type != ECRYPTFS_MSG_REQUEST) | ||
352 | ecryptfs_msg_ctx_alloc_to_free(msg_ctx); | ||
353 | out_unlock_msg_ctx: | ||
354 | mutex_unlock(&msg_ctx->mux); | ||
355 | out_unlock_daemon: | ||
356 | daemon->flags &= ~ECRYPTFS_DAEMON_IN_READ; | ||
357 | mutex_unlock(&daemon->mux); | ||
358 | return rc; | ||
359 | } | ||
360 | |||
361 | /** | ||
362 | * ecryptfs_miscdev_helo | ||
363 | * @euid: effective user id of miscdevess sending helo packet | ||
364 | * @user_ns: The namespace in which @euid applies | ||
365 | * @pid: miscdevess id of miscdevess sending helo packet | ||
366 | * | ||
367 | * Returns zero on success; non-zero otherwise | ||
368 | */ | ||
369 | static int ecryptfs_miscdev_helo(uid_t euid, struct user_namespace *user_ns, | ||
370 | struct pid *pid) | ||
371 | { | ||
372 | int rc; | ||
373 | |||
374 | rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_MISCDEV, euid, user_ns, | ||
375 | pid); | ||
376 | if (rc) | ||
377 | printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc); | ||
378 | return rc; | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * ecryptfs_miscdev_quit | ||
383 | * @euid: effective user id of miscdevess sending quit packet | ||
384 | * @user_ns: The namespace in which @euid applies | ||
385 | * @pid: miscdevess id of miscdevess sending quit packet | ||
386 | * | ||
387 | * Returns zero on success; non-zero otherwise | ||
388 | */ | ||
389 | static int ecryptfs_miscdev_quit(uid_t euid, struct user_namespace *user_ns, | ||
390 | struct pid *pid) | ||
391 | { | ||
392 | int rc; | ||
393 | |||
394 | rc = ecryptfs_process_quit(euid, user_ns, pid); | ||
395 | if (rc) | ||
396 | printk(KERN_WARNING | ||
397 | "Error processing QUIT message; rc = [%d]\n", rc); | ||
398 | return rc; | ||
399 | } | ||
400 | |||
401 | /** | ||
402 | * ecryptfs_miscdev_response - miscdevess response to message previously sent to daemon | ||
403 | * @data: Bytes comprising struct ecryptfs_message | ||
404 | * @data_size: sizeof(struct ecryptfs_message) + data len | ||
405 | * @euid: Effective user id of miscdevess sending the miscdev response | ||
406 | * @user_ns: The namespace in which @euid applies | ||
407 | * @pid: Miscdevess id of miscdevess sending the miscdev response | ||
408 | * @seq: Sequence number for miscdev response packet | ||
409 | * | ||
410 | * Returns zero on success; non-zero otherwise | ||
411 | */ | ||
412 | static int ecryptfs_miscdev_response(char *data, size_t data_size, | ||
413 | uid_t euid, struct user_namespace *user_ns, | ||
414 | struct pid *pid, u32 seq) | ||
415 | { | ||
416 | struct ecryptfs_message *msg = (struct ecryptfs_message *)data; | ||
417 | int rc; | ||
418 | |||
419 | if ((sizeof(*msg) + msg->data_len) != data_size) { | ||
420 | printk(KERN_WARNING "%s: (sizeof(*msg) + msg->data_len) = " | ||
421 | "[%Zd]; data_size = [%Zd]. Invalid packet.\n", __func__, | ||
422 | (sizeof(*msg) + msg->data_len), data_size); | ||
423 | rc = -EINVAL; | ||
424 | goto out; | ||
425 | } | ||
426 | rc = ecryptfs_process_response(msg, euid, user_ns, pid, seq); | ||
427 | if (rc) | ||
428 | printk(KERN_ERR | ||
429 | "Error processing response message; rc = [%d]\n", rc); | ||
430 | out: | ||
431 | return rc; | ||
432 | } | ||
433 | |||
434 | /** | ||
435 | * ecryptfs_miscdev_write - handle write to daemon miscdev handle | ||
436 | * @file: File for misc dev handle (ignored) | ||
437 | * @buf: Buffer containing user data | ||
438 | * @count: Amount of data in @buf | ||
439 | * @ppos: Pointer to offset in file (ignored) | ||
440 | * | ||
441 | * miscdevfs packet format: | ||
442 | * Octet 0: Type | ||
443 | * Octets 1-4: network byte order msg_ctx->counter (0's for non-response) | ||
444 | * Octets 5-N0: Size of struct ecryptfs_message to follow | ||
445 | * Octets N0-N1: struct ecryptfs_message (including data) | ||
446 | * | ||
447 | * Returns the number of bytes read from @buf | ||
448 | */ | ||
449 | static ssize_t | ||
450 | ecryptfs_miscdev_write(struct file *file, const char __user *buf, | ||
451 | size_t count, loff_t *ppos) | ||
452 | { | ||
453 | u32 counter_nbo, seq; | ||
454 | size_t packet_size, packet_size_length, i; | ||
455 | ssize_t sz = 0; | ||
456 | char *data; | ||
457 | int rc; | ||
458 | |||
459 | if (count == 0) | ||
460 | goto out; | ||
461 | data = kmalloc(count, GFP_KERNEL); | ||
462 | if (!data) { | ||
463 | printk(KERN_ERR "%s: Out of memory whilst attempting to " | ||
464 | "kmalloc([%Zd], GFP_KERNEL)\n", __func__, count); | ||
465 | goto out; | ||
466 | } | ||
467 | rc = copy_from_user(data, buf, count); | ||
468 | if (rc) { | ||
469 | printk(KERN_ERR "%s: copy_from_user returned error [%d]\n", | ||
470 | __func__, rc); | ||
471 | goto out_free; | ||
472 | } | ||
473 | sz = count; | ||
474 | i = 0; | ||
475 | switch (data[i++]) { | ||
476 | case ECRYPTFS_MSG_RESPONSE: | ||
477 | if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) { | ||
478 | printk(KERN_WARNING "%s: Minimum acceptable packet " | ||
479 | "size is [%Zd], but amount of data written is " | ||
480 | "only [%Zd]. Discarding response packet.\n", | ||
481 | __func__, | ||
482 | (1 + 4 + 1 + sizeof(struct ecryptfs_message)), | ||
483 | count); | ||
484 | goto out_free; | ||
485 | } | ||
486 | memcpy((char *)&counter_nbo, &data[i], 4); | ||
487 | seq = be32_to_cpu(counter_nbo); | ||
488 | i += 4; | ||
489 | rc = ecryptfs_parse_packet_length(&data[i], &packet_size, | ||
490 | &packet_size_length); | ||
491 | if (rc) { | ||
492 | printk(KERN_WARNING "%s: Error parsing packet length; " | ||
493 | "rc = [%d]\n", __func__, rc); | ||
494 | goto out_free; | ||
495 | } | ||
496 | i += packet_size_length; | ||
497 | if ((1 + 4 + packet_size_length + packet_size) != count) { | ||
498 | printk(KERN_WARNING "%s: (1 + packet_size_length([%Zd])" | ||
499 | " + packet_size([%Zd]))([%Zd]) != " | ||
500 | "count([%Zd]). Invalid packet format.\n", | ||
501 | __func__, packet_size_length, packet_size, | ||
502 | (1 + packet_size_length + packet_size), count); | ||
503 | goto out_free; | ||
504 | } | ||
505 | rc = ecryptfs_miscdev_response(&data[i], packet_size, | ||
506 | current->euid, | ||
507 | current->nsproxy->user_ns, | ||
508 | task_pid(current), seq); | ||
509 | if (rc) | ||
510 | printk(KERN_WARNING "%s: Failed to deliver miscdev " | ||
511 | "response to requesting operation; rc = [%d]\n", | ||
512 | __func__, rc); | ||
513 | break; | ||
514 | case ECRYPTFS_MSG_HELO: | ||
515 | rc = ecryptfs_miscdev_helo(current->euid, | ||
516 | current->nsproxy->user_ns, | ||
517 | task_pid(current)); | ||
518 | if (rc) { | ||
519 | printk(KERN_ERR "%s: Error attempting to process " | ||
520 | "helo from pid [0x%p]; rc = [%d]\n", __func__, | ||
521 | task_pid(current), rc); | ||
522 | goto out_free; | ||
523 | } | ||
524 | break; | ||
525 | case ECRYPTFS_MSG_QUIT: | ||
526 | rc = ecryptfs_miscdev_quit(current->euid, | ||
527 | current->nsproxy->user_ns, | ||
528 | task_pid(current)); | ||
529 | if (rc) { | ||
530 | printk(KERN_ERR "%s: Error attempting to process " | ||
531 | "quit from pid [0x%p]; rc = [%d]\n", __func__, | ||
532 | task_pid(current), rc); | ||
533 | goto out_free; | ||
534 | } | ||
535 | break; | ||
536 | default: | ||
537 | ecryptfs_printk(KERN_WARNING, "Dropping miscdev " | ||
538 | "message of unrecognized type [%d]\n", | ||
539 | data[0]); | ||
540 | break; | ||
541 | } | ||
542 | out_free: | ||
543 | kfree(data); | ||
544 | out: | ||
545 | return sz; | ||
546 | } | ||
547 | |||
548 | |||
/* File operations for the eCryptfs misc device handle held by the
 * userspace daemon: open/release register and unregister the daemon,
 * poll reports a pending packet, read delivers the oldest queued
 * kernel->daemon packet, and write accepts daemon packets (responses,
 * HELO, QUIT). */
static const struct file_operations ecryptfs_miscdev_fops = {
	.open    = ecryptfs_miscdev_open,
	.poll    = ecryptfs_miscdev_poll,
	.read    = ecryptfs_miscdev_read,
	.write   = ecryptfs_miscdev_write,
	.release = ecryptfs_miscdev_release,
};
556 | |||
/* The "ecryptfs" misc device node (dynamically assigned minor number)
 * through which the userspace daemon talks to the kernel module. */
static struct miscdevice ecryptfs_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name  = "ecryptfs",
	.fops  = &ecryptfs_miscdev_fops
};
562 | |||
563 | /** | ||
564 | * ecryptfs_init_ecryptfs_miscdev | ||
565 | * | ||
566 | * Messages sent to the userspace daemon from the kernel are placed on | ||
567 | * a queue associated with the daemon. The next read against the | ||
568 | * miscdev handle by that daemon will return the oldest message placed | ||
569 | * on the message queue for the daemon. | ||
570 | * | ||
571 | * Returns zero on success; non-zero otherwise | ||
572 | */ | ||
573 | int ecryptfs_init_ecryptfs_miscdev(void) | ||
574 | { | ||
575 | int rc; | ||
576 | |||
577 | atomic_set(&ecryptfs_num_miscdev_opens, 0); | ||
578 | mutex_lock(&ecryptfs_daemon_hash_mux); | ||
579 | rc = misc_register(&ecryptfs_miscdev); | ||
580 | if (rc) | ||
581 | printk(KERN_ERR "%s: Failed to register miscellaneous device " | ||
582 | "for communications with userspace daemons; rc = [%d]\n", | ||
583 | __func__, rc); | ||
584 | mutex_unlock(&ecryptfs_daemon_hash_mux); | ||
585 | return rc; | ||
586 | } | ||
587 | |||
/**
 * ecryptfs_destroy_ecryptfs_miscdev
 *
 * All of the daemons must be exorcised prior to calling this
 * function.
 */
void ecryptfs_destroy_ecryptfs_miscdev(void)
{
	/* Every daemon handle must already have been released; an open
	 * handle here would leave a dangling reference to the device */
	BUG_ON(atomic_read(&ecryptfs_num_miscdev_opens) != 0);
	misc_deregister(&ecryptfs_miscdev);
}
diff --git a/fs/ecryptfs/mmap.c b/fs/ecryptfs/mmap.c index 6df1debdccce..2b6fe1e6e8ba 100644 --- a/fs/ecryptfs/mmap.c +++ b/fs/ecryptfs/mmap.c | |||
@@ -153,7 +153,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
153 | flush_dcache_page(page); | 153 | flush_dcache_page(page); |
154 | if (rc) { | 154 | if (rc) { |
155 | printk(KERN_ERR "%s: Error reading xattr " | 155 | printk(KERN_ERR "%s: Error reading xattr " |
156 | "region; rc = [%d]\n", __FUNCTION__, rc); | 156 | "region; rc = [%d]\n", __func__, rc); |
157 | goto out; | 157 | goto out; |
158 | } | 158 | } |
159 | } else { | 159 | } else { |
@@ -169,7 +169,7 @@ ecryptfs_copy_up_encrypted_with_header(struct page *page, | |||
169 | if (rc) { | 169 | if (rc) { |
170 | printk(KERN_ERR "%s: Error attempting to read " | 170 | printk(KERN_ERR "%s: Error attempting to read " |
171 | "extent at offset [%lld] in the lower " | 171 | "extent at offset [%lld] in the lower " |
172 | "file; rc = [%d]\n", __FUNCTION__, | 172 | "file; rc = [%d]\n", __func__, |
173 | lower_offset, rc); | 173 | lower_offset, rc); |
174 | goto out; | 174 | goto out; |
175 | } | 175 | } |
@@ -212,7 +212,7 @@ static int ecryptfs_readpage(struct file *file, struct page *page) | |||
212 | "the encrypted content from the lower " | 212 | "the encrypted content from the lower " |
213 | "file whilst inserting the metadata " | 213 | "file whilst inserting the metadata " |
214 | "from the xattr into the header; rc = " | 214 | "from the xattr into the header; rc = " |
215 | "[%d]\n", __FUNCTION__, rc); | 215 | "[%d]\n", __func__, rc); |
216 | goto out; | 216 | goto out; |
217 | } | 217 | } |
218 | 218 | ||
@@ -293,7 +293,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page, | |||
293 | if (rc) { | 293 | if (rc) { |
294 | printk(KERN_ERR "%s: Error attemping to read " | 294 | printk(KERN_ERR "%s: Error attemping to read " |
295 | "lower page segment; rc = [%d]\n", | 295 | "lower page segment; rc = [%d]\n", |
296 | __FUNCTION__, rc); | 296 | __func__, rc); |
297 | ClearPageUptodate(page); | 297 | ClearPageUptodate(page); |
298 | goto out; | 298 | goto out; |
299 | } else | 299 | } else |
@@ -308,7 +308,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page, | |||
308 | "from the lower file whilst " | 308 | "from the lower file whilst " |
309 | "inserting the metadata from " | 309 | "inserting the metadata from " |
310 | "the xattr into the header; rc " | 310 | "the xattr into the header; rc " |
311 | "= [%d]\n", __FUNCTION__, rc); | 311 | "= [%d]\n", __func__, rc); |
312 | ClearPageUptodate(page); | 312 | ClearPageUptodate(page); |
313 | goto out; | 313 | goto out; |
314 | } | 314 | } |
@@ -320,7 +320,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page, | |||
320 | if (rc) { | 320 | if (rc) { |
321 | printk(KERN_ERR "%s: Error reading " | 321 | printk(KERN_ERR "%s: Error reading " |
322 | "page; rc = [%d]\n", | 322 | "page; rc = [%d]\n", |
323 | __FUNCTION__, rc); | 323 | __func__, rc); |
324 | ClearPageUptodate(page); | 324 | ClearPageUptodate(page); |
325 | goto out; | 325 | goto out; |
326 | } | 326 | } |
@@ -331,7 +331,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page, | |||
331 | if (rc) { | 331 | if (rc) { |
332 | printk(KERN_ERR "%s: Error decrypting page " | 332 | printk(KERN_ERR "%s: Error decrypting page " |
333 | "at index [%ld]; rc = [%d]\n", | 333 | "at index [%ld]; rc = [%d]\n", |
334 | __FUNCTION__, page->index, rc); | 334 | __func__, page->index, rc); |
335 | ClearPageUptodate(page); | 335 | ClearPageUptodate(page); |
336 | goto out; | 336 | goto out; |
337 | } | 337 | } |
@@ -348,7 +348,7 @@ static int ecryptfs_prepare_write(struct file *file, struct page *page, | |||
348 | if (rc) { | 348 | if (rc) { |
349 | printk(KERN_ERR "%s: Error on attempt to " | 349 | printk(KERN_ERR "%s: Error on attempt to " |
350 | "truncate to (higher) offset [%lld];" | 350 | "truncate to (higher) offset [%lld];" |
351 | " rc = [%d]\n", __FUNCTION__, | 351 | " rc = [%d]\n", __func__, |
352 | prev_page_end_size, rc); | 352 | prev_page_end_size, rc); |
353 | goto out; | 353 | goto out; |
354 | } | 354 | } |
@@ -389,7 +389,7 @@ static int ecryptfs_write_inode_size_to_header(struct inode *ecryptfs_inode) | |||
389 | kfree(file_size_virt); | 389 | kfree(file_size_virt); |
390 | if (rc) | 390 | if (rc) |
391 | printk(KERN_ERR "%s: Error writing file size to header; " | 391 | printk(KERN_ERR "%s: Error writing file size to header; " |
392 | "rc = [%d]\n", __FUNCTION__, rc); | 392 | "rc = [%d]\n", __func__, rc); |
393 | out: | 393 | out: |
394 | return rc; | 394 | return rc; |
395 | } | 395 | } |
diff --git a/fs/ecryptfs/netlink.c b/fs/ecryptfs/netlink.c index f638a698dc52..e0abad62b395 100644 --- a/fs/ecryptfs/netlink.c +++ b/fs/ecryptfs/netlink.c | |||
@@ -44,8 +44,8 @@ static struct sock *ecryptfs_nl_sock; | |||
44 | * upon sending the message; non-zero upon error. | 44 | * upon sending the message; non-zero upon error. |
45 | */ | 45 | */ |
46 | int ecryptfs_send_netlink(char *data, int data_len, | 46 | int ecryptfs_send_netlink(char *data, int data_len, |
47 | struct ecryptfs_msg_ctx *msg_ctx, u16 msg_type, | 47 | struct ecryptfs_msg_ctx *msg_ctx, u8 msg_type, |
48 | u16 msg_flags, pid_t daemon_pid) | 48 | u16 msg_flags, struct pid *daemon_pid) |
49 | { | 49 | { |
50 | struct sk_buff *skb; | 50 | struct sk_buff *skb; |
51 | struct nlmsghdr *nlh; | 51 | struct nlmsghdr *nlh; |
@@ -60,7 +60,7 @@ int ecryptfs_send_netlink(char *data, int data_len, | |||
60 | ecryptfs_printk(KERN_ERR, "Failed to allocate socket buffer\n"); | 60 | ecryptfs_printk(KERN_ERR, "Failed to allocate socket buffer\n"); |
61 | goto out; | 61 | goto out; |
62 | } | 62 | } |
63 | nlh = NLMSG_PUT(skb, daemon_pid, msg_ctx ? msg_ctx->counter : 0, | 63 | nlh = NLMSG_PUT(skb, pid_nr(daemon_pid), msg_ctx ? msg_ctx->counter : 0, |
64 | msg_type, payload_len); | 64 | msg_type, payload_len); |
65 | nlh->nlmsg_flags = msg_flags; | 65 | nlh->nlmsg_flags = msg_flags; |
66 | if (msg_ctx && payload_len) { | 66 | if (msg_ctx && payload_len) { |
@@ -69,7 +69,7 @@ int ecryptfs_send_netlink(char *data, int data_len, | |||
69 | msg->data_len = data_len; | 69 | msg->data_len = data_len; |
70 | memcpy(msg->data, data, data_len); | 70 | memcpy(msg->data, data, data_len); |
71 | } | 71 | } |
72 | rc = netlink_unicast(ecryptfs_nl_sock, skb, daemon_pid, 0); | 72 | rc = netlink_unicast(ecryptfs_nl_sock, skb, pid_nr(daemon_pid), 0); |
73 | if (rc < 0) { | 73 | if (rc < 0) { |
74 | ecryptfs_printk(KERN_ERR, "Failed to send eCryptfs netlink " | 74 | ecryptfs_printk(KERN_ERR, "Failed to send eCryptfs netlink " |
75 | "message; rc = [%d]\n", rc); | 75 | "message; rc = [%d]\n", rc); |
@@ -99,6 +99,7 @@ static int ecryptfs_process_nl_response(struct sk_buff *skb) | |||
99 | { | 99 | { |
100 | struct nlmsghdr *nlh = nlmsg_hdr(skb); | 100 | struct nlmsghdr *nlh = nlmsg_hdr(skb); |
101 | struct ecryptfs_message *msg = NLMSG_DATA(nlh); | 101 | struct ecryptfs_message *msg = NLMSG_DATA(nlh); |
102 | struct pid *pid; | ||
102 | int rc; | 103 | int rc; |
103 | 104 | ||
104 | if (skb->len - NLMSG_HDRLEN - sizeof(*msg) != msg->data_len) { | 105 | if (skb->len - NLMSG_HDRLEN - sizeof(*msg) != msg->data_len) { |
@@ -107,8 +108,10 @@ static int ecryptfs_process_nl_response(struct sk_buff *skb) | |||
107 | "incorrectly specified data length\n"); | 108 | "incorrectly specified data length\n"); |
108 | goto out; | 109 | goto out; |
109 | } | 110 | } |
110 | rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid, | 111 | pid = find_get_pid(NETLINK_CREDS(skb)->pid); |
111 | NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq); | 112 | rc = ecryptfs_process_response(msg, NETLINK_CREDS(skb)->uid, NULL, |
113 | pid, nlh->nlmsg_seq); | ||
114 | put_pid(pid); | ||
112 | if (rc) | 115 | if (rc) |
113 | printk(KERN_ERR | 116 | printk(KERN_ERR |
114 | "Error processing response message; rc = [%d]\n", rc); | 117 | "Error processing response message; rc = [%d]\n", rc); |
@@ -126,11 +129,13 @@ out: | |||
126 | */ | 129 | */ |
127 | static int ecryptfs_process_nl_helo(struct sk_buff *skb) | 130 | static int ecryptfs_process_nl_helo(struct sk_buff *skb) |
128 | { | 131 | { |
132 | struct pid *pid; | ||
129 | int rc; | 133 | int rc; |
130 | 134 | ||
135 | pid = find_get_pid(NETLINK_CREDS(skb)->pid); | ||
131 | rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_NETLINK, | 136 | rc = ecryptfs_process_helo(ECRYPTFS_TRANSPORT_NETLINK, |
132 | NETLINK_CREDS(skb)->uid, | 137 | NETLINK_CREDS(skb)->uid, NULL, pid); |
133 | NETLINK_CREDS(skb)->pid); | 138 | put_pid(pid); |
134 | if (rc) | 139 | if (rc) |
135 | printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc); | 140 | printk(KERN_WARNING "Error processing HELO; rc = [%d]\n", rc); |
136 | return rc; | 141 | return rc; |
@@ -147,10 +152,12 @@ static int ecryptfs_process_nl_helo(struct sk_buff *skb) | |||
147 | */ | 152 | */ |
148 | static int ecryptfs_process_nl_quit(struct sk_buff *skb) | 153 | static int ecryptfs_process_nl_quit(struct sk_buff *skb) |
149 | { | 154 | { |
155 | struct pid *pid; | ||
150 | int rc; | 156 | int rc; |
151 | 157 | ||
152 | rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid, | 158 | pid = find_get_pid(NETLINK_CREDS(skb)->pid); |
153 | NETLINK_CREDS(skb)->pid); | 159 | rc = ecryptfs_process_quit(NETLINK_CREDS(skb)->uid, NULL, pid); |
160 | put_pid(pid); | ||
154 | if (rc) | 161 | if (rc) |
155 | printk(KERN_WARNING | 162 | printk(KERN_WARNING |
156 | "Error processing QUIT message; rc = [%d]\n", rc); | 163 | "Error processing QUIT message; rc = [%d]\n", rc); |
@@ -176,20 +183,20 @@ static void ecryptfs_receive_nl_message(struct sk_buff *skb) | |||
176 | goto free; | 183 | goto free; |
177 | } | 184 | } |
178 | switch (nlh->nlmsg_type) { | 185 | switch (nlh->nlmsg_type) { |
179 | case ECRYPTFS_NLMSG_RESPONSE: | 186 | case ECRYPTFS_MSG_RESPONSE: |
180 | if (ecryptfs_process_nl_response(skb)) { | 187 | if (ecryptfs_process_nl_response(skb)) { |
181 | ecryptfs_printk(KERN_WARNING, "Failed to " | 188 | ecryptfs_printk(KERN_WARNING, "Failed to " |
182 | "deliver netlink response to " | 189 | "deliver netlink response to " |
183 | "requesting operation\n"); | 190 | "requesting operation\n"); |
184 | } | 191 | } |
185 | break; | 192 | break; |
186 | case ECRYPTFS_NLMSG_HELO: | 193 | case ECRYPTFS_MSG_HELO: |
187 | if (ecryptfs_process_nl_helo(skb)) { | 194 | if (ecryptfs_process_nl_helo(skb)) { |
188 | ecryptfs_printk(KERN_WARNING, "Failed to " | 195 | ecryptfs_printk(KERN_WARNING, "Failed to " |
189 | "fulfill HELO request\n"); | 196 | "fulfill HELO request\n"); |
190 | } | 197 | } |
191 | break; | 198 | break; |
192 | case ECRYPTFS_NLMSG_QUIT: | 199 | case ECRYPTFS_MSG_QUIT: |
193 | if (ecryptfs_process_nl_quit(skb)) { | 200 | if (ecryptfs_process_nl_quit(skb)) { |
194 | ecryptfs_printk(KERN_WARNING, "Failed to " | 201 | ecryptfs_printk(KERN_WARNING, "Failed to " |
195 | "fulfill QUIT request\n"); | 202 | "fulfill QUIT request\n"); |
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c index 0c4928623bbc..ebf55150be56 100644 --- a/fs/ecryptfs/read_write.c +++ b/fs/ecryptfs/read_write.c | |||
@@ -55,7 +55,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data, | |||
55 | set_fs(fs_save); | 55 | set_fs(fs_save); |
56 | if (octets_written < 0) { | 56 | if (octets_written < 0) { |
57 | printk(KERN_ERR "%s: octets_written = [%td]; " | 57 | printk(KERN_ERR "%s: octets_written = [%td]; " |
58 | "expected [%td]\n", __FUNCTION__, octets_written, size); | 58 | "expected [%td]\n", __func__, octets_written, size); |
59 | rc = -EINVAL; | 59 | rc = -EINVAL; |
60 | } | 60 | } |
61 | mutex_unlock(&inode_info->lower_file_mutex); | 61 | mutex_unlock(&inode_info->lower_file_mutex); |
@@ -153,7 +153,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset, | |||
153 | rc = PTR_ERR(ecryptfs_page); | 153 | rc = PTR_ERR(ecryptfs_page); |
154 | printk(KERN_ERR "%s: Error getting page at " | 154 | printk(KERN_ERR "%s: Error getting page at " |
155 | "index [%ld] from eCryptfs inode " | 155 | "index [%ld] from eCryptfs inode " |
156 | "mapping; rc = [%d]\n", __FUNCTION__, | 156 | "mapping; rc = [%d]\n", __func__, |
157 | ecryptfs_page_idx, rc); | 157 | ecryptfs_page_idx, rc); |
158 | goto out; | 158 | goto out; |
159 | } | 159 | } |
@@ -165,7 +165,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset, | |||
165 | if (rc) { | 165 | if (rc) { |
166 | printk(KERN_ERR "%s: Error decrypting " | 166 | printk(KERN_ERR "%s: Error decrypting " |
167 | "page; rc = [%d]\n", | 167 | "page; rc = [%d]\n", |
168 | __FUNCTION__, rc); | 168 | __func__, rc); |
169 | ClearPageUptodate(ecryptfs_page); | 169 | ClearPageUptodate(ecryptfs_page); |
170 | page_cache_release(ecryptfs_page); | 170 | page_cache_release(ecryptfs_page); |
171 | goto out; | 171 | goto out; |
@@ -202,7 +202,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset, | |||
202 | page_cache_release(ecryptfs_page); | 202 | page_cache_release(ecryptfs_page); |
203 | if (rc) { | 203 | if (rc) { |
204 | printk(KERN_ERR "%s: Error encrypting " | 204 | printk(KERN_ERR "%s: Error encrypting " |
205 | "page; rc = [%d]\n", __FUNCTION__, rc); | 205 | "page; rc = [%d]\n", __func__, rc); |
206 | goto out; | 206 | goto out; |
207 | } | 207 | } |
208 | pos += num_bytes; | 208 | pos += num_bytes; |
@@ -254,7 +254,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size, | |||
254 | set_fs(fs_save); | 254 | set_fs(fs_save); |
255 | if (octets_read < 0) { | 255 | if (octets_read < 0) { |
256 | printk(KERN_ERR "%s: octets_read = [%td]; " | 256 | printk(KERN_ERR "%s: octets_read = [%td]; " |
257 | "expected [%td]\n", __FUNCTION__, octets_read, size); | 257 | "expected [%td]\n", __func__, octets_read, size); |
258 | rc = -EINVAL; | 258 | rc = -EINVAL; |
259 | } | 259 | } |
260 | mutex_unlock(&inode_info->lower_file_mutex); | 260 | mutex_unlock(&inode_info->lower_file_mutex); |
@@ -327,7 +327,7 @@ int ecryptfs_read(char *data, loff_t offset, size_t size, | |||
327 | printk(KERN_ERR "%s: Attempt to read data past the end of the " | 327 | printk(KERN_ERR "%s: Attempt to read data past the end of the " |
328 | "file; offset = [%lld]; size = [%td]; " | 328 | "file; offset = [%lld]; size = [%td]; " |
329 | "ecryptfs_file_size = [%lld]\n", | 329 | "ecryptfs_file_size = [%lld]\n", |
330 | __FUNCTION__, offset, size, ecryptfs_file_size); | 330 | __func__, offset, size, ecryptfs_file_size); |
331 | goto out; | 331 | goto out; |
332 | } | 332 | } |
333 | pos = offset; | 333 | pos = offset; |
@@ -345,14 +345,14 @@ int ecryptfs_read(char *data, loff_t offset, size_t size, | |||
345 | rc = PTR_ERR(ecryptfs_page); | 345 | rc = PTR_ERR(ecryptfs_page); |
346 | printk(KERN_ERR "%s: Error getting page at " | 346 | printk(KERN_ERR "%s: Error getting page at " |
347 | "index [%ld] from eCryptfs inode " | 347 | "index [%ld] from eCryptfs inode " |
348 | "mapping; rc = [%d]\n", __FUNCTION__, | 348 | "mapping; rc = [%d]\n", __func__, |
349 | ecryptfs_page_idx, rc); | 349 | ecryptfs_page_idx, rc); |
350 | goto out; | 350 | goto out; |
351 | } | 351 | } |
352 | rc = ecryptfs_decrypt_page(ecryptfs_page); | 352 | rc = ecryptfs_decrypt_page(ecryptfs_page); |
353 | if (rc) { | 353 | if (rc) { |
354 | printk(KERN_ERR "%s: Error decrypting " | 354 | printk(KERN_ERR "%s: Error decrypting " |
355 | "page; rc = [%d]\n", __FUNCTION__, rc); | 355 | "page; rc = [%d]\n", __func__, rc); |
356 | ClearPageUptodate(ecryptfs_page); | 356 | ClearPageUptodate(ecryptfs_page); |
357 | page_cache_release(ecryptfs_page); | 357 | page_cache_release(ecryptfs_page); |
358 | goto out; | 358 | goto out; |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index a415f42d32cf..0d237182d721 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -257,25 +257,6 @@ static inline int ep_cmp_ffd(struct epoll_filefd *p1, | |||
257 | (p1->file < p2->file ? -1 : p1->fd - p2->fd)); | 257 | (p1->file < p2->file ? -1 : p1->fd - p2->fd)); |
258 | } | 258 | } |
259 | 259 | ||
260 | /* Special initialization for the RB tree node to detect linkage */ | ||
261 | static inline void ep_rb_initnode(struct rb_node *n) | ||
262 | { | ||
263 | rb_set_parent(n, n); | ||
264 | } | ||
265 | |||
266 | /* Removes a node from the RB tree and marks it for a fast is-linked check */ | ||
267 | static inline void ep_rb_erase(struct rb_node *n, struct rb_root *r) | ||
268 | { | ||
269 | rb_erase(n, r); | ||
270 | rb_set_parent(n, n); | ||
271 | } | ||
272 | |||
273 | /* Fast check to verify that the item is linked to the main RB tree */ | ||
274 | static inline int ep_rb_linked(struct rb_node *n) | ||
275 | { | ||
276 | return rb_parent(n) != n; | ||
277 | } | ||
278 | |||
279 | /* Tells us if the item is currently linked */ | 260 | /* Tells us if the item is currently linked */ |
280 | static inline int ep_is_linked(struct list_head *p) | 261 | static inline int ep_is_linked(struct list_head *p) |
281 | { | 262 | { |
@@ -283,13 +264,13 @@ static inline int ep_is_linked(struct list_head *p) | |||
283 | } | 264 | } |
284 | 265 | ||
285 | /* Get the "struct epitem" from a wait queue pointer */ | 266 | /* Get the "struct epitem" from a wait queue pointer */ |
286 | static inline struct epitem * ep_item_from_wait(wait_queue_t *p) | 267 | static inline struct epitem *ep_item_from_wait(wait_queue_t *p) |
287 | { | 268 | { |
288 | return container_of(p, struct eppoll_entry, wait)->base; | 269 | return container_of(p, struct eppoll_entry, wait)->base; |
289 | } | 270 | } |
290 | 271 | ||
291 | /* Get the "struct epitem" from an epoll queue wrapper */ | 272 | /* Get the "struct epitem" from an epoll queue wrapper */ |
292 | static inline struct epitem * ep_item_from_epqueue(poll_table *p) | 273 | static inline struct epitem *ep_item_from_epqueue(poll_table *p) |
293 | { | 274 | { |
294 | return container_of(p, struct ep_pqueue, pt)->epi; | 275 | return container_of(p, struct ep_pqueue, pt)->epi; |
295 | } | 276 | } |
@@ -411,8 +392,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi) | |||
411 | list_del_init(&epi->fllink); | 392 | list_del_init(&epi->fllink); |
412 | spin_unlock(&file->f_ep_lock); | 393 | spin_unlock(&file->f_ep_lock); |
413 | 394 | ||
414 | if (ep_rb_linked(&epi->rbn)) | 395 | rb_erase(&epi->rbn, &ep->rbr); |
415 | ep_rb_erase(&epi->rbn, &ep->rbr); | ||
416 | 396 | ||
417 | spin_lock_irqsave(&ep->lock, flags); | 397 | spin_lock_irqsave(&ep->lock, flags); |
418 | if (ep_is_linked(&epi->rdllink)) | 398 | if (ep_is_linked(&epi->rdllink)) |
@@ -728,7 +708,6 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, | |||
728 | goto error_return; | 708 | goto error_return; |
729 | 709 | ||
730 | /* Item initialization follow here ... */ | 710 | /* Item initialization follow here ... */ |
731 | ep_rb_initnode(&epi->rbn); | ||
732 | INIT_LIST_HEAD(&epi->rdllink); | 711 | INIT_LIST_HEAD(&epi->rdllink); |
733 | INIT_LIST_HEAD(&epi->fllink); | 712 | INIT_LIST_HEAD(&epi->fllink); |
734 | INIT_LIST_HEAD(&epi->pwqlist); | 713 | INIT_LIST_HEAD(&epi->pwqlist); |
@@ -735,6 +735,7 @@ static int exec_mmap(struct mm_struct *mm) | |||
735 | tsk->active_mm = mm; | 735 | tsk->active_mm = mm; |
736 | activate_mm(active_mm, mm); | 736 | activate_mm(active_mm, mm); |
737 | task_unlock(tsk); | 737 | task_unlock(tsk); |
738 | mm_update_next_owner(mm); | ||
738 | arch_pick_mmap_layout(mm); | 739 | arch_pick_mmap_layout(mm); |
739 | if (old_mm) { | 740 | if (old_mm) { |
740 | up_read(&old_mm->mmap_sem); | 741 | up_read(&old_mm->mmap_sem); |
@@ -963,6 +964,8 @@ int flush_old_exec(struct linux_binprm * bprm) | |||
963 | if (retval) | 964 | if (retval) |
964 | goto out; | 965 | goto out; |
965 | 966 | ||
967 | set_mm_exe_file(bprm->mm, bprm->file); | ||
968 | |||
966 | /* | 969 | /* |
967 | * Release all of the old mmap stuff | 970 | * Release all of the old mmap stuff |
968 | */ | 971 | */ |
@@ -1268,7 +1271,6 @@ int do_execve(char * filename, | |||
1268 | { | 1271 | { |
1269 | struct linux_binprm *bprm; | 1272 | struct linux_binprm *bprm; |
1270 | struct file *file; | 1273 | struct file *file; |
1271 | unsigned long env_p; | ||
1272 | struct files_struct *displaced; | 1274 | struct files_struct *displaced; |
1273 | int retval; | 1275 | int retval; |
1274 | 1276 | ||
@@ -1321,11 +1323,9 @@ int do_execve(char * filename, | |||
1321 | if (retval < 0) | 1323 | if (retval < 0) |
1322 | goto out; | 1324 | goto out; |
1323 | 1325 | ||
1324 | env_p = bprm->p; | ||
1325 | retval = copy_strings(bprm->argc, argv, bprm); | 1326 | retval = copy_strings(bprm->argc, argv, bprm); |
1326 | if (retval < 0) | 1327 | if (retval < 0) |
1327 | goto out; | 1328 | goto out; |
1328 | bprm->argv_len = env_p - bprm->p; | ||
1329 | 1329 | ||
1330 | retval = search_binary_handler(bprm,regs); | 1330 | retval = search_binary_handler(bprm,regs); |
1331 | if (retval >= 0) { | 1331 | if (retval >= 0) { |
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index ef97f19c2f9d..9d57695de746 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -2449,17 +2449,10 @@ static void ext4_mb_history_init(struct super_block *sb) | |||
2449 | int i; | 2449 | int i; |
2450 | 2450 | ||
2451 | if (sbi->s_mb_proc != NULL) { | 2451 | if (sbi->s_mb_proc != NULL) { |
2452 | struct proc_dir_entry *p; | 2452 | proc_create_data("mb_history", S_IRUGO, sbi->s_mb_proc, |
2453 | p = create_proc_entry("mb_history", S_IRUGO, sbi->s_mb_proc); | 2453 | &ext4_mb_seq_history_fops, sb); |
2454 | if (p) { | 2454 | proc_create_data("mb_groups", S_IRUGO, sbi->s_mb_proc, |
2455 | p->proc_fops = &ext4_mb_seq_history_fops; | 2455 | &ext4_mb_seq_groups_fops, sb); |
2456 | p->data = sb; | ||
2457 | } | ||
2458 | p = create_proc_entry("mb_groups", S_IRUGO, sbi->s_mb_proc); | ||
2459 | if (p) { | ||
2460 | p->proc_fops = &ext4_mb_seq_groups_fops; | ||
2461 | p->data = sb; | ||
2462 | } | ||
2463 | } | 2456 | } |
2464 | 2457 | ||
2465 | sbi->s_mb_history_max = 1000; | 2458 | sbi->s_mb_history_max = 1000; |
@@ -2867,7 +2860,6 @@ static void ext4_mb_free_committed_blocks(struct super_block *sb) | |||
2867 | mb_debug("freed %u blocks in %u structures\n", count, count2); | 2860 | mb_debug("freed %u blocks in %u structures\n", count, count2); |
2868 | } | 2861 | } |
2869 | 2862 | ||
2870 | #define EXT4_ROOT "ext4" | ||
2871 | #define EXT4_MB_STATS_NAME "stats" | 2863 | #define EXT4_MB_STATS_NAME "stats" |
2872 | #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan" | 2864 | #define EXT4_MB_MAX_TO_SCAN_NAME "max_to_scan" |
2873 | #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan" | 2865 | #define EXT4_MB_MIN_TO_SCAN_NAME "min_to_scan" |
@@ -3007,9 +2999,9 @@ int __init init_ext4_mballoc(void) | |||
3007 | return -ENOMEM; | 2999 | return -ENOMEM; |
3008 | } | 3000 | } |
3009 | #ifdef CONFIG_PROC_FS | 3001 | #ifdef CONFIG_PROC_FS |
3010 | proc_root_ext4 = proc_mkdir(EXT4_ROOT, proc_root_fs); | 3002 | proc_root_ext4 = proc_mkdir("fs/ext4", NULL); |
3011 | if (proc_root_ext4 == NULL) | 3003 | if (proc_root_ext4 == NULL) |
3012 | printk(KERN_ERR "EXT4-fs: Unable to create %s\n", EXT4_ROOT); | 3004 | printk(KERN_ERR "EXT4-fs: Unable to create fs/ext4\n"); |
3013 | #endif | 3005 | #endif |
3014 | return 0; | 3006 | return 0; |
3015 | } | 3007 | } |
@@ -3020,7 +3012,7 @@ void exit_ext4_mballoc(void) | |||
3020 | kmem_cache_destroy(ext4_pspace_cachep); | 3012 | kmem_cache_destroy(ext4_pspace_cachep); |
3021 | kmem_cache_destroy(ext4_ac_cachep); | 3013 | kmem_cache_destroy(ext4_ac_cachep); |
3022 | #ifdef CONFIG_PROC_FS | 3014 | #ifdef CONFIG_PROC_FS |
3023 | remove_proc_entry(EXT4_ROOT, proc_root_fs); | 3015 | remove_proc_entry("fs/ext4", NULL); |
3024 | #endif | 3016 | #endif |
3025 | } | 3017 | } |
3026 | 3018 | ||
diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 5f522a55b596..4e0a3dd9d677 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c | |||
@@ -1222,8 +1222,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, | |||
1222 | brelse(bh); | 1222 | brelse(bh); |
1223 | goto out_invalid; | 1223 | goto out_invalid; |
1224 | } | 1224 | } |
1225 | logical_sector_size = | 1225 | logical_sector_size = get_unaligned_le16(&b->sector_size); |
1226 | le16_to_cpu(get_unaligned((__le16 *)&b->sector_size)); | ||
1227 | if (!is_power_of_2(logical_sector_size) | 1226 | if (!is_power_of_2(logical_sector_size) |
1228 | || (logical_sector_size < 512) | 1227 | || (logical_sector_size < 512) |
1229 | || (logical_sector_size > 4096)) { | 1228 | || (logical_sector_size > 4096)) { |
@@ -1322,8 +1321,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, | |||
1322 | sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1; | 1321 | sbi->dir_per_block_bits = ffs(sbi->dir_per_block) - 1; |
1323 | 1322 | ||
1324 | sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length; | 1323 | sbi->dir_start = sbi->fat_start + sbi->fats * sbi->fat_length; |
1325 | sbi->dir_entries = | 1324 | sbi->dir_entries = get_unaligned_le16(&b->dir_entries); |
1326 | le16_to_cpu(get_unaligned((__le16 *)&b->dir_entries)); | ||
1327 | if (sbi->dir_entries & (sbi->dir_per_block - 1)) { | 1325 | if (sbi->dir_entries & (sbi->dir_per_block - 1)) { |
1328 | if (!silent) | 1326 | if (!silent) |
1329 | printk(KERN_ERR "FAT: bogus directroy-entries per block" | 1327 | printk(KERN_ERR "FAT: bogus directroy-entries per block" |
@@ -1335,7 +1333,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, | |||
1335 | rootdir_sectors = sbi->dir_entries | 1333 | rootdir_sectors = sbi->dir_entries |
1336 | * sizeof(struct msdos_dir_entry) / sb->s_blocksize; | 1334 | * sizeof(struct msdos_dir_entry) / sb->s_blocksize; |
1337 | sbi->data_start = sbi->dir_start + rootdir_sectors; | 1335 | sbi->data_start = sbi->dir_start + rootdir_sectors; |
1338 | total_sectors = le16_to_cpu(get_unaligned((__le16 *)&b->sectors)); | 1336 | total_sectors = get_unaligned_le16(&b->sectors); |
1339 | if (total_sectors == 0) | 1337 | if (total_sectors == 0) |
1340 | total_sectors = le32_to_cpu(b->total_sect); | 1338 | total_sectors = le32_to_cpu(b->total_sect); |
1341 | 1339 | ||
diff --git a/fs/freevxfs/vxfs_extern.h b/fs/freevxfs/vxfs_extern.h index 2b46064f66b2..50ab5eecb99b 100644 --- a/fs/freevxfs/vxfs_extern.h +++ b/fs/freevxfs/vxfs_extern.h | |||
@@ -50,7 +50,11 @@ extern daddr_t vxfs_bmap1(struct inode *, long); | |||
50 | /* vxfs_fshead.c */ | 50 | /* vxfs_fshead.c */ |
51 | extern int vxfs_read_fshead(struct super_block *); | 51 | extern int vxfs_read_fshead(struct super_block *); |
52 | 52 | ||
53 | /* vxfs_immed.c */ | ||
54 | extern const struct inode_operations vxfs_immed_symlink_iops; | ||
55 | |||
53 | /* vxfs_inode.c */ | 56 | /* vxfs_inode.c */ |
57 | extern const struct address_space_operations vxfs_immed_aops; | ||
54 | extern struct kmem_cache *vxfs_inode_cachep; | 58 | extern struct kmem_cache *vxfs_inode_cachep; |
55 | extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t); | 59 | extern void vxfs_dumpi(struct vxfs_inode_info *, ino_t); |
56 | extern struct inode * vxfs_get_fake_inode(struct super_block *, | 60 | extern struct inode * vxfs_get_fake_inode(struct super_block *, |
@@ -69,6 +73,7 @@ extern const struct file_operations vxfs_dir_operations; | |||
69 | extern int vxfs_read_olt(struct super_block *, u_long); | 73 | extern int vxfs_read_olt(struct super_block *, u_long); |
70 | 74 | ||
71 | /* vxfs_subr.c */ | 75 | /* vxfs_subr.c */ |
76 | extern const struct address_space_operations vxfs_aops; | ||
72 | extern struct page * vxfs_get_page(struct address_space *, u_long); | 77 | extern struct page * vxfs_get_page(struct address_space *, u_long); |
73 | extern void vxfs_put_page(struct page *); | 78 | extern void vxfs_put_page(struct page *); |
74 | extern struct buffer_head * vxfs_bread(struct inode *, int); | 79 | extern struct buffer_head * vxfs_bread(struct inode *, int); |
diff --git a/fs/freevxfs/vxfs_immed.c b/fs/freevxfs/vxfs_immed.c index 8a5959a61ba9..c36aeaf92e41 100644 --- a/fs/freevxfs/vxfs_immed.c +++ b/fs/freevxfs/vxfs_immed.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/namei.h> | 35 | #include <linux/namei.h> |
36 | 36 | ||
37 | #include "vxfs.h" | 37 | #include "vxfs.h" |
38 | #include "vxfs_extern.h" | ||
38 | #include "vxfs_inode.h" | 39 | #include "vxfs_inode.h" |
39 | 40 | ||
40 | 41 | ||
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c index ad88d2364bc2..9f3f2ceb73f0 100644 --- a/fs/freevxfs/vxfs_inode.c +++ b/fs/freevxfs/vxfs_inode.c | |||
@@ -41,11 +41,6 @@ | |||
41 | #include "vxfs_extern.h" | 41 | #include "vxfs_extern.h" |
42 | 42 | ||
43 | 43 | ||
44 | extern const struct address_space_operations vxfs_aops; | ||
45 | extern const struct address_space_operations vxfs_immed_aops; | ||
46 | |||
47 | extern const struct inode_operations vxfs_immed_symlink_iops; | ||
48 | |||
49 | struct kmem_cache *vxfs_inode_cachep; | 44 | struct kmem_cache *vxfs_inode_cachep; |
50 | 45 | ||
51 | 46 | ||
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 06557679ca41..ae45f77765c0 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -25,6 +25,45 @@ | |||
25 | #include <linux/buffer_head.h> | 25 | #include <linux/buffer_head.h> |
26 | #include "internal.h" | 26 | #include "internal.h" |
27 | 27 | ||
28 | |||
29 | /** | ||
30 | * writeback_acquire - attempt to get exclusive writeback access to a device | ||
31 | * @bdi: the device's backing_dev_info structure | ||
32 | * | ||
33 | * It is a waste of resources to have more than one pdflush thread blocked on | ||
34 | * a single request queue. Exclusion at the request_queue level is obtained | ||
35 | * via a flag in the request_queue's backing_dev_info.state. | ||
36 | * | ||
37 | * Non-request_queue-backed address_spaces will share default_backing_dev_info, | ||
38 | * unless they implement their own. Which is somewhat inefficient, as this | ||
39 | * may prevent concurrent writeback against multiple devices. | ||
40 | */ | ||
41 | static int writeback_acquire(struct backing_dev_info *bdi) | ||
42 | { | ||
43 | return !test_and_set_bit(BDI_pdflush, &bdi->state); | ||
44 | } | ||
45 | |||
46 | /** | ||
47 | * writeback_in_progress - determine whether there is writeback in progress | ||
48 | * @bdi: the device's backing_dev_info structure. | ||
49 | * | ||
50 | * Determine whether there is writeback in progress against a backing device. | ||
51 | */ | ||
52 | int writeback_in_progress(struct backing_dev_info *bdi) | ||
53 | { | ||
54 | return test_bit(BDI_pdflush, &bdi->state); | ||
55 | } | ||
56 | |||
57 | /** | ||
58 | * writeback_release - relinquish exclusive writeback access against a device. | ||
59 | * @bdi: the device's backing_dev_info structure | ||
60 | */ | ||
61 | static void writeback_release(struct backing_dev_info *bdi) | ||
62 | { | ||
63 | BUG_ON(!writeback_in_progress(bdi)); | ||
64 | clear_bit(BDI_pdflush, &bdi->state); | ||
65 | } | ||
66 | |||
28 | /** | 67 | /** |
29 | * __mark_inode_dirty - internal function | 68 | * __mark_inode_dirty - internal function |
30 | * @inode: inode to mark | 69 | * @inode: inode to mark |
@@ -747,43 +786,4 @@ int generic_osync_inode(struct inode *inode, struct address_space *mapping, int | |||
747 | 786 | ||
748 | return err; | 787 | return err; |
749 | } | 788 | } |
750 | |||
751 | EXPORT_SYMBOL(generic_osync_inode); | 789 | EXPORT_SYMBOL(generic_osync_inode); |
752 | |||
753 | /** | ||
754 | * writeback_acquire - attempt to get exclusive writeback access to a device | ||
755 | * @bdi: the device's backing_dev_info structure | ||
756 | * | ||
757 | * It is a waste of resources to have more than one pdflush thread blocked on | ||
758 | * a single request queue. Exclusion at the request_queue level is obtained | ||
759 | * via a flag in the request_queue's backing_dev_info.state. | ||
760 | * | ||
761 | * Non-request_queue-backed address_spaces will share default_backing_dev_info, | ||
762 | * unless they implement their own. Which is somewhat inefficient, as this | ||
763 | * may prevent concurrent writeback against multiple devices. | ||
764 | */ | ||
765 | int writeback_acquire(struct backing_dev_info *bdi) | ||
766 | { | ||
767 | return !test_and_set_bit(BDI_pdflush, &bdi->state); | ||
768 | } | ||
769 | |||
770 | /** | ||
771 | * writeback_in_progress - determine whether there is writeback in progress | ||
772 | * @bdi: the device's backing_dev_info structure. | ||
773 | * | ||
774 | * Determine whether there is writeback in progress against a backing device. | ||
775 | */ | ||
776 | int writeback_in_progress(struct backing_dev_info *bdi) | ||
777 | { | ||
778 | return test_bit(BDI_pdflush, &bdi->state); | ||
779 | } | ||
780 | |||
781 | /** | ||
782 | * writeback_release - relinquish exclusive writeback access against a device. | ||
783 | * @bdi: the device's backing_dev_info structure | ||
784 | */ | ||
785 | void writeback_release(struct backing_dev_info *bdi) | ||
786 | { | ||
787 | BUG_ON(!writeback_in_progress(bdi)); | ||
788 | clear_bit(BDI_pdflush, &bdi->state); | ||
789 | } | ||
diff --git a/fs/hfs/super.c b/fs/hfs/super.c index 32de44ed0021..8cf67974adf6 100644 --- a/fs/hfs/super.c +++ b/fs/hfs/super.c | |||
@@ -297,7 +297,8 @@ static int parse_options(char *options, struct hfs_sb_info *hsb) | |||
297 | return 0; | 297 | return 0; |
298 | } | 298 | } |
299 | p = match_strdup(&args[0]); | 299 | p = match_strdup(&args[0]); |
300 | hsb->nls_disk = load_nls(p); | 300 | if (p) |
301 | hsb->nls_disk = load_nls(p); | ||
301 | if (!hsb->nls_disk) { | 302 | if (!hsb->nls_disk) { |
302 | printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p); | 303 | printk(KERN_ERR "hfs: unable to load codepage \"%s\"\n", p); |
303 | kfree(p); | 304 | kfree(p); |
@@ -311,7 +312,8 @@ static int parse_options(char *options, struct hfs_sb_info *hsb) | |||
311 | return 0; | 312 | return 0; |
312 | } | 313 | } |
313 | p = match_strdup(&args[0]); | 314 | p = match_strdup(&args[0]); |
314 | hsb->nls_io = load_nls(p); | 315 | if (p) |
316 | hsb->nls_io = load_nls(p); | ||
315 | if (!hsb->nls_io) { | 317 | if (!hsb->nls_io) { |
316 | printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p); | 318 | printk(KERN_ERR "hfs: unable to load iocharset \"%s\"\n", p); |
317 | kfree(p); | 319 | kfree(p); |
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index d72d0a8b25aa..9e59537b43d5 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h | |||
@@ -311,6 +311,10 @@ int hfsplus_delete_cat(u32, struct inode *, struct qstr *); | |||
311 | int hfsplus_rename_cat(u32, struct inode *, struct qstr *, | 311 | int hfsplus_rename_cat(u32, struct inode *, struct qstr *, |
312 | struct inode *, struct qstr *); | 312 | struct inode *, struct qstr *); |
313 | 313 | ||
314 | /* dir.c */ | ||
315 | extern const struct inode_operations hfsplus_dir_inode_operations; | ||
316 | extern const struct file_operations hfsplus_dir_operations; | ||
317 | |||
314 | /* extents.c */ | 318 | /* extents.c */ |
315 | int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *); | 319 | int hfsplus_ext_cmp_key(const hfsplus_btree_key *, const hfsplus_btree_key *); |
316 | void hfsplus_ext_write_extent(struct inode *); | 320 | void hfsplus_ext_write_extent(struct inode *); |
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index 37744cf3706a..d53b2af91c25 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c | |||
@@ -278,9 +278,6 @@ static int hfsplus_file_release(struct inode *inode, struct file *file) | |||
278 | return 0; | 278 | return 0; |
279 | } | 279 | } |
280 | 280 | ||
281 | extern const struct inode_operations hfsplus_dir_inode_operations; | ||
282 | extern struct file_operations hfsplus_dir_operations; | ||
283 | |||
284 | static const struct inode_operations hfsplus_file_inode_operations = { | 281 | static const struct inode_operations hfsplus_file_inode_operations = { |
285 | .lookup = hfsplus_file_lookup, | 282 | .lookup = hfsplus_file_lookup, |
286 | .truncate = hfsplus_file_truncate, | 283 | .truncate = hfsplus_file_truncate, |
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c index dc64fac00831..9997cbf8beb5 100644 --- a/fs/hfsplus/options.c +++ b/fs/hfsplus/options.c | |||
@@ -132,7 +132,8 @@ int hfsplus_parse_options(char *input, struct hfsplus_sb_info *sbi) | |||
132 | return 0; | 132 | return 0; |
133 | } | 133 | } |
134 | p = match_strdup(&args[0]); | 134 | p = match_strdup(&args[0]); |
135 | sbi->nls = load_nls(p); | 135 | if (p) |
136 | sbi->nls = load_nls(p); | ||
136 | if (!sbi->nls) { | 137 | if (!sbi->nls) { |
137 | printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p); | 138 | printk(KERN_ERR "hfs: unable to load nls mapping \"%s\"\n", p); |
138 | kfree(p); | 139 | kfree(p); |
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c index 72cab78f0509..175d08eacc86 100644 --- a/fs/hfsplus/wrapper.c +++ b/fs/hfsplus/wrapper.c | |||
@@ -47,7 +47,7 @@ static int hfsplus_read_mdb(void *bufptr, struct hfsplus_wd *wd) | |||
47 | return 0; | 47 | return 0; |
48 | wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART)); | 48 | wd->ablk_start = be16_to_cpu(*(__be16 *)(bufptr + HFSP_WRAPOFF_ABLKSTART)); |
49 | 49 | ||
50 | extent = be32_to_cpu(get_unaligned((__be32 *)(bufptr + HFSP_WRAPOFF_EMBEDEXT))); | 50 | extent = get_unaligned_be32(bufptr + HFSP_WRAPOFF_EMBEDEXT); |
51 | wd->embed_start = (extent >> 16) & 0xFFFF; | 51 | wd->embed_start = (extent >> 16) & 0xFFFF; |
52 | wd->embed_count = extent & 0xFFFF; | 52 | wd->embed_count = extent & 0xFFFF; |
53 | 53 | ||
diff --git a/fs/inode.c b/fs/inode.c index 27ee1af50d02..bf6478130424 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -495,8 +495,7 @@ static struct inode * find_inode(struct super_block * sb, struct hlist_head *hea | |||
495 | struct inode * inode = NULL; | 495 | struct inode * inode = NULL; |
496 | 496 | ||
497 | repeat: | 497 | repeat: |
498 | hlist_for_each (node, head) { | 498 | hlist_for_each_entry(inode, node, head, i_hash) { |
499 | inode = hlist_entry(node, struct inode, i_hash); | ||
500 | if (inode->i_sb != sb) | 499 | if (inode->i_sb != sb) |
501 | continue; | 500 | continue; |
502 | if (!test(inode, data)) | 501 | if (!test(inode, data)) |
@@ -520,8 +519,7 @@ static struct inode * find_inode_fast(struct super_block * sb, struct hlist_head | |||
520 | struct inode * inode = NULL; | 519 | struct inode * inode = NULL; |
521 | 520 | ||
522 | repeat: | 521 | repeat: |
523 | hlist_for_each (node, head) { | 522 | hlist_for_each_entry(inode, node, head, i_hash) { |
524 | inode = hlist_entry(node, struct inode, i_hash); | ||
525 | if (inode->i_ino != ino) | 523 | if (inode->i_ino != ino) |
526 | continue; | 524 | continue; |
527 | if (inode->i_sb != sb) | 525 | if (inode->i_sb != sb) |
diff --git a/fs/inotify_user.c b/fs/inotify_user.c index 7b94a1e3c015..6676c06bb7c1 100644 --- a/fs/inotify_user.c +++ b/fs/inotify_user.c | |||
@@ -598,7 +598,7 @@ asmlinkage long sys_inotify_init(void) | |||
598 | } | 598 | } |
599 | 599 | ||
600 | ih = inotify_init(&inotify_user_ops); | 600 | ih = inotify_init(&inotify_user_ops); |
601 | if (unlikely(IS_ERR(ih))) { | 601 | if (IS_ERR(ih)) { |
602 | ret = PTR_ERR(ih); | 602 | ret = PTR_ERR(ih); |
603 | goto out_free_dev; | 603 | goto out_free_dev; |
604 | } | 604 | } |
diff --git a/fs/ioctl.c b/fs/ioctl.c index f32fbde2175e..7db32b3382d3 100644 --- a/fs/ioctl.c +++ b/fs/ioctl.c | |||
@@ -28,8 +28,8 @@ | |||
28 | * | 28 | * |
29 | * Returns 0 on success, -errno on error. | 29 | * Returns 0 on success, -errno on error. |
30 | */ | 30 | */ |
31 | long vfs_ioctl(struct file *filp, unsigned int cmd, | 31 | static long vfs_ioctl(struct file *filp, unsigned int cmd, |
32 | unsigned long arg) | 32 | unsigned long arg) |
33 | { | 33 | { |
34 | int error = -ENOTTY; | 34 | int error = -ENOTTY; |
35 | 35 | ||
diff --git a/fs/isofs/isofs.h b/fs/isofs/isofs.h index d1bdf8adb351..ccbf72faf27a 100644 --- a/fs/isofs/isofs.h +++ b/fs/isofs/isofs.h | |||
@@ -78,29 +78,29 @@ static inline int isonum_712(char *p) | |||
78 | } | 78 | } |
79 | static inline unsigned int isonum_721(char *p) | 79 | static inline unsigned int isonum_721(char *p) |
80 | { | 80 | { |
81 | return le16_to_cpu(get_unaligned((__le16 *)p)); | 81 | return get_unaligned_le16(p); |
82 | } | 82 | } |
83 | static inline unsigned int isonum_722(char *p) | 83 | static inline unsigned int isonum_722(char *p) |
84 | { | 84 | { |
85 | return be16_to_cpu(get_unaligned((__le16 *)p)); | 85 | return get_unaligned_be16(p); |
86 | } | 86 | } |
87 | static inline unsigned int isonum_723(char *p) | 87 | static inline unsigned int isonum_723(char *p) |
88 | { | 88 | { |
89 | /* Ignore bigendian datum due to broken mastering programs */ | 89 | /* Ignore bigendian datum due to broken mastering programs */ |
90 | return le16_to_cpu(get_unaligned((__le16 *)p)); | 90 | return get_unaligned_le16(p); |
91 | } | 91 | } |
92 | static inline unsigned int isonum_731(char *p) | 92 | static inline unsigned int isonum_731(char *p) |
93 | { | 93 | { |
94 | return le32_to_cpu(get_unaligned((__le32 *)p)); | 94 | return get_unaligned_le32(p); |
95 | } | 95 | } |
96 | static inline unsigned int isonum_732(char *p) | 96 | static inline unsigned int isonum_732(char *p) |
97 | { | 97 | { |
98 | return be32_to_cpu(get_unaligned((__le32 *)p)); | 98 | return get_unaligned_be32(p); |
99 | } | 99 | } |
100 | static inline unsigned int isonum_733(char *p) | 100 | static inline unsigned int isonum_733(char *p) |
101 | { | 101 | { |
102 | /* Ignore bigendian datum due to broken mastering programs */ | 102 | /* Ignore bigendian datum due to broken mastering programs */ |
103 | return le32_to_cpu(get_unaligned((__le32 *)p)); | 103 | return get_unaligned_le32(p); |
104 | } | 104 | } |
105 | extern int iso_date(char *, int); | 105 | extern int iso_date(char *, int); |
106 | 106 | ||
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index 954cff001df6..eb7eb6c27bcb 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -904,19 +904,10 @@ static void jbd2_stats_proc_init(journal_t *journal) | |||
904 | snprintf(name, sizeof(name) - 1, "%s", bdevname(journal->j_dev, name)); | 904 | snprintf(name, sizeof(name) - 1, "%s", bdevname(journal->j_dev, name)); |
905 | journal->j_proc_entry = proc_mkdir(name, proc_jbd2_stats); | 905 | journal->j_proc_entry = proc_mkdir(name, proc_jbd2_stats); |
906 | if (journal->j_proc_entry) { | 906 | if (journal->j_proc_entry) { |
907 | struct proc_dir_entry *p; | 907 | proc_create_data("history", S_IRUGO, journal->j_proc_entry, |
908 | p = create_proc_entry("history", S_IRUGO, | 908 | &jbd2_seq_history_fops, journal); |
909 | journal->j_proc_entry); | 909 | proc_create_data("info", S_IRUGO, journal->j_proc_entry, |
910 | if (p) { | 910 | &jbd2_seq_info_fops, journal); |
911 | p->proc_fops = &jbd2_seq_history_fops; | ||
912 | p->data = journal; | ||
913 | p = create_proc_entry("info", S_IRUGO, | ||
914 | journal->j_proc_entry); | ||
915 | if (p) { | ||
916 | p->proc_fops = &jbd2_seq_info_fops; | ||
917 | p->data = journal; | ||
918 | } | ||
919 | } | ||
920 | } | 911 | } |
921 | } | 912 | } |
922 | 913 | ||
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c index 887f5759e536..bf6ab19b86ee 100644 --- a/fs/jfs/jfs_debug.c +++ b/fs/jfs/jfs_debug.c | |||
@@ -89,7 +89,7 @@ void jfs_proc_init(void) | |||
89 | { | 89 | { |
90 | int i; | 90 | int i; |
91 | 91 | ||
92 | if (!(base = proc_mkdir("jfs", proc_root_fs))) | 92 | if (!(base = proc_mkdir("fs/jfs", NULL))) |
93 | return; | 93 | return; |
94 | base->owner = THIS_MODULE; | 94 | base->owner = THIS_MODULE; |
95 | 95 | ||
@@ -109,7 +109,7 @@ void jfs_proc_clean(void) | |||
109 | if (base) { | 109 | if (base) { |
110 | for (i = 0; i < NPROCENT; i++) | 110 | for (i = 0; i < NPROCENT; i++) |
111 | remove_proc_entry(Entries[i].name, base); | 111 | remove_proc_entry(Entries[i].name, base); |
112 | remove_proc_entry("jfs", proc_root_fs); | 112 | remove_proc_entry("fs/jfs", NULL); |
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
diff --git a/fs/namei.c b/fs/namei.c index e179f71bfcb0..32fd9655485b 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/capability.h> | 30 | #include <linux/capability.h> |
31 | #include <linux/file.h> | 31 | #include <linux/file.h> |
32 | #include <linux/fcntl.h> | 32 | #include <linux/fcntl.h> |
33 | #include <linux/device_cgroup.h> | ||
33 | #include <asm/namei.h> | 34 | #include <asm/namei.h> |
34 | #include <asm/uaccess.h> | 35 | #include <asm/uaccess.h> |
35 | 36 | ||
@@ -281,6 +282,10 @@ int permission(struct inode *inode, int mask, struct nameidata *nd) | |||
281 | if (retval) | 282 | if (retval) |
282 | return retval; | 283 | return retval; |
283 | 284 | ||
285 | retval = devcgroup_inode_permission(inode, mask); | ||
286 | if (retval) | ||
287 | return retval; | ||
288 | |||
284 | return security_inode_permission(inode, mask, nd); | 289 | return security_inode_permission(inode, mask, nd); |
285 | } | 290 | } |
286 | 291 | ||
@@ -2028,6 +2033,10 @@ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) | |||
2028 | if (!dir->i_op || !dir->i_op->mknod) | 2033 | if (!dir->i_op || !dir->i_op->mknod) |
2029 | return -EPERM; | 2034 | return -EPERM; |
2030 | 2035 | ||
2036 | error = devcgroup_inode_mknod(mode, dev); | ||
2037 | if (error) | ||
2038 | return error; | ||
2039 | |||
2031 | error = security_inode_mknod(dir, dentry, mode, dev); | 2040 | error = security_inode_mknod(dir, dentry, mode, dev); |
2032 | if (error) | 2041 | if (error) |
2033 | return error; | 2042 | return error; |
diff --git a/fs/namespace.c b/fs/namespace.c index fe376805cf5f..061e5edb4d27 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1176,17 +1176,6 @@ static int mount_is_safe(struct nameidata *nd) | |||
1176 | #endif | 1176 | #endif |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | static int lives_below_in_same_fs(struct dentry *d, struct dentry *dentry) | ||
1180 | { | ||
1181 | while (1) { | ||
1182 | if (d == dentry) | ||
1183 | return 1; | ||
1184 | if (d == NULL || d == d->d_parent) | ||
1185 | return 0; | ||
1186 | d = d->d_parent; | ||
1187 | } | ||
1188 | } | ||
1189 | |||
1190 | struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, | 1179 | struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, |
1191 | int flag) | 1180 | int flag) |
1192 | { | 1181 | { |
@@ -1203,7 +1192,7 @@ struct vfsmount *copy_tree(struct vfsmount *mnt, struct dentry *dentry, | |||
1203 | 1192 | ||
1204 | p = mnt; | 1193 | p = mnt; |
1205 | list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { | 1194 | list_for_each_entry(r, &mnt->mnt_mounts, mnt_child) { |
1206 | if (!lives_below_in_same_fs(r->mnt_mountpoint, dentry)) | 1195 | if (!is_subdir(r->mnt_mountpoint, dentry)) |
1207 | continue; | 1196 | continue; |
1208 | 1197 | ||
1209 | for (s = r; s; s = next_mnt(s, r)) { | 1198 | for (s = r; s; s = next_mnt(s, r)) { |
diff --git a/fs/ncpfs/ncplib_kernel.c b/fs/ncpfs/ncplib_kernel.c index df6d60bdfcd3..97645f112114 100644 --- a/fs/ncpfs/ncplib_kernel.c +++ b/fs/ncpfs/ncplib_kernel.c | |||
@@ -102,48 +102,47 @@ static inline void ncp_init_request_s(struct ncp_server *server, int subfunction | |||
102 | } | 102 | } |
103 | 103 | ||
104 | static inline char * | 104 | static inline char * |
105 | ncp_reply_data(struct ncp_server *server, int offset) | 105 | ncp_reply_data(struct ncp_server *server, int offset) |
106 | { | 106 | { |
107 | return &(server->packet[sizeof(struct ncp_reply_header) + offset]); | 107 | return &(server->packet[sizeof(struct ncp_reply_header) + offset]); |
108 | } | 108 | } |
109 | 109 | ||
110 | static inline __u8 BVAL(void* data) | 110 | static inline u8 BVAL(void *data) |
111 | { | 111 | { |
112 | return get_unaligned((__u8*)data); | 112 | return *(u8 *)data; |
113 | } | 113 | } |
114 | 114 | ||
115 | static __u8 | 115 | static u8 ncp_reply_byte(struct ncp_server *server, int offset) |
116 | ncp_reply_byte(struct ncp_server *server, int offset) | ||
117 | { | 116 | { |
118 | return get_unaligned((__u8 *) ncp_reply_data(server, offset)); | 117 | return *(u8 *)ncp_reply_data(server, offset); |
119 | } | 118 | } |
120 | 119 | ||
121 | static inline __u16 WVAL_LH(void* data) | 120 | static inline u16 WVAL_LH(void *data) |
122 | { | 121 | { |
123 | return le16_to_cpu(get_unaligned((__le16*)data)); | 122 | return get_unaligned_le16(data); |
124 | } | 123 | } |
125 | 124 | ||
126 | static __u16 | 125 | static u16 |
127 | ncp_reply_le16(struct ncp_server *server, int offset) | 126 | ncp_reply_le16(struct ncp_server *server, int offset) |
128 | { | 127 | { |
129 | return le16_to_cpu(get_unaligned((__le16 *) ncp_reply_data(server, offset))); | 128 | return get_unaligned_le16(ncp_reply_data(server, offset)); |
130 | } | 129 | } |
131 | 130 | ||
132 | static __u16 | 131 | static u16 |
133 | ncp_reply_be16(struct ncp_server *server, int offset) | 132 | ncp_reply_be16(struct ncp_server *server, int offset) |
134 | { | 133 | { |
135 | return be16_to_cpu(get_unaligned((__be16 *) ncp_reply_data(server, offset))); | 134 | return get_unaligned_be16(ncp_reply_data(server, offset)); |
136 | } | 135 | } |
137 | 136 | ||
138 | static inline __u32 DVAL_LH(void* data) | 137 | static inline u32 DVAL_LH(void *data) |
139 | { | 138 | { |
140 | return le32_to_cpu(get_unaligned((__le32*)data)); | 139 | return get_unaligned_le32(data); |
141 | } | 140 | } |
142 | 141 | ||
143 | static __le32 | 142 | static __le32 |
144 | ncp_reply_dword(struct ncp_server *server, int offset) | 143 | ncp_reply_dword(struct ncp_server *server, int offset) |
145 | { | 144 | { |
146 | return get_unaligned((__le32 *) ncp_reply_data(server, offset)); | 145 | return get_unaligned((__le32 *)ncp_reply_data(server, offset)); |
147 | } | 146 | } |
148 | 147 | ||
149 | static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) { | 148 | static inline __u32 ncp_reply_dword_lh(struct ncp_server* server, int offset) { |
@@ -1006,8 +1005,8 @@ ncp_read_bounce(struct ncp_server *server, const char *file_id, | |||
1006 | result = ncp_request2(server, 72, bounce, bufsize); | 1005 | result = ncp_request2(server, 72, bounce, bufsize); |
1007 | ncp_unlock_server(server); | 1006 | ncp_unlock_server(server); |
1008 | if (!result) { | 1007 | if (!result) { |
1009 | int len = be16_to_cpu(get_unaligned((__be16*)((char*)bounce + | 1008 | int len = get_unaligned_be16((char *)bounce + |
1010 | sizeof(struct ncp_reply_header)))); | 1009 | sizeof(struct ncp_reply_header)); |
1011 | result = -EIO; | 1010 | result = -EIO; |
1012 | if (len <= to_read) { | 1011 | if (len <= to_read) { |
1013 | char* source; | 1012 | char* source; |
diff --git a/fs/nfs/client.c b/fs/nfs/client.c index f2f3b284e6dd..89ac5bb0401c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c | |||
@@ -1321,6 +1321,7 @@ static const struct file_operations nfs_server_list_fops = { | |||
1321 | .read = seq_read, | 1321 | .read = seq_read, |
1322 | .llseek = seq_lseek, | 1322 | .llseek = seq_lseek, |
1323 | .release = seq_release, | 1323 | .release = seq_release, |
1324 | .owner = THIS_MODULE, | ||
1324 | }; | 1325 | }; |
1325 | 1326 | ||
1326 | static int nfs_volume_list_open(struct inode *inode, struct file *file); | 1327 | static int nfs_volume_list_open(struct inode *inode, struct file *file); |
@@ -1341,6 +1342,7 @@ static const struct file_operations nfs_volume_list_fops = { | |||
1341 | .read = seq_read, | 1342 | .read = seq_read, |
1342 | .llseek = seq_lseek, | 1343 | .llseek = seq_lseek, |
1343 | .release = seq_release, | 1344 | .release = seq_release, |
1345 | .owner = THIS_MODULE, | ||
1344 | }; | 1346 | }; |
1345 | 1347 | ||
1346 | /* | 1348 | /* |
@@ -1500,33 +1502,29 @@ int __init nfs_fs_proc_init(void) | |||
1500 | { | 1502 | { |
1501 | struct proc_dir_entry *p; | 1503 | struct proc_dir_entry *p; |
1502 | 1504 | ||
1503 | proc_fs_nfs = proc_mkdir("nfsfs", proc_root_fs); | 1505 | proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL); |
1504 | if (!proc_fs_nfs) | 1506 | if (!proc_fs_nfs) |
1505 | goto error_0; | 1507 | goto error_0; |
1506 | 1508 | ||
1507 | proc_fs_nfs->owner = THIS_MODULE; | 1509 | proc_fs_nfs->owner = THIS_MODULE; |
1508 | 1510 | ||
1509 | /* a file of servers with which we're dealing */ | 1511 | /* a file of servers with which we're dealing */ |
1510 | p = create_proc_entry("servers", S_IFREG|S_IRUGO, proc_fs_nfs); | 1512 | p = proc_create("servers", S_IFREG|S_IRUGO, |
1513 | proc_fs_nfs, &nfs_server_list_fops); | ||
1511 | if (!p) | 1514 | if (!p) |
1512 | goto error_1; | 1515 | goto error_1; |
1513 | 1516 | ||
1514 | p->proc_fops = &nfs_server_list_fops; | ||
1515 | p->owner = THIS_MODULE; | ||
1516 | |||
1517 | /* a file of volumes that we have mounted */ | 1517 | /* a file of volumes that we have mounted */ |
1518 | p = create_proc_entry("volumes", S_IFREG|S_IRUGO, proc_fs_nfs); | 1518 | p = proc_create("volumes", S_IFREG|S_IRUGO, |
1519 | proc_fs_nfs, &nfs_volume_list_fops); | ||
1519 | if (!p) | 1520 | if (!p) |
1520 | goto error_2; | 1521 | goto error_2; |
1521 | |||
1522 | p->proc_fops = &nfs_volume_list_fops; | ||
1523 | p->owner = THIS_MODULE; | ||
1524 | return 0; | 1522 | return 0; |
1525 | 1523 | ||
1526 | error_2: | 1524 | error_2: |
1527 | remove_proc_entry("servers", proc_fs_nfs); | 1525 | remove_proc_entry("servers", proc_fs_nfs); |
1528 | error_1: | 1526 | error_1: |
1529 | remove_proc_entry("nfsfs", proc_root_fs); | 1527 | remove_proc_entry("fs/nfsfs", NULL); |
1530 | error_0: | 1528 | error_0: |
1531 | return -ENOMEM; | 1529 | return -ENOMEM; |
1532 | } | 1530 | } |
@@ -1538,7 +1536,7 @@ void nfs_fs_proc_exit(void) | |||
1538 | { | 1536 | { |
1539 | remove_proc_entry("volumes", proc_fs_nfs); | 1537 | remove_proc_entry("volumes", proc_fs_nfs); |
1540 | remove_proc_entry("servers", proc_fs_nfs); | 1538 | remove_proc_entry("servers", proc_fs_nfs); |
1541 | remove_proc_entry("nfsfs", proc_root_fs); | 1539 | remove_proc_entry("fs/nfsfs", NULL); |
1542 | } | 1540 | } |
1543 | 1541 | ||
1544 | #endif /* CONFIG_PROC_FS */ | 1542 | #endif /* CONFIG_PROC_FS */ |
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c index 42f3820ee8f5..5ac00c4fee91 100644 --- a/fs/nfsd/nfsctl.c +++ b/fs/nfsd/nfsctl.c | |||
@@ -169,6 +169,7 @@ static const struct file_operations exports_operations = { | |||
169 | .read = seq_read, | 169 | .read = seq_read, |
170 | .llseek = seq_lseek, | 170 | .llseek = seq_lseek, |
171 | .release = seq_release, | 171 | .release = seq_release, |
172 | .owner = THIS_MODULE, | ||
172 | }; | 173 | }; |
173 | 174 | ||
174 | /*----------------------------------------------------------------------------*/ | 175 | /*----------------------------------------------------------------------------*/ |
@@ -801,10 +802,9 @@ static int create_proc_exports_entry(void) | |||
801 | entry = proc_mkdir("fs/nfs", NULL); | 802 | entry = proc_mkdir("fs/nfs", NULL); |
802 | if (!entry) | 803 | if (!entry) |
803 | return -ENOMEM; | 804 | return -ENOMEM; |
804 | entry = create_proc_entry("fs/nfs/exports", 0, NULL); | 805 | entry = proc_create("exports", 0, entry, &exports_operations); |
805 | if (!entry) | 806 | if (!entry) |
806 | return -ENOMEM; | 807 | return -ENOMEM; |
807 | entry->proc_fops = &exports_operations; | ||
808 | return 0; | 808 | return 0; |
809 | } | 809 | } |
810 | #else /* CONFIG_PROC_FS */ | 810 | #else /* CONFIG_PROC_FS */ |
diff --git a/fs/ntfs/mft.c b/fs/ntfs/mft.c index 2ad5c8b104b9..790defb847e7 100644 --- a/fs/ntfs/mft.c +++ b/fs/ntfs/mft.c | |||
@@ -1191,7 +1191,7 @@ static int ntfs_mft_bitmap_find_and_alloc_free_rec_nolock(ntfs_volume *vol, | |||
1191 | if (size) { | 1191 | if (size) { |
1192 | page = ntfs_map_page(mftbmp_mapping, | 1192 | page = ntfs_map_page(mftbmp_mapping, |
1193 | ofs >> PAGE_CACHE_SHIFT); | 1193 | ofs >> PAGE_CACHE_SHIFT); |
1194 | if (unlikely(IS_ERR(page))) { | 1194 | if (IS_ERR(page)) { |
1195 | ntfs_error(vol->sb, "Failed to read mft " | 1195 | ntfs_error(vol->sb, "Failed to read mft " |
1196 | "bitmap, aborting."); | 1196 | "bitmap, aborting."); |
1197 | return PTR_ERR(page); | 1197 | return PTR_ERR(page); |
@@ -2118,7 +2118,7 @@ static int ntfs_mft_record_format(const ntfs_volume *vol, const s64 mft_no) | |||
2118 | } | 2118 | } |
2119 | /* Read, map, and pin the page containing the mft record. */ | 2119 | /* Read, map, and pin the page containing the mft record. */ |
2120 | page = ntfs_map_page(mft_vi->i_mapping, index); | 2120 | page = ntfs_map_page(mft_vi->i_mapping, index); |
2121 | if (unlikely(IS_ERR(page))) { | 2121 | if (IS_ERR(page)) { |
2122 | ntfs_error(vol->sb, "Failed to map page containing mft record " | 2122 | ntfs_error(vol->sb, "Failed to map page containing mft record " |
2123 | "to format 0x%llx.", (long long)mft_no); | 2123 | "to format 0x%llx.", (long long)mft_no); |
2124 | return PTR_ERR(page); | 2124 | return PTR_ERR(page); |
@@ -2519,7 +2519,7 @@ mft_rec_already_initialized: | |||
2519 | ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; | 2519 | ofs = (bit << vol->mft_record_size_bits) & ~PAGE_CACHE_MASK; |
2520 | /* Read, map, and pin the page containing the mft record. */ | 2520 | /* Read, map, and pin the page containing the mft record. */ |
2521 | page = ntfs_map_page(vol->mft_ino->i_mapping, index); | 2521 | page = ntfs_map_page(vol->mft_ino->i_mapping, index); |
2522 | if (unlikely(IS_ERR(page))) { | 2522 | if (IS_ERR(page)) { |
2523 | ntfs_error(vol->sb, "Failed to map page containing allocated " | 2523 | ntfs_error(vol->sb, "Failed to map page containing allocated " |
2524 | "mft record 0x%llx.", (long long)bit); | 2524 | "mft record 0x%llx.", (long long)bit); |
2525 | err = PTR_ERR(page); | 2525 | err = PTR_ERR(page); |
diff --git a/fs/proc/base.c b/fs/proc/base.c index c5e412a00b17..fcf02f2deeba 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -195,12 +195,32 @@ static int proc_root_link(struct inode *inode, struct path *path) | |||
195 | return result; | 195 | return result; |
196 | } | 196 | } |
197 | 197 | ||
198 | #define MAY_PTRACE(task) \ | 198 | /* |
199 | (task == current || \ | 199 | * Return zero if current may access user memory in @task, -error if not. |
200 | (task->parent == current && \ | 200 | */ |
201 | (task->ptrace & PT_PTRACED) && \ | 201 | static int check_mem_permission(struct task_struct *task) |
202 | (task_is_stopped_or_traced(task)) && \ | 202 | { |
203 | security_ptrace(current,task) == 0)) | 203 | /* |
204 | * A task can always look at itself, in case it chooses | ||
205 | * to use system calls instead of load instructions. | ||
206 | */ | ||
207 | if (task == current) | ||
208 | return 0; | ||
209 | |||
210 | /* | ||
211 | * If current is actively ptrace'ing, and would also be | ||
212 | * permitted to freshly attach with ptrace now, permit it. | ||
213 | */ | ||
214 | if (task->parent == current && (task->ptrace & PT_PTRACED) && | ||
215 | task_is_stopped_or_traced(task) && | ||
216 | ptrace_may_attach(task)) | ||
217 | return 0; | ||
218 | |||
219 | /* | ||
220 | * Noone else is allowed. | ||
221 | */ | ||
222 | return -EPERM; | ||
223 | } | ||
204 | 224 | ||
205 | struct mm_struct *mm_for_maps(struct task_struct *task) | 225 | struct mm_struct *mm_for_maps(struct task_struct *task) |
206 | { | 226 | { |
@@ -722,7 +742,7 @@ static ssize_t mem_read(struct file * file, char __user * buf, | |||
722 | if (!task) | 742 | if (!task) |
723 | goto out_no_task; | 743 | goto out_no_task; |
724 | 744 | ||
725 | if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) | 745 | if (check_mem_permission(task)) |
726 | goto out; | 746 | goto out; |
727 | 747 | ||
728 | ret = -ENOMEM; | 748 | ret = -ENOMEM; |
@@ -748,7 +768,7 @@ static ssize_t mem_read(struct file * file, char __user * buf, | |||
748 | 768 | ||
749 | this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; | 769 | this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; |
750 | retval = access_process_vm(task, src, page, this_len, 0); | 770 | retval = access_process_vm(task, src, page, this_len, 0); |
751 | if (!retval || !MAY_PTRACE(task) || !ptrace_may_attach(task)) { | 771 | if (!retval || check_mem_permission(task)) { |
752 | if (!ret) | 772 | if (!ret) |
753 | ret = -EIO; | 773 | ret = -EIO; |
754 | break; | 774 | break; |
@@ -792,7 +812,7 @@ static ssize_t mem_write(struct file * file, const char __user *buf, | |||
792 | if (!task) | 812 | if (!task) |
793 | goto out_no_task; | 813 | goto out_no_task; |
794 | 814 | ||
795 | if (!MAY_PTRACE(task) || !ptrace_may_attach(task)) | 815 | if (check_mem_permission(task)) |
796 | goto out; | 816 | goto out; |
797 | 817 | ||
798 | copied = -ENOMEM; | 818 | copied = -ENOMEM; |
@@ -1181,6 +1201,81 @@ static const struct file_operations proc_pid_sched_operations = { | |||
1181 | 1201 | ||
1182 | #endif | 1202 | #endif |
1183 | 1203 | ||
1204 | /* | ||
1205 | * We added or removed a vma mapping the executable. The vmas are only mapped | ||
1206 | * during exec and are not mapped with the mmap system call. | ||
1207 | * Callers must hold down_write() on the mm's mmap_sem for these | ||
1208 | */ | ||
1209 | void added_exe_file_vma(struct mm_struct *mm) | ||
1210 | { | ||
1211 | mm->num_exe_file_vmas++; | ||
1212 | } | ||
1213 | |||
1214 | void removed_exe_file_vma(struct mm_struct *mm) | ||
1215 | { | ||
1216 | mm->num_exe_file_vmas--; | ||
1217 | if ((mm->num_exe_file_vmas == 0) && mm->exe_file){ | ||
1218 | fput(mm->exe_file); | ||
1219 | mm->exe_file = NULL; | ||
1220 | } | ||
1221 | |||
1222 | } | ||
1223 | |||
1224 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) | ||
1225 | { | ||
1226 | if (new_exe_file) | ||
1227 | get_file(new_exe_file); | ||
1228 | if (mm->exe_file) | ||
1229 | fput(mm->exe_file); | ||
1230 | mm->exe_file = new_exe_file; | ||
1231 | mm->num_exe_file_vmas = 0; | ||
1232 | } | ||
1233 | |||
1234 | struct file *get_mm_exe_file(struct mm_struct *mm) | ||
1235 | { | ||
1236 | struct file *exe_file; | ||
1237 | |||
1238 | /* We need mmap_sem to protect against races with removal of | ||
1239 | * VM_EXECUTABLE vmas */ | ||
1240 | down_read(&mm->mmap_sem); | ||
1241 | exe_file = mm->exe_file; | ||
1242 | if (exe_file) | ||
1243 | get_file(exe_file); | ||
1244 | up_read(&mm->mmap_sem); | ||
1245 | return exe_file; | ||
1246 | } | ||
1247 | |||
1248 | void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) | ||
1249 | { | ||
1250 | /* It's safe to write the exe_file pointer without exe_file_lock because | ||
1251 | * this is called during fork when the task is not yet in /proc */ | ||
1252 | newmm->exe_file = get_mm_exe_file(oldmm); | ||
1253 | } | ||
1254 | |||
1255 | static int proc_exe_link(struct inode *inode, struct path *exe_path) | ||
1256 | { | ||
1257 | struct task_struct *task; | ||
1258 | struct mm_struct *mm; | ||
1259 | struct file *exe_file; | ||
1260 | |||
1261 | task = get_proc_task(inode); | ||
1262 | if (!task) | ||
1263 | return -ENOENT; | ||
1264 | mm = get_task_mm(task); | ||
1265 | put_task_struct(task); | ||
1266 | if (!mm) | ||
1267 | return -ENOENT; | ||
1268 | exe_file = get_mm_exe_file(mm); | ||
1269 | mmput(mm); | ||
1270 | if (exe_file) { | ||
1271 | *exe_path = exe_file->f_path; | ||
1272 | path_get(&exe_file->f_path); | ||
1273 | fput(exe_file); | ||
1274 | return 0; | ||
1275 | } else | ||
1276 | return -ENOENT; | ||
1277 | } | ||
1278 | |||
1184 | static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) | 1279 | static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd) |
1185 | { | 1280 | { |
1186 | struct inode *inode = dentry->d_inode; | 1281 | struct inode *inode = dentry->d_inode; |
diff --git a/fs/proc/generic.c b/fs/proc/generic.c index a36ad3c75cf4..9d53b39a9cf8 100644 --- a/fs/proc/generic.c +++ b/fs/proc/generic.c | |||
@@ -69,12 +69,7 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes, | |||
69 | count = min_t(size_t, PROC_BLOCK_SIZE, nbytes); | 69 | count = min_t(size_t, PROC_BLOCK_SIZE, nbytes); |
70 | 70 | ||
71 | start = NULL; | 71 | start = NULL; |
72 | if (dp->get_info) { | 72 | if (dp->read_proc) { |
73 | /* Handle old net routines */ | ||
74 | n = dp->get_info(page, &start, *ppos, count); | ||
75 | if (n < count) | ||
76 | eof = 1; | ||
77 | } else if (dp->read_proc) { | ||
78 | /* | 73 | /* |
79 | * How to be a proc read function | 74 | * How to be a proc read function |
80 | * ------------------------------ | 75 | * ------------------------------ |
@@ -277,8 +272,11 @@ static int xlate_proc_name(const char *name, | |||
277 | int len; | 272 | int len; |
278 | int rtn = 0; | 273 | int rtn = 0; |
279 | 274 | ||
275 | de = *ret; | ||
276 | if (!de) | ||
277 | de = &proc_root; | ||
278 | |||
280 | spin_lock(&proc_subdir_lock); | 279 | spin_lock(&proc_subdir_lock); |
281 | de = &proc_root; | ||
282 | while (1) { | 280 | while (1) { |
283 | next = strchr(cp, '/'); | 281 | next = strchr(cp, '/'); |
284 | if (!next) | 282 | if (!next) |
@@ -385,20 +383,18 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir, | |||
385 | 383 | ||
386 | lock_kernel(); | 384 | lock_kernel(); |
387 | spin_lock(&proc_subdir_lock); | 385 | spin_lock(&proc_subdir_lock); |
388 | if (de) { | 386 | for (de = de->subdir; de ; de = de->next) { |
389 | for (de = de->subdir; de ; de = de->next) { | 387 | if (de->namelen != dentry->d_name.len) |
390 | if (de->namelen != dentry->d_name.len) | 388 | continue; |
391 | continue; | 389 | if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { |
392 | if (!memcmp(dentry->d_name.name, de->name, de->namelen)) { | 390 | unsigned int ino; |
393 | unsigned int ino; | ||
394 | 391 | ||
395 | ino = de->low_ino; | 392 | ino = de->low_ino; |
396 | de_get(de); | 393 | de_get(de); |
397 | spin_unlock(&proc_subdir_lock); | 394 | spin_unlock(&proc_subdir_lock); |
398 | error = -EINVAL; | 395 | error = -EINVAL; |
399 | inode = proc_get_inode(dir->i_sb, ino, de); | 396 | inode = proc_get_inode(dir->i_sb, ino, de); |
400 | goto out_unlock; | 397 | goto out_unlock; |
401 | } | ||
402 | } | 398 | } |
403 | } | 399 | } |
404 | spin_unlock(&proc_subdir_lock); | 400 | spin_unlock(&proc_subdir_lock); |
@@ -410,7 +406,8 @@ out_unlock: | |||
410 | d_add(dentry, inode); | 406 | d_add(dentry, inode); |
411 | return NULL; | 407 | return NULL; |
412 | } | 408 | } |
413 | de_put(de); | 409 | if (de) |
410 | de_put(de); | ||
414 | return ERR_PTR(error); | 411 | return ERR_PTR(error); |
415 | } | 412 | } |
416 | 413 | ||
@@ -440,10 +437,6 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent, | |||
440 | lock_kernel(); | 437 | lock_kernel(); |
441 | 438 | ||
442 | ino = inode->i_ino; | 439 | ino = inode->i_ino; |
443 | if (!de) { | ||
444 | ret = -EINVAL; | ||
445 | goto out; | ||
446 | } | ||
447 | i = filp->f_pos; | 440 | i = filp->f_pos; |
448 | switch (i) { | 441 | switch (i) { |
449 | case 0: | 442 | case 0: |
@@ -582,7 +575,7 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent, | |||
582 | /* make sure name is valid */ | 575 | /* make sure name is valid */ |
583 | if (!name || !strlen(name)) goto out; | 576 | if (!name || !strlen(name)) goto out; |
584 | 577 | ||
585 | if (!(*parent) && xlate_proc_name(name, parent, &fn) != 0) | 578 | if (xlate_proc_name(name, parent, &fn) != 0) |
586 | goto out; | 579 | goto out; |
587 | 580 | ||
588 | /* At this point there must not be any '/' characters beyond *fn */ | 581 | /* At this point there must not be any '/' characters beyond *fn */ |
@@ -682,9 +675,10 @@ struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | |||
682 | return ent; | 675 | return ent; |
683 | } | 676 | } |
684 | 677 | ||
685 | struct proc_dir_entry *proc_create(const char *name, mode_t mode, | 678 | struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, |
686 | struct proc_dir_entry *parent, | 679 | struct proc_dir_entry *parent, |
687 | const struct file_operations *proc_fops) | 680 | const struct file_operations *proc_fops, |
681 | void *data) | ||
688 | { | 682 | { |
689 | struct proc_dir_entry *pde; | 683 | struct proc_dir_entry *pde; |
690 | nlink_t nlink; | 684 | nlink_t nlink; |
@@ -705,6 +699,7 @@ struct proc_dir_entry *proc_create(const char *name, mode_t mode, | |||
705 | if (!pde) | 699 | if (!pde) |
706 | goto out; | 700 | goto out; |
707 | pde->proc_fops = proc_fops; | 701 | pde->proc_fops = proc_fops; |
702 | pde->data = data; | ||
708 | if (proc_register(parent, pde) < 0) | 703 | if (proc_register(parent, pde) < 0) |
709 | goto out_free; | 704 | goto out_free; |
710 | return pde; | 705 | return pde; |
@@ -734,55 +729,58 @@ void free_proc_entry(struct proc_dir_entry *de) | |||
734 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) | 729 | void remove_proc_entry(const char *name, struct proc_dir_entry *parent) |
735 | { | 730 | { |
736 | struct proc_dir_entry **p; | 731 | struct proc_dir_entry **p; |
737 | struct proc_dir_entry *de; | 732 | struct proc_dir_entry *de = NULL; |
738 | const char *fn = name; | 733 | const char *fn = name; |
739 | int len; | 734 | int len; |
740 | 735 | ||
741 | if (!parent && xlate_proc_name(name, &parent, &fn) != 0) | 736 | if (xlate_proc_name(name, &parent, &fn) != 0) |
742 | goto out; | 737 | return; |
743 | len = strlen(fn); | 738 | len = strlen(fn); |
744 | 739 | ||
745 | spin_lock(&proc_subdir_lock); | 740 | spin_lock(&proc_subdir_lock); |
746 | for (p = &parent->subdir; *p; p=&(*p)->next ) { | 741 | for (p = &parent->subdir; *p; p=&(*p)->next ) { |
747 | if (!proc_match(len, fn, *p)) | 742 | if (proc_match(len, fn, *p)) { |
748 | continue; | 743 | de = *p; |
749 | de = *p; | 744 | *p = de->next; |
750 | *p = de->next; | 745 | de->next = NULL; |
751 | de->next = NULL; | 746 | break; |
752 | 747 | } | |
753 | spin_lock(&de->pde_unload_lock); | 748 | } |
754 | /* | 749 | spin_unlock(&proc_subdir_lock); |
755 | * Stop accepting new callers into module. If you're | 750 | if (!de) |
756 | * dynamically allocating ->proc_fops, save a pointer somewhere. | 751 | return; |
757 | */ | ||
758 | de->proc_fops = NULL; | ||
759 | /* Wait until all existing callers into module are done. */ | ||
760 | if (de->pde_users > 0) { | ||
761 | DECLARE_COMPLETION_ONSTACK(c); | ||
762 | |||
763 | if (!de->pde_unload_completion) | ||
764 | de->pde_unload_completion = &c; | ||
765 | |||
766 | spin_unlock(&de->pde_unload_lock); | ||
767 | spin_unlock(&proc_subdir_lock); | ||
768 | 752 | ||
769 | wait_for_completion(de->pde_unload_completion); | 753 | spin_lock(&de->pde_unload_lock); |
754 | /* | ||
755 | * Stop accepting new callers into module. If you're | ||
756 | * dynamically allocating ->proc_fops, save a pointer somewhere. | ||
757 | */ | ||
758 | de->proc_fops = NULL; | ||
759 | /* Wait until all existing callers into module are done. */ | ||
760 | if (de->pde_users > 0) { | ||
761 | DECLARE_COMPLETION_ONSTACK(c); | ||
762 | |||
763 | if (!de->pde_unload_completion) | ||
764 | de->pde_unload_completion = &c; | ||
770 | 765 | ||
771 | spin_lock(&proc_subdir_lock); | ||
772 | goto continue_removing; | ||
773 | } | ||
774 | spin_unlock(&de->pde_unload_lock); | 766 | spin_unlock(&de->pde_unload_lock); |
775 | 767 | ||
768 | wait_for_completion(de->pde_unload_completion); | ||
769 | |||
770 | goto continue_removing; | ||
771 | } | ||
772 | spin_unlock(&de->pde_unload_lock); | ||
773 | |||
776 | continue_removing: | 774 | continue_removing: |
777 | if (S_ISDIR(de->mode)) | 775 | if (S_ISDIR(de->mode)) |
778 | parent->nlink--; | 776 | parent->nlink--; |
779 | de->nlink = 0; | 777 | de->nlink = 0; |
780 | WARN_ON(de->subdir); | 778 | if (de->subdir) { |
781 | if (atomic_dec_and_test(&de->count)) | 779 | printk(KERN_WARNING "%s: removing non-empty directory " |
782 | free_proc_entry(de); | 780 | "'%s/%s', leaking at least '%s'\n", __func__, |
783 | break; | 781 | de->parent->name, de->name, de->subdir->name); |
782 | WARN_ON(1); | ||
784 | } | 783 | } |
785 | spin_unlock(&proc_subdir_lock); | 784 | if (atomic_dec_and_test(&de->count)) |
786 | out: | 785 | free_proc_entry(de); |
787 | return; | ||
788 | } | 786 | } |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 82b3a1b5a70b..6f4e8dc97da1 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -25,8 +25,7 @@ | |||
25 | 25 | ||
26 | struct proc_dir_entry *de_get(struct proc_dir_entry *de) | 26 | struct proc_dir_entry *de_get(struct proc_dir_entry *de) |
27 | { | 27 | { |
28 | if (de) | 28 | atomic_inc(&de->count); |
29 | atomic_inc(&de->count); | ||
30 | return de; | 29 | return de; |
31 | } | 30 | } |
32 | 31 | ||
@@ -35,18 +34,16 @@ struct proc_dir_entry *de_get(struct proc_dir_entry *de) | |||
35 | */ | 34 | */ |
36 | void de_put(struct proc_dir_entry *de) | 35 | void de_put(struct proc_dir_entry *de) |
37 | { | 36 | { |
38 | if (de) { | 37 | lock_kernel(); |
39 | lock_kernel(); | 38 | if (!atomic_read(&de->count)) { |
40 | if (!atomic_read(&de->count)) { | 39 | printk("de_put: entry %s already free!\n", de->name); |
41 | printk("de_put: entry %s already free!\n", de->name); | ||
42 | unlock_kernel(); | ||
43 | return; | ||
44 | } | ||
45 | |||
46 | if (atomic_dec_and_test(&de->count)) | ||
47 | free_proc_entry(de); | ||
48 | unlock_kernel(); | 40 | unlock_kernel(); |
41 | return; | ||
49 | } | 42 | } |
43 | |||
44 | if (atomic_dec_and_test(&de->count)) | ||
45 | free_proc_entry(de); | ||
46 | unlock_kernel(); | ||
50 | } | 47 | } |
51 | 48 | ||
52 | /* | 49 | /* |
@@ -392,7 +389,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, | |||
392 | { | 389 | { |
393 | struct inode * inode; | 390 | struct inode * inode; |
394 | 391 | ||
395 | if (de != NULL && !try_module_get(de->owner)) | 392 | if (!try_module_get(de->owner)) |
396 | goto out_mod; | 393 | goto out_mod; |
397 | 394 | ||
398 | inode = iget_locked(sb, ino); | 395 | inode = iget_locked(sb, ino); |
@@ -402,30 +399,29 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, | |||
402 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 399 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
403 | PROC_I(inode)->fd = 0; | 400 | PROC_I(inode)->fd = 0; |
404 | PROC_I(inode)->pde = de; | 401 | PROC_I(inode)->pde = de; |
405 | if (de) { | 402 | |
406 | if (de->mode) { | 403 | if (de->mode) { |
407 | inode->i_mode = de->mode; | 404 | inode->i_mode = de->mode; |
408 | inode->i_uid = de->uid; | 405 | inode->i_uid = de->uid; |
409 | inode->i_gid = de->gid; | 406 | inode->i_gid = de->gid; |
410 | } | 407 | } |
411 | if (de->size) | 408 | if (de->size) |
412 | inode->i_size = de->size; | 409 | inode->i_size = de->size; |
413 | if (de->nlink) | 410 | if (de->nlink) |
414 | inode->i_nlink = de->nlink; | 411 | inode->i_nlink = de->nlink; |
415 | if (de->proc_iops) | 412 | if (de->proc_iops) |
416 | inode->i_op = de->proc_iops; | 413 | inode->i_op = de->proc_iops; |
417 | if (de->proc_fops) { | 414 | if (de->proc_fops) { |
418 | if (S_ISREG(inode->i_mode)) { | 415 | if (S_ISREG(inode->i_mode)) { |
419 | #ifdef CONFIG_COMPAT | 416 | #ifdef CONFIG_COMPAT |
420 | if (!de->proc_fops->compat_ioctl) | 417 | if (!de->proc_fops->compat_ioctl) |
421 | inode->i_fop = | 418 | inode->i_fop = |
422 | &proc_reg_file_ops_no_compat; | 419 | &proc_reg_file_ops_no_compat; |
423 | else | 420 | else |
424 | #endif | 421 | #endif |
425 | inode->i_fop = &proc_reg_file_ops; | 422 | inode->i_fop = &proc_reg_file_ops; |
426 | } else { | 423 | } else { |
427 | inode->i_fop = de->proc_fops; | 424 | inode->i_fop = de->proc_fops; |
428 | } | ||
429 | } | 425 | } |
430 | } | 426 | } |
431 | unlock_new_inode(inode); | 427 | unlock_new_inode(inode); |
@@ -433,8 +429,7 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino, | |||
433 | return inode; | 429 | return inode; |
434 | 430 | ||
435 | out_ino: | 431 | out_ino: |
436 | if (de != NULL) | 432 | module_put(de->owner); |
437 | module_put(de->owner); | ||
438 | out_mod: | 433 | out_mod: |
439 | return NULL; | 434 | return NULL; |
440 | } | 435 | } |
diff --git a/fs/proc/internal.h b/fs/proc/internal.h index bc72f5c8c47d..28cbca805905 100644 --- a/fs/proc/internal.h +++ b/fs/proc/internal.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/proc_fs.h> | 12 | #include <linux/proc_fs.h> |
13 | 13 | ||
14 | extern struct proc_dir_entry proc_root; | ||
14 | #ifdef CONFIG_PROC_SYSCTL | 15 | #ifdef CONFIG_PROC_SYSCTL |
15 | extern int proc_sys_init(void); | 16 | extern int proc_sys_init(void); |
16 | #else | 17 | #else |
@@ -46,9 +47,6 @@ extern int nommu_vma_show(struct seq_file *, struct vm_area_struct *); | |||
46 | 47 | ||
47 | extern int maps_protect; | 48 | extern int maps_protect; |
48 | 49 | ||
49 | extern void create_seq_entry(char *name, mode_t mode, | ||
50 | const struct file_operations *f); | ||
51 | extern int proc_exe_link(struct inode *, struct path *); | ||
52 | extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, | 50 | extern int proc_tid_stat(struct seq_file *m, struct pid_namespace *ns, |
53 | struct pid *pid, struct task_struct *task); | 51 | struct pid *pid, struct task_struct *task); |
54 | extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, | 52 | extern int proc_tgid_stat(struct seq_file *m, struct pid_namespace *ns, |
diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c index 941e95114b5a..79ecd281d2cb 100644 --- a/fs/proc/nommu.c +++ b/fs/proc/nommu.c | |||
@@ -137,7 +137,7 @@ static const struct file_operations proc_nommu_vma_list_operations = { | |||
137 | 137 | ||
138 | static int __init proc_nommu_init(void) | 138 | static int __init proc_nommu_init(void) |
139 | { | 139 | { |
140 | create_seq_entry("maps", S_IRUGO, &proc_nommu_vma_list_operations); | 140 | proc_create("maps", S_IRUGO, NULL, &proc_nommu_vma_list_operations); |
141 | return 0; | 141 | return 0; |
142 | } | 142 | } |
143 | 143 | ||
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c index 441a32f0e5f2..48bcf20cec2f 100644 --- a/fs/proc/proc_misc.c +++ b/fs/proc/proc_misc.c | |||
@@ -826,14 +826,6 @@ static struct file_operations proc_kpageflags_operations = { | |||
826 | 826 | ||
827 | struct proc_dir_entry *proc_root_kcore; | 827 | struct proc_dir_entry *proc_root_kcore; |
828 | 828 | ||
829 | void create_seq_entry(char *name, mode_t mode, const struct file_operations *f) | ||
830 | { | ||
831 | struct proc_dir_entry *entry; | ||
832 | entry = create_proc_entry(name, mode, NULL); | ||
833 | if (entry) | ||
834 | entry->proc_fops = f; | ||
835 | } | ||
836 | |||
837 | void __init proc_misc_init(void) | 829 | void __init proc_misc_init(void) |
838 | { | 830 | { |
839 | static struct { | 831 | static struct { |
@@ -862,66 +854,52 @@ void __init proc_misc_init(void) | |||
862 | 854 | ||
863 | /* And now for trickier ones */ | 855 | /* And now for trickier ones */ |
864 | #ifdef CONFIG_PRINTK | 856 | #ifdef CONFIG_PRINTK |
865 | { | 857 | proc_create("kmsg", S_IRUSR, NULL, &proc_kmsg_operations); |
866 | struct proc_dir_entry *entry; | ||
867 | entry = create_proc_entry("kmsg", S_IRUSR, &proc_root); | ||
868 | if (entry) | ||
869 | entry->proc_fops = &proc_kmsg_operations; | ||
870 | } | ||
871 | #endif | 858 | #endif |
872 | create_seq_entry("locks", 0, &proc_locks_operations); | 859 | proc_create("locks", 0, NULL, &proc_locks_operations); |
873 | create_seq_entry("devices", 0, &proc_devinfo_operations); | 860 | proc_create("devices", 0, NULL, &proc_devinfo_operations); |
874 | create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations); | 861 | proc_create("cpuinfo", 0, NULL, &proc_cpuinfo_operations); |
875 | #ifdef CONFIG_BLOCK | 862 | #ifdef CONFIG_BLOCK |
876 | create_seq_entry("partitions", 0, &proc_partitions_operations); | 863 | proc_create("partitions", 0, NULL, &proc_partitions_operations); |
877 | #endif | 864 | #endif |
878 | create_seq_entry("stat", 0, &proc_stat_operations); | 865 | proc_create("stat", 0, NULL, &proc_stat_operations); |
879 | create_seq_entry("interrupts", 0, &proc_interrupts_operations); | 866 | proc_create("interrupts", 0, NULL, &proc_interrupts_operations); |
880 | #ifdef CONFIG_SLABINFO | 867 | #ifdef CONFIG_SLABINFO |
881 | create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations); | 868 | proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations); |
882 | #ifdef CONFIG_DEBUG_SLAB_LEAK | 869 | #ifdef CONFIG_DEBUG_SLAB_LEAK |
883 | create_seq_entry("slab_allocators", 0 ,&proc_slabstats_operations); | 870 | proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); |
884 | #endif | 871 | #endif |
885 | #endif | 872 | #endif |
886 | #ifdef CONFIG_MMU | 873 | #ifdef CONFIG_MMU |
887 | proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); | 874 | proc_create("vmallocinfo", S_IRUSR, NULL, &proc_vmalloc_operations); |
888 | #endif | 875 | #endif |
889 | create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations); | 876 | proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); |
890 | create_seq_entry("pagetypeinfo", S_IRUGO, &pagetypeinfo_file_ops); | 877 | proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); |
891 | create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations); | 878 | proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); |
892 | create_seq_entry("zoneinfo",S_IRUGO, &proc_zoneinfo_file_operations); | 879 | proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); |
893 | #ifdef CONFIG_BLOCK | 880 | #ifdef CONFIG_BLOCK |
894 | create_seq_entry("diskstats", 0, &proc_diskstats_operations); | 881 | proc_create("diskstats", 0, NULL, &proc_diskstats_operations); |
895 | #endif | 882 | #endif |
896 | #ifdef CONFIG_MODULES | 883 | #ifdef CONFIG_MODULES |
897 | create_seq_entry("modules", 0, &proc_modules_operations); | 884 | proc_create("modules", 0, NULL, &proc_modules_operations); |
898 | #endif | 885 | #endif |
899 | #ifdef CONFIG_SCHEDSTATS | 886 | #ifdef CONFIG_SCHEDSTATS |
900 | create_seq_entry("schedstat", 0, &proc_schedstat_operations); | 887 | proc_create("schedstat", 0, NULL, &proc_schedstat_operations); |
901 | #endif | 888 | #endif |
902 | #ifdef CONFIG_PROC_KCORE | 889 | #ifdef CONFIG_PROC_KCORE |
903 | proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL); | 890 | proc_root_kcore = proc_create("kcore", S_IRUSR, NULL, &proc_kcore_operations); |
904 | if (proc_root_kcore) { | 891 | if (proc_root_kcore) |
905 | proc_root_kcore->proc_fops = &proc_kcore_operations; | ||
906 | proc_root_kcore->size = | 892 | proc_root_kcore->size = |
907 | (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; | 893 | (size_t)high_memory - PAGE_OFFSET + PAGE_SIZE; |
908 | } | ||
909 | #endif | 894 | #endif |
910 | #ifdef CONFIG_PROC_PAGE_MONITOR | 895 | #ifdef CONFIG_PROC_PAGE_MONITOR |
911 | create_seq_entry("kpagecount", S_IRUSR, &proc_kpagecount_operations); | 896 | proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations); |
912 | create_seq_entry("kpageflags", S_IRUSR, &proc_kpageflags_operations); | 897 | proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations); |
913 | #endif | 898 | #endif |
914 | #ifdef CONFIG_PROC_VMCORE | 899 | #ifdef CONFIG_PROC_VMCORE |
915 | proc_vmcore = create_proc_entry("vmcore", S_IRUSR, NULL); | 900 | proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations); |
916 | if (proc_vmcore) | ||
917 | proc_vmcore->proc_fops = &proc_vmcore_operations; | ||
918 | #endif | 901 | #endif |
919 | #ifdef CONFIG_MAGIC_SYSRQ | 902 | #ifdef CONFIG_MAGIC_SYSRQ |
920 | { | 903 | proc_create("sysrq-trigger", S_IWUSR, NULL, &proc_sysrq_trigger_operations); |
921 | struct proc_dir_entry *entry; | ||
922 | entry = create_proc_entry("sysrq-trigger", S_IWUSR, NULL); | ||
923 | if (entry) | ||
924 | entry->proc_fops = &proc_sysrq_trigger_operations; | ||
925 | } | ||
926 | #endif | 904 | #endif |
927 | } | 905 | } |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 614c34b6d1c2..5acc001d49f6 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
@@ -165,8 +165,8 @@ out: | |||
165 | return err; | 165 | return err; |
166 | } | 166 | } |
167 | 167 | ||
168 | static ssize_t proc_sys_read(struct file *filp, char __user *buf, | 168 | static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf, |
169 | size_t count, loff_t *ppos) | 169 | size_t count, loff_t *ppos, int write) |
170 | { | 170 | { |
171 | struct dentry *dentry = filp->f_dentry; | 171 | struct dentry *dentry = filp->f_dentry; |
172 | struct ctl_table_header *head; | 172 | struct ctl_table_header *head; |
@@ -190,12 +190,12 @@ static ssize_t proc_sys_read(struct file *filp, char __user *buf, | |||
190 | * and won't be until we finish. | 190 | * and won't be until we finish. |
191 | */ | 191 | */ |
192 | error = -EPERM; | 192 | error = -EPERM; |
193 | if (sysctl_perm(table, MAY_READ)) | 193 | if (sysctl_perm(head->root, table, write ? MAY_WRITE : MAY_READ)) |
194 | goto out; | 194 | goto out; |
195 | 195 | ||
196 | /* careful: calling conventions are nasty here */ | 196 | /* careful: calling conventions are nasty here */ |
197 | res = count; | 197 | res = count; |
198 | error = table->proc_handler(table, 0, filp, buf, &res, ppos); | 198 | error = table->proc_handler(table, write, filp, buf, &res, ppos); |
199 | if (!error) | 199 | if (!error) |
200 | error = res; | 200 | error = res; |
201 | out: | 201 | out: |
@@ -204,44 +204,16 @@ out: | |||
204 | return error; | 204 | return error; |
205 | } | 205 | } |
206 | 206 | ||
207 | static ssize_t proc_sys_write(struct file *filp, const char __user *buf, | 207 | static ssize_t proc_sys_read(struct file *filp, char __user *buf, |
208 | size_t count, loff_t *ppos) | 208 | size_t count, loff_t *ppos) |
209 | { | 209 | { |
210 | struct dentry *dentry = filp->f_dentry; | 210 | return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 0); |
211 | struct ctl_table_header *head; | 211 | } |
212 | struct ctl_table *table; | ||
213 | ssize_t error; | ||
214 | size_t res; | ||
215 | |||
216 | table = do_proc_sys_lookup(dentry->d_parent, &dentry->d_name, &head); | ||
217 | /* Has the sysctl entry disappeared on us? */ | ||
218 | error = -ENOENT; | ||
219 | if (!table) | ||
220 | goto out; | ||
221 | |||
222 | /* Has the sysctl entry been replaced by a directory? */ | ||
223 | error = -EISDIR; | ||
224 | if (!table->proc_handler) | ||
225 | goto out; | ||
226 | |||
227 | /* | ||
228 | * At this point we know that the sysctl was not unregistered | ||
229 | * and won't be until we finish. | ||
230 | */ | ||
231 | error = -EPERM; | ||
232 | if (sysctl_perm(table, MAY_WRITE)) | ||
233 | goto out; | ||
234 | |||
235 | /* careful: calling conventions are nasty here */ | ||
236 | res = count; | ||
237 | error = table->proc_handler(table, 1, filp, (char __user *)buf, | ||
238 | &res, ppos); | ||
239 | if (!error) | ||
240 | error = res; | ||
241 | out: | ||
242 | sysctl_head_finish(head); | ||
243 | 212 | ||
244 | return error; | 213 | static ssize_t proc_sys_write(struct file *filp, const char __user *buf, |
214 | size_t count, loff_t *ppos) | ||
215 | { | ||
216 | return proc_sys_call_handler(filp, (void __user *)buf, count, ppos, 1); | ||
245 | } | 217 | } |
246 | 218 | ||
247 | 219 | ||
@@ -416,7 +388,7 @@ static int proc_sys_permission(struct inode *inode, int mask, struct nameidata * | |||
416 | goto out; | 388 | goto out; |
417 | 389 | ||
418 | /* Use the permissions on the sysctl table entry */ | 390 | /* Use the permissions on the sysctl table entry */ |
419 | error = sysctl_perm(table, mask); | 391 | error = sysctl_perm(head->root, table, mask); |
420 | out: | 392 | out: |
421 | sysctl_head_finish(head); | 393 | sysctl_head_finish(head); |
422 | return error; | 394 | return error; |
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c index 49816e00b51a..ac26ccc25f42 100644 --- a/fs/proc/proc_tty.c +++ b/fs/proc/proc_tty.c | |||
@@ -5,7 +5,7 @@ | |||
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <asm/uaccess.h> | 7 | #include <asm/uaccess.h> |
8 | 8 | #include <linux/module.h> | |
9 | #include <linux/init.h> | 9 | #include <linux/init.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
@@ -136,39 +136,54 @@ static const struct file_operations proc_tty_drivers_operations = { | |||
136 | .release = seq_release, | 136 | .release = seq_release, |
137 | }; | 137 | }; |
138 | 138 | ||
139 | /* | 139 | static void * tty_ldiscs_seq_start(struct seq_file *m, loff_t *pos) |
140 | * This is the handler for /proc/tty/ldiscs | ||
141 | */ | ||
142 | static int tty_ldiscs_read_proc(char *page, char **start, off_t off, | ||
143 | int count, int *eof, void *data) | ||
144 | { | 140 | { |
145 | int i; | 141 | return (*pos < NR_LDISCS) ? pos : NULL; |
146 | int len = 0; | 142 | } |
147 | off_t begin = 0; | 143 | |
144 | static void * tty_ldiscs_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
145 | { | ||
146 | (*pos)++; | ||
147 | return (*pos < NR_LDISCS) ? pos : NULL; | ||
148 | } | ||
149 | |||
150 | static void tty_ldiscs_seq_stop(struct seq_file *m, void *v) | ||
151 | { | ||
152 | } | ||
153 | |||
154 | static int tty_ldiscs_seq_show(struct seq_file *m, void *v) | ||
155 | { | ||
156 | int i = *(loff_t *)v; | ||
148 | struct tty_ldisc *ld; | 157 | struct tty_ldisc *ld; |
149 | 158 | ||
150 | for (i=0; i < NR_LDISCS; i++) { | 159 | ld = tty_ldisc_get(i); |
151 | ld = tty_ldisc_get(i); | 160 | if (ld == NULL) |
152 | if (ld == NULL) | ||
153 | continue; | ||
154 | len += sprintf(page+len, "%-10s %2d\n", | ||
155 | ld->name ? ld->name : "???", i); | ||
156 | tty_ldisc_put(i); | ||
157 | if (len+begin > off+count) | ||
158 | break; | ||
159 | if (len+begin < off) { | ||
160 | begin += len; | ||
161 | len = 0; | ||
162 | } | ||
163 | } | ||
164 | if (i >= NR_LDISCS) | ||
165 | *eof = 1; | ||
166 | if (off >= len+begin) | ||
167 | return 0; | 161 | return 0; |
168 | *start = page + (off-begin); | 162 | seq_printf(m, "%-10s %2d\n", ld->name ? ld->name : "???", i); |
169 | return ((count < begin+len-off) ? count : begin+len-off); | 163 | tty_ldisc_put(i); |
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static const struct seq_operations tty_ldiscs_seq_ops = { | ||
168 | .start = tty_ldiscs_seq_start, | ||
169 | .next = tty_ldiscs_seq_next, | ||
170 | .stop = tty_ldiscs_seq_stop, | ||
171 | .show = tty_ldiscs_seq_show, | ||
172 | }; | ||
173 | |||
174 | static int proc_tty_ldiscs_open(struct inode *inode, struct file *file) | ||
175 | { | ||
176 | return seq_open(file, &tty_ldiscs_seq_ops); | ||
170 | } | 177 | } |
171 | 178 | ||
179 | static const struct file_operations tty_ldiscs_proc_fops = { | ||
180 | .owner = THIS_MODULE, | ||
181 | .open = proc_tty_ldiscs_open, | ||
182 | .read = seq_read, | ||
183 | .llseek = seq_lseek, | ||
184 | .release = seq_release, | ||
185 | }; | ||
186 | |||
172 | /* | 187 | /* |
173 | * This function is called by tty_register_driver() to handle | 188 | * This function is called by tty_register_driver() to handle |
174 | * registering the driver's /proc handler into /proc/tty/driver/<foo> | 189 | * registering the driver's /proc handler into /proc/tty/driver/<foo> |
@@ -214,7 +229,6 @@ void proc_tty_unregister_driver(struct tty_driver *driver) | |||
214 | */ | 229 | */ |
215 | void __init proc_tty_init(void) | 230 | void __init proc_tty_init(void) |
216 | { | 231 | { |
217 | struct proc_dir_entry *entry; | ||
218 | if (!proc_mkdir("tty", NULL)) | 232 | if (!proc_mkdir("tty", NULL)) |
219 | return; | 233 | return; |
220 | proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL); | 234 | proc_tty_ldisc = proc_mkdir("tty/ldisc", NULL); |
@@ -224,10 +238,7 @@ void __init proc_tty_init(void) | |||
224 | * password lengths and inter-keystroke timings during password | 238 | * password lengths and inter-keystroke timings during password |
225 | * entry. | 239 | * entry. |
226 | */ | 240 | */ |
227 | proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR | S_IXUSR, NULL); | 241 | proc_tty_driver = proc_mkdir_mode("tty/driver", S_IRUSR|S_IXUSR, NULL); |
228 | 242 | proc_create("tty/ldiscs", 0, NULL, &tty_ldiscs_proc_fops); | |
229 | create_proc_read_entry("tty/ldiscs", 0, NULL, tty_ldiscs_read_proc, NULL); | 243 | proc_create("tty/drivers", 0, NULL, &proc_tty_drivers_operations); |
230 | entry = create_proc_entry("tty/drivers", 0, NULL); | ||
231 | if (entry) | ||
232 | entry->proc_fops = &proc_tty_drivers_operations; | ||
233 | } | 244 | } |
diff --git a/fs/proc/root.c b/fs/proc/root.c index ef0fb57fc9ef..95117538a4f6 100644 --- a/fs/proc/root.c +++ b/fs/proc/root.c | |||
@@ -22,8 +22,6 @@ | |||
22 | 22 | ||
23 | #include "internal.h" | 23 | #include "internal.h" |
24 | 24 | ||
25 | struct proc_dir_entry *proc_bus, *proc_root_fs, *proc_root_driver; | ||
26 | |||
27 | static int proc_test_super(struct super_block *sb, void *data) | 25 | static int proc_test_super(struct super_block *sb, void *data) |
28 | { | 26 | { |
29 | return sb->s_fs_info == data; | 27 | return sb->s_fs_info == data; |
@@ -126,8 +124,8 @@ void __init proc_root_init(void) | |||
126 | #ifdef CONFIG_SYSVIPC | 124 | #ifdef CONFIG_SYSVIPC |
127 | proc_mkdir("sysvipc", NULL); | 125 | proc_mkdir("sysvipc", NULL); |
128 | #endif | 126 | #endif |
129 | proc_root_fs = proc_mkdir("fs", NULL); | 127 | proc_mkdir("fs", NULL); |
130 | proc_root_driver = proc_mkdir("driver", NULL); | 128 | proc_mkdir("driver", NULL); |
131 | proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ | 129 | proc_mkdir("fs/nfsd", NULL); /* somewhere for the nfsd filesystem to be mounted */ |
132 | #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) | 130 | #if defined(CONFIG_SUN_OPENPROMFS) || defined(CONFIG_SUN_OPENPROMFS_MODULE) |
133 | /* just give it a mountpoint */ | 131 | /* just give it a mountpoint */ |
@@ -137,7 +135,7 @@ void __init proc_root_init(void) | |||
137 | #ifdef CONFIG_PROC_DEVICETREE | 135 | #ifdef CONFIG_PROC_DEVICETREE |
138 | proc_device_tree_init(); | 136 | proc_device_tree_init(); |
139 | #endif | 137 | #endif |
140 | proc_bus = proc_mkdir("bus", NULL); | 138 | proc_mkdir("bus", NULL); |
141 | proc_sys_init(); | 139 | proc_sys_init(); |
142 | } | 140 | } |
143 | 141 | ||
@@ -232,9 +230,5 @@ void pid_ns_release_proc(struct pid_namespace *ns) | |||
232 | EXPORT_SYMBOL(proc_symlink); | 230 | EXPORT_SYMBOL(proc_symlink); |
233 | EXPORT_SYMBOL(proc_mkdir); | 231 | EXPORT_SYMBOL(proc_mkdir); |
234 | EXPORT_SYMBOL(create_proc_entry); | 232 | EXPORT_SYMBOL(create_proc_entry); |
235 | EXPORT_SYMBOL(proc_create); | 233 | EXPORT_SYMBOL(proc_create_data); |
236 | EXPORT_SYMBOL(remove_proc_entry); | 234 | EXPORT_SYMBOL(remove_proc_entry); |
237 | EXPORT_SYMBOL(proc_root); | ||
238 | EXPORT_SYMBOL(proc_root_fs); | ||
239 | EXPORT_SYMBOL(proc_bus); | ||
240 | EXPORT_SYMBOL(proc_root_driver); | ||
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 7415eeb7cc3a..e2b8e769f510 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -75,40 +75,6 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
75 | return mm->total_vm; | 75 | return mm->total_vm; |
76 | } | 76 | } |
77 | 77 | ||
78 | int proc_exe_link(struct inode *inode, struct path *path) | ||
79 | { | ||
80 | struct vm_area_struct * vma; | ||
81 | int result = -ENOENT; | ||
82 | struct task_struct *task = get_proc_task(inode); | ||
83 | struct mm_struct * mm = NULL; | ||
84 | |||
85 | if (task) { | ||
86 | mm = get_task_mm(task); | ||
87 | put_task_struct(task); | ||
88 | } | ||
89 | if (!mm) | ||
90 | goto out; | ||
91 | down_read(&mm->mmap_sem); | ||
92 | |||
93 | vma = mm->mmap; | ||
94 | while (vma) { | ||
95 | if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) | ||
96 | break; | ||
97 | vma = vma->vm_next; | ||
98 | } | ||
99 | |||
100 | if (vma) { | ||
101 | *path = vma->vm_file->f_path; | ||
102 | path_get(&vma->vm_file->f_path); | ||
103 | result = 0; | ||
104 | } | ||
105 | |||
106 | up_read(&mm->mmap_sem); | ||
107 | mmput(mm); | ||
108 | out: | ||
109 | return result; | ||
110 | } | ||
111 | |||
112 | static void pad_len_spaces(struct seq_file *m, int len) | 78 | static void pad_len_spaces(struct seq_file *m, int len) |
113 | { | 79 | { |
114 | len = 25 + sizeof(void*) * 6 - len; | 80 | len = 25 + sizeof(void*) * 6 - len; |
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c index 8011528518bd..4b733f108455 100644 --- a/fs/proc/task_nommu.c +++ b/fs/proc/task_nommu.c | |||
@@ -103,40 +103,6 @@ int task_statm(struct mm_struct *mm, int *shared, int *text, | |||
103 | return size; | 103 | return size; |
104 | } | 104 | } |
105 | 105 | ||
106 | int proc_exe_link(struct inode *inode, struct path *path) | ||
107 | { | ||
108 | struct vm_list_struct *vml; | ||
109 | struct vm_area_struct *vma; | ||
110 | struct task_struct *task = get_proc_task(inode); | ||
111 | struct mm_struct *mm = get_task_mm(task); | ||
112 | int result = -ENOENT; | ||
113 | |||
114 | if (!mm) | ||
115 | goto out; | ||
116 | down_read(&mm->mmap_sem); | ||
117 | |||
118 | vml = mm->context.vmlist; | ||
119 | vma = NULL; | ||
120 | while (vml) { | ||
121 | if ((vml->vma->vm_flags & VM_EXECUTABLE) && vml->vma->vm_file) { | ||
122 | vma = vml->vma; | ||
123 | break; | ||
124 | } | ||
125 | vml = vml->next; | ||
126 | } | ||
127 | |||
128 | if (vma) { | ||
129 | *path = vma->vm_file->f_path; | ||
130 | path_get(&vma->vm_file->f_path); | ||
131 | result = 0; | ||
132 | } | ||
133 | |||
134 | up_read(&mm->mmap_sem); | ||
135 | mmput(mm); | ||
136 | out: | ||
137 | return result; | ||
138 | } | ||
139 | |||
140 | /* | 106 | /* |
141 | * display mapping lines for a particular process's /proc/pid/maps | 107 | * display mapping lines for a particular process's /proc/pid/maps |
142 | */ | 108 | */ |
diff --git a/fs/ramfs/file-mmu.c b/fs/ramfs/file-mmu.c index b41a514b0976..9590b9024300 100644 --- a/fs/ramfs/file-mmu.c +++ b/fs/ramfs/file-mmu.c | |||
@@ -26,6 +26,9 @@ | |||
26 | 26 | ||
27 | #include <linux/fs.h> | 27 | #include <linux/fs.h> |
28 | #include <linux/mm.h> | 28 | #include <linux/mm.h> |
29 | #include <linux/ramfs.h> | ||
30 | |||
31 | #include "internal.h" | ||
29 | 32 | ||
30 | const struct address_space_operations ramfs_aops = { | 33 | const struct address_space_operations ramfs_aops = { |
31 | .readpage = simple_readpage, | 34 | .readpage = simple_readpage, |
diff --git a/fs/ramfs/internal.h b/fs/ramfs/internal.h index af7cc074a476..6b330639b51d 100644 --- a/fs/ramfs/internal.h +++ b/fs/ramfs/internal.h | |||
@@ -11,5 +11,4 @@ | |||
11 | 11 | ||
12 | 12 | ||
13 | extern const struct address_space_operations ramfs_aops; | 13 | extern const struct address_space_operations ramfs_aops; |
14 | extern const struct file_operations ramfs_file_operations; | ||
15 | extern const struct inode_operations ramfs_file_inode_operations; | 14 | extern const struct inode_operations ramfs_file_inode_operations; |
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c index 8f86c52b30d8..b9dbeeca7049 100644 --- a/fs/reiserfs/procfs.c +++ b/fs/reiserfs/procfs.c | |||
@@ -467,6 +467,7 @@ static const struct file_operations r_file_operations = { | |||
467 | .read = seq_read, | 467 | .read = seq_read, |
468 | .llseek = seq_lseek, | 468 | .llseek = seq_lseek, |
469 | .release = seq_release, | 469 | .release = seq_release, |
470 | .owner = THIS_MODULE, | ||
470 | }; | 471 | }; |
471 | 472 | ||
472 | static struct proc_dir_entry *proc_info_root = NULL; | 473 | static struct proc_dir_entry *proc_info_root = NULL; |
@@ -475,12 +476,8 @@ static const char proc_info_root_name[] = "fs/reiserfs"; | |||
475 | static void add_file(struct super_block *sb, char *name, | 476 | static void add_file(struct super_block *sb, char *name, |
476 | int (*func) (struct seq_file *, struct super_block *)) | 477 | int (*func) (struct seq_file *, struct super_block *)) |
477 | { | 478 | { |
478 | struct proc_dir_entry *de; | 479 | proc_create_data(name, 0, REISERFS_SB(sb)->procdir, |
479 | de = create_proc_entry(name, 0, REISERFS_SB(sb)->procdir); | 480 | &r_file_operations, func); |
480 | if (de) { | ||
481 | de->data = func; | ||
482 | de->proc_fops = &r_file_operations; | ||
483 | } | ||
484 | } | 481 | } |
485 | 482 | ||
486 | int reiserfs_proc_info_init(struct super_block *sb) | 483 | int reiserfs_proc_info_init(struct super_block *sb) |
diff --git a/fs/super.c b/fs/super.c index a5a4aca7e22f..453877c5697b 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -117,7 +117,7 @@ static inline void destroy_super(struct super_block *s) | |||
117 | * Drop a superblock's refcount. Returns non-zero if the superblock was | 117 | * Drop a superblock's refcount. Returns non-zero if the superblock was |
118 | * destroyed. The caller must hold sb_lock. | 118 | * destroyed. The caller must hold sb_lock. |
119 | */ | 119 | */ |
120 | int __put_super(struct super_block *sb) | 120 | static int __put_super(struct super_block *sb) |
121 | { | 121 | { |
122 | int ret = 0; | 122 | int ret = 0; |
123 | 123 | ||
@@ -64,7 +64,7 @@ int file_fsync(struct file *filp, struct dentry *dentry, int datasync) | |||
64 | /* sync the superblock to buffers */ | 64 | /* sync the superblock to buffers */ |
65 | sb = inode->i_sb; | 65 | sb = inode->i_sb; |
66 | lock_super(sb); | 66 | lock_super(sb); |
67 | if (sb->s_op->write_super) | 67 | if (sb->s_dirt && sb->s_op->write_super) |
68 | sb->s_op->write_super(sb); | 68 | sb->s_op->write_super(sb); |
69 | unlock_super(sb); | 69 | unlock_super(sb); |
70 | 70 | ||
diff --git a/fs/timerfd.c b/fs/timerfd.c index 10c80b59ec4b..5400524e9cb1 100644 --- a/fs/timerfd.c +++ b/fs/timerfd.c | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/hrtimer.h> | 20 | #include <linux/hrtimer.h> |
21 | #include <linux/anon_inodes.h> | 21 | #include <linux/anon_inodes.h> |
22 | #include <linux/timerfd.h> | 22 | #include <linux/timerfd.h> |
23 | #include <linux/syscalls.h> | ||
23 | 24 | ||
24 | struct timerfd_ctx { | 25 | struct timerfd_ctx { |
25 | struct hrtimer tmr; | 26 | struct hrtimer tmr; |
diff --git a/fs/xattr.c b/fs/xattr.c index 89a942f07e1b..4706a8b1f495 100644 --- a/fs/xattr.c +++ b/fs/xattr.c | |||
@@ -67,7 +67,7 @@ xattr_permission(struct inode *inode, const char *name, int mask) | |||
67 | } | 67 | } |
68 | 68 | ||
69 | int | 69 | int |
70 | vfs_setxattr(struct dentry *dentry, char *name, void *value, | 70 | vfs_setxattr(struct dentry *dentry, const char *name, const void *value, |
71 | size_t size, int flags) | 71 | size_t size, int flags) |
72 | { | 72 | { |
73 | struct inode *inode = dentry->d_inode; | 73 | struct inode *inode = dentry->d_inode; |
@@ -131,7 +131,7 @@ out_noalloc: | |||
131 | EXPORT_SYMBOL_GPL(xattr_getsecurity); | 131 | EXPORT_SYMBOL_GPL(xattr_getsecurity); |
132 | 132 | ||
133 | ssize_t | 133 | ssize_t |
134 | vfs_getxattr(struct dentry *dentry, char *name, void *value, size_t size) | 134 | vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) |
135 | { | 135 | { |
136 | struct inode *inode = dentry->d_inode; | 136 | struct inode *inode = dentry->d_inode; |
137 | int error; | 137 | int error; |
@@ -187,7 +187,7 @@ vfs_listxattr(struct dentry *d, char *list, size_t size) | |||
187 | EXPORT_SYMBOL_GPL(vfs_listxattr); | 187 | EXPORT_SYMBOL_GPL(vfs_listxattr); |
188 | 188 | ||
189 | int | 189 | int |
190 | vfs_removexattr(struct dentry *dentry, char *name) | 190 | vfs_removexattr(struct dentry *dentry, const char *name) |
191 | { | 191 | { |
192 | struct inode *inode = dentry->d_inode; | 192 | struct inode *inode = dentry->d_inode; |
193 | int error; | 193 | int error; |
@@ -218,7 +218,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr); | |||
218 | * Extended attribute SET operations | 218 | * Extended attribute SET operations |
219 | */ | 219 | */ |
220 | static long | 220 | static long |
221 | setxattr(struct dentry *d, char __user *name, void __user *value, | 221 | setxattr(struct dentry *d, const char __user *name, const void __user *value, |
222 | size_t size, int flags) | 222 | size_t size, int flags) |
223 | { | 223 | { |
224 | int error; | 224 | int error; |
@@ -252,8 +252,8 @@ setxattr(struct dentry *d, char __user *name, void __user *value, | |||
252 | } | 252 | } |
253 | 253 | ||
254 | asmlinkage long | 254 | asmlinkage long |
255 | sys_setxattr(char __user *path, char __user *name, void __user *value, | 255 | sys_setxattr(const char __user *path, const char __user *name, |
256 | size_t size, int flags) | 256 | const void __user *value, size_t size, int flags) |
257 | { | 257 | { |
258 | struct nameidata nd; | 258 | struct nameidata nd; |
259 | int error; | 259 | int error; |
@@ -271,8 +271,8 @@ sys_setxattr(char __user *path, char __user *name, void __user *value, | |||
271 | } | 271 | } |
272 | 272 | ||
273 | asmlinkage long | 273 | asmlinkage long |
274 | sys_lsetxattr(char __user *path, char __user *name, void __user *value, | 274 | sys_lsetxattr(const char __user *path, const char __user *name, |
275 | size_t size, int flags) | 275 | const void __user *value, size_t size, int flags) |
276 | { | 276 | { |
277 | struct nameidata nd; | 277 | struct nameidata nd; |
278 | int error; | 278 | int error; |
@@ -290,7 +290,7 @@ sys_lsetxattr(char __user *path, char __user *name, void __user *value, | |||
290 | } | 290 | } |
291 | 291 | ||
292 | asmlinkage long | 292 | asmlinkage long |
293 | sys_fsetxattr(int fd, char __user *name, void __user *value, | 293 | sys_fsetxattr(int fd, const char __user *name, const void __user *value, |
294 | size_t size, int flags) | 294 | size_t size, int flags) |
295 | { | 295 | { |
296 | struct file *f; | 296 | struct file *f; |
@@ -315,7 +315,8 @@ sys_fsetxattr(int fd, char __user *name, void __user *value, | |||
315 | * Extended attribute GET operations | 315 | * Extended attribute GET operations |
316 | */ | 316 | */ |
317 | static ssize_t | 317 | static ssize_t |
318 | getxattr(struct dentry *d, char __user *name, void __user *value, size_t size) | 318 | getxattr(struct dentry *d, const char __user *name, void __user *value, |
319 | size_t size) | ||
319 | { | 320 | { |
320 | ssize_t error; | 321 | ssize_t error; |
321 | void *kvalue = NULL; | 322 | void *kvalue = NULL; |
@@ -349,8 +350,8 @@ getxattr(struct dentry *d, char __user *name, void __user *value, size_t size) | |||
349 | } | 350 | } |
350 | 351 | ||
351 | asmlinkage ssize_t | 352 | asmlinkage ssize_t |
352 | sys_getxattr(char __user *path, char __user *name, void __user *value, | 353 | sys_getxattr(const char __user *path, const char __user *name, |
353 | size_t size) | 354 | void __user *value, size_t size) |
354 | { | 355 | { |
355 | struct nameidata nd; | 356 | struct nameidata nd; |
356 | ssize_t error; | 357 | ssize_t error; |
@@ -364,7 +365,7 @@ sys_getxattr(char __user *path, char __user *name, void __user *value, | |||
364 | } | 365 | } |
365 | 366 | ||
366 | asmlinkage ssize_t | 367 | asmlinkage ssize_t |
367 | sys_lgetxattr(char __user *path, char __user *name, void __user *value, | 368 | sys_lgetxattr(const char __user *path, const char __user *name, void __user *value, |
368 | size_t size) | 369 | size_t size) |
369 | { | 370 | { |
370 | struct nameidata nd; | 371 | struct nameidata nd; |
@@ -379,7 +380,7 @@ sys_lgetxattr(char __user *path, char __user *name, void __user *value, | |||
379 | } | 380 | } |
380 | 381 | ||
381 | asmlinkage ssize_t | 382 | asmlinkage ssize_t |
382 | sys_fgetxattr(int fd, char __user *name, void __user *value, size_t size) | 383 | sys_fgetxattr(int fd, const char __user *name, void __user *value, size_t size) |
383 | { | 384 | { |
384 | struct file *f; | 385 | struct file *f; |
385 | ssize_t error = -EBADF; | 386 | ssize_t error = -EBADF; |
@@ -424,7 +425,7 @@ listxattr(struct dentry *d, char __user *list, size_t size) | |||
424 | } | 425 | } |
425 | 426 | ||
426 | asmlinkage ssize_t | 427 | asmlinkage ssize_t |
427 | sys_listxattr(char __user *path, char __user *list, size_t size) | 428 | sys_listxattr(const char __user *path, char __user *list, size_t size) |
428 | { | 429 | { |
429 | struct nameidata nd; | 430 | struct nameidata nd; |
430 | ssize_t error; | 431 | ssize_t error; |
@@ -438,7 +439,7 @@ sys_listxattr(char __user *path, char __user *list, size_t size) | |||
438 | } | 439 | } |
439 | 440 | ||
440 | asmlinkage ssize_t | 441 | asmlinkage ssize_t |
441 | sys_llistxattr(char __user *path, char __user *list, size_t size) | 442 | sys_llistxattr(const char __user *path, char __user *list, size_t size) |
442 | { | 443 | { |
443 | struct nameidata nd; | 444 | struct nameidata nd; |
444 | ssize_t error; | 445 | ssize_t error; |
@@ -470,7 +471,7 @@ sys_flistxattr(int fd, char __user *list, size_t size) | |||
470 | * Extended attribute REMOVE operations | 471 | * Extended attribute REMOVE operations |
471 | */ | 472 | */ |
472 | static long | 473 | static long |
473 | removexattr(struct dentry *d, char __user *name) | 474 | removexattr(struct dentry *d, const char __user *name) |
474 | { | 475 | { |
475 | int error; | 476 | int error; |
476 | char kname[XATTR_NAME_MAX + 1]; | 477 | char kname[XATTR_NAME_MAX + 1]; |
@@ -485,7 +486,7 @@ removexattr(struct dentry *d, char __user *name) | |||
485 | } | 486 | } |
486 | 487 | ||
487 | asmlinkage long | 488 | asmlinkage long |
488 | sys_removexattr(char __user *path, char __user *name) | 489 | sys_removexattr(const char __user *path, const char __user *name) |
489 | { | 490 | { |
490 | struct nameidata nd; | 491 | struct nameidata nd; |
491 | int error; | 492 | int error; |
@@ -503,7 +504,7 @@ sys_removexattr(char __user *path, char __user *name) | |||
503 | } | 504 | } |
504 | 505 | ||
505 | asmlinkage long | 506 | asmlinkage long |
506 | sys_lremovexattr(char __user *path, char __user *name) | 507 | sys_lremovexattr(const char __user *path, const char __user *name) |
507 | { | 508 | { |
508 | struct nameidata nd; | 509 | struct nameidata nd; |
509 | int error; | 510 | int error; |
@@ -521,7 +522,7 @@ sys_lremovexattr(char __user *path, char __user *name) | |||
521 | } | 522 | } |
522 | 523 | ||
523 | asmlinkage long | 524 | asmlinkage long |
524 | sys_fremovexattr(int fd, char __user *name) | 525 | sys_fremovexattr(int fd, const char __user *name) |
525 | { | 526 | { |
526 | struct file *f; | 527 | struct file *f; |
527 | struct dentry *dentry; | 528 | struct dentry *dentry; |
diff --git a/include/asm-alpha/unaligned.h b/include/asm-alpha/unaligned.h index a1d72846f61c..3787c60aed3f 100644 --- a/include/asm-alpha/unaligned.h +++ b/include/asm-alpha/unaligned.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __ALPHA_UNALIGNED_H | 1 | #ifndef _ASM_ALPHA_UNALIGNED_H |
2 | #define __ALPHA_UNALIGNED_H | 2 | #define _ASM_ALPHA_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/le_struct.h> |
5 | #include <linux/unaligned/be_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
5 | 7 | ||
6 | #endif | 8 | #define get_unaligned __get_unaligned_le |
9 | #define put_unaligned __put_unaligned_le | ||
10 | |||
11 | #endif /* _ASM_ALPHA_UNALIGNED_H */ | ||
diff --git a/include/asm-arm/unaligned.h b/include/asm-arm/unaligned.h index 5db03cf3b905..44593a894903 100644 --- a/include/asm-arm/unaligned.h +++ b/include/asm-arm/unaligned.h | |||
@@ -1,171 +1,9 @@ | |||
1 | #ifndef __ASM_ARM_UNALIGNED_H | 1 | #ifndef _ASM_ARM_UNALIGNED_H |
2 | #define __ASM_ARM_UNALIGNED_H | 2 | #define _ASM_ARM_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm/types.h> | 4 | #include <linux/unaligned/le_byteshift.h> |
5 | 5 | #include <linux/unaligned/be_byteshift.h> | |
6 | extern int __bug_unaligned_x(const void *ptr); | 6 | #include <linux/unaligned/generic.h> |
7 | |||
8 | /* | ||
9 | * What is the most efficient way of loading/storing an unaligned value? | ||
10 | * | ||
11 | * That is the subject of this file. Efficiency here is defined as | ||
12 | * minimum code size with minimum register usage for the common cases. | ||
13 | * It is currently not believed that long longs are common, so we | ||
14 | * trade efficiency for the chars, shorts and longs against the long | ||
15 | * longs. | ||
16 | * | ||
17 | * Current stats with gcc 2.7.2.2 for these functions: | ||
18 | * | ||
19 | * ptrsize get: code regs put: code regs | ||
20 | * 1 1 1 1 2 | ||
21 | * 2 3 2 3 2 | ||
22 | * 4 7 3 7 3 | ||
23 | * 8 20 6 16 6 | ||
24 | * | ||
25 | * gcc 2.95.1 seems to code differently: | ||
26 | * | ||
27 | * ptrsize get: code regs put: code regs | ||
28 | * 1 1 1 1 2 | ||
29 | * 2 3 2 3 2 | ||
30 | * 4 7 4 7 4 | ||
31 | * 8 19 8 15 6 | ||
32 | * | ||
33 | * which may or may not be more efficient (depending upon whether | ||
34 | * you can afford the extra registers). Hopefully the gcc 2.95 | ||
35 | * is inteligent enough to decide if it is better to use the | ||
36 | * extra register, but evidence so far seems to suggest otherwise. | ||
37 | * | ||
38 | * Unfortunately, gcc is not able to optimise the high word | ||
39 | * out of long long >> 32, or the low word from long long << 32 | ||
40 | */ | ||
41 | |||
42 | #define __get_unaligned_2_le(__p) \ | ||
43 | (unsigned int)(__p[0] | __p[1] << 8) | ||
44 | |||
45 | #define __get_unaligned_2_be(__p) \ | ||
46 | (unsigned int)(__p[0] << 8 | __p[1]) | ||
47 | |||
48 | #define __get_unaligned_4_le(__p) \ | ||
49 | (unsigned int)(__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) | ||
50 | |||
51 | #define __get_unaligned_4_be(__p) \ | ||
52 | (unsigned int)(__p[0] << 24 | __p[1] << 16 | __p[2] << 8 | __p[3]) | ||
53 | |||
54 | #define __get_unaligned_8_le(__p) \ | ||
55 | ((unsigned long long)__get_unaligned_4_le((__p+4)) << 32 | \ | ||
56 | __get_unaligned_4_le(__p)) | ||
57 | |||
58 | #define __get_unaligned_8_be(__p) \ | ||
59 | ((unsigned long long)__get_unaligned_4_be(__p) << 32 | \ | ||
60 | __get_unaligned_4_be((__p+4))) | ||
61 | |||
62 | #define __get_unaligned_le(ptr) \ | ||
63 | ((__force typeof(*(ptr)))({ \ | ||
64 | const __u8 *__p = (const __u8 *)(ptr); \ | ||
65 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \ | ||
66 | __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_le(__p), \ | ||
67 | __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_le(__p), \ | ||
68 | __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_le(__p), \ | ||
69 | (void)__bug_unaligned_x(__p))))); \ | ||
70 | })) | ||
71 | |||
72 | #define __get_unaligned_be(ptr) \ | ||
73 | ((__force typeof(*(ptr)))({ \ | ||
74 | const __u8 *__p = (const __u8 *)(ptr); \ | ||
75 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *__p, \ | ||
76 | __builtin_choose_expr(sizeof(*(ptr)) == 2, __get_unaligned_2_be(__p), \ | ||
77 | __builtin_choose_expr(sizeof(*(ptr)) == 4, __get_unaligned_4_be(__p), \ | ||
78 | __builtin_choose_expr(sizeof(*(ptr)) == 8, __get_unaligned_8_be(__p), \ | ||
79 | (void)__bug_unaligned_x(__p))))); \ | ||
80 | })) | ||
81 | |||
82 | |||
83 | static inline void __put_unaligned_2_le(__u32 __v, register __u8 *__p) | ||
84 | { | ||
85 | *__p++ = __v; | ||
86 | *__p++ = __v >> 8; | ||
87 | } | ||
88 | |||
89 | static inline void __put_unaligned_2_be(__u32 __v, register __u8 *__p) | ||
90 | { | ||
91 | *__p++ = __v >> 8; | ||
92 | *__p++ = __v; | ||
93 | } | ||
94 | |||
95 | static inline void __put_unaligned_4_le(__u32 __v, register __u8 *__p) | ||
96 | { | ||
97 | __put_unaligned_2_le(__v >> 16, __p + 2); | ||
98 | __put_unaligned_2_le(__v, __p); | ||
99 | } | ||
100 | |||
101 | static inline void __put_unaligned_4_be(__u32 __v, register __u8 *__p) | ||
102 | { | ||
103 | __put_unaligned_2_be(__v >> 16, __p); | ||
104 | __put_unaligned_2_be(__v, __p + 2); | ||
105 | } | ||
106 | |||
107 | static inline void __put_unaligned_8_le(const unsigned long long __v, register __u8 *__p) | ||
108 | { | ||
109 | /* | ||
110 | * tradeoff: 8 bytes of stack for all unaligned puts (2 | ||
111 | * instructions), or an extra register in the long long | ||
112 | * case - go for the extra register. | ||
113 | */ | ||
114 | __put_unaligned_4_le(__v >> 32, __p+4); | ||
115 | __put_unaligned_4_le(__v, __p); | ||
116 | } | ||
117 | |||
118 | static inline void __put_unaligned_8_be(const unsigned long long __v, register __u8 *__p) | ||
119 | { | ||
120 | /* | ||
121 | * tradeoff: 8 bytes of stack for all unaligned puts (2 | ||
122 | * instructions), or an extra register in the long long | ||
123 | * case - go for the extra register. | ||
124 | */ | ||
125 | __put_unaligned_4_be(__v >> 32, __p); | ||
126 | __put_unaligned_4_be(__v, __p+4); | ||
127 | } | ||
128 | |||
129 | /* | ||
130 | * Try to store an unaligned value as efficiently as possible. | ||
131 | */ | ||
132 | #define __put_unaligned_le(val,ptr) \ | ||
133 | ({ \ | ||
134 | (void)sizeof(*(ptr) = (val)); \ | ||
135 | switch (sizeof(*(ptr))) { \ | ||
136 | case 1: \ | ||
137 | *(ptr) = (val); \ | ||
138 | break; \ | ||
139 | case 2: __put_unaligned_2_le((__force u16)(val),(__u8 *)(ptr)); \ | ||
140 | break; \ | ||
141 | case 4: __put_unaligned_4_le((__force u32)(val),(__u8 *)(ptr)); \ | ||
142 | break; \ | ||
143 | case 8: __put_unaligned_8_le((__force u64)(val),(__u8 *)(ptr)); \ | ||
144 | break; \ | ||
145 | default: __bug_unaligned_x(ptr); \ | ||
146 | break; \ | ||
147 | } \ | ||
148 | (void) 0; \ | ||
149 | }) | ||
150 | |||
151 | #define __put_unaligned_be(val,ptr) \ | ||
152 | ({ \ | ||
153 | (void)sizeof(*(ptr) = (val)); \ | ||
154 | switch (sizeof(*(ptr))) { \ | ||
155 | case 1: \ | ||
156 | *(ptr) = (val); \ | ||
157 | break; \ | ||
158 | case 2: __put_unaligned_2_be((__force u16)(val),(__u8 *)(ptr)); \ | ||
159 | break; \ | ||
160 | case 4: __put_unaligned_4_be((__force u32)(val),(__u8 *)(ptr)); \ | ||
161 | break; \ | ||
162 | case 8: __put_unaligned_8_be((__force u64)(val),(__u8 *)(ptr)); \ | ||
163 | break; \ | ||
164 | default: __bug_unaligned_x(ptr); \ | ||
165 | break; \ | ||
166 | } \ | ||
167 | (void) 0; \ | ||
168 | }) | ||
169 | 7 | ||
170 | /* | 8 | /* |
171 | * Select endianness | 9 | * Select endianness |
@@ -178,4 +16,4 @@ static inline void __put_unaligned_8_be(const unsigned long long __v, register _ | |||
178 | #define put_unaligned __put_unaligned_be | 16 | #define put_unaligned __put_unaligned_be |
179 | #endif | 17 | #endif |
180 | 18 | ||
181 | #endif | 19 | #endif /* _ASM_ARM_UNALIGNED_H */ |
diff --git a/include/asm-avr32/unaligned.h b/include/asm-avr32/unaligned.h index 36f5fd430543..041877290470 100644 --- a/include/asm-avr32/unaligned.h +++ b/include/asm-avr32/unaligned.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef __ASM_AVR32_UNALIGNED_H | 1 | #ifndef _ASM_AVR32_UNALIGNED_H |
2 | #define __ASM_AVR32_UNALIGNED_H | 2 | #define _ASM_AVR32_UNALIGNED_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * AVR32 can handle some unaligned accesses, depending on the | 5 | * AVR32 can handle some unaligned accesses, depending on the |
@@ -11,6 +11,11 @@ | |||
11 | * optimize word loads in general. | 11 | * optimize word loads in general. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <asm-generic/unaligned.h> | 14 | #include <linux/unaligned/be_struct.h> |
15 | #include <linux/unaligned/le_byteshift.h> | ||
16 | #include <linux/unaligned/generic.h> | ||
15 | 17 | ||
16 | #endif /* __ASM_AVR32_UNALIGNED_H */ | 18 | #define get_unaligned __get_unaligned_be |
19 | #define put_unaligned __put_unaligned_be | ||
20 | |||
21 | #endif /* _ASM_AVR32_UNALIGNED_H */ | ||
diff --git a/include/asm-blackfin/unaligned.h b/include/asm-blackfin/unaligned.h index 10081dc241ef..fd8a1d634945 100644 --- a/include/asm-blackfin/unaligned.h +++ b/include/asm-blackfin/unaligned.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef __BFIN_UNALIGNED_H | 1 | #ifndef _ASM_BLACKFIN_UNALIGNED_H |
2 | #define __BFIN_UNALIGNED_H | 2 | #define _ASM_BLACKFIN_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/le_struct.h> |
5 | #include <linux/unaligned/be_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
5 | 7 | ||
6 | #endif /* __BFIN_UNALIGNED_H */ | 8 | #define get_unaligned __get_unaligned_le |
9 | #define put_unaligned __put_unaligned_le | ||
10 | |||
11 | #endif /* _ASM_BLACKFIN_UNALIGNED_H */ | ||
diff --git a/include/asm-cris/unaligned.h b/include/asm-cris/unaligned.h index 7fbbb399f6f1..7b3f3fec567c 100644 --- a/include/asm-cris/unaligned.h +++ b/include/asm-cris/unaligned.h | |||
@@ -1,16 +1,13 @@ | |||
1 | #ifndef __CRIS_UNALIGNED_H | 1 | #ifndef _ASM_CRIS_UNALIGNED_H |
2 | #define __CRIS_UNALIGNED_H | 2 | #define _ASM_CRIS_UNALIGNED_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * CRIS can do unaligned accesses itself. | 5 | * CRIS can do unaligned accesses itself. |
6 | * | ||
7 | * The strange macros are there to make sure these can't | ||
8 | * be misused in a way that makes them not work on other | ||
9 | * architectures where unaligned accesses aren't as simple. | ||
10 | */ | 6 | */ |
7 | #include <linux/unaligned/access_ok.h> | ||
8 | #include <linux/unaligned/generic.h> | ||
11 | 9 | ||
12 | #define get_unaligned(ptr) (*(ptr)) | 10 | #define get_unaligned __get_unaligned_le |
11 | #define put_unaligned __put_unaligned_le | ||
13 | 12 | ||
14 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | 13 | #endif /* _ASM_CRIS_UNALIGNED_H */ |
15 | |||
16 | #endif | ||
diff --git a/include/asm-frv/unaligned.h b/include/asm-frv/unaligned.h index dc8e9c9bf6bd..64ccc736f2d8 100644 --- a/include/asm-frv/unaligned.h +++ b/include/asm-frv/unaligned.h | |||
@@ -9,194 +9,14 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #ifndef _ASM_UNALIGNED_H | 12 | #ifndef _ASM_FRV_UNALIGNED_H |
13 | #define _ASM_UNALIGNED_H | 13 | #define _ASM_FRV_UNALIGNED_H |
14 | 14 | ||
15 | #include <linux/unaligned/le_byteshift.h> | ||
16 | #include <linux/unaligned/be_byteshift.h> | ||
17 | #include <linux/unaligned/generic.h> | ||
15 | 18 | ||
16 | /* | 19 | #define get_unaligned __get_unaligned_be |
17 | * Unaligned accesses on uClinux can't be performed in a fault handler - the | 20 | #define put_unaligned __put_unaligned_be |
18 | * CPU detects them as imprecise exceptions making this impossible. | ||
19 | * | ||
20 | * With the FR451, however, they are precise, and so we used to fix them up in | ||
21 | * the memory access fault handler. However, instruction bundling make this | ||
22 | * impractical. So, now we fall back to using memcpy. | ||
23 | */ | ||
24 | #ifdef CONFIG_MMU | ||
25 | |||
26 | /* | ||
27 | * The asm statement in the macros below is a way to get GCC to copy a | ||
28 | * value from one variable to another without having any clue it's | ||
29 | * actually doing so, so that it won't have any idea that the values | ||
30 | * in the two variables are related. | ||
31 | */ | ||
32 | |||
33 | #define get_unaligned(ptr) ({ \ | ||
34 | typeof((*(ptr))) __x; \ | ||
35 | void *__ptrcopy; \ | ||
36 | asm("" : "=r" (__ptrcopy) : "0" (ptr)); \ | ||
37 | memcpy(&__x, __ptrcopy, sizeof(*(ptr))); \ | ||
38 | __x; \ | ||
39 | }) | ||
40 | |||
41 | #define put_unaligned(val, ptr) ({ \ | ||
42 | typeof((*(ptr))) __x = (val); \ | ||
43 | void *__ptrcopy; \ | ||
44 | asm("" : "=r" (__ptrcopy) : "0" (ptr)); \ | ||
45 | memcpy(__ptrcopy, &__x, sizeof(*(ptr))); \ | ||
46 | }) | ||
47 | |||
48 | extern int handle_misalignment(unsigned long esr0, unsigned long ear0, unsigned long epcr0); | ||
49 | |||
50 | #else | ||
51 | |||
52 | #define get_unaligned(ptr) \ | ||
53 | ({ \ | ||
54 | typeof(*(ptr)) x; \ | ||
55 | const char *__p = (const char *) (ptr); \ | ||
56 | \ | ||
57 | switch (sizeof(x)) { \ | ||
58 | case 1: \ | ||
59 | x = *(ptr); \ | ||
60 | break; \ | ||
61 | case 2: \ | ||
62 | { \ | ||
63 | uint8_t a; \ | ||
64 | asm(" ldub%I2 %M2,%0 \n" \ | ||
65 | " ldub%I3.p %M3,%1 \n" \ | ||
66 | " slli %0,#8,%0 \n" \ | ||
67 | " or %0,%1,%0 \n" \ | ||
68 | : "=&r"(x), "=&r"(a) \ | ||
69 | : "m"(__p[0]), "m"(__p[1]) \ | ||
70 | ); \ | ||
71 | break; \ | ||
72 | } \ | ||
73 | \ | ||
74 | case 4: \ | ||
75 | { \ | ||
76 | uint8_t a; \ | ||
77 | asm(" ldub%I2 %M2,%0 \n" \ | ||
78 | " ldub%I3.p %M3,%1 \n" \ | ||
79 | " slli %0,#8,%0 \n" \ | ||
80 | " or %0,%1,%0 \n" \ | ||
81 | " ldub%I4.p %M4,%1 \n" \ | ||
82 | " slli %0,#8,%0 \n" \ | ||
83 | " or %0,%1,%0 \n" \ | ||
84 | " ldub%I5.p %M5,%1 \n" \ | ||
85 | " slli %0,#8,%0 \n" \ | ||
86 | " or %0,%1,%0 \n" \ | ||
87 | : "=&r"(x), "=&r"(a) \ | ||
88 | : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]) \ | ||
89 | ); \ | ||
90 | break; \ | ||
91 | } \ | ||
92 | \ | ||
93 | case 8: \ | ||
94 | { \ | ||
95 | union { uint64_t x; u32 y[2]; } z; \ | ||
96 | uint8_t a; \ | ||
97 | asm(" ldub%I3 %M3,%0 \n" \ | ||
98 | " ldub%I4.p %M4,%2 \n" \ | ||
99 | " slli %0,#8,%0 \n" \ | ||
100 | " or %0,%2,%0 \n" \ | ||
101 | " ldub%I5.p %M5,%2 \n" \ | ||
102 | " slli %0,#8,%0 \n" \ | ||
103 | " or %0,%2,%0 \n" \ | ||
104 | " ldub%I6.p %M6,%2 \n" \ | ||
105 | " slli %0,#8,%0 \n" \ | ||
106 | " or %0,%2,%0 \n" \ | ||
107 | " ldub%I7 %M7,%1 \n" \ | ||
108 | " ldub%I8.p %M8,%2 \n" \ | ||
109 | " slli %1,#8,%1 \n" \ | ||
110 | " or %1,%2,%1 \n" \ | ||
111 | " ldub%I9.p %M9,%2 \n" \ | ||
112 | " slli %1,#8,%1 \n" \ | ||
113 | " or %1,%2,%1 \n" \ | ||
114 | " ldub%I10.p %M10,%2 \n" \ | ||
115 | " slli %1,#8,%1 \n" \ | ||
116 | " or %1,%2,%1 \n" \ | ||
117 | : "=&r"(z.y[0]), "=&r"(z.y[1]), "=&r"(a) \ | ||
118 | : "m"(__p[0]), "m"(__p[1]), "m"(__p[2]), "m"(__p[3]), \ | ||
119 | "m"(__p[4]), "m"(__p[5]), "m"(__p[6]), "m"(__p[7]) \ | ||
120 | ); \ | ||
121 | x = z.x; \ | ||
122 | break; \ | ||
123 | } \ | ||
124 | \ | ||
125 | default: \ | ||
126 | x = 0; \ | ||
127 | BUG(); \ | ||
128 | break; \ | ||
129 | } \ | ||
130 | \ | ||
131 | x; \ | ||
132 | }) | ||
133 | |||
134 | #define put_unaligned(val, ptr) \ | ||
135 | do { \ | ||
136 | char *__p = (char *) (ptr); \ | ||
137 | int x; \ | ||
138 | \ | ||
139 | switch (sizeof(*ptr)) { \ | ||
140 | case 2: \ | ||
141 | { \ | ||
142 | asm(" stb%I1.p %0,%M1 \n" \ | ||
143 | " srli %0,#8,%0 \n" \ | ||
144 | " stb%I2 %0,%M2 \n" \ | ||
145 | : "=r"(x), "=m"(__p[1]), "=m"(__p[0]) \ | ||
146 | : "0"(val) \ | ||
147 | ); \ | ||
148 | break; \ | ||
149 | } \ | ||
150 | \ | ||
151 | case 4: \ | ||
152 | { \ | ||
153 | asm(" stb%I1.p %0,%M1 \n" \ | ||
154 | " srli %0,#8,%0 \n" \ | ||
155 | " stb%I2.p %0,%M2 \n" \ | ||
156 | " srli %0,#8,%0 \n" \ | ||
157 | " stb%I3.p %0,%M3 \n" \ | ||
158 | " srli %0,#8,%0 \n" \ | ||
159 | " stb%I4 %0,%M4 \n" \ | ||
160 | : "=r"(x), "=m"(__p[3]), "=m"(__p[2]), "=m"(__p[1]), "=m"(__p[0]) \ | ||
161 | : "0"(val) \ | ||
162 | ); \ | ||
163 | break; \ | ||
164 | } \ | ||
165 | \ | ||
166 | case 8: \ | ||
167 | { \ | ||
168 | uint32_t __high, __low; \ | ||
169 | __high = (uint64_t)val >> 32; \ | ||
170 | __low = val & 0xffffffff; \ | ||
171 | asm(" stb%I2.p %0,%M2 \n" \ | ||
172 | " srli %0,#8,%0 \n" \ | ||
173 | " stb%I3.p %0,%M3 \n" \ | ||
174 | " srli %0,#8,%0 \n" \ | ||
175 | " stb%I4.p %0,%M4 \n" \ | ||
176 | " srli %0,#8,%0 \n" \ | ||
177 | " stb%I5.p %0,%M5 \n" \ | ||
178 | " srli %0,#8,%0 \n" \ | ||
179 | " stb%I6.p %1,%M6 \n" \ | ||
180 | " srli %1,#8,%1 \n" \ | ||
181 | " stb%I7.p %1,%M7 \n" \ | ||
182 | " srli %1,#8,%1 \n" \ | ||
183 | " stb%I8.p %1,%M8 \n" \ | ||
184 | " srli %1,#8,%1 \n" \ | ||
185 | " stb%I9 %1,%M9 \n" \ | ||
186 | : "=&r"(__low), "=&r"(__high), "=m"(__p[7]), "=m"(__p[6]), \ | ||
187 | "=m"(__p[5]), "=m"(__p[4]), "=m"(__p[3]), "=m"(__p[2]), \ | ||
188 | "=m"(__p[1]), "=m"(__p[0]) \ | ||
189 | : "0"(__low), "1"(__high) \ | ||
190 | ); \ | ||
191 | break; \ | ||
192 | } \ | ||
193 | \ | ||
194 | default: \ | ||
195 | *(ptr) = (val); \ | ||
196 | break; \ | ||
197 | } \ | ||
198 | } while(0) | ||
199 | |||
200 | #endif | ||
201 | 21 | ||
202 | #endif | 22 | #endif /* _ASM_FRV_UNALIGNED_H */ |
diff --git a/include/asm-generic/ioctl.h b/include/asm-generic/ioctl.h index cd027298beb1..864181385579 100644 --- a/include/asm-generic/ioctl.h +++ b/include/asm-generic/ioctl.h | |||
@@ -21,8 +21,19 @@ | |||
21 | */ | 21 | */ |
22 | #define _IOC_NRBITS 8 | 22 | #define _IOC_NRBITS 8 |
23 | #define _IOC_TYPEBITS 8 | 23 | #define _IOC_TYPEBITS 8 |
24 | #define _IOC_SIZEBITS 14 | 24 | |
25 | #define _IOC_DIRBITS 2 | 25 | /* |
26 | * Let any architecture override either of the following before | ||
27 | * including this file. | ||
28 | */ | ||
29 | |||
30 | #ifndef _IOC_SIZEBITS | ||
31 | # define _IOC_SIZEBITS 14 | ||
32 | #endif | ||
33 | |||
34 | #ifndef _IOC_DIRBITS | ||
35 | # define _IOC_DIRBITS 2 | ||
36 | #endif | ||
26 | 37 | ||
27 | #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) | 38 | #define _IOC_NRMASK ((1 << _IOC_NRBITS)-1) |
28 | #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) | 39 | #define _IOC_TYPEMASK ((1 << _IOC_TYPEBITS)-1) |
@@ -35,11 +46,21 @@ | |||
35 | #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) | 46 | #define _IOC_DIRSHIFT (_IOC_SIZESHIFT+_IOC_SIZEBITS) |
36 | 47 | ||
37 | /* | 48 | /* |
38 | * Direction bits. | 49 | * Direction bits, which any architecture can choose to override |
50 | * before including this file. | ||
39 | */ | 51 | */ |
40 | #define _IOC_NONE 0U | 52 | |
41 | #define _IOC_WRITE 1U | 53 | #ifndef _IOC_NONE |
42 | #define _IOC_READ 2U | 54 | # define _IOC_NONE 0U |
55 | #endif | ||
56 | |||
57 | #ifndef _IOC_WRITE | ||
58 | # define _IOC_WRITE 1U | ||
59 | #endif | ||
60 | |||
61 | #ifndef _IOC_READ | ||
62 | # define _IOC_READ 2U | ||
63 | #endif | ||
43 | 64 | ||
44 | #define _IOC(dir,type,nr,size) \ | 65 | #define _IOC(dir,type,nr,size) \ |
45 | (((dir) << _IOC_DIRSHIFT) | \ | 66 | (((dir) << _IOC_DIRSHIFT) | \ |
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h deleted file mode 100644 index 2fe1b2e67f01..000000000000 --- a/include/asm-generic/unaligned.h +++ /dev/null | |||
@@ -1,124 +0,0 @@ | |||
1 | #ifndef _ASM_GENERIC_UNALIGNED_H_ | ||
2 | #define _ASM_GENERIC_UNALIGNED_H_ | ||
3 | |||
4 | /* | ||
5 | * For the benefit of those who are trying to port Linux to another | ||
6 | * architecture, here are some C-language equivalents. | ||
7 | * | ||
8 | * This is based almost entirely upon Richard Henderson's | ||
9 | * asm-alpha/unaligned.h implementation. Some comments were | ||
10 | * taken from David Mosberger's asm-ia64/unaligned.h header. | ||
11 | */ | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | |||
15 | /* | ||
16 | * The main single-value unaligned transfer routines. | ||
17 | */ | ||
18 | #define get_unaligned(ptr) \ | ||
19 | __get_unaligned((ptr), sizeof(*(ptr))) | ||
20 | #define put_unaligned(x,ptr) \ | ||
21 | ((void)sizeof(*(ptr)=(x)),\ | ||
22 | __put_unaligned((__force __u64)(x), (ptr), sizeof(*(ptr)))) | ||
23 | |||
24 | /* | ||
25 | * This function doesn't actually exist. The idea is that when | ||
26 | * someone uses the macros below with an unsupported size (datatype), | ||
27 | * the linker will alert us to the problem via an unresolved reference | ||
28 | * error. | ||
29 | */ | ||
30 | extern void bad_unaligned_access_length(void) __attribute__((noreturn)); | ||
31 | |||
32 | struct __una_u64 { __u64 x __attribute__((packed)); }; | ||
33 | struct __una_u32 { __u32 x __attribute__((packed)); }; | ||
34 | struct __una_u16 { __u16 x __attribute__((packed)); }; | ||
35 | |||
36 | /* | ||
37 | * Elemental unaligned loads | ||
38 | */ | ||
39 | |||
40 | static inline __u64 __uldq(const __u64 *addr) | ||
41 | { | ||
42 | const struct __una_u64 *ptr = (const struct __una_u64 *) addr; | ||
43 | return ptr->x; | ||
44 | } | ||
45 | |||
46 | static inline __u32 __uldl(const __u32 *addr) | ||
47 | { | ||
48 | const struct __una_u32 *ptr = (const struct __una_u32 *) addr; | ||
49 | return ptr->x; | ||
50 | } | ||
51 | |||
52 | static inline __u16 __uldw(const __u16 *addr) | ||
53 | { | ||
54 | const struct __una_u16 *ptr = (const struct __una_u16 *) addr; | ||
55 | return ptr->x; | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Elemental unaligned stores | ||
60 | */ | ||
61 | |||
62 | static inline void __ustq(__u64 val, __u64 *addr) | ||
63 | { | ||
64 | struct __una_u64 *ptr = (struct __una_u64 *) addr; | ||
65 | ptr->x = val; | ||
66 | } | ||
67 | |||
68 | static inline void __ustl(__u32 val, __u32 *addr) | ||
69 | { | ||
70 | struct __una_u32 *ptr = (struct __una_u32 *) addr; | ||
71 | ptr->x = val; | ||
72 | } | ||
73 | |||
74 | static inline void __ustw(__u16 val, __u16 *addr) | ||
75 | { | ||
76 | struct __una_u16 *ptr = (struct __una_u16 *) addr; | ||
77 | ptr->x = val; | ||
78 | } | ||
79 | |||
80 | #define __get_unaligned(ptr, size) ({ \ | ||
81 | const void *__gu_p = ptr; \ | ||
82 | __u64 __val; \ | ||
83 | switch (size) { \ | ||
84 | case 1: \ | ||
85 | __val = *(const __u8 *)__gu_p; \ | ||
86 | break; \ | ||
87 | case 2: \ | ||
88 | __val = __uldw(__gu_p); \ | ||
89 | break; \ | ||
90 | case 4: \ | ||
91 | __val = __uldl(__gu_p); \ | ||
92 | break; \ | ||
93 | case 8: \ | ||
94 | __val = __uldq(__gu_p); \ | ||
95 | break; \ | ||
96 | default: \ | ||
97 | bad_unaligned_access_length(); \ | ||
98 | }; \ | ||
99 | (__force __typeof__(*(ptr)))__val; \ | ||
100 | }) | ||
101 | |||
102 | #define __put_unaligned(val, ptr, size) \ | ||
103 | ({ \ | ||
104 | void *__gu_p = ptr; \ | ||
105 | switch (size) { \ | ||
106 | case 1: \ | ||
107 | *(__u8 *)__gu_p = (__force __u8)val; \ | ||
108 | break; \ | ||
109 | case 2: \ | ||
110 | __ustw((__force __u16)val, __gu_p); \ | ||
111 | break; \ | ||
112 | case 4: \ | ||
113 | __ustl((__force __u32)val, __gu_p); \ | ||
114 | break; \ | ||
115 | case 8: \ | ||
116 | __ustq(val, __gu_p); \ | ||
117 | break; \ | ||
118 | default: \ | ||
119 | bad_unaligned_access_length(); \ | ||
120 | }; \ | ||
121 | (void)0; \ | ||
122 | }) | ||
123 | |||
124 | #endif /* _ASM_GENERIC_UNALIGNED_H */ | ||
diff --git a/include/asm-h8300/unaligned.h b/include/asm-h8300/unaligned.h index ffb67f472070..b8d06c70c2da 100644 --- a/include/asm-h8300/unaligned.h +++ b/include/asm-h8300/unaligned.h | |||
@@ -1,15 +1,11 @@ | |||
1 | #ifndef __H8300_UNALIGNED_H | 1 | #ifndef _ASM_H8300_UNALIGNED_H |
2 | #define __H8300_UNALIGNED_H | 2 | #define _ASM_H8300_UNALIGNED_H |
3 | 3 | ||
4 | #include <linux/unaligned/be_memmove.h> | ||
5 | #include <linux/unaligned/le_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
4 | 7 | ||
5 | /* Use memmove here, so gcc does not insert a __builtin_memcpy. */ | 8 | #define get_unaligned __get_unaligned_be |
9 | #define put_unaligned __put_unaligned_be | ||
6 | 10 | ||
7 | #define get_unaligned(ptr) \ | 11 | #endif /* _ASM_H8300_UNALIGNED_H */ |
8 | ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) | ||
9 | |||
10 | #define put_unaligned(val, ptr) \ | ||
11 | ({ __typeof__(*(ptr)) __tmp = (val); \ | ||
12 | memmove((ptr), &__tmp, sizeof(*(ptr))); \ | ||
13 | (void)0; }) | ||
14 | |||
15 | #endif | ||
diff --git a/include/asm-ia64/dma-mapping.h b/include/asm-ia64/dma-mapping.h index f1735a22d0ea..9f0df9bd46b7 100644 --- a/include/asm-ia64/dma-mapping.h +++ b/include/asm-ia64/dma-mapping.h | |||
@@ -23,10 +23,30 @@ dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr, | |||
23 | { | 23 | { |
24 | dma_free_coherent(dev, size, cpu_addr, dma_handle); | 24 | dma_free_coherent(dev, size, cpu_addr, dma_handle); |
25 | } | 25 | } |
26 | #define dma_map_single platform_dma_map_single | 26 | #define dma_map_single_attrs platform_dma_map_single_attrs |
27 | #define dma_map_sg platform_dma_map_sg | 27 | static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr, |
28 | #define dma_unmap_single platform_dma_unmap_single | 28 | size_t size, int dir) |
29 | #define dma_unmap_sg platform_dma_unmap_sg | 29 | { |
30 | return dma_map_single_attrs(dev, cpu_addr, size, dir, NULL); | ||
31 | } | ||
32 | #define dma_map_sg_attrs platform_dma_map_sg_attrs | ||
33 | static inline int dma_map_sg(struct device *dev, struct scatterlist *sgl, | ||
34 | int nents, int dir) | ||
35 | { | ||
36 | return dma_map_sg_attrs(dev, sgl, nents, dir, NULL); | ||
37 | } | ||
38 | #define dma_unmap_single_attrs platform_dma_unmap_single_attrs | ||
39 | static inline void dma_unmap_single(struct device *dev, dma_addr_t cpu_addr, | ||
40 | size_t size, int dir) | ||
41 | { | ||
42 | return dma_unmap_single_attrs(dev, cpu_addr, size, dir, NULL); | ||
43 | } | ||
44 | #define dma_unmap_sg_attrs platform_dma_unmap_sg_attrs | ||
45 | static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sgl, | ||
46 | int nents, int dir) | ||
47 | { | ||
48 | return dma_unmap_sg_attrs(dev, sgl, nents, dir, NULL); | ||
49 | } | ||
30 | #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu | 50 | #define dma_sync_single_for_cpu platform_dma_sync_single_for_cpu |
31 | #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu | 51 | #define dma_sync_sg_for_cpu platform_dma_sync_sg_for_cpu |
32 | #define dma_sync_single_for_device platform_dma_sync_single_for_device | 52 | #define dma_sync_single_for_device platform_dma_sync_single_for_device |
diff --git a/include/asm-ia64/machvec.h b/include/asm-ia64/machvec.h index c201a2020aa4..9f020eb825c5 100644 --- a/include/asm-ia64/machvec.h +++ b/include/asm-ia64/machvec.h | |||
@@ -22,6 +22,7 @@ struct pci_bus; | |||
22 | struct task_struct; | 22 | struct task_struct; |
23 | struct pci_dev; | 23 | struct pci_dev; |
24 | struct msi_desc; | 24 | struct msi_desc; |
25 | struct dma_attrs; | ||
25 | 26 | ||
26 | typedef void ia64_mv_setup_t (char **); | 27 | typedef void ia64_mv_setup_t (char **); |
27 | typedef void ia64_mv_cpu_init_t (void); | 28 | typedef void ia64_mv_cpu_init_t (void); |
@@ -56,6 +57,11 @@ typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist | |||
56 | typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); | 57 | typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr); |
57 | typedef int ia64_mv_dma_supported (struct device *, u64); | 58 | typedef int ia64_mv_dma_supported (struct device *, u64); |
58 | 59 | ||
60 | typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *); | ||
61 | typedef void ia64_mv_dma_unmap_single_attrs (struct device *, dma_addr_t, size_t, int, struct dma_attrs *); | ||
62 | typedef int ia64_mv_dma_map_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | ||
63 | typedef void ia64_mv_dma_unmap_sg_attrs (struct device *, struct scatterlist *, int, int, struct dma_attrs *); | ||
64 | |||
59 | /* | 65 | /* |
60 | * WARNING: The legacy I/O space is _architected_. Platforms are | 66 | * WARNING: The legacy I/O space is _architected_. Platforms are |
61 | * expected to follow this architected model (see Section 10.7 in the | 67 | * expected to follow this architected model (see Section 10.7 in the |
@@ -136,10 +142,10 @@ extern void machvec_tlb_migrate_finish (struct mm_struct *); | |||
136 | # define platform_dma_init ia64_mv.dma_init | 142 | # define platform_dma_init ia64_mv.dma_init |
137 | # define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent | 143 | # define platform_dma_alloc_coherent ia64_mv.dma_alloc_coherent |
138 | # define platform_dma_free_coherent ia64_mv.dma_free_coherent | 144 | # define platform_dma_free_coherent ia64_mv.dma_free_coherent |
139 | # define platform_dma_map_single ia64_mv.dma_map_single | 145 | # define platform_dma_map_single_attrs ia64_mv.dma_map_single_attrs |
140 | # define platform_dma_unmap_single ia64_mv.dma_unmap_single | 146 | # define platform_dma_unmap_single_attrs ia64_mv.dma_unmap_single_attrs |
141 | # define platform_dma_map_sg ia64_mv.dma_map_sg | 147 | # define platform_dma_map_sg_attrs ia64_mv.dma_map_sg_attrs |
142 | # define platform_dma_unmap_sg ia64_mv.dma_unmap_sg | 148 | # define platform_dma_unmap_sg_attrs ia64_mv.dma_unmap_sg_attrs |
143 | # define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu | 149 | # define platform_dma_sync_single_for_cpu ia64_mv.dma_sync_single_for_cpu |
144 | # define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu | 150 | # define platform_dma_sync_sg_for_cpu ia64_mv.dma_sync_sg_for_cpu |
145 | # define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device | 151 | # define platform_dma_sync_single_for_device ia64_mv.dma_sync_single_for_device |
@@ -190,10 +196,10 @@ struct ia64_machine_vector { | |||
190 | ia64_mv_dma_init *dma_init; | 196 | ia64_mv_dma_init *dma_init; |
191 | ia64_mv_dma_alloc_coherent *dma_alloc_coherent; | 197 | ia64_mv_dma_alloc_coherent *dma_alloc_coherent; |
192 | ia64_mv_dma_free_coherent *dma_free_coherent; | 198 | ia64_mv_dma_free_coherent *dma_free_coherent; |
193 | ia64_mv_dma_map_single *dma_map_single; | 199 | ia64_mv_dma_map_single_attrs *dma_map_single_attrs; |
194 | ia64_mv_dma_unmap_single *dma_unmap_single; | 200 | ia64_mv_dma_unmap_single_attrs *dma_unmap_single_attrs; |
195 | ia64_mv_dma_map_sg *dma_map_sg; | 201 | ia64_mv_dma_map_sg_attrs *dma_map_sg_attrs; |
196 | ia64_mv_dma_unmap_sg *dma_unmap_sg; | 202 | ia64_mv_dma_unmap_sg_attrs *dma_unmap_sg_attrs; |
197 | ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu; | 203 | ia64_mv_dma_sync_single_for_cpu *dma_sync_single_for_cpu; |
198 | ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu; | 204 | ia64_mv_dma_sync_sg_for_cpu *dma_sync_sg_for_cpu; |
199 | ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device; | 205 | ia64_mv_dma_sync_single_for_device *dma_sync_single_for_device; |
@@ -240,10 +246,10 @@ struct ia64_machine_vector { | |||
240 | platform_dma_init, \ | 246 | platform_dma_init, \ |
241 | platform_dma_alloc_coherent, \ | 247 | platform_dma_alloc_coherent, \ |
242 | platform_dma_free_coherent, \ | 248 | platform_dma_free_coherent, \ |
243 | platform_dma_map_single, \ | 249 | platform_dma_map_single_attrs, \ |
244 | platform_dma_unmap_single, \ | 250 | platform_dma_unmap_single_attrs, \ |
245 | platform_dma_map_sg, \ | 251 | platform_dma_map_sg_attrs, \ |
246 | platform_dma_unmap_sg, \ | 252 | platform_dma_unmap_sg_attrs, \ |
247 | platform_dma_sync_single_for_cpu, \ | 253 | platform_dma_sync_single_for_cpu, \ |
248 | platform_dma_sync_sg_for_cpu, \ | 254 | platform_dma_sync_sg_for_cpu, \ |
249 | platform_dma_sync_single_for_device, \ | 255 | platform_dma_sync_single_for_device, \ |
@@ -292,9 +298,13 @@ extern ia64_mv_dma_init swiotlb_init; | |||
292 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; | 298 | extern ia64_mv_dma_alloc_coherent swiotlb_alloc_coherent; |
293 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; | 299 | extern ia64_mv_dma_free_coherent swiotlb_free_coherent; |
294 | extern ia64_mv_dma_map_single swiotlb_map_single; | 300 | extern ia64_mv_dma_map_single swiotlb_map_single; |
301 | extern ia64_mv_dma_map_single_attrs swiotlb_map_single_attrs; | ||
295 | extern ia64_mv_dma_unmap_single swiotlb_unmap_single; | 302 | extern ia64_mv_dma_unmap_single swiotlb_unmap_single; |
303 | extern ia64_mv_dma_unmap_single_attrs swiotlb_unmap_single_attrs; | ||
296 | extern ia64_mv_dma_map_sg swiotlb_map_sg; | 304 | extern ia64_mv_dma_map_sg swiotlb_map_sg; |
305 | extern ia64_mv_dma_map_sg_attrs swiotlb_map_sg_attrs; | ||
297 | extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; | 306 | extern ia64_mv_dma_unmap_sg swiotlb_unmap_sg; |
307 | extern ia64_mv_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs; | ||
298 | extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu; | 308 | extern ia64_mv_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu; |
299 | extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu; | 309 | extern ia64_mv_dma_sync_sg_for_cpu swiotlb_sync_sg_for_cpu; |
300 | extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device; | 310 | extern ia64_mv_dma_sync_single_for_device swiotlb_sync_single_for_device; |
@@ -340,17 +350,17 @@ extern ia64_mv_dma_supported swiotlb_dma_supported; | |||
340 | #ifndef platform_dma_free_coherent | 350 | #ifndef platform_dma_free_coherent |
341 | # define platform_dma_free_coherent swiotlb_free_coherent | 351 | # define platform_dma_free_coherent swiotlb_free_coherent |
342 | #endif | 352 | #endif |
343 | #ifndef platform_dma_map_single | 353 | #ifndef platform_dma_map_single_attrs |
344 | # define platform_dma_map_single swiotlb_map_single | 354 | # define platform_dma_map_single_attrs swiotlb_map_single_attrs |
345 | #endif | 355 | #endif |
346 | #ifndef platform_dma_unmap_single | 356 | #ifndef platform_dma_unmap_single_attrs |
347 | # define platform_dma_unmap_single swiotlb_unmap_single | 357 | # define platform_dma_unmap_single_attrs swiotlb_unmap_single_attrs |
348 | #endif | 358 | #endif |
349 | #ifndef platform_dma_map_sg | 359 | #ifndef platform_dma_map_sg_attrs |
350 | # define platform_dma_map_sg swiotlb_map_sg | 360 | # define platform_dma_map_sg_attrs swiotlb_map_sg_attrs |
351 | #endif | 361 | #endif |
352 | #ifndef platform_dma_unmap_sg | 362 | #ifndef platform_dma_unmap_sg_attrs |
353 | # define platform_dma_unmap_sg swiotlb_unmap_sg | 363 | # define platform_dma_unmap_sg_attrs swiotlb_unmap_sg_attrs |
354 | #endif | 364 | #endif |
355 | #ifndef platform_dma_sync_single_for_cpu | 365 | #ifndef platform_dma_sync_single_for_cpu |
356 | # define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu | 366 | # define platform_dma_sync_single_for_cpu swiotlb_sync_single_for_cpu |
diff --git a/include/asm-ia64/machvec_hpzx1.h b/include/asm-ia64/machvec_hpzx1.h index e90daf9ce340..2f57f5144b9f 100644 --- a/include/asm-ia64/machvec_hpzx1.h +++ b/include/asm-ia64/machvec_hpzx1.h | |||
@@ -4,10 +4,10 @@ | |||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; | 5 | extern ia64_mv_dma_alloc_coherent sba_alloc_coherent; |
6 | extern ia64_mv_dma_free_coherent sba_free_coherent; | 6 | extern ia64_mv_dma_free_coherent sba_free_coherent; |
7 | extern ia64_mv_dma_map_single sba_map_single; | 7 | extern ia64_mv_dma_map_single_attrs sba_map_single_attrs; |
8 | extern ia64_mv_dma_unmap_single sba_unmap_single; | 8 | extern ia64_mv_dma_unmap_single_attrs sba_unmap_single_attrs; |
9 | extern ia64_mv_dma_map_sg sba_map_sg; | 9 | extern ia64_mv_dma_map_sg_attrs sba_map_sg_attrs; |
10 | extern ia64_mv_dma_unmap_sg sba_unmap_sg; | 10 | extern ia64_mv_dma_unmap_sg_attrs sba_unmap_sg_attrs; |
11 | extern ia64_mv_dma_supported sba_dma_supported; | 11 | extern ia64_mv_dma_supported sba_dma_supported; |
12 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | 12 | extern ia64_mv_dma_mapping_error sba_dma_mapping_error; |
13 | 13 | ||
@@ -23,10 +23,10 @@ extern ia64_mv_dma_mapping_error sba_dma_mapping_error; | |||
23 | #define platform_dma_init machvec_noop | 23 | #define platform_dma_init machvec_noop |
24 | #define platform_dma_alloc_coherent sba_alloc_coherent | 24 | #define platform_dma_alloc_coherent sba_alloc_coherent |
25 | #define platform_dma_free_coherent sba_free_coherent | 25 | #define platform_dma_free_coherent sba_free_coherent |
26 | #define platform_dma_map_single sba_map_single | 26 | #define platform_dma_map_single_attrs sba_map_single_attrs |
27 | #define platform_dma_unmap_single sba_unmap_single | 27 | #define platform_dma_unmap_single_attrs sba_unmap_single_attrs |
28 | #define platform_dma_map_sg sba_map_sg | 28 | #define platform_dma_map_sg_attrs sba_map_sg_attrs |
29 | #define platform_dma_unmap_sg sba_unmap_sg | 29 | #define platform_dma_unmap_sg_attrs sba_unmap_sg_attrs |
30 | #define platform_dma_sync_single_for_cpu machvec_dma_sync_single | 30 | #define platform_dma_sync_single_for_cpu machvec_dma_sync_single |
31 | #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg | 31 | #define platform_dma_sync_sg_for_cpu machvec_dma_sync_sg |
32 | #define platform_dma_sync_single_for_device machvec_dma_sync_single | 32 | #define platform_dma_sync_single_for_device machvec_dma_sync_single |
diff --git a/include/asm-ia64/machvec_hpzx1_swiotlb.h b/include/asm-ia64/machvec_hpzx1_swiotlb.h index f00a34a148ff..a842cdda827b 100644 --- a/include/asm-ia64/machvec_hpzx1_swiotlb.h +++ b/include/asm-ia64/machvec_hpzx1_swiotlb.h | |||
@@ -4,10 +4,10 @@ | |||
4 | extern ia64_mv_setup_t dig_setup; | 4 | extern ia64_mv_setup_t dig_setup; |
5 | extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; | 5 | extern ia64_mv_dma_alloc_coherent hwsw_alloc_coherent; |
6 | extern ia64_mv_dma_free_coherent hwsw_free_coherent; | 6 | extern ia64_mv_dma_free_coherent hwsw_free_coherent; |
7 | extern ia64_mv_dma_map_single hwsw_map_single; | 7 | extern ia64_mv_dma_map_single_attrs hwsw_map_single_attrs; |
8 | extern ia64_mv_dma_unmap_single hwsw_unmap_single; | 8 | extern ia64_mv_dma_unmap_single_attrs hwsw_unmap_single_attrs; |
9 | extern ia64_mv_dma_map_sg hwsw_map_sg; | 9 | extern ia64_mv_dma_map_sg_attrs hwsw_map_sg_attrs; |
10 | extern ia64_mv_dma_unmap_sg hwsw_unmap_sg; | 10 | extern ia64_mv_dma_unmap_sg_attrs hwsw_unmap_sg_attrs; |
11 | extern ia64_mv_dma_supported hwsw_dma_supported; | 11 | extern ia64_mv_dma_supported hwsw_dma_supported; |
12 | extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; | 12 | extern ia64_mv_dma_mapping_error hwsw_dma_mapping_error; |
13 | extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; | 13 | extern ia64_mv_dma_sync_single_for_cpu hwsw_sync_single_for_cpu; |
@@ -28,10 +28,10 @@ extern ia64_mv_dma_sync_sg_for_device hwsw_sync_sg_for_device; | |||
28 | #define platform_dma_init machvec_noop | 28 | #define platform_dma_init machvec_noop |
29 | #define platform_dma_alloc_coherent hwsw_alloc_coherent | 29 | #define platform_dma_alloc_coherent hwsw_alloc_coherent |
30 | #define platform_dma_free_coherent hwsw_free_coherent | 30 | #define platform_dma_free_coherent hwsw_free_coherent |
31 | #define platform_dma_map_single hwsw_map_single | 31 | #define platform_dma_map_single_attrs hwsw_map_single_attrs |
32 | #define platform_dma_unmap_single hwsw_unmap_single | 32 | #define platform_dma_unmap_single_attrs hwsw_unmap_single_attrs |
33 | #define platform_dma_map_sg hwsw_map_sg | 33 | #define platform_dma_map_sg_attrs hwsw_map_sg_attrs |
34 | #define platform_dma_unmap_sg hwsw_unmap_sg | 34 | #define platform_dma_unmap_sg_attrs hwsw_unmap_sg_attrs |
35 | #define platform_dma_supported hwsw_dma_supported | 35 | #define platform_dma_supported hwsw_dma_supported |
36 | #define platform_dma_mapping_error hwsw_dma_mapping_error | 36 | #define platform_dma_mapping_error hwsw_dma_mapping_error |
37 | #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu | 37 | #define platform_dma_sync_single_for_cpu hwsw_sync_single_for_cpu |
diff --git a/include/asm-ia64/machvec_sn2.h b/include/asm-ia64/machvec_sn2.h index 61439a7f5b08..781308ea7b88 100644 --- a/include/asm-ia64/machvec_sn2.h +++ b/include/asm-ia64/machvec_sn2.h | |||
@@ -57,10 +57,10 @@ extern ia64_mv_readl_t __sn_readl_relaxed; | |||
57 | extern ia64_mv_readq_t __sn_readq_relaxed; | 57 | extern ia64_mv_readq_t __sn_readq_relaxed; |
58 | extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; | 58 | extern ia64_mv_dma_alloc_coherent sn_dma_alloc_coherent; |
59 | extern ia64_mv_dma_free_coherent sn_dma_free_coherent; | 59 | extern ia64_mv_dma_free_coherent sn_dma_free_coherent; |
60 | extern ia64_mv_dma_map_single sn_dma_map_single; | 60 | extern ia64_mv_dma_map_single_attrs sn_dma_map_single_attrs; |
61 | extern ia64_mv_dma_unmap_single sn_dma_unmap_single; | 61 | extern ia64_mv_dma_unmap_single_attrs sn_dma_unmap_single_attrs; |
62 | extern ia64_mv_dma_map_sg sn_dma_map_sg; | 62 | extern ia64_mv_dma_map_sg_attrs sn_dma_map_sg_attrs; |
63 | extern ia64_mv_dma_unmap_sg sn_dma_unmap_sg; | 63 | extern ia64_mv_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs; |
64 | extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; | 64 | extern ia64_mv_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu; |
65 | extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; | 65 | extern ia64_mv_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu; |
66 | extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; | 66 | extern ia64_mv_dma_sync_single_for_device sn_dma_sync_single_for_device; |
@@ -113,10 +113,10 @@ extern ia64_mv_pci_fixup_bus_t sn_pci_fixup_bus; | |||
113 | #define platform_dma_init machvec_noop | 113 | #define platform_dma_init machvec_noop |
114 | #define platform_dma_alloc_coherent sn_dma_alloc_coherent | 114 | #define platform_dma_alloc_coherent sn_dma_alloc_coherent |
115 | #define platform_dma_free_coherent sn_dma_free_coherent | 115 | #define platform_dma_free_coherent sn_dma_free_coherent |
116 | #define platform_dma_map_single sn_dma_map_single | 116 | #define platform_dma_map_single_attrs sn_dma_map_single_attrs |
117 | #define platform_dma_unmap_single sn_dma_unmap_single | 117 | #define platform_dma_unmap_single_attrs sn_dma_unmap_single_attrs |
118 | #define platform_dma_map_sg sn_dma_map_sg | 118 | #define platform_dma_map_sg_attrs sn_dma_map_sg_attrs |
119 | #define platform_dma_unmap_sg sn_dma_unmap_sg | 119 | #define platform_dma_unmap_sg_attrs sn_dma_unmap_sg_attrs |
120 | #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu | 120 | #define platform_dma_sync_single_for_cpu sn_dma_sync_single_for_cpu |
121 | #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu | 121 | #define platform_dma_sync_sg_for_cpu sn_dma_sync_sg_for_cpu |
122 | #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device | 122 | #define platform_dma_sync_single_for_device sn_dma_sync_single_for_device |
diff --git a/include/asm-ia64/unaligned.h b/include/asm-ia64/unaligned.h index bb8559888103..7bddc7f58584 100644 --- a/include/asm-ia64/unaligned.h +++ b/include/asm-ia64/unaligned.h | |||
@@ -1,6 +1,11 @@ | |||
1 | #ifndef _ASM_IA64_UNALIGNED_H | 1 | #ifndef _ASM_IA64_UNALIGNED_H |
2 | #define _ASM_IA64_UNALIGNED_H | 2 | #define _ASM_IA64_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/le_struct.h> |
5 | #include <linux/unaligned/be_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | |||
8 | #define get_unaligned __get_unaligned_le | ||
9 | #define put_unaligned __put_unaligned_le | ||
5 | 10 | ||
6 | #endif /* _ASM_IA64_UNALIGNED_H */ | 11 | #endif /* _ASM_IA64_UNALIGNED_H */ |
diff --git a/include/asm-m32r/unaligned.h b/include/asm-m32r/unaligned.h index fccc180c3913..377eb20d1ec6 100644 --- a/include/asm-m32r/unaligned.h +++ b/include/asm-m32r/unaligned.h | |||
@@ -1,19 +1,18 @@ | |||
1 | #ifndef _ASM_M32R_UNALIGNED_H | 1 | #ifndef _ASM_M32R_UNALIGNED_H |
2 | #define _ASM_M32R_UNALIGNED_H | 2 | #define _ASM_M32R_UNALIGNED_H |
3 | 3 | ||
4 | /* | 4 | #if defined(__LITTLE_ENDIAN__) |
5 | * For the benefit of those who are trying to port Linux to another | 5 | # include <linux/unaligned/le_memmove.h> |
6 | * architecture, here are some C-language equivalents. | 6 | # include <linux/unaligned/be_byteshift.h> |
7 | */ | 7 | # include <linux/unaligned/generic.h> |
8 | 8 | # define get_unaligned __get_unaligned_le | |
9 | #include <asm/string.h> | 9 | # define put_unaligned __put_unaligned_le |
10 | 10 | #else | |
11 | #define get_unaligned(ptr) \ | 11 | # include <linux/unaligned/be_memmove.h> |
12 | ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) | 12 | # include <linux/unaligned/le_byteshift.h> |
13 | 13 | # include <linux/unaligned/generic.h> | |
14 | #define put_unaligned(val, ptr) \ | 14 | # define get_unaligned __get_unaligned_be |
15 | ({ __typeof__(*(ptr)) __tmp = (val); \ | 15 | # define put_unaligned __put_unaligned_be |
16 | memmove((ptr), &__tmp, sizeof(*(ptr))); \ | 16 | #endif |
17 | (void)0; }) | ||
18 | 17 | ||
19 | #endif /* _ASM_M32R_UNALIGNED_H */ | 18 | #endif /* _ASM_M32R_UNALIGNED_H */ |
diff --git a/include/asm-m68k/unaligned.h b/include/asm-m68k/unaligned.h index 804cb3f888fe..77698f2dc33c 100644 --- a/include/asm-m68k/unaligned.h +++ b/include/asm-m68k/unaligned.h | |||
@@ -1,16 +1,13 @@ | |||
1 | #ifndef __M68K_UNALIGNED_H | 1 | #ifndef _ASM_M68K_UNALIGNED_H |
2 | #define __M68K_UNALIGNED_H | 2 | #define _ASM_M68K_UNALIGNED_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The m68k can do unaligned accesses itself. | 5 | * The m68k can do unaligned accesses itself. |
6 | * | ||
7 | * The strange macros are there to make sure these can't | ||
8 | * be misused in a way that makes them not work on other | ||
9 | * architectures where unaligned accesses aren't as simple. | ||
10 | */ | 6 | */ |
7 | #include <linux/unaligned/access_ok.h> | ||
8 | #include <linux/unaligned/generic.h> | ||
11 | 9 | ||
12 | #define get_unaligned(ptr) (*(ptr)) | 10 | #define get_unaligned __get_unaligned_be |
11 | #define put_unaligned __put_unaligned_be | ||
13 | 12 | ||
14 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | 13 | #endif /* _ASM_M68K_UNALIGNED_H */ |
15 | |||
16 | #endif | ||
diff --git a/include/asm-m68knommu/unaligned.h b/include/asm-m68knommu/unaligned.h index 869e9dd24f54..eb1ea4cb9a59 100644 --- a/include/asm-m68knommu/unaligned.h +++ b/include/asm-m68knommu/unaligned.h | |||
@@ -1,23 +1,25 @@ | |||
1 | #ifndef __M68K_UNALIGNED_H | 1 | #ifndef _ASM_M68KNOMMU_UNALIGNED_H |
2 | #define __M68K_UNALIGNED_H | 2 | #define _ASM_M68KNOMMU_UNALIGNED_H |
3 | 3 | ||
4 | 4 | ||
5 | #ifdef CONFIG_COLDFIRE | 5 | #ifdef CONFIG_COLDFIRE |
6 | #include <linux/unaligned/be_struct.h> | ||
7 | #include <linux/unaligned/le_byteshift.h> | ||
8 | #include <linux/unaligned/generic.h> | ||
6 | 9 | ||
7 | #include <asm-generic/unaligned.h> | 10 | #define get_unaligned __get_unaligned_be |
11 | #define put_unaligned __put_unaligned_be | ||
8 | 12 | ||
9 | #else | 13 | #else |
10 | /* | 14 | /* |
11 | * The m68k can do unaligned accesses itself. | 15 | * The m68k can do unaligned accesses itself. |
12 | * | ||
13 | * The strange macros are there to make sure these can't | ||
14 | * be misused in a way that makes them not work on other | ||
15 | * architectures where unaligned accesses aren't as simple. | ||
16 | */ | 16 | */ |
17 | #include <linux/unaligned/access_ok.h> | ||
18 | #include <linux/unaligned/generic.h> | ||
17 | 19 | ||
18 | #define get_unaligned(ptr) (*(ptr)) | 20 | #define get_unaligned __get_unaligned_be |
19 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | 21 | #define put_unaligned __put_unaligned_be |
20 | 22 | ||
21 | #endif | 23 | #endif |
22 | 24 | ||
23 | #endif | 25 | #endif /* _ASM_M68KNOMMU_UNALIGNED_H */ |
diff --git a/include/asm-mips/unaligned.h b/include/asm-mips/unaligned.h index 3249049e93aa..792404948571 100644 --- a/include/asm-mips/unaligned.h +++ b/include/asm-mips/unaligned.h | |||
@@ -5,25 +5,24 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) | 6 | * Copyright (C) 2007 Ralf Baechle (ralf@linux-mips.org) |
7 | */ | 7 | */ |
8 | #ifndef __ASM_GENERIC_UNALIGNED_H | 8 | #ifndef _ASM_MIPS_UNALIGNED_H |
9 | #define __ASM_GENERIC_UNALIGNED_H | 9 | #define _ASM_MIPS_UNALIGNED_H |
10 | 10 | ||
11 | #include <linux/compiler.h> | 11 | #include <linux/compiler.h> |
12 | #if defined(__MIPSEB__) | ||
13 | # include <linux/unaligned/be_struct.h> | ||
14 | # include <linux/unaligned/le_byteshift.h> | ||
15 | # include <linux/unaligned/generic.h> | ||
16 | # define get_unaligned __get_unaligned_be | ||
17 | # define put_unaligned __put_unaligned_be | ||
18 | #elif defined(__MIPSEL__) | ||
19 | # include <linux/unaligned/le_struct.h> | ||
20 | # include <linux/unaligned/be_byteshift.h> | ||
21 | # include <linux/unaligned/generic.h> | ||
22 | # define get_unaligned __get_unaligned_le | ||
23 | # define put_unaligned __put_unaligned_le | ||
24 | #else | ||
25 | # error "MIPS, but neither __MIPSEB__, nor __MIPSEL__???" | ||
26 | #endif | ||
12 | 27 | ||
13 | #define get_unaligned(ptr) \ | 28 | #endif /* _ASM_MIPS_UNALIGNED_H */ |
14 | ({ \ | ||
15 | struct __packed { \ | ||
16 | typeof(*(ptr)) __v; \ | ||
17 | } *__p = (void *) (ptr); \ | ||
18 | __p->__v; \ | ||
19 | }) | ||
20 | |||
21 | #define put_unaligned(val, ptr) \ | ||
22 | do { \ | ||
23 | struct __packed { \ | ||
24 | typeof(*(ptr)) __v; \ | ||
25 | } *__p = (void *) (ptr); \ | ||
26 | __p->__v = (val); \ | ||
27 | } while(0) | ||
28 | |||
29 | #endif /* __ASM_GENERIC_UNALIGNED_H */ | ||
diff --git a/include/asm-mn10300/unaligned.h b/include/asm-mn10300/unaligned.h index cad3afbd035f..0df671318ae4 100644 --- a/include/asm-mn10300/unaligned.h +++ b/include/asm-mn10300/unaligned.h | |||
@@ -8,129 +8,13 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the Licence, or (at your option) any later version. | 9 | * 2 of the Licence, or (at your option) any later version. |
10 | */ | 10 | */ |
11 | #ifndef _ASM_UNALIGNED_H | 11 | #ifndef _ASM_MN10300_UNALIGNED_H |
12 | #define _ASM_UNALIGNED_H | 12 | #define _ASM_MN10300_UNALIGNED_H |
13 | 13 | ||
14 | #include <asm/types.h> | 14 | #include <linux/unaligned/access_ok.h> |
15 | #include <linux/unaligned/generic.h> | ||
15 | 16 | ||
16 | #if 0 | 17 | #define get_unaligned __get_unaligned_le |
17 | extern int __bug_unaligned_x(void *ptr); | 18 | #define put_unaligned __put_unaligned_le |
18 | 19 | ||
19 | /* | 20 | #endif /* _ASM_MN10300_UNALIGNED_H */ |
20 | * What is the most efficient way of loading/storing an unaligned value? | ||
21 | * | ||
22 | * That is the subject of this file. Efficiency here is defined as | ||
23 | * minimum code size with minimum register usage for the common cases. | ||
24 | * It is currently not believed that long longs are common, so we | ||
25 | * trade efficiency for the chars, shorts and longs against the long | ||
26 | * longs. | ||
27 | * | ||
28 | * Current stats with gcc 2.7.2.2 for these functions: | ||
29 | * | ||
30 | * ptrsize get: code regs put: code regs | ||
31 | * 1 1 1 1 2 | ||
32 | * 2 3 2 3 2 | ||
33 | * 4 7 3 7 3 | ||
34 | * 8 20 6 16 6 | ||
35 | * | ||
36 | * gcc 2.95.1 seems to code differently: | ||
37 | * | ||
38 | * ptrsize get: code regs put: code regs | ||
39 | * 1 1 1 1 2 | ||
40 | * 2 3 2 3 2 | ||
41 | * 4 7 4 7 4 | ||
42 | * 8 19 8 15 6 | ||
43 | * | ||
44 | * which may or may not be more efficient (depending upon whether | ||
45 | * you can afford the extra registers). Hopefully the gcc 2.95 | ||
46 | * is inteligent enough to decide if it is better to use the | ||
47 | * extra register, but evidence so far seems to suggest otherwise. | ||
48 | * | ||
49 | * Unfortunately, gcc is not able to optimise the high word | ||
50 | * out of long long >> 32, or the low word from long long << 32 | ||
51 | */ | ||
52 | |||
53 | #define __get_unaligned_2(__p) \ | ||
54 | (__p[0] | __p[1] << 8) | ||
55 | |||
56 | #define __get_unaligned_4(__p) \ | ||
57 | (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) | ||
58 | |||
59 | #define get_unaligned(ptr) \ | ||
60 | ({ \ | ||
61 | unsigned int __v1, __v2; \ | ||
62 | __typeof__(*(ptr)) __v; \ | ||
63 | __u8 *__p = (__u8 *)(ptr); \ | ||
64 | \ | ||
65 | switch (sizeof(*(ptr))) { \ | ||
66 | case 1: __v = *(ptr); break; \ | ||
67 | case 2: __v = __get_unaligned_2(__p); break; \ | ||
68 | case 4: __v = __get_unaligned_4(__p); break; \ | ||
69 | case 8: \ | ||
70 | __v2 = __get_unaligned_4((__p+4)); \ | ||
71 | __v1 = __get_unaligned_4(__p); \ | ||
72 | __v = ((unsigned long long)__v2 << 32 | __v1); \ | ||
73 | break; \ | ||
74 | default: __v = __bug_unaligned_x(__p); break; \ | ||
75 | } \ | ||
76 | __v; \ | ||
77 | }) | ||
78 | |||
79 | |||
80 | static inline void __put_unaligned_2(__u32 __v, register __u8 *__p) | ||
81 | { | ||
82 | *__p++ = __v; | ||
83 | *__p++ = __v >> 8; | ||
84 | } | ||
85 | |||
86 | static inline void __put_unaligned_4(__u32 __v, register __u8 *__p) | ||
87 | { | ||
88 | __put_unaligned_2(__v >> 16, __p + 2); | ||
89 | __put_unaligned_2(__v, __p); | ||
90 | } | ||
91 | |||
92 | static inline void __put_unaligned_8(const unsigned long long __v, __u8 *__p) | ||
93 | { | ||
94 | /* | ||
95 | * tradeoff: 8 bytes of stack for all unaligned puts (2 | ||
96 | * instructions), or an extra register in the long long | ||
97 | * case - go for the extra register. | ||
98 | */ | ||
99 | __put_unaligned_4(__v >> 32, __p + 4); | ||
100 | __put_unaligned_4(__v, __p); | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * Try to store an unaligned value as efficiently as possible. | ||
105 | */ | ||
106 | #define put_unaligned(val, ptr) \ | ||
107 | ({ \ | ||
108 | switch (sizeof(*(ptr))) { \ | ||
109 | case 1: \ | ||
110 | *(ptr) = (val); \ | ||
111 | break; \ | ||
112 | case 2: \ | ||
113 | __put_unaligned_2((val), (__u8 *)(ptr)); \ | ||
114 | break; \ | ||
115 | case 4: \ | ||
116 | __put_unaligned_4((val), (__u8 *)(ptr)); \ | ||
117 | break; \ | ||
118 | case 8: \ | ||
119 | __put_unaligned_8((val), (__u8 *)(ptr)); \ | ||
120 | break; \ | ||
121 | default: \ | ||
122 | __bug_unaligned_x(ptr); \ | ||
123 | break; \ | ||
124 | } \ | ||
125 | (void) 0; \ | ||
126 | }) | ||
127 | |||
128 | |||
129 | #else | ||
130 | |||
131 | #define get_unaligned(ptr) (*(ptr)) | ||
132 | #define put_unaligned(val, ptr) ({ *(ptr) = (val); (void) 0; }) | ||
133 | |||
134 | #endif | ||
135 | |||
136 | #endif | ||
diff --git a/include/asm-parisc/unaligned.h b/include/asm-parisc/unaligned.h index 53c905838d93..dfc5d3321a54 100644 --- a/include/asm-parisc/unaligned.h +++ b/include/asm-parisc/unaligned.h | |||
@@ -1,7 +1,11 @@ | |||
1 | #ifndef _ASM_PARISC_UNALIGNED_H_ | 1 | #ifndef _ASM_PARISC_UNALIGNED_H |
2 | #define _ASM_PARISC_UNALIGNED_H_ | 2 | #define _ASM_PARISC_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/be_struct.h> |
5 | #include <linux/unaligned/le_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | #define get_unaligned __get_unaligned_be | ||
8 | #define put_unaligned __put_unaligned_be | ||
5 | 9 | ||
6 | #ifdef __KERNEL__ | 10 | #ifdef __KERNEL__ |
7 | struct pt_regs; | 11 | struct pt_regs; |
@@ -9,4 +13,4 @@ void handle_unaligned(struct pt_regs *regs); | |||
9 | int check_unaligned(struct pt_regs *regs); | 13 | int check_unaligned(struct pt_regs *regs); |
10 | #endif | 14 | #endif |
11 | 15 | ||
12 | #endif /* _ASM_PARISC_UNALIGNED_H_ */ | 16 | #endif /* _ASM_PARISC_UNALIGNED_H */ |
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h index b5c03127a9b9..5089deb8fec3 100644 --- a/include/asm-powerpc/irq.h +++ b/include/asm-powerpc/irq.h | |||
@@ -619,8 +619,6 @@ struct pt_regs; | |||
619 | 619 | ||
620 | #define __ARCH_HAS_DO_SOFTIRQ | 620 | #define __ARCH_HAS_DO_SOFTIRQ |
621 | 621 | ||
622 | extern void __do_softirq(void); | ||
623 | |||
624 | #ifdef CONFIG_IRQSTACKS | 622 | #ifdef CONFIG_IRQSTACKS |
625 | /* | 623 | /* |
626 | * Per-cpu stacks for handling hard and soft interrupts. | 624 | * Per-cpu stacks for handling hard and soft interrupts. |
diff --git a/include/asm-powerpc/unaligned.h b/include/asm-powerpc/unaligned.h index 6c95dfa2652f..5f1b1e3c2137 100644 --- a/include/asm-powerpc/unaligned.h +++ b/include/asm-powerpc/unaligned.h | |||
@@ -5,15 +5,12 @@ | |||
5 | 5 | ||
6 | /* | 6 | /* |
7 | * The PowerPC can do unaligned accesses itself in big endian mode. | 7 | * The PowerPC can do unaligned accesses itself in big endian mode. |
8 | * | ||
9 | * The strange macros are there to make sure these can't | ||
10 | * be misused in a way that makes them not work on other | ||
11 | * architectures where unaligned accesses aren't as simple. | ||
12 | */ | 8 | */ |
9 | #include <linux/unaligned/access_ok.h> | ||
10 | #include <linux/unaligned/generic.h> | ||
13 | 11 | ||
14 | #define get_unaligned(ptr) (*(ptr)) | 12 | #define get_unaligned __get_unaligned_be |
15 | 13 | #define put_unaligned __put_unaligned_be | |
16 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | ||
17 | 14 | ||
18 | #endif /* __KERNEL__ */ | 15 | #endif /* __KERNEL__ */ |
19 | #endif /* _ASM_POWERPC_UNALIGNED_H */ | 16 | #endif /* _ASM_POWERPC_UNALIGNED_H */ |
diff --git a/include/asm-s390/unaligned.h b/include/asm-s390/unaligned.h index 8ee86dbedd1f..da9627afe5d8 100644 --- a/include/asm-s390/unaligned.h +++ b/include/asm-s390/unaligned.h | |||
@@ -1,24 +1,13 @@ | |||
1 | /* | 1 | #ifndef _ASM_S390_UNALIGNED_H |
2 | * include/asm-s390/unaligned.h | 2 | #define _ASM_S390_UNALIGNED_H |
3 | * | ||
4 | * S390 version | ||
5 | * | ||
6 | * Derived from "include/asm-i386/unaligned.h" | ||
7 | */ | ||
8 | |||
9 | #ifndef __S390_UNALIGNED_H | ||
10 | #define __S390_UNALIGNED_H | ||
11 | 3 | ||
12 | /* | 4 | /* |
13 | * The S390 can do unaligned accesses itself. | 5 | * The S390 can do unaligned accesses itself. |
14 | * | ||
15 | * The strange macros are there to make sure these can't | ||
16 | * be misused in a way that makes them not work on other | ||
17 | * architectures where unaligned accesses aren't as simple. | ||
18 | */ | 6 | */ |
7 | #include <linux/unaligned/access_ok.h> | ||
8 | #include <linux/unaligned/generic.h> | ||
19 | 9 | ||
20 | #define get_unaligned(ptr) (*(ptr)) | 10 | #define get_unaligned __get_unaligned_be |
21 | 11 | #define put_unaligned __put_unaligned_be | |
22 | #define put_unaligned(val, ptr) ((void)( *(ptr) = (val) )) | ||
23 | 12 | ||
24 | #endif | 13 | #endif /* _ASM_S390_UNALIGNED_H */ |
diff --git a/include/asm-sh/unaligned.h b/include/asm-sh/unaligned.h index 5250e3063b42..c1641a01d50f 100644 --- a/include/asm-sh/unaligned.h +++ b/include/asm-sh/unaligned.h | |||
@@ -1,7 +1,19 @@ | |||
1 | #ifndef __ASM_SH_UNALIGNED_H | 1 | #ifndef _ASM_SH_UNALIGNED_H |
2 | #define __ASM_SH_UNALIGNED_H | 2 | #define _ASM_SH_UNALIGNED_H |
3 | 3 | ||
4 | /* SH can't handle unaligned accesses. */ | 4 | /* SH can't handle unaligned accesses. */ |
5 | #include <asm-generic/unaligned.h> | 5 | #ifdef __LITTLE_ENDIAN__ |
6 | # include <linux/unaligned/le_struct.h> | ||
7 | # include <linux/unaligned/be_byteshift.h> | ||
8 | # include <linux/unaligned/generic.h> | ||
9 | # define get_unaligned __get_unaligned_le | ||
10 | # define put_unaligned __put_unaligned_le | ||
11 | #else | ||
12 | # include <linux/unaligned/be_struct.h> | ||
13 | # include <linux/unaligned/le_byteshift.h> | ||
14 | # include <linux/unaligned/generic.h> | ||
15 | # define get_unaligned __get_unaligned_be | ||
16 | # define put_unaligned __put_unaligned_be | ||
17 | #endif | ||
6 | 18 | ||
7 | #endif /* __ASM_SH_UNALIGNED_H */ | 19 | #endif /* _ASM_SH_UNALIGNED_H */ |
diff --git a/include/asm-sparc/unaligned.h b/include/asm-sparc/unaligned.h index b6f8eddd30af..11d2d5fb5902 100644 --- a/include/asm-sparc/unaligned.h +++ b/include/asm-sparc/unaligned.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASM_SPARC_UNALIGNED_H_ | 1 | #ifndef _ASM_SPARC_UNALIGNED_H |
2 | #define _ASM_SPARC_UNALIGNED_H_ | 2 | #define _ASM_SPARC_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/be_struct.h> |
5 | #include <linux/unaligned/le_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | #define get_unaligned __get_unaligned_be | ||
8 | #define put_unaligned __put_unaligned_be | ||
5 | 9 | ||
6 | #endif /* _ASM_SPARC_UNALIGNED_H */ | 10 | #endif /* _ASM_SPARC_UNALIGNED_H */ |
diff --git a/include/asm-sparc64/unaligned.h b/include/asm-sparc64/unaligned.h index 1ed3ba537772..edcebb09441e 100644 --- a/include/asm-sparc64/unaligned.h +++ b/include/asm-sparc64/unaligned.h | |||
@@ -1,6 +1,10 @@ | |||
1 | #ifndef _ASM_SPARC64_UNALIGNED_H_ | 1 | #ifndef _ASM_SPARC64_UNALIGNED_H |
2 | #define _ASM_SPARC64_UNALIGNED_H_ | 2 | #define _ASM_SPARC64_UNALIGNED_H |
3 | 3 | ||
4 | #include <asm-generic/unaligned.h> | 4 | #include <linux/unaligned/be_struct.h> |
5 | #include <linux/unaligned/le_byteshift.h> | ||
6 | #include <linux/unaligned/generic.h> | ||
7 | #define get_unaligned __get_unaligned_be | ||
8 | #define put_unaligned __put_unaligned_be | ||
5 | 9 | ||
6 | #endif /* _ASM_SPARC64_UNALIGNED_H */ | 10 | #endif /* _ASM_SPARC64_UNALIGNED_H */ |
diff --git a/include/asm-um/unaligned.h b/include/asm-um/unaligned.h index 1d2497c57274..a47196974e39 100644 --- a/include/asm-um/unaligned.h +++ b/include/asm-um/unaligned.h | |||
@@ -1,6 +1,6 @@ | |||
1 | #ifndef __UM_UNALIGNED_H | 1 | #ifndef _ASM_UM_UNALIGNED_H |
2 | #define __UM_UNALIGNED_H | 2 | #define _ASM_UM_UNALIGNED_H |
3 | 3 | ||
4 | #include "asm/arch/unaligned.h" | 4 | #include "asm/arch/unaligned.h" |
5 | 5 | ||
6 | #endif | 6 | #endif /* _ASM_UM_UNALIGNED_H */ |
diff --git a/include/asm-v850/unaligned.h b/include/asm-v850/unaligned.h index e30b18653a94..53122b28491e 100644 --- a/include/asm-v850/unaligned.h +++ b/include/asm-v850/unaligned.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-v850/unaligned.h -- Unaligned memory access | ||
3 | * | ||
4 | * Copyright (C) 2001 NEC Corporation | 2 | * Copyright (C) 2001 NEC Corporation |
5 | * Copyright (C) 2001 Miles Bader <miles@gnu.org> | 3 | * Copyright (C) 2001 Miles Bader <miles@gnu.org> |
6 | * | 4 | * |
@@ -8,123 +6,17 @@ | |||
8 | * Public License. See the file COPYING in the main directory of this | 6 | * Public License. See the file COPYING in the main directory of this |
9 | * archive for more details. | 7 | * archive for more details. |
10 | * | 8 | * |
11 | * This file is a copy of the arm version, include/asm-arm/unaligned.h | ||
12 | * | ||
13 | * Note that some v850 chips support unaligned access, but it seems too | 9 | * Note that some v850 chips support unaligned access, but it seems too |
14 | * annoying to use. | 10 | * annoying to use. |
15 | */ | 11 | */ |
12 | #ifndef _ASM_V850_UNALIGNED_H | ||
13 | #define _ASM_V850_UNALIGNED_H | ||
16 | 14 | ||
17 | #ifndef __V850_UNALIGNED_H__ | 15 | #include <linux/unaligned/be_byteshift.h> |
18 | #define __V850_UNALIGNED_H__ | 16 | #include <linux/unaligned/le_byteshift.h> |
19 | 17 | #include <linux/unaligned/generic.h> | |
20 | #include <asm/types.h> | ||
21 | |||
22 | extern int __bug_unaligned_x(void *ptr); | ||
23 | |||
24 | /* | ||
25 | * What is the most efficient way of loading/storing an unaligned value? | ||
26 | * | ||
27 | * That is the subject of this file. Efficiency here is defined as | ||
28 | * minimum code size with minimum register usage for the common cases. | ||
29 | * It is currently not believed that long longs are common, so we | ||
30 | * trade efficiency for the chars, shorts and longs against the long | ||
31 | * longs. | ||
32 | * | ||
33 | * Current stats with gcc 2.7.2.2 for these functions: | ||
34 | * | ||
35 | * ptrsize get: code regs put: code regs | ||
36 | * 1 1 1 1 2 | ||
37 | * 2 3 2 3 2 | ||
38 | * 4 7 3 7 3 | ||
39 | * 8 20 6 16 6 | ||
40 | * | ||
41 | * gcc 2.95.1 seems to code differently: | ||
42 | * | ||
43 | * ptrsize get: code regs put: code regs | ||
44 | * 1 1 1 1 2 | ||
45 | * 2 3 2 3 2 | ||
46 | * 4 7 4 7 4 | ||
47 | * 8 19 8 15 6 | ||
48 | * | ||
49 | * which may or may not be more efficient (depending upon whether | ||
50 | * you can afford the extra registers). Hopefully the gcc 2.95 | ||
51 | * is inteligent enough to decide if it is better to use the | ||
52 | * extra register, but evidence so far seems to suggest otherwise. | ||
53 | * | ||
54 | * Unfortunately, gcc is not able to optimise the high word | ||
55 | * out of long long >> 32, or the low word from long long << 32 | ||
56 | */ | ||
57 | |||
58 | #define __get_unaligned_2(__p) \ | ||
59 | (__p[0] | __p[1] << 8) | ||
60 | |||
61 | #define __get_unaligned_4(__p) \ | ||
62 | (__p[0] | __p[1] << 8 | __p[2] << 16 | __p[3] << 24) | ||
63 | |||
64 | #define get_unaligned(ptr) \ | ||
65 | ({ \ | ||
66 | __typeof__(*(ptr)) __v; \ | ||
67 | __u8 *__p = (__u8 *)(ptr); \ | ||
68 | switch (sizeof(*(ptr))) { \ | ||
69 | case 1: __v = *(ptr); break; \ | ||
70 | case 2: __v = __get_unaligned_2(__p); break; \ | ||
71 | case 4: __v = __get_unaligned_4(__p); break; \ | ||
72 | case 8: { \ | ||
73 | unsigned int __v1, __v2; \ | ||
74 | __v2 = __get_unaligned_4((__p+4)); \ | ||
75 | __v1 = __get_unaligned_4(__p); \ | ||
76 | __v = ((unsigned long long)__v2 << 32 | __v1); \ | ||
77 | } \ | ||
78 | break; \ | ||
79 | default: __v = __bug_unaligned_x(__p); break; \ | ||
80 | } \ | ||
81 | __v; \ | ||
82 | }) | ||
83 | |||
84 | |||
85 | static inline void __put_unaligned_2(__u32 __v, register __u8 *__p) | ||
86 | { | ||
87 | *__p++ = __v; | ||
88 | *__p++ = __v >> 8; | ||
89 | } | ||
90 | |||
91 | static inline void __put_unaligned_4(__u32 __v, register __u8 *__p) | ||
92 | { | ||
93 | __put_unaligned_2(__v >> 16, __p + 2); | ||
94 | __put_unaligned_2(__v, __p); | ||
95 | } | ||
96 | |||
97 | static inline void __put_unaligned_8(const unsigned long long __v, register __u8 *__p) | ||
98 | { | ||
99 | /* | ||
100 | * tradeoff: 8 bytes of stack for all unaligned puts (2 | ||
101 | * instructions), or an extra register in the long long | ||
102 | * case - go for the extra register. | ||
103 | */ | ||
104 | __put_unaligned_4(__v >> 32, __p+4); | ||
105 | __put_unaligned_4(__v, __p); | ||
106 | } | ||
107 | |||
108 | /* | ||
109 | * Try to store an unaligned value as efficiently as possible. | ||
110 | */ | ||
111 | #define put_unaligned(val,ptr) \ | ||
112 | ({ \ | ||
113 | switch (sizeof(*(ptr))) { \ | ||
114 | case 1: \ | ||
115 | *(ptr) = (val); \ | ||
116 | break; \ | ||
117 | case 2: __put_unaligned_2((val),(__u8 *)(ptr)); \ | ||
118 | break; \ | ||
119 | case 4: __put_unaligned_4((val),(__u8 *)(ptr)); \ | ||
120 | break; \ | ||
121 | case 8: __put_unaligned_8((val),(__u8 *)(ptr)); \ | ||
122 | break; \ | ||
123 | default: __bug_unaligned_x(ptr); \ | ||
124 | break; \ | ||
125 | } \ | ||
126 | (void) 0; \ | ||
127 | }) | ||
128 | 18 | ||
19 | #define get_unaligned __get_unaligned_le | ||
20 | #define put_unaligned __put_unaligned_le | ||
129 | 21 | ||
130 | #endif /* __V850_UNALIGNED_H__ */ | 22 | #endif /* _ASM_V850_UNALIGNED_H */ |
diff --git a/include/asm-x86/olpc.h b/include/asm-x86/olpc.h new file mode 100644 index 000000000000..97d47133486f --- /dev/null +++ b/include/asm-x86/olpc.h | |||
@@ -0,0 +1,132 @@ | |||
1 | /* OLPC machine specific definitions */ | ||
2 | |||
3 | #ifndef ASM_OLPC_H_ | ||
4 | #define ASM_OLPC_H_ | ||
5 | |||
6 | #include <asm/geode.h> | ||
7 | |||
8 | struct olpc_platform_t { | ||
9 | int flags; | ||
10 | uint32_t boardrev; | ||
11 | int ecver; | ||
12 | }; | ||
13 | |||
14 | #define OLPC_F_PRESENT 0x01 | ||
15 | #define OLPC_F_DCON 0x02 | ||
16 | #define OLPC_F_VSA 0x04 | ||
17 | |||
18 | #ifdef CONFIG_OLPC | ||
19 | |||
20 | extern struct olpc_platform_t olpc_platform_info; | ||
21 | |||
22 | /* | ||
23 | * OLPC board IDs contain the major build number within the mask 0x0ff0, | ||
24 | * and the minor build number withing 0x000f. Pre-builds have a minor | ||
25 | * number less than 8, and normal builds start at 8. For example, 0x0B10 | ||
26 | * is a PreB1, and 0x0C18 is a C1. | ||
27 | */ | ||
28 | |||
29 | static inline uint32_t olpc_board(uint8_t id) | ||
30 | { | ||
31 | return (id << 4) | 0x8; | ||
32 | } | ||
33 | |||
34 | static inline uint32_t olpc_board_pre(uint8_t id) | ||
35 | { | ||
36 | return id << 4; | ||
37 | } | ||
38 | |||
39 | static inline int machine_is_olpc(void) | ||
40 | { | ||
41 | return (olpc_platform_info.flags & OLPC_F_PRESENT) ? 1 : 0; | ||
42 | } | ||
43 | |||
44 | /* | ||
45 | * The DCON is OLPC's Display Controller. It has a number of unique | ||
46 | * features that we might want to take advantage of.. | ||
47 | */ | ||
48 | static inline int olpc_has_dcon(void) | ||
49 | { | ||
50 | return (olpc_platform_info.flags & OLPC_F_DCON) ? 1 : 0; | ||
51 | } | ||
52 | |||
53 | /* | ||
54 | * The VSA is software from AMD that typical Geode bioses will include. | ||
55 | * It is used to emulate the PCI bus, VGA, etc. OLPC's Open Firmware does | ||
56 | * not include the VSA; instead, PCI is emulated by the kernel. | ||
57 | * | ||
58 | * The VSA is described further in arch/x86/pci/olpc.c. | ||
59 | */ | ||
60 | static inline int olpc_has_vsa(void) | ||
61 | { | ||
62 | return (olpc_platform_info.flags & OLPC_F_VSA) ? 1 : 0; | ||
63 | } | ||
64 | |||
65 | /* | ||
66 | * The "Mass Production" version of OLPC's XO is identified as being model | ||
67 | * C2. During the prototype phase, the following models (in chronological | ||
68 | * order) were created: A1, B1, B2, B3, B4, C1. The A1 through B2 models | ||
69 | * were based on Geode GX CPUs, and models after that were based upon | ||
70 | * Geode LX CPUs. There were also some hand-assembled models floating | ||
71 | * around, referred to as PreB1, PreB2, etc. | ||
72 | */ | ||
73 | static inline int olpc_board_at_least(uint32_t rev) | ||
74 | { | ||
75 | return olpc_platform_info.boardrev >= rev; | ||
76 | } | ||
77 | |||
78 | #else | ||
79 | |||
80 | static inline int machine_is_olpc(void) | ||
81 | { | ||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | static inline int olpc_has_dcon(void) | ||
86 | { | ||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | static inline int olpc_has_vsa(void) | ||
91 | { | ||
92 | return 0; | ||
93 | } | ||
94 | |||
95 | #endif | ||
96 | |||
97 | /* EC related functions */ | ||
98 | |||
99 | extern int olpc_ec_cmd(unsigned char cmd, unsigned char *inbuf, size_t inlen, | ||
100 | unsigned char *outbuf, size_t outlen); | ||
101 | |||
102 | extern int olpc_ec_mask_set(uint8_t bits); | ||
103 | extern int olpc_ec_mask_unset(uint8_t bits); | ||
104 | |||
105 | /* EC commands */ | ||
106 | |||
107 | #define EC_FIRMWARE_REV 0x08 | ||
108 | |||
109 | /* SCI source values */ | ||
110 | |||
111 | #define EC_SCI_SRC_EMPTY 0x00 | ||
112 | #define EC_SCI_SRC_GAME 0x01 | ||
113 | #define EC_SCI_SRC_BATTERY 0x02 | ||
114 | #define EC_SCI_SRC_BATSOC 0x04 | ||
115 | #define EC_SCI_SRC_BATERR 0x08 | ||
116 | #define EC_SCI_SRC_EBOOK 0x10 | ||
117 | #define EC_SCI_SRC_WLAN 0x20 | ||
118 | #define EC_SCI_SRC_ACPWR 0x40 | ||
119 | #define EC_SCI_SRC_ALL 0x7F | ||
120 | |||
121 | /* GPIO assignments */ | ||
122 | |||
123 | #define OLPC_GPIO_MIC_AC geode_gpio(1) | ||
124 | #define OLPC_GPIO_DCON_IRQ geode_gpio(7) | ||
125 | #define OLPC_GPIO_THRM_ALRM geode_gpio(10) | ||
126 | #define OLPC_GPIO_SMB_CLK geode_gpio(14) | ||
127 | #define OLPC_GPIO_SMB_DATA geode_gpio(15) | ||
128 | #define OLPC_GPIO_WORKAUX geode_gpio(24) | ||
129 | #define OLPC_GPIO_LID geode_gpio(26) | ||
130 | #define OLPC_GPIO_ECSCI geode_gpio(27) | ||
131 | |||
132 | #endif | ||
diff --git a/include/asm-x86/time.h b/include/asm-x86/time.h index 68779b048a3e..bce72d7a958c 100644 --- a/include/asm-x86/time.h +++ b/include/asm-x86/time.h | |||
@@ -1,7 +1,6 @@ | |||
1 | #ifndef _ASMX86_TIME_H | 1 | #ifndef _ASMX86_TIME_H |
2 | #define _ASMX86_TIME_H | 2 | #define _ASMX86_TIME_H |
3 | 3 | ||
4 | extern void (*late_time_init)(void); | ||
5 | extern void hpet_time_init(void); | 4 | extern void hpet_time_init(void); |
6 | 5 | ||
7 | #include <asm/mc146818rtc.h> | 6 | #include <asm/mc146818rtc.h> |
diff --git a/include/asm-x86/unaligned.h b/include/asm-x86/unaligned.h index d270ffe72759..a7bd416b4763 100644 --- a/include/asm-x86/unaligned.h +++ b/include/asm-x86/unaligned.h | |||
@@ -3,35 +3,12 @@ | |||
3 | 3 | ||
4 | /* | 4 | /* |
5 | * The x86 can do unaligned accesses itself. | 5 | * The x86 can do unaligned accesses itself. |
6 | * | ||
7 | * The strange macros are there to make sure these can't | ||
8 | * be misused in a way that makes them not work on other | ||
9 | * architectures where unaligned accesses aren't as simple. | ||
10 | */ | 6 | */ |
11 | 7 | ||
12 | /** | 8 | #include <linux/unaligned/access_ok.h> |
13 | * get_unaligned - get value from possibly mis-aligned location | 9 | #include <linux/unaligned/generic.h> |
14 | * @ptr: pointer to value | ||
15 | * | ||
16 | * This macro should be used for accessing values larger in size than | ||
17 | * single bytes at locations that are expected to be improperly aligned, | ||
18 | * e.g. retrieving a u16 value from a location not u16-aligned. | ||
19 | * | ||
20 | * Note that unaligned accesses can be very expensive on some architectures. | ||
21 | */ | ||
22 | #define get_unaligned(ptr) (*(ptr)) | ||
23 | 10 | ||
24 | /** | 11 | #define get_unaligned __get_unaligned_le |
25 | * put_unaligned - put value to a possibly mis-aligned location | 12 | #define put_unaligned __put_unaligned_le |
26 | * @val: value to place | ||
27 | * @ptr: pointer to location | ||
28 | * | ||
29 | * This macro should be used for placing values larger in size than | ||
30 | * single bytes at locations that are expected to be improperly aligned, | ||
31 | * e.g. writing a u16 value to a location not u16-aligned. | ||
32 | * | ||
33 | * Note that unaligned accesses can be very expensive on some architectures. | ||
34 | */ | ||
35 | #define put_unaligned(val, ptr) ((void)(*(ptr) = (val))) | ||
36 | 13 | ||
37 | #endif /* _ASM_X86_UNALIGNED_H */ | 14 | #endif /* _ASM_X86_UNALIGNED_H */ |
diff --git a/include/asm-xtensa/unaligned.h b/include/asm-xtensa/unaligned.h index 28220890d0a6..8f3424fc5d18 100644 --- a/include/asm-xtensa/unaligned.h +++ b/include/asm-xtensa/unaligned.h | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-xtensa/unaligned.h | ||
3 | * | ||
4 | * Xtensa doesn't handle unaligned accesses efficiently. | 2 | * Xtensa doesn't handle unaligned accesses efficiently. |
5 | * | 3 | * |
6 | * This file is subject to the terms and conditions of the GNU General Public | 4 | * This file is subject to the terms and conditions of the GNU General Public |
@@ -9,20 +7,23 @@ | |||
9 | * | 7 | * |
10 | * Copyright (C) 2001 - 2005 Tensilica Inc. | 8 | * Copyright (C) 2001 - 2005 Tensilica Inc. |
11 | */ | 9 | */ |
10 | #ifndef _ASM_XTENSA_UNALIGNED_H | ||
11 | #define _ASM_XTENSA_UNALIGNED_H | ||
12 | 12 | ||
13 | #ifndef _XTENSA_UNALIGNED_H | 13 | #ifdef __XTENSA_EL__ |
14 | #define _XTENSA_UNALIGNED_H | 14 | # include <linux/unaligned/le_memmove.h> |
15 | 15 | # include <linux/unaligned/be_byteshift.h> | |
16 | #include <linux/string.h> | 16 | # include <linux/unaligned/generic.h> |
17 | 17 | # define get_unaligned __get_unaligned_le | |
18 | /* Use memmove here, so gcc does not insert a __builtin_memcpy. */ | 18 | # define put_unaligned __put_unaligned_le |
19 | 19 | #elif defined(__XTENSA_EB__) | |
20 | #define get_unaligned(ptr) \ | 20 | # include <linux/unaligned/be_memmove.h> |
21 | ({ __typeof__(*(ptr)) __tmp; memmove(&__tmp, (ptr), sizeof(*(ptr))); __tmp; }) | 21 | # include <linux/unaligned/le_byteshift.h> |
22 | 22 | # include <linux/unaligned/generic.h> | |
23 | #define put_unaligned(val, ptr) \ | 23 | # define get_unaligned __get_unaligned_be |
24 | ({ __typeof__(*(ptr)) __tmp = (val); \ | 24 | # define put_unaligned __put_unaligned_be |
25 | memmove((ptr), &__tmp, sizeof(*(ptr))); \ | 25 | #else |
26 | (void)0; }) | 26 | # error processor byte order undefined! |
27 | #endif | ||
27 | 28 | ||
28 | #endif /* _XTENSA_UNALIGNED_H */ | 29 | #endif /* _ASM_XTENSA_UNALIGNED_H */ |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index bda6f04791d4..78fade0a1e35 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -20,6 +20,7 @@ header-y += affs_hardblocks.h | |||
20 | header-y += aio_abi.h | 20 | header-y += aio_abi.h |
21 | header-y += arcfb.h | 21 | header-y += arcfb.h |
22 | header-y += atmapi.h | 22 | header-y += atmapi.h |
23 | header-y += atmarp.h | ||
23 | header-y += atmbr2684.h | 24 | header-y += atmbr2684.h |
24 | header-y += atmclip.h | 25 | header-y += atmclip.h |
25 | header-y += atm_eni.h | 26 | header-y += atm_eni.h |
@@ -48,6 +49,7 @@ header-y += coff.h | |||
48 | header-y += comstats.h | 49 | header-y += comstats.h |
49 | header-y += const.h | 50 | header-y += const.h |
50 | header-y += cgroupstats.h | 51 | header-y += cgroupstats.h |
52 | header-y += cramfs_fs.h | ||
51 | header-y += cycx_cfm.h | 53 | header-y += cycx_cfm.h |
52 | header-y += dlmconstants.h | 54 | header-y += dlmconstants.h |
53 | header-y += dlm_device.h | 55 | header-y += dlm_device.h |
@@ -70,10 +72,12 @@ header-y += firewire-constants.h | |||
70 | header-y += fuse.h | 72 | header-y += fuse.h |
71 | header-y += genetlink.h | 73 | header-y += genetlink.h |
72 | header-y += gen_stats.h | 74 | header-y += gen_stats.h |
75 | header-y += gfs2_ondisk.h | ||
73 | header-y += gigaset_dev.h | 76 | header-y += gigaset_dev.h |
74 | header-y += hysdn_if.h | 77 | header-y += hysdn_if.h |
75 | header-y += i2o-dev.h | 78 | header-y += i2o-dev.h |
76 | header-y += i8k.h | 79 | header-y += i8k.h |
80 | header-y += if_addrlabel.h | ||
77 | header-y += if_arcnet.h | 81 | header-y += if_arcnet.h |
78 | header-y += if_bonding.h | 82 | header-y += if_bonding.h |
79 | header-y += if_cablemodem.h | 83 | header-y += if_cablemodem.h |
@@ -91,6 +95,7 @@ header-y += if_tunnel.h | |||
91 | header-y += in6.h | 95 | header-y += in6.h |
92 | header-y += in_route.h | 96 | header-y += in_route.h |
93 | header-y += ioctl.h | 97 | header-y += ioctl.h |
98 | header-y += ip6_tunnel.h | ||
94 | header-y += ipmi_msgdefs.h | 99 | header-y += ipmi_msgdefs.h |
95 | header-y += ipsec.h | 100 | header-y += ipsec.h |
96 | header-y += ipx.h | 101 | header-y += ipx.h |
@@ -117,7 +122,6 @@ header-y += nfs2.h | |||
117 | header-y += nfs4_mount.h | 122 | header-y += nfs4_mount.h |
118 | header-y += nfs_mount.h | 123 | header-y += nfs_mount.h |
119 | header-y += nl80211.h | 124 | header-y += nl80211.h |
120 | header-y += oom.h | ||
121 | header-y += param.h | 125 | header-y += param.h |
122 | header-y += pci_regs.h | 126 | header-y += pci_regs.h |
123 | header-y += pfkeyv2.h | 127 | header-y += pfkeyv2.h |
@@ -166,7 +170,6 @@ unifdef-y += adfs_fs.h | |||
166 | unifdef-y += agpgart.h | 170 | unifdef-y += agpgart.h |
167 | unifdef-y += apm_bios.h | 171 | unifdef-y += apm_bios.h |
168 | unifdef-y += atalk.h | 172 | unifdef-y += atalk.h |
169 | unifdef-y += atmarp.h | ||
170 | unifdef-y += atmdev.h | 173 | unifdef-y += atmdev.h |
171 | unifdef-y += atm.h | 174 | unifdef-y += atm.h |
172 | unifdef-y += atm_tcp.h | 175 | unifdef-y += atm_tcp.h |
@@ -182,7 +185,6 @@ unifdef-y += cm4000_cs.h | |||
182 | unifdef-y += cn_proc.h | 185 | unifdef-y += cn_proc.h |
183 | unifdef-y += coda.h | 186 | unifdef-y += coda.h |
184 | unifdef-y += connector.h | 187 | unifdef-y += connector.h |
185 | unifdef-y += cramfs_fs.h | ||
186 | unifdef-y += cuda.h | 188 | unifdef-y += cuda.h |
187 | unifdef-y += cyclades.h | 189 | unifdef-y += cyclades.h |
188 | unifdef-y += dccp.h | 190 | unifdef-y += dccp.h |
@@ -205,7 +207,6 @@ unifdef-y += futex.h | |||
205 | unifdef-y += fs.h | 207 | unifdef-y += fs.h |
206 | unifdef-y += gameport.h | 208 | unifdef-y += gameport.h |
207 | unifdef-y += generic_serial.h | 209 | unifdef-y += generic_serial.h |
208 | unifdef-y += gfs2_ondisk.h | ||
209 | unifdef-y += hayesesp.h | 210 | unifdef-y += hayesesp.h |
210 | unifdef-y += hdlcdrv.h | 211 | unifdef-y += hdlcdrv.h |
211 | unifdef-y += hdlc.h | 212 | unifdef-y += hdlc.h |
@@ -219,7 +220,6 @@ unifdef-y += i2c-dev.h | |||
219 | unifdef-y += icmp.h | 220 | unifdef-y += icmp.h |
220 | unifdef-y += icmpv6.h | 221 | unifdef-y += icmpv6.h |
221 | unifdef-y += if_addr.h | 222 | unifdef-y += if_addr.h |
222 | unifdef-y += if_addrlabel.h | ||
223 | unifdef-y += if_arp.h | 223 | unifdef-y += if_arp.h |
224 | unifdef-y += if_bridge.h | 224 | unifdef-y += if_bridge.h |
225 | unifdef-y += if_ec.h | 225 | unifdef-y += if_ec.h |
@@ -243,7 +243,6 @@ unifdef-y += ipc.h | |||
243 | unifdef-y += ipmi.h | 243 | unifdef-y += ipmi.h |
244 | unifdef-y += ipv6.h | 244 | unifdef-y += ipv6.h |
245 | unifdef-y += ipv6_route.h | 245 | unifdef-y += ipv6_route.h |
246 | unifdef-y += ip6_tunnel.h | ||
247 | unifdef-y += isdn.h | 246 | unifdef-y += isdn.h |
248 | unifdef-y += isdnif.h | 247 | unifdef-y += isdnif.h |
249 | unifdef-y += isdn_divertif.h | 248 | unifdef-y += isdn_divertif.h |
diff --git a/include/linux/aio.h b/include/linux/aio.h index 0d0b7f629bd3..b51ddd28444e 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -209,27 +209,8 @@ extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); | |||
209 | extern int aio_put_req(struct kiocb *iocb); | 209 | extern int aio_put_req(struct kiocb *iocb); |
210 | extern void kick_iocb(struct kiocb *iocb); | 210 | extern void kick_iocb(struct kiocb *iocb); |
211 | extern int aio_complete(struct kiocb *iocb, long res, long res2); | 211 | extern int aio_complete(struct kiocb *iocb, long res, long res2); |
212 | extern void __put_ioctx(struct kioctx *ctx); | ||
213 | struct mm_struct; | 212 | struct mm_struct; |
214 | extern void exit_aio(struct mm_struct *mm); | 213 | extern void exit_aio(struct mm_struct *mm); |
215 | extern struct kioctx *lookup_ioctx(unsigned long ctx_id); | ||
216 | extern int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | ||
217 | struct iocb *iocb); | ||
218 | |||
219 | /* semi private, but used by the 32bit emulations: */ | ||
220 | struct kioctx *lookup_ioctx(unsigned long ctx_id); | ||
221 | int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | ||
222 | struct iocb *iocb); | ||
223 | |||
224 | #define get_ioctx(kioctx) do { \ | ||
225 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | ||
226 | atomic_inc(&(kioctx)->users); \ | ||
227 | } while (0) | ||
228 | #define put_ioctx(kioctx) do { \ | ||
229 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | ||
230 | if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ | ||
231 | __put_ioctx(kioctx); \ | ||
232 | } while (0) | ||
233 | 214 | ||
234 | #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) | 215 | #define io_wait_to_kiocb(wait) container_of(wait, struct kiocb, ki_wait) |
235 | 216 | ||
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index 48a62baace58..b66fa2bdfd9c 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -156,9 +156,7 @@ static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi) | |||
156 | extern struct backing_dev_info default_backing_dev_info; | 156 | extern struct backing_dev_info default_backing_dev_info; |
157 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); | 157 | void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page); |
158 | 158 | ||
159 | int writeback_acquire(struct backing_dev_info *bdi); | ||
160 | int writeback_in_progress(struct backing_dev_info *bdi); | 159 | int writeback_in_progress(struct backing_dev_info *bdi); |
161 | void writeback_release(struct backing_dev_info *bdi); | ||
162 | 160 | ||
163 | static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) | 161 | static inline int bdi_congested(struct backing_dev_info *bdi, int bdi_bits) |
164 | { | 162 | { |
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index b7fc55ec8d48..b512e48f6d8e 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -34,7 +34,8 @@ struct linux_binprm{ | |||
34 | #endif | 34 | #endif |
35 | struct mm_struct *mm; | 35 | struct mm_struct *mm; |
36 | unsigned long p; /* current top of mem */ | 36 | unsigned long p; /* current top of mem */ |
37 | int sh_bang; | 37 | unsigned int sh_bang:1, |
38 | misc_bang:1; | ||
38 | struct file * file; | 39 | struct file * file; |
39 | int e_uid, e_gid; | 40 | int e_uid, e_gid; |
40 | kernel_cap_t cap_inheritable, cap_permitted; | 41 | kernel_cap_t cap_inheritable, cap_permitted; |
@@ -48,7 +49,6 @@ struct linux_binprm{ | |||
48 | unsigned interp_flags; | 49 | unsigned interp_flags; |
49 | unsigned interp_data; | 50 | unsigned interp_data; |
50 | unsigned long loader, exec; | 51 | unsigned long loader, exec; |
51 | unsigned long argv_len; | ||
52 | }; | 52 | }; |
53 | 53 | ||
54 | #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 | 54 | #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0 |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 48bde600a2db..024f2b027244 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -6,8 +6,8 @@ | |||
6 | #define BIT(nr) (1UL << (nr)) | 6 | #define BIT(nr) (1UL << (nr)) |
7 | #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) | 7 | #define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG)) |
8 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) | 8 | #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) |
9 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_LONG) | ||
10 | #define BITS_PER_BYTE 8 | 9 | #define BITS_PER_BYTE 8 |
10 | #define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) | ||
11 | #endif | 11 | #endif |
12 | 12 | ||
13 | /* | 13 | /* |
@@ -114,8 +114,6 @@ static inline unsigned fls_long(unsigned long l) | |||
114 | 114 | ||
115 | #ifdef __KERNEL__ | 115 | #ifdef __KERNEL__ |
116 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | 116 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
117 | extern unsigned long __find_first_bit(const unsigned long *addr, | ||
118 | unsigned long size); | ||
119 | 117 | ||
120 | /** | 118 | /** |
121 | * find_first_bit - find the first set bit in a memory region | 119 | * find_first_bit - find the first set bit in a memory region |
@@ -124,28 +122,8 @@ extern unsigned long __find_first_bit(const unsigned long *addr, | |||
124 | * | 122 | * |
125 | * Returns the bit number of the first set bit. | 123 | * Returns the bit number of the first set bit. |
126 | */ | 124 | */ |
127 | static __always_inline unsigned long | 125 | extern unsigned long find_first_bit(const unsigned long *addr, |
128 | find_first_bit(const unsigned long *addr, unsigned long size) | 126 | unsigned long size); |
129 | { | ||
130 | /* Avoid a function call if the bitmap size is a constant */ | ||
131 | /* and not bigger than BITS_PER_LONG. */ | ||
132 | |||
133 | /* insert a sentinel so that __ffs returns size if there */ | ||
134 | /* are no set bits in the bitmap */ | ||
135 | if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) | ||
136 | return __ffs((*addr) | (1ul << size)); | ||
137 | |||
138 | /* the result of __ffs(0) is undefined, so it needs to be */ | ||
139 | /* handled separately */ | ||
140 | if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) | ||
141 | return ((*addr) == 0) ? BITS_PER_LONG : __ffs(*addr); | ||
142 | |||
143 | /* size is not constant or too big */ | ||
144 | return __find_first_bit(addr, size); | ||
145 | } | ||
146 | |||
147 | extern unsigned long __find_first_zero_bit(const unsigned long *addr, | ||
148 | unsigned long size); | ||
149 | 127 | ||
150 | /** | 128 | /** |
151 | * find_first_zero_bit - find the first cleared bit in a memory region | 129 | * find_first_zero_bit - find the first cleared bit in a memory region |
@@ -154,31 +132,12 @@ extern unsigned long __find_first_zero_bit(const unsigned long *addr, | |||
154 | * | 132 | * |
155 | * Returns the bit number of the first cleared bit. | 133 | * Returns the bit number of the first cleared bit. |
156 | */ | 134 | */ |
157 | static __always_inline unsigned long | 135 | extern unsigned long find_first_zero_bit(const unsigned long *addr, |
158 | find_first_zero_bit(const unsigned long *addr, unsigned long size) | 136 | unsigned long size); |
159 | { | 137 | |
160 | /* Avoid a function call if the bitmap size is a constant */ | ||
161 | /* and not bigger than BITS_PER_LONG. */ | ||
162 | |||
163 | /* insert a sentinel so that __ffs returns size if there */ | ||
164 | /* are no set bits in the bitmap */ | ||
165 | if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { | ||
166 | return __ffs(~(*addr) | (1ul << size)); | ||
167 | } | ||
168 | |||
169 | /* the result of __ffs(0) is undefined, so it needs to be */ | ||
170 | /* handled separately */ | ||
171 | if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) | ||
172 | return (~(*addr) == 0) ? BITS_PER_LONG : __ffs(~(*addr)); | ||
173 | |||
174 | /* size is not constant or too big */ | ||
175 | return __find_first_zero_bit(addr, size); | ||
176 | } | ||
177 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | 138 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
178 | 139 | ||
179 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT | 140 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT |
180 | extern unsigned long __find_next_bit(const unsigned long *addr, | ||
181 | unsigned long size, unsigned long offset); | ||
182 | 141 | ||
183 | /** | 142 | /** |
184 | * find_next_bit - find the next set bit in a memory region | 143 | * find_next_bit - find the next set bit in a memory region |
@@ -186,36 +145,8 @@ extern unsigned long __find_next_bit(const unsigned long *addr, | |||
186 | * @offset: The bitnumber to start searching at | 145 | * @offset: The bitnumber to start searching at |
187 | * @size: The bitmap size in bits | 146 | * @size: The bitmap size in bits |
188 | */ | 147 | */ |
189 | static __always_inline unsigned long | 148 | extern unsigned long find_next_bit(const unsigned long *addr, |
190 | find_next_bit(const unsigned long *addr, unsigned long size, | 149 | unsigned long size, unsigned long offset); |
191 | unsigned long offset) | ||
192 | { | ||
193 | unsigned long value; | ||
194 | |||
195 | /* Avoid a function call if the bitmap size is a constant */ | ||
196 | /* and not bigger than BITS_PER_LONG. */ | ||
197 | |||
198 | /* insert a sentinel so that __ffs returns size if there */ | ||
199 | /* are no set bits in the bitmap */ | ||
200 | if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { | ||
201 | value = (*addr) & ((~0ul) << offset); | ||
202 | value |= (1ul << size); | ||
203 | return __ffs(value); | ||
204 | } | ||
205 | |||
206 | /* the result of __ffs(0) is undefined, so it needs to be */ | ||
207 | /* handled separately */ | ||
208 | if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { | ||
209 | value = (*addr) & ((~0ul) << offset); | ||
210 | return (value == 0) ? BITS_PER_LONG : __ffs(value); | ||
211 | } | ||
212 | |||
213 | /* size is not constant or too big */ | ||
214 | return __find_next_bit(addr, size, offset); | ||
215 | } | ||
216 | |||
217 | extern unsigned long __find_next_zero_bit(const unsigned long *addr, | ||
218 | unsigned long size, unsigned long offset); | ||
219 | 150 | ||
220 | /** | 151 | /** |
221 | * find_next_zero_bit - find the next cleared bit in a memory region | 152 | * find_next_zero_bit - find the next cleared bit in a memory region |
@@ -223,33 +154,11 @@ extern unsigned long __find_next_zero_bit(const unsigned long *addr, | |||
223 | * @offset: The bitnumber to start searching at | 154 | * @offset: The bitnumber to start searching at |
224 | * @size: The bitmap size in bits | 155 | * @size: The bitmap size in bits |
225 | */ | 156 | */ |
226 | static __always_inline unsigned long | 157 | |
227 | find_next_zero_bit(const unsigned long *addr, unsigned long size, | 158 | extern unsigned long find_next_zero_bit(const unsigned long *addr, |
228 | unsigned long offset) | 159 | unsigned long size, |
229 | { | 160 | unsigned long offset); |
230 | unsigned long value; | 161 | |
231 | |||
232 | /* Avoid a function call if the bitmap size is a constant */ | ||
233 | /* and not bigger than BITS_PER_LONG. */ | ||
234 | |||
235 | /* insert a sentinel so that __ffs returns size if there */ | ||
236 | /* are no set bits in the bitmap */ | ||
237 | if (__builtin_constant_p(size) && (size < BITS_PER_LONG)) { | ||
238 | value = (~(*addr)) & ((~0ul) << offset); | ||
239 | value |= (1ul << size); | ||
240 | return __ffs(value); | ||
241 | } | ||
242 | |||
243 | /* the result of __ffs(0) is undefined, so it needs to be */ | ||
244 | /* handled separately */ | ||
245 | if (__builtin_constant_p(size) && (size == BITS_PER_LONG)) { | ||
246 | value = (~(*addr)) & ((~0ul) << offset); | ||
247 | return (value == 0) ? BITS_PER_LONG : __ffs(value); | ||
248 | } | ||
249 | |||
250 | /* size is not constant or too big */ | ||
251 | return __find_next_zero_bit(addr, size, offset); | ||
252 | } | ||
253 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | 162 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ |
254 | #endif /* __KERNEL__ */ | 163 | #endif /* __KERNEL__ */ |
255 | #endif | 164 | #endif |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 932eb02a2753..82aa36c53ea7 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -225,7 +225,6 @@ int block_page_mkwrite(struct vm_area_struct *vma, struct page *page, | |||
225 | get_block_t get_block); | 225 | get_block_t get_block); |
226 | void block_sync_page(struct page *); | 226 | void block_sync_page(struct page *); |
227 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 227 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
228 | int generic_commit_write(struct file *, struct page *, unsigned, unsigned); | ||
229 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 228 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
230 | int file_fsync(struct file *, struct dentry *, int); | 229 | int file_fsync(struct file *, struct dentry *, int); |
231 | int nobh_write_begin(struct file *, struct address_space *, | 230 | int nobh_write_begin(struct file *, struct address_space *, |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index a6a6035a4e1e..e155aa78d859 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -88,6 +88,17 @@ static inline void css_put(struct cgroup_subsys_state *css) | |||
88 | __css_put(css); | 88 | __css_put(css); |
89 | } | 89 | } |
90 | 90 | ||
91 | /* bits in struct cgroup flags field */ | ||
92 | enum { | ||
93 | /* Control Group is dead */ | ||
94 | CGRP_REMOVED, | ||
95 | /* Control Group has previously had a child cgroup or a task, | ||
96 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ | ||
97 | CGRP_RELEASABLE, | ||
98 | /* Control Group requires release notifications to userspace */ | ||
99 | CGRP_NOTIFY_ON_RELEASE, | ||
100 | }; | ||
101 | |||
91 | struct cgroup { | 102 | struct cgroup { |
92 | unsigned long flags; /* "unsigned long" so bitops work */ | 103 | unsigned long flags; /* "unsigned long" so bitops work */ |
93 | 104 | ||
@@ -139,10 +150,10 @@ struct css_set { | |||
139 | struct kref ref; | 150 | struct kref ref; |
140 | 151 | ||
141 | /* | 152 | /* |
142 | * List running through all cgroup groups. Protected by | 153 | * List running through all cgroup groups in the same hash |
143 | * css_set_lock | 154 | * slot. Protected by css_set_lock |
144 | */ | 155 | */ |
145 | struct list_head list; | 156 | struct hlist_node hlist; |
146 | 157 | ||
147 | /* | 158 | /* |
148 | * List running through all tasks using this cgroup | 159 | * List running through all tasks using this cgroup |
@@ -163,7 +174,16 @@ struct css_set { | |||
163 | * during subsystem registration (at boot time). | 174 | * during subsystem registration (at boot time). |
164 | */ | 175 | */ |
165 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | 176 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; |
177 | }; | ||
178 | |||
179 | /* | ||
180 | * cgroup_map_cb is an abstract callback API for reporting map-valued | ||
181 | * control files | ||
182 | */ | ||
166 | 183 | ||
184 | struct cgroup_map_cb { | ||
185 | int (*fill)(struct cgroup_map_cb *cb, const char *key, u64 value); | ||
186 | void *state; | ||
167 | }; | 187 | }; |
168 | 188 | ||
169 | /* struct cftype: | 189 | /* struct cftype: |
@@ -190,20 +210,51 @@ struct cftype { | |||
190 | struct file *file, | 210 | struct file *file, |
191 | char __user *buf, size_t nbytes, loff_t *ppos); | 211 | char __user *buf, size_t nbytes, loff_t *ppos); |
192 | /* | 212 | /* |
193 | * read_uint() is a shortcut for the common case of returning a | 213 | * read_u64() is a shortcut for the common case of returning a |
194 | * single integer. Use it in place of read() | 214 | * single integer. Use it in place of read() |
195 | */ | 215 | */ |
196 | u64 (*read_uint) (struct cgroup *cgrp, struct cftype *cft); | 216 | u64 (*read_u64) (struct cgroup *cgrp, struct cftype *cft); |
217 | /* | ||
218 | * read_s64() is a signed version of read_u64() | ||
219 | */ | ||
220 | s64 (*read_s64) (struct cgroup *cgrp, struct cftype *cft); | ||
221 | /* | ||
222 | * read_map() is used for defining a map of key/value | ||
223 | * pairs. It should call cb->fill(cb, key, value) for each | ||
224 | * entry. The key/value pairs (and their ordering) should not | ||
225 | * change between reboots. | ||
226 | */ | ||
227 | int (*read_map) (struct cgroup *cont, struct cftype *cft, | ||
228 | struct cgroup_map_cb *cb); | ||
229 | /* | ||
230 | * read_seq_string() is used for outputting a simple sequence | ||
231 | * using seqfile. | ||
232 | */ | ||
233 | int (*read_seq_string) (struct cgroup *cont, struct cftype *cft, | ||
234 | struct seq_file *m); | ||
235 | |||
197 | ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, | 236 | ssize_t (*write) (struct cgroup *cgrp, struct cftype *cft, |
198 | struct file *file, | 237 | struct file *file, |
199 | const char __user *buf, size_t nbytes, loff_t *ppos); | 238 | const char __user *buf, size_t nbytes, loff_t *ppos); |
200 | 239 | ||
201 | /* | 240 | /* |
202 | * write_uint() is a shortcut for the common case of accepting | 241 | * write_u64() is a shortcut for the common case of accepting |
203 | * a single integer (as parsed by simple_strtoull) from | 242 | * a single integer (as parsed by simple_strtoull) from |
204 | * userspace. Use in place of write(); return 0 or error. | 243 | * userspace. Use in place of write(); return 0 or error. |
205 | */ | 244 | */ |
206 | int (*write_uint) (struct cgroup *cgrp, struct cftype *cft, u64 val); | 245 | int (*write_u64) (struct cgroup *cgrp, struct cftype *cft, u64 val); |
246 | /* | ||
247 | * write_s64() is a signed version of write_u64() | ||
248 | */ | ||
249 | int (*write_s64) (struct cgroup *cgrp, struct cftype *cft, s64 val); | ||
250 | |||
251 | /* | ||
252 | * trigger() callback can be used to get some kick from the | ||
253 | * userspace, when the actual string written is not important | ||
254 | * at all. The private field can be used to determine the | ||
255 | * kick type for multiplexing. | ||
256 | */ | ||
257 | int (*trigger)(struct cgroup *cgrp, unsigned int event); | ||
207 | 258 | ||
208 | int (*release) (struct inode *inode, struct file *file); | 259 | int (*release) (struct inode *inode, struct file *file); |
209 | }; | 260 | }; |
@@ -254,6 +305,12 @@ struct cgroup_subsys { | |||
254 | struct cgroup *cgrp); | 305 | struct cgroup *cgrp); |
255 | void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp); | 306 | void (*post_clone)(struct cgroup_subsys *ss, struct cgroup *cgrp); |
256 | void (*bind)(struct cgroup_subsys *ss, struct cgroup *root); | 307 | void (*bind)(struct cgroup_subsys *ss, struct cgroup *root); |
308 | /* | ||
309 | * This routine is called with the task_lock of mm->owner held | ||
310 | */ | ||
311 | void (*mm_owner_changed)(struct cgroup_subsys *ss, | ||
312 | struct cgroup *old, | ||
313 | struct cgroup *new); | ||
257 | int subsys_id; | 314 | int subsys_id; |
258 | int active; | 315 | int active; |
259 | int disabled; | 316 | int disabled; |
@@ -339,4 +396,13 @@ static inline int cgroupstats_build(struct cgroupstats *stats, | |||
339 | 396 | ||
340 | #endif /* !CONFIG_CGROUPS */ | 397 | #endif /* !CONFIG_CGROUPS */ |
341 | 398 | ||
399 | #ifdef CONFIG_MM_OWNER | ||
400 | extern void | ||
401 | cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new); | ||
402 | #else /* !CONFIG_MM_OWNER */ | ||
403 | static inline void | ||
404 | cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | ||
405 | { | ||
406 | } | ||
407 | #endif /* CONFIG_MM_OWNER */ | ||
342 | #endif /* _LINUX_CGROUP_H */ | 408 | #endif /* _LINUX_CGROUP_H */ |
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index 1ddebfc52565..e2877454ec82 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h | |||
@@ -42,3 +42,9 @@ SUBSYS(mem_cgroup) | |||
42 | #endif | 42 | #endif |
43 | 43 | ||
44 | /* */ | 44 | /* */ |
45 | |||
46 | #ifdef CONFIG_CGROUP_DEVICE | ||
47 | SUBSYS(devices) | ||
48 | #endif | ||
49 | |||
50 | /* */ | ||
diff --git a/include/linux/coda_linux.h b/include/linux/coda_linux.h index 1c47a34aa794..31b75311e2ca 100644 --- a/include/linux/coda_linux.h +++ b/include/linux/coda_linux.h | |||
@@ -43,9 +43,6 @@ int coda_getattr(struct vfsmount *, struct dentry *, struct kstat *); | |||
43 | int coda_setattr(struct dentry *, struct iattr *); | 43 | int coda_setattr(struct dentry *, struct iattr *); |
44 | 44 | ||
45 | /* this file: heloers */ | 45 | /* this file: heloers */ |
46 | static __inline__ struct CodaFid *coda_i2f(struct inode *); | ||
47 | static __inline__ char *coda_i2s(struct inode *); | ||
48 | static __inline__ void coda_flag_inode(struct inode *, int flag); | ||
49 | char *coda_f2s(struct CodaFid *f); | 46 | char *coda_f2s(struct CodaFid *f); |
50 | int coda_isroot(struct inode *i); | 47 | int coda_isroot(struct inode *i); |
51 | int coda_iscontrol(const char *name, size_t length); | 48 | int coda_iscontrol(const char *name, size_t length); |
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index d71f7c0f931b..b03f80a078be 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h | |||
@@ -53,6 +53,7 @@ struct vc_data { | |||
53 | unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ | 53 | unsigned short vc_hi_font_mask; /* [#] Attribute set for upper 256 chars of font or 0 if not supported */ |
54 | struct console_font vc_font; /* Current VC font set */ | 54 | struct console_font vc_font; /* Current VC font set */ |
55 | unsigned short vc_video_erase_char; /* Background erase character */ | 55 | unsigned short vc_video_erase_char; /* Background erase character */ |
56 | unsigned short vc_scrl_erase_char; /* Erase character for scroll */ | ||
56 | /* VT terminal data */ | 57 | /* VT terminal data */ |
57 | unsigned int vc_state; /* Escape sequence parser state */ | 58 | unsigned int vc_state; /* Escape sequence parser state */ |
58 | unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ | 59 | unsigned int vc_npar,vc_par[NPAR]; /* Parameters of current escape sequence */ |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index f212fa98283e..7464ba3b4333 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
@@ -108,7 +108,7 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) | |||
108 | extern void get_online_cpus(void); | 108 | extern void get_online_cpus(void); |
109 | extern void put_online_cpus(void); | 109 | extern void put_online_cpus(void); |
110 | #define hotcpu_notifier(fn, pri) { \ | 110 | #define hotcpu_notifier(fn, pri) { \ |
111 | static struct notifier_block fn##_nb = \ | 111 | static struct notifier_block fn##_nb __cpuinitdata = \ |
112 | { .notifier_call = fn, .priority = pri }; \ | 112 | { .notifier_call = fn, .priority = pri }; \ |
113 | register_cpu_notifier(&fn##_nb); \ | 113 | register_cpu_notifier(&fn##_nb); \ |
114 | } | 114 | } |
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h new file mode 100644 index 000000000000..0b0d9c39ed67 --- /dev/null +++ b/include/linux/device_cgroup.h | |||
@@ -0,0 +1,12 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/fs.h> | ||
3 | |||
4 | #ifdef CONFIG_CGROUP_DEVICE | ||
5 | extern int devcgroup_inode_permission(struct inode *inode, int mask); | ||
6 | extern int devcgroup_inode_mknod(int mode, dev_t dev); | ||
7 | #else | ||
8 | static inline int devcgroup_inode_permission(struct inode *inode, int mask) | ||
9 | { return 0; } | ||
10 | static inline int devcgroup_inode_mknod(int mode, dev_t dev) | ||
11 | { return 0; } | ||
12 | #endif | ||
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h new file mode 100644 index 000000000000..1677e2bfa00c --- /dev/null +++ b/include/linux/dma-attrs.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef _DMA_ATTR_H | ||
2 | #define _DMA_ATTR_H | ||
3 | |||
4 | #include <linux/bitmap.h> | ||
5 | #include <linux/bitops.h> | ||
6 | #include <linux/bug.h> | ||
7 | |||
8 | /** | ||
9 | * an enum dma_attr represents an attribute associated with a DMA | ||
10 | * mapping. The semantics of each attribute should be defined in | ||
11 | * Documentation/DMA-attributes.txt. | ||
12 | */ | ||
13 | enum dma_attr { | ||
14 | DMA_ATTR_WRITE_BARRIER, | ||
15 | DMA_ATTR_MAX, | ||
16 | }; | ||
17 | |||
18 | #define __DMA_ATTRS_LONGS BITS_TO_LONGS(DMA_ATTR_MAX) | ||
19 | |||
20 | /** | ||
21 | * struct dma_attrs - an opaque container for DMA attributes | ||
22 | * @flags - bitmask representing a collection of enum dma_attr | ||
23 | */ | ||
24 | struct dma_attrs { | ||
25 | unsigned long flags[__DMA_ATTRS_LONGS]; | ||
26 | }; | ||
27 | |||
28 | #define DEFINE_DMA_ATTRS(x) \ | ||
29 | struct dma_attrs x = { \ | ||
30 | .flags = { [0 ... __DMA_ATTRS_LONGS-1] = 0 }, \ | ||
31 | } | ||
32 | |||
33 | static inline void init_dma_attrs(struct dma_attrs *attrs) | ||
34 | { | ||
35 | bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS); | ||
36 | } | ||
37 | |||
38 | #ifdef CONFIG_HAVE_DMA_ATTRS | ||
39 | /** | ||
40 | * dma_set_attr - set a specific attribute | ||
41 | * @attr: attribute to set | ||
42 | * @attrs: struct dma_attrs (may be NULL) | ||
43 | */ | ||
44 | static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
45 | { | ||
46 | if (attrs == NULL) | ||
47 | return; | ||
48 | BUG_ON(attr >= DMA_ATTR_MAX); | ||
49 | __set_bit(attr, attrs->flags); | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * dma_get_attr - check for a specific attribute | ||
54 | * @attr: attribute to set | ||
55 | * @attrs: struct dma_attrs (may be NULL) | ||
56 | */ | ||
57 | static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
58 | { | ||
59 | if (attrs == NULL) | ||
60 | return 0; | ||
61 | BUG_ON(attr >= DMA_ATTR_MAX); | ||
62 | return test_bit(attr, attrs->flags); | ||
63 | } | ||
64 | #else /* !CONFIG_HAVE_DMA_ATTRS */ | ||
65 | static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
66 | { | ||
67 | } | ||
68 | |||
69 | static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs) | ||
70 | { | ||
71 | return 0; | ||
72 | } | ||
73 | #endif /* CONFIG_HAVE_DMA_ATTRS */ | ||
74 | #endif /* _DMA_ATTR_H */ | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 332030709623..952e0f857ac9 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -146,4 +146,21 @@ static inline void dmam_release_declared_memory(struct device *dev) | |||
146 | } | 146 | } |
147 | #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ | 147 | #endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */ |
148 | 148 | ||
149 | #ifndef CONFIG_HAVE_DMA_ATTRS | ||
150 | struct dma_attrs; | ||
151 | |||
152 | #define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \ | ||
153 | dma_map_single(dev, cpu_addr, size, dir) | ||
154 | |||
155 | #define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \ | ||
156 | dma_unmap_single(dev, dma_addr, size, dir) | ||
157 | |||
158 | #define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \ | ||
159 | dma_map_sg(dev, sgl, nents, dir) | ||
160 | |||
161 | #define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \ | ||
162 | dma_unmap_sg(dev, sgl, nents, dir) | ||
163 | |||
164 | #endif /* CONFIG_HAVE_DMA_ATTRS */ | ||
165 | |||
149 | #endif | 166 | #endif |
diff --git a/include/linux/edac.h b/include/linux/edac.h index eab451e69a91..7cf92e8a4196 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Author: Dave Jiang <djiang@mvista.com> | 4 | * Author: Dave Jiang <djiang@mvista.com> |
5 | * | 5 | * |
6 | * 2006-2007 (c) MontaVista Software, Inc. This file is licensed under | 6 | * 2006-2008 (c) MontaVista Software, Inc. This file is licensed under |
7 | * the terms of the GNU General Public License version 2. This program | 7 | * the terms of the GNU General Public License version 2. This program |
8 | * is licensed "as is" without any warranty of any kind, whether express | 8 | * is licensed "as is" without any warranty of any kind, whether express |
9 | * or implied. | 9 | * or implied. |
@@ -26,4 +26,16 @@ extern atomic_t edac_handlers; | |||
26 | extern int edac_handler_set(void); | 26 | extern int edac_handler_set(void); |
27 | extern void edac_atomic_assert_error(void); | 27 | extern void edac_atomic_assert_error(void); |
28 | 28 | ||
29 | static inline void opstate_init(void) | ||
30 | { | ||
31 | switch (edac_op_state) { | ||
32 | case EDAC_OPSTATE_POLL: | ||
33 | case EDAC_OPSTATE_NMI: | ||
34 | break; | ||
35 | default: | ||
36 | edac_op_state = EDAC_OPSTATE_POLL; | ||
37 | } | ||
38 | return; | ||
39 | } | ||
40 | |||
29 | #endif | 41 | #endif |
diff --git a/include/linux/elf.h b/include/linux/elf.h index bad1b16ec49a..ff9fbed90123 100644 --- a/include/linux/elf.h +++ b/include/linux/elf.h | |||
@@ -208,7 +208,7 @@ typedef struct elf32_hdr{ | |||
208 | } Elf32_Ehdr; | 208 | } Elf32_Ehdr; |
209 | 209 | ||
210 | typedef struct elf64_hdr { | 210 | typedef struct elf64_hdr { |
211 | unsigned char e_ident[16]; /* ELF "magic number" */ | 211 | unsigned char e_ident[EI_NIDENT]; /* ELF "magic number" */ |
212 | Elf64_Half e_type; | 212 | Elf64_Half e_type; |
213 | Elf64_Half e_machine; | 213 | Elf64_Half e_machine; |
214 | Elf64_Word e_version; | 214 | Elf64_Word e_version; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 2c925747bc49..a1ba005d08e7 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1521,7 +1521,6 @@ extern int get_sb_pseudo(struct file_system_type *, char *, | |||
1521 | const struct super_operations *ops, unsigned long, | 1521 | const struct super_operations *ops, unsigned long, |
1522 | struct vfsmount *mnt); | 1522 | struct vfsmount *mnt); |
1523 | extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); | 1523 | extern int simple_set_mnt(struct vfsmount *mnt, struct super_block *sb); |
1524 | int __put_super(struct super_block *sb); | ||
1525 | int __put_super_and_need_restart(struct super_block *sb); | 1524 | int __put_super_and_need_restart(struct super_block *sb); |
1526 | void unnamed_dev_init(void); | 1525 | void unnamed_dev_init(void); |
1527 | 1526 | ||
@@ -1965,7 +1964,6 @@ extern int vfs_stat_fd(int dfd, char __user *, struct kstat *); | |||
1965 | extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); | 1964 | extern int vfs_lstat_fd(int dfd, char __user *, struct kstat *); |
1966 | extern int vfs_fstat(unsigned int, struct kstat *); | 1965 | extern int vfs_fstat(unsigned int, struct kstat *); |
1967 | 1966 | ||
1968 | extern long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); | ||
1969 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, | 1967 | extern int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd, |
1970 | unsigned long arg); | 1968 | unsigned long arg); |
1971 | 1969 | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index c37653b6843f..b414be387180 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -40,9 +40,9 @@ struct vm_area_struct; | |||
40 | #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ | 40 | #define __GFP_FS ((__force gfp_t)0x80u) /* Can call down to low-level FS? */ |
41 | #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ | 41 | #define __GFP_COLD ((__force gfp_t)0x100u) /* Cache-cold page required */ |
42 | #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ | 42 | #define __GFP_NOWARN ((__force gfp_t)0x200u) /* Suppress page allocation failure warning */ |
43 | #define __GFP_REPEAT ((__force gfp_t)0x400u) /* Retry the allocation. Might fail */ | 43 | #define __GFP_REPEAT ((__force gfp_t)0x400u) /* See above */ |
44 | #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* Retry for ever. Cannot fail */ | 44 | #define __GFP_NOFAIL ((__force gfp_t)0x800u) /* See above */ |
45 | #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* Do not retry. Might fail */ | 45 | #define __GFP_NORETRY ((__force gfp_t)0x1000u)/* See above */ |
46 | #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ | 46 | #define __GFP_COMP ((__force gfp_t)0x4000u)/* Add compound page metadata */ |
47 | #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ | 47 | #define __GFP_ZERO ((__force gfp_t)0x8000u)/* Return zeroed page on success */ |
48 | #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ | 48 | #define __GFP_NOMEMALLOC ((__force gfp_t)0x10000u) /* Don't use emergency reserves */ |
diff --git a/include/linux/idr.h b/include/linux/idr.h index 0edda411959c..9a2d762124de 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/init.h> | ||
17 | 18 | ||
18 | #if BITS_PER_LONG == 32 | 19 | #if BITS_PER_LONG == 32 |
19 | # define IDR_BITS 5 | 20 | # define IDR_BITS 5 |
@@ -115,4 +116,6 @@ void ida_remove(struct ida *ida, int id); | |||
115 | void ida_destroy(struct ida *ida); | 116 | void ida_destroy(struct ida *ida); |
116 | void ida_init(struct ida *ida); | 117 | void ida_init(struct ida *ida); |
117 | 118 | ||
119 | void __init idr_init_cache(void); | ||
120 | |||
118 | #endif /* __IDR_H__ */ | 121 | #endif /* __IDR_H__ */ |
diff --git a/include/linux/init.h b/include/linux/init.h index fb58c0493cf2..21d658cdfa27 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -147,6 +147,8 @@ extern unsigned int reset_devices; | |||
147 | void setup_arch(char **); | 147 | void setup_arch(char **); |
148 | void prepare_namespace(void); | 148 | void prepare_namespace(void); |
149 | 149 | ||
150 | extern void (*late_time_init)(void); | ||
151 | |||
150 | #endif | 152 | #endif |
151 | 153 | ||
152 | #ifndef MODULE | 154 | #ifndef MODULE |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index b5fef13148bd..f1fc7470d26c 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -289,6 +289,7 @@ struct softirq_action | |||
289 | }; | 289 | }; |
290 | 290 | ||
291 | asmlinkage void do_softirq(void); | 291 | asmlinkage void do_softirq(void); |
292 | asmlinkage void __do_softirq(void); | ||
292 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); | 293 | extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data); |
293 | extern void softirq_init(void); | 294 | extern void softirq_init(void); |
294 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) | 295 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) |
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index e4451d1da753..ea6c18a8b0d4 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h | |||
@@ -4,6 +4,17 @@ | |||
4 | #include <linux/err.h> | 4 | #include <linux/err.h> |
5 | #include <linux/idr.h> | 5 | #include <linux/idr.h> |
6 | #include <linux/rwsem.h> | 6 | #include <linux/rwsem.h> |
7 | #include <linux/notifier.h> | ||
8 | |||
9 | /* | ||
10 | * ipc namespace events | ||
11 | */ | ||
12 | #define IPCNS_MEMCHANGED 0x00000001 /* Notify lowmem size changed */ | ||
13 | #define IPCNS_CREATED 0x00000002 /* Notify new ipc namespace created */ | ||
14 | #define IPCNS_REMOVED 0x00000003 /* Notify ipc namespace removed */ | ||
15 | |||
16 | #define IPCNS_CALLBACK_PRI 0 | ||
17 | |||
7 | 18 | ||
8 | struct ipc_ids { | 19 | struct ipc_ids { |
9 | int in_use; | 20 | int in_use; |
@@ -30,15 +41,24 @@ struct ipc_namespace { | |||
30 | size_t shm_ctlall; | 41 | size_t shm_ctlall; |
31 | int shm_ctlmni; | 42 | int shm_ctlmni; |
32 | int shm_tot; | 43 | int shm_tot; |
44 | |||
45 | struct notifier_block ipcns_nb; | ||
33 | }; | 46 | }; |
34 | 47 | ||
35 | extern struct ipc_namespace init_ipc_ns; | 48 | extern struct ipc_namespace init_ipc_ns; |
49 | extern atomic_t nr_ipc_ns; | ||
36 | 50 | ||
37 | #ifdef CONFIG_SYSVIPC | 51 | #ifdef CONFIG_SYSVIPC |
38 | #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, | 52 | #define INIT_IPC_NS(ns) .ns = &init_ipc_ns, |
39 | #else | 53 | |
54 | extern int register_ipcns_notifier(struct ipc_namespace *); | ||
55 | extern int cond_register_ipcns_notifier(struct ipc_namespace *); | ||
56 | extern int unregister_ipcns_notifier(struct ipc_namespace *); | ||
57 | extern int ipcns_notify(unsigned long); | ||
58 | |||
59 | #else /* CONFIG_SYSVIPC */ | ||
40 | #define INIT_IPC_NS(ns) | 60 | #define INIT_IPC_NS(ns) |
41 | #endif | 61 | #endif /* CONFIG_SYSVIPC */ |
42 | 62 | ||
43 | #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) | 63 | #if defined(CONFIG_SYSVIPC) && defined(CONFIG_IPC_NS) |
44 | extern void free_ipc_ns(struct kref *kref); | 64 | extern void free_ipc_ns(struct kref *kref); |
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h index c5bd28b69aec..7ebdb4fb4e54 100644 --- a/include/linux/ipmi.h +++ b/include/linux/ipmi.h | |||
@@ -64,7 +64,7 @@ | |||
64 | * applications and another for userland applications. The | 64 | * applications and another for userland applications. The |
65 | * capabilities are basically the same for both interface, although | 65 | * capabilities are basically the same for both interface, although |
66 | * the interfaces are somewhat different. The stuff in the | 66 | * the interfaces are somewhat different. The stuff in the |
67 | * #ifdef KERNEL below is the in-kernel interface. The userland | 67 | * #ifdef __KERNEL__ below is the in-kernel interface. The userland |
68 | * interface is defined later in the file. */ | 68 | * interface is defined later in the file. */ |
69 | 69 | ||
70 | 70 | ||
@@ -75,8 +75,7 @@ | |||
75 | * work for sockets. | 75 | * work for sockets. |
76 | */ | 76 | */ |
77 | #define IPMI_MAX_ADDR_SIZE 32 | 77 | #define IPMI_MAX_ADDR_SIZE 32 |
78 | struct ipmi_addr | 78 | struct ipmi_addr { |
79 | { | ||
80 | /* Try to take these from the "Channel Medium Type" table | 79 | /* Try to take these from the "Channel Medium Type" table |
81 | in section 6.5 of the IPMI 1.5 manual. */ | 80 | in section 6.5 of the IPMI 1.5 manual. */ |
82 | int addr_type; | 81 | int addr_type; |
@@ -90,8 +89,7 @@ struct ipmi_addr | |||
90 | * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC. | 89 | * 0), or IPMC_BMC_CHANNEL if communicating directly with the BMC. |
91 | */ | 90 | */ |
92 | #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c | 91 | #define IPMI_SYSTEM_INTERFACE_ADDR_TYPE 0x0c |
93 | struct ipmi_system_interface_addr | 92 | struct ipmi_system_interface_addr { |
94 | { | ||
95 | int addr_type; | 93 | int addr_type; |
96 | short channel; | 94 | short channel; |
97 | unsigned char lun; | 95 | unsigned char lun; |
@@ -100,10 +98,9 @@ struct ipmi_system_interface_addr | |||
100 | /* An IPMB Address. */ | 98 | /* An IPMB Address. */ |
101 | #define IPMI_IPMB_ADDR_TYPE 0x01 | 99 | #define IPMI_IPMB_ADDR_TYPE 0x01 |
102 | /* Used for broadcast get device id as described in section 17.9 of the | 100 | /* Used for broadcast get device id as described in section 17.9 of the |
103 | IPMI 1.5 manual. */ | 101 | IPMI 1.5 manual. */ |
104 | #define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41 | 102 | #define IPMI_IPMB_BROADCAST_ADDR_TYPE 0x41 |
105 | struct ipmi_ipmb_addr | 103 | struct ipmi_ipmb_addr { |
106 | { | ||
107 | int addr_type; | 104 | int addr_type; |
108 | short channel; | 105 | short channel; |
109 | unsigned char slave_addr; | 106 | unsigned char slave_addr; |
@@ -128,8 +125,7 @@ struct ipmi_ipmb_addr | |||
128 | * message is a little weird, but this is required. | 125 | * message is a little weird, but this is required. |
129 | */ | 126 | */ |
130 | #define IPMI_LAN_ADDR_TYPE 0x04 | 127 | #define IPMI_LAN_ADDR_TYPE 0x04 |
131 | struct ipmi_lan_addr | 128 | struct ipmi_lan_addr { |
132 | { | ||
133 | int addr_type; | 129 | int addr_type; |
134 | short channel; | 130 | short channel; |
135 | unsigned char privilege; | 131 | unsigned char privilege; |
@@ -162,16 +158,14 @@ struct ipmi_lan_addr | |||
162 | * byte of data in the response (as the spec shows the messages laid | 158 | * byte of data in the response (as the spec shows the messages laid |
163 | * out). | 159 | * out). |
164 | */ | 160 | */ |
165 | struct ipmi_msg | 161 | struct ipmi_msg { |
166 | { | ||
167 | unsigned char netfn; | 162 | unsigned char netfn; |
168 | unsigned char cmd; | 163 | unsigned char cmd; |
169 | unsigned short data_len; | 164 | unsigned short data_len; |
170 | unsigned char __user *data; | 165 | unsigned char __user *data; |
171 | }; | 166 | }; |
172 | 167 | ||
173 | struct kernel_ipmi_msg | 168 | struct kernel_ipmi_msg { |
174 | { | ||
175 | unsigned char netfn; | 169 | unsigned char netfn; |
176 | unsigned char cmd; | 170 | unsigned char cmd; |
177 | unsigned short data_len; | 171 | unsigned short data_len; |
@@ -239,12 +233,11 @@ typedef struct ipmi_user *ipmi_user_t; | |||
239 | * used after the message is delivered, so the upper layer may use the | 233 | * used after the message is delivered, so the upper layer may use the |
240 | * link to build a linked list, if it likes. | 234 | * link to build a linked list, if it likes. |
241 | */ | 235 | */ |
242 | struct ipmi_recv_msg | 236 | struct ipmi_recv_msg { |
243 | { | ||
244 | struct list_head link; | 237 | struct list_head link; |
245 | 238 | ||
246 | /* The type of message as defined in the "Receive Types" | 239 | /* The type of message as defined in the "Receive Types" |
247 | defines above. */ | 240 | defines above. */ |
248 | int recv_type; | 241 | int recv_type; |
249 | 242 | ||
250 | ipmi_user_t user; | 243 | ipmi_user_t user; |
@@ -271,9 +264,8 @@ struct ipmi_recv_msg | |||
271 | /* Allocate and free the receive message. */ | 264 | /* Allocate and free the receive message. */ |
272 | void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); | 265 | void ipmi_free_recv_msg(struct ipmi_recv_msg *msg); |
273 | 266 | ||
274 | struct ipmi_user_hndl | 267 | struct ipmi_user_hndl { |
275 | { | 268 | /* Routine type to call when a message needs to be routed to |
276 | /* Routine type to call when a message needs to be routed to | ||
277 | the upper layer. This will be called with some locks held, | 269 | the upper layer. This will be called with some locks held, |
278 | the only IPMI routines that can be called are ipmi_request | 270 | the only IPMI routines that can be called are ipmi_request |
279 | and the alloc/free operations. The handler_data is the | 271 | and the alloc/free operations. The handler_data is the |
@@ -368,9 +360,8 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
368 | * Poll the IPMI interface for the user. This causes the IPMI code to | 360 | * Poll the IPMI interface for the user. This causes the IPMI code to |
369 | * do an immediate check for information from the driver and handle | 361 | * do an immediate check for information from the driver and handle |
370 | * anything that is immediately pending. This will not block in any | 362 | * anything that is immediately pending. This will not block in any |
371 | * way. This is useful if you need to implement polling from the user | 363 | * way. This is useful if you need to spin waiting for something to |
372 | * for things like modifying the watchdog timeout when a panic occurs | 364 | * happen in the IPMI driver. |
373 | * or disabling the watchdog timer on a reboot. | ||
374 | */ | 365 | */ |
375 | void ipmi_poll_interface(ipmi_user_t user); | 366 | void ipmi_poll_interface(ipmi_user_t user); |
376 | 367 | ||
@@ -422,12 +413,6 @@ int ipmi_get_maintenance_mode(ipmi_user_t user); | |||
422 | int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); | 413 | int ipmi_set_maintenance_mode(ipmi_user_t user, int mode); |
423 | 414 | ||
424 | /* | 415 | /* |
425 | * Allow run-to-completion mode to be set for the interface of | ||
426 | * a specific user. | ||
427 | */ | ||
428 | void ipmi_user_set_run_to_completion(ipmi_user_t user, int val); | ||
429 | |||
430 | /* | ||
431 | * When the user is created, it will not receive IPMI events by | 416 | * When the user is created, it will not receive IPMI events by |
432 | * default. The user must set this to TRUE to get incoming events. | 417 | * default. The user must set this to TRUE to get incoming events. |
433 | * The first user that sets this to TRUE will receive all events that | 418 | * The first user that sets this to TRUE will receive all events that |
@@ -440,8 +425,7 @@ int ipmi_set_gets_events(ipmi_user_t user, int val); | |||
440 | * every existing interface when a new watcher is registered with | 425 | * every existing interface when a new watcher is registered with |
441 | * ipmi_smi_watcher_register(). | 426 | * ipmi_smi_watcher_register(). |
442 | */ | 427 | */ |
443 | struct ipmi_smi_watcher | 428 | struct ipmi_smi_watcher { |
444 | { | ||
445 | struct list_head link; | 429 | struct list_head link; |
446 | 430 | ||
447 | /* You must set the owner to the current module, if you are in | 431 | /* You must set the owner to the current module, if you are in |
@@ -512,8 +496,7 @@ int ipmi_validate_addr(struct ipmi_addr *addr, int len); | |||
512 | 496 | ||
513 | 497 | ||
514 | /* Messages sent to the interface are this format. */ | 498 | /* Messages sent to the interface are this format. */ |
515 | struct ipmi_req | 499 | struct ipmi_req { |
516 | { | ||
517 | unsigned char __user *addr; /* Address to send the message to. */ | 500 | unsigned char __user *addr; /* Address to send the message to. */ |
518 | unsigned int addr_len; | 501 | unsigned int addr_len; |
519 | 502 | ||
@@ -538,12 +521,11 @@ struct ipmi_req | |||
538 | 521 | ||
539 | /* Messages sent to the interface with timing parameters are this | 522 | /* Messages sent to the interface with timing parameters are this |
540 | format. */ | 523 | format. */ |
541 | struct ipmi_req_settime | 524 | struct ipmi_req_settime { |
542 | { | ||
543 | struct ipmi_req req; | 525 | struct ipmi_req req; |
544 | 526 | ||
545 | /* See ipmi_request_settime() above for details on these | 527 | /* See ipmi_request_settime() above for details on these |
546 | values. */ | 528 | values. */ |
547 | int retries; | 529 | int retries; |
548 | unsigned int retry_time_ms; | 530 | unsigned int retry_time_ms; |
549 | }; | 531 | }; |
@@ -560,8 +542,7 @@ struct ipmi_req_settime | |||
560 | struct ipmi_req_settime) | 542 | struct ipmi_req_settime) |
561 | 543 | ||
562 | /* Messages received from the interface are this format. */ | 544 | /* Messages received from the interface are this format. */ |
563 | struct ipmi_recv | 545 | struct ipmi_recv { |
564 | { | ||
565 | int recv_type; /* Is this a command, response or an | 546 | int recv_type; /* Is this a command, response or an |
566 | asyncronous event. */ | 547 | asyncronous event. */ |
567 | 548 | ||
@@ -607,13 +588,12 @@ struct ipmi_recv | |||
607 | struct ipmi_recv) | 588 | struct ipmi_recv) |
608 | 589 | ||
609 | /* Register to get commands from other entities on this interface. */ | 590 | /* Register to get commands from other entities on this interface. */ |
610 | struct ipmi_cmdspec | 591 | struct ipmi_cmdspec { |
611 | { | ||
612 | unsigned char netfn; | 592 | unsigned char netfn; |
613 | unsigned char cmd; | 593 | unsigned char cmd; |
614 | }; | 594 | }; |
615 | 595 | ||
616 | /* | 596 | /* |
617 | * Register to receive a specific command. error values: | 597 | * Register to receive a specific command. error values: |
618 | * - EFAULT - an address supplied was invalid. | 598 | * - EFAULT - an address supplied was invalid. |
619 | * - EBUSY - The netfn/cmd supplied was already in use. | 599 | * - EBUSY - The netfn/cmd supplied was already in use. |
@@ -636,8 +616,7 @@ struct ipmi_cmdspec | |||
636 | * else. The chans field is a bitmask, (1 << channel) for each channel. | 616 | * else. The chans field is a bitmask, (1 << channel) for each channel. |
637 | * It may be IPMI_CHAN_ALL for all channels. | 617 | * It may be IPMI_CHAN_ALL for all channels. |
638 | */ | 618 | */ |
639 | struct ipmi_cmdspec_chans | 619 | struct ipmi_cmdspec_chans { |
640 | { | ||
641 | unsigned int netfn; | 620 | unsigned int netfn; |
642 | unsigned int cmd; | 621 | unsigned int cmd; |
643 | unsigned int chans; | 622 | unsigned int chans; |
@@ -659,7 +638,7 @@ struct ipmi_cmdspec_chans | |||
659 | #define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29, \ | 638 | #define IPMICTL_UNREGISTER_FOR_CMD_CHANS _IOR(IPMI_IOC_MAGIC, 29, \ |
660 | struct ipmi_cmdspec_chans) | 639 | struct ipmi_cmdspec_chans) |
661 | 640 | ||
662 | /* | 641 | /* |
663 | * Set whether this interface receives events. Note that the first | 642 | * Set whether this interface receives events. Note that the first |
664 | * user registered for events will get all pending events for the | 643 | * user registered for events will get all pending events for the |
665 | * interface. error values: | 644 | * interface. error values: |
@@ -675,15 +654,18 @@ struct ipmi_cmdspec_chans | |||
675 | * things it takes to determine your address (if not the BMC) and set | 654 | * things it takes to determine your address (if not the BMC) and set |
676 | * it for everyone else. You should probably leave the LUN alone. | 655 | * it for everyone else. You should probably leave the LUN alone. |
677 | */ | 656 | */ |
678 | struct ipmi_channel_lun_address_set | 657 | struct ipmi_channel_lun_address_set { |
679 | { | ||
680 | unsigned short channel; | 658 | unsigned short channel; |
681 | unsigned char value; | 659 | unsigned char value; |
682 | }; | 660 | }; |
683 | #define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set) | 661 | #define IPMICTL_SET_MY_CHANNEL_ADDRESS_CMD \ |
684 | #define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set) | 662 | _IOR(IPMI_IOC_MAGIC, 24, struct ipmi_channel_lun_address_set) |
685 | #define IPMICTL_SET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set) | 663 | #define IPMICTL_GET_MY_CHANNEL_ADDRESS_CMD \ |
686 | #define IPMICTL_GET_MY_CHANNEL_LUN_CMD _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set) | 664 | _IOR(IPMI_IOC_MAGIC, 25, struct ipmi_channel_lun_address_set) |
665 | #define IPMICTL_SET_MY_CHANNEL_LUN_CMD \ | ||
666 | _IOR(IPMI_IOC_MAGIC, 26, struct ipmi_channel_lun_address_set) | ||
667 | #define IPMICTL_GET_MY_CHANNEL_LUN_CMD \ | ||
668 | _IOR(IPMI_IOC_MAGIC, 27, struct ipmi_channel_lun_address_set) | ||
687 | /* Legacy interfaces, these only set IPMB 0. */ | 669 | /* Legacy interfaces, these only set IPMB 0. */ |
688 | #define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int) | 670 | #define IPMICTL_SET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 17, unsigned int) |
689 | #define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) | 671 | #define IPMICTL_GET_MY_ADDRESS_CMD _IOR(IPMI_IOC_MAGIC, 18, unsigned int) |
@@ -694,8 +676,7 @@ struct ipmi_channel_lun_address_set | |||
694 | * Get/set the default timing values for an interface. You shouldn't | 676 | * Get/set the default timing values for an interface. You shouldn't |
695 | * generally mess with these. | 677 | * generally mess with these. |
696 | */ | 678 | */ |
697 | struct ipmi_timing_parms | 679 | struct ipmi_timing_parms { |
698 | { | ||
699 | int retries; | 680 | int retries; |
700 | unsigned int retry_time_ms; | 681 | unsigned int retry_time_ms; |
701 | }; | 682 | }; |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 6e8cec503380..62b73668b602 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -60,8 +60,7 @@ typedef struct ipmi_smi *ipmi_smi_t; | |||
60 | * asynchronous data and messages and request them from the | 60 | * asynchronous data and messages and request them from the |
61 | * interface. | 61 | * interface. |
62 | */ | 62 | */ |
63 | struct ipmi_smi_msg | 63 | struct ipmi_smi_msg { |
64 | { | ||
65 | struct list_head link; | 64 | struct list_head link; |
66 | 65 | ||
67 | long msgid; | 66 | long msgid; |
@@ -74,12 +73,11 @@ struct ipmi_smi_msg | |||
74 | unsigned char rsp[IPMI_MAX_MSG_LENGTH]; | 73 | unsigned char rsp[IPMI_MAX_MSG_LENGTH]; |
75 | 74 | ||
76 | /* Will be called when the system is done with the message | 75 | /* Will be called when the system is done with the message |
77 | (presumably to free it). */ | 76 | (presumably to free it). */ |
78 | void (*done)(struct ipmi_smi_msg *msg); | 77 | void (*done)(struct ipmi_smi_msg *msg); |
79 | }; | 78 | }; |
80 | 79 | ||
81 | struct ipmi_smi_handlers | 80 | struct ipmi_smi_handlers { |
82 | { | ||
83 | struct module *owner; | 81 | struct module *owner; |
84 | 82 | ||
85 | /* The low-level interface cannot start sending messages to | 83 | /* The low-level interface cannot start sending messages to |
@@ -231,7 +229,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) | |||
231 | directory for this interface. Note that the entry will | 229 | directory for this interface. Note that the entry will |
232 | automatically be dstroyed when the interface is destroyed. */ | 230 | automatically be dstroyed when the interface is destroyed. */ |
233 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 231 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
234 | read_proc_t *read_proc, write_proc_t *write_proc, | 232 | read_proc_t *read_proc, |
235 | void *data, struct module *owner); | 233 | void *data, struct module *owner); |
236 | 234 | ||
237 | #endif /* __LINUX_IPMI_SMI_H */ | 235 | #endif /* __LINUX_IPMI_SMI_H */ |
diff --git a/include/linux/kbuild.h b/include/linux/kbuild.h new file mode 100644 index 000000000000..22a72198c14b --- /dev/null +++ b/include/linux/kbuild.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __LINUX_KBUILD_H | ||
2 | #define __LINUX_KBUILD_H | ||
3 | |||
4 | #define DEFINE(sym, val) \ | ||
5 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
6 | |||
7 | #define BLANK() asm volatile("\n->" : : ) | ||
8 | |||
9 | #define OFFSET(sym, str, mem) \ | ||
10 | DEFINE(sym, offsetof(struct str, mem)) | ||
11 | |||
12 | #define COMMENT(x) \ | ||
13 | asm volatile("\n->#" x) | ||
14 | |||
15 | #endif | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index cd6d02cf854d..53839ba265ec 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -20,6 +20,9 @@ | |||
20 | extern const char linux_banner[]; | 20 | extern const char linux_banner[]; |
21 | extern const char linux_proc_banner[]; | 21 | extern const char linux_proc_banner[]; |
22 | 22 | ||
23 | #define USHORT_MAX ((u16)(~0U)) | ||
24 | #define SHORT_MAX ((s16)(USHORT_MAX>>1)) | ||
25 | #define SHORT_MIN (-SHORT_MAX - 1) | ||
23 | #define INT_MAX ((int)(~0U>>1)) | 26 | #define INT_MAX ((int)(~0U>>1)) |
24 | #define INT_MIN (-INT_MAX - 1) | 27 | #define INT_MIN (-INT_MAX - 1) |
25 | #define UINT_MAX (~0U) | 28 | #define UINT_MAX (~0U) |
@@ -188,6 +191,7 @@ extern int log_buf_copy(char *dest, int idx, int len); | |||
188 | extern int printk_ratelimit_jiffies; | 191 | extern int printk_ratelimit_jiffies; |
189 | extern int printk_ratelimit_burst; | 192 | extern int printk_ratelimit_burst; |
190 | extern int printk_ratelimit(void); | 193 | extern int printk_ratelimit(void); |
194 | extern int __ratelimit(int ratelimit_jiffies, int ratelimit_burst); | ||
191 | extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst); | 195 | extern int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst); |
192 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, | 196 | extern bool printk_timed_ratelimit(unsigned long *caller_jiffies, |
193 | unsigned int interval_msec); | 197 | unsigned int interval_msec); |
@@ -255,6 +259,7 @@ extern enum system_states { | |||
255 | #define TAINT_USER (1<<6) | 259 | #define TAINT_USER (1<<6) |
256 | #define TAINT_DIE (1<<7) | 260 | #define TAINT_DIE (1<<7) |
257 | #define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8) | 261 | #define TAINT_OVERRIDDEN_ACPI_TABLE (1<<8) |
262 | #define TAINT_WARN (1<<9) | ||
258 | 263 | ||
259 | extern void dump_stack(void) __cold; | 264 | extern void dump_stack(void) __cold; |
260 | 265 | ||
diff --git a/include/linux/key.h b/include/linux/key.h index a70b8a8f2005..c45c962d1cc5 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/rbtree.h> | 20 | #include <linux/rbtree.h> |
21 | #include <linux/rcupdate.h> | 21 | #include <linux/rcupdate.h> |
22 | #include <linux/sysctl.h> | ||
22 | #include <asm/atomic.h> | 23 | #include <asm/atomic.h> |
23 | 24 | ||
24 | #ifdef __KERNEL__ | 25 | #ifdef __KERNEL__ |
@@ -67,6 +68,8 @@ struct key; | |||
67 | #define KEY_OTH_SETATTR 0x00000020 | 68 | #define KEY_OTH_SETATTR 0x00000020 |
68 | #define KEY_OTH_ALL 0x0000003f | 69 | #define KEY_OTH_ALL 0x0000003f |
69 | 70 | ||
71 | #define KEY_PERM_UNDEF 0xffffffff | ||
72 | |||
70 | struct seq_file; | 73 | struct seq_file; |
71 | struct user_struct; | 74 | struct user_struct; |
72 | struct signal_struct; | 75 | struct signal_struct; |
@@ -208,16 +211,19 @@ extern struct key *request_key(struct key_type *type, | |||
208 | 211 | ||
209 | extern struct key *request_key_with_auxdata(struct key_type *type, | 212 | extern struct key *request_key_with_auxdata(struct key_type *type, |
210 | const char *description, | 213 | const char *description, |
211 | const char *callout_info, | 214 | const void *callout_info, |
215 | size_t callout_len, | ||
212 | void *aux); | 216 | void *aux); |
213 | 217 | ||
214 | extern struct key *request_key_async(struct key_type *type, | 218 | extern struct key *request_key_async(struct key_type *type, |
215 | const char *description, | 219 | const char *description, |
216 | const char *callout_info); | 220 | const void *callout_info, |
221 | size_t callout_len); | ||
217 | 222 | ||
218 | extern struct key *request_key_async_with_auxdata(struct key_type *type, | 223 | extern struct key *request_key_async_with_auxdata(struct key_type *type, |
219 | const char *description, | 224 | const char *description, |
220 | const char *callout_info, | 225 | const void *callout_info, |
226 | size_t callout_len, | ||
221 | void *aux); | 227 | void *aux); |
222 | 228 | ||
223 | extern int wait_for_key_construction(struct key *key, bool intr); | 229 | extern int wait_for_key_construction(struct key *key, bool intr); |
@@ -229,6 +235,7 @@ extern key_ref_t key_create_or_update(key_ref_t keyring, | |||
229 | const char *description, | 235 | const char *description, |
230 | const void *payload, | 236 | const void *payload, |
231 | size_t plen, | 237 | size_t plen, |
238 | key_perm_t perm, | ||
232 | unsigned long flags); | 239 | unsigned long flags); |
233 | 240 | ||
234 | extern int key_update(key_ref_t key, | 241 | extern int key_update(key_ref_t key, |
@@ -257,14 +264,18 @@ extern int keyring_add_key(struct key *keyring, | |||
257 | 264 | ||
258 | extern struct key *key_lookup(key_serial_t id); | 265 | extern struct key *key_lookup(key_serial_t id); |
259 | 266 | ||
260 | #define key_serial(key) ((key) ? (key)->serial : 0) | 267 | static inline key_serial_t key_serial(struct key *key) |
268 | { | ||
269 | return key ? key->serial : 0; | ||
270 | } | ||
271 | |||
272 | #ifdef CONFIG_SYSCTL | ||
273 | extern ctl_table key_sysctls[]; | ||
274 | #endif | ||
261 | 275 | ||
262 | /* | 276 | /* |
263 | * the userspace interface | 277 | * the userspace interface |
264 | */ | 278 | */ |
265 | extern struct key root_user_keyring, root_session_keyring; | ||
266 | extern int alloc_uid_keyring(struct user_struct *user, | ||
267 | struct task_struct *ctx); | ||
268 | extern void switch_uid_keyring(struct user_struct *new_user); | 279 | extern void switch_uid_keyring(struct user_struct *new_user); |
269 | extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk); | 280 | extern int copy_keys(unsigned long clone_flags, struct task_struct *tsk); |
270 | extern int copy_thread_group_keys(struct task_struct *tsk); | 281 | extern int copy_thread_group_keys(struct task_struct *tsk); |
@@ -293,7 +304,6 @@ extern void key_init(void); | |||
293 | #define make_key_ref(k, p) ({ NULL; }) | 304 | #define make_key_ref(k, p) ({ NULL; }) |
294 | #define key_ref_to_ptr(k) ({ NULL; }) | 305 | #define key_ref_to_ptr(k) ({ NULL; }) |
295 | #define is_key_possessed(k) 0 | 306 | #define is_key_possessed(k) 0 |
296 | #define alloc_uid_keyring(u,c) 0 | ||
297 | #define switch_uid_keyring(u) do { } while(0) | 307 | #define switch_uid_keyring(u) do { } while(0) |
298 | #define __install_session_keyring(t, k) ({ NULL; }) | 308 | #define __install_session_keyring(t, k) ({ NULL; }) |
299 | #define copy_keys(f,t) 0 | 309 | #define copy_keys(f,t) 0 |
@@ -306,10 +316,6 @@ extern void key_init(void); | |||
306 | #define key_fsgid_changed(t) do { } while(0) | 316 | #define key_fsgid_changed(t) do { } while(0) |
307 | #define key_init() do { } while(0) | 317 | #define key_init() do { } while(0) |
308 | 318 | ||
309 | /* Initial keyrings */ | ||
310 | extern struct key root_user_keyring; | ||
311 | extern struct key root_session_keyring; | ||
312 | |||
313 | #endif /* CONFIG_KEYS */ | 319 | #endif /* CONFIG_KEYS */ |
314 | #endif /* __KERNEL__ */ | 320 | #endif /* __KERNEL__ */ |
315 | #endif /* _LINUX_KEY_H */ | 321 | #endif /* _LINUX_KEY_H */ |
diff --git a/include/linux/keyctl.h b/include/linux/keyctl.h index 3365945640c9..656ee6b77a4a 100644 --- a/include/linux/keyctl.h +++ b/include/linux/keyctl.h | |||
@@ -49,5 +49,6 @@ | |||
49 | #define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */ | 49 | #define KEYCTL_SET_REQKEY_KEYRING 14 /* set default request-key keyring */ |
50 | #define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ | 50 | #define KEYCTL_SET_TIMEOUT 15 /* set key timeout */ |
51 | #define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ | 51 | #define KEYCTL_ASSUME_AUTHORITY 16 /* assume request_key() authorisation */ |
52 | #define KEYCTL_GET_SECURITY 17 /* get key security label */ | ||
52 | 53 | ||
53 | #endif /* _LINUX_KEYCTL_H */ | 54 | #endif /* _LINUX_KEYCTL_H */ |
diff --git a/include/linux/list.h b/include/linux/list.h index b4a939b6b625..7627508f1b74 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -328,7 +328,7 @@ static inline int list_is_singular(const struct list_head *head) | |||
328 | return !list_empty(head) && (head->next == head->prev); | 328 | return !list_empty(head) && (head->next == head->prev); |
329 | } | 329 | } |
330 | 330 | ||
331 | static inline void __list_splice(struct list_head *list, | 331 | static inline void __list_splice(const struct list_head *list, |
332 | struct list_head *head) | 332 | struct list_head *head) |
333 | { | 333 | { |
334 | struct list_head *first = list->next; | 334 | struct list_head *first = list->next; |
@@ -347,7 +347,8 @@ static inline void __list_splice(struct list_head *list, | |||
347 | * @list: the new list to add. | 347 | * @list: the new list to add. |
348 | * @head: the place to add it in the first list. | 348 | * @head: the place to add it in the first list. |
349 | */ | 349 | */ |
350 | static inline void list_splice(struct list_head *list, struct list_head *head) | 350 | static inline void list_splice(const struct list_head *list, |
351 | struct list_head *head) | ||
351 | { | 352 | { |
352 | if (!list_empty(list)) | 353 | if (!list_empty(list)) |
353 | __list_splice(list, head); | 354 | __list_splice(list, head); |
diff --git a/include/linux/mca-legacy.h b/include/linux/mca-legacy.h index f2bb770e530a..7a3aea845902 100644 --- a/include/linux/mca-legacy.h +++ b/include/linux/mca-legacy.h | |||
@@ -34,7 +34,6 @@ | |||
34 | extern int mca_find_adapter(int id, int start); | 34 | extern int mca_find_adapter(int id, int start); |
35 | extern int mca_find_unused_adapter(int id, int start); | 35 | extern int mca_find_unused_adapter(int id, int start); |
36 | 36 | ||
37 | extern int mca_is_adapter_used(int slot); | ||
38 | extern int mca_mark_as_used(int slot); | 37 | extern int mca_mark_as_used(int slot); |
39 | extern void mca_mark_as_unused(int slot); | 38 | extern void mca_mark_as_unused(int slot); |
40 | 39 | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 8b1c4295848b..e6608776bc96 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -27,9 +27,6 @@ struct mm_struct; | |||
27 | 27 | ||
28 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 28 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR |
29 | 29 | ||
30 | extern void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p); | ||
31 | extern void mm_free_cgroup(struct mm_struct *mm); | ||
32 | |||
33 | #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0) | 30 | #define page_reset_bad_cgroup(page) ((page)->page_cgroup = 0) |
34 | 31 | ||
35 | extern struct page_cgroup *page_get_page_cgroup(struct page *page); | 32 | extern struct page_cgroup *page_get_page_cgroup(struct page *page); |
@@ -48,8 +45,10 @@ extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | |||
48 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); | 45 | extern void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask); |
49 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); | 46 | int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *mem); |
50 | 47 | ||
48 | extern struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p); | ||
49 | |||
51 | #define mm_match_cgroup(mm, cgroup) \ | 50 | #define mm_match_cgroup(mm, cgroup) \ |
52 | ((cgroup) == rcu_dereference((mm)->mem_cgroup)) | 51 | ((cgroup) == mem_cgroup_from_task((mm)->owner)) |
53 | 52 | ||
54 | extern int mem_cgroup_prepare_migration(struct page *page); | 53 | extern int mem_cgroup_prepare_migration(struct page *page); |
55 | extern void mem_cgroup_end_migration(struct page *page); | 54 | extern void mem_cgroup_end_migration(struct page *page); |
@@ -73,15 +72,6 @@ extern long mem_cgroup_calc_reclaim_inactive(struct mem_cgroup *mem, | |||
73 | struct zone *zone, int priority); | 72 | struct zone *zone, int priority); |
74 | 73 | ||
75 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ | 74 | #else /* CONFIG_CGROUP_MEM_RES_CTLR */ |
76 | static inline void mm_init_cgroup(struct mm_struct *mm, | ||
77 | struct task_struct *p) | ||
78 | { | ||
79 | } | ||
80 | |||
81 | static inline void mm_free_cgroup(struct mm_struct *mm) | ||
82 | { | ||
83 | } | ||
84 | |||
85 | static inline void page_reset_bad_cgroup(struct page *page) | 75 | static inline void page_reset_bad_cgroup(struct page *page) |
86 | { | 76 | { |
87 | } | 77 | } |
diff --git a/include/linux/memory.h b/include/linux/memory.h index f80e0e331cb7..2f5f8a5ef2a0 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -53,6 +53,13 @@ struct memory_notify { | |||
53 | struct notifier_block; | 53 | struct notifier_block; |
54 | struct mem_section; | 54 | struct mem_section; |
55 | 55 | ||
56 | /* | ||
57 | * Priorities for the hotplug memory callback routines (stored in decreasing | ||
58 | * order in the callback chain) | ||
59 | */ | ||
60 | #define SLAB_CALLBACK_PRI 1 | ||
61 | #define IPC_CALLBACK_PRI 10 | ||
62 | |||
56 | #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE | 63 | #ifndef CONFIG_MEMORY_HOTPLUG_SPARSE |
57 | static inline int memory_dev_init(void) | 64 | static inline int memory_dev_init(void) |
58 | { | 65 | { |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 8b7f4a5d4f6a..c31a9cd2a30e 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1066,6 +1066,19 @@ extern void unlink_file_vma(struct vm_area_struct *); | |||
1066 | extern struct vm_area_struct *copy_vma(struct vm_area_struct **, | 1066 | extern struct vm_area_struct *copy_vma(struct vm_area_struct **, |
1067 | unsigned long addr, unsigned long len, pgoff_t pgoff); | 1067 | unsigned long addr, unsigned long len, pgoff_t pgoff); |
1068 | extern void exit_mmap(struct mm_struct *); | 1068 | extern void exit_mmap(struct mm_struct *); |
1069 | |||
1070 | #ifdef CONFIG_PROC_FS | ||
1071 | /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ | ||
1072 | extern void added_exe_file_vma(struct mm_struct *mm); | ||
1073 | extern void removed_exe_file_vma(struct mm_struct *mm); | ||
1074 | #else | ||
1075 | static inline void added_exe_file_vma(struct mm_struct *mm) | ||
1076 | {} | ||
1077 | |||
1078 | static inline void removed_exe_file_vma(struct mm_struct *mm) | ||
1079 | {} | ||
1080 | #endif /* CONFIG_PROC_FS */ | ||
1081 | |||
1069 | extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); | 1082 | extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); |
1070 | extern int install_special_mapping(struct mm_struct *mm, | 1083 | extern int install_special_mapping(struct mm_struct *mm, |
1071 | unsigned long addr, unsigned long len, | 1084 | unsigned long addr, unsigned long len, |
@@ -1230,8 +1243,6 @@ int drop_caches_sysctl_handler(struct ctl_table *, int, struct file *, | |||
1230 | void __user *, size_t *, loff_t *); | 1243 | void __user *, size_t *, loff_t *); |
1231 | unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, | 1244 | unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask, |
1232 | unsigned long lru_pages); | 1245 | unsigned long lru_pages); |
1233 | void drop_pagecache(void); | ||
1234 | void drop_slab(void); | ||
1235 | 1246 | ||
1236 | #ifndef CONFIG_MMU | 1247 | #ifndef CONFIG_MMU |
1237 | #define randomize_va_space 0 | 1248 | #define randomize_va_space 0 |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index e2bae8dde35a..eb7c16cc9559 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -225,8 +225,15 @@ struct mm_struct { | |||
225 | /* aio bits */ | 225 | /* aio bits */ |
226 | rwlock_t ioctx_list_lock; /* aio lock */ | 226 | rwlock_t ioctx_list_lock; /* aio lock */ |
227 | struct kioctx *ioctx_list; | 227 | struct kioctx *ioctx_list; |
228 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR | 228 | #ifdef CONFIG_MM_OWNER |
229 | struct mem_cgroup *mem_cgroup; | 229 | struct task_struct *owner; /* The thread group leader that */ |
230 | /* owns the mm_struct. */ | ||
231 | #endif | ||
232 | |||
233 | #ifdef CONFIG_PROC_FS | ||
234 | /* store ref to file /proc/<pid>/exe symlink points to */ | ||
235 | struct file *exe_file; | ||
236 | unsigned long num_exe_file_vmas; | ||
230 | #endif | 237 | #endif |
231 | }; | 238 | }; |
232 | 239 | ||
diff --git a/include/linux/msg.h b/include/linux/msg.h index 10a3d5a1abff..6f3b8e79a991 100644 --- a/include/linux/msg.h +++ b/include/linux/msg.h | |||
@@ -49,16 +49,26 @@ struct msginfo { | |||
49 | unsigned short msgseg; | 49 | unsigned short msgseg; |
50 | }; | 50 | }; |
51 | 51 | ||
52 | /* | ||
53 | * Scaling factor to compute msgmni: | ||
54 | * the memory dedicated to msg queues (msgmni * msgmnb) should occupy | ||
55 | * at most 1/MSG_MEM_SCALE of the lowmem (see the formula in ipc/msg.c): | ||
56 | * up to 8MB : msgmni = 16 (MSGMNI) | ||
57 | * 4 GB : msgmni = 8K | ||
58 | * more than 16 GB : msgmni = 32K (IPCMNI) | ||
59 | */ | ||
60 | #define MSG_MEM_SCALE 32 | ||
61 | |||
52 | #define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ | 62 | #define MSGMNI 16 /* <= IPCMNI */ /* max # of msg queue identifiers */ |
53 | #define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ | 63 | #define MSGMAX 8192 /* <= INT_MAX */ /* max size of message (bytes) */ |
54 | #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ | 64 | #define MSGMNB 16384 /* <= INT_MAX */ /* default max size of a message queue */ |
55 | 65 | ||
56 | /* unused */ | 66 | /* unused */ |
57 | #define MSGPOOL (MSGMNI*MSGMNB/1024) /* size in kilobytes of message pool */ | 67 | #define MSGPOOL (MSGMNI * MSGMNB) /* size in bytes of message pool */ |
58 | #define MSGTQL MSGMNB /* number of system message headers */ | 68 | #define MSGTQL MSGMNB /* number of system message headers */ |
59 | #define MSGMAP MSGMNB /* number of entries in message map */ | 69 | #define MSGMAP MSGMNB /* number of entries in message map */ |
60 | #define MSGSSZ 16 /* message segment size */ | 70 | #define MSGSSZ 16 /* message segment size */ |
61 | #define __MSGSEG ((MSGPOOL*1024)/ MSGSSZ) /* max no. of segments */ | 71 | #define __MSGSEG (MSGPOOL / MSGSSZ) /* max no. of segments */ |
62 | #define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff) | 72 | #define MSGSEG (__MSGSEG <= 0xffff ? __MSGSEG : 0xffff) |
63 | 73 | ||
64 | #ifdef __KERNEL__ | 74 | #ifdef __KERNEL__ |
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index 986572081e19..155719dab813 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
@@ -56,9 +56,11 @@ struct nbd_device { | |||
56 | int magic; | 56 | int magic; |
57 | 57 | ||
58 | spinlock_t queue_lock; | 58 | spinlock_t queue_lock; |
59 | struct list_head queue_head;/* Requests are added here... */ | 59 | struct list_head queue_head; /* Requests waiting result */ |
60 | struct request *active_req; | 60 | struct request *active_req; |
61 | wait_queue_head_t active_wq; | 61 | wait_queue_head_t active_wq; |
62 | struct list_head waiting_queue; /* Requests to be sent */ | ||
63 | wait_queue_head_t waiting_wq; | ||
62 | 64 | ||
63 | struct mutex tx_lock; | 65 | struct mutex tx_lock; |
64 | struct gendisk *disk; | 66 | struct gendisk *disk; |
@@ -86,11 +88,7 @@ struct nbd_request { | |||
86 | char handle[8]; | 88 | char handle[8]; |
87 | __be64 from; | 89 | __be64 from; |
88 | __be32 len; | 90 | __be32 len; |
89 | } | 91 | } __attribute__ ((packed)); |
90 | #ifdef __GNUC__ | ||
91 | __attribute__ ((packed)) | ||
92 | #endif | ||
93 | ; | ||
94 | 92 | ||
95 | /* | 93 | /* |
96 | * This is the reply packet that nbd-server sends back to the client after | 94 | * This is the reply packet that nbd-server sends back to the client after |
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index 20dfed590183..0ff6224d172a 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -121,6 +121,10 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh, | |||
121 | extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, | 121 | extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh, |
122 | struct notifier_block *nb); | 122 | struct notifier_block *nb); |
123 | 123 | ||
124 | extern int blocking_notifier_chain_cond_register( | ||
125 | struct blocking_notifier_head *nh, | ||
126 | struct notifier_block *nb); | ||
127 | |||
124 | extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, | 128 | extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh, |
125 | struct notifier_block *nb); | 129 | struct notifier_block *nb); |
126 | extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, | 130 | extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh, |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1ac969724bb2..d746a2abb322 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -4,7 +4,6 @@ | |||
4 | #include <linux/preempt.h> | 4 | #include <linux/preempt.h> |
5 | #include <linux/slab.h> /* For kmalloc() */ | 5 | #include <linux/slab.h> /* For kmalloc() */ |
6 | #include <linux/smp.h> | 6 | #include <linux/smp.h> |
7 | #include <linux/string.h> /* For memset() */ | ||
8 | #include <linux/cpumask.h> | 7 | #include <linux/cpumask.h> |
9 | 8 | ||
10 | #include <asm/percpu.h> | 9 | #include <asm/percpu.h> |
diff --git a/include/linux/personality.h b/include/linux/personality.h index 012cd558189b..a84e9ff9b27e 100644 --- a/include/linux/personality.h +++ b/include/linux/personality.h | |||
@@ -105,10 +105,6 @@ struct exec_domain { | |||
105 | */ | 105 | */ |
106 | #define personality(pers) (pers & PER_MASK) | 106 | #define personality(pers) (pers & PER_MASK) |
107 | 107 | ||
108 | /* | ||
109 | * Personality of the currently running process. | ||
110 | */ | ||
111 | #define get_personality (current->personality) | ||
112 | 108 | ||
113 | /* | 109 | /* |
114 | * Change personality of the currently running process. | 110 | * Change personality of the currently running process. |
diff --git a/include/linux/phantom.h b/include/linux/phantom.h index 96f4048a6cc3..02268c54c250 100644 --- a/include/linux/phantom.h +++ b/include/linux/phantom.h | |||
@@ -27,14 +27,17 @@ struct phm_regs { | |||
27 | 27 | ||
28 | #define PH_IOC_MAGIC 'p' | 28 | #define PH_IOC_MAGIC 'p' |
29 | #define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *) | 29 | #define PHN_GET_REG _IOWR(PH_IOC_MAGIC, 0, struct phm_reg *) |
30 | #define PHN_SET_REG _IOW (PH_IOC_MAGIC, 1, struct phm_reg *) | 30 | #define PHN_SET_REG _IOW(PH_IOC_MAGIC, 1, struct phm_reg *) |
31 | #define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *) | 31 | #define PHN_GET_REGS _IOWR(PH_IOC_MAGIC, 2, struct phm_regs *) |
32 | #define PHN_SET_REGS _IOW (PH_IOC_MAGIC, 3, struct phm_regs *) | 32 | #define PHN_SET_REGS _IOW(PH_IOC_MAGIC, 3, struct phm_regs *) |
33 | /* this ioctl tells the driver, that the caller is not OpenHaptics and might | 33 | /* this ioctl tells the driver, that the caller is not OpenHaptics and might |
34 | * use improved registers update (no more phantom switchoffs when using | 34 | * use improved registers update (no more phantom switchoffs when using |
35 | * libphantom) */ | 35 | * libphantom) */ |
36 | #define PHN_NOT_OH _IO (PH_IOC_MAGIC, 4) | 36 | #define PHN_NOT_OH _IO(PH_IOC_MAGIC, 4) |
37 | #define PH_IOC_MAXNR 4 | 37 | #define PHN_GETREG _IOWR(PH_IOC_MAGIC, 5, struct phm_reg) |
38 | #define PHN_SETREG _IOW(PH_IOC_MAGIC, 6, struct phm_reg) | ||
39 | #define PHN_GETREGS _IOWR(PH_IOC_MAGIC, 7, struct phm_regs) | ||
40 | #define PHN_SETREGS _IOW(PH_IOC_MAGIC, 8, struct phm_regs) | ||
38 | 41 | ||
39 | #define PHN_CONTROL 0x6 /* control byte in iaddr space */ | 42 | #define PHN_CONTROL 0x6 /* control byte in iaddr space */ |
40 | #define PHN_CTL_AMP 0x1 /* switch after torques change */ | 43 | #define PHN_CTL_AMP 0x1 /* switch after torques change */ |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 9b6c935f69cf..9883bc942262 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -9,7 +9,6 @@ | |||
9 | 9 | ||
10 | struct net; | 10 | struct net; |
11 | struct completion; | 11 | struct completion; |
12 | |||
13 | /* | 12 | /* |
14 | * The proc filesystem constants/structures | 13 | * The proc filesystem constants/structures |
15 | */ | 14 | */ |
@@ -41,7 +40,7 @@ enum { | |||
41 | * /proc file has a parent, but "subdir" is NULL for all | 40 | * /proc file has a parent, but "subdir" is NULL for all |
42 | * non-directory entries). | 41 | * non-directory entries). |
43 | * | 42 | * |
44 | * "get_info" is called at "read", while "owner" is used to protect module | 43 | * "owner" is used to protect module |
45 | * from unloading while proc_dir_entry is in use | 44 | * from unloading while proc_dir_entry is in use |
46 | */ | 45 | */ |
47 | 46 | ||
@@ -49,7 +48,6 @@ typedef int (read_proc_t)(char *page, char **start, off_t off, | |||
49 | int count, int *eof, void *data); | 48 | int count, int *eof, void *data); |
50 | typedef int (write_proc_t)(struct file *file, const char __user *buffer, | 49 | typedef int (write_proc_t)(struct file *file, const char __user *buffer, |
51 | unsigned long count, void *data); | 50 | unsigned long count, void *data); |
52 | typedef int (get_info_t)(char *, char **, off_t, int); | ||
53 | 51 | ||
54 | struct proc_dir_entry { | 52 | struct proc_dir_entry { |
55 | unsigned int low_ino; | 53 | unsigned int low_ino; |
@@ -70,7 +68,6 @@ struct proc_dir_entry { | |||
70 | * somewhere. | 68 | * somewhere. |
71 | */ | 69 | */ |
72 | const struct file_operations *proc_fops; | 70 | const struct file_operations *proc_fops; |
73 | get_info_t *get_info; | ||
74 | struct module *owner; | 71 | struct module *owner; |
75 | struct proc_dir_entry *next, *parent, *subdir; | 72 | struct proc_dir_entry *next, *parent, *subdir; |
76 | void *data; | 73 | void *data; |
@@ -97,10 +94,6 @@ struct vmcore { | |||
97 | 94 | ||
98 | #ifdef CONFIG_PROC_FS | 95 | #ifdef CONFIG_PROC_FS |
99 | 96 | ||
100 | extern struct proc_dir_entry proc_root; | ||
101 | extern struct proc_dir_entry *proc_root_fs; | ||
102 | extern struct proc_dir_entry *proc_bus; | ||
103 | extern struct proc_dir_entry *proc_root_driver; | ||
104 | extern struct proc_dir_entry *proc_root_kcore; | 97 | extern struct proc_dir_entry *proc_root_kcore; |
105 | 98 | ||
106 | extern spinlock_t proc_subdir_lock; | 99 | extern spinlock_t proc_subdir_lock; |
@@ -123,9 +116,10 @@ void de_put(struct proc_dir_entry *de); | |||
123 | 116 | ||
124 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, | 117 | extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode, |
125 | struct proc_dir_entry *parent); | 118 | struct proc_dir_entry *parent); |
126 | struct proc_dir_entry *proc_create(const char *name, mode_t mode, | 119 | struct proc_dir_entry *proc_create_data(const char *name, mode_t mode, |
127 | struct proc_dir_entry *parent, | 120 | struct proc_dir_entry *parent, |
128 | const struct file_operations *proc_fops); | 121 | const struct file_operations *proc_fops, |
122 | void *data); | ||
129 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); | 123 | extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent); |
130 | 124 | ||
131 | extern struct vfsmount *proc_mnt; | 125 | extern struct vfsmount *proc_mnt; |
@@ -180,6 +174,12 @@ extern struct proc_dir_entry *proc_mkdir(const char *,struct proc_dir_entry *); | |||
180 | extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, | 174 | extern struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode, |
181 | struct proc_dir_entry *parent); | 175 | struct proc_dir_entry *parent); |
182 | 176 | ||
177 | static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode, | ||
178 | struct proc_dir_entry *parent, const struct file_operations *proc_fops) | ||
179 | { | ||
180 | return proc_create_data(name, mode, parent, proc_fops, NULL); | ||
181 | } | ||
182 | |||
183 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | 183 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, |
184 | mode_t mode, struct proc_dir_entry *base, | 184 | mode_t mode, struct proc_dir_entry *base, |
185 | read_proc_t *read_proc, void * data) | 185 | read_proc_t *read_proc, void * data) |
@@ -192,24 +192,19 @@ static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | |||
192 | return res; | 192 | return res; |
193 | } | 193 | } |
194 | 194 | ||
195 | static inline struct proc_dir_entry *create_proc_info_entry(const char *name, | ||
196 | mode_t mode, struct proc_dir_entry *base, get_info_t *get_info) | ||
197 | { | ||
198 | struct proc_dir_entry *res=create_proc_entry(name,mode,base); | ||
199 | if (res) res->get_info=get_info; | ||
200 | return res; | ||
201 | } | ||
202 | |||
203 | extern struct proc_dir_entry *proc_net_fops_create(struct net *net, | 195 | extern struct proc_dir_entry *proc_net_fops_create(struct net *net, |
204 | const char *name, mode_t mode, const struct file_operations *fops); | 196 | const char *name, mode_t mode, const struct file_operations *fops); |
205 | extern void proc_net_remove(struct net *net, const char *name); | 197 | extern void proc_net_remove(struct net *net, const char *name); |
206 | extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, | 198 | extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, |
207 | struct proc_dir_entry *parent); | 199 | struct proc_dir_entry *parent); |
208 | 200 | ||
209 | #else | 201 | /* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are |
202 | * only needed to implement /proc/<pid>|self/exe so we define them here. */ | ||
203 | extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); | ||
204 | extern struct file *get_mm_exe_file(struct mm_struct *mm); | ||
205 | extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm); | ||
210 | 206 | ||
211 | #define proc_root_driver NULL | 207 | #else |
212 | #define proc_bus NULL | ||
213 | 208 | ||
214 | #define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; }) | 209 | #define proc_net_fops_create(net, name, mode, fops) ({ (void)(mode), NULL; }) |
215 | static inline void proc_net_remove(struct net *net, const char *name) {} | 210 | static inline void proc_net_remove(struct net *net, const char *name) {} |
@@ -226,6 +221,12 @@ static inline struct proc_dir_entry *proc_create(const char *name, | |||
226 | { | 221 | { |
227 | return NULL; | 222 | return NULL; |
228 | } | 223 | } |
224 | static inline struct proc_dir_entry *proc_create_data(const char *name, | ||
225 | mode_t mode, struct proc_dir_entry *parent, | ||
226 | const struct file_operations *proc_fops, void *data) | ||
227 | { | ||
228 | return NULL; | ||
229 | } | ||
229 | #define remove_proc_entry(name, parent) do {} while (0) | 230 | #define remove_proc_entry(name, parent) do {} while (0) |
230 | 231 | ||
231 | static inline struct proc_dir_entry *proc_symlink(const char *name, | 232 | static inline struct proc_dir_entry *proc_symlink(const char *name, |
@@ -236,16 +237,11 @@ static inline struct proc_dir_entry *proc_mkdir(const char *name, | |||
236 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, | 237 | static inline struct proc_dir_entry *create_proc_read_entry(const char *name, |
237 | mode_t mode, struct proc_dir_entry *base, | 238 | mode_t mode, struct proc_dir_entry *base, |
238 | read_proc_t *read_proc, void * data) { return NULL; } | 239 | read_proc_t *read_proc, void * data) { return NULL; } |
239 | static inline struct proc_dir_entry *create_proc_info_entry(const char *name, | ||
240 | mode_t mode, struct proc_dir_entry *base, get_info_t *get_info) | ||
241 | { return NULL; } | ||
242 | 240 | ||
243 | struct tty_driver; | 241 | struct tty_driver; |
244 | static inline void proc_tty_register_driver(struct tty_driver *driver) {}; | 242 | static inline void proc_tty_register_driver(struct tty_driver *driver) {}; |
245 | static inline void proc_tty_unregister_driver(struct tty_driver *driver) {}; | 243 | static inline void proc_tty_unregister_driver(struct tty_driver *driver) {}; |
246 | 244 | ||
247 | extern struct proc_dir_entry proc_root; | ||
248 | |||
249 | static inline int pid_ns_prepare_proc(struct pid_namespace *ns) | 245 | static inline int pid_ns_prepare_proc(struct pid_namespace *ns) |
250 | { | 246 | { |
251 | return 0; | 247 | return 0; |
@@ -255,6 +251,19 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns) | |||
255 | { | 251 | { |
256 | } | 252 | } |
257 | 253 | ||
254 | static inline void set_mm_exe_file(struct mm_struct *mm, | ||
255 | struct file *new_exe_file) | ||
256 | {} | ||
257 | |||
258 | static inline struct file *get_mm_exe_file(struct mm_struct *mm) | ||
259 | { | ||
260 | return NULL; | ||
261 | } | ||
262 | |||
263 | static inline void dup_mm_exe_file(struct mm_struct *oldmm, | ||
264 | struct mm_struct *newmm) | ||
265 | {} | ||
266 | |||
258 | #endif /* CONFIG_PROC_FS */ | 267 | #endif /* CONFIG_PROC_FS */ |
259 | 268 | ||
260 | #if !defined(CONFIG_PROC_KCORE) | 269 | #if !defined(CONFIG_PROC_KCORE) |
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h index 61363ce896d5..6d9e1fca098c 100644 --- a/include/linux/res_counter.h +++ b/include/linux/res_counter.h | |||
@@ -9,6 +9,8 @@ | |||
9 | * | 9 | * |
10 | * Author: Pavel Emelianov <xemul@openvz.org> | 10 | * Author: Pavel Emelianov <xemul@openvz.org> |
11 | * | 11 | * |
12 | * See Documentation/controllers/resource_counter.txt for more | ||
13 | * info about what this counter is. | ||
12 | */ | 14 | */ |
13 | 15 | ||
14 | #include <linux/cgroup.h> | 16 | #include <linux/cgroup.h> |
@@ -25,6 +27,10 @@ struct res_counter { | |||
25 | */ | 27 | */ |
26 | unsigned long long usage; | 28 | unsigned long long usage; |
27 | /* | 29 | /* |
30 | * the maximal value of the usage from the counter creation | ||
31 | */ | ||
32 | unsigned long long max_usage; | ||
33 | /* | ||
28 | * the limit that usage cannot exceed | 34 | * the limit that usage cannot exceed |
29 | */ | 35 | */ |
30 | unsigned long long limit; | 36 | unsigned long long limit; |
@@ -39,8 +45,9 @@ struct res_counter { | |||
39 | spinlock_t lock; | 45 | spinlock_t lock; |
40 | }; | 46 | }; |
41 | 47 | ||
42 | /* | 48 | /** |
43 | * Helpers to interact with userspace | 49 | * Helpers to interact with userspace |
50 | * res_counter_read_u64() - returns the value of the specified member. | ||
44 | * res_counter_read/_write - put/get the specified fields from the | 51 | * res_counter_read/_write - put/get the specified fields from the |
45 | * res_counter struct to/from the user | 52 | * res_counter struct to/from the user |
46 | * | 53 | * |
@@ -51,6 +58,8 @@ struct res_counter { | |||
51 | * @pos: and the offset. | 58 | * @pos: and the offset. |
52 | */ | 59 | */ |
53 | 60 | ||
61 | u64 res_counter_read_u64(struct res_counter *counter, int member); | ||
62 | |||
54 | ssize_t res_counter_read(struct res_counter *counter, int member, | 63 | ssize_t res_counter_read(struct res_counter *counter, int member, |
55 | const char __user *buf, size_t nbytes, loff_t *pos, | 64 | const char __user *buf, size_t nbytes, loff_t *pos, |
56 | int (*read_strategy)(unsigned long long val, char *s)); | 65 | int (*read_strategy)(unsigned long long val, char *s)); |
@@ -64,6 +73,7 @@ ssize_t res_counter_write(struct res_counter *counter, int member, | |||
64 | 73 | ||
65 | enum { | 74 | enum { |
66 | RES_USAGE, | 75 | RES_USAGE, |
76 | RES_MAX_USAGE, | ||
67 | RES_LIMIT, | 77 | RES_LIMIT, |
68 | RES_FAILCNT, | 78 | RES_FAILCNT, |
69 | }; | 79 | }; |
@@ -124,4 +134,21 @@ static inline bool res_counter_check_under_limit(struct res_counter *cnt) | |||
124 | return ret; | 134 | return ret; |
125 | } | 135 | } |
126 | 136 | ||
137 | static inline void res_counter_reset_max(struct res_counter *cnt) | ||
138 | { | ||
139 | unsigned long flags; | ||
140 | |||
141 | spin_lock_irqsave(&cnt->lock, flags); | ||
142 | cnt->max_usage = cnt->usage; | ||
143 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
144 | } | ||
145 | |||
146 | static inline void res_counter_reset_failcnt(struct res_counter *cnt) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | |||
150 | spin_lock_irqsave(&cnt->lock, flags); | ||
151 | cnt->failcnt = 0; | ||
152 | spin_unlock_irqrestore(&cnt->lock, flags); | ||
153 | } | ||
127 | #endif | 154 | #endif |
diff --git a/include/linux/resource.h b/include/linux/resource.h index ae13db714742..aaa423a6f3d9 100644 --- a/include/linux/resource.h +++ b/include/linux/resource.h | |||
@@ -19,6 +19,7 @@ struct task_struct; | |||
19 | #define RUSAGE_SELF 0 | 19 | #define RUSAGE_SELF 0 |
20 | #define RUSAGE_CHILDREN (-1) | 20 | #define RUSAGE_CHILDREN (-1) |
21 | #define RUSAGE_BOTH (-2) /* sys_wait4() uses this */ | 21 | #define RUSAGE_BOTH (-2) /* sys_wait4() uses this */ |
22 | #define RUSAGE_THREAD 1 /* only the calling thread */ | ||
22 | 23 | ||
23 | struct rusage { | 24 | struct rusage { |
24 | struct timeval ru_utime; /* user time used */ | 25 | struct timeval ru_utime; /* user time used */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 024d72b47a0c..1d02babdb2c7 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -2148,6 +2148,19 @@ static inline void migration_init(void) | |||
2148 | #define TASK_SIZE_OF(tsk) TASK_SIZE | 2148 | #define TASK_SIZE_OF(tsk) TASK_SIZE |
2149 | #endif | 2149 | #endif |
2150 | 2150 | ||
2151 | #ifdef CONFIG_MM_OWNER | ||
2152 | extern void mm_update_next_owner(struct mm_struct *mm); | ||
2153 | extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p); | ||
2154 | #else | ||
2155 | static inline void mm_update_next_owner(struct mm_struct *mm) | ||
2156 | { | ||
2157 | } | ||
2158 | |||
2159 | static inline void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | ||
2160 | { | ||
2161 | } | ||
2162 | #endif /* CONFIG_MM_OWNER */ | ||
2163 | |||
2151 | #endif /* __KERNEL__ */ | 2164 | #endif /* __KERNEL__ */ |
2152 | 2165 | ||
2153 | #endif | 2166 | #endif |
diff --git a/include/linux/security.h b/include/linux/security.h index d0a28fd1747a..adb09d893ae0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -53,8 +53,9 @@ extern void cap_capset_set(struct task_struct *target, kernel_cap_t *effective, | |||
53 | extern int cap_bprm_set_security(struct linux_binprm *bprm); | 53 | extern int cap_bprm_set_security(struct linux_binprm *bprm); |
54 | extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); | 54 | extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe); |
55 | extern int cap_bprm_secureexec(struct linux_binprm *bprm); | 55 | extern int cap_bprm_secureexec(struct linux_binprm *bprm); |
56 | extern int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags); | 56 | extern int cap_inode_setxattr(struct dentry *dentry, const char *name, |
57 | extern int cap_inode_removexattr(struct dentry *dentry, char *name); | 57 | const void *value, size_t size, int flags); |
58 | extern int cap_inode_removexattr(struct dentry *dentry, const char *name); | ||
58 | extern int cap_inode_need_killpriv(struct dentry *dentry); | 59 | extern int cap_inode_need_killpriv(struct dentry *dentry); |
59 | extern int cap_inode_killpriv(struct dentry *dentry); | 60 | extern int cap_inode_killpriv(struct dentry *dentry); |
60 | extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); | 61 | extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags); |
@@ -1008,6 +1009,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1008 | * @perm describes the combination of permissions required of this key. | 1009 | * @perm describes the combination of permissions required of this key. |
1009 | * Return 1 if permission granted, 0 if permission denied and -ve it the | 1010 | * Return 1 if permission granted, 0 if permission denied and -ve it the |
1010 | * normal permissions model should be effected. | 1011 | * normal permissions model should be effected. |
1012 | * @key_getsecurity: | ||
1013 | * Get a textual representation of the security context attached to a key | ||
1014 | * for the purposes of honouring KEYCTL_GETSECURITY. This function | ||
1015 | * allocates the storage for the NUL-terminated string and the caller | ||
1016 | * should free it. | ||
1017 | * @key points to the key to be queried. | ||
1018 | * @_buffer points to a pointer that should be set to point to the | ||
1019 | * resulting string (if no label or an error occurs). | ||
1020 | * Return the length of the string (including terminating NUL) or -ve if | ||
1021 | * an error. | ||
1022 | * May also return 0 (and a NULL buffer pointer) if there is no label. | ||
1011 | * | 1023 | * |
1012 | * Security hooks affecting all System V IPC operations. | 1024 | * Security hooks affecting all System V IPC operations. |
1013 | * | 1025 | * |
@@ -1362,13 +1374,13 @@ struct security_operations { | |||
1362 | int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); | 1374 | int (*inode_setattr) (struct dentry *dentry, struct iattr *attr); |
1363 | int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); | 1375 | int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry); |
1364 | void (*inode_delete) (struct inode *inode); | 1376 | void (*inode_delete) (struct inode *inode); |
1365 | int (*inode_setxattr) (struct dentry *dentry, char *name, void *value, | 1377 | int (*inode_setxattr) (struct dentry *dentry, const char *name, |
1366 | size_t size, int flags); | 1378 | const void *value, size_t size, int flags); |
1367 | void (*inode_post_setxattr) (struct dentry *dentry, char *name, void *value, | 1379 | void (*inode_post_setxattr) (struct dentry *dentry, const char *name, |
1368 | size_t size, int flags); | 1380 | const void *value, size_t size, int flags); |
1369 | int (*inode_getxattr) (struct dentry *dentry, char *name); | 1381 | int (*inode_getxattr) (struct dentry *dentry, const char *name); |
1370 | int (*inode_listxattr) (struct dentry *dentry); | 1382 | int (*inode_listxattr) (struct dentry *dentry); |
1371 | int (*inode_removexattr) (struct dentry *dentry, char *name); | 1383 | int (*inode_removexattr) (struct dentry *dentry, const char *name); |
1372 | int (*inode_need_killpriv) (struct dentry *dentry); | 1384 | int (*inode_need_killpriv) (struct dentry *dentry); |
1373 | int (*inode_killpriv) (struct dentry *dentry); | 1385 | int (*inode_killpriv) (struct dentry *dentry); |
1374 | int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc); | 1386 | int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc); |
@@ -1537,7 +1549,7 @@ struct security_operations { | |||
1537 | int (*key_permission) (key_ref_t key_ref, | 1549 | int (*key_permission) (key_ref_t key_ref, |
1538 | struct task_struct *context, | 1550 | struct task_struct *context, |
1539 | key_perm_t perm); | 1551 | key_perm_t perm); |
1540 | 1552 | int (*key_getsecurity)(struct key *key, char **_buffer); | |
1541 | #endif /* CONFIG_KEYS */ | 1553 | #endif /* CONFIG_KEYS */ |
1542 | 1554 | ||
1543 | #ifdef CONFIG_AUDIT | 1555 | #ifdef CONFIG_AUDIT |
@@ -1633,13 +1645,13 @@ int security_inode_permission(struct inode *inode, int mask, struct nameidata *n | |||
1633 | int security_inode_setattr(struct dentry *dentry, struct iattr *attr); | 1645 | int security_inode_setattr(struct dentry *dentry, struct iattr *attr); |
1634 | int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); | 1646 | int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry); |
1635 | void security_inode_delete(struct inode *inode); | 1647 | void security_inode_delete(struct inode *inode); |
1636 | int security_inode_setxattr(struct dentry *dentry, char *name, | 1648 | int security_inode_setxattr(struct dentry *dentry, const char *name, |
1637 | void *value, size_t size, int flags); | 1649 | const void *value, size_t size, int flags); |
1638 | void security_inode_post_setxattr(struct dentry *dentry, char *name, | 1650 | void security_inode_post_setxattr(struct dentry *dentry, const char *name, |
1639 | void *value, size_t size, int flags); | 1651 | const void *value, size_t size, int flags); |
1640 | int security_inode_getxattr(struct dentry *dentry, char *name); | 1652 | int security_inode_getxattr(struct dentry *dentry, const char *name); |
1641 | int security_inode_listxattr(struct dentry *dentry); | 1653 | int security_inode_listxattr(struct dentry *dentry); |
1642 | int security_inode_removexattr(struct dentry *dentry, char *name); | 1654 | int security_inode_removexattr(struct dentry *dentry, const char *name); |
1643 | int security_inode_need_killpriv(struct dentry *dentry); | 1655 | int security_inode_need_killpriv(struct dentry *dentry); |
1644 | int security_inode_killpriv(struct dentry *dentry); | 1656 | int security_inode_killpriv(struct dentry *dentry); |
1645 | int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); | 1657 | int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc); |
@@ -2041,17 +2053,18 @@ static inline int security_inode_getattr(struct vfsmount *mnt, | |||
2041 | static inline void security_inode_delete(struct inode *inode) | 2053 | static inline void security_inode_delete(struct inode *inode) |
2042 | { } | 2054 | { } |
2043 | 2055 | ||
2044 | static inline int security_inode_setxattr(struct dentry *dentry, char *name, | 2056 | static inline int security_inode_setxattr(struct dentry *dentry, |
2045 | void *value, size_t size, int flags) | 2057 | const char *name, const void *value, size_t size, int flags) |
2046 | { | 2058 | { |
2047 | return cap_inode_setxattr(dentry, name, value, size, flags); | 2059 | return cap_inode_setxattr(dentry, name, value, size, flags); |
2048 | } | 2060 | } |
2049 | 2061 | ||
2050 | static inline void security_inode_post_setxattr(struct dentry *dentry, char *name, | 2062 | static inline void security_inode_post_setxattr(struct dentry *dentry, |
2051 | void *value, size_t size, int flags) | 2063 | const char *name, const void *value, size_t size, int flags) |
2052 | { } | 2064 | { } |
2053 | 2065 | ||
2054 | static inline int security_inode_getxattr(struct dentry *dentry, char *name) | 2066 | static inline int security_inode_getxattr(struct dentry *dentry, |
2067 | const char *name) | ||
2055 | { | 2068 | { |
2056 | return 0; | 2069 | return 0; |
2057 | } | 2070 | } |
@@ -2061,7 +2074,8 @@ static inline int security_inode_listxattr(struct dentry *dentry) | |||
2061 | return 0; | 2074 | return 0; |
2062 | } | 2075 | } |
2063 | 2076 | ||
2064 | static inline int security_inode_removexattr(struct dentry *dentry, char *name) | 2077 | static inline int security_inode_removexattr(struct dentry *dentry, |
2078 | const char *name) | ||
2065 | { | 2079 | { |
2066 | return cap_inode_removexattr(dentry, name); | 2080 | return cap_inode_removexattr(dentry, name); |
2067 | } | 2081 | } |
@@ -2729,6 +2743,7 @@ int security_key_alloc(struct key *key, struct task_struct *tsk, unsigned long f | |||
2729 | void security_key_free(struct key *key); | 2743 | void security_key_free(struct key *key); |
2730 | int security_key_permission(key_ref_t key_ref, | 2744 | int security_key_permission(key_ref_t key_ref, |
2731 | struct task_struct *context, key_perm_t perm); | 2745 | struct task_struct *context, key_perm_t perm); |
2746 | int security_key_getsecurity(struct key *key, char **_buffer); | ||
2732 | 2747 | ||
2733 | #else | 2748 | #else |
2734 | 2749 | ||
@@ -2750,6 +2765,12 @@ static inline int security_key_permission(key_ref_t key_ref, | |||
2750 | return 0; | 2765 | return 0; |
2751 | } | 2766 | } |
2752 | 2767 | ||
2768 | static inline int security_key_getsecurity(struct key *key, char **_buffer) | ||
2769 | { | ||
2770 | *_buffer = NULL; | ||
2771 | return 0; | ||
2772 | } | ||
2773 | |||
2753 | #endif | 2774 | #endif |
2754 | #endif /* CONFIG_KEYS */ | 2775 | #endif /* CONFIG_KEYS */ |
2755 | 2776 | ||
diff --git a/include/linux/smb.h b/include/linux/smb.h index f098dff93f6b..caa43b2370cb 100644 --- a/include/linux/smb.h +++ b/include/linux/smb.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/magic.h> | 13 | #include <linux/magic.h> |
14 | #include <linux/time.h> | ||
14 | 15 | ||
15 | enum smb_protocol { | 16 | enum smb_protocol { |
16 | SMB_PROTOCOL_NONE, | 17 | SMB_PROTOCOL_NONE, |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 8df6d1382ac8..0522f368f9d7 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -240,26 +240,28 @@ asmlinkage long sys_truncate64(const char __user *path, loff_t length); | |||
240 | asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); | 240 | asmlinkage long sys_ftruncate64(unsigned int fd, loff_t length); |
241 | #endif | 241 | #endif |
242 | 242 | ||
243 | asmlinkage long sys_setxattr(char __user *path, char __user *name, | 243 | asmlinkage long sys_setxattr(const char __user *path, const char __user *name, |
244 | void __user *value, size_t size, int flags); | 244 | const void __user *value, size_t size, int flags); |
245 | asmlinkage long sys_lsetxattr(char __user *path, char __user *name, | 245 | asmlinkage long sys_lsetxattr(const char __user *path, const char __user *name, |
246 | void __user *value, size_t size, int flags); | 246 | const void __user *value, size_t size, int flags); |
247 | asmlinkage long sys_fsetxattr(int fd, char __user *name, void __user *value, | 247 | asmlinkage long sys_fsetxattr(int fd, const char __user *name, |
248 | size_t size, int flags); | 248 | const void __user *value, size_t size, int flags); |
249 | asmlinkage ssize_t sys_getxattr(char __user *path, char __user *name, | 249 | asmlinkage ssize_t sys_getxattr(const char __user *path, const char __user *name, |
250 | void __user *value, size_t size); | 250 | void __user *value, size_t size); |
251 | asmlinkage ssize_t sys_lgetxattr(char __user *path, char __user *name, | 251 | asmlinkage ssize_t sys_lgetxattr(const char __user *path, const char __user *name, |
252 | void __user *value, size_t size); | 252 | void __user *value, size_t size); |
253 | asmlinkage ssize_t sys_fgetxattr(int fd, char __user *name, | 253 | asmlinkage ssize_t sys_fgetxattr(int fd, const char __user *name, |
254 | void __user *value, size_t size); | 254 | void __user *value, size_t size); |
255 | asmlinkage ssize_t sys_listxattr(char __user *path, char __user *list, | 255 | asmlinkage ssize_t sys_listxattr(const char __user *path, char __user *list, |
256 | size_t size); | 256 | size_t size); |
257 | asmlinkage ssize_t sys_llistxattr(char __user *path, char __user *list, | 257 | asmlinkage ssize_t sys_llistxattr(const char __user *path, char __user *list, |
258 | size_t size); | 258 | size_t size); |
259 | asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size); | 259 | asmlinkage ssize_t sys_flistxattr(int fd, char __user *list, size_t size); |
260 | asmlinkage long sys_removexattr(char __user *path, char __user *name); | 260 | asmlinkage long sys_removexattr(const char __user *path, |
261 | asmlinkage long sys_lremovexattr(char __user *path, char __user *name); | 261 | const char __user *name); |
262 | asmlinkage long sys_fremovexattr(int fd, char __user *name); | 262 | asmlinkage long sys_lremovexattr(const char __user *path, |
263 | const char __user *name); | ||
264 | asmlinkage long sys_fremovexattr(int fd, const char __user *name); | ||
263 | 265 | ||
264 | asmlinkage unsigned long sys_brk(unsigned long brk); | 266 | asmlinkage unsigned long sys_brk(unsigned long brk); |
265 | asmlinkage long sys_mprotect(unsigned long start, size_t len, | 267 | asmlinkage long sys_mprotect(unsigned long start, size_t len, |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 571f01d20a86..24141b4d1a11 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -945,11 +945,14 @@ enum | |||
945 | /* For the /proc/sys support */ | 945 | /* For the /proc/sys support */ |
946 | struct ctl_table; | 946 | struct ctl_table; |
947 | struct nsproxy; | 947 | struct nsproxy; |
948 | struct ctl_table_root; | ||
949 | |||
948 | extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); | 950 | extern struct ctl_table_header *sysctl_head_next(struct ctl_table_header *prev); |
949 | extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, | 951 | extern struct ctl_table_header *__sysctl_head_next(struct nsproxy *namespaces, |
950 | struct ctl_table_header *prev); | 952 | struct ctl_table_header *prev); |
951 | extern void sysctl_head_finish(struct ctl_table_header *prev); | 953 | extern void sysctl_head_finish(struct ctl_table_header *prev); |
952 | extern int sysctl_perm(struct ctl_table *table, int op); | 954 | extern int sysctl_perm(struct ctl_table_root *root, |
955 | struct ctl_table *table, int op); | ||
953 | 956 | ||
954 | typedef struct ctl_table ctl_table; | 957 | typedef struct ctl_table ctl_table; |
955 | 958 | ||
@@ -981,11 +984,6 @@ extern int do_sysctl (int __user *name, int nlen, | |||
981 | void __user *oldval, size_t __user *oldlenp, | 984 | void __user *oldval, size_t __user *oldlenp, |
982 | void __user *newval, size_t newlen); | 985 | void __user *newval, size_t newlen); |
983 | 986 | ||
984 | extern int do_sysctl_strategy (struct ctl_table *table, | ||
985 | int __user *name, int nlen, | ||
986 | void __user *oldval, size_t __user *oldlenp, | ||
987 | void __user *newval, size_t newlen); | ||
988 | |||
989 | extern ctl_handler sysctl_data; | 987 | extern ctl_handler sysctl_data; |
990 | extern ctl_handler sysctl_string; | 988 | extern ctl_handler sysctl_string; |
991 | extern ctl_handler sysctl_intvec; | 989 | extern ctl_handler sysctl_intvec; |
@@ -1054,6 +1052,8 @@ struct ctl_table_root { | |||
1054 | struct list_head header_list; | 1052 | struct list_head header_list; |
1055 | struct list_head *(*lookup)(struct ctl_table_root *root, | 1053 | struct list_head *(*lookup)(struct ctl_table_root *root, |
1056 | struct nsproxy *namespaces); | 1054 | struct nsproxy *namespaces); |
1055 | int (*permissions)(struct ctl_table_root *root, | ||
1056 | struct nsproxy *namespaces, struct ctl_table *table); | ||
1057 | }; | 1057 | }; |
1058 | 1058 | ||
1059 | /* struct ctl_table_header is used to maintain dynamic lists of | 1059 | /* struct ctl_table_header is used to maintain dynamic lists of |
@@ -1085,8 +1085,6 @@ struct ctl_table_header *register_sysctl_paths(const struct ctl_path *path, | |||
1085 | void unregister_sysctl_table(struct ctl_table_header * table); | 1085 | void unregister_sysctl_table(struct ctl_table_header * table); |
1086 | int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table); | 1086 | int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table); |
1087 | 1087 | ||
1088 | #else /* __KERNEL__ */ | ||
1089 | |||
1090 | #endif /* __KERNEL__ */ | 1088 | #endif /* __KERNEL__ */ |
1091 | 1089 | ||
1092 | #endif /* _LINUX_SYSCTL_H */ | 1090 | #endif /* _LINUX_SYSCTL_H */ |
diff --git a/include/linux/sysv_fs.h b/include/linux/sysv_fs.h index e0248631e461..96411306eec6 100644 --- a/include/linux/sysv_fs.h +++ b/include/linux/sysv_fs.h | |||
@@ -1,11 +1,7 @@ | |||
1 | #ifndef _LINUX_SYSV_FS_H | 1 | #ifndef _LINUX_SYSV_FS_H |
2 | #define _LINUX_SYSV_FS_H | 2 | #define _LINUX_SYSV_FS_H |
3 | 3 | ||
4 | #if defined(__GNUC__) | 4 | #define __packed2__ __attribute__((packed, aligned(2))) |
5 | # define __packed2__ __attribute__((packed, aligned(2))) | ||
6 | #else | ||
7 | >> I want to scream! << | ||
8 | #endif | ||
9 | 5 | ||
10 | 6 | ||
11 | #ifndef __KERNEL__ | 7 | #ifndef __KERNEL__ |
diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h new file mode 100644 index 000000000000..99c1b4d20b0f --- /dev/null +++ b/include/linux/unaligned/access_ok.h | |||
@@ -0,0 +1,67 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_ACCESS_OK_H | ||
2 | #define _LINUX_UNALIGNED_ACCESS_OK_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | #include <asm/byteorder.h> | ||
6 | |||
7 | static inline u16 get_unaligned_le16(const void *p) | ||
8 | { | ||
9 | return le16_to_cpup((__le16 *)p); | ||
10 | } | ||
11 | |||
12 | static inline u32 get_unaligned_le32(const void *p) | ||
13 | { | ||
14 | return le32_to_cpup((__le32 *)p); | ||
15 | } | ||
16 | |||
17 | static inline u64 get_unaligned_le64(const void *p) | ||
18 | { | ||
19 | return le64_to_cpup((__le64 *)p); | ||
20 | } | ||
21 | |||
22 | static inline u16 get_unaligned_be16(const void *p) | ||
23 | { | ||
24 | return be16_to_cpup((__be16 *)p); | ||
25 | } | ||
26 | |||
27 | static inline u32 get_unaligned_be32(const void *p) | ||
28 | { | ||
29 | return be32_to_cpup((__be32 *)p); | ||
30 | } | ||
31 | |||
32 | static inline u64 get_unaligned_be64(const void *p) | ||
33 | { | ||
34 | return be64_to_cpup((__be64 *)p); | ||
35 | } | ||
36 | |||
37 | static inline void put_unaligned_le16(u16 val, void *p) | ||
38 | { | ||
39 | *((__le16 *)p) = cpu_to_le16(val); | ||
40 | } | ||
41 | |||
42 | static inline void put_unaligned_le32(u32 val, void *p) | ||
43 | { | ||
44 | *((__le32 *)p) = cpu_to_le32(val); | ||
45 | } | ||
46 | |||
47 | static inline void put_unaligned_le64(u64 val, void *p) | ||
48 | { | ||
49 | *((__le64 *)p) = cpu_to_le64(val); | ||
50 | } | ||
51 | |||
52 | static inline void put_unaligned_be16(u16 val, void *p) | ||
53 | { | ||
54 | *((__be16 *)p) = cpu_to_be16(val); | ||
55 | } | ||
56 | |||
57 | static inline void put_unaligned_be32(u32 val, void *p) | ||
58 | { | ||
59 | *((__be32 *)p) = cpu_to_be32(val); | ||
60 | } | ||
61 | |||
62 | static inline void put_unaligned_be64(u64 val, void *p) | ||
63 | { | ||
64 | *((__be64 *)p) = cpu_to_be64(val); | ||
65 | } | ||
66 | |||
67 | #endif /* _LINUX_UNALIGNED_ACCESS_OK_H */ | ||
diff --git a/include/linux/unaligned/be_byteshift.h b/include/linux/unaligned/be_byteshift.h new file mode 100644 index 000000000000..46dd12c5709e --- /dev/null +++ b/include/linux/unaligned/be_byteshift.h | |||
@@ -0,0 +1,70 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_BE_BYTESHIFT_H | ||
2 | #define _LINUX_UNALIGNED_BE_BYTESHIFT_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | static inline u16 __get_unaligned_be16(const u8 *p) | ||
7 | { | ||
8 | return p[0] << 8 | p[1]; | ||
9 | } | ||
10 | |||
11 | static inline u32 __get_unaligned_be32(const u8 *p) | ||
12 | { | ||
13 | return p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; | ||
14 | } | ||
15 | |||
16 | static inline u64 __get_unaligned_be64(const u8 *p) | ||
17 | { | ||
18 | return (u64)__get_unaligned_be32(p) << 32 | | ||
19 | __get_unaligned_be32(p + 4); | ||
20 | } | ||
21 | |||
22 | static inline void __put_unaligned_be16(u16 val, u8 *p) | ||
23 | { | ||
24 | *p++ = val >> 8; | ||
25 | *p++ = val; | ||
26 | } | ||
27 | |||
28 | static inline void __put_unaligned_be32(u32 val, u8 *p) | ||
29 | { | ||
30 | __put_unaligned_be16(val >> 16, p); | ||
31 | __put_unaligned_be16(val, p + 2); | ||
32 | } | ||
33 | |||
34 | static inline void __put_unaligned_be64(u64 val, u8 *p) | ||
35 | { | ||
36 | __put_unaligned_be32(val >> 32, p); | ||
37 | __put_unaligned_be32(val, p + 4); | ||
38 | } | ||
39 | |||
40 | static inline u16 get_unaligned_be16(const void *p) | ||
41 | { | ||
42 | return __get_unaligned_be16((const u8 *)p); | ||
43 | } | ||
44 | |||
45 | static inline u32 get_unaligned_be32(const void *p) | ||
46 | { | ||
47 | return __get_unaligned_be32((const u8 *)p); | ||
48 | } | ||
49 | |||
50 | static inline u64 get_unaligned_be64(const void *p) | ||
51 | { | ||
52 | return __get_unaligned_be64((const u8 *)p); | ||
53 | } | ||
54 | |||
55 | static inline void put_unaligned_be16(u16 val, void *p) | ||
56 | { | ||
57 | __put_unaligned_be16(val, p); | ||
58 | } | ||
59 | |||
60 | static inline void put_unaligned_be32(u32 val, void *p) | ||
61 | { | ||
62 | __put_unaligned_be32(val, p); | ||
63 | } | ||
64 | |||
65 | static inline void put_unaligned_be64(u64 val, void *p) | ||
66 | { | ||
67 | __put_unaligned_be64(val, p); | ||
68 | } | ||
69 | |||
70 | #endif /* _LINUX_UNALIGNED_BE_BYTESHIFT_H */ | ||
diff --git a/include/linux/unaligned/be_memmove.h b/include/linux/unaligned/be_memmove.h new file mode 100644 index 000000000000..c2a76c5c9ed0 --- /dev/null +++ b/include/linux/unaligned/be_memmove.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_BE_MEMMOVE_H | ||
2 | #define _LINUX_UNALIGNED_BE_MEMMOVE_H | ||
3 | |||
4 | #include <linux/unaligned/memmove.h> | ||
5 | |||
6 | static inline u16 get_unaligned_be16(const void *p) | ||
7 | { | ||
8 | return __get_unaligned_memmove16((const u8 *)p); | ||
9 | } | ||
10 | |||
11 | static inline u32 get_unaligned_be32(const void *p) | ||
12 | { | ||
13 | return __get_unaligned_memmove32((const u8 *)p); | ||
14 | } | ||
15 | |||
16 | static inline u64 get_unaligned_be64(const void *p) | ||
17 | { | ||
18 | return __get_unaligned_memmove64((const u8 *)p); | ||
19 | } | ||
20 | |||
21 | static inline void put_unaligned_be16(u16 val, void *p) | ||
22 | { | ||
23 | __put_unaligned_memmove16(val, p); | ||
24 | } | ||
25 | |||
26 | static inline void put_unaligned_be32(u32 val, void *p) | ||
27 | { | ||
28 | __put_unaligned_memmove32(val, p); | ||
29 | } | ||
30 | |||
31 | static inline void put_unaligned_be64(u64 val, void *p) | ||
32 | { | ||
33 | __put_unaligned_memmove64(val, p); | ||
34 | } | ||
35 | |||
36 | #endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ | ||
diff --git a/include/linux/unaligned/be_struct.h b/include/linux/unaligned/be_struct.h new file mode 100644 index 000000000000..132415836c50 --- /dev/null +++ b/include/linux/unaligned/be_struct.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_BE_STRUCT_H | ||
2 | #define _LINUX_UNALIGNED_BE_STRUCT_H | ||
3 | |||
4 | #include <linux/unaligned/packed_struct.h> | ||
5 | |||
6 | static inline u16 get_unaligned_be16(const void *p) | ||
7 | { | ||
8 | return __get_unaligned_cpu16((const u8 *)p); | ||
9 | } | ||
10 | |||
11 | static inline u32 get_unaligned_be32(const void *p) | ||
12 | { | ||
13 | return __get_unaligned_cpu32((const u8 *)p); | ||
14 | } | ||
15 | |||
16 | static inline u64 get_unaligned_be64(const void *p) | ||
17 | { | ||
18 | return __get_unaligned_cpu64((const u8 *)p); | ||
19 | } | ||
20 | |||
21 | static inline void put_unaligned_be16(u16 val, void *p) | ||
22 | { | ||
23 | __put_unaligned_cpu16(val, p); | ||
24 | } | ||
25 | |||
26 | static inline void put_unaligned_be32(u32 val, void *p) | ||
27 | { | ||
28 | __put_unaligned_cpu32(val, p); | ||
29 | } | ||
30 | |||
31 | static inline void put_unaligned_be64(u64 val, void *p) | ||
32 | { | ||
33 | __put_unaligned_cpu64(val, p); | ||
34 | } | ||
35 | |||
36 | #endif /* _LINUX_UNALIGNED_BE_STRUCT_H */ | ||
diff --git a/include/linux/unaligned/generic.h b/include/linux/unaligned/generic.h new file mode 100644 index 000000000000..02d97ff3df70 --- /dev/null +++ b/include/linux/unaligned/generic.h | |||
@@ -0,0 +1,68 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_GENERIC_H | ||
2 | #define _LINUX_UNALIGNED_GENERIC_H | ||
3 | |||
4 | /* | ||
5 | * Cause a link-time error if we try an unaligned access other than | ||
6 | * 1,2,4 or 8 bytes long | ||
7 | */ | ||
8 | extern void __bad_unaligned_access_size(void); | ||
9 | |||
10 | #define __get_unaligned_le(ptr) ((__force typeof(*(ptr)))({ \ | ||
11 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ | ||
12 | __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_le16((ptr)), \ | ||
13 | __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_le32((ptr)), \ | ||
14 | __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_le64((ptr)), \ | ||
15 | __bad_unaligned_access_size())))); \ | ||
16 | })) | ||
17 | |||
18 | #define __get_unaligned_be(ptr) ((__force typeof(*(ptr)))({ \ | ||
19 | __builtin_choose_expr(sizeof(*(ptr)) == 1, *(ptr), \ | ||
20 | __builtin_choose_expr(sizeof(*(ptr)) == 2, get_unaligned_be16((ptr)), \ | ||
21 | __builtin_choose_expr(sizeof(*(ptr)) == 4, get_unaligned_be32((ptr)), \ | ||
22 | __builtin_choose_expr(sizeof(*(ptr)) == 8, get_unaligned_be64((ptr)), \ | ||
23 | __bad_unaligned_access_size())))); \ | ||
24 | })) | ||
25 | |||
26 | #define __put_unaligned_le(val, ptr) ({ \ | ||
27 | void *__gu_p = (ptr); \ | ||
28 | switch (sizeof(*(ptr))) { \ | ||
29 | case 1: \ | ||
30 | *(u8 *)__gu_p = (__force u8)(val); \ | ||
31 | break; \ | ||
32 | case 2: \ | ||
33 | put_unaligned_le16((__force u16)(val), __gu_p); \ | ||
34 | break; \ | ||
35 | case 4: \ | ||
36 | put_unaligned_le32((__force u32)(val), __gu_p); \ | ||
37 | break; \ | ||
38 | case 8: \ | ||
39 | put_unaligned_le64((__force u64)(val), __gu_p); \ | ||
40 | break; \ | ||
41 | default: \ | ||
42 | __bad_unaligned_access_size(); \ | ||
43 | break; \ | ||
44 | } \ | ||
45 | (void)0; }) | ||
46 | |||
47 | #define __put_unaligned_be(val, ptr) ({ \ | ||
48 | void *__gu_p = (ptr); \ | ||
49 | switch (sizeof(*(ptr))) { \ | ||
50 | case 1: \ | ||
51 | *(u8 *)__gu_p = (__force u8)(val); \ | ||
52 | break; \ | ||
53 | case 2: \ | ||
54 | put_unaligned_be16((__force u16)(val), __gu_p); \ | ||
55 | break; \ | ||
56 | case 4: \ | ||
57 | put_unaligned_be32((__force u32)(val), __gu_p); \ | ||
58 | break; \ | ||
59 | case 8: \ | ||
60 | put_unaligned_be64((__force u64)(val), __gu_p); \ | ||
61 | break; \ | ||
62 | default: \ | ||
63 | __bad_unaligned_access_size(); \ | ||
64 | break; \ | ||
65 | } \ | ||
66 | (void)0; }) | ||
67 | |||
68 | #endif /* _LINUX_UNALIGNED_GENERIC_H */ | ||
diff --git a/include/linux/unaligned/le_byteshift.h b/include/linux/unaligned/le_byteshift.h new file mode 100644 index 000000000000..59777e951baf --- /dev/null +++ b/include/linux/unaligned/le_byteshift.h | |||
@@ -0,0 +1,70 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_LE_BYTESHIFT_H | ||
2 | #define _LINUX_UNALIGNED_LE_BYTESHIFT_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | static inline u16 __get_unaligned_le16(const u8 *p) | ||
7 | { | ||
8 | return p[0] | p[1] << 8; | ||
9 | } | ||
10 | |||
11 | static inline u32 __get_unaligned_le32(const u8 *p) | ||
12 | { | ||
13 | return p[0] | p[1] << 8 | p[2] << 16 | p[3] << 24; | ||
14 | } | ||
15 | |||
16 | static inline u64 __get_unaligned_le64(const u8 *p) | ||
17 | { | ||
18 | return (u64)__get_unaligned_le32(p + 4) << 32 | | ||
19 | __get_unaligned_le32(p); | ||
20 | } | ||
21 | |||
22 | static inline void __put_unaligned_le16(u16 val, u8 *p) | ||
23 | { | ||
24 | *p++ = val; | ||
25 | *p++ = val >> 8; | ||
26 | } | ||
27 | |||
28 | static inline void __put_unaligned_le32(u32 val, u8 *p) | ||
29 | { | ||
30 | __put_unaligned_le16(val >> 16, p + 2); | ||
31 | __put_unaligned_le16(val, p); | ||
32 | } | ||
33 | |||
34 | static inline void __put_unaligned_le64(u64 val, u8 *p) | ||
35 | { | ||
36 | __put_unaligned_le32(val >> 32, p + 4); | ||
37 | __put_unaligned_le32(val, p); | ||
38 | } | ||
39 | |||
40 | static inline u16 get_unaligned_le16(const void *p) | ||
41 | { | ||
42 | return __get_unaligned_le16((const u8 *)p); | ||
43 | } | ||
44 | |||
45 | static inline u32 get_unaligned_le32(const void *p) | ||
46 | { | ||
47 | return __get_unaligned_le32((const u8 *)p); | ||
48 | } | ||
49 | |||
50 | static inline u64 get_unaligned_le64(const void *p) | ||
51 | { | ||
52 | return __get_unaligned_le64((const u8 *)p); | ||
53 | } | ||
54 | |||
55 | static inline void put_unaligned_le16(u16 val, void *p) | ||
56 | { | ||
57 | __put_unaligned_le16(val, p); | ||
58 | } | ||
59 | |||
60 | static inline void put_unaligned_le32(u32 val, void *p) | ||
61 | { | ||
62 | __put_unaligned_le32(val, p); | ||
63 | } | ||
64 | |||
65 | static inline void put_unaligned_le64(u64 val, void *p) | ||
66 | { | ||
67 | __put_unaligned_le64(val, p); | ||
68 | } | ||
69 | |||
70 | #endif /* _LINUX_UNALIGNED_LE_BYTESHIFT_H */ | ||
diff --git a/include/linux/unaligned/le_memmove.h b/include/linux/unaligned/le_memmove.h new file mode 100644 index 000000000000..269849bee4ec --- /dev/null +++ b/include/linux/unaligned/le_memmove.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_LE_MEMMOVE_H | ||
2 | #define _LINUX_UNALIGNED_LE_MEMMOVE_H | ||
3 | |||
4 | #include <linux/unaligned/memmove.h> | ||
5 | |||
6 | static inline u16 get_unaligned_le16(const void *p) | ||
7 | { | ||
8 | return __get_unaligned_memmove16((const u8 *)p); | ||
9 | } | ||
10 | |||
11 | static inline u32 get_unaligned_le32(const void *p) | ||
12 | { | ||
13 | return __get_unaligned_memmove32((const u8 *)p); | ||
14 | } | ||
15 | |||
16 | static inline u64 get_unaligned_le64(const void *p) | ||
17 | { | ||
18 | return __get_unaligned_memmove64((const u8 *)p); | ||
19 | } | ||
20 | |||
21 | static inline void put_unaligned_le16(u16 val, void *p) | ||
22 | { | ||
23 | __put_unaligned_memmove16(val, p); | ||
24 | } | ||
25 | |||
26 | static inline void put_unaligned_le32(u32 val, void *p) | ||
27 | { | ||
28 | __put_unaligned_memmove32(val, p); | ||
29 | } | ||
30 | |||
31 | static inline void put_unaligned_le64(u64 val, void *p) | ||
32 | { | ||
33 | __put_unaligned_memmove64(val, p); | ||
34 | } | ||
35 | |||
36 | #endif /* _LINUX_UNALIGNED_LE_MEMMOVE_H */ | ||
diff --git a/include/linux/unaligned/le_struct.h b/include/linux/unaligned/le_struct.h new file mode 100644 index 000000000000..088c4572faa8 --- /dev/null +++ b/include/linux/unaligned/le_struct.h | |||
@@ -0,0 +1,36 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_LE_STRUCT_H | ||
2 | #define _LINUX_UNALIGNED_LE_STRUCT_H | ||
3 | |||
4 | #include <linux/unaligned/packed_struct.h> | ||
5 | |||
6 | static inline u16 get_unaligned_le16(const void *p) | ||
7 | { | ||
8 | return __get_unaligned_cpu16((const u8 *)p); | ||
9 | } | ||
10 | |||
11 | static inline u32 get_unaligned_le32(const void *p) | ||
12 | { | ||
13 | return __get_unaligned_cpu32((const u8 *)p); | ||
14 | } | ||
15 | |||
16 | static inline u64 get_unaligned_le64(const void *p) | ||
17 | { | ||
18 | return __get_unaligned_cpu64((const u8 *)p); | ||
19 | } | ||
20 | |||
21 | static inline void put_unaligned_le16(u16 val, void *p) | ||
22 | { | ||
23 | __put_unaligned_cpu16(val, p); | ||
24 | } | ||
25 | |||
26 | static inline void put_unaligned_le32(u32 val, void *p) | ||
27 | { | ||
28 | __put_unaligned_cpu32(val, p); | ||
29 | } | ||
30 | |||
31 | static inline void put_unaligned_le64(u64 val, void *p) | ||
32 | { | ||
33 | __put_unaligned_cpu64(val, p); | ||
34 | } | ||
35 | |||
36 | #endif /* _LINUX_UNALIGNED_LE_STRUCT_H */ | ||
diff --git a/include/linux/unaligned/memmove.h b/include/linux/unaligned/memmove.h new file mode 100644 index 000000000000..eeb5a779a4fd --- /dev/null +++ b/include/linux/unaligned/memmove.h | |||
@@ -0,0 +1,45 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_MEMMOVE_H | ||
2 | #define _LINUX_UNALIGNED_MEMMOVE_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | #include <linux/string.h> | ||
6 | |||
7 | /* Use memmove here, so gcc does not insert a __builtin_memcpy. */ | ||
8 | |||
9 | static inline u16 __get_unaligned_memmove16(const void *p) | ||
10 | { | ||
11 | u16 tmp; | ||
12 | memmove(&tmp, p, 2); | ||
13 | return tmp; | ||
14 | } | ||
15 | |||
16 | static inline u32 __get_unaligned_memmove32(const void *p) | ||
17 | { | ||
18 | u32 tmp; | ||
19 | memmove(&tmp, p, 4); | ||
20 | return tmp; | ||
21 | } | ||
22 | |||
23 | static inline u64 __get_unaligned_memmove64(const void *p) | ||
24 | { | ||
25 | u64 tmp; | ||
26 | memmove(&tmp, p, 8); | ||
27 | return tmp; | ||
28 | } | ||
29 | |||
30 | static inline void __put_unaligned_memmove16(u16 val, void *p) | ||
31 | { | ||
32 | memmove(p, &val, 2); | ||
33 | } | ||
34 | |||
35 | static inline void __put_unaligned_memmove32(u32 val, void *p) | ||
36 | { | ||
37 | memmove(p, &val, 4); | ||
38 | } | ||
39 | |||
40 | static inline void __put_unaligned_memmove64(u64 val, void *p) | ||
41 | { | ||
42 | memmove(p, &val, 8); | ||
43 | } | ||
44 | |||
45 | #endif /* _LINUX_UNALIGNED_MEMMOVE_H */ | ||
diff --git a/include/linux/unaligned/packed_struct.h b/include/linux/unaligned/packed_struct.h new file mode 100644 index 000000000000..2498bb9fe002 --- /dev/null +++ b/include/linux/unaligned/packed_struct.h | |||
@@ -0,0 +1,46 @@ | |||
1 | #ifndef _LINUX_UNALIGNED_PACKED_STRUCT_H | ||
2 | #define _LINUX_UNALIGNED_PACKED_STRUCT_H | ||
3 | |||
4 | #include <linux/kernel.h> | ||
5 | |||
6 | struct __una_u16 { u16 x __attribute__((packed)); }; | ||
7 | struct __una_u32 { u32 x __attribute__((packed)); }; | ||
8 | struct __una_u64 { u64 x __attribute__((packed)); }; | ||
9 | |||
10 | static inline u16 __get_unaligned_cpu16(const void *p) | ||
11 | { | ||
12 | const struct __una_u16 *ptr = (const struct __una_u16 *)p; | ||
13 | return ptr->x; | ||
14 | } | ||
15 | |||
16 | static inline u32 __get_unaligned_cpu32(const void *p) | ||
17 | { | ||
18 | const struct __una_u32 *ptr = (const struct __una_u32 *)p; | ||
19 | return ptr->x; | ||
20 | } | ||
21 | |||
22 | static inline u64 __get_unaligned_cpu64(const void *p) | ||
23 | { | ||
24 | const struct __una_u64 *ptr = (const struct __una_u64 *)p; | ||
25 | return ptr->x; | ||
26 | } | ||
27 | |||
28 | static inline void __put_unaligned_cpu16(u16 val, void *p) | ||
29 | { | ||
30 | struct __una_u16 *ptr = (struct __una_u16 *)p; | ||
31 | ptr->x = val; | ||
32 | } | ||
33 | |||
34 | static inline void __put_unaligned_cpu32(u32 val, void *p) | ||
35 | { | ||
36 | struct __una_u32 *ptr = (struct __una_u32 *)p; | ||
37 | ptr->x = val; | ||
38 | } | ||
39 | |||
40 | static inline void __put_unaligned_cpu64(u64 val, void *p) | ||
41 | { | ||
42 | struct __una_u64 *ptr = (struct __una_u64 *)p; | ||
43 | ptr->x = val; | ||
44 | } | ||
45 | |||
46 | #endif /* _LINUX_UNALIGNED_PACKED_STRUCT_H */ | ||
diff --git a/include/linux/xattr.h b/include/linux/xattr.h index df6b95d2218e..d131e352cfe1 100644 --- a/include/linux/xattr.h +++ b/include/linux/xattr.h | |||
@@ -47,10 +47,10 @@ struct xattr_handler { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); | 49 | ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t); |
50 | ssize_t vfs_getxattr(struct dentry *, char *, void *, size_t); | 50 | ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t); |
51 | ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); | 51 | ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size); |
52 | int vfs_setxattr(struct dentry *, char *, void *, size_t, int); | 52 | int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int); |
53 | int vfs_removexattr(struct dentry *, char *); | 53 | int vfs_removexattr(struct dentry *, const char *); |
54 | 54 | ||
55 | ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); | 55 | ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size); |
56 | ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); | 56 | ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size); |
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 22298423cf0b..9ee0d2e51b16 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h | |||
@@ -62,7 +62,7 @@ struct ib_umem_chunk { | |||
62 | #ifdef CONFIG_INFINIBAND_USER_MEM | 62 | #ifdef CONFIG_INFINIBAND_USER_MEM |
63 | 63 | ||
64 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, | 64 | struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr, |
65 | size_t size, int access); | 65 | size_t size, int access, int dmasync); |
66 | void ib_umem_release(struct ib_umem *umem); | 66 | void ib_umem_release(struct ib_umem *umem); |
67 | int ib_umem_page_count(struct ib_umem *umem); | 67 | int ib_umem_page_count(struct ib_umem *umem); |
68 | 68 | ||
@@ -72,7 +72,7 @@ int ib_umem_page_count(struct ib_umem *umem); | |||
72 | 72 | ||
73 | static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, | 73 | static inline struct ib_umem *ib_umem_get(struct ib_ucontext *context, |
74 | unsigned long addr, size_t size, | 74 | unsigned long addr, size_t size, |
75 | int access) { | 75 | int access, int dmasync) { |
76 | return ERR_PTR(-EINVAL); | 76 | return ERR_PTR(-EINVAL); |
77 | } | 77 | } |
78 | static inline void ib_umem_release(struct ib_umem *umem) { } | 78 | static inline void ib_umem_release(struct ib_umem *umem) { } |
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2dcbecce3f61..911a661b7278 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h | |||
@@ -1542,6 +1542,24 @@ static inline void ib_dma_unmap_single(struct ib_device *dev, | |||
1542 | dma_unmap_single(dev->dma_device, addr, size, direction); | 1542 | dma_unmap_single(dev->dma_device, addr, size, direction); |
1543 | } | 1543 | } |
1544 | 1544 | ||
1545 | static inline u64 ib_dma_map_single_attrs(struct ib_device *dev, | ||
1546 | void *cpu_addr, size_t size, | ||
1547 | enum dma_data_direction direction, | ||
1548 | struct dma_attrs *attrs) | ||
1549 | { | ||
1550 | return dma_map_single_attrs(dev->dma_device, cpu_addr, size, | ||
1551 | direction, attrs); | ||
1552 | } | ||
1553 | |||
1554 | static inline void ib_dma_unmap_single_attrs(struct ib_device *dev, | ||
1555 | u64 addr, size_t size, | ||
1556 | enum dma_data_direction direction, | ||
1557 | struct dma_attrs *attrs) | ||
1558 | { | ||
1559 | return dma_unmap_single_attrs(dev->dma_device, addr, size, | ||
1560 | direction, attrs); | ||
1561 | } | ||
1562 | |||
1545 | /** | 1563 | /** |
1546 | * ib_dma_map_page - Map a physical page to DMA address | 1564 | * ib_dma_map_page - Map a physical page to DMA address |
1547 | * @dev: The device for which the dma_addr is to be created | 1565 | * @dev: The device for which the dma_addr is to be created |
@@ -1611,6 +1629,21 @@ static inline void ib_dma_unmap_sg(struct ib_device *dev, | |||
1611 | dma_unmap_sg(dev->dma_device, sg, nents, direction); | 1629 | dma_unmap_sg(dev->dma_device, sg, nents, direction); |
1612 | } | 1630 | } |
1613 | 1631 | ||
1632 | static inline int ib_dma_map_sg_attrs(struct ib_device *dev, | ||
1633 | struct scatterlist *sg, int nents, | ||
1634 | enum dma_data_direction direction, | ||
1635 | struct dma_attrs *attrs) | ||
1636 | { | ||
1637 | return dma_map_sg_attrs(dev->dma_device, sg, nents, direction, attrs); | ||
1638 | } | ||
1639 | |||
1640 | static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev, | ||
1641 | struct scatterlist *sg, int nents, | ||
1642 | enum dma_data_direction direction, | ||
1643 | struct dma_attrs *attrs) | ||
1644 | { | ||
1645 | dma_unmap_sg_attrs(dev->dma_device, sg, nents, direction, attrs); | ||
1646 | } | ||
1614 | /** | 1647 | /** |
1615 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry | 1648 | * ib_sg_dma_address - Return the DMA address from a scatter/gather entry |
1616 | * @dev: The device for which the DMA addresses were created | 1649 | * @dev: The device for which the DMA addresses were created |
diff --git a/init/Kconfig b/init/Kconfig index da071c4bbfb7..3e7b257fc05f 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -259,17 +259,14 @@ config IKCONFIG_PROC | |||
259 | config LOG_BUF_SHIFT | 259 | config LOG_BUF_SHIFT |
260 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" | 260 | int "Kernel log buffer size (16 => 64KB, 17 => 128KB)" |
261 | range 12 21 | 261 | range 12 21 |
262 | default 17 if S390 || LOCKDEP | 262 | default 17 |
263 | default 16 if X86_NUMAQ || IA64 | ||
264 | default 15 if SMP | ||
265 | default 14 | ||
266 | help | 263 | help |
267 | Select kernel log buffer size as a power of 2. | 264 | Select kernel log buffer size as a power of 2. |
268 | Defaults and Examples: | 265 | Examples: |
269 | 17 => 128 KB for S/390 | 266 | 17 => 128 KB |
270 | 16 => 64 KB for x86 NUMAQ or IA-64 | 267 | 16 => 64 KB |
271 | 15 => 32 KB for SMP | 268 | 15 => 32 KB |
272 | 14 => 16 KB for uniprocessor | 269 | 14 => 16 KB |
273 | 13 => 8 KB | 270 | 13 => 8 KB |
274 | 12 => 4 KB | 271 | 12 => 4 KB |
275 | 272 | ||
@@ -284,6 +281,7 @@ config CGROUPS | |||
284 | config CGROUP_DEBUG | 281 | config CGROUP_DEBUG |
285 | bool "Example debug cgroup subsystem" | 282 | bool "Example debug cgroup subsystem" |
286 | depends on CGROUPS | 283 | depends on CGROUPS |
284 | default n | ||
287 | help | 285 | help |
288 | This option enables a simple cgroup subsystem that | 286 | This option enables a simple cgroup subsystem that |
289 | exports useful debugging information about the cgroups | 287 | exports useful debugging information about the cgroups |
@@ -300,6 +298,13 @@ config CGROUP_NS | |||
300 | for instance virtual servers and checkpoint/restart | 298 | for instance virtual servers and checkpoint/restart |
301 | jobs. | 299 | jobs. |
302 | 300 | ||
301 | config CGROUP_DEVICE | ||
302 | bool "Device controller for cgroups" | ||
303 | depends on CGROUPS && EXPERIMENTAL | ||
304 | help | ||
305 | Provides a cgroup implementing whitelists for devices which | ||
306 | a process in the cgroup can mknod or open. | ||
307 | |||
303 | config CPUSETS | 308 | config CPUSETS |
304 | bool "Cpuset support" | 309 | bool "Cpuset support" |
305 | depends on SMP && CGROUPS | 310 | depends on SMP && CGROUPS |
@@ -373,9 +378,13 @@ config RESOURCE_COUNTERS | |||
373 | infrastructure that works with cgroups | 378 | infrastructure that works with cgroups |
374 | depends on CGROUPS | 379 | depends on CGROUPS |
375 | 380 | ||
381 | config MM_OWNER | ||
382 | bool | ||
383 | |||
376 | config CGROUP_MEM_RES_CTLR | 384 | config CGROUP_MEM_RES_CTLR |
377 | bool "Memory Resource Controller for Control Groups" | 385 | bool "Memory Resource Controller for Control Groups" |
378 | depends on CGROUPS && RESOURCE_COUNTERS | 386 | depends on CGROUPS && RESOURCE_COUNTERS |
387 | select MM_OWNER | ||
379 | help | 388 | help |
380 | Provides a memory resource controller that manages both page cache and | 389 | Provides a memory resource controller that manages both page cache and |
381 | RSS memory. | 390 | RSS memory. |
@@ -388,6 +397,9 @@ config CGROUP_MEM_RES_CTLR | |||
388 | Only enable when you're ok with these trade offs and really | 397 | Only enable when you're ok with these trade offs and really |
389 | sure you need the memory resource controller. | 398 | sure you need the memory resource controller. |
390 | 399 | ||
400 | This config option also selects MM_OWNER config option, which | ||
401 | could in turn add some fork/exit overhead. | ||
402 | |||
391 | config SYSFS_DEPRECATED | 403 | config SYSFS_DEPRECATED |
392 | bool | 404 | bool |
393 | 405 | ||
@@ -538,6 +550,17 @@ config SYSCTL_SYSCALL | |||
538 | 550 | ||
539 | If unsure say Y here. | 551 | If unsure say Y here. |
540 | 552 | ||
553 | config SYSCTL_SYSCALL_CHECK | ||
554 | bool "Sysctl checks" if EMBEDDED | ||
555 | depends on SYSCTL_SYSCALL | ||
556 | default y | ||
557 | ---help--- | ||
558 | sys_sysctl uses binary paths that have been found challenging | ||
559 | to properly maintain and use. This enables checks that help | ||
560 | you to keep things correct. | ||
561 | |||
562 | If unsure say Y here. | ||
563 | |||
541 | config KALLSYMS | 564 | config KALLSYMS |
542 | bool "Load all symbols for debugging/ksymoops" if EMBEDDED | 565 | bool "Load all symbols for debugging/ksymoops" if EMBEDDED |
543 | default y | 566 | default y |
diff --git a/init/initramfs.c b/init/initramfs.c index d53fee8d8604..8eeeccb328c9 100644 --- a/init/initramfs.c +++ b/init/initramfs.c | |||
@@ -57,7 +57,7 @@ static char __init *find_link(int major, int minor, int ino, | |||
57 | continue; | 57 | continue; |
58 | return (*p)->name; | 58 | return (*p)->name; |
59 | } | 59 | } |
60 | q = (struct hash *)malloc(sizeof(struct hash)); | 60 | q = kmalloc(sizeof(struct hash), GFP_KERNEL); |
61 | if (!q) | 61 | if (!q) |
62 | panic("can't allocate link hash entry"); | 62 | panic("can't allocate link hash entry"); |
63 | q->major = major; | 63 | q->major = major; |
@@ -77,7 +77,7 @@ static void __init free_hash(void) | |||
77 | while (*p) { | 77 | while (*p) { |
78 | q = *p; | 78 | q = *p; |
79 | *p = q->next; | 79 | *p = q->next; |
80 | free(q); | 80 | kfree(q); |
81 | } | 81 | } |
82 | } | 82 | } |
83 | } | 83 | } |
@@ -445,10 +445,10 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only) | |||
445 | { | 445 | { |
446 | int written; | 446 | int written; |
447 | dry_run = check_only; | 447 | dry_run = check_only; |
448 | header_buf = malloc(110); | 448 | header_buf = kmalloc(110, GFP_KERNEL); |
449 | symlink_buf = malloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1); | 449 | symlink_buf = kmalloc(PATH_MAX + N_ALIGN(PATH_MAX) + 1, GFP_KERNEL); |
450 | name_buf = malloc(N_ALIGN(PATH_MAX)); | 450 | name_buf = kmalloc(N_ALIGN(PATH_MAX), GFP_KERNEL); |
451 | window = malloc(WSIZE); | 451 | window = kmalloc(WSIZE, GFP_KERNEL); |
452 | if (!window || !header_buf || !symlink_buf || !name_buf) | 452 | if (!window || !header_buf || !symlink_buf || !name_buf) |
453 | panic("can't allocate buffers"); | 453 | panic("can't allocate buffers"); |
454 | state = Start; | 454 | state = Start; |
@@ -484,10 +484,10 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len, int check_only) | |||
484 | buf += inptr; | 484 | buf += inptr; |
485 | len -= inptr; | 485 | len -= inptr; |
486 | } | 486 | } |
487 | free(window); | 487 | kfree(window); |
488 | free(name_buf); | 488 | kfree(name_buf); |
489 | free(symlink_buf); | 489 | kfree(symlink_buf); |
490 | free(header_buf); | 490 | kfree(header_buf); |
491 | return message; | 491 | return message; |
492 | } | 492 | } |
493 | 493 | ||
diff --git a/init/main.c b/init/main.c index 1687b0167c4a..624266b524d4 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -58,6 +58,7 @@ | |||
58 | #include <linux/kthread.h> | 58 | #include <linux/kthread.h> |
59 | #include <linux/sched.h> | 59 | #include <linux/sched.h> |
60 | #include <linux/signal.h> | 60 | #include <linux/signal.h> |
61 | #include <linux/idr.h> | ||
61 | 62 | ||
62 | #include <asm/io.h> | 63 | #include <asm/io.h> |
63 | #include <asm/bugs.h> | 64 | #include <asm/bugs.h> |
@@ -559,6 +560,7 @@ asmlinkage void __init start_kernel(void) | |||
559 | printk(KERN_NOTICE); | 560 | printk(KERN_NOTICE); |
560 | printk(linux_banner); | 561 | printk(linux_banner); |
561 | setup_arch(&command_line); | 562 | setup_arch(&command_line); |
563 | mm_init_owner(&init_mm, &init_task); | ||
562 | setup_command_line(command_line); | 564 | setup_command_line(command_line); |
563 | unwind_setup(); | 565 | unwind_setup(); |
564 | setup_per_cpu_areas(); | 566 | setup_per_cpu_areas(); |
@@ -636,6 +638,7 @@ asmlinkage void __init start_kernel(void) | |||
636 | enable_debug_pagealloc(); | 638 | enable_debug_pagealloc(); |
637 | cpu_hotplug_init(); | 639 | cpu_hotplug_init(); |
638 | kmem_cache_init(); | 640 | kmem_cache_init(); |
641 | idr_init_cache(); | ||
639 | setup_per_cpu_pageset(); | 642 | setup_per_cpu_pageset(); |
640 | numa_policy_init(); | 643 | numa_policy_init(); |
641 | if (late_time_init) | 644 | if (late_time_init) |
@@ -700,10 +703,8 @@ static void __init do_initcalls(void) | |||
700 | int result; | 703 | int result; |
701 | 704 | ||
702 | if (initcall_debug) { | 705 | if (initcall_debug) { |
703 | printk("Calling initcall 0x%p", *call); | 706 | print_fn_descriptor_symbol("calling %s()\n", |
704 | print_fn_descriptor_symbol(": %s()", | ||
705 | (unsigned long) *call); | 707 | (unsigned long) *call); |
706 | printk("\n"); | ||
707 | t0 = ktime_get(); | 708 | t0 = ktime_get(); |
708 | } | 709 | } |
709 | 710 | ||
@@ -713,15 +714,10 @@ static void __init do_initcalls(void) | |||
713 | t1 = ktime_get(); | 714 | t1 = ktime_get(); |
714 | delta = ktime_sub(t1, t0); | 715 | delta = ktime_sub(t1, t0); |
715 | 716 | ||
716 | printk("initcall 0x%p", *call); | 717 | print_fn_descriptor_symbol("initcall %s()", |
717 | print_fn_descriptor_symbol(": %s()", | ||
718 | (unsigned long) *call); | 718 | (unsigned long) *call); |
719 | printk(" returned %d.\n", result); | 719 | printk(" returned %d after %Ld msecs\n", result, |
720 | 720 | (unsigned long long) delta.tv64 >> 20); | |
721 | printk("initcall 0x%p ran for %Ld msecs: ", | ||
722 | *call, (unsigned long long)delta.tv64 >> 20); | ||
723 | print_fn_descriptor_symbol("%s()\n", | ||
724 | (unsigned long) *call); | ||
725 | } | 721 | } |
726 | 722 | ||
727 | if (result && result != -ENODEV && initcall_debug) { | 723 | if (result && result != -ENODEV && initcall_debug) { |
@@ -737,10 +733,9 @@ static void __init do_initcalls(void) | |||
737 | local_irq_enable(); | 733 | local_irq_enable(); |
738 | } | 734 | } |
739 | if (msg) { | 735 | if (msg) { |
740 | printk(KERN_WARNING "initcall at 0x%p", *call); | 736 | print_fn_descriptor_symbol(KERN_WARNING "initcall %s()", |
741 | print_fn_descriptor_symbol(": %s()", | ||
742 | (unsigned long) *call); | 737 | (unsigned long) *call); |
743 | printk(": returned with %s\n", msg); | 738 | printk(" returned with %s\n", msg); |
744 | } | 739 | } |
745 | } | 740 | } |
746 | 741 | ||
diff --git a/ipc/Makefile b/ipc/Makefile index 5fc5e33ea047..65c384395801 100644 --- a/ipc/Makefile +++ b/ipc/Makefile | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o | 5 | obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o |
6 | obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o | 6 | obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o |
7 | obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o | 7 | obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o |
8 | obj_mq-$(CONFIG_COMPAT) += compat_mq.o | 8 | obj_mq-$(CONFIG_COMPAT) += compat_mq.o |
9 | obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y) | 9 | obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y) |
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 7f4235bed51b..d3497465cc0a 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/sysctl.h> | 15 | #include <linux/sysctl.h> |
16 | #include <linux/uaccess.h> | 16 | #include <linux/uaccess.h> |
17 | #include <linux/ipc_namespace.h> | 17 | #include <linux/ipc_namespace.h> |
18 | #include <linux/msg.h> | ||
19 | #include "util.h" | ||
18 | 20 | ||
19 | static void *get_ipc(ctl_table *table) | 21 | static void *get_ipc(ctl_table *table) |
20 | { | 22 | { |
@@ -24,6 +26,27 @@ static void *get_ipc(ctl_table *table) | |||
24 | return which; | 26 | return which; |
25 | } | 27 | } |
26 | 28 | ||
29 | /* | ||
30 | * Routine that is called when a tunable has successfully been changed by | ||
31 | * hand and it has a callback routine registered on the ipc namespace notifier | ||
32 | * chain: we don't want such tunables to be recomputed anymore upon memory | ||
33 | * add/remove or ipc namespace creation/removal. | ||
34 | * They can come back to a recomputable state by being set to a <0 value. | ||
35 | */ | ||
36 | static void tunable_set_callback(int val) | ||
37 | { | ||
38 | if (val >= 0) | ||
39 | unregister_ipcns_notifier(current->nsproxy->ipc_ns); | ||
40 | else { | ||
41 | /* | ||
42 | * Re-enable automatic recomputing only if not already | ||
43 | * enabled. | ||
44 | */ | ||
45 | recompute_msgmni(current->nsproxy->ipc_ns); | ||
46 | cond_register_ipcns_notifier(current->nsproxy->ipc_ns); | ||
47 | } | ||
48 | } | ||
49 | |||
27 | #ifdef CONFIG_PROC_FS | 50 | #ifdef CONFIG_PROC_FS |
28 | static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp, | 51 | static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp, |
29 | void __user *buffer, size_t *lenp, loff_t *ppos) | 52 | void __user *buffer, size_t *lenp, loff_t *ppos) |
@@ -35,6 +58,24 @@ static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp, | |||
35 | return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); | 58 | return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); |
36 | } | 59 | } |
37 | 60 | ||
61 | static int proc_ipc_callback_dointvec(ctl_table *table, int write, | ||
62 | struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) | ||
63 | { | ||
64 | struct ctl_table ipc_table; | ||
65 | size_t lenp_bef = *lenp; | ||
66 | int rc; | ||
67 | |||
68 | memcpy(&ipc_table, table, sizeof(ipc_table)); | ||
69 | ipc_table.data = get_ipc(table); | ||
70 | |||
71 | rc = proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); | ||
72 | |||
73 | if (write && !rc && lenp_bef == *lenp) | ||
74 | tunable_set_callback(*((int *)(ipc_table.data))); | ||
75 | |||
76 | return rc; | ||
77 | } | ||
78 | |||
38 | static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, | 79 | static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, |
39 | struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) | 80 | struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) |
40 | { | 81 | { |
@@ -49,6 +90,7 @@ static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, | |||
49 | #else | 90 | #else |
50 | #define proc_ipc_doulongvec_minmax NULL | 91 | #define proc_ipc_doulongvec_minmax NULL |
51 | #define proc_ipc_dointvec NULL | 92 | #define proc_ipc_dointvec NULL |
93 | #define proc_ipc_callback_dointvec NULL | ||
52 | #endif | 94 | #endif |
53 | 95 | ||
54 | #ifdef CONFIG_SYSCTL_SYSCALL | 96 | #ifdef CONFIG_SYSCTL_SYSCALL |
@@ -90,8 +132,30 @@ static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, | |||
90 | } | 132 | } |
91 | return 1; | 133 | return 1; |
92 | } | 134 | } |
135 | |||
136 | static int sysctl_ipc_registered_data(ctl_table *table, int __user *name, | ||
137 | int nlen, void __user *oldval, size_t __user *oldlenp, | ||
138 | void __user *newval, size_t newlen) | ||
139 | { | ||
140 | int rc; | ||
141 | |||
142 | rc = sysctl_ipc_data(table, name, nlen, oldval, oldlenp, newval, | ||
143 | newlen); | ||
144 | |||
145 | if (newval && newlen && rc > 0) { | ||
146 | /* | ||
147 | * Tunable has successfully been changed from userland | ||
148 | */ | ||
149 | int *data = get_ipc(table); | ||
150 | |||
151 | tunable_set_callback(*data); | ||
152 | } | ||
153 | |||
154 | return rc; | ||
155 | } | ||
93 | #else | 156 | #else |
94 | #define sysctl_ipc_data NULL | 157 | #define sysctl_ipc_data NULL |
158 | #define sysctl_ipc_registered_data NULL | ||
95 | #endif | 159 | #endif |
96 | 160 | ||
97 | static struct ctl_table ipc_kern_table[] = { | 161 | static struct ctl_table ipc_kern_table[] = { |
@@ -137,8 +201,8 @@ static struct ctl_table ipc_kern_table[] = { | |||
137 | .data = &init_ipc_ns.msg_ctlmni, | 201 | .data = &init_ipc_ns.msg_ctlmni, |
138 | .maxlen = sizeof (init_ipc_ns.msg_ctlmni), | 202 | .maxlen = sizeof (init_ipc_ns.msg_ctlmni), |
139 | .mode = 0644, | 203 | .mode = 0644, |
140 | .proc_handler = proc_ipc_dointvec, | 204 | .proc_handler = proc_ipc_callback_dointvec, |
141 | .strategy = sysctl_ipc_data, | 205 | .strategy = sysctl_ipc_registered_data, |
142 | }, | 206 | }, |
143 | { | 207 | { |
144 | .ctl_name = KERN_MSGMNB, | 208 | .ctl_name = KERN_MSGMNB, |
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c new file mode 100644 index 000000000000..70ff09183f7b --- /dev/null +++ b/ipc/ipcns_notifier.c | |||
@@ -0,0 +1,82 @@ | |||
1 | /* | ||
2 | * linux/ipc/ipcns_notifier.c | ||
3 | * Copyright (C) 2007 BULL SA. Nadia Derbey | ||
4 | * | ||
5 | * Notification mechanism for ipc namespaces: | ||
6 | * The callback routine registered in the memory chain invokes the ipcns | ||
7 | * notifier chain with the IPCNS_MEMCHANGED event. | ||
8 | * Each callback routine registered in the ipcns namespace recomputes msgmni | ||
9 | * for the owning namespace. | ||
10 | */ | ||
11 | |||
12 | #include <linux/msg.h> | ||
13 | #include <linux/rcupdate.h> | ||
14 | #include <linux/notifier.h> | ||
15 | #include <linux/nsproxy.h> | ||
16 | #include <linux/ipc_namespace.h> | ||
17 | |||
18 | #include "util.h" | ||
19 | |||
20 | |||
21 | |||
22 | static BLOCKING_NOTIFIER_HEAD(ipcns_chain); | ||
23 | |||
24 | |||
25 | static int ipcns_callback(struct notifier_block *self, | ||
26 | unsigned long action, void *arg) | ||
27 | { | ||
28 | struct ipc_namespace *ns; | ||
29 | |||
30 | switch (action) { | ||
31 | case IPCNS_MEMCHANGED: /* amount of lowmem has changed */ | ||
32 | case IPCNS_CREATED: | ||
33 | case IPCNS_REMOVED: | ||
34 | /* | ||
35 | * It's time to recompute msgmni | ||
36 | */ | ||
37 | ns = container_of(self, struct ipc_namespace, ipcns_nb); | ||
38 | /* | ||
39 | * No need to get a reference on the ns: the 1st job of | ||
40 | * free_ipc_ns() is to unregister the callback routine. | ||
41 | * blocking_notifier_chain_unregister takes the wr lock to do | ||
42 | * it. | ||
43 | * When this callback routine is called the rd lock is held by | ||
44 | * blocking_notifier_call_chain. | ||
45 | * So the ipc ns cannot be freed while we are here. | ||
46 | */ | ||
47 | recompute_msgmni(ns); | ||
48 | break; | ||
49 | default: | ||
50 | break; | ||
51 | } | ||
52 | |||
53 | return NOTIFY_OK; | ||
54 | } | ||
55 | |||
56 | int register_ipcns_notifier(struct ipc_namespace *ns) | ||
57 | { | ||
58 | memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); | ||
59 | ns->ipcns_nb.notifier_call = ipcns_callback; | ||
60 | ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; | ||
61 | return blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); | ||
62 | } | ||
63 | |||
64 | int cond_register_ipcns_notifier(struct ipc_namespace *ns) | ||
65 | { | ||
66 | memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); | ||
67 | ns->ipcns_nb.notifier_call = ipcns_callback; | ||
68 | ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; | ||
69 | return blocking_notifier_chain_cond_register(&ipcns_chain, | ||
70 | &ns->ipcns_nb); | ||
71 | } | ||
72 | |||
73 | int unregister_ipcns_notifier(struct ipc_namespace *ns) | ||
74 | { | ||
75 | return blocking_notifier_chain_unregister(&ipcns_chain, | ||
76 | &ns->ipcns_nb); | ||
77 | } | ||
78 | |||
79 | int ipcns_notify(unsigned long val) | ||
80 | { | ||
81 | return blocking_notifier_call_chain(&ipcns_chain, val, NULL); | ||
82 | } | ||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/msg.h> | 27 | #include <linux/msg.h> |
28 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/mm.h> | ||
30 | #include <linux/proc_fs.h> | 31 | #include <linux/proc_fs.h> |
31 | #include <linux/list.h> | 32 | #include <linux/list.h> |
32 | #include <linux/security.h> | 33 | #include <linux/security.h> |
@@ -70,7 +71,6 @@ struct msg_sender { | |||
70 | #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) | 71 | #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) |
71 | 72 | ||
72 | #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) | 73 | #define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) |
73 | #define msg_buildid(id, seq) ipc_buildid(id, seq) | ||
74 | 74 | ||
75 | static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); | 75 | static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); |
76 | static int newque(struct ipc_namespace *, struct ipc_params *); | 76 | static int newque(struct ipc_namespace *, struct ipc_params *); |
@@ -78,11 +78,49 @@ static int newque(struct ipc_namespace *, struct ipc_params *); | |||
78 | static int sysvipc_msg_proc_show(struct seq_file *s, void *it); | 78 | static int sysvipc_msg_proc_show(struct seq_file *s, void *it); |
79 | #endif | 79 | #endif |
80 | 80 | ||
81 | /* | ||
82 | * Scale msgmni with the available lowmem size: the memory dedicated to msg | ||
83 | * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. | ||
84 | * Also take into account the number of nsproxies created so far. | ||
85 | * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. | ||
86 | */ | ||
87 | void recompute_msgmni(struct ipc_namespace *ns) | ||
88 | { | ||
89 | struct sysinfo i; | ||
90 | unsigned long allowed; | ||
91 | int nb_ns; | ||
92 | |||
93 | si_meminfo(&i); | ||
94 | allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) | ||
95 | / MSGMNB; | ||
96 | nb_ns = atomic_read(&nr_ipc_ns); | ||
97 | allowed /= nb_ns; | ||
98 | |||
99 | if (allowed < MSGMNI) { | ||
100 | ns->msg_ctlmni = MSGMNI; | ||
101 | goto out_callback; | ||
102 | } | ||
103 | |||
104 | if (allowed > IPCMNI / nb_ns) { | ||
105 | ns->msg_ctlmni = IPCMNI / nb_ns; | ||
106 | goto out_callback; | ||
107 | } | ||
108 | |||
109 | ns->msg_ctlmni = allowed; | ||
110 | |||
111 | out_callback: | ||
112 | |||
113 | printk(KERN_INFO "msgmni has been set to %d for ipc namespace %p\n", | ||
114 | ns->msg_ctlmni, ns); | ||
115 | } | ||
116 | |||
81 | void msg_init_ns(struct ipc_namespace *ns) | 117 | void msg_init_ns(struct ipc_namespace *ns) |
82 | { | 118 | { |
83 | ns->msg_ctlmax = MSGMAX; | 119 | ns->msg_ctlmax = MSGMAX; |
84 | ns->msg_ctlmnb = MSGMNB; | 120 | ns->msg_ctlmnb = MSGMNB; |
85 | ns->msg_ctlmni = MSGMNI; | 121 | |
122 | recompute_msgmni(ns); | ||
123 | |||
86 | atomic_set(&ns->msg_bytes, 0); | 124 | atomic_set(&ns->msg_bytes, 0); |
87 | atomic_set(&ns->msg_hdrs, 0); | 125 | atomic_set(&ns->msg_hdrs, 0); |
88 | ipc_init_ids(&ns->ids[IPC_MSG_IDS]); | 126 | ipc_init_ids(&ns->ids[IPC_MSG_IDS]); |
@@ -104,21 +142,6 @@ void __init msg_init(void) | |||
104 | } | 142 | } |
105 | 143 | ||
106 | /* | 144 | /* |
107 | * This routine is called in the paths where the rw_mutex is held to protect | ||
108 | * access to the idr tree. | ||
109 | */ | ||
110 | static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns, | ||
111 | int id) | ||
112 | { | ||
113 | struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id); | ||
114 | |||
115 | if (IS_ERR(ipcp)) | ||
116 | return (struct msg_queue *)ipcp; | ||
117 | |||
118 | return container_of(ipcp, struct msg_queue, q_perm); | ||
119 | } | ||
120 | |||
121 | /* | ||
122 | * msg_lock_(check_) routines are called in the paths where the rw_mutex | 145 | * msg_lock_(check_) routines are called in the paths where the rw_mutex |
123 | * is not held. | 146 | * is not held. |
124 | */ | 147 | */ |
@@ -186,7 +209,6 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) | |||
186 | return id; | 209 | return id; |
187 | } | 210 | } |
188 | 211 | ||
189 | msq->q_perm.id = msg_buildid(id, msq->q_perm.seq); | ||
190 | msq->q_stime = msq->q_rtime = 0; | 212 | msq->q_stime = msq->q_rtime = 0; |
191 | msq->q_ctime = get_seconds(); | 213 | msq->q_ctime = get_seconds(); |
192 | msq->q_cbytes = msq->q_qnum = 0; | 214 | msq->q_cbytes = msq->q_qnum = 0; |
@@ -324,19 +346,19 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) | |||
324 | out.msg_rtime = in->msg_rtime; | 346 | out.msg_rtime = in->msg_rtime; |
325 | out.msg_ctime = in->msg_ctime; | 347 | out.msg_ctime = in->msg_ctime; |
326 | 348 | ||
327 | if (in->msg_cbytes > USHRT_MAX) | 349 | if (in->msg_cbytes > USHORT_MAX) |
328 | out.msg_cbytes = USHRT_MAX; | 350 | out.msg_cbytes = USHORT_MAX; |
329 | else | 351 | else |
330 | out.msg_cbytes = in->msg_cbytes; | 352 | out.msg_cbytes = in->msg_cbytes; |
331 | out.msg_lcbytes = in->msg_cbytes; | 353 | out.msg_lcbytes = in->msg_cbytes; |
332 | 354 | ||
333 | if (in->msg_qnum > USHRT_MAX) | 355 | if (in->msg_qnum > USHORT_MAX) |
334 | out.msg_qnum = USHRT_MAX; | 356 | out.msg_qnum = USHORT_MAX; |
335 | else | 357 | else |
336 | out.msg_qnum = in->msg_qnum; | 358 | out.msg_qnum = in->msg_qnum; |
337 | 359 | ||
338 | if (in->msg_qbytes > USHRT_MAX) | 360 | if (in->msg_qbytes > USHORT_MAX) |
339 | out.msg_qbytes = USHRT_MAX; | 361 | out.msg_qbytes = USHORT_MAX; |
340 | else | 362 | else |
341 | out.msg_qbytes = in->msg_qbytes; | 363 | out.msg_qbytes = in->msg_qbytes; |
342 | out.msg_lqbytes = in->msg_qbytes; | 364 | out.msg_lqbytes = in->msg_qbytes; |
@@ -351,31 +373,14 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) | |||
351 | } | 373 | } |
352 | } | 374 | } |
353 | 375 | ||
354 | struct msq_setbuf { | ||
355 | unsigned long qbytes; | ||
356 | uid_t uid; | ||
357 | gid_t gid; | ||
358 | mode_t mode; | ||
359 | }; | ||
360 | |||
361 | static inline unsigned long | 376 | static inline unsigned long |
362 | copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) | 377 | copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) |
363 | { | 378 | { |
364 | switch(version) { | 379 | switch(version) { |
365 | case IPC_64: | 380 | case IPC_64: |
366 | { | 381 | if (copy_from_user(out, buf, sizeof(*out))) |
367 | struct msqid64_ds tbuf; | ||
368 | |||
369 | if (copy_from_user(&tbuf, buf, sizeof(tbuf))) | ||
370 | return -EFAULT; | 382 | return -EFAULT; |
371 | |||
372 | out->qbytes = tbuf.msg_qbytes; | ||
373 | out->uid = tbuf.msg_perm.uid; | ||
374 | out->gid = tbuf.msg_perm.gid; | ||
375 | out->mode = tbuf.msg_perm.mode; | ||
376 | |||
377 | return 0; | 383 | return 0; |
378 | } | ||
379 | case IPC_OLD: | 384 | case IPC_OLD: |
380 | { | 385 | { |
381 | struct msqid_ds tbuf_old; | 386 | struct msqid_ds tbuf_old; |
@@ -383,14 +388,14 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) | |||
383 | if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) | 388 | if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) |
384 | return -EFAULT; | 389 | return -EFAULT; |
385 | 390 | ||
386 | out->uid = tbuf_old.msg_perm.uid; | 391 | out->msg_perm.uid = tbuf_old.msg_perm.uid; |
387 | out->gid = tbuf_old.msg_perm.gid; | 392 | out->msg_perm.gid = tbuf_old.msg_perm.gid; |
388 | out->mode = tbuf_old.msg_perm.mode; | 393 | out->msg_perm.mode = tbuf_old.msg_perm.mode; |
389 | 394 | ||
390 | if (tbuf_old.msg_qbytes == 0) | 395 | if (tbuf_old.msg_qbytes == 0) |
391 | out->qbytes = tbuf_old.msg_lqbytes; | 396 | out->msg_qbytes = tbuf_old.msg_lqbytes; |
392 | else | 397 | else |
393 | out->qbytes = tbuf_old.msg_qbytes; | 398 | out->msg_qbytes = tbuf_old.msg_qbytes; |
394 | 399 | ||
395 | return 0; | 400 | return 0; |
396 | } | 401 | } |
@@ -399,10 +404,71 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) | |||
399 | } | 404 | } |
400 | } | 405 | } |
401 | 406 | ||
402 | asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) | 407 | /* |
408 | * This function handles some msgctl commands which require the rw_mutex | ||
409 | * to be held in write mode. | ||
410 | * NOTE: no locks must be held, the rw_mutex is taken inside this function. | ||
411 | */ | ||
412 | static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, | ||
413 | struct msqid_ds __user *buf, int version) | ||
403 | { | 414 | { |
404 | struct kern_ipc_perm *ipcp; | 415 | struct kern_ipc_perm *ipcp; |
405 | struct msq_setbuf uninitialized_var(setbuf); | 416 | struct msqid64_ds msqid64; |
417 | struct msg_queue *msq; | ||
418 | int err; | ||
419 | |||
420 | if (cmd == IPC_SET) { | ||
421 | if (copy_msqid_from_user(&msqid64, buf, version)) | ||
422 | return -EFAULT; | ||
423 | } | ||
424 | |||
425 | ipcp = ipcctl_pre_down(&msg_ids(ns), msqid, cmd, | ||
426 | &msqid64.msg_perm, msqid64.msg_qbytes); | ||
427 | if (IS_ERR(ipcp)) | ||
428 | return PTR_ERR(ipcp); | ||
429 | |||
430 | msq = container_of(ipcp, struct msg_queue, q_perm); | ||
431 | |||
432 | err = security_msg_queue_msgctl(msq, cmd); | ||
433 | if (err) | ||
434 | goto out_unlock; | ||
435 | |||
436 | switch (cmd) { | ||
437 | case IPC_RMID: | ||
438 | freeque(ns, ipcp); | ||
439 | goto out_up; | ||
440 | case IPC_SET: | ||
441 | if (msqid64.msg_qbytes > ns->msg_ctlmnb && | ||
442 | !capable(CAP_SYS_RESOURCE)) { | ||
443 | err = -EPERM; | ||
444 | goto out_unlock; | ||
445 | } | ||
446 | |||
447 | msq->q_qbytes = msqid64.msg_qbytes; | ||
448 | |||
449 | ipc_update_perm(&msqid64.msg_perm, ipcp); | ||
450 | msq->q_ctime = get_seconds(); | ||
451 | /* sleeping receivers might be excluded by | ||
452 | * stricter permissions. | ||
453 | */ | ||
454 | expunge_all(msq, -EAGAIN); | ||
455 | /* sleeping senders might be able to send | ||
456 | * due to a larger queue size. | ||
457 | */ | ||
458 | ss_wakeup(&msq->q_senders, 0); | ||
459 | break; | ||
460 | default: | ||
461 | err = -EINVAL; | ||
462 | } | ||
463 | out_unlock: | ||
464 | msg_unlock(msq); | ||
465 | out_up: | ||
466 | up_write(&msg_ids(ns).rw_mutex); | ||
467 | return err; | ||
468 | } | ||
469 | |||
470 | asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) | ||
471 | { | ||
406 | struct msg_queue *msq; | 472 | struct msg_queue *msq; |
407 | int err, version; | 473 | int err, version; |
408 | struct ipc_namespace *ns; | 474 | struct ipc_namespace *ns; |
@@ -498,82 +564,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) | |||
498 | return success_return; | 564 | return success_return; |
499 | } | 565 | } |
500 | case IPC_SET: | 566 | case IPC_SET: |
501 | if (!buf) | ||
502 | return -EFAULT; | ||
503 | if (copy_msqid_from_user(&setbuf, buf, version)) | ||
504 | return -EFAULT; | ||
505 | break; | ||
506 | case IPC_RMID: | 567 | case IPC_RMID: |
507 | break; | 568 | err = msgctl_down(ns, msqid, cmd, buf, version); |
569 | return err; | ||
508 | default: | 570 | default: |
509 | return -EINVAL; | 571 | return -EINVAL; |
510 | } | 572 | } |
511 | 573 | ||
512 | down_write(&msg_ids(ns).rw_mutex); | ||
513 | msq = msg_lock_check_down(ns, msqid); | ||
514 | if (IS_ERR(msq)) { | ||
515 | err = PTR_ERR(msq); | ||
516 | goto out_up; | ||
517 | } | ||
518 | |||
519 | ipcp = &msq->q_perm; | ||
520 | |||
521 | err = audit_ipc_obj(ipcp); | ||
522 | if (err) | ||
523 | goto out_unlock_up; | ||
524 | if (cmd == IPC_SET) { | ||
525 | err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, | ||
526 | setbuf.mode); | ||
527 | if (err) | ||
528 | goto out_unlock_up; | ||
529 | } | ||
530 | |||
531 | err = -EPERM; | ||
532 | if (current->euid != ipcp->cuid && | ||
533 | current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) | ||
534 | /* We _could_ check for CAP_CHOWN above, but we don't */ | ||
535 | goto out_unlock_up; | ||
536 | |||
537 | err = security_msg_queue_msgctl(msq, cmd); | ||
538 | if (err) | ||
539 | goto out_unlock_up; | ||
540 | |||
541 | switch (cmd) { | ||
542 | case IPC_SET: | ||
543 | { | ||
544 | err = -EPERM; | ||
545 | if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) | ||
546 | goto out_unlock_up; | ||
547 | |||
548 | msq->q_qbytes = setbuf.qbytes; | ||
549 | |||
550 | ipcp->uid = setbuf.uid; | ||
551 | ipcp->gid = setbuf.gid; | ||
552 | ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | | ||
553 | (S_IRWXUGO & setbuf.mode); | ||
554 | msq->q_ctime = get_seconds(); | ||
555 | /* sleeping receivers might be excluded by | ||
556 | * stricter permissions. | ||
557 | */ | ||
558 | expunge_all(msq, -EAGAIN); | ||
559 | /* sleeping senders might be able to send | ||
560 | * due to a larger queue size. | ||
561 | */ | ||
562 | ss_wakeup(&msq->q_senders, 0); | ||
563 | msg_unlock(msq); | ||
564 | break; | ||
565 | } | ||
566 | case IPC_RMID: | ||
567 | freeque(ns, &msq->q_perm); | ||
568 | break; | ||
569 | } | ||
570 | err = 0; | ||
571 | out_up: | ||
572 | up_write(&msg_ids(ns).rw_mutex); | ||
573 | return err; | ||
574 | out_unlock_up: | ||
575 | msg_unlock(msq); | ||
576 | goto out_up; | ||
577 | out_unlock: | 574 | out_unlock: |
578 | msg_unlock(msq); | 575 | msg_unlock(msq); |
579 | return err; | 576 | return err; |
diff --git a/ipc/namespace.c b/ipc/namespace.c index 1b967655eb35..9171d948751e 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c | |||
@@ -20,10 +20,20 @@ static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns) | |||
20 | if (ns == NULL) | 20 | if (ns == NULL) |
21 | return ERR_PTR(-ENOMEM); | 21 | return ERR_PTR(-ENOMEM); |
22 | 22 | ||
23 | atomic_inc(&nr_ipc_ns); | ||
24 | |||
23 | sem_init_ns(ns); | 25 | sem_init_ns(ns); |
24 | msg_init_ns(ns); | 26 | msg_init_ns(ns); |
25 | shm_init_ns(ns); | 27 | shm_init_ns(ns); |
26 | 28 | ||
29 | /* | ||
30 | * msgmni has already been computed for the new ipc ns. | ||
31 | * Thus, do the ipcns creation notification before registering that | ||
32 | * new ipcns in the chain. | ||
33 | */ | ||
34 | ipcns_notify(IPCNS_CREATED); | ||
35 | register_ipcns_notifier(ns); | ||
36 | |||
27 | kref_init(&ns->kref); | 37 | kref_init(&ns->kref); |
28 | return ns; | 38 | return ns; |
29 | } | 39 | } |
@@ -79,8 +89,24 @@ void free_ipc_ns(struct kref *kref) | |||
79 | struct ipc_namespace *ns; | 89 | struct ipc_namespace *ns; |
80 | 90 | ||
81 | ns = container_of(kref, struct ipc_namespace, kref); | 91 | ns = container_of(kref, struct ipc_namespace, kref); |
92 | /* | ||
93 | * Unregistering the hotplug notifier at the beginning guarantees | ||
94 | * that the ipc namespace won't be freed while we are inside the | ||
95 | * callback routine. Since the blocking_notifier_chain_XXX routines | ||
96 | * hold a rw lock on the notifier list, unregister_ipcns_notifier() | ||
97 | * won't take the rw lock before blocking_notifier_call_chain() has | ||
98 | * released the rd lock. | ||
99 | */ | ||
100 | unregister_ipcns_notifier(ns); | ||
82 | sem_exit_ns(ns); | 101 | sem_exit_ns(ns); |
83 | msg_exit_ns(ns); | 102 | msg_exit_ns(ns); |
84 | shm_exit_ns(ns); | 103 | shm_exit_ns(ns); |
85 | kfree(ns); | 104 | kfree(ns); |
105 | atomic_dec(&nr_ipc_ns); | ||
106 | |||
107 | /* | ||
108 | * Do the ipcns removal notification after decrementing nr_ipc_ns in | ||
109 | * order to have a correct value when recomputing msgmni. | ||
110 | */ | ||
111 | ipcns_notify(IPCNS_REMOVED); | ||
86 | } | 112 | } |
@@ -91,7 +91,6 @@ | |||
91 | 91 | ||
92 | #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) | 92 | #define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) |
93 | #define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) | 93 | #define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) |
94 | #define sem_buildid(id, seq) ipc_buildid(id, seq) | ||
95 | 94 | ||
96 | static int newary(struct ipc_namespace *, struct ipc_params *); | 95 | static int newary(struct ipc_namespace *, struct ipc_params *); |
97 | static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); | 96 | static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); |
@@ -142,21 +141,6 @@ void __init sem_init (void) | |||
142 | } | 141 | } |
143 | 142 | ||
144 | /* | 143 | /* |
145 | * This routine is called in the paths where the rw_mutex is held to protect | ||
146 | * access to the idr tree. | ||
147 | */ | ||
148 | static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns, | ||
149 | int id) | ||
150 | { | ||
151 | struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id); | ||
152 | |||
153 | if (IS_ERR(ipcp)) | ||
154 | return (struct sem_array *)ipcp; | ||
155 | |||
156 | return container_of(ipcp, struct sem_array, sem_perm); | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * sem_lock_(check_) routines are called in the paths where the rw_mutex | 144 | * sem_lock_(check_) routines are called in the paths where the rw_mutex |
161 | * is not held. | 145 | * is not held. |
162 | */ | 146 | */ |
@@ -181,6 +165,25 @@ static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns, | |||
181 | return container_of(ipcp, struct sem_array, sem_perm); | 165 | return container_of(ipcp, struct sem_array, sem_perm); |
182 | } | 166 | } |
183 | 167 | ||
168 | static inline void sem_lock_and_putref(struct sem_array *sma) | ||
169 | { | ||
170 | ipc_lock_by_ptr(&sma->sem_perm); | ||
171 | ipc_rcu_putref(sma); | ||
172 | } | ||
173 | |||
174 | static inline void sem_getref_and_unlock(struct sem_array *sma) | ||
175 | { | ||
176 | ipc_rcu_getref(sma); | ||
177 | ipc_unlock(&(sma)->sem_perm); | ||
178 | } | ||
179 | |||
180 | static inline void sem_putref(struct sem_array *sma) | ||
181 | { | ||
182 | ipc_lock_by_ptr(&sma->sem_perm); | ||
183 | ipc_rcu_putref(sma); | ||
184 | ipc_unlock(&(sma)->sem_perm); | ||
185 | } | ||
186 | |||
184 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) | 187 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) |
185 | { | 188 | { |
186 | ipc_rmid(&sem_ids(ns), &s->sem_perm); | 189 | ipc_rmid(&sem_ids(ns), &s->sem_perm); |
@@ -268,7 +271,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) | |||
268 | } | 271 | } |
269 | ns->used_sems += nsems; | 272 | ns->used_sems += nsems; |
270 | 273 | ||
271 | sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq); | ||
272 | sma->sem_base = (struct sem *) &sma[1]; | 274 | sma->sem_base = (struct sem *) &sma[1]; |
273 | /* sma->sem_pending = NULL; */ | 275 | /* sma->sem_pending = NULL; */ |
274 | sma->sem_pending_last = &sma->sem_pending; | 276 | sma->sem_pending_last = &sma->sem_pending; |
@@ -700,19 +702,15 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
700 | int i; | 702 | int i; |
701 | 703 | ||
702 | if(nsems > SEMMSL_FAST) { | 704 | if(nsems > SEMMSL_FAST) { |
703 | ipc_rcu_getref(sma); | 705 | sem_getref_and_unlock(sma); |
704 | sem_unlock(sma); | ||
705 | 706 | ||
706 | sem_io = ipc_alloc(sizeof(ushort)*nsems); | 707 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
707 | if(sem_io == NULL) { | 708 | if(sem_io == NULL) { |
708 | ipc_lock_by_ptr(&sma->sem_perm); | 709 | sem_putref(sma); |
709 | ipc_rcu_putref(sma); | ||
710 | sem_unlock(sma); | ||
711 | return -ENOMEM; | 710 | return -ENOMEM; |
712 | } | 711 | } |
713 | 712 | ||
714 | ipc_lock_by_ptr(&sma->sem_perm); | 713 | sem_lock_and_putref(sma); |
715 | ipc_rcu_putref(sma); | ||
716 | if (sma->sem_perm.deleted) { | 714 | if (sma->sem_perm.deleted) { |
717 | sem_unlock(sma); | 715 | sem_unlock(sma); |
718 | err = -EIDRM; | 716 | err = -EIDRM; |
@@ -733,38 +731,30 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, | |||
733 | int i; | 731 | int i; |
734 | struct sem_undo *un; | 732 | struct sem_undo *un; |
735 | 733 | ||
736 | ipc_rcu_getref(sma); | 734 | sem_getref_and_unlock(sma); |
737 | sem_unlock(sma); | ||
738 | 735 | ||
739 | if(nsems > SEMMSL_FAST) { | 736 | if(nsems > SEMMSL_FAST) { |
740 | sem_io = ipc_alloc(sizeof(ushort)*nsems); | 737 | sem_io = ipc_alloc(sizeof(ushort)*nsems); |
741 | if(sem_io == NULL) { | 738 | if(sem_io == NULL) { |
742 | ipc_lock_by_ptr(&sma->sem_perm); | 739 | sem_putref(sma); |
743 | ipc_rcu_putref(sma); | ||
744 | sem_unlock(sma); | ||
745 | return -ENOMEM; | 740 | return -ENOMEM; |
746 | } | 741 | } |
747 | } | 742 | } |
748 | 743 | ||
749 | if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { | 744 | if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { |
750 | ipc_lock_by_ptr(&sma->sem_perm); | 745 | sem_putref(sma); |
751 | ipc_rcu_putref(sma); | ||
752 | sem_unlock(sma); | ||
753 | err = -EFAULT; | 746 | err = -EFAULT; |
754 | goto out_free; | 747 | goto out_free; |
755 | } | 748 | } |
756 | 749 | ||
757 | for (i = 0; i < nsems; i++) { | 750 | for (i = 0; i < nsems; i++) { |
758 | if (sem_io[i] > SEMVMX) { | 751 | if (sem_io[i] > SEMVMX) { |
759 | ipc_lock_by_ptr(&sma->sem_perm); | 752 | sem_putref(sma); |
760 | ipc_rcu_putref(sma); | ||
761 | sem_unlock(sma); | ||
762 | err = -ERANGE; | 753 | err = -ERANGE; |
763 | goto out_free; | 754 | goto out_free; |
764 | } | 755 | } |
765 | } | 756 | } |
766 | ipc_lock_by_ptr(&sma->sem_perm); | 757 | sem_lock_and_putref(sma); |
767 | ipc_rcu_putref(sma); | ||
768 | if (sma->sem_perm.deleted) { | 758 | if (sma->sem_perm.deleted) { |
769 | sem_unlock(sma); | 759 | sem_unlock(sma); |
770 | err = -EIDRM; | 760 | err = -EIDRM; |
@@ -830,28 +820,14 @@ out_free: | |||
830 | return err; | 820 | return err; |
831 | } | 821 | } |
832 | 822 | ||
833 | struct sem_setbuf { | 823 | static inline unsigned long |
834 | uid_t uid; | 824 | copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) |
835 | gid_t gid; | ||
836 | mode_t mode; | ||
837 | }; | ||
838 | |||
839 | static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version) | ||
840 | { | 825 | { |
841 | switch(version) { | 826 | switch(version) { |
842 | case IPC_64: | 827 | case IPC_64: |
843 | { | 828 | if (copy_from_user(out, buf, sizeof(*out))) |
844 | struct semid64_ds tbuf; | ||
845 | |||
846 | if(copy_from_user(&tbuf, buf, sizeof(tbuf))) | ||
847 | return -EFAULT; | 829 | return -EFAULT; |
848 | |||
849 | out->uid = tbuf.sem_perm.uid; | ||
850 | out->gid = tbuf.sem_perm.gid; | ||
851 | out->mode = tbuf.sem_perm.mode; | ||
852 | |||
853 | return 0; | 830 | return 0; |
854 | } | ||
855 | case IPC_OLD: | 831 | case IPC_OLD: |
856 | { | 832 | { |
857 | struct semid_ds tbuf_old; | 833 | struct semid_ds tbuf_old; |
@@ -859,9 +835,9 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ | |||
859 | if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) | 835 | if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) |
860 | return -EFAULT; | 836 | return -EFAULT; |
861 | 837 | ||
862 | out->uid = tbuf_old.sem_perm.uid; | 838 | out->sem_perm.uid = tbuf_old.sem_perm.uid; |
863 | out->gid = tbuf_old.sem_perm.gid; | 839 | out->sem_perm.gid = tbuf_old.sem_perm.gid; |
864 | out->mode = tbuf_old.sem_perm.mode; | 840 | out->sem_perm.mode = tbuf_old.sem_perm.mode; |
865 | 841 | ||
866 | return 0; | 842 | return 0; |
867 | } | 843 | } |
@@ -870,38 +846,29 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ | |||
870 | } | 846 | } |
871 | } | 847 | } |
872 | 848 | ||
873 | static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, | 849 | /* |
874 | int cmd, int version, union semun arg) | 850 | * This function handles some semctl commands which require the rw_mutex |
851 | * to be held in write mode. | ||
852 | * NOTE: no locks must be held, the rw_mutex is taken inside this function. | ||
853 | */ | ||
854 | static int semctl_down(struct ipc_namespace *ns, int semid, | ||
855 | int cmd, int version, union semun arg) | ||
875 | { | 856 | { |
876 | struct sem_array *sma; | 857 | struct sem_array *sma; |
877 | int err; | 858 | int err; |
878 | struct sem_setbuf uninitialized_var(setbuf); | 859 | struct semid64_ds semid64; |
879 | struct kern_ipc_perm *ipcp; | 860 | struct kern_ipc_perm *ipcp; |
880 | 861 | ||
881 | if(cmd == IPC_SET) { | 862 | if(cmd == IPC_SET) { |
882 | if(copy_semid_from_user (&setbuf, arg.buf, version)) | 863 | if (copy_semid_from_user(&semid64, arg.buf, version)) |
883 | return -EFAULT; | 864 | return -EFAULT; |
884 | } | 865 | } |
885 | sma = sem_lock_check_down(ns, semid); | ||
886 | if (IS_ERR(sma)) | ||
887 | return PTR_ERR(sma); | ||
888 | 866 | ||
889 | ipcp = &sma->sem_perm; | 867 | ipcp = ipcctl_pre_down(&sem_ids(ns), semid, cmd, &semid64.sem_perm, 0); |
890 | 868 | if (IS_ERR(ipcp)) | |
891 | err = audit_ipc_obj(ipcp); | 869 | return PTR_ERR(ipcp); |
892 | if (err) | ||
893 | goto out_unlock; | ||
894 | 870 | ||
895 | if (cmd == IPC_SET) { | 871 | sma = container_of(ipcp, struct sem_array, sem_perm); |
896 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); | ||
897 | if (err) | ||
898 | goto out_unlock; | ||
899 | } | ||
900 | if (current->euid != ipcp->cuid && | ||
901 | current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { | ||
902 | err=-EPERM; | ||
903 | goto out_unlock; | ||
904 | } | ||
905 | 872 | ||
906 | err = security_sem_semctl(sma, cmd); | 873 | err = security_sem_semctl(sma, cmd); |
907 | if (err) | 874 | if (err) |
@@ -910,26 +877,19 @@ static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, | |||
910 | switch(cmd){ | 877 | switch(cmd){ |
911 | case IPC_RMID: | 878 | case IPC_RMID: |
912 | freeary(ns, ipcp); | 879 | freeary(ns, ipcp); |
913 | err = 0; | 880 | goto out_up; |
914 | break; | ||
915 | case IPC_SET: | 881 | case IPC_SET: |
916 | ipcp->uid = setbuf.uid; | 882 | ipc_update_perm(&semid64.sem_perm, ipcp); |
917 | ipcp->gid = setbuf.gid; | ||
918 | ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | ||
919 | | (setbuf.mode & S_IRWXUGO); | ||
920 | sma->sem_ctime = get_seconds(); | 883 | sma->sem_ctime = get_seconds(); |
921 | sem_unlock(sma); | ||
922 | err = 0; | ||
923 | break; | 884 | break; |
924 | default: | 885 | default: |
925 | sem_unlock(sma); | ||
926 | err = -EINVAL; | 886 | err = -EINVAL; |
927 | break; | ||
928 | } | 887 | } |
929 | return err; | ||
930 | 888 | ||
931 | out_unlock: | 889 | out_unlock: |
932 | sem_unlock(sma); | 890 | sem_unlock(sma); |
891 | out_up: | ||
892 | up_write(&sem_ids(ns).rw_mutex); | ||
933 | return err; | 893 | return err; |
934 | } | 894 | } |
935 | 895 | ||
@@ -963,9 +923,7 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) | |||
963 | return err; | 923 | return err; |
964 | case IPC_RMID: | 924 | case IPC_RMID: |
965 | case IPC_SET: | 925 | case IPC_SET: |
966 | down_write(&sem_ids(ns).rw_mutex); | 926 | err = semctl_down(ns, semid, cmd, version, arg); |
967 | err = semctl_down(ns,semid,semnum,cmd,version,arg); | ||
968 | up_write(&sem_ids(ns).rw_mutex); | ||
969 | return err; | 927 | return err; |
970 | default: | 928 | default: |
971 | return -EINVAL; | 929 | return -EINVAL; |
@@ -1044,14 +1002,11 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1044 | return ERR_PTR(PTR_ERR(sma)); | 1002 | return ERR_PTR(PTR_ERR(sma)); |
1045 | 1003 | ||
1046 | nsems = sma->sem_nsems; | 1004 | nsems = sma->sem_nsems; |
1047 | ipc_rcu_getref(sma); | 1005 | sem_getref_and_unlock(sma); |
1048 | sem_unlock(sma); | ||
1049 | 1006 | ||
1050 | new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); | 1007 | new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); |
1051 | if (!new) { | 1008 | if (!new) { |
1052 | ipc_lock_by_ptr(&sma->sem_perm); | 1009 | sem_putref(sma); |
1053 | ipc_rcu_putref(sma); | ||
1054 | sem_unlock(sma); | ||
1055 | return ERR_PTR(-ENOMEM); | 1010 | return ERR_PTR(-ENOMEM); |
1056 | } | 1011 | } |
1057 | new->semadj = (short *) &new[1]; | 1012 | new->semadj = (short *) &new[1]; |
@@ -1062,13 +1017,10 @@ static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) | |||
1062 | if (un) { | 1017 | if (un) { |
1063 | spin_unlock(&ulp->lock); | 1018 | spin_unlock(&ulp->lock); |
1064 | kfree(new); | 1019 | kfree(new); |
1065 | ipc_lock_by_ptr(&sma->sem_perm); | 1020 | sem_putref(sma); |
1066 | ipc_rcu_putref(sma); | ||
1067 | sem_unlock(sma); | ||
1068 | goto out; | 1021 | goto out; |
1069 | } | 1022 | } |
1070 | ipc_lock_by_ptr(&sma->sem_perm); | 1023 | sem_lock_and_putref(sma); |
1071 | ipc_rcu_putref(sma); | ||
1072 | if (sma->sem_perm.deleted) { | 1024 | if (sma->sem_perm.deleted) { |
1073 | sem_unlock(sma); | 1025 | sem_unlock(sma); |
1074 | spin_unlock(&ulp->lock); | 1026 | spin_unlock(&ulp->lock); |
@@ -1298,6 +1250,7 @@ void exit_sem(struct task_struct *tsk) | |||
1298 | undo_list = tsk->sysvsem.undo_list; | 1250 | undo_list = tsk->sysvsem.undo_list; |
1299 | if (!undo_list) | 1251 | if (!undo_list) |
1300 | return; | 1252 | return; |
1253 | tsk->sysvsem.undo_list = NULL; | ||
1301 | 1254 | ||
1302 | if (!atomic_dec_and_test(&undo_list->refcnt)) | 1255 | if (!atomic_dec_and_test(&undo_list->refcnt)) |
1303 | return; | 1256 | return; |
@@ -60,7 +60,6 @@ static struct vm_operations_struct shm_vm_ops; | |||
60 | 60 | ||
61 | #define shm_unlock(shp) \ | 61 | #define shm_unlock(shp) \ |
62 | ipc_unlock(&(shp)->shm_perm) | 62 | ipc_unlock(&(shp)->shm_perm) |
63 | #define shm_buildid(id, seq) ipc_buildid(id, seq) | ||
64 | 63 | ||
65 | static int newseg(struct ipc_namespace *, struct ipc_params *); | 64 | static int newseg(struct ipc_namespace *, struct ipc_params *); |
66 | static void shm_open(struct vm_area_struct *vma); | 65 | static void shm_open(struct vm_area_struct *vma); |
@@ -127,18 +126,6 @@ static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns, | |||
127 | return container_of(ipcp, struct shmid_kernel, shm_perm); | 126 | return container_of(ipcp, struct shmid_kernel, shm_perm); |
128 | } | 127 | } |
129 | 128 | ||
130 | static inline struct shmid_kernel *shm_lock_check_down( | ||
131 | struct ipc_namespace *ns, | ||
132 | int id) | ||
133 | { | ||
134 | struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id); | ||
135 | |||
136 | if (IS_ERR(ipcp)) | ||
137 | return (struct shmid_kernel *)ipcp; | ||
138 | |||
139 | return container_of(ipcp, struct shmid_kernel, shm_perm); | ||
140 | } | ||
141 | |||
142 | /* | 129 | /* |
143 | * shm_lock_(check_) routines are called in the paths where the rw_mutex | 130 | * shm_lock_(check_) routines are called in the paths where the rw_mutex |
144 | * is not held. | 131 | * is not held. |
@@ -169,12 +156,6 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) | |||
169 | ipc_rmid(&shm_ids(ns), &s->shm_perm); | 156 | ipc_rmid(&shm_ids(ns), &s->shm_perm); |
170 | } | 157 | } |
171 | 158 | ||
172 | static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp) | ||
173 | { | ||
174 | return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); | ||
175 | } | ||
176 | |||
177 | |||
178 | 159 | ||
179 | /* This is called by fork, once for every shm attach. */ | 160 | /* This is called by fork, once for every shm attach. */ |
180 | static void shm_open(struct vm_area_struct *vma) | 161 | static void shm_open(struct vm_area_struct *vma) |
@@ -416,7 +397,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
416 | if (IS_ERR(file)) | 397 | if (IS_ERR(file)) |
417 | goto no_file; | 398 | goto no_file; |
418 | 399 | ||
419 | id = shm_addid(ns, shp); | 400 | id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); |
420 | if (id < 0) { | 401 | if (id < 0) { |
421 | error = id; | 402 | error = id; |
422 | goto no_id; | 403 | goto no_id; |
@@ -428,7 +409,6 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
428 | shp->shm_ctim = get_seconds(); | 409 | shp->shm_ctim = get_seconds(); |
429 | shp->shm_segsz = size; | 410 | shp->shm_segsz = size; |
430 | shp->shm_nattch = 0; | 411 | shp->shm_nattch = 0; |
431 | shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq); | ||
432 | shp->shm_file = file; | 412 | shp->shm_file = file; |
433 | /* | 413 | /* |
434 | * shmid gets reported as "inode#" in /proc/pid/maps. | 414 | * shmid gets reported as "inode#" in /proc/pid/maps. |
@@ -519,28 +499,14 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ | |||
519 | } | 499 | } |
520 | } | 500 | } |
521 | 501 | ||
522 | struct shm_setbuf { | 502 | static inline unsigned long |
523 | uid_t uid; | 503 | copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) |
524 | gid_t gid; | ||
525 | mode_t mode; | ||
526 | }; | ||
527 | |||
528 | static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version) | ||
529 | { | 504 | { |
530 | switch(version) { | 505 | switch(version) { |
531 | case IPC_64: | 506 | case IPC_64: |
532 | { | 507 | if (copy_from_user(out, buf, sizeof(*out))) |
533 | struct shmid64_ds tbuf; | ||
534 | |||
535 | if (copy_from_user(&tbuf, buf, sizeof(tbuf))) | ||
536 | return -EFAULT; | 508 | return -EFAULT; |
537 | |||
538 | out->uid = tbuf.shm_perm.uid; | ||
539 | out->gid = tbuf.shm_perm.gid; | ||
540 | out->mode = tbuf.shm_perm.mode; | ||
541 | |||
542 | return 0; | 509 | return 0; |
543 | } | ||
544 | case IPC_OLD: | 510 | case IPC_OLD: |
545 | { | 511 | { |
546 | struct shmid_ds tbuf_old; | 512 | struct shmid_ds tbuf_old; |
@@ -548,9 +514,9 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __ | |||
548 | if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) | 514 | if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) |
549 | return -EFAULT; | 515 | return -EFAULT; |
550 | 516 | ||
551 | out->uid = tbuf_old.shm_perm.uid; | 517 | out->shm_perm.uid = tbuf_old.shm_perm.uid; |
552 | out->gid = tbuf_old.shm_perm.gid; | 518 | out->shm_perm.gid = tbuf_old.shm_perm.gid; |
553 | out->mode = tbuf_old.shm_perm.mode; | 519 | out->shm_perm.mode = tbuf_old.shm_perm.mode; |
554 | 520 | ||
555 | return 0; | 521 | return 0; |
556 | } | 522 | } |
@@ -624,9 +590,53 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, | |||
624 | } | 590 | } |
625 | } | 591 | } |
626 | 592 | ||
627 | asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | 593 | /* |
594 | * This function handles some shmctl commands which require the rw_mutex | ||
595 | * to be held in write mode. | ||
596 | * NOTE: no locks must be held, the rw_mutex is taken inside this function. | ||
597 | */ | ||
598 | static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, | ||
599 | struct shmid_ds __user *buf, int version) | ||
600 | { | ||
601 | struct kern_ipc_perm *ipcp; | ||
602 | struct shmid64_ds shmid64; | ||
603 | struct shmid_kernel *shp; | ||
604 | int err; | ||
605 | |||
606 | if (cmd == IPC_SET) { | ||
607 | if (copy_shmid_from_user(&shmid64, buf, version)) | ||
608 | return -EFAULT; | ||
609 | } | ||
610 | |||
611 | ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0); | ||
612 | if (IS_ERR(ipcp)) | ||
613 | return PTR_ERR(ipcp); | ||
614 | |||
615 | shp = container_of(ipcp, struct shmid_kernel, shm_perm); | ||
616 | |||
617 | err = security_shm_shmctl(shp, cmd); | ||
618 | if (err) | ||
619 | goto out_unlock; | ||
620 | switch (cmd) { | ||
621 | case IPC_RMID: | ||
622 | do_shm_rmid(ns, ipcp); | ||
623 | goto out_up; | ||
624 | case IPC_SET: | ||
625 | ipc_update_perm(&shmid64.shm_perm, ipcp); | ||
626 | shp->shm_ctim = get_seconds(); | ||
627 | break; | ||
628 | default: | ||
629 | err = -EINVAL; | ||
630 | } | ||
631 | out_unlock: | ||
632 | shm_unlock(shp); | ||
633 | out_up: | ||
634 | up_write(&shm_ids(ns).rw_mutex); | ||
635 | return err; | ||
636 | } | ||
637 | |||
638 | asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf) | ||
628 | { | 639 | { |
629 | struct shm_setbuf setbuf; | ||
630 | struct shmid_kernel *shp; | 640 | struct shmid_kernel *shp; |
631 | int err, version; | 641 | int err, version; |
632 | struct ipc_namespace *ns; | 642 | struct ipc_namespace *ns; |
@@ -783,97 +793,13 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) | |||
783 | goto out; | 793 | goto out; |
784 | } | 794 | } |
785 | case IPC_RMID: | 795 | case IPC_RMID: |
786 | { | ||
787 | /* | ||
788 | * We cannot simply remove the file. The SVID states | ||
789 | * that the block remains until the last person | ||
790 | * detaches from it, then is deleted. A shmat() on | ||
791 | * an RMID segment is legal in older Linux and if | ||
792 | * we change it apps break... | ||
793 | * | ||
794 | * Instead we set a destroyed flag, and then blow | ||
795 | * the name away when the usage hits zero. | ||
796 | */ | ||
797 | down_write(&shm_ids(ns).rw_mutex); | ||
798 | shp = shm_lock_check_down(ns, shmid); | ||
799 | if (IS_ERR(shp)) { | ||
800 | err = PTR_ERR(shp); | ||
801 | goto out_up; | ||
802 | } | ||
803 | |||
804 | err = audit_ipc_obj(&(shp->shm_perm)); | ||
805 | if (err) | ||
806 | goto out_unlock_up; | ||
807 | |||
808 | if (current->euid != shp->shm_perm.uid && | ||
809 | current->euid != shp->shm_perm.cuid && | ||
810 | !capable(CAP_SYS_ADMIN)) { | ||
811 | err=-EPERM; | ||
812 | goto out_unlock_up; | ||
813 | } | ||
814 | |||
815 | err = security_shm_shmctl(shp, cmd); | ||
816 | if (err) | ||
817 | goto out_unlock_up; | ||
818 | |||
819 | do_shm_rmid(ns, &shp->shm_perm); | ||
820 | up_write(&shm_ids(ns).rw_mutex); | ||
821 | goto out; | ||
822 | } | ||
823 | |||
824 | case IPC_SET: | 796 | case IPC_SET: |
825 | { | 797 | err = shmctl_down(ns, shmid, cmd, buf, version); |
826 | if (!buf) { | 798 | return err; |
827 | err = -EFAULT; | ||
828 | goto out; | ||
829 | } | ||
830 | |||
831 | if (copy_shmid_from_user (&setbuf, buf, version)) { | ||
832 | err = -EFAULT; | ||
833 | goto out; | ||
834 | } | ||
835 | down_write(&shm_ids(ns).rw_mutex); | ||
836 | shp = shm_lock_check_down(ns, shmid); | ||
837 | if (IS_ERR(shp)) { | ||
838 | err = PTR_ERR(shp); | ||
839 | goto out_up; | ||
840 | } | ||
841 | err = audit_ipc_obj(&(shp->shm_perm)); | ||
842 | if (err) | ||
843 | goto out_unlock_up; | ||
844 | err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); | ||
845 | if (err) | ||
846 | goto out_unlock_up; | ||
847 | err=-EPERM; | ||
848 | if (current->euid != shp->shm_perm.uid && | ||
849 | current->euid != shp->shm_perm.cuid && | ||
850 | !capable(CAP_SYS_ADMIN)) { | ||
851 | goto out_unlock_up; | ||
852 | } | ||
853 | |||
854 | err = security_shm_shmctl(shp, cmd); | ||
855 | if (err) | ||
856 | goto out_unlock_up; | ||
857 | |||
858 | shp->shm_perm.uid = setbuf.uid; | ||
859 | shp->shm_perm.gid = setbuf.gid; | ||
860 | shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO) | ||
861 | | (setbuf.mode & S_IRWXUGO); | ||
862 | shp->shm_ctim = get_seconds(); | ||
863 | break; | ||
864 | } | ||
865 | |||
866 | default: | 799 | default: |
867 | err = -EINVAL; | 800 | return -EINVAL; |
868 | goto out; | ||
869 | } | 801 | } |
870 | 802 | ||
871 | err = 0; | ||
872 | out_unlock_up: | ||
873 | shm_unlock(shp); | ||
874 | out_up: | ||
875 | up_write(&shm_ids(ns).rw_mutex); | ||
876 | goto out; | ||
877 | out_unlock: | 803 | out_unlock: |
878 | shm_unlock(shp); | 804 | shm_unlock(shp); |
879 | out: | 805 | out: |
diff --git a/ipc/util.c b/ipc/util.c index fd1b50da9db8..3339177b336c 100644 --- a/ipc/util.c +++ b/ipc/util.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/audit.h> | 33 | #include <linux/audit.h> |
34 | #include <linux/nsproxy.h> | 34 | #include <linux/nsproxy.h> |
35 | #include <linux/rwsem.h> | 35 | #include <linux/rwsem.h> |
36 | #include <linux/memory.h> | ||
36 | #include <linux/ipc_namespace.h> | 37 | #include <linux/ipc_namespace.h> |
37 | 38 | ||
38 | #include <asm/unistd.h> | 39 | #include <asm/unistd.h> |
@@ -52,11 +53,57 @@ struct ipc_namespace init_ipc_ns = { | |||
52 | }, | 53 | }, |
53 | }; | 54 | }; |
54 | 55 | ||
56 | atomic_t nr_ipc_ns = ATOMIC_INIT(1); | ||
57 | |||
58 | |||
59 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
60 | |||
61 | static void ipc_memory_notifier(struct work_struct *work) | ||
62 | { | ||
63 | ipcns_notify(IPCNS_MEMCHANGED); | ||
64 | } | ||
65 | |||
66 | static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier); | ||
67 | |||
68 | |||
69 | static int ipc_memory_callback(struct notifier_block *self, | ||
70 | unsigned long action, void *arg) | ||
71 | { | ||
72 | switch (action) { | ||
73 | case MEM_ONLINE: /* memory successfully brought online */ | ||
74 | case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */ | ||
75 | /* | ||
76 | * This is done by invoking the ipcns notifier chain with the | ||
77 | * IPC_MEMCHANGED event. | ||
78 | * In order not to keep the lock on the hotplug memory chain | ||
79 | * for too long, queue a work item that will, when waken up, | ||
80 | * activate the ipcns notification chain. | ||
81 | * No need to keep several ipc work items on the queue. | ||
82 | */ | ||
83 | if (!work_pending(&ipc_memory_wq)) | ||
84 | schedule_work(&ipc_memory_wq); | ||
85 | break; | ||
86 | case MEM_GOING_ONLINE: | ||
87 | case MEM_GOING_OFFLINE: | ||
88 | case MEM_CANCEL_ONLINE: | ||
89 | case MEM_CANCEL_OFFLINE: | ||
90 | default: | ||
91 | break; | ||
92 | } | ||
93 | |||
94 | return NOTIFY_OK; | ||
95 | } | ||
96 | |||
97 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
98 | |||
55 | /** | 99 | /** |
56 | * ipc_init - initialise IPC subsystem | 100 | * ipc_init - initialise IPC subsystem |
57 | * | 101 | * |
58 | * The various system5 IPC resources (semaphores, messages and shared | 102 | * The various system5 IPC resources (semaphores, messages and shared |
59 | * memory) are initialised | 103 | * memory) are initialised |
104 | * A callback routine is registered into the memory hotplug notifier | ||
105 | * chain: since msgmni scales to lowmem this callback routine will be | ||
106 | * called upon successful memory add / remove to recompute msmgni. | ||
60 | */ | 107 | */ |
61 | 108 | ||
62 | static int __init ipc_init(void) | 109 | static int __init ipc_init(void) |
@@ -64,6 +111,8 @@ static int __init ipc_init(void) | |||
64 | sem_init(); | 111 | sem_init(); |
65 | msg_init(); | 112 | msg_init(); |
66 | shm_init(); | 113 | shm_init(); |
114 | hotplug_memory_notifier(ipc_memory_callback, IPC_CALLBACK_PRI); | ||
115 | register_ipcns_notifier(&init_ipc_ns); | ||
67 | return 0; | 116 | return 0; |
68 | } | 117 | } |
69 | __initcall(ipc_init); | 118 | __initcall(ipc_init); |
@@ -84,8 +133,8 @@ void ipc_init_ids(struct ipc_ids *ids) | |||
84 | ids->seq = 0; | 133 | ids->seq = 0; |
85 | { | 134 | { |
86 | int seq_limit = INT_MAX/SEQ_MULTIPLIER; | 135 | int seq_limit = INT_MAX/SEQ_MULTIPLIER; |
87 | if(seq_limit > USHRT_MAX) | 136 | if (seq_limit > USHORT_MAX) |
88 | ids->seq_max = USHRT_MAX; | 137 | ids->seq_max = USHORT_MAX; |
89 | else | 138 | else |
90 | ids->seq_max = seq_limit; | 139 | ids->seq_max = seq_limit; |
91 | } | 140 | } |
@@ -116,13 +165,12 @@ void __init ipc_init_proc_interface(const char *path, const char *header, | |||
116 | iface->ids = ids; | 165 | iface->ids = ids; |
117 | iface->show = show; | 166 | iface->show = show; |
118 | 167 | ||
119 | pde = create_proc_entry(path, | 168 | pde = proc_create_data(path, |
120 | S_IRUGO, /* world readable */ | 169 | S_IRUGO, /* world readable */ |
121 | NULL /* parent dir */); | 170 | NULL, /* parent dir */ |
122 | if (pde) { | 171 | &sysvipc_proc_fops, |
123 | pde->data = iface; | 172 | iface); |
124 | pde->proc_fops = &sysvipc_proc_fops; | 173 | if (!pde) { |
125 | } else { | ||
126 | kfree(iface); | 174 | kfree(iface); |
127 | } | 175 | } |
128 | } | 176 | } |
@@ -231,6 +279,7 @@ int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) | |||
231 | if(ids->seq > ids->seq_max) | 279 | if(ids->seq > ids->seq_max) |
232 | ids->seq = 0; | 280 | ids->seq = 0; |
233 | 281 | ||
282 | new->id = ipc_buildid(id, new->seq); | ||
234 | spin_lock_init(&new->lock); | 283 | spin_lock_init(&new->lock); |
235 | new->deleted = 0; | 284 | new->deleted = 0; |
236 | rcu_read_lock(); | 285 | rcu_read_lock(); |
@@ -761,6 +810,70 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, | |||
761 | return ipcget_public(ns, ids, ops, params); | 810 | return ipcget_public(ns, ids, ops, params); |
762 | } | 811 | } |
763 | 812 | ||
813 | /** | ||
814 | * ipc_update_perm - update the permissions of an IPC. | ||
815 | * @in: the permission given as input. | ||
816 | * @out: the permission of the ipc to set. | ||
817 | */ | ||
818 | void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) | ||
819 | { | ||
820 | out->uid = in->uid; | ||
821 | out->gid = in->gid; | ||
822 | out->mode = (out->mode & ~S_IRWXUGO) | ||
823 | | (in->mode & S_IRWXUGO); | ||
824 | } | ||
825 | |||
826 | /** | ||
827 | * ipcctl_pre_down - retrieve an ipc and check permissions for some IPC_XXX cmd | ||
828 | * @ids: the table of ids where to look for the ipc | ||
829 | * @id: the id of the ipc to retrieve | ||
830 | * @cmd: the cmd to check | ||
831 | * @perm: the permission to set | ||
832 | * @extra_perm: one extra permission parameter used by msq | ||
833 | * | ||
834 | * This function does some common audit and permissions check for some IPC_XXX | ||
835 | * cmd and is called from semctl_down, shmctl_down and msgctl_down. | ||
836 | * It must be called without any lock held and | ||
837 | * - retrieves the ipc with the given id in the given table. | ||
838 | * - performs some audit and permission check, depending on the given cmd | ||
839 | * - returns the ipc with both ipc and rw_mutex locks held in case of success | ||
840 | * or an err-code without any lock held otherwise. | ||
841 | */ | ||
842 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd, | ||
843 | struct ipc64_perm *perm, int extra_perm) | ||
844 | { | ||
845 | struct kern_ipc_perm *ipcp; | ||
846 | int err; | ||
847 | |||
848 | down_write(&ids->rw_mutex); | ||
849 | ipcp = ipc_lock_check_down(ids, id); | ||
850 | if (IS_ERR(ipcp)) { | ||
851 | err = PTR_ERR(ipcp); | ||
852 | goto out_up; | ||
853 | } | ||
854 | |||
855 | err = audit_ipc_obj(ipcp); | ||
856 | if (err) | ||
857 | goto out_unlock; | ||
858 | |||
859 | if (cmd == IPC_SET) { | ||
860 | err = audit_ipc_set_perm(extra_perm, perm->uid, | ||
861 | perm->gid, perm->mode); | ||
862 | if (err) | ||
863 | goto out_unlock; | ||
864 | } | ||
865 | if (current->euid == ipcp->cuid || | ||
866 | current->euid == ipcp->uid || capable(CAP_SYS_ADMIN)) | ||
867 | return ipcp; | ||
868 | |||
869 | err = -EPERM; | ||
870 | out_unlock: | ||
871 | ipc_unlock(ipcp); | ||
872 | out_up: | ||
873 | up_write(&ids->rw_mutex); | ||
874 | return ERR_PTR(err); | ||
875 | } | ||
876 | |||
764 | #ifdef __ARCH_WANT_IPC_PARSE_VERSION | 877 | #ifdef __ARCH_WANT_IPC_PARSE_VERSION |
765 | 878 | ||
766 | 879 | ||
diff --git a/ipc/util.h b/ipc/util.h index f37d160c98fe..cdb966aebe07 100644 --- a/ipc/util.h +++ b/ipc/util.h | |||
@@ -12,7 +12,6 @@ | |||
12 | 12 | ||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | 14 | ||
15 | #define USHRT_MAX 0xffff | ||
16 | #define SEQ_MULTIPLIER (IPCMNI) | 15 | #define SEQ_MULTIPLIER (IPCMNI) |
17 | 16 | ||
18 | void sem_init (void); | 17 | void sem_init (void); |
@@ -112,6 +111,9 @@ struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); | |||
112 | 111 | ||
113 | void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); | 112 | void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); |
114 | void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); | 113 | void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); |
114 | void ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); | ||
115 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_ids *ids, int id, int cmd, | ||
116 | struct ipc64_perm *perm, int extra_perm); | ||
115 | 117 | ||
116 | #if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__) | 118 | #if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__) |
117 | /* On IA-64, we always use the "64-bit version" of the IPC structures. */ | 119 | /* On IA-64, we always use the "64-bit version" of the IPC structures. */ |
@@ -124,6 +126,8 @@ extern void free_msg(struct msg_msg *msg); | |||
124 | extern struct msg_msg *load_msg(const void __user *src, int len); | 126 | extern struct msg_msg *load_msg(const void __user *src, int len); |
125 | extern int store_msg(void __user *dest, struct msg_msg *msg, int len); | 127 | extern int store_msg(void __user *dest, struct msg_msg *msg, int len); |
126 | 128 | ||
129 | extern void recompute_msgmni(struct ipc_namespace *); | ||
130 | |||
127 | static inline int ipc_buildid(int id, int seq) | 131 | static inline int ipc_buildid(int id, int seq) |
128 | { | 132 | { |
129 | return SEQ_MULTIPLIER * seq + id; | 133 | return SEQ_MULTIPLIER * seq + id; |
diff --git a/kernel/Makefile b/kernel/Makefile index 6c5f081132a4..188c43223f52 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -11,7 +11,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ | |||
11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ | 11 | hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ |
12 | notifier.o ksysfs.o pm_qos_params.o | 12 | notifier.o ksysfs.o pm_qos_params.o |
13 | 13 | ||
14 | obj-$(CONFIG_SYSCTL) += sysctl_check.o | 14 | obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o |
15 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | 15 | obj-$(CONFIG_STACKTRACE) += stacktrace.o |
16 | obj-y += time/ | 16 | obj-y += time/ |
17 | obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o | 17 | obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o |
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 28fef6bf8534..13430176b3c9 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c | |||
@@ -272,7 +272,7 @@ static int audit_to_watch(struct audit_krule *krule, char *path, int len, | |||
272 | return -EINVAL; | 272 | return -EINVAL; |
273 | 273 | ||
274 | watch = audit_init_watch(path); | 274 | watch = audit_init_watch(path); |
275 | if (unlikely(IS_ERR(watch))) | 275 | if (IS_ERR(watch)) |
276 | return PTR_ERR(watch); | 276 | return PTR_ERR(watch); |
277 | 277 | ||
278 | audit_get_watch(watch); | 278 | audit_get_watch(watch); |
@@ -848,7 +848,7 @@ static struct audit_watch *audit_dupe_watch(struct audit_watch *old) | |||
848 | return ERR_PTR(-ENOMEM); | 848 | return ERR_PTR(-ENOMEM); |
849 | 849 | ||
850 | new = audit_init_watch(path); | 850 | new = audit_init_watch(path); |
851 | if (unlikely(IS_ERR(new))) { | 851 | if (IS_ERR(new)) { |
852 | kfree(path); | 852 | kfree(path); |
853 | goto out; | 853 | goto out; |
854 | } | 854 | } |
@@ -989,7 +989,7 @@ static void audit_update_watch(struct audit_parent *parent, | |||
989 | audit_set_auditable(current->audit_context); | 989 | audit_set_auditable(current->audit_context); |
990 | 990 | ||
991 | nwatch = audit_dupe_watch(owatch); | 991 | nwatch = audit_dupe_watch(owatch); |
992 | if (unlikely(IS_ERR(nwatch))) { | 992 | if (IS_ERR(nwatch)) { |
993 | mutex_unlock(&audit_filter_mutex); | 993 | mutex_unlock(&audit_filter_mutex); |
994 | audit_panic("error updating watch, skipping"); | 994 | audit_panic("error updating watch, skipping"); |
995 | return; | 995 | return; |
@@ -1004,7 +1004,7 @@ static void audit_update_watch(struct audit_parent *parent, | |||
1004 | list_del_rcu(&oentry->list); | 1004 | list_del_rcu(&oentry->list); |
1005 | 1005 | ||
1006 | nentry = audit_dupe_rule(&oentry->rule, nwatch); | 1006 | nentry = audit_dupe_rule(&oentry->rule, nwatch); |
1007 | if (unlikely(IS_ERR(nentry))) | 1007 | if (IS_ERR(nentry)) |
1008 | audit_panic("error updating watch, removing"); | 1008 | audit_panic("error updating watch, removing"); |
1009 | else { | 1009 | else { |
1010 | int h = audit_hash_ino((u32)ino); | 1010 | int h = audit_hash_ino((u32)ino); |
@@ -1785,7 +1785,7 @@ int audit_update_lsm_rules(void) | |||
1785 | watch = entry->rule.watch; | 1785 | watch = entry->rule.watch; |
1786 | tree = entry->rule.tree; | 1786 | tree = entry->rule.tree; |
1787 | nentry = audit_dupe_rule(&entry->rule, watch); | 1787 | nentry = audit_dupe_rule(&entry->rule, watch); |
1788 | if (unlikely(IS_ERR(nentry))) { | 1788 | if (IS_ERR(nentry)) { |
1789 | /* save the first error encountered for the | 1789 | /* save the first error encountered for the |
1790 | * return value */ | 1790 | * return value */ |
1791 | if (!err) | 1791 | if (!err) |
diff --git a/kernel/bounds.c b/kernel/bounds.c index c3c55544db2f..3c5301381837 100644 --- a/kernel/bounds.c +++ b/kernel/bounds.c | |||
@@ -8,11 +8,7 @@ | |||
8 | /* Include headers that define the enum constants of interest */ | 8 | /* Include headers that define the enum constants of interest */ |
9 | #include <linux/page-flags.h> | 9 | #include <linux/page-flags.h> |
10 | #include <linux/mmzone.h> | 10 | #include <linux/mmzone.h> |
11 | 11 | #include <linux/kbuild.h> | |
12 | #define DEFINE(sym, val) \ | ||
13 | asm volatile("\n->" #sym " %0 " #val : : "i" (val)) | ||
14 | |||
15 | #define BLANK() asm volatile("\n->" : : ) | ||
16 | 12 | ||
17 | void foo(void) | 13 | void foo(void) |
18 | { | 14 | { |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 6d8de051382b..b9d467d83fc1 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <linux/kmod.h> | 44 | #include <linux/kmod.h> |
45 | #include <linux/delayacct.h> | 45 | #include <linux/delayacct.h> |
46 | #include <linux/cgroupstats.h> | 46 | #include <linux/cgroupstats.h> |
47 | #include <linux/hash.h> | ||
47 | 48 | ||
48 | #include <asm/atomic.h> | 49 | #include <asm/atomic.h> |
49 | 50 | ||
@@ -118,17 +119,7 @@ static int root_count; | |||
118 | * be called. | 119 | * be called. |
119 | */ | 120 | */ |
120 | static int need_forkexit_callback; | 121 | static int need_forkexit_callback; |
121 | 122 | static int need_mm_owner_callback __read_mostly; | |
122 | /* bits in struct cgroup flags field */ | ||
123 | enum { | ||
124 | /* Control Group is dead */ | ||
125 | CGRP_REMOVED, | ||
126 | /* Control Group has previously had a child cgroup or a task, | ||
127 | * but no longer (only if CGRP_NOTIFY_ON_RELEASE is set) */ | ||
128 | CGRP_RELEASABLE, | ||
129 | /* Control Group requires release notifications to userspace */ | ||
130 | CGRP_NOTIFY_ON_RELEASE, | ||
131 | }; | ||
132 | 123 | ||
133 | /* convenient tests for these bits */ | 124 | /* convenient tests for these bits */ |
134 | inline int cgroup_is_removed(const struct cgroup *cgrp) | 125 | inline int cgroup_is_removed(const struct cgroup *cgrp) |
@@ -204,6 +195,27 @@ static struct cg_cgroup_link init_css_set_link; | |||
204 | static DEFINE_RWLOCK(css_set_lock); | 195 | static DEFINE_RWLOCK(css_set_lock); |
205 | static int css_set_count; | 196 | static int css_set_count; |
206 | 197 | ||
198 | /* hash table for cgroup groups. This improves the performance to | ||
199 | * find an existing css_set */ | ||
200 | #define CSS_SET_HASH_BITS 7 | ||
201 | #define CSS_SET_TABLE_SIZE (1 << CSS_SET_HASH_BITS) | ||
202 | static struct hlist_head css_set_table[CSS_SET_TABLE_SIZE]; | ||
203 | |||
204 | static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[]) | ||
205 | { | ||
206 | int i; | ||
207 | int index; | ||
208 | unsigned long tmp = 0UL; | ||
209 | |||
210 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) | ||
211 | tmp += (unsigned long)css[i]; | ||
212 | tmp = (tmp >> 16) ^ tmp; | ||
213 | |||
214 | index = hash_long(tmp, CSS_SET_HASH_BITS); | ||
215 | |||
216 | return &css_set_table[index]; | ||
217 | } | ||
218 | |||
207 | /* We don't maintain the lists running through each css_set to its | 219 | /* We don't maintain the lists running through each css_set to its |
208 | * task until after the first call to cgroup_iter_start(). This | 220 | * task until after the first call to cgroup_iter_start(). This |
209 | * reduces the fork()/exit() overhead for people who have cgroups | 221 | * reduces the fork()/exit() overhead for people who have cgroups |
@@ -230,7 +242,7 @@ static int use_task_css_set_links; | |||
230 | static void unlink_css_set(struct css_set *cg) | 242 | static void unlink_css_set(struct css_set *cg) |
231 | { | 243 | { |
232 | write_lock(&css_set_lock); | 244 | write_lock(&css_set_lock); |
233 | list_del(&cg->list); | 245 | hlist_del(&cg->hlist); |
234 | css_set_count--; | 246 | css_set_count--; |
235 | while (!list_empty(&cg->cg_links)) { | 247 | while (!list_empty(&cg->cg_links)) { |
236 | struct cg_cgroup_link *link; | 248 | struct cg_cgroup_link *link; |
@@ -295,9 +307,7 @@ static inline void put_css_set_taskexit(struct css_set *cg) | |||
295 | /* | 307 | /* |
296 | * find_existing_css_set() is a helper for | 308 | * find_existing_css_set() is a helper for |
297 | * find_css_set(), and checks to see whether an existing | 309 | * find_css_set(), and checks to see whether an existing |
298 | * css_set is suitable. This currently walks a linked-list for | 310 | * css_set is suitable. |
299 | * simplicity; a later patch will use a hash table for better | ||
300 | * performance | ||
301 | * | 311 | * |
302 | * oldcg: the cgroup group that we're using before the cgroup | 312 | * oldcg: the cgroup group that we're using before the cgroup |
303 | * transition | 313 | * transition |
@@ -314,7 +324,9 @@ static struct css_set *find_existing_css_set( | |||
314 | { | 324 | { |
315 | int i; | 325 | int i; |
316 | struct cgroupfs_root *root = cgrp->root; | 326 | struct cgroupfs_root *root = cgrp->root; |
317 | struct list_head *l = &init_css_set.list; | 327 | struct hlist_head *hhead; |
328 | struct hlist_node *node; | ||
329 | struct css_set *cg; | ||
318 | 330 | ||
319 | /* Built the set of subsystem state objects that we want to | 331 | /* Built the set of subsystem state objects that we want to |
320 | * see in the new css_set */ | 332 | * see in the new css_set */ |
@@ -331,18 +343,13 @@ static struct css_set *find_existing_css_set( | |||
331 | } | 343 | } |
332 | } | 344 | } |
333 | 345 | ||
334 | /* Look through existing cgroup groups to find one to reuse */ | 346 | hhead = css_set_hash(template); |
335 | do { | 347 | hlist_for_each_entry(cg, node, hhead, hlist) { |
336 | struct css_set *cg = | ||
337 | list_entry(l, struct css_set, list); | ||
338 | |||
339 | if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { | 348 | if (!memcmp(template, cg->subsys, sizeof(cg->subsys))) { |
340 | /* All subsystems matched */ | 349 | /* All subsystems matched */ |
341 | return cg; | 350 | return cg; |
342 | } | 351 | } |
343 | /* Try the next cgroup group */ | 352 | } |
344 | l = l->next; | ||
345 | } while (l != &init_css_set.list); | ||
346 | 353 | ||
347 | /* No existing cgroup group matched */ | 354 | /* No existing cgroup group matched */ |
348 | return NULL; | 355 | return NULL; |
@@ -404,6 +411,8 @@ static struct css_set *find_css_set( | |||
404 | struct list_head tmp_cg_links; | 411 | struct list_head tmp_cg_links; |
405 | struct cg_cgroup_link *link; | 412 | struct cg_cgroup_link *link; |
406 | 413 | ||
414 | struct hlist_head *hhead; | ||
415 | |||
407 | /* First see if we already have a cgroup group that matches | 416 | /* First see if we already have a cgroup group that matches |
408 | * the desired set */ | 417 | * the desired set */ |
409 | write_lock(&css_set_lock); | 418 | write_lock(&css_set_lock); |
@@ -428,6 +437,7 @@ static struct css_set *find_css_set( | |||
428 | kref_init(&res->ref); | 437 | kref_init(&res->ref); |
429 | INIT_LIST_HEAD(&res->cg_links); | 438 | INIT_LIST_HEAD(&res->cg_links); |
430 | INIT_LIST_HEAD(&res->tasks); | 439 | INIT_LIST_HEAD(&res->tasks); |
440 | INIT_HLIST_NODE(&res->hlist); | ||
431 | 441 | ||
432 | /* Copy the set of subsystem state objects generated in | 442 | /* Copy the set of subsystem state objects generated in |
433 | * find_existing_css_set() */ | 443 | * find_existing_css_set() */ |
@@ -467,9 +477,12 @@ static struct css_set *find_css_set( | |||
467 | 477 | ||
468 | BUG_ON(!list_empty(&tmp_cg_links)); | 478 | BUG_ON(!list_empty(&tmp_cg_links)); |
469 | 479 | ||
470 | /* Link this cgroup group into the list */ | ||
471 | list_add(&res->list, &init_css_set.list); | ||
472 | css_set_count++; | 480 | css_set_count++; |
481 | |||
482 | /* Add this cgroup group to the hash table */ | ||
483 | hhead = css_set_hash(res->subsys); | ||
484 | hlist_add_head(&res->hlist, hhead); | ||
485 | |||
473 | write_unlock(&css_set_lock); | 486 | write_unlock(&css_set_lock); |
474 | 487 | ||
475 | return res; | 488 | return res; |
@@ -948,7 +961,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
948 | int ret = 0; | 961 | int ret = 0; |
949 | struct super_block *sb; | 962 | struct super_block *sb; |
950 | struct cgroupfs_root *root; | 963 | struct cgroupfs_root *root; |
951 | struct list_head tmp_cg_links, *l; | 964 | struct list_head tmp_cg_links; |
952 | INIT_LIST_HEAD(&tmp_cg_links); | 965 | INIT_LIST_HEAD(&tmp_cg_links); |
953 | 966 | ||
954 | /* First find the desired set of subsystems */ | 967 | /* First find the desired set of subsystems */ |
@@ -990,6 +1003,7 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
990 | /* New superblock */ | 1003 | /* New superblock */ |
991 | struct cgroup *cgrp = &root->top_cgroup; | 1004 | struct cgroup *cgrp = &root->top_cgroup; |
992 | struct inode *inode; | 1005 | struct inode *inode; |
1006 | int i; | ||
993 | 1007 | ||
994 | BUG_ON(sb->s_root != NULL); | 1008 | BUG_ON(sb->s_root != NULL); |
995 | 1009 | ||
@@ -1034,22 +1048,25 @@ static int cgroup_get_sb(struct file_system_type *fs_type, | |||
1034 | /* Link the top cgroup in this hierarchy into all | 1048 | /* Link the top cgroup in this hierarchy into all |
1035 | * the css_set objects */ | 1049 | * the css_set objects */ |
1036 | write_lock(&css_set_lock); | 1050 | write_lock(&css_set_lock); |
1037 | l = &init_css_set.list; | 1051 | for (i = 0; i < CSS_SET_TABLE_SIZE; i++) { |
1038 | do { | 1052 | struct hlist_head *hhead = &css_set_table[i]; |
1053 | struct hlist_node *node; | ||
1039 | struct css_set *cg; | 1054 | struct css_set *cg; |
1040 | struct cg_cgroup_link *link; | 1055 | |
1041 | cg = list_entry(l, struct css_set, list); | 1056 | hlist_for_each_entry(cg, node, hhead, hlist) { |
1042 | BUG_ON(list_empty(&tmp_cg_links)); | 1057 | struct cg_cgroup_link *link; |
1043 | link = list_entry(tmp_cg_links.next, | 1058 | |
1044 | struct cg_cgroup_link, | 1059 | BUG_ON(list_empty(&tmp_cg_links)); |
1045 | cgrp_link_list); | 1060 | link = list_entry(tmp_cg_links.next, |
1046 | list_del(&link->cgrp_link_list); | 1061 | struct cg_cgroup_link, |
1047 | link->cg = cg; | 1062 | cgrp_link_list); |
1048 | list_add(&link->cgrp_link_list, | 1063 | list_del(&link->cgrp_link_list); |
1049 | &root->top_cgroup.css_sets); | 1064 | link->cg = cg; |
1050 | list_add(&link->cg_link_list, &cg->cg_links); | 1065 | list_add(&link->cgrp_link_list, |
1051 | l = l->next; | 1066 | &root->top_cgroup.css_sets); |
1052 | } while (l != &init_css_set.list); | 1067 | list_add(&link->cg_link_list, &cg->cg_links); |
1068 | } | ||
1069 | } | ||
1053 | write_unlock(&css_set_lock); | 1070 | write_unlock(&css_set_lock); |
1054 | 1071 | ||
1055 | free_cg_links(&tmp_cg_links); | 1072 | free_cg_links(&tmp_cg_links); |
@@ -1307,18 +1324,16 @@ enum cgroup_filetype { | |||
1307 | FILE_DIR, | 1324 | FILE_DIR, |
1308 | FILE_TASKLIST, | 1325 | FILE_TASKLIST, |
1309 | FILE_NOTIFY_ON_RELEASE, | 1326 | FILE_NOTIFY_ON_RELEASE, |
1310 | FILE_RELEASABLE, | ||
1311 | FILE_RELEASE_AGENT, | 1327 | FILE_RELEASE_AGENT, |
1312 | }; | 1328 | }; |
1313 | 1329 | ||
1314 | static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft, | 1330 | static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft, |
1315 | struct file *file, | 1331 | struct file *file, |
1316 | const char __user *userbuf, | 1332 | const char __user *userbuf, |
1317 | size_t nbytes, loff_t *unused_ppos) | 1333 | size_t nbytes, loff_t *unused_ppos) |
1318 | { | 1334 | { |
1319 | char buffer[64]; | 1335 | char buffer[64]; |
1320 | int retval = 0; | 1336 | int retval = 0; |
1321 | u64 val; | ||
1322 | char *end; | 1337 | char *end; |
1323 | 1338 | ||
1324 | if (!nbytes) | 1339 | if (!nbytes) |
@@ -1329,16 +1344,18 @@ static ssize_t cgroup_write_uint(struct cgroup *cgrp, struct cftype *cft, | |||
1329 | return -EFAULT; | 1344 | return -EFAULT; |
1330 | 1345 | ||
1331 | buffer[nbytes] = 0; /* nul-terminate */ | 1346 | buffer[nbytes] = 0; /* nul-terminate */ |
1332 | 1347 | strstrip(buffer); | |
1333 | /* strip newline if necessary */ | 1348 | if (cft->write_u64) { |
1334 | if (nbytes && (buffer[nbytes-1] == '\n')) | 1349 | u64 val = simple_strtoull(buffer, &end, 0); |
1335 | buffer[nbytes-1] = 0; | 1350 | if (*end) |
1336 | val = simple_strtoull(buffer, &end, 0); | 1351 | return -EINVAL; |
1337 | if (*end) | 1352 | retval = cft->write_u64(cgrp, cft, val); |
1338 | return -EINVAL; | 1353 | } else { |
1339 | 1354 | s64 val = simple_strtoll(buffer, &end, 0); | |
1340 | /* Pass to subsystem */ | 1355 | if (*end) |
1341 | retval = cft->write_uint(cgrp, cft, val); | 1356 | return -EINVAL; |
1357 | retval = cft->write_s64(cgrp, cft, val); | ||
1358 | } | ||
1342 | if (!retval) | 1359 | if (!retval) |
1343 | retval = nbytes; | 1360 | retval = nbytes; |
1344 | return retval; | 1361 | return retval; |
@@ -1419,23 +1436,39 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, | |||
1419 | return -ENODEV; | 1436 | return -ENODEV; |
1420 | if (cft->write) | 1437 | if (cft->write) |
1421 | return cft->write(cgrp, cft, file, buf, nbytes, ppos); | 1438 | return cft->write(cgrp, cft, file, buf, nbytes, ppos); |
1422 | if (cft->write_uint) | 1439 | if (cft->write_u64 || cft->write_s64) |
1423 | return cgroup_write_uint(cgrp, cft, file, buf, nbytes, ppos); | 1440 | return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos); |
1441 | if (cft->trigger) { | ||
1442 | int ret = cft->trigger(cgrp, (unsigned int)cft->private); | ||
1443 | return ret ? ret : nbytes; | ||
1444 | } | ||
1424 | return -EINVAL; | 1445 | return -EINVAL; |
1425 | } | 1446 | } |
1426 | 1447 | ||
1427 | static ssize_t cgroup_read_uint(struct cgroup *cgrp, struct cftype *cft, | 1448 | static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft, |
1428 | struct file *file, | 1449 | struct file *file, |
1429 | char __user *buf, size_t nbytes, | 1450 | char __user *buf, size_t nbytes, |
1430 | loff_t *ppos) | 1451 | loff_t *ppos) |
1431 | { | 1452 | { |
1432 | char tmp[64]; | 1453 | char tmp[64]; |
1433 | u64 val = cft->read_uint(cgrp, cft); | 1454 | u64 val = cft->read_u64(cgrp, cft); |
1434 | int len = sprintf(tmp, "%llu\n", (unsigned long long) val); | 1455 | int len = sprintf(tmp, "%llu\n", (unsigned long long) val); |
1435 | 1456 | ||
1436 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | 1457 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); |
1437 | } | 1458 | } |
1438 | 1459 | ||
1460 | static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft, | ||
1461 | struct file *file, | ||
1462 | char __user *buf, size_t nbytes, | ||
1463 | loff_t *ppos) | ||
1464 | { | ||
1465 | char tmp[64]; | ||
1466 | s64 val = cft->read_s64(cgrp, cft); | ||
1467 | int len = sprintf(tmp, "%lld\n", (long long) val); | ||
1468 | |||
1469 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | ||
1470 | } | ||
1471 | |||
1439 | static ssize_t cgroup_common_file_read(struct cgroup *cgrp, | 1472 | static ssize_t cgroup_common_file_read(struct cgroup *cgrp, |
1440 | struct cftype *cft, | 1473 | struct cftype *cft, |
1441 | struct file *file, | 1474 | struct file *file, |
@@ -1490,11 +1523,56 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf, | |||
1490 | 1523 | ||
1491 | if (cft->read) | 1524 | if (cft->read) |
1492 | return cft->read(cgrp, cft, file, buf, nbytes, ppos); | 1525 | return cft->read(cgrp, cft, file, buf, nbytes, ppos); |
1493 | if (cft->read_uint) | 1526 | if (cft->read_u64) |
1494 | return cgroup_read_uint(cgrp, cft, file, buf, nbytes, ppos); | 1527 | return cgroup_read_u64(cgrp, cft, file, buf, nbytes, ppos); |
1528 | if (cft->read_s64) | ||
1529 | return cgroup_read_s64(cgrp, cft, file, buf, nbytes, ppos); | ||
1495 | return -EINVAL; | 1530 | return -EINVAL; |
1496 | } | 1531 | } |
1497 | 1532 | ||
1533 | /* | ||
1534 | * seqfile ops/methods for returning structured data. Currently just | ||
1535 | * supports string->u64 maps, but can be extended in future. | ||
1536 | */ | ||
1537 | |||
1538 | struct cgroup_seqfile_state { | ||
1539 | struct cftype *cft; | ||
1540 | struct cgroup *cgroup; | ||
1541 | }; | ||
1542 | |||
1543 | static int cgroup_map_add(struct cgroup_map_cb *cb, const char *key, u64 value) | ||
1544 | { | ||
1545 | struct seq_file *sf = cb->state; | ||
1546 | return seq_printf(sf, "%s %llu\n", key, (unsigned long long)value); | ||
1547 | } | ||
1548 | |||
1549 | static int cgroup_seqfile_show(struct seq_file *m, void *arg) | ||
1550 | { | ||
1551 | struct cgroup_seqfile_state *state = m->private; | ||
1552 | struct cftype *cft = state->cft; | ||
1553 | if (cft->read_map) { | ||
1554 | struct cgroup_map_cb cb = { | ||
1555 | .fill = cgroup_map_add, | ||
1556 | .state = m, | ||
1557 | }; | ||
1558 | return cft->read_map(state->cgroup, cft, &cb); | ||
1559 | } | ||
1560 | return cft->read_seq_string(state->cgroup, cft, m); | ||
1561 | } | ||
1562 | |||
1563 | int cgroup_seqfile_release(struct inode *inode, struct file *file) | ||
1564 | { | ||
1565 | struct seq_file *seq = file->private_data; | ||
1566 | kfree(seq->private); | ||
1567 | return single_release(inode, file); | ||
1568 | } | ||
1569 | |||
1570 | static struct file_operations cgroup_seqfile_operations = { | ||
1571 | .read = seq_read, | ||
1572 | .llseek = seq_lseek, | ||
1573 | .release = cgroup_seqfile_release, | ||
1574 | }; | ||
1575 | |||
1498 | static int cgroup_file_open(struct inode *inode, struct file *file) | 1576 | static int cgroup_file_open(struct inode *inode, struct file *file) |
1499 | { | 1577 | { |
1500 | int err; | 1578 | int err; |
@@ -1507,7 +1585,18 @@ static int cgroup_file_open(struct inode *inode, struct file *file) | |||
1507 | cft = __d_cft(file->f_dentry); | 1585 | cft = __d_cft(file->f_dentry); |
1508 | if (!cft) | 1586 | if (!cft) |
1509 | return -ENODEV; | 1587 | return -ENODEV; |
1510 | if (cft->open) | 1588 | if (cft->read_map || cft->read_seq_string) { |
1589 | struct cgroup_seqfile_state *state = | ||
1590 | kzalloc(sizeof(*state), GFP_USER); | ||
1591 | if (!state) | ||
1592 | return -ENOMEM; | ||
1593 | state->cft = cft; | ||
1594 | state->cgroup = __d_cgrp(file->f_dentry->d_parent); | ||
1595 | file->f_op = &cgroup_seqfile_operations; | ||
1596 | err = single_open(file, cgroup_seqfile_show, state); | ||
1597 | if (err < 0) | ||
1598 | kfree(state); | ||
1599 | } else if (cft->open) | ||
1511 | err = cft->open(inode, file); | 1600 | err = cft->open(inode, file); |
1512 | else | 1601 | else |
1513 | err = 0; | 1602 | err = 0; |
@@ -1715,7 +1804,7 @@ static void cgroup_advance_iter(struct cgroup *cgrp, | |||
1715 | * The tasklist_lock is not held here, as do_each_thread() and | 1804 | * The tasklist_lock is not held here, as do_each_thread() and |
1716 | * while_each_thread() are protected by RCU. | 1805 | * while_each_thread() are protected by RCU. |
1717 | */ | 1806 | */ |
1718 | void cgroup_enable_task_cg_lists(void) | 1807 | static void cgroup_enable_task_cg_lists(void) |
1719 | { | 1808 | { |
1720 | struct task_struct *p, *g; | 1809 | struct task_struct *p, *g; |
1721 | write_lock(&css_set_lock); | 1810 | write_lock(&css_set_lock); |
@@ -1913,14 +2002,14 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan) | |||
1913 | 2002 | ||
1914 | if (heap->size) { | 2003 | if (heap->size) { |
1915 | for (i = 0; i < heap->size; i++) { | 2004 | for (i = 0; i < heap->size; i++) { |
1916 | struct task_struct *p = heap->ptrs[i]; | 2005 | struct task_struct *q = heap->ptrs[i]; |
1917 | if (i == 0) { | 2006 | if (i == 0) { |
1918 | latest_time = p->start_time; | 2007 | latest_time = q->start_time; |
1919 | latest_task = p; | 2008 | latest_task = q; |
1920 | } | 2009 | } |
1921 | /* Process the task per the caller's callback */ | 2010 | /* Process the task per the caller's callback */ |
1922 | scan->process_task(p, scan); | 2011 | scan->process_task(q, scan); |
1923 | put_task_struct(p); | 2012 | put_task_struct(q); |
1924 | } | 2013 | } |
1925 | /* | 2014 | /* |
1926 | * If we had to process any tasks at all, scan again | 2015 | * If we had to process any tasks at all, scan again |
@@ -2138,11 +2227,6 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp, | |||
2138 | return notify_on_release(cgrp); | 2227 | return notify_on_release(cgrp); |
2139 | } | 2228 | } |
2140 | 2229 | ||
2141 | static u64 cgroup_read_releasable(struct cgroup *cgrp, struct cftype *cft) | ||
2142 | { | ||
2143 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
2144 | } | ||
2145 | |||
2146 | /* | 2230 | /* |
2147 | * for the common functions, 'private' gives the type of file | 2231 | * for the common functions, 'private' gives the type of file |
2148 | */ | 2232 | */ |
@@ -2158,16 +2242,10 @@ static struct cftype files[] = { | |||
2158 | 2242 | ||
2159 | { | 2243 | { |
2160 | .name = "notify_on_release", | 2244 | .name = "notify_on_release", |
2161 | .read_uint = cgroup_read_notify_on_release, | 2245 | .read_u64 = cgroup_read_notify_on_release, |
2162 | .write = cgroup_common_file_write, | 2246 | .write = cgroup_common_file_write, |
2163 | .private = FILE_NOTIFY_ON_RELEASE, | 2247 | .private = FILE_NOTIFY_ON_RELEASE, |
2164 | }, | 2248 | }, |
2165 | |||
2166 | { | ||
2167 | .name = "releasable", | ||
2168 | .read_uint = cgroup_read_releasable, | ||
2169 | .private = FILE_RELEASABLE, | ||
2170 | } | ||
2171 | }; | 2249 | }; |
2172 | 2250 | ||
2173 | static struct cftype cft_release_agent = { | 2251 | static struct cftype cft_release_agent = { |
@@ -2401,10 +2479,9 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) | |||
2401 | return 0; | 2479 | return 0; |
2402 | } | 2480 | } |
2403 | 2481 | ||
2404 | static void cgroup_init_subsys(struct cgroup_subsys *ss) | 2482 | static void __init cgroup_init_subsys(struct cgroup_subsys *ss) |
2405 | { | 2483 | { |
2406 | struct cgroup_subsys_state *css; | 2484 | struct cgroup_subsys_state *css; |
2407 | struct list_head *l; | ||
2408 | 2485 | ||
2409 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); | 2486 | printk(KERN_INFO "Initializing cgroup subsys %s\n", ss->name); |
2410 | 2487 | ||
@@ -2415,34 +2492,19 @@ static void cgroup_init_subsys(struct cgroup_subsys *ss) | |||
2415 | BUG_ON(IS_ERR(css)); | 2492 | BUG_ON(IS_ERR(css)); |
2416 | init_cgroup_css(css, ss, dummytop); | 2493 | init_cgroup_css(css, ss, dummytop); |
2417 | 2494 | ||
2418 | /* Update all cgroup groups to contain a subsys | 2495 | /* Update the init_css_set to contain a subsys |
2419 | * pointer to this state - since the subsystem is | 2496 | * pointer to this state - since the subsystem is |
2420 | * newly registered, all tasks and hence all cgroup | 2497 | * newly registered, all tasks and hence the |
2421 | * groups are in the subsystem's top cgroup. */ | 2498 | * init_css_set is in the subsystem's top cgroup. */ |
2422 | write_lock(&css_set_lock); | 2499 | init_css_set.subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; |
2423 | l = &init_css_set.list; | ||
2424 | do { | ||
2425 | struct css_set *cg = | ||
2426 | list_entry(l, struct css_set, list); | ||
2427 | cg->subsys[ss->subsys_id] = dummytop->subsys[ss->subsys_id]; | ||
2428 | l = l->next; | ||
2429 | } while (l != &init_css_set.list); | ||
2430 | write_unlock(&css_set_lock); | ||
2431 | |||
2432 | /* If this subsystem requested that it be notified with fork | ||
2433 | * events, we should send it one now for every process in the | ||
2434 | * system */ | ||
2435 | if (ss->fork) { | ||
2436 | struct task_struct *g, *p; | ||
2437 | |||
2438 | read_lock(&tasklist_lock); | ||
2439 | do_each_thread(g, p) { | ||
2440 | ss->fork(ss, p); | ||
2441 | } while_each_thread(g, p); | ||
2442 | read_unlock(&tasklist_lock); | ||
2443 | } | ||
2444 | 2500 | ||
2445 | need_forkexit_callback |= ss->fork || ss->exit; | 2501 | need_forkexit_callback |= ss->fork || ss->exit; |
2502 | need_mm_owner_callback |= !!ss->mm_owner_changed; | ||
2503 | |||
2504 | /* At system boot, before all subsystems have been | ||
2505 | * registered, no tasks have been forked, so we don't | ||
2506 | * need to invoke fork callbacks here. */ | ||
2507 | BUG_ON(!list_empty(&init_task.tasks)); | ||
2446 | 2508 | ||
2447 | ss->active = 1; | 2509 | ss->active = 1; |
2448 | } | 2510 | } |
@@ -2458,9 +2520,9 @@ int __init cgroup_init_early(void) | |||
2458 | int i; | 2520 | int i; |
2459 | kref_init(&init_css_set.ref); | 2521 | kref_init(&init_css_set.ref); |
2460 | kref_get(&init_css_set.ref); | 2522 | kref_get(&init_css_set.ref); |
2461 | INIT_LIST_HEAD(&init_css_set.list); | ||
2462 | INIT_LIST_HEAD(&init_css_set.cg_links); | 2523 | INIT_LIST_HEAD(&init_css_set.cg_links); |
2463 | INIT_LIST_HEAD(&init_css_set.tasks); | 2524 | INIT_LIST_HEAD(&init_css_set.tasks); |
2525 | INIT_HLIST_NODE(&init_css_set.hlist); | ||
2464 | css_set_count = 1; | 2526 | css_set_count = 1; |
2465 | init_cgroup_root(&rootnode); | 2527 | init_cgroup_root(&rootnode); |
2466 | list_add(&rootnode.root_list, &roots); | 2528 | list_add(&rootnode.root_list, &roots); |
@@ -2473,6 +2535,9 @@ int __init cgroup_init_early(void) | |||
2473 | list_add(&init_css_set_link.cg_link_list, | 2535 | list_add(&init_css_set_link.cg_link_list, |
2474 | &init_css_set.cg_links); | 2536 | &init_css_set.cg_links); |
2475 | 2537 | ||
2538 | for (i = 0; i < CSS_SET_TABLE_SIZE; i++) | ||
2539 | INIT_HLIST_HEAD(&css_set_table[i]); | ||
2540 | |||
2476 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | 2541 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { |
2477 | struct cgroup_subsys *ss = subsys[i]; | 2542 | struct cgroup_subsys *ss = subsys[i]; |
2478 | 2543 | ||
@@ -2502,7 +2567,7 @@ int __init cgroup_init(void) | |||
2502 | { | 2567 | { |
2503 | int err; | 2568 | int err; |
2504 | int i; | 2569 | int i; |
2505 | struct proc_dir_entry *entry; | 2570 | struct hlist_head *hhead; |
2506 | 2571 | ||
2507 | err = bdi_init(&cgroup_backing_dev_info); | 2572 | err = bdi_init(&cgroup_backing_dev_info); |
2508 | if (err) | 2573 | if (err) |
@@ -2514,13 +2579,15 @@ int __init cgroup_init(void) | |||
2514 | cgroup_init_subsys(ss); | 2579 | cgroup_init_subsys(ss); |
2515 | } | 2580 | } |
2516 | 2581 | ||
2582 | /* Add init_css_set to the hash table */ | ||
2583 | hhead = css_set_hash(init_css_set.subsys); | ||
2584 | hlist_add_head(&init_css_set.hlist, hhead); | ||
2585 | |||
2517 | err = register_filesystem(&cgroup_fs_type); | 2586 | err = register_filesystem(&cgroup_fs_type); |
2518 | if (err < 0) | 2587 | if (err < 0) |
2519 | goto out; | 2588 | goto out; |
2520 | 2589 | ||
2521 | entry = create_proc_entry("cgroups", 0, NULL); | 2590 | proc_create("cgroups", 0, NULL, &proc_cgroupstats_operations); |
2522 | if (entry) | ||
2523 | entry->proc_fops = &proc_cgroupstats_operations; | ||
2524 | 2591 | ||
2525 | out: | 2592 | out: |
2526 | if (err) | 2593 | if (err) |
@@ -2683,6 +2750,34 @@ void cgroup_fork_callbacks(struct task_struct *child) | |||
2683 | } | 2750 | } |
2684 | } | 2751 | } |
2685 | 2752 | ||
2753 | #ifdef CONFIG_MM_OWNER | ||
2754 | /** | ||
2755 | * cgroup_mm_owner_callbacks - run callbacks when the mm->owner changes | ||
2756 | * @p: the new owner | ||
2757 | * | ||
2758 | * Called on every change to mm->owner. mm_init_owner() does not | ||
2759 | * invoke this routine, since it assigns the mm->owner the first time | ||
2760 | * and does not change it. | ||
2761 | */ | ||
2762 | void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) | ||
2763 | { | ||
2764 | struct cgroup *oldcgrp, *newcgrp; | ||
2765 | |||
2766 | if (need_mm_owner_callback) { | ||
2767 | int i; | ||
2768 | for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { | ||
2769 | struct cgroup_subsys *ss = subsys[i]; | ||
2770 | oldcgrp = task_cgroup(old, ss->subsys_id); | ||
2771 | newcgrp = task_cgroup(new, ss->subsys_id); | ||
2772 | if (oldcgrp == newcgrp) | ||
2773 | continue; | ||
2774 | if (ss->mm_owner_changed) | ||
2775 | ss->mm_owner_changed(ss, oldcgrp, newcgrp); | ||
2776 | } | ||
2777 | } | ||
2778 | } | ||
2779 | #endif /* CONFIG_MM_OWNER */ | ||
2780 | |||
2686 | /** | 2781 | /** |
2687 | * cgroup_post_fork - called on a new task after adding it to the task list | 2782 | * cgroup_post_fork - called on a new task after adding it to the task list |
2688 | * @child: the task in question | 2783 | * @child: the task in question |
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c index 37301e877cb0..c3dc3aba4c02 100644 --- a/kernel/cgroup_debug.c +++ b/kernel/cgroup_debug.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * kernel/ccontainer_debug.c - Example cgroup subsystem that | 2 | * kernel/cgroup_debug.c - Example cgroup subsystem that |
3 | * exposes debug info | 3 | * exposes debug info |
4 | * | 4 | * |
5 | * Copyright (C) Google Inc, 2007 | 5 | * Copyright (C) Google Inc, 2007 |
@@ -62,25 +62,35 @@ static u64 current_css_set_refcount_read(struct cgroup *cont, | |||
62 | return count; | 62 | return count; |
63 | } | 63 | } |
64 | 64 | ||
65 | static u64 releasable_read(struct cgroup *cgrp, struct cftype *cft) | ||
66 | { | ||
67 | return test_bit(CGRP_RELEASABLE, &cgrp->flags); | ||
68 | } | ||
69 | |||
65 | static struct cftype files[] = { | 70 | static struct cftype files[] = { |
66 | { | 71 | { |
67 | .name = "cgroup_refcount", | 72 | .name = "cgroup_refcount", |
68 | .read_uint = cgroup_refcount_read, | 73 | .read_u64 = cgroup_refcount_read, |
69 | }, | 74 | }, |
70 | { | 75 | { |
71 | .name = "taskcount", | 76 | .name = "taskcount", |
72 | .read_uint = taskcount_read, | 77 | .read_u64 = taskcount_read, |
73 | }, | 78 | }, |
74 | 79 | ||
75 | { | 80 | { |
76 | .name = "current_css_set", | 81 | .name = "current_css_set", |
77 | .read_uint = current_css_set_read, | 82 | .read_u64 = current_css_set_read, |
78 | }, | 83 | }, |
79 | 84 | ||
80 | { | 85 | { |
81 | .name = "current_css_set_refcount", | 86 | .name = "current_css_set_refcount", |
82 | .read_uint = current_css_set_refcount_read, | 87 | .read_u64 = current_css_set_refcount_read, |
83 | }, | 88 | }, |
89 | |||
90 | { | ||
91 | .name = "releasable", | ||
92 | .read_u64 = releasable_read, | ||
93 | } | ||
84 | }; | 94 | }; |
85 | 95 | ||
86 | static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) | 96 | static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
diff --git a/kernel/configs.c b/kernel/configs.c index e84d3f9c6c7b..4c345210ed8c 100644 --- a/kernel/configs.c +++ b/kernel/configs.c | |||
@@ -79,12 +79,11 @@ static int __init ikconfig_init(void) | |||
79 | struct proc_dir_entry *entry; | 79 | struct proc_dir_entry *entry; |
80 | 80 | ||
81 | /* create the current config file */ | 81 | /* create the current config file */ |
82 | entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO, | 82 | entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, |
83 | &proc_root); | 83 | &ikconfig_file_ops); |
84 | if (!entry) | 84 | if (!entry) |
85 | return -ENOMEM; | 85 | return -ENOMEM; |
86 | 86 | ||
87 | entry->proc_fops = &ikconfig_file_ops; | ||
88 | entry->size = kernel_config_data_size; | 87 | entry->size = kernel_config_data_size; |
89 | 88 | ||
90 | return 0; | 89 | return 0; |
@@ -95,7 +94,7 @@ static int __init ikconfig_init(void) | |||
95 | 94 | ||
96 | static void __exit ikconfig_cleanup(void) | 95 | static void __exit ikconfig_cleanup(void) |
97 | { | 96 | { |
98 | remove_proc_entry("config.gz", &proc_root); | 97 | remove_proc_entry("config.gz", NULL); |
99 | } | 98 | } |
100 | 99 | ||
101 | module_init(ikconfig_init); | 100 | module_init(ikconfig_init); |
diff --git a/kernel/cpu.c b/kernel/cpu.c index 2011ad8d2697..a98f6ab16ecd 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -33,17 +33,13 @@ static struct { | |||
33 | * an ongoing cpu hotplug operation. | 33 | * an ongoing cpu hotplug operation. |
34 | */ | 34 | */ |
35 | int refcount; | 35 | int refcount; |
36 | wait_queue_head_t writer_queue; | ||
37 | } cpu_hotplug; | 36 | } cpu_hotplug; |
38 | 37 | ||
39 | #define writer_exists() (cpu_hotplug.active_writer != NULL) | ||
40 | |||
41 | void __init cpu_hotplug_init(void) | 38 | void __init cpu_hotplug_init(void) |
42 | { | 39 | { |
43 | cpu_hotplug.active_writer = NULL; | 40 | cpu_hotplug.active_writer = NULL; |
44 | mutex_init(&cpu_hotplug.lock); | 41 | mutex_init(&cpu_hotplug.lock); |
45 | cpu_hotplug.refcount = 0; | 42 | cpu_hotplug.refcount = 0; |
46 | init_waitqueue_head(&cpu_hotplug.writer_queue); | ||
47 | } | 43 | } |
48 | 44 | ||
49 | #ifdef CONFIG_HOTPLUG_CPU | 45 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -65,11 +61,8 @@ void put_online_cpus(void) | |||
65 | if (cpu_hotplug.active_writer == current) | 61 | if (cpu_hotplug.active_writer == current) |
66 | return; | 62 | return; |
67 | mutex_lock(&cpu_hotplug.lock); | 63 | mutex_lock(&cpu_hotplug.lock); |
68 | cpu_hotplug.refcount--; | 64 | if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer)) |
69 | 65 | wake_up_process(cpu_hotplug.active_writer); | |
70 | if (unlikely(writer_exists()) && !cpu_hotplug.refcount) | ||
71 | wake_up(&cpu_hotplug.writer_queue); | ||
72 | |||
73 | mutex_unlock(&cpu_hotplug.lock); | 66 | mutex_unlock(&cpu_hotplug.lock); |
74 | 67 | ||
75 | } | 68 | } |
@@ -98,8 +91,8 @@ void cpu_maps_update_done(void) | |||
98 | * Note that during a cpu-hotplug operation, the new readers, if any, | 91 | * Note that during a cpu-hotplug operation, the new readers, if any, |
99 | * will be blocked by the cpu_hotplug.lock | 92 | * will be blocked by the cpu_hotplug.lock |
100 | * | 93 | * |
101 | * Since cpu_maps_update_begin is always called after invoking | 94 | * Since cpu_hotplug_begin() is always called after invoking |
102 | * cpu_maps_update_begin, we can be sure that only one writer is active. | 95 | * cpu_maps_update_begin(), we can be sure that only one writer is active. |
103 | * | 96 | * |
104 | * Note that theoretically, there is a possibility of a livelock: | 97 | * Note that theoretically, there is a possibility of a livelock: |
105 | * - Refcount goes to zero, last reader wakes up the sleeping | 98 | * - Refcount goes to zero, last reader wakes up the sleeping |
@@ -115,19 +108,16 @@ void cpu_maps_update_done(void) | |||
115 | */ | 108 | */ |
116 | static void cpu_hotplug_begin(void) | 109 | static void cpu_hotplug_begin(void) |
117 | { | 110 | { |
118 | DECLARE_WAITQUEUE(wait, current); | ||
119 | |||
120 | mutex_lock(&cpu_hotplug.lock); | ||
121 | |||
122 | cpu_hotplug.active_writer = current; | 111 | cpu_hotplug.active_writer = current; |
123 | add_wait_queue_exclusive(&cpu_hotplug.writer_queue, &wait); | 112 | |
124 | while (cpu_hotplug.refcount) { | 113 | for (;;) { |
125 | set_current_state(TASK_UNINTERRUPTIBLE); | 114 | mutex_lock(&cpu_hotplug.lock); |
115 | if (likely(!cpu_hotplug.refcount)) | ||
116 | break; | ||
117 | __set_current_state(TASK_UNINTERRUPTIBLE); | ||
126 | mutex_unlock(&cpu_hotplug.lock); | 118 | mutex_unlock(&cpu_hotplug.lock); |
127 | schedule(); | 119 | schedule(); |
128 | mutex_lock(&cpu_hotplug.lock); | ||
129 | } | 120 | } |
130 | remove_wait_queue_locked(&cpu_hotplug.writer_queue, &wait); | ||
131 | } | 121 | } |
132 | 122 | ||
133 | static void cpu_hotplug_done(void) | 123 | static void cpu_hotplug_done(void) |
@@ -136,7 +126,7 @@ static void cpu_hotplug_done(void) | |||
136 | mutex_unlock(&cpu_hotplug.lock); | 126 | mutex_unlock(&cpu_hotplug.lock); |
137 | } | 127 | } |
138 | /* Need to know about CPUs going up/down? */ | 128 | /* Need to know about CPUs going up/down? */ |
139 | int __cpuinit register_cpu_notifier(struct notifier_block *nb) | 129 | int __ref register_cpu_notifier(struct notifier_block *nb) |
140 | { | 130 | { |
141 | int ret; | 131 | int ret; |
142 | cpu_maps_update_begin(); | 132 | cpu_maps_update_begin(); |
@@ -149,7 +139,7 @@ int __cpuinit register_cpu_notifier(struct notifier_block *nb) | |||
149 | 139 | ||
150 | EXPORT_SYMBOL(register_cpu_notifier); | 140 | EXPORT_SYMBOL(register_cpu_notifier); |
151 | 141 | ||
152 | void unregister_cpu_notifier(struct notifier_block *nb) | 142 | void __ref unregister_cpu_notifier(struct notifier_block *nb) |
153 | { | 143 | { |
154 | cpu_maps_update_begin(); | 144 | cpu_maps_update_begin(); |
155 | raw_notifier_chain_unregister(&cpu_chain, nb); | 145 | raw_notifier_chain_unregister(&cpu_chain, nb); |
@@ -180,7 +170,7 @@ struct take_cpu_down_param { | |||
180 | }; | 170 | }; |
181 | 171 | ||
182 | /* Take this CPU down. */ | 172 | /* Take this CPU down. */ |
183 | static int take_cpu_down(void *_param) | 173 | static int __ref take_cpu_down(void *_param) |
184 | { | 174 | { |
185 | struct take_cpu_down_param *param = _param; | 175 | struct take_cpu_down_param *param = _param; |
186 | int err; | 176 | int err; |
@@ -199,7 +189,7 @@ static int take_cpu_down(void *_param) | |||
199 | } | 189 | } |
200 | 190 | ||
201 | /* Requires cpu_add_remove_lock to be held */ | 191 | /* Requires cpu_add_remove_lock to be held */ |
202 | static int _cpu_down(unsigned int cpu, int tasks_frozen) | 192 | static int __ref _cpu_down(unsigned int cpu, int tasks_frozen) |
203 | { | 193 | { |
204 | int err, nr_calls = 0; | 194 | int err, nr_calls = 0; |
205 | struct task_struct *p; | 195 | struct task_struct *p; |
@@ -274,7 +264,7 @@ out_release: | |||
274 | return err; | 264 | return err; |
275 | } | 265 | } |
276 | 266 | ||
277 | int cpu_down(unsigned int cpu) | 267 | int __ref cpu_down(unsigned int cpu) |
278 | { | 268 | { |
279 | int err = 0; | 269 | int err = 0; |
280 | 270 | ||
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 48a976c52cf5..8da627d33804 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -127,6 +127,7 @@ struct cpuset_hotplug_scanner { | |||
127 | typedef enum { | 127 | typedef enum { |
128 | CS_CPU_EXCLUSIVE, | 128 | CS_CPU_EXCLUSIVE, |
129 | CS_MEM_EXCLUSIVE, | 129 | CS_MEM_EXCLUSIVE, |
130 | CS_MEM_HARDWALL, | ||
130 | CS_MEMORY_MIGRATE, | 131 | CS_MEMORY_MIGRATE, |
131 | CS_SCHED_LOAD_BALANCE, | 132 | CS_SCHED_LOAD_BALANCE, |
132 | CS_SPREAD_PAGE, | 133 | CS_SPREAD_PAGE, |
@@ -144,6 +145,11 @@ static inline int is_mem_exclusive(const struct cpuset *cs) | |||
144 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); | 145 | return test_bit(CS_MEM_EXCLUSIVE, &cs->flags); |
145 | } | 146 | } |
146 | 147 | ||
148 | static inline int is_mem_hardwall(const struct cpuset *cs) | ||
149 | { | ||
150 | return test_bit(CS_MEM_HARDWALL, &cs->flags); | ||
151 | } | ||
152 | |||
147 | static inline int is_sched_load_balance(const struct cpuset *cs) | 153 | static inline int is_sched_load_balance(const struct cpuset *cs) |
148 | { | 154 | { |
149 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | 155 | return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); |
@@ -735,7 +741,8 @@ static inline int started_after(void *p1, void *p2) | |||
735 | * Return nonzero if this tasks's cpus_allowed mask should be changed (in other | 741 | * Return nonzero if this tasks's cpus_allowed mask should be changed (in other |
736 | * words, if its mask is not equal to its cpuset's mask). | 742 | * words, if its mask is not equal to its cpuset's mask). |
737 | */ | 743 | */ |
738 | int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) | 744 | static int cpuset_test_cpumask(struct task_struct *tsk, |
745 | struct cgroup_scanner *scan) | ||
739 | { | 746 | { |
740 | return !cpus_equal(tsk->cpus_allowed, | 747 | return !cpus_equal(tsk->cpus_allowed, |
741 | (cgroup_cs(scan->cg))->cpus_allowed); | 748 | (cgroup_cs(scan->cg))->cpus_allowed); |
@@ -752,7 +759,8 @@ int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) | |||
752 | * We don't need to re-check for the cgroup/cpuset membership, since we're | 759 | * We don't need to re-check for the cgroup/cpuset membership, since we're |
753 | * holding cgroup_lock() at this point. | 760 | * holding cgroup_lock() at this point. |
754 | */ | 761 | */ |
755 | void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) | 762 | static void cpuset_change_cpumask(struct task_struct *tsk, |
763 | struct cgroup_scanner *scan) | ||
756 | { | 764 | { |
757 | set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed)); | 765 | set_cpus_allowed_ptr(tsk, &((cgroup_cs(scan->cg))->cpus_allowed)); |
758 | } | 766 | } |
@@ -1023,19 +1031,6 @@ int current_cpuset_is_being_rebound(void) | |||
1023 | return task_cs(current) == cpuset_being_rebound; | 1031 | return task_cs(current) == cpuset_being_rebound; |
1024 | } | 1032 | } |
1025 | 1033 | ||
1026 | /* | ||
1027 | * Call with cgroup_mutex held. | ||
1028 | */ | ||
1029 | |||
1030 | static int update_memory_pressure_enabled(struct cpuset *cs, char *buf) | ||
1031 | { | ||
1032 | if (simple_strtoul(buf, NULL, 10) != 0) | ||
1033 | cpuset_memory_pressure_enabled = 1; | ||
1034 | else | ||
1035 | cpuset_memory_pressure_enabled = 0; | ||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | static int update_relax_domain_level(struct cpuset *cs, char *buf) | 1034 | static int update_relax_domain_level(struct cpuset *cs, char *buf) |
1040 | { | 1035 | { |
1041 | int val = simple_strtol(buf, NULL, 10); | 1036 | int val = simple_strtol(buf, NULL, 10); |
@@ -1053,25 +1048,20 @@ static int update_relax_domain_level(struct cpuset *cs, char *buf) | |||
1053 | 1048 | ||
1054 | /* | 1049 | /* |
1055 | * update_flag - read a 0 or a 1 in a file and update associated flag | 1050 | * update_flag - read a 0 or a 1 in a file and update associated flag |
1056 | * bit: the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE, | 1051 | * bit: the bit to update (see cpuset_flagbits_t) |
1057 | * CS_SCHED_LOAD_BALANCE, | 1052 | * cs: the cpuset to update |
1058 | * CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE, | 1053 | * turning_on: whether the flag is being set or cleared |
1059 | * CS_SPREAD_PAGE, CS_SPREAD_SLAB) | ||
1060 | * cs: the cpuset to update | ||
1061 | * buf: the buffer where we read the 0 or 1 | ||
1062 | * | 1054 | * |
1063 | * Call with cgroup_mutex held. | 1055 | * Call with cgroup_mutex held. |
1064 | */ | 1056 | */ |
1065 | 1057 | ||
1066 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf) | 1058 | static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, |
1059 | int turning_on) | ||
1067 | { | 1060 | { |
1068 | int turning_on; | ||
1069 | struct cpuset trialcs; | 1061 | struct cpuset trialcs; |
1070 | int err; | 1062 | int err; |
1071 | int cpus_nonempty, balance_flag_changed; | 1063 | int cpus_nonempty, balance_flag_changed; |
1072 | 1064 | ||
1073 | turning_on = (simple_strtoul(buf, NULL, 10) != 0); | ||
1074 | |||
1075 | trialcs = *cs; | 1065 | trialcs = *cs; |
1076 | if (turning_on) | 1066 | if (turning_on) |
1077 | set_bit(bit, &trialcs.flags); | 1067 | set_bit(bit, &trialcs.flags); |
@@ -1241,6 +1231,7 @@ typedef enum { | |||
1241 | FILE_MEMLIST, | 1231 | FILE_MEMLIST, |
1242 | FILE_CPU_EXCLUSIVE, | 1232 | FILE_CPU_EXCLUSIVE, |
1243 | FILE_MEM_EXCLUSIVE, | 1233 | FILE_MEM_EXCLUSIVE, |
1234 | FILE_MEM_HARDWALL, | ||
1244 | FILE_SCHED_LOAD_BALANCE, | 1235 | FILE_SCHED_LOAD_BALANCE, |
1245 | FILE_SCHED_RELAX_DOMAIN_LEVEL, | 1236 | FILE_SCHED_RELAX_DOMAIN_LEVEL, |
1246 | FILE_MEMORY_PRESSURE_ENABLED, | 1237 | FILE_MEMORY_PRESSURE_ENABLED, |
@@ -1289,46 +1280,71 @@ static ssize_t cpuset_common_file_write(struct cgroup *cont, | |||
1289 | case FILE_MEMLIST: | 1280 | case FILE_MEMLIST: |
1290 | retval = update_nodemask(cs, buffer); | 1281 | retval = update_nodemask(cs, buffer); |
1291 | break; | 1282 | break; |
1283 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | ||
1284 | retval = update_relax_domain_level(cs, buffer); | ||
1285 | break; | ||
1286 | default: | ||
1287 | retval = -EINVAL; | ||
1288 | goto out2; | ||
1289 | } | ||
1290 | |||
1291 | if (retval == 0) | ||
1292 | retval = nbytes; | ||
1293 | out2: | ||
1294 | cgroup_unlock(); | ||
1295 | out1: | ||
1296 | kfree(buffer); | ||
1297 | return retval; | ||
1298 | } | ||
1299 | |||
1300 | static int cpuset_write_u64(struct cgroup *cgrp, struct cftype *cft, u64 val) | ||
1301 | { | ||
1302 | int retval = 0; | ||
1303 | struct cpuset *cs = cgroup_cs(cgrp); | ||
1304 | cpuset_filetype_t type = cft->private; | ||
1305 | |||
1306 | cgroup_lock(); | ||
1307 | |||
1308 | if (cgroup_is_removed(cgrp)) { | ||
1309 | cgroup_unlock(); | ||
1310 | return -ENODEV; | ||
1311 | } | ||
1312 | |||
1313 | switch (type) { | ||
1292 | case FILE_CPU_EXCLUSIVE: | 1314 | case FILE_CPU_EXCLUSIVE: |
1293 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, buffer); | 1315 | retval = update_flag(CS_CPU_EXCLUSIVE, cs, val); |
1294 | break; | 1316 | break; |
1295 | case FILE_MEM_EXCLUSIVE: | 1317 | case FILE_MEM_EXCLUSIVE: |
1296 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer); | 1318 | retval = update_flag(CS_MEM_EXCLUSIVE, cs, val); |
1297 | break; | 1319 | break; |
1298 | case FILE_SCHED_LOAD_BALANCE: | 1320 | case FILE_MEM_HARDWALL: |
1299 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer); | 1321 | retval = update_flag(CS_MEM_HARDWALL, cs, val); |
1300 | break; | 1322 | break; |
1301 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | 1323 | case FILE_SCHED_LOAD_BALANCE: |
1302 | retval = update_relax_domain_level(cs, buffer); | 1324 | retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, val); |
1303 | break; | 1325 | break; |
1304 | case FILE_MEMORY_MIGRATE: | 1326 | case FILE_MEMORY_MIGRATE: |
1305 | retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer); | 1327 | retval = update_flag(CS_MEMORY_MIGRATE, cs, val); |
1306 | break; | 1328 | break; |
1307 | case FILE_MEMORY_PRESSURE_ENABLED: | 1329 | case FILE_MEMORY_PRESSURE_ENABLED: |
1308 | retval = update_memory_pressure_enabled(cs, buffer); | 1330 | cpuset_memory_pressure_enabled = !!val; |
1309 | break; | 1331 | break; |
1310 | case FILE_MEMORY_PRESSURE: | 1332 | case FILE_MEMORY_PRESSURE: |
1311 | retval = -EACCES; | 1333 | retval = -EACCES; |
1312 | break; | 1334 | break; |
1313 | case FILE_SPREAD_PAGE: | 1335 | case FILE_SPREAD_PAGE: |
1314 | retval = update_flag(CS_SPREAD_PAGE, cs, buffer); | 1336 | retval = update_flag(CS_SPREAD_PAGE, cs, val); |
1315 | cs->mems_generation = cpuset_mems_generation++; | 1337 | cs->mems_generation = cpuset_mems_generation++; |
1316 | break; | 1338 | break; |
1317 | case FILE_SPREAD_SLAB: | 1339 | case FILE_SPREAD_SLAB: |
1318 | retval = update_flag(CS_SPREAD_SLAB, cs, buffer); | 1340 | retval = update_flag(CS_SPREAD_SLAB, cs, val); |
1319 | cs->mems_generation = cpuset_mems_generation++; | 1341 | cs->mems_generation = cpuset_mems_generation++; |
1320 | break; | 1342 | break; |
1321 | default: | 1343 | default: |
1322 | retval = -EINVAL; | 1344 | retval = -EINVAL; |
1323 | goto out2; | 1345 | break; |
1324 | } | 1346 | } |
1325 | |||
1326 | if (retval == 0) | ||
1327 | retval = nbytes; | ||
1328 | out2: | ||
1329 | cgroup_unlock(); | 1347 | cgroup_unlock(); |
1330 | out1: | ||
1331 | kfree(buffer); | ||
1332 | return retval; | 1348 | return retval; |
1333 | } | 1349 | } |
1334 | 1350 | ||
@@ -1390,33 +1406,9 @@ static ssize_t cpuset_common_file_read(struct cgroup *cont, | |||
1390 | case FILE_MEMLIST: | 1406 | case FILE_MEMLIST: |
1391 | s += cpuset_sprintf_memlist(s, cs); | 1407 | s += cpuset_sprintf_memlist(s, cs); |
1392 | break; | 1408 | break; |
1393 | case FILE_CPU_EXCLUSIVE: | ||
1394 | *s++ = is_cpu_exclusive(cs) ? '1' : '0'; | ||
1395 | break; | ||
1396 | case FILE_MEM_EXCLUSIVE: | ||
1397 | *s++ = is_mem_exclusive(cs) ? '1' : '0'; | ||
1398 | break; | ||
1399 | case FILE_SCHED_LOAD_BALANCE: | ||
1400 | *s++ = is_sched_load_balance(cs) ? '1' : '0'; | ||
1401 | break; | ||
1402 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: | 1409 | case FILE_SCHED_RELAX_DOMAIN_LEVEL: |
1403 | s += sprintf(s, "%d", cs->relax_domain_level); | 1410 | s += sprintf(s, "%d", cs->relax_domain_level); |
1404 | break; | 1411 | break; |
1405 | case FILE_MEMORY_MIGRATE: | ||
1406 | *s++ = is_memory_migrate(cs) ? '1' : '0'; | ||
1407 | break; | ||
1408 | case FILE_MEMORY_PRESSURE_ENABLED: | ||
1409 | *s++ = cpuset_memory_pressure_enabled ? '1' : '0'; | ||
1410 | break; | ||
1411 | case FILE_MEMORY_PRESSURE: | ||
1412 | s += sprintf(s, "%d", fmeter_getrate(&cs->fmeter)); | ||
1413 | break; | ||
1414 | case FILE_SPREAD_PAGE: | ||
1415 | *s++ = is_spread_page(cs) ? '1' : '0'; | ||
1416 | break; | ||
1417 | case FILE_SPREAD_SLAB: | ||
1418 | *s++ = is_spread_slab(cs) ? '1' : '0'; | ||
1419 | break; | ||
1420 | default: | 1412 | default: |
1421 | retval = -EINVAL; | 1413 | retval = -EINVAL; |
1422 | goto out; | 1414 | goto out; |
@@ -1429,121 +1421,137 @@ out: | |||
1429 | return retval; | 1421 | return retval; |
1430 | } | 1422 | } |
1431 | 1423 | ||
1432 | 1424 | static u64 cpuset_read_u64(struct cgroup *cont, struct cftype *cft) | |
1433 | 1425 | { | |
1426 | struct cpuset *cs = cgroup_cs(cont); | ||
1427 | cpuset_filetype_t type = cft->private; | ||
1428 | switch (type) { | ||
1429 | case FILE_CPU_EXCLUSIVE: | ||
1430 | return is_cpu_exclusive(cs); | ||
1431 | case FILE_MEM_EXCLUSIVE: | ||
1432 | return is_mem_exclusive(cs); | ||
1433 | case FILE_MEM_HARDWALL: | ||
1434 | return is_mem_hardwall(cs); | ||
1435 | case FILE_SCHED_LOAD_BALANCE: | ||
1436 | return is_sched_load_balance(cs); | ||
1437 | case FILE_MEMORY_MIGRATE: | ||
1438 | return is_memory_migrate(cs); | ||
1439 | case FILE_MEMORY_PRESSURE_ENABLED: | ||
1440 | return cpuset_memory_pressure_enabled; | ||
1441 | case FILE_MEMORY_PRESSURE: | ||
1442 | return fmeter_getrate(&cs->fmeter); | ||
1443 | case FILE_SPREAD_PAGE: | ||
1444 | return is_spread_page(cs); | ||
1445 | case FILE_SPREAD_SLAB: | ||
1446 | return is_spread_slab(cs); | ||
1447 | default: | ||
1448 | BUG(); | ||
1449 | } | ||
1450 | } | ||
1434 | 1451 | ||
1435 | 1452 | ||
1436 | /* | 1453 | /* |
1437 | * for the common functions, 'private' gives the type of file | 1454 | * for the common functions, 'private' gives the type of file |
1438 | */ | 1455 | */ |
1439 | 1456 | ||
1440 | static struct cftype cft_cpus = { | 1457 | static struct cftype files[] = { |
1441 | .name = "cpus", | 1458 | { |
1442 | .read = cpuset_common_file_read, | 1459 | .name = "cpus", |
1443 | .write = cpuset_common_file_write, | 1460 | .read = cpuset_common_file_read, |
1444 | .private = FILE_CPULIST, | 1461 | .write = cpuset_common_file_write, |
1445 | }; | 1462 | .private = FILE_CPULIST, |
1446 | 1463 | }, | |
1447 | static struct cftype cft_mems = { | 1464 | |
1448 | .name = "mems", | 1465 | { |
1449 | .read = cpuset_common_file_read, | 1466 | .name = "mems", |
1450 | .write = cpuset_common_file_write, | 1467 | .read = cpuset_common_file_read, |
1451 | .private = FILE_MEMLIST, | 1468 | .write = cpuset_common_file_write, |
1452 | }; | 1469 | .private = FILE_MEMLIST, |
1453 | 1470 | }, | |
1454 | static struct cftype cft_cpu_exclusive = { | 1471 | |
1455 | .name = "cpu_exclusive", | 1472 | { |
1456 | .read = cpuset_common_file_read, | 1473 | .name = "cpu_exclusive", |
1457 | .write = cpuset_common_file_write, | 1474 | .read_u64 = cpuset_read_u64, |
1458 | .private = FILE_CPU_EXCLUSIVE, | 1475 | .write_u64 = cpuset_write_u64, |
1459 | }; | 1476 | .private = FILE_CPU_EXCLUSIVE, |
1460 | 1477 | }, | |
1461 | static struct cftype cft_mem_exclusive = { | 1478 | |
1462 | .name = "mem_exclusive", | 1479 | { |
1463 | .read = cpuset_common_file_read, | 1480 | .name = "mem_exclusive", |
1464 | .write = cpuset_common_file_write, | 1481 | .read_u64 = cpuset_read_u64, |
1465 | .private = FILE_MEM_EXCLUSIVE, | 1482 | .write_u64 = cpuset_write_u64, |
1466 | }; | 1483 | .private = FILE_MEM_EXCLUSIVE, |
1467 | 1484 | }, | |
1468 | static struct cftype cft_sched_load_balance = { | 1485 | |
1469 | .name = "sched_load_balance", | 1486 | { |
1470 | .read = cpuset_common_file_read, | 1487 | .name = "mem_hardwall", |
1471 | .write = cpuset_common_file_write, | 1488 | .read_u64 = cpuset_read_u64, |
1472 | .private = FILE_SCHED_LOAD_BALANCE, | 1489 | .write_u64 = cpuset_write_u64, |
1473 | }; | 1490 | .private = FILE_MEM_HARDWALL, |
1474 | 1491 | }, | |
1475 | static struct cftype cft_sched_relax_domain_level = { | 1492 | |
1476 | .name = "sched_relax_domain_level", | 1493 | { |
1477 | .read = cpuset_common_file_read, | 1494 | .name = "sched_load_balance", |
1478 | .write = cpuset_common_file_write, | 1495 | .read_u64 = cpuset_read_u64, |
1479 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, | 1496 | .write_u64 = cpuset_write_u64, |
1480 | }; | 1497 | .private = FILE_SCHED_LOAD_BALANCE, |
1481 | 1498 | }, | |
1482 | static struct cftype cft_memory_migrate = { | 1499 | |
1483 | .name = "memory_migrate", | 1500 | { |
1484 | .read = cpuset_common_file_read, | 1501 | .name = "sched_relax_domain_level", |
1485 | .write = cpuset_common_file_write, | 1502 | .read_u64 = cpuset_read_u64, |
1486 | .private = FILE_MEMORY_MIGRATE, | 1503 | .write_u64 = cpuset_write_u64, |
1504 | .private = FILE_SCHED_RELAX_DOMAIN_LEVEL, | ||
1505 | }, | ||
1506 | |||
1507 | { | ||
1508 | .name = "memory_migrate", | ||
1509 | .read_u64 = cpuset_read_u64, | ||
1510 | .write_u64 = cpuset_write_u64, | ||
1511 | .private = FILE_MEMORY_MIGRATE, | ||
1512 | }, | ||
1513 | |||
1514 | { | ||
1515 | .name = "memory_pressure", | ||
1516 | .read_u64 = cpuset_read_u64, | ||
1517 | .write_u64 = cpuset_write_u64, | ||
1518 | .private = FILE_MEMORY_PRESSURE, | ||
1519 | }, | ||
1520 | |||
1521 | { | ||
1522 | .name = "memory_spread_page", | ||
1523 | .read_u64 = cpuset_read_u64, | ||
1524 | .write_u64 = cpuset_write_u64, | ||
1525 | .private = FILE_SPREAD_PAGE, | ||
1526 | }, | ||
1527 | |||
1528 | { | ||
1529 | .name = "memory_spread_slab", | ||
1530 | .read_u64 = cpuset_read_u64, | ||
1531 | .write_u64 = cpuset_write_u64, | ||
1532 | .private = FILE_SPREAD_SLAB, | ||
1533 | }, | ||
1487 | }; | 1534 | }; |
1488 | 1535 | ||
1489 | static struct cftype cft_memory_pressure_enabled = { | 1536 | static struct cftype cft_memory_pressure_enabled = { |
1490 | .name = "memory_pressure_enabled", | 1537 | .name = "memory_pressure_enabled", |
1491 | .read = cpuset_common_file_read, | 1538 | .read_u64 = cpuset_read_u64, |
1492 | .write = cpuset_common_file_write, | 1539 | .write_u64 = cpuset_write_u64, |
1493 | .private = FILE_MEMORY_PRESSURE_ENABLED, | 1540 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
1494 | }; | 1541 | }; |
1495 | 1542 | ||
1496 | static struct cftype cft_memory_pressure = { | ||
1497 | .name = "memory_pressure", | ||
1498 | .read = cpuset_common_file_read, | ||
1499 | .write = cpuset_common_file_write, | ||
1500 | .private = FILE_MEMORY_PRESSURE, | ||
1501 | }; | ||
1502 | |||
1503 | static struct cftype cft_spread_page = { | ||
1504 | .name = "memory_spread_page", | ||
1505 | .read = cpuset_common_file_read, | ||
1506 | .write = cpuset_common_file_write, | ||
1507 | .private = FILE_SPREAD_PAGE, | ||
1508 | }; | ||
1509 | |||
1510 | static struct cftype cft_spread_slab = { | ||
1511 | .name = "memory_spread_slab", | ||
1512 | .read = cpuset_common_file_read, | ||
1513 | .write = cpuset_common_file_write, | ||
1514 | .private = FILE_SPREAD_SLAB, | ||
1515 | }; | ||
1516 | |||
1517 | static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) | 1543 | static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
1518 | { | 1544 | { |
1519 | int err; | 1545 | int err; |
1520 | 1546 | ||
1521 | if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0) | 1547 | err = cgroup_add_files(cont, ss, files, ARRAY_SIZE(files)); |
1522 | return err; | 1548 | if (err) |
1523 | if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0) | ||
1524 | return err; | ||
1525 | if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0) | ||
1526 | return err; | ||
1527 | if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0) | ||
1528 | return err; | ||
1529 | if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0) | ||
1530 | return err; | ||
1531 | if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0) | ||
1532 | return err; | ||
1533 | if ((err = cgroup_add_file(cont, ss, | ||
1534 | &cft_sched_relax_domain_level)) < 0) | ||
1535 | return err; | ||
1536 | if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0) | ||
1537 | return err; | ||
1538 | if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0) | ||
1539 | return err; | ||
1540 | if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0) | ||
1541 | return err; | 1549 | return err; |
1542 | /* memory_pressure_enabled is in root cpuset only */ | 1550 | /* memory_pressure_enabled is in root cpuset only */ |
1543 | if (err == 0 && !cont->parent) | 1551 | if (!cont->parent) |
1544 | err = cgroup_add_file(cont, ss, | 1552 | err = cgroup_add_file(cont, ss, |
1545 | &cft_memory_pressure_enabled); | 1553 | &cft_memory_pressure_enabled); |
1546 | return 0; | 1554 | return err; |
1547 | } | 1555 | } |
1548 | 1556 | ||
1549 | /* | 1557 | /* |
@@ -1643,7 +1651,7 @@ static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) | |||
1643 | cpuset_update_task_memory_state(); | 1651 | cpuset_update_task_memory_state(); |
1644 | 1652 | ||
1645 | if (is_sched_load_balance(cs)) | 1653 | if (is_sched_load_balance(cs)) |
1646 | update_flag(CS_SCHED_LOAD_BALANCE, cs, "0"); | 1654 | update_flag(CS_SCHED_LOAD_BALANCE, cs, 0); |
1647 | 1655 | ||
1648 | number_of_cpusets--; | 1656 | number_of_cpusets--; |
1649 | kfree(cs); | 1657 | kfree(cs); |
@@ -1708,7 +1716,8 @@ int __init cpuset_init(void) | |||
1708 | * Called by cgroup_scan_tasks() for each task in a cgroup. | 1716 | * Called by cgroup_scan_tasks() for each task in a cgroup. |
1709 | * Return nonzero to stop the walk through the tasks. | 1717 | * Return nonzero to stop the walk through the tasks. |
1710 | */ | 1718 | */ |
1711 | void cpuset_do_move_task(struct task_struct *tsk, struct cgroup_scanner *scan) | 1719 | static void cpuset_do_move_task(struct task_struct *tsk, |
1720 | struct cgroup_scanner *scan) | ||
1712 | { | 1721 | { |
1713 | struct cpuset_hotplug_scanner *chsp; | 1722 | struct cpuset_hotplug_scanner *chsp; |
1714 | 1723 | ||
@@ -1970,14 +1979,14 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask) | |||
1970 | } | 1979 | } |
1971 | 1980 | ||
1972 | /* | 1981 | /* |
1973 | * nearest_exclusive_ancestor() - Returns the nearest mem_exclusive | 1982 | * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or |
1974 | * ancestor to the specified cpuset. Call holding callback_mutex. | 1983 | * mem_hardwall ancestor to the specified cpuset. Call holding |
1975 | * If no ancestor is mem_exclusive (an unusual configuration), then | 1984 | * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall |
1976 | * returns the root cpuset. | 1985 | * (an unusual configuration), then returns the root cpuset. |
1977 | */ | 1986 | */ |
1978 | static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | 1987 | static const struct cpuset *nearest_hardwall_ancestor(const struct cpuset *cs) |
1979 | { | 1988 | { |
1980 | while (!is_mem_exclusive(cs) && cs->parent) | 1989 | while (!(is_mem_exclusive(cs) || is_mem_hardwall(cs)) && cs->parent) |
1981 | cs = cs->parent; | 1990 | cs = cs->parent; |
1982 | return cs; | 1991 | return cs; |
1983 | } | 1992 | } |
@@ -1991,7 +2000,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | |||
1991 | * __GFP_THISNODE is set, yes, we can always allocate. If zone | 2000 | * __GFP_THISNODE is set, yes, we can always allocate. If zone |
1992 | * z's node is in our tasks mems_allowed, yes. If it's not a | 2001 | * z's node is in our tasks mems_allowed, yes. If it's not a |
1993 | * __GFP_HARDWALL request and this zone's nodes is in the nearest | 2002 | * __GFP_HARDWALL request and this zone's nodes is in the nearest |
1994 | * mem_exclusive cpuset ancestor to this tasks cpuset, yes. | 2003 | * hardwalled cpuset ancestor to this tasks cpuset, yes. |
1995 | * If the task has been OOM killed and has access to memory reserves | 2004 | * If the task has been OOM killed and has access to memory reserves |
1996 | * as specified by the TIF_MEMDIE flag, yes. | 2005 | * as specified by the TIF_MEMDIE flag, yes. |
1997 | * Otherwise, no. | 2006 | * Otherwise, no. |
@@ -2014,7 +2023,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | |||
2014 | * and do not allow allocations outside the current tasks cpuset | 2023 | * and do not allow allocations outside the current tasks cpuset |
2015 | * unless the task has been OOM killed as is marked TIF_MEMDIE. | 2024 | * unless the task has been OOM killed as is marked TIF_MEMDIE. |
2016 | * GFP_KERNEL allocations are not so marked, so can escape to the | 2025 | * GFP_KERNEL allocations are not so marked, so can escape to the |
2017 | * nearest enclosing mem_exclusive ancestor cpuset. | 2026 | * nearest enclosing hardwalled ancestor cpuset. |
2018 | * | 2027 | * |
2019 | * Scanning up parent cpusets requires callback_mutex. The | 2028 | * Scanning up parent cpusets requires callback_mutex. The |
2020 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit | 2029 | * __alloc_pages() routine only calls here with __GFP_HARDWALL bit |
@@ -2037,7 +2046,7 @@ static const struct cpuset *nearest_exclusive_ancestor(const struct cpuset *cs) | |||
2037 | * in_interrupt - any node ok (current task context irrelevant) | 2046 | * in_interrupt - any node ok (current task context irrelevant) |
2038 | * GFP_ATOMIC - any node ok | 2047 | * GFP_ATOMIC - any node ok |
2039 | * TIF_MEMDIE - any node ok | 2048 | * TIF_MEMDIE - any node ok |
2040 | * GFP_KERNEL - any node in enclosing mem_exclusive cpuset ok | 2049 | * GFP_KERNEL - any node in enclosing hardwalled cpuset ok |
2041 | * GFP_USER - only nodes in current tasks mems allowed ok. | 2050 | * GFP_USER - only nodes in current tasks mems allowed ok. |
2042 | * | 2051 | * |
2043 | * Rule: | 2052 | * Rule: |
@@ -2074,7 +2083,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | |||
2074 | mutex_lock(&callback_mutex); | 2083 | mutex_lock(&callback_mutex); |
2075 | 2084 | ||
2076 | task_lock(current); | 2085 | task_lock(current); |
2077 | cs = nearest_exclusive_ancestor(task_cs(current)); | 2086 | cs = nearest_hardwall_ancestor(task_cs(current)); |
2078 | task_unlock(current); | 2087 | task_unlock(current); |
2079 | 2088 | ||
2080 | allowed = node_isset(node, cs->mems_allowed); | 2089 | allowed = node_isset(node, cs->mems_allowed); |
diff --git a/kernel/dma.c b/kernel/dma.c index 6a82bb716dac..d2c60a822790 100644 --- a/kernel/dma.c +++ b/kernel/dma.c | |||
@@ -149,12 +149,7 @@ static const struct file_operations proc_dma_operations = { | |||
149 | 149 | ||
150 | static int __init proc_dma_init(void) | 150 | static int __init proc_dma_init(void) |
151 | { | 151 | { |
152 | struct proc_dir_entry *e; | 152 | proc_create("dma", 0, NULL, &proc_dma_operations); |
153 | |||
154 | e = create_proc_entry("dma", 0, NULL); | ||
155 | if (e) | ||
156 | e->proc_fops = &proc_dma_operations; | ||
157 | |||
158 | return 0; | 153 | return 0; |
159 | } | 154 | } |
160 | 155 | ||
diff --git a/kernel/exit.c b/kernel/exit.c index 2a9d98c641ac..ae0f2c4e452b 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
@@ -557,6 +557,88 @@ void exit_fs(struct task_struct *tsk) | |||
557 | 557 | ||
558 | EXPORT_SYMBOL_GPL(exit_fs); | 558 | EXPORT_SYMBOL_GPL(exit_fs); |
559 | 559 | ||
560 | #ifdef CONFIG_MM_OWNER | ||
561 | /* | ||
562 | * Task p is exiting and it owned mm, lets find a new owner for it | ||
563 | */ | ||
564 | static inline int | ||
565 | mm_need_new_owner(struct mm_struct *mm, struct task_struct *p) | ||
566 | { | ||
567 | /* | ||
568 | * If there are other users of the mm and the owner (us) is exiting | ||
569 | * we need to find a new owner to take on the responsibility. | ||
570 | */ | ||
571 | if (!mm) | ||
572 | return 0; | ||
573 | if (atomic_read(&mm->mm_users) <= 1) | ||
574 | return 0; | ||
575 | if (mm->owner != p) | ||
576 | return 0; | ||
577 | return 1; | ||
578 | } | ||
579 | |||
580 | void mm_update_next_owner(struct mm_struct *mm) | ||
581 | { | ||
582 | struct task_struct *c, *g, *p = current; | ||
583 | |||
584 | retry: | ||
585 | if (!mm_need_new_owner(mm, p)) | ||
586 | return; | ||
587 | |||
588 | read_lock(&tasklist_lock); | ||
589 | /* | ||
590 | * Search in the children | ||
591 | */ | ||
592 | list_for_each_entry(c, &p->children, sibling) { | ||
593 | if (c->mm == mm) | ||
594 | goto assign_new_owner; | ||
595 | } | ||
596 | |||
597 | /* | ||
598 | * Search in the siblings | ||
599 | */ | ||
600 | list_for_each_entry(c, &p->parent->children, sibling) { | ||
601 | if (c->mm == mm) | ||
602 | goto assign_new_owner; | ||
603 | } | ||
604 | |||
605 | /* | ||
606 | * Search through everything else. We should not get | ||
607 | * here often | ||
608 | */ | ||
609 | do_each_thread(g, c) { | ||
610 | if (c->mm == mm) | ||
611 | goto assign_new_owner; | ||
612 | } while_each_thread(g, c); | ||
613 | |||
614 | read_unlock(&tasklist_lock); | ||
615 | return; | ||
616 | |||
617 | assign_new_owner: | ||
618 | BUG_ON(c == p); | ||
619 | get_task_struct(c); | ||
620 | /* | ||
621 | * The task_lock protects c->mm from changing. | ||
622 | * We always want mm->owner->mm == mm | ||
623 | */ | ||
624 | task_lock(c); | ||
625 | /* | ||
626 | * Delay read_unlock() till we have the task_lock() | ||
627 | * to ensure that c does not slip away underneath us | ||
628 | */ | ||
629 | read_unlock(&tasklist_lock); | ||
630 | if (c->mm != mm) { | ||
631 | task_unlock(c); | ||
632 | put_task_struct(c); | ||
633 | goto retry; | ||
634 | } | ||
635 | cgroup_mm_owner_callbacks(mm->owner, c); | ||
636 | mm->owner = c; | ||
637 | task_unlock(c); | ||
638 | put_task_struct(c); | ||
639 | } | ||
640 | #endif /* CONFIG_MM_OWNER */ | ||
641 | |||
560 | /* | 642 | /* |
561 | * Turn us into a lazy TLB process if we | 643 | * Turn us into a lazy TLB process if we |
562 | * aren't already.. | 644 | * aren't already.. |
@@ -596,6 +678,7 @@ static void exit_mm(struct task_struct * tsk) | |||
596 | /* We don't want this task to be frozen prematurely */ | 678 | /* We don't want this task to be frozen prematurely */ |
597 | clear_freeze_flag(tsk); | 679 | clear_freeze_flag(tsk); |
598 | task_unlock(tsk); | 680 | task_unlock(tsk); |
681 | mm_update_next_owner(mm); | ||
599 | mmput(mm); | 682 | mmput(mm); |
600 | } | 683 | } |
601 | 684 | ||
diff --git a/kernel/fork.c b/kernel/fork.c index 6067e429f281..068ffe007529 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -381,14 +381,13 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
381 | mm->ioctx_list = NULL; | 381 | mm->ioctx_list = NULL; |
382 | mm->free_area_cache = TASK_UNMAPPED_BASE; | 382 | mm->free_area_cache = TASK_UNMAPPED_BASE; |
383 | mm->cached_hole_size = ~0UL; | 383 | mm->cached_hole_size = ~0UL; |
384 | mm_init_cgroup(mm, p); | 384 | mm_init_owner(mm, p); |
385 | 385 | ||
386 | if (likely(!mm_alloc_pgd(mm))) { | 386 | if (likely(!mm_alloc_pgd(mm))) { |
387 | mm->def_flags = 0; | 387 | mm->def_flags = 0; |
388 | return mm; | 388 | return mm; |
389 | } | 389 | } |
390 | 390 | ||
391 | mm_free_cgroup(mm); | ||
392 | free_mm(mm); | 391 | free_mm(mm); |
393 | return NULL; | 392 | return NULL; |
394 | } | 393 | } |
@@ -432,13 +431,13 @@ void mmput(struct mm_struct *mm) | |||
432 | if (atomic_dec_and_test(&mm->mm_users)) { | 431 | if (atomic_dec_and_test(&mm->mm_users)) { |
433 | exit_aio(mm); | 432 | exit_aio(mm); |
434 | exit_mmap(mm); | 433 | exit_mmap(mm); |
434 | set_mm_exe_file(mm, NULL); | ||
435 | if (!list_empty(&mm->mmlist)) { | 435 | if (!list_empty(&mm->mmlist)) { |
436 | spin_lock(&mmlist_lock); | 436 | spin_lock(&mmlist_lock); |
437 | list_del(&mm->mmlist); | 437 | list_del(&mm->mmlist); |
438 | spin_unlock(&mmlist_lock); | 438 | spin_unlock(&mmlist_lock); |
439 | } | 439 | } |
440 | put_swap_token(mm); | 440 | put_swap_token(mm); |
441 | mm_free_cgroup(mm); | ||
442 | mmdrop(mm); | 441 | mmdrop(mm); |
443 | } | 442 | } |
444 | } | 443 | } |
@@ -545,6 +544,8 @@ struct mm_struct *dup_mm(struct task_struct *tsk) | |||
545 | if (init_new_context(tsk, mm)) | 544 | if (init_new_context(tsk, mm)) |
546 | goto fail_nocontext; | 545 | goto fail_nocontext; |
547 | 546 | ||
547 | dup_mm_exe_file(oldmm, mm); | ||
548 | |||
548 | err = dup_mmap(mm, oldmm); | 549 | err = dup_mmap(mm, oldmm); |
549 | if (err) | 550 | if (err) |
550 | goto free_pt; | 551 | goto free_pt; |
@@ -982,6 +983,13 @@ static void rt_mutex_init_task(struct task_struct *p) | |||
982 | #endif | 983 | #endif |
983 | } | 984 | } |
984 | 985 | ||
986 | #ifdef CONFIG_MM_OWNER | ||
987 | void mm_init_owner(struct mm_struct *mm, struct task_struct *p) | ||
988 | { | ||
989 | mm->owner = p; | ||
990 | } | ||
991 | #endif /* CONFIG_MM_OWNER */ | ||
992 | |||
985 | /* | 993 | /* |
986 | * This creates a new process as a copy of the old one, | 994 | * This creates a new process as a copy of the old one, |
987 | * but does not actually start it yet. | 995 | * but does not actually start it yet. |
@@ -1664,18 +1672,6 @@ static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp | |||
1664 | } | 1672 | } |
1665 | 1673 | ||
1666 | /* | 1674 | /* |
1667 | * Unsharing of semundo for tasks created with CLONE_SYSVSEM is not | ||
1668 | * supported yet | ||
1669 | */ | ||
1670 | static int unshare_semundo(unsigned long unshare_flags, struct sem_undo_list **new_ulistp) | ||
1671 | { | ||
1672 | if (unshare_flags & CLONE_SYSVSEM) | ||
1673 | return -EINVAL; | ||
1674 | |||
1675 | return 0; | ||
1676 | } | ||
1677 | |||
1678 | /* | ||
1679 | * unshare allows a process to 'unshare' part of the process | 1675 | * unshare allows a process to 'unshare' part of the process |
1680 | * context which was originally shared using clone. copy_* | 1676 | * context which was originally shared using clone. copy_* |
1681 | * functions used by do_fork() cannot be used here directly | 1677 | * functions used by do_fork() cannot be used here directly |
@@ -1690,8 +1686,8 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1690 | struct sighand_struct *new_sigh = NULL; | 1686 | struct sighand_struct *new_sigh = NULL; |
1691 | struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL; | 1687 | struct mm_struct *mm, *new_mm = NULL, *active_mm = NULL; |
1692 | struct files_struct *fd, *new_fd = NULL; | 1688 | struct files_struct *fd, *new_fd = NULL; |
1693 | struct sem_undo_list *new_ulist = NULL; | ||
1694 | struct nsproxy *new_nsproxy = NULL; | 1689 | struct nsproxy *new_nsproxy = NULL; |
1690 | int do_sysvsem = 0; | ||
1695 | 1691 | ||
1696 | check_unshare_flags(&unshare_flags); | 1692 | check_unshare_flags(&unshare_flags); |
1697 | 1693 | ||
@@ -1703,6 +1699,13 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1703 | CLONE_NEWNET)) | 1699 | CLONE_NEWNET)) |
1704 | goto bad_unshare_out; | 1700 | goto bad_unshare_out; |
1705 | 1701 | ||
1702 | /* | ||
1703 | * CLONE_NEWIPC must also detach from the undolist: after switching | ||
1704 | * to a new ipc namespace, the semaphore arrays from the old | ||
1705 | * namespace are unreachable. | ||
1706 | */ | ||
1707 | if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM)) | ||
1708 | do_sysvsem = 1; | ||
1706 | if ((err = unshare_thread(unshare_flags))) | 1709 | if ((err = unshare_thread(unshare_flags))) |
1707 | goto bad_unshare_out; | 1710 | goto bad_unshare_out; |
1708 | if ((err = unshare_fs(unshare_flags, &new_fs))) | 1711 | if ((err = unshare_fs(unshare_flags, &new_fs))) |
@@ -1713,13 +1716,17 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1713 | goto bad_unshare_cleanup_sigh; | 1716 | goto bad_unshare_cleanup_sigh; |
1714 | if ((err = unshare_fd(unshare_flags, &new_fd))) | 1717 | if ((err = unshare_fd(unshare_flags, &new_fd))) |
1715 | goto bad_unshare_cleanup_vm; | 1718 | goto bad_unshare_cleanup_vm; |
1716 | if ((err = unshare_semundo(unshare_flags, &new_ulist))) | ||
1717 | goto bad_unshare_cleanup_fd; | ||
1718 | if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, | 1719 | if ((err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy, |
1719 | new_fs))) | 1720 | new_fs))) |
1720 | goto bad_unshare_cleanup_semundo; | 1721 | goto bad_unshare_cleanup_fd; |
1721 | 1722 | ||
1722 | if (new_fs || new_mm || new_fd || new_ulist || new_nsproxy) { | 1723 | if (new_fs || new_mm || new_fd || do_sysvsem || new_nsproxy) { |
1724 | if (do_sysvsem) { | ||
1725 | /* | ||
1726 | * CLONE_SYSVSEM is equivalent to sys_exit(). | ||
1727 | */ | ||
1728 | exit_sem(current); | ||
1729 | } | ||
1723 | 1730 | ||
1724 | if (new_nsproxy) { | 1731 | if (new_nsproxy) { |
1725 | switch_task_namespaces(current, new_nsproxy); | 1732 | switch_task_namespaces(current, new_nsproxy); |
@@ -1755,7 +1762,6 @@ asmlinkage long sys_unshare(unsigned long unshare_flags) | |||
1755 | if (new_nsproxy) | 1762 | if (new_nsproxy) |
1756 | put_nsproxy(new_nsproxy); | 1763 | put_nsproxy(new_nsproxy); |
1757 | 1764 | ||
1758 | bad_unshare_cleanup_semundo: | ||
1759 | bad_unshare_cleanup_fd: | 1765 | bad_unshare_cleanup_fd: |
1760 | if (new_fd) | 1766 | if (new_fd) |
1761 | put_files_struct(new_fd); | 1767 | put_files_struct(new_fd); |
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c index 6d9204f3a370..38a25b8d8bff 100644 --- a/kernel/irq/devres.c +++ b/kernel/irq/devres.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/module.h> | 1 | #include <linux/module.h> |
2 | #include <linux/interrupt.h> | 2 | #include <linux/interrupt.h> |
3 | #include <linux/device.h> | 3 | #include <linux/device.h> |
4 | #include <linux/gfp.h> | ||
4 | 5 | ||
5 | /* | 6 | /* |
6 | * Device resource management aware IRQ request/free implementation. | 7 | * Device resource management aware IRQ request/free implementation. |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 438a01464287..46e4ad1723f0 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/random.h> | 12 | #include <linux/random.h> |
13 | #include <linux/interrupt.h> | 13 | #include <linux/interrupt.h> |
14 | #include <linux/slab.h> | ||
14 | 15 | ||
15 | #include "internals.h" | 16 | #include "internals.h" |
16 | 17 | ||
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c index f091d13def00..6fc0040f3e3a 100644 --- a/kernel/kallsyms.c +++ b/kernel/kallsyms.c | |||
@@ -472,11 +472,7 @@ static const struct file_operations kallsyms_operations = { | |||
472 | 472 | ||
473 | static int __init kallsyms_init(void) | 473 | static int __init kallsyms_init(void) |
474 | { | 474 | { |
475 | struct proc_dir_entry *entry; | 475 | proc_create("kallsyms", 0444, NULL, &kallsyms_operations); |
476 | |||
477 | entry = create_proc_entry("kallsyms", 0444, NULL); | ||
478 | if (entry) | ||
479 | entry->proc_fops = &kallsyms_operations; | ||
480 | return 0; | 476 | return 0; |
481 | } | 477 | } |
482 | __initcall(kallsyms_init); | 478 | __initcall(kallsyms_init); |
diff --git a/kernel/kthread.c b/kernel/kthread.c index 92cf6930ab51..ac72eea48339 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
@@ -144,9 +144,9 @@ struct task_struct *kthread_create(int (*threadfn)(void *data), | |||
144 | 144 | ||
145 | spin_lock(&kthread_create_lock); | 145 | spin_lock(&kthread_create_lock); |
146 | list_add_tail(&create.list, &kthread_create_list); | 146 | list_add_tail(&create.list, &kthread_create_list); |
147 | wake_up_process(kthreadd_task); | ||
148 | spin_unlock(&kthread_create_lock); | 147 | spin_unlock(&kthread_create_lock); |
149 | 148 | ||
149 | wake_up_process(kthreadd_task); | ||
150 | wait_for_completion(&create.done); | 150 | wait_for_completion(&create.done); |
151 | 151 | ||
152 | if (!IS_ERR(create.result)) { | 152 | if (!IS_ERR(create.result)) { |
diff --git a/kernel/latencytop.c b/kernel/latencytop.c index 7c74dab0d21b..5e7b45c56923 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c | |||
@@ -233,14 +233,7 @@ static struct file_operations lstats_fops = { | |||
233 | 233 | ||
234 | static int __init init_lstats_procfs(void) | 234 | static int __init init_lstats_procfs(void) |
235 | { | 235 | { |
236 | struct proc_dir_entry *pe; | 236 | proc_create("latency_stats", 0644, NULL, &lstats_fops); |
237 | |||
238 | pe = create_proc_entry("latency_stats", 0644, NULL); | ||
239 | if (!pe) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | pe->proc_fops = &lstats_fops; | ||
243 | |||
244 | return 0; | 237 | return 0; |
245 | } | 238 | } |
246 | __initcall(init_lstats_procfs); | 239 | __initcall(init_lstats_procfs); |
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index 8a135bd163c2..dc5d29648d85 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c | |||
@@ -660,20 +660,12 @@ static const struct file_operations proc_lock_stat_operations = { | |||
660 | 660 | ||
661 | static int __init lockdep_proc_init(void) | 661 | static int __init lockdep_proc_init(void) |
662 | { | 662 | { |
663 | struct proc_dir_entry *entry; | 663 | proc_create("lockdep", S_IRUSR, NULL, &proc_lockdep_operations); |
664 | 664 | proc_create("lockdep_stats", S_IRUSR, NULL, | |
665 | entry = create_proc_entry("lockdep", S_IRUSR, NULL); | 665 | &proc_lockdep_stats_operations); |
666 | if (entry) | ||
667 | entry->proc_fops = &proc_lockdep_operations; | ||
668 | |||
669 | entry = create_proc_entry("lockdep_stats", S_IRUSR, NULL); | ||
670 | if (entry) | ||
671 | entry->proc_fops = &proc_lockdep_stats_operations; | ||
672 | 666 | ||
673 | #ifdef CONFIG_LOCK_STAT | 667 | #ifdef CONFIG_LOCK_STAT |
674 | entry = create_proc_entry("lock_stat", S_IRUSR, NULL); | 668 | proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations); |
675 | if (entry) | ||
676 | entry->proc_fops = &proc_lock_stat_operations; | ||
677 | #endif | 669 | #endif |
678 | 670 | ||
679 | return 0; | 671 | return 0; |
diff --git a/kernel/marker.c b/kernel/marker.c index 005b95954593..139260e5460c 100644 --- a/kernel/marker.c +++ b/kernel/marker.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
24 | #include <linux/marker.h> | 24 | #include <linux/marker.h> |
25 | #include <linux/err.h> | 25 | #include <linux/err.h> |
26 | #include <linux/slab.h> | ||
26 | 27 | ||
27 | extern struct marker __start___markers[]; | 28 | extern struct marker __start___markers[]; |
28 | extern struct marker __stop___markers[]; | 29 | extern struct marker __stop___markers[]; |
diff --git a/kernel/notifier.c b/kernel/notifier.c index 643360d1bb14..823be11584ef 100644 --- a/kernel/notifier.c +++ b/kernel/notifier.c | |||
@@ -31,6 +31,21 @@ static int notifier_chain_register(struct notifier_block **nl, | |||
31 | return 0; | 31 | return 0; |
32 | } | 32 | } |
33 | 33 | ||
34 | static int notifier_chain_cond_register(struct notifier_block **nl, | ||
35 | struct notifier_block *n) | ||
36 | { | ||
37 | while ((*nl) != NULL) { | ||
38 | if ((*nl) == n) | ||
39 | return 0; | ||
40 | if (n->priority > (*nl)->priority) | ||
41 | break; | ||
42 | nl = &((*nl)->next); | ||
43 | } | ||
44 | n->next = *nl; | ||
45 | rcu_assign_pointer(*nl, n); | ||
46 | return 0; | ||
47 | } | ||
48 | |||
34 | static int notifier_chain_unregister(struct notifier_block **nl, | 49 | static int notifier_chain_unregister(struct notifier_block **nl, |
35 | struct notifier_block *n) | 50 | struct notifier_block *n) |
36 | { | 51 | { |
@@ -205,6 +220,29 @@ int blocking_notifier_chain_register(struct blocking_notifier_head *nh, | |||
205 | EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); | 220 | EXPORT_SYMBOL_GPL(blocking_notifier_chain_register); |
206 | 221 | ||
207 | /** | 222 | /** |
223 | * blocking_notifier_chain_cond_register - Cond add notifier to a blocking notifier chain | ||
224 | * @nh: Pointer to head of the blocking notifier chain | ||
225 | * @n: New entry in notifier chain | ||
226 | * | ||
227 | * Adds a notifier to a blocking notifier chain, only if not already | ||
228 | * present in the chain. | ||
229 | * Must be called in process context. | ||
230 | * | ||
231 | * Currently always returns zero. | ||
232 | */ | ||
233 | int blocking_notifier_chain_cond_register(struct blocking_notifier_head *nh, | ||
234 | struct notifier_block *n) | ||
235 | { | ||
236 | int ret; | ||
237 | |||
238 | down_write(&nh->rwsem); | ||
239 | ret = notifier_chain_cond_register(&nh->head, n); | ||
240 | up_write(&nh->rwsem); | ||
241 | return ret; | ||
242 | } | ||
243 | EXPORT_SYMBOL_GPL(blocking_notifier_chain_cond_register); | ||
244 | |||
245 | /** | ||
208 | * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain | 246 | * blocking_notifier_chain_unregister - Remove notifier from a blocking notifier chain |
209 | * @nh: Pointer to head of the blocking notifier chain | 247 | * @nh: Pointer to head of the blocking notifier chain |
210 | * @n: Entry to remove from notifier chain | 248 | * @n: Entry to remove from notifier chain |
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c index aead4d69f62b..48d7ed6fc3a4 100644 --- a/kernel/ns_cgroup.c +++ b/kernel/ns_cgroup.c | |||
@@ -7,6 +7,8 @@ | |||
7 | #include <linux/module.h> | 7 | #include <linux/module.h> |
8 | #include <linux/cgroup.h> | 8 | #include <linux/cgroup.h> |
9 | #include <linux/fs.h> | 9 | #include <linux/fs.h> |
10 | #include <linux/slab.h> | ||
11 | #include <linux/nsproxy.h> | ||
10 | 12 | ||
11 | struct ns_cgroup { | 13 | struct ns_cgroup { |
12 | struct cgroup_subsys_state css; | 14 | struct cgroup_subsys_state css; |
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index f5d332cf8c63..adc785146a1c 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -139,6 +139,18 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk) | |||
139 | goto out; | 139 | goto out; |
140 | } | 140 | } |
141 | 141 | ||
142 | /* | ||
143 | * CLONE_NEWIPC must detach from the undolist: after switching | ||
144 | * to a new ipc namespace, the semaphore arrays from the old | ||
145 | * namespace are unreachable. In clone parlance, CLONE_SYSVSEM | ||
146 | * means share undolist with parent, so we must forbid using | ||
147 | * it along with CLONE_NEWIPC. | ||
148 | */ | ||
149 | if ((flags & CLONE_NEWIPC) && (flags & CLONE_SYSVSEM)) { | ||
150 | err = -EINVAL; | ||
151 | goto out; | ||
152 | } | ||
153 | |||
142 | new_ns = create_new_namespaces(flags, tsk, tsk->fs); | 154 | new_ns = create_new_namespaces(flags, tsk, tsk->fs); |
143 | if (IS_ERR(new_ns)) { | 155 | if (IS_ERR(new_ns)) { |
144 | err = PTR_ERR(new_ns); | 156 | err = PTR_ERR(new_ns); |
diff --git a/kernel/panic.c b/kernel/panic.c index 24af9f8bac99..425567f45b9f 100644 --- a/kernel/panic.c +++ b/kernel/panic.c | |||
@@ -153,6 +153,8 @@ EXPORT_SYMBOL(panic); | |||
153 | * 'M' - System experienced a machine check exception. | 153 | * 'M' - System experienced a machine check exception. |
154 | * 'B' - System has hit bad_page. | 154 | * 'B' - System has hit bad_page. |
155 | * 'U' - Userspace-defined naughtiness. | 155 | * 'U' - Userspace-defined naughtiness. |
156 | * 'A' - ACPI table overridden. | ||
157 | * 'W' - Taint on warning. | ||
156 | * | 158 | * |
157 | * The string is overwritten by the next call to print_taint(). | 159 | * The string is overwritten by the next call to print_taint(). |
158 | */ | 160 | */ |
@@ -161,7 +163,7 @@ const char *print_tainted(void) | |||
161 | { | 163 | { |
162 | static char buf[20]; | 164 | static char buf[20]; |
163 | if (tainted) { | 165 | if (tainted) { |
164 | snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c", | 166 | snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c", |
165 | tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G', | 167 | tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G', |
166 | tainted & TAINT_FORCED_MODULE ? 'F' : ' ', | 168 | tainted & TAINT_FORCED_MODULE ? 'F' : ' ', |
167 | tainted & TAINT_UNSAFE_SMP ? 'S' : ' ', | 169 | tainted & TAINT_UNSAFE_SMP ? 'S' : ' ', |
@@ -170,7 +172,8 @@ const char *print_tainted(void) | |||
170 | tainted & TAINT_BAD_PAGE ? 'B' : ' ', | 172 | tainted & TAINT_BAD_PAGE ? 'B' : ' ', |
171 | tainted & TAINT_USER ? 'U' : ' ', | 173 | tainted & TAINT_USER ? 'U' : ' ', |
172 | tainted & TAINT_DIE ? 'D' : ' ', | 174 | tainted & TAINT_DIE ? 'D' : ' ', |
173 | tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' '); | 175 | tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ', |
176 | tainted & TAINT_WARN ? 'W' : ' '); | ||
174 | } | 177 | } |
175 | else | 178 | else |
176 | snprintf(buf, sizeof(buf), "Not tainted"); | 179 | snprintf(buf, sizeof(buf), "Not tainted"); |
@@ -312,6 +315,7 @@ void warn_on_slowpath(const char *file, int line) | |||
312 | print_modules(); | 315 | print_modules(); |
313 | dump_stack(); | 316 | dump_stack(); |
314 | print_oops_end_marker(); | 317 | print_oops_end_marker(); |
318 | add_taint(TAINT_WARN); | ||
315 | } | 319 | } |
316 | EXPORT_SYMBOL(warn_on_slowpath); | 320 | EXPORT_SYMBOL(warn_on_slowpath); |
317 | #endif | 321 | #endif |
diff --git a/kernel/printk.c b/kernel/printk.c index bdd4ea8c3f2b..d3f9c0f788bf 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -1287,31 +1287,7 @@ void tty_write_message(struct tty_struct *tty, char *msg) | |||
1287 | */ | 1287 | */ |
1288 | int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) | 1288 | int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst) |
1289 | { | 1289 | { |
1290 | static DEFINE_SPINLOCK(ratelimit_lock); | 1290 | return __ratelimit(ratelimit_jiffies, ratelimit_burst); |
1291 | static unsigned toks = 10 * 5 * HZ; | ||
1292 | static unsigned long last_msg; | ||
1293 | static int missed; | ||
1294 | unsigned long flags; | ||
1295 | unsigned long now = jiffies; | ||
1296 | |||
1297 | spin_lock_irqsave(&ratelimit_lock, flags); | ||
1298 | toks += now - last_msg; | ||
1299 | last_msg = now; | ||
1300 | if (toks > (ratelimit_burst * ratelimit_jiffies)) | ||
1301 | toks = ratelimit_burst * ratelimit_jiffies; | ||
1302 | if (toks >= ratelimit_jiffies) { | ||
1303 | int lost = missed; | ||
1304 | |||
1305 | missed = 0; | ||
1306 | toks -= ratelimit_jiffies; | ||
1307 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
1308 | if (lost) | ||
1309 | printk(KERN_WARNING "printk: %d messages suppressed.\n", lost); | ||
1310 | return 1; | ||
1311 | } | ||
1312 | missed++; | ||
1313 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
1314 | return 0; | ||
1315 | } | 1291 | } |
1316 | EXPORT_SYMBOL(__printk_ratelimit); | 1292 | EXPORT_SYMBOL(__printk_ratelimit); |
1317 | 1293 | ||
diff --git a/kernel/profile.c b/kernel/profile.c index 606d7387265c..ae7ead82cbc9 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -587,10 +587,10 @@ static int __init create_proc_profile(void) | |||
587 | return 0; | 587 | return 0; |
588 | if (create_hash_tables()) | 588 | if (create_hash_tables()) |
589 | return -1; | 589 | return -1; |
590 | entry = create_proc_entry("profile", S_IWUSR | S_IRUGO, NULL); | 590 | entry = proc_create("profile", S_IWUSR | S_IRUGO, |
591 | NULL, &proc_profile_operations); | ||
591 | if (!entry) | 592 | if (!entry) |
592 | return 0; | 593 | return 0; |
593 | entry->proc_fops = &proc_profile_operations; | ||
594 | entry->size = (1+prof_len) * sizeof(atomic_t); | 594 | entry->size = (1+prof_len) * sizeof(atomic_t); |
595 | hotcpu_notifier(profile_cpu_callback, 0); | 595 | hotcpu_notifier(profile_cpu_callback, 0); |
596 | return 0; | 596 | return 0; |
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c index 47894f919d4e..33acc424667e 100644 --- a/kernel/rcutorture.c +++ b/kernel/rcutorture.c | |||
@@ -45,6 +45,7 @@ | |||
45 | #include <linux/byteorder/swabb.h> | 45 | #include <linux/byteorder/swabb.h> |
46 | #include <linux/stat.h> | 46 | #include <linux/stat.h> |
47 | #include <linux/srcu.h> | 47 | #include <linux/srcu.h> |
48 | #include <linux/slab.h> | ||
48 | 49 | ||
49 | MODULE_LICENSE("GPL"); | 50 | MODULE_LICENSE("GPL"); |
50 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " | 51 | MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " |
diff --git a/kernel/relay.c b/kernel/relay.c index dc873fba90d2..7de644cdec43 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -65,6 +65,35 @@ static struct vm_operations_struct relay_file_mmap_ops = { | |||
65 | .close = relay_file_mmap_close, | 65 | .close = relay_file_mmap_close, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | /* | ||
69 | * allocate an array of pointers of struct page | ||
70 | */ | ||
71 | static struct page **relay_alloc_page_array(unsigned int n_pages) | ||
72 | { | ||
73 | struct page **array; | ||
74 | size_t pa_size = n_pages * sizeof(struct page *); | ||
75 | |||
76 | if (pa_size > PAGE_SIZE) { | ||
77 | array = vmalloc(pa_size); | ||
78 | if (array) | ||
79 | memset(array, 0, pa_size); | ||
80 | } else { | ||
81 | array = kzalloc(pa_size, GFP_KERNEL); | ||
82 | } | ||
83 | return array; | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * free an array of pointers of struct page | ||
88 | */ | ||
89 | static void relay_free_page_array(struct page **array) | ||
90 | { | ||
91 | if (is_vmalloc_addr(array)) | ||
92 | vfree(array); | ||
93 | else | ||
94 | kfree(array); | ||
95 | } | ||
96 | |||
68 | /** | 97 | /** |
69 | * relay_mmap_buf: - mmap channel buffer to process address space | 98 | * relay_mmap_buf: - mmap channel buffer to process address space |
70 | * @buf: relay channel buffer | 99 | * @buf: relay channel buffer |
@@ -109,7 +138,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) | |||
109 | *size = PAGE_ALIGN(*size); | 138 | *size = PAGE_ALIGN(*size); |
110 | n_pages = *size >> PAGE_SHIFT; | 139 | n_pages = *size >> PAGE_SHIFT; |
111 | 140 | ||
112 | buf->page_array = kcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); | 141 | buf->page_array = relay_alloc_page_array(n_pages); |
113 | if (!buf->page_array) | 142 | if (!buf->page_array) |
114 | return NULL; | 143 | return NULL; |
115 | 144 | ||
@@ -130,7 +159,7 @@ static void *relay_alloc_buf(struct rchan_buf *buf, size_t *size) | |||
130 | depopulate: | 159 | depopulate: |
131 | for (j = 0; j < i; j++) | 160 | for (j = 0; j < i; j++) |
132 | __free_page(buf->page_array[j]); | 161 | __free_page(buf->page_array[j]); |
133 | kfree(buf->page_array); | 162 | relay_free_page_array(buf->page_array); |
134 | return NULL; | 163 | return NULL; |
135 | } | 164 | } |
136 | 165 | ||
@@ -189,7 +218,7 @@ static void relay_destroy_buf(struct rchan_buf *buf) | |||
189 | vunmap(buf->start); | 218 | vunmap(buf->start); |
190 | for (i = 0; i < buf->page_count; i++) | 219 | for (i = 0; i < buf->page_count; i++) |
191 | __free_page(buf->page_array[i]); | 220 | __free_page(buf->page_array[i]); |
192 | kfree(buf->page_array); | 221 | relay_free_page_array(buf->page_array); |
193 | } | 222 | } |
194 | chan->buf[buf->cpu] = NULL; | 223 | chan->buf[buf->cpu] = NULL; |
195 | kfree(buf->padding); | 224 | kfree(buf->padding); |
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index efbfc0fc232f..d3c61b4ebef2 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/types.h> | 10 | #include <linux/types.h> |
11 | #include <linux/parser.h> | 11 | #include <linux/parser.h> |
12 | #include <linux/fs.h> | 12 | #include <linux/fs.h> |
13 | #include <linux/slab.h> | ||
13 | #include <linux/res_counter.h> | 14 | #include <linux/res_counter.h> |
14 | #include <linux/uaccess.h> | 15 | #include <linux/uaccess.h> |
15 | 16 | ||
@@ -27,6 +28,8 @@ int res_counter_charge_locked(struct res_counter *counter, unsigned long val) | |||
27 | } | 28 | } |
28 | 29 | ||
29 | counter->usage += val; | 30 | counter->usage += val; |
31 | if (counter->usage > counter->max_usage) | ||
32 | counter->max_usage = counter->usage; | ||
30 | return 0; | 33 | return 0; |
31 | } | 34 | } |
32 | 35 | ||
@@ -65,6 +68,8 @@ res_counter_member(struct res_counter *counter, int member) | |||
65 | switch (member) { | 68 | switch (member) { |
66 | case RES_USAGE: | 69 | case RES_USAGE: |
67 | return &counter->usage; | 70 | return &counter->usage; |
71 | case RES_MAX_USAGE: | ||
72 | return &counter->max_usage; | ||
68 | case RES_LIMIT: | 73 | case RES_LIMIT: |
69 | return &counter->limit; | 74 | return &counter->limit; |
70 | case RES_FAILCNT: | 75 | case RES_FAILCNT: |
@@ -92,6 +97,11 @@ ssize_t res_counter_read(struct res_counter *counter, int member, | |||
92 | pos, buf, s - buf); | 97 | pos, buf, s - buf); |
93 | } | 98 | } |
94 | 99 | ||
100 | u64 res_counter_read_u64(struct res_counter *counter, int member) | ||
101 | { | ||
102 | return *res_counter_member(counter, member); | ||
103 | } | ||
104 | |||
95 | ssize_t res_counter_write(struct res_counter *counter, int member, | 105 | ssize_t res_counter_write(struct res_counter *counter, int member, |
96 | const char __user *userbuf, size_t nbytes, loff_t *pos, | 106 | const char __user *userbuf, size_t nbytes, loff_t *pos, |
97 | int (*write_strategy)(char *st_buf, unsigned long long *val)) | 107 | int (*write_strategy)(char *st_buf, unsigned long long *val)) |
diff --git a/kernel/resource.c b/kernel/resource.c index cee12cc47cab..74af2d7cb5a1 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -131,14 +131,8 @@ static const struct file_operations proc_iomem_operations = { | |||
131 | 131 | ||
132 | static int __init ioresources_init(void) | 132 | static int __init ioresources_init(void) |
133 | { | 133 | { |
134 | struct proc_dir_entry *entry; | 134 | proc_create("ioports", 0, NULL, &proc_ioports_operations); |
135 | 135 | proc_create("iomem", 0, NULL, &proc_iomem_operations); | |
136 | entry = create_proc_entry("ioports", 0, NULL); | ||
137 | if (entry) | ||
138 | entry->proc_fops = &proc_ioports_operations; | ||
139 | entry = create_proc_entry("iomem", 0, NULL); | ||
140 | if (entry) | ||
141 | entry->proc_fops = &proc_iomem_operations; | ||
142 | return 0; | 136 | return 0; |
143 | } | 137 | } |
144 | __initcall(ioresources_init); | 138 | __initcall(ioresources_init); |
diff --git a/kernel/sched.c b/kernel/sched.c index 740fb409e5bb..e2f7f5acc807 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -9057,13 +9057,13 @@ cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | |||
9057 | } | 9057 | } |
9058 | 9058 | ||
9059 | #ifdef CONFIG_FAIR_GROUP_SCHED | 9059 | #ifdef CONFIG_FAIR_GROUP_SCHED |
9060 | static int cpu_shares_write_uint(struct cgroup *cgrp, struct cftype *cftype, | 9060 | static int cpu_shares_write_u64(struct cgroup *cgrp, struct cftype *cftype, |
9061 | u64 shareval) | 9061 | u64 shareval) |
9062 | { | 9062 | { |
9063 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); | 9063 | return sched_group_set_shares(cgroup_tg(cgrp), shareval); |
9064 | } | 9064 | } |
9065 | 9065 | ||
9066 | static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) | 9066 | static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) |
9067 | { | 9067 | { |
9068 | struct task_group *tg = cgroup_tg(cgrp); | 9068 | struct task_group *tg = cgroup_tg(cgrp); |
9069 | 9069 | ||
@@ -9073,48 +9073,14 @@ static u64 cpu_shares_read_uint(struct cgroup *cgrp, struct cftype *cft) | |||
9073 | 9073 | ||
9074 | #ifdef CONFIG_RT_GROUP_SCHED | 9074 | #ifdef CONFIG_RT_GROUP_SCHED |
9075 | static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, | 9075 | static ssize_t cpu_rt_runtime_write(struct cgroup *cgrp, struct cftype *cft, |
9076 | struct file *file, | 9076 | s64 val) |
9077 | const char __user *userbuf, | ||
9078 | size_t nbytes, loff_t *unused_ppos) | ||
9079 | { | 9077 | { |
9080 | char buffer[64]; | 9078 | return sched_group_set_rt_runtime(cgroup_tg(cgrp), val); |
9081 | int retval = 0; | ||
9082 | s64 val; | ||
9083 | char *end; | ||
9084 | |||
9085 | if (!nbytes) | ||
9086 | return -EINVAL; | ||
9087 | if (nbytes >= sizeof(buffer)) | ||
9088 | return -E2BIG; | ||
9089 | if (copy_from_user(buffer, userbuf, nbytes)) | ||
9090 | return -EFAULT; | ||
9091 | |||
9092 | buffer[nbytes] = 0; /* nul-terminate */ | ||
9093 | |||
9094 | /* strip newline if necessary */ | ||
9095 | if (nbytes && (buffer[nbytes-1] == '\n')) | ||
9096 | buffer[nbytes-1] = 0; | ||
9097 | val = simple_strtoll(buffer, &end, 0); | ||
9098 | if (*end) | ||
9099 | return -EINVAL; | ||
9100 | |||
9101 | /* Pass to subsystem */ | ||
9102 | retval = sched_group_set_rt_runtime(cgroup_tg(cgrp), val); | ||
9103 | if (!retval) | ||
9104 | retval = nbytes; | ||
9105 | return retval; | ||
9106 | } | 9079 | } |
9107 | 9080 | ||
9108 | static ssize_t cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft, | 9081 | static s64 cpu_rt_runtime_read(struct cgroup *cgrp, struct cftype *cft) |
9109 | struct file *file, | ||
9110 | char __user *buf, size_t nbytes, | ||
9111 | loff_t *ppos) | ||
9112 | { | 9082 | { |
9113 | char tmp[64]; | 9083 | return sched_group_rt_runtime(cgroup_tg(cgrp)); |
9114 | long val = sched_group_rt_runtime(cgroup_tg(cgrp)); | ||
9115 | int len = sprintf(tmp, "%ld\n", val); | ||
9116 | |||
9117 | return simple_read_from_buffer(buf, nbytes, ppos, tmp, len); | ||
9118 | } | 9084 | } |
9119 | 9085 | ||
9120 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, | 9086 | static int cpu_rt_period_write_uint(struct cgroup *cgrp, struct cftype *cftype, |
@@ -9133,20 +9099,20 @@ static struct cftype cpu_files[] = { | |||
9133 | #ifdef CONFIG_FAIR_GROUP_SCHED | 9099 | #ifdef CONFIG_FAIR_GROUP_SCHED |
9134 | { | 9100 | { |
9135 | .name = "shares", | 9101 | .name = "shares", |
9136 | .read_uint = cpu_shares_read_uint, | 9102 | .read_u64 = cpu_shares_read_u64, |
9137 | .write_uint = cpu_shares_write_uint, | 9103 | .write_u64 = cpu_shares_write_u64, |
9138 | }, | 9104 | }, |
9139 | #endif | 9105 | #endif |
9140 | #ifdef CONFIG_RT_GROUP_SCHED | 9106 | #ifdef CONFIG_RT_GROUP_SCHED |
9141 | { | 9107 | { |
9142 | .name = "rt_runtime_us", | 9108 | .name = "rt_runtime_us", |
9143 | .read = cpu_rt_runtime_read, | 9109 | .read_s64 = cpu_rt_runtime_read, |
9144 | .write = cpu_rt_runtime_write, | 9110 | .write_s64 = cpu_rt_runtime_write, |
9145 | }, | 9111 | }, |
9146 | { | 9112 | { |
9147 | .name = "rt_period_us", | 9113 | .name = "rt_period_us", |
9148 | .read_uint = cpu_rt_period_read_uint, | 9114 | .read_u64 = cpu_rt_period_read_uint, |
9149 | .write_uint = cpu_rt_period_write_uint, | 9115 | .write_u64 = cpu_rt_period_write_uint, |
9150 | }, | 9116 | }, |
9151 | #endif | 9117 | #endif |
9152 | }; | 9118 | }; |
@@ -9277,8 +9243,8 @@ out: | |||
9277 | static struct cftype files[] = { | 9243 | static struct cftype files[] = { |
9278 | { | 9244 | { |
9279 | .name = "usage", | 9245 | .name = "usage", |
9280 | .read_uint = cpuusage_read, | 9246 | .read_u64 = cpuusage_read, |
9281 | .write_uint = cpuusage_write, | 9247 | .write_u64 = cpuusage_write, |
9282 | }, | 9248 | }, |
9283 | }; | 9249 | }; |
9284 | 9250 | ||
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c index f3f4af4b8b0f..8a9498e7c831 100644 --- a/kernel/sched_debug.c +++ b/kernel/sched_debug.c | |||
@@ -277,12 +277,9 @@ static int __init init_sched_debug_procfs(void) | |||
277 | { | 277 | { |
278 | struct proc_dir_entry *pe; | 278 | struct proc_dir_entry *pe; |
279 | 279 | ||
280 | pe = create_proc_entry("sched_debug", 0644, NULL); | 280 | pe = proc_create("sched_debug", 0644, NULL, &sched_debug_fops); |
281 | if (!pe) | 281 | if (!pe) |
282 | return -ENOMEM; | 282 | return -ENOMEM; |
283 | |||
284 | pe->proc_fops = &sched_debug_fops; | ||
285 | |||
286 | return 0; | 283 | return 0; |
287 | } | 284 | } |
288 | 285 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index f2a451366953..e423d0d9e6ff 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1545,6 +1545,19 @@ out: | |||
1545 | * | 1545 | * |
1546 | */ | 1546 | */ |
1547 | 1547 | ||
1548 | static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r, | ||
1549 | cputime_t *utimep, cputime_t *stimep) | ||
1550 | { | ||
1551 | *utimep = cputime_add(*utimep, t->utime); | ||
1552 | *stimep = cputime_add(*stimep, t->stime); | ||
1553 | r->ru_nvcsw += t->nvcsw; | ||
1554 | r->ru_nivcsw += t->nivcsw; | ||
1555 | r->ru_minflt += t->min_flt; | ||
1556 | r->ru_majflt += t->maj_flt; | ||
1557 | r->ru_inblock += task_io_get_inblock(t); | ||
1558 | r->ru_oublock += task_io_get_oublock(t); | ||
1559 | } | ||
1560 | |||
1548 | static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | 1561 | static void k_getrusage(struct task_struct *p, int who, struct rusage *r) |
1549 | { | 1562 | { |
1550 | struct task_struct *t; | 1563 | struct task_struct *t; |
@@ -1554,6 +1567,11 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1554 | memset((char *) r, 0, sizeof *r); | 1567 | memset((char *) r, 0, sizeof *r); |
1555 | utime = stime = cputime_zero; | 1568 | utime = stime = cputime_zero; |
1556 | 1569 | ||
1570 | if (who == RUSAGE_THREAD) { | ||
1571 | accumulate_thread_rusage(p, r, &utime, &stime); | ||
1572 | goto out; | ||
1573 | } | ||
1574 | |||
1557 | rcu_read_lock(); | 1575 | rcu_read_lock(); |
1558 | if (!lock_task_sighand(p, &flags)) { | 1576 | if (!lock_task_sighand(p, &flags)) { |
1559 | rcu_read_unlock(); | 1577 | rcu_read_unlock(); |
@@ -1586,14 +1604,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1586 | r->ru_oublock += p->signal->oublock; | 1604 | r->ru_oublock += p->signal->oublock; |
1587 | t = p; | 1605 | t = p; |
1588 | do { | 1606 | do { |
1589 | utime = cputime_add(utime, t->utime); | 1607 | accumulate_thread_rusage(t, r, &utime, &stime); |
1590 | stime = cputime_add(stime, t->stime); | ||
1591 | r->ru_nvcsw += t->nvcsw; | ||
1592 | r->ru_nivcsw += t->nivcsw; | ||
1593 | r->ru_minflt += t->min_flt; | ||
1594 | r->ru_majflt += t->maj_flt; | ||
1595 | r->ru_inblock += task_io_get_inblock(t); | ||
1596 | r->ru_oublock += task_io_get_oublock(t); | ||
1597 | t = next_thread(t); | 1608 | t = next_thread(t); |
1598 | } while (t != p); | 1609 | } while (t != p); |
1599 | break; | 1610 | break; |
@@ -1605,6 +1616,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r) | |||
1605 | unlock_task_sighand(p, &flags); | 1616 | unlock_task_sighand(p, &flags); |
1606 | rcu_read_unlock(); | 1617 | rcu_read_unlock(); |
1607 | 1618 | ||
1619 | out: | ||
1608 | cputime_to_timeval(utime, &r->ru_utime); | 1620 | cputime_to_timeval(utime, &r->ru_utime); |
1609 | cputime_to_timeval(stime, &r->ru_stime); | 1621 | cputime_to_timeval(stime, &r->ru_stime); |
1610 | } | 1622 | } |
@@ -1618,7 +1630,8 @@ int getrusage(struct task_struct *p, int who, struct rusage __user *ru) | |||
1618 | 1630 | ||
1619 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru) | 1631 | asmlinkage long sys_getrusage(int who, struct rusage __user *ru) |
1620 | { | 1632 | { |
1621 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN) | 1633 | if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN && |
1634 | who != RUSAGE_THREAD) | ||
1622 | return -EINVAL; | 1635 | return -EINVAL; |
1623 | return getrusage(current, who, ru); | 1636 | return getrusage(current, who, ru); |
1624 | } | 1637 | } |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index fd3364827ccf..d7ffdc59816a 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/writeback.h> | 38 | #include <linux/writeback.h> |
39 | #include <linux/hugetlb.h> | 39 | #include <linux/hugetlb.h> |
40 | #include <linux/initrd.h> | 40 | #include <linux/initrd.h> |
41 | #include <linux/key.h> | ||
41 | #include <linux/times.h> | 42 | #include <linux/times.h> |
42 | #include <linux/limits.h> | 43 | #include <linux/limits.h> |
43 | #include <linux/dcache.h> | 44 | #include <linux/dcache.h> |
@@ -144,12 +145,6 @@ extern int no_unaligned_warning; | |||
144 | extern int max_lock_depth; | 145 | extern int max_lock_depth; |
145 | #endif | 146 | #endif |
146 | 147 | ||
147 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
148 | static int parse_table(int __user *, int, void __user *, size_t __user *, | ||
149 | void __user *, size_t, struct ctl_table *); | ||
150 | #endif | ||
151 | |||
152 | |||
153 | #ifdef CONFIG_PROC_SYSCTL | 148 | #ifdef CONFIG_PROC_SYSCTL |
154 | static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, | 149 | static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, |
155 | void __user *buffer, size_t *lenp, loff_t *ppos); | 150 | void __user *buffer, size_t *lenp, loff_t *ppos); |
@@ -809,6 +804,14 @@ static struct ctl_table kern_table[] = { | |||
809 | .proc_handler = &proc_dostring, | 804 | .proc_handler = &proc_dostring, |
810 | .strategy = &sysctl_string, | 805 | .strategy = &sysctl_string, |
811 | }, | 806 | }, |
807 | #ifdef CONFIG_KEYS | ||
808 | { | ||
809 | .ctl_name = CTL_UNNUMBERED, | ||
810 | .procname = "keys", | ||
811 | .mode = 0555, | ||
812 | .child = key_sysctls, | ||
813 | }, | ||
814 | #endif | ||
812 | /* | 815 | /* |
813 | * NOTE: do not add new entries to this table unless you have read | 816 | * NOTE: do not add new entries to this table unless you have read |
814 | * Documentation/sysctl/ctl_unnumbered.txt | 817 | * Documentation/sysctl/ctl_unnumbered.txt |
@@ -1430,6 +1433,76 @@ void register_sysctl_root(struct ctl_table_root *root) | |||
1430 | } | 1433 | } |
1431 | 1434 | ||
1432 | #ifdef CONFIG_SYSCTL_SYSCALL | 1435 | #ifdef CONFIG_SYSCTL_SYSCALL |
1436 | /* Perform the actual read/write of a sysctl table entry. */ | ||
1437 | static int do_sysctl_strategy(struct ctl_table_root *root, | ||
1438 | struct ctl_table *table, | ||
1439 | int __user *name, int nlen, | ||
1440 | void __user *oldval, size_t __user *oldlenp, | ||
1441 | void __user *newval, size_t newlen) | ||
1442 | { | ||
1443 | int op = 0, rc; | ||
1444 | |||
1445 | if (oldval) | ||
1446 | op |= 004; | ||
1447 | if (newval) | ||
1448 | op |= 002; | ||
1449 | if (sysctl_perm(root, table, op)) | ||
1450 | return -EPERM; | ||
1451 | |||
1452 | if (table->strategy) { | ||
1453 | rc = table->strategy(table, name, nlen, oldval, oldlenp, | ||
1454 | newval, newlen); | ||
1455 | if (rc < 0) | ||
1456 | return rc; | ||
1457 | if (rc > 0) | ||
1458 | return 0; | ||
1459 | } | ||
1460 | |||
1461 | /* If there is no strategy routine, or if the strategy returns | ||
1462 | * zero, proceed with automatic r/w */ | ||
1463 | if (table->data && table->maxlen) { | ||
1464 | rc = sysctl_data(table, name, nlen, oldval, oldlenp, | ||
1465 | newval, newlen); | ||
1466 | if (rc < 0) | ||
1467 | return rc; | ||
1468 | } | ||
1469 | return 0; | ||
1470 | } | ||
1471 | |||
1472 | static int parse_table(int __user *name, int nlen, | ||
1473 | void __user *oldval, size_t __user *oldlenp, | ||
1474 | void __user *newval, size_t newlen, | ||
1475 | struct ctl_table_root *root, | ||
1476 | struct ctl_table *table) | ||
1477 | { | ||
1478 | int n; | ||
1479 | repeat: | ||
1480 | if (!nlen) | ||
1481 | return -ENOTDIR; | ||
1482 | if (get_user(n, name)) | ||
1483 | return -EFAULT; | ||
1484 | for ( ; table->ctl_name || table->procname; table++) { | ||
1485 | if (!table->ctl_name) | ||
1486 | continue; | ||
1487 | if (n == table->ctl_name) { | ||
1488 | int error; | ||
1489 | if (table->child) { | ||
1490 | if (sysctl_perm(root, table, 001)) | ||
1491 | return -EPERM; | ||
1492 | name++; | ||
1493 | nlen--; | ||
1494 | table = table->child; | ||
1495 | goto repeat; | ||
1496 | } | ||
1497 | error = do_sysctl_strategy(root, table, name, nlen, | ||
1498 | oldval, oldlenp, | ||
1499 | newval, newlen); | ||
1500 | return error; | ||
1501 | } | ||
1502 | } | ||
1503 | return -ENOTDIR; | ||
1504 | } | ||
1505 | |||
1433 | int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, | 1506 | int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, |
1434 | void __user *newval, size_t newlen) | 1507 | void __user *newval, size_t newlen) |
1435 | { | 1508 | { |
@@ -1447,7 +1520,8 @@ int do_sysctl(int __user *name, int nlen, void __user *oldval, size_t __user *ol | |||
1447 | for (head = sysctl_head_next(NULL); head; | 1520 | for (head = sysctl_head_next(NULL); head; |
1448 | head = sysctl_head_next(head)) { | 1521 | head = sysctl_head_next(head)) { |
1449 | error = parse_table(name, nlen, oldval, oldlenp, | 1522 | error = parse_table(name, nlen, oldval, oldlenp, |
1450 | newval, newlen, head->ctl_table); | 1523 | newval, newlen, |
1524 | head->root, head->ctl_table); | ||
1451 | if (error != -ENOTDIR) { | 1525 | if (error != -ENOTDIR) { |
1452 | sysctl_head_finish(head); | 1526 | sysctl_head_finish(head); |
1453 | break; | 1527 | break; |
@@ -1493,84 +1567,22 @@ static int test_perm(int mode, int op) | |||
1493 | return -EACCES; | 1567 | return -EACCES; |
1494 | } | 1568 | } |
1495 | 1569 | ||
1496 | int sysctl_perm(struct ctl_table *table, int op) | 1570 | int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op) |
1497 | { | 1571 | { |
1498 | int error; | 1572 | int error; |
1573 | int mode; | ||
1574 | |||
1499 | error = security_sysctl(table, op); | 1575 | error = security_sysctl(table, op); |
1500 | if (error) | 1576 | if (error) |
1501 | return error; | 1577 | return error; |
1502 | return test_perm(table->mode, op); | ||
1503 | } | ||
1504 | |||
1505 | #ifdef CONFIG_SYSCTL_SYSCALL | ||
1506 | static int parse_table(int __user *name, int nlen, | ||
1507 | void __user *oldval, size_t __user *oldlenp, | ||
1508 | void __user *newval, size_t newlen, | ||
1509 | struct ctl_table *table) | ||
1510 | { | ||
1511 | int n; | ||
1512 | repeat: | ||
1513 | if (!nlen) | ||
1514 | return -ENOTDIR; | ||
1515 | if (get_user(n, name)) | ||
1516 | return -EFAULT; | ||
1517 | for ( ; table->ctl_name || table->procname; table++) { | ||
1518 | if (!table->ctl_name) | ||
1519 | continue; | ||
1520 | if (n == table->ctl_name) { | ||
1521 | int error; | ||
1522 | if (table->child) { | ||
1523 | if (sysctl_perm(table, 001)) | ||
1524 | return -EPERM; | ||
1525 | name++; | ||
1526 | nlen--; | ||
1527 | table = table->child; | ||
1528 | goto repeat; | ||
1529 | } | ||
1530 | error = do_sysctl_strategy(table, name, nlen, | ||
1531 | oldval, oldlenp, | ||
1532 | newval, newlen); | ||
1533 | return error; | ||
1534 | } | ||
1535 | } | ||
1536 | return -ENOTDIR; | ||
1537 | } | ||
1538 | 1578 | ||
1539 | /* Perform the actual read/write of a sysctl table entry. */ | 1579 | if (root->permissions) |
1540 | int do_sysctl_strategy (struct ctl_table *table, | 1580 | mode = root->permissions(root, current->nsproxy, table); |
1541 | int __user *name, int nlen, | 1581 | else |
1542 | void __user *oldval, size_t __user *oldlenp, | 1582 | mode = table->mode; |
1543 | void __user *newval, size_t newlen) | ||
1544 | { | ||
1545 | int op = 0, rc; | ||
1546 | |||
1547 | if (oldval) | ||
1548 | op |= 004; | ||
1549 | if (newval) | ||
1550 | op |= 002; | ||
1551 | if (sysctl_perm(table, op)) | ||
1552 | return -EPERM; | ||
1553 | 1583 | ||
1554 | if (table->strategy) { | 1584 | return test_perm(mode, op); |
1555 | rc = table->strategy(table, name, nlen, oldval, oldlenp, | ||
1556 | newval, newlen); | ||
1557 | if (rc < 0) | ||
1558 | return rc; | ||
1559 | if (rc > 0) | ||
1560 | return 0; | ||
1561 | } | ||
1562 | |||
1563 | /* If there is no strategy routine, or if the strategy returns | ||
1564 | * zero, proceed with automatic r/w */ | ||
1565 | if (table->data && table->maxlen) { | ||
1566 | rc = sysctl_data(table, name, nlen, oldval, oldlenp, | ||
1567 | newval, newlen); | ||
1568 | if (rc < 0) | ||
1569 | return rc; | ||
1570 | } | ||
1571 | return 0; | ||
1572 | } | 1585 | } |
1573 | #endif /* CONFIG_SYSCTL_SYSCALL */ | ||
1574 | 1586 | ||
1575 | static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) | 1587 | static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) |
1576 | { | 1588 | { |
@@ -1583,9 +1595,13 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table) | |||
1583 | 1595 | ||
1584 | static __init int sysctl_init(void) | 1596 | static __init int sysctl_init(void) |
1585 | { | 1597 | { |
1586 | int err; | ||
1587 | sysctl_set_parent(NULL, root_table); | 1598 | sysctl_set_parent(NULL, root_table); |
1588 | err = sysctl_check_table(current->nsproxy, root_table); | 1599 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK |
1600 | { | ||
1601 | int err; | ||
1602 | err = sysctl_check_table(current->nsproxy, root_table); | ||
1603 | } | ||
1604 | #endif | ||
1589 | return 0; | 1605 | return 0; |
1590 | } | 1606 | } |
1591 | 1607 | ||
@@ -1712,10 +1728,12 @@ struct ctl_table_header *__register_sysctl_paths( | |||
1712 | header->unregistering = NULL; | 1728 | header->unregistering = NULL; |
1713 | header->root = root; | 1729 | header->root = root; |
1714 | sysctl_set_parent(NULL, header->ctl_table); | 1730 | sysctl_set_parent(NULL, header->ctl_table); |
1731 | #ifdef CONFIG_SYSCTL_SYSCALL_CHECK | ||
1715 | if (sysctl_check_table(namespaces, header->ctl_table)) { | 1732 | if (sysctl_check_table(namespaces, header->ctl_table)) { |
1716 | kfree(header); | 1733 | kfree(header); |
1717 | return NULL; | 1734 | return NULL; |
1718 | } | 1735 | } |
1736 | #endif | ||
1719 | spin_lock(&sysctl_lock); | 1737 | spin_lock(&sysctl_lock); |
1720 | header_list = lookup_header_list(root, namespaces); | 1738 | header_list = lookup_header_list(root, namespaces); |
1721 | list_add_tail(&header->ctl_entry, header_list); | 1739 | list_add_tail(&header->ctl_entry, header_list); |
diff --git a/kernel/time.c b/kernel/time.c index 35d373a98782..86729042e4cd 100644 --- a/kernel/time.c +++ b/kernel/time.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/syscalls.h> | 35 | #include <linux/syscalls.h> |
36 | #include <linux/security.h> | 36 | #include <linux/security.h> |
37 | #include <linux/fs.h> | 37 | #include <linux/fs.h> |
38 | #include <linux/slab.h> | ||
38 | 39 | ||
39 | #include <asm/uaccess.h> | 40 | #include <asm/uaccess.h> |
40 | #include <asm/unistd.h> | 41 | #include <asm/unistd.h> |
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c index 67fe8fc21fb1..a40e20fd0001 100644 --- a/kernel/time/timer_list.c +++ b/kernel/time/timer_list.c | |||
@@ -278,12 +278,9 @@ static int __init init_timer_list_procfs(void) | |||
278 | { | 278 | { |
279 | struct proc_dir_entry *pe; | 279 | struct proc_dir_entry *pe; |
280 | 280 | ||
281 | pe = create_proc_entry("timer_list", 0644, NULL); | 281 | pe = proc_create("timer_list", 0644, NULL, &timer_list_fops); |
282 | if (!pe) | 282 | if (!pe) |
283 | return -ENOMEM; | 283 | return -ENOMEM; |
284 | |||
285 | pe->proc_fops = &timer_list_fops; | ||
286 | |||
287 | return 0; | 284 | return 0; |
288 | } | 285 | } |
289 | __initcall(init_timer_list_procfs); | 286 | __initcall(init_timer_list_procfs); |
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 417da8c5bc72..c994530d166d 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c | |||
@@ -415,12 +415,9 @@ static int __init init_tstats_procfs(void) | |||
415 | { | 415 | { |
416 | struct proc_dir_entry *pe; | 416 | struct proc_dir_entry *pe; |
417 | 417 | ||
418 | pe = create_proc_entry("timer_stats", 0644, NULL); | 418 | pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); |
419 | if (!pe) | 419 | if (!pe) |
420 | return -ENOMEM; | 420 | return -ENOMEM; |
421 | |||
422 | pe->proc_fops = &tstats_fops; | ||
423 | |||
424 | return 0; | 421 | return 0; |
425 | } | 422 | } |
426 | __initcall(init_tstats_procfs); | 423 | __initcall(init_tstats_procfs); |
diff --git a/kernel/user.c b/kernel/user.c index debce602bfdd..aefbbfa3159f 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -53,10 +53,6 @@ struct user_struct root_user = { | |||
53 | .files = ATOMIC_INIT(0), | 53 | .files = ATOMIC_INIT(0), |
54 | .sigpending = ATOMIC_INIT(0), | 54 | .sigpending = ATOMIC_INIT(0), |
55 | .locked_shm = 0, | 55 | .locked_shm = 0, |
56 | #ifdef CONFIG_KEYS | ||
57 | .uid_keyring = &root_user_keyring, | ||
58 | .session_keyring = &root_session_keyring, | ||
59 | #endif | ||
60 | #ifdef CONFIG_USER_SCHED | 56 | #ifdef CONFIG_USER_SCHED |
61 | .tg = &init_task_group, | 57 | .tg = &init_task_group, |
62 | #endif | 58 | #endif |
@@ -420,12 +416,12 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) | |||
420 | new->mq_bytes = 0; | 416 | new->mq_bytes = 0; |
421 | #endif | 417 | #endif |
422 | new->locked_shm = 0; | 418 | new->locked_shm = 0; |
423 | 419 | #ifdef CONFIG_KEYS | |
424 | if (alloc_uid_keyring(new, current) < 0) | 420 | new->uid_keyring = new->session_keyring = NULL; |
425 | goto out_free_user; | 421 | #endif |
426 | 422 | ||
427 | if (sched_create_user(new) < 0) | 423 | if (sched_create_user(new) < 0) |
428 | goto out_put_keys; | 424 | goto out_free_user; |
429 | 425 | ||
430 | if (uids_user_create(new)) | 426 | if (uids_user_create(new)) |
431 | goto out_destoy_sched; | 427 | goto out_destoy_sched; |
@@ -459,9 +455,6 @@ struct user_struct * alloc_uid(struct user_namespace *ns, uid_t uid) | |||
459 | 455 | ||
460 | out_destoy_sched: | 456 | out_destoy_sched: |
461 | sched_destroy_user(new); | 457 | sched_destroy_user(new); |
462 | out_put_keys: | ||
463 | key_put(new->uid_keyring); | ||
464 | key_put(new->session_keyring); | ||
465 | out_free_user: | 458 | out_free_user: |
466 | kmem_cache_free(uid_cachep, new); | 459 | kmem_cache_free(uid_cachep, new); |
467 | out_unlock: | 460 | out_unlock: |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 4c9006275df7..a9ab0596de44 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -8,6 +8,7 @@ | |||
8 | #include <linux/module.h> | 8 | #include <linux/module.h> |
9 | #include <linux/version.h> | 9 | #include <linux/version.h> |
10 | #include <linux/nsproxy.h> | 10 | #include <linux/nsproxy.h> |
11 | #include <linux/slab.h> | ||
11 | #include <linux/user_namespace.h> | 12 | #include <linux/user_namespace.h> |
12 | 13 | ||
13 | /* | 14 | /* |
@@ -73,3 +74,4 @@ void free_user_ns(struct kref *kref) | |||
73 | release_uids(ns); | 74 | release_uids(ns); |
74 | kfree(ns); | 75 | kfree(ns); |
75 | } | 76 | } |
77 | EXPORT_SYMBOL(free_user_ns); | ||
diff --git a/kernel/utsname.c b/kernel/utsname.c index 816d7b24fa03..64d398f12444 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/utsname.h> | 14 | #include <linux/utsname.h> |
15 | #include <linux/version.h> | 15 | #include <linux/version.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/slab.h> | ||
17 | 18 | ||
18 | /* | 19 | /* |
19 | * Clone a new ns copying an original utsname, setting refcount to 1 | 20 | * Clone a new ns copying an original utsname, setting refcount to 1 |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 00ff4d08e370..7db251a959c5 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -158,8 +158,8 @@ static void __queue_work(struct cpu_workqueue_struct *cwq, | |||
158 | * | 158 | * |
159 | * Returns 0 if @work was already on a queue, non-zero otherwise. | 159 | * Returns 0 if @work was already on a queue, non-zero otherwise. |
160 | * | 160 | * |
161 | * We queue the work to the CPU it was submitted, but there is no | 161 | * We queue the work to the CPU on which it was submitted, but if the CPU dies |
162 | * guarantee that it will be processed by that CPU. | 162 | * it can be processed by another CPU. |
163 | */ | 163 | */ |
164 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) | 164 | int queue_work(struct workqueue_struct *wq, struct work_struct *work) |
165 | { | 165 | { |
@@ -772,7 +772,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name, | |||
772 | } | 772 | } |
773 | EXPORT_SYMBOL_GPL(__create_workqueue_key); | 773 | EXPORT_SYMBOL_GPL(__create_workqueue_key); |
774 | 774 | ||
775 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | 775 | static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq) |
776 | { | 776 | { |
777 | /* | 777 | /* |
778 | * Our caller is either destroy_workqueue() or CPU_DEAD, | 778 | * Our caller is either destroy_workqueue() or CPU_DEAD, |
@@ -808,19 +808,16 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu) | |||
808 | void destroy_workqueue(struct workqueue_struct *wq) | 808 | void destroy_workqueue(struct workqueue_struct *wq) |
809 | { | 809 | { |
810 | const cpumask_t *cpu_map = wq_cpu_map(wq); | 810 | const cpumask_t *cpu_map = wq_cpu_map(wq); |
811 | struct cpu_workqueue_struct *cwq; | ||
812 | int cpu; | 811 | int cpu; |
813 | 812 | ||
814 | get_online_cpus(); | 813 | get_online_cpus(); |
815 | spin_lock(&workqueue_lock); | 814 | spin_lock(&workqueue_lock); |
816 | list_del(&wq->list); | 815 | list_del(&wq->list); |
817 | spin_unlock(&workqueue_lock); | 816 | spin_unlock(&workqueue_lock); |
818 | put_online_cpus(); | ||
819 | 817 | ||
820 | for_each_cpu_mask(cpu, *cpu_map) { | 818 | for_each_cpu_mask(cpu, *cpu_map) |
821 | cwq = per_cpu_ptr(wq->cpu_wq, cpu); | 819 | cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu)); |
822 | cleanup_workqueue_thread(cwq, cpu); | 820 | put_online_cpus(); |
823 | } | ||
824 | 821 | ||
825 | free_percpu(wq->cpu_wq); | 822 | free_percpu(wq->cpu_wq); |
826 | kfree(wq); | 823 | kfree(wq); |
@@ -838,7 +835,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
838 | action &= ~CPU_TASKS_FROZEN; | 835 | action &= ~CPU_TASKS_FROZEN; |
839 | 836 | ||
840 | switch (action) { | 837 | switch (action) { |
841 | |||
842 | case CPU_UP_PREPARE: | 838 | case CPU_UP_PREPARE: |
843 | cpu_set(cpu, cpu_populated_map); | 839 | cpu_set(cpu, cpu_populated_map); |
844 | } | 840 | } |
@@ -861,11 +857,17 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb, | |||
861 | case CPU_UP_CANCELED: | 857 | case CPU_UP_CANCELED: |
862 | start_workqueue_thread(cwq, -1); | 858 | start_workqueue_thread(cwq, -1); |
863 | case CPU_DEAD: | 859 | case CPU_DEAD: |
864 | cleanup_workqueue_thread(cwq, cpu); | 860 | cleanup_workqueue_thread(cwq); |
865 | break; | 861 | break; |
866 | } | 862 | } |
867 | } | 863 | } |
868 | 864 | ||
865 | switch (action) { | ||
866 | case CPU_UP_CANCELED: | ||
867 | case CPU_DEAD: | ||
868 | cpu_clear(cpu, cpu_populated_map); | ||
869 | } | ||
870 | |||
869 | return NOTIFY_OK; | 871 | return NOTIFY_OK; |
870 | } | 872 | } |
871 | 873 | ||
diff --git a/lib/Makefile b/lib/Makefile index 2d7001b7f5a4..0ae4eb047aac 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -6,7 +6,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
6 | rbtree.o radix-tree.o dump_stack.o \ | 6 | rbtree.o radix-tree.o dump_stack.o \ |
7 | idr.o int_sqrt.o extable.o prio_tree.o \ | 7 | idr.o int_sqrt.o extable.o prio_tree.o \ |
8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 8 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
9 | proportions.o prio_heap.o | 9 | proportions.o prio_heap.o ratelimit.o |
10 | 10 | ||
11 | lib-$(CONFIG_MMU) += ioremap.o | 11 | lib-$(CONFIG_MMU) += ioremap.o |
12 | lib-$(CONFIG_SMP) += cpumask.o | 12 | lib-$(CONFIG_SMP) += cpumask.o |
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index d3f5784807b4..24c59ded47a0 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c | |||
@@ -20,8 +20,8 @@ | |||
20 | /* | 20 | /* |
21 | * Find the next set bit in a memory region. | 21 | * Find the next set bit in a memory region. |
22 | */ | 22 | */ |
23 | unsigned long __find_next_bit(const unsigned long *addr, | 23 | unsigned long find_next_bit(const unsigned long *addr, unsigned long size, |
24 | unsigned long size, unsigned long offset) | 24 | unsigned long offset) |
25 | { | 25 | { |
26 | const unsigned long *p = addr + BITOP_WORD(offset); | 26 | const unsigned long *p = addr + BITOP_WORD(offset); |
27 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 27 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -58,14 +58,14 @@ found_first: | |||
58 | found_middle: | 58 | found_middle: |
59 | return result + __ffs(tmp); | 59 | return result + __ffs(tmp); |
60 | } | 60 | } |
61 | EXPORT_SYMBOL(__find_next_bit); | 61 | EXPORT_SYMBOL(find_next_bit); |
62 | 62 | ||
63 | /* | 63 | /* |
64 | * This implementation of find_{first,next}_zero_bit was stolen from | 64 | * This implementation of find_{first,next}_zero_bit was stolen from |
65 | * Linus' asm-alpha/bitops.h. | 65 | * Linus' asm-alpha/bitops.h. |
66 | */ | 66 | */ |
67 | unsigned long __find_next_zero_bit(const unsigned long *addr, | 67 | unsigned long find_next_zero_bit(const unsigned long *addr, unsigned long size, |
68 | unsigned long size, unsigned long offset) | 68 | unsigned long offset) |
69 | { | 69 | { |
70 | const unsigned long *p = addr + BITOP_WORD(offset); | 70 | const unsigned long *p = addr + BITOP_WORD(offset); |
71 | unsigned long result = offset & ~(BITS_PER_LONG-1); | 71 | unsigned long result = offset & ~(BITS_PER_LONG-1); |
@@ -102,15 +102,14 @@ found_first: | |||
102 | found_middle: | 102 | found_middle: |
103 | return result + ffz(tmp); | 103 | return result + ffz(tmp); |
104 | } | 104 | } |
105 | EXPORT_SYMBOL(__find_next_zero_bit); | 105 | EXPORT_SYMBOL(find_next_zero_bit); |
106 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | 106 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ |
107 | 107 | ||
108 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | 108 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
109 | /* | 109 | /* |
110 | * Find the first set bit in a memory region. | 110 | * Find the first set bit in a memory region. |
111 | */ | 111 | */ |
112 | unsigned long __find_first_bit(const unsigned long *addr, | 112 | unsigned long find_first_bit(const unsigned long *addr, unsigned long size) |
113 | unsigned long size) | ||
114 | { | 113 | { |
115 | const unsigned long *p = addr; | 114 | const unsigned long *p = addr; |
116 | unsigned long result = 0; | 115 | unsigned long result = 0; |
@@ -131,13 +130,12 @@ unsigned long __find_first_bit(const unsigned long *addr, | |||
131 | found: | 130 | found: |
132 | return result + __ffs(tmp); | 131 | return result + __ffs(tmp); |
133 | } | 132 | } |
134 | EXPORT_SYMBOL(__find_first_bit); | 133 | EXPORT_SYMBOL(find_first_bit); |
135 | 134 | ||
136 | /* | 135 | /* |
137 | * Find the first cleared bit in a memory region. | 136 | * Find the first cleared bit in a memory region. |
138 | */ | 137 | */ |
139 | unsigned long __find_first_zero_bit(const unsigned long *addr, | 138 | unsigned long find_first_zero_bit(const unsigned long *addr, unsigned long size) |
140 | unsigned long size) | ||
141 | { | 139 | { |
142 | const unsigned long *p = addr; | 140 | const unsigned long *p = addr; |
143 | unsigned long result = 0; | 141 | unsigned long result = 0; |
@@ -158,7 +156,7 @@ unsigned long __find_first_zero_bit(const unsigned long *addr, | |||
158 | found: | 156 | found: |
159 | return result + ffz(tmp); | 157 | return result + ffz(tmp); |
160 | } | 158 | } |
161 | EXPORT_SYMBOL(__find_first_zero_bit); | 159 | EXPORT_SYMBOL(find_first_zero_bit); |
162 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | 160 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ |
163 | 161 | ||
164 | #ifdef __BIG_ENDIAN | 162 | #ifdef __BIG_ENDIAN |
@@ -585,12 +585,11 @@ static void idr_cache_ctor(struct kmem_cache *idr_layer_cache, void *idr_layer) | |||
585 | memset(idr_layer, 0, sizeof(struct idr_layer)); | 585 | memset(idr_layer, 0, sizeof(struct idr_layer)); |
586 | } | 586 | } |
587 | 587 | ||
588 | static int init_id_cache(void) | 588 | void __init idr_init_cache(void) |
589 | { | 589 | { |
590 | if (!idr_layer_cache) | 590 | idr_layer_cache = kmem_cache_create("idr_layer_cache", |
591 | idr_layer_cache = kmem_cache_create("idr_layer_cache", | 591 | sizeof(struct idr_layer), 0, SLAB_PANIC, |
592 | sizeof(struct idr_layer), 0, 0, idr_cache_ctor); | 592 | idr_cache_ctor); |
593 | return 0; | ||
594 | } | 593 | } |
595 | 594 | ||
596 | /** | 595 | /** |
@@ -602,7 +601,6 @@ static int init_id_cache(void) | |||
602 | */ | 601 | */ |
603 | void idr_init(struct idr *idp) | 602 | void idr_init(struct idr *idp) |
604 | { | 603 | { |
605 | init_id_cache(); | ||
606 | memset(idp, 0, sizeof(struct idr)); | 604 | memset(idp, 0, sizeof(struct idr)); |
607 | spin_lock_init(&idp->lock); | 605 | spin_lock_init(&idp->lock); |
608 | } | 606 | } |
diff --git a/lib/inflate.c b/lib/inflate.c index 845f91d3ac12..9762294be062 100644 --- a/lib/inflate.c +++ b/lib/inflate.c | |||
@@ -811,6 +811,9 @@ DEBG("<dyn"); | |||
811 | ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ | 811 | ll = malloc(sizeof(*ll) * (286+30)); /* literal/length and distance code lengths */ |
812 | #endif | 812 | #endif |
813 | 813 | ||
814 | if (ll == NULL) | ||
815 | return 1; | ||
816 | |||
814 | /* make local bit buffer */ | 817 | /* make local bit buffer */ |
815 | b = bb; | 818 | b = bb; |
816 | k = bk; | 819 | k = bk; |
diff --git a/lib/iomap.c b/lib/iomap.c index dd6ca48fe6b0..37a3ea4cac9f 100644 --- a/lib/iomap.c +++ b/lib/iomap.c | |||
@@ -257,7 +257,7 @@ EXPORT_SYMBOL(ioport_unmap); | |||
257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) | 257 | void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) |
258 | { | 258 | { |
259 | resource_size_t start = pci_resource_start(dev, bar); | 259 | resource_size_t start = pci_resource_start(dev, bar); |
260 | unsigned long len = pci_resource_len(dev, bar); | 260 | resource_size_t len = pci_resource_len(dev, bar); |
261 | unsigned long flags = pci_resource_flags(dev, bar); | 261 | unsigned long flags = pci_resource_flags(dev, bar); |
262 | 262 | ||
263 | if (!len || !start) | 263 | if (!len || !start) |
diff --git a/lib/ratelimit.c b/lib/ratelimit.c new file mode 100644 index 000000000000..485e3040dcd4 --- /dev/null +++ b/lib/ratelimit.c | |||
@@ -0,0 +1,51 @@ | |||
1 | /* | ||
2 | * ratelimit.c - Do something with rate limit. | ||
3 | * | ||
4 | * Isolated from kernel/printk.c by Dave Young <hidave.darkstar@gmail.com> | ||
5 | * | ||
6 | * This file is released under the GPLv2. | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/jiffies.h> | ||
12 | #include <linux/module.h> | ||
13 | |||
14 | /* | ||
15 | * __ratelimit - rate limiting | ||
16 | * @ratelimit_jiffies: minimum time in jiffies between two callbacks | ||
17 | * @ratelimit_burst: number of callbacks we do before ratelimiting | ||
18 | * | ||
19 | * This enforces a rate limit: not more than @ratelimit_burst callbacks | ||
20 | * in every ratelimit_jiffies | ||
21 | */ | ||
22 | int __ratelimit(int ratelimit_jiffies, int ratelimit_burst) | ||
23 | { | ||
24 | static DEFINE_SPINLOCK(ratelimit_lock); | ||
25 | static unsigned toks = 10 * 5 * HZ; | ||
26 | static unsigned long last_msg; | ||
27 | static int missed; | ||
28 | unsigned long flags; | ||
29 | unsigned long now = jiffies; | ||
30 | |||
31 | spin_lock_irqsave(&ratelimit_lock, flags); | ||
32 | toks += now - last_msg; | ||
33 | last_msg = now; | ||
34 | if (toks > (ratelimit_burst * ratelimit_jiffies)) | ||
35 | toks = ratelimit_burst * ratelimit_jiffies; | ||
36 | if (toks >= ratelimit_jiffies) { | ||
37 | int lost = missed; | ||
38 | |||
39 | missed = 0; | ||
40 | toks -= ratelimit_jiffies; | ||
41 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
42 | if (lost) | ||
43 | printk(KERN_WARNING "%s: %d messages suppressed\n", | ||
44 | __func__, lost); | ||
45 | return 1; | ||
46 | } | ||
47 | missed++; | ||
48 | spin_unlock_irqrestore(&ratelimit_lock, flags); | ||
49 | return 0; | ||
50 | } | ||
51 | EXPORT_SYMBOL(__ratelimit); | ||
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 025922807e6e..d568894df8cc 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
34 | #include <linux/iommu-helper.h> | ||
34 | 35 | ||
35 | #define OFFSET(val,align) ((unsigned long) \ | 36 | #define OFFSET(val,align) ((unsigned long) \ |
36 | ( (val) & ( (align) - 1))) | 37 | ( (val) & ( (align) - 1))) |
@@ -282,15 +283,6 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr) | |||
282 | return (addr & ~mask) != 0; | 283 | return (addr & ~mask) != 0; |
283 | } | 284 | } |
284 | 285 | ||
285 | static inline unsigned int is_span_boundary(unsigned int index, | ||
286 | unsigned int nslots, | ||
287 | unsigned long offset_slots, | ||
288 | unsigned long max_slots) | ||
289 | { | ||
290 | unsigned long offset = (offset_slots + index) & (max_slots - 1); | ||
291 | return offset + nslots > max_slots; | ||
292 | } | ||
293 | |||
294 | /* | 286 | /* |
295 | * Allocates bounce buffer and returns its kernel virtual address. | 287 | * Allocates bounce buffer and returns its kernel virtual address. |
296 | */ | 288 | */ |
@@ -331,56 +323,53 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir) | |||
331 | * request and allocate a buffer from that IO TLB pool. | 323 | * request and allocate a buffer from that IO TLB pool. |
332 | */ | 324 | */ |
333 | spin_lock_irqsave(&io_tlb_lock, flags); | 325 | spin_lock_irqsave(&io_tlb_lock, flags); |
334 | { | 326 | index = ALIGN(io_tlb_index, stride); |
335 | index = ALIGN(io_tlb_index, stride); | 327 | if (index >= io_tlb_nslabs) |
336 | if (index >= io_tlb_nslabs) | 328 | index = 0; |
337 | index = 0; | 329 | wrap = index; |
338 | wrap = index; | 330 | |
339 | 331 | do { | |
340 | do { | 332 | while (iommu_is_span_boundary(index, nslots, offset_slots, |
341 | while (is_span_boundary(index, nslots, offset_slots, | 333 | max_slots)) { |
342 | max_slots)) { | ||
343 | index += stride; | ||
344 | if (index >= io_tlb_nslabs) | ||
345 | index = 0; | ||
346 | if (index == wrap) | ||
347 | goto not_found; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * If we find a slot that indicates we have 'nslots' | ||
352 | * number of contiguous buffers, we allocate the | ||
353 | * buffers from that slot and mark the entries as '0' | ||
354 | * indicating unavailable. | ||
355 | */ | ||
356 | if (io_tlb_list[index] >= nslots) { | ||
357 | int count = 0; | ||
358 | |||
359 | for (i = index; i < (int) (index + nslots); i++) | ||
360 | io_tlb_list[i] = 0; | ||
361 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE -1) && io_tlb_list[i]; i--) | ||
362 | io_tlb_list[i] = ++count; | ||
363 | dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); | ||
364 | |||
365 | /* | ||
366 | * Update the indices to avoid searching in | ||
367 | * the next round. | ||
368 | */ | ||
369 | io_tlb_index = ((index + nslots) < io_tlb_nslabs | ||
370 | ? (index + nslots) : 0); | ||
371 | |||
372 | goto found; | ||
373 | } | ||
374 | index += stride; | 334 | index += stride; |
375 | if (index >= io_tlb_nslabs) | 335 | if (index >= io_tlb_nslabs) |
376 | index = 0; | 336 | index = 0; |
377 | } while (index != wrap); | 337 | if (index == wrap) |
338 | goto not_found; | ||
339 | } | ||
378 | 340 | ||
379 | not_found: | 341 | /* |
380 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 342 | * If we find a slot that indicates we have 'nslots' number of |
381 | return NULL; | 343 | * contiguous buffers, we allocate the buffers from that slot |
382 | } | 344 | * and mark the entries as '0' indicating unavailable. |
383 | found: | 345 | */ |
346 | if (io_tlb_list[index] >= nslots) { | ||
347 | int count = 0; | ||
348 | |||
349 | for (i = index; i < (int) (index + nslots); i++) | ||
350 | io_tlb_list[i] = 0; | ||
351 | for (i = index - 1; (OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) && io_tlb_list[i]; i--) | ||
352 | io_tlb_list[i] = ++count; | ||
353 | dma_addr = io_tlb_start + (index << IO_TLB_SHIFT); | ||
354 | |||
355 | /* | ||
356 | * Update the indices to avoid searching in the next | ||
357 | * round. | ||
358 | */ | ||
359 | io_tlb_index = ((index + nslots) < io_tlb_nslabs | ||
360 | ? (index + nslots) : 0); | ||
361 | |||
362 | goto found; | ||
363 | } | ||
364 | index += stride; | ||
365 | if (index >= io_tlb_nslabs) | ||
366 | index = 0; | ||
367 | } while (index != wrap); | ||
368 | |||
369 | not_found: | ||
370 | spin_unlock_irqrestore(&io_tlb_lock, flags); | ||
371 | return NULL; | ||
372 | found: | ||
384 | spin_unlock_irqrestore(&io_tlb_lock, flags); | 373 | spin_unlock_irqrestore(&io_tlb_lock, flags); |
385 | 374 | ||
386 | /* | 375 | /* |
@@ -566,7 +555,8 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) | |||
566 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. | 555 | * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. |
567 | */ | 556 | */ |
568 | dma_addr_t | 557 | dma_addr_t |
569 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | 558 | swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, |
559 | int dir, struct dma_attrs *attrs) | ||
570 | { | 560 | { |
571 | dma_addr_t dev_addr = virt_to_bus(ptr); | 561 | dma_addr_t dev_addr = virt_to_bus(ptr); |
572 | void *map; | 562 | void *map; |
@@ -599,6 +589,13 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | |||
599 | 589 | ||
600 | return dev_addr; | 590 | return dev_addr; |
601 | } | 591 | } |
592 | EXPORT_SYMBOL(swiotlb_map_single_attrs); | ||
593 | |||
594 | dma_addr_t | ||
595 | swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | ||
596 | { | ||
597 | return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); | ||
598 | } | ||
602 | 599 | ||
603 | /* | 600 | /* |
604 | * Unmap a single streaming mode DMA translation. The dma_addr and size must | 601 | * Unmap a single streaming mode DMA translation. The dma_addr and size must |
@@ -609,8 +606,8 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) | |||
609 | * whatever the device wrote there. | 606 | * whatever the device wrote there. |
610 | */ | 607 | */ |
611 | void | 608 | void |
612 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | 609 | swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, |
613 | int dir) | 610 | size_t size, int dir, struct dma_attrs *attrs) |
614 | { | 611 | { |
615 | char *dma_addr = bus_to_virt(dev_addr); | 612 | char *dma_addr = bus_to_virt(dev_addr); |
616 | 613 | ||
@@ -620,7 +617,14 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | |||
620 | else if (dir == DMA_FROM_DEVICE) | 617 | else if (dir == DMA_FROM_DEVICE) |
621 | dma_mark_clean(dma_addr, size); | 618 | dma_mark_clean(dma_addr, size); |
622 | } | 619 | } |
620 | EXPORT_SYMBOL(swiotlb_unmap_single_attrs); | ||
623 | 621 | ||
622 | void | ||
623 | swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, | ||
624 | int dir) | ||
625 | { | ||
626 | return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); | ||
627 | } | ||
624 | /* | 628 | /* |
625 | * Make physical memory consistent for a single streaming mode DMA translation | 629 | * Make physical memory consistent for a single streaming mode DMA translation |
626 | * after a transfer. | 630 | * after a transfer. |
@@ -691,6 +695,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | |||
691 | SYNC_FOR_DEVICE); | 695 | SYNC_FOR_DEVICE); |
692 | } | 696 | } |
693 | 697 | ||
698 | void swiotlb_unmap_sg_attrs(struct device *, struct scatterlist *, int, int, | ||
699 | struct dma_attrs *); | ||
694 | /* | 700 | /* |
695 | * Map a set of buffers described by scatterlist in streaming mode for DMA. | 701 | * Map a set of buffers described by scatterlist in streaming mode for DMA. |
696 | * This is the scatter-gather version of the above swiotlb_map_single | 702 | * This is the scatter-gather version of the above swiotlb_map_single |
@@ -708,8 +714,8 @@ swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, | |||
708 | * same here. | 714 | * same here. |
709 | */ | 715 | */ |
710 | int | 716 | int |
711 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 717 | swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, |
712 | int dir) | 718 | int dir, struct dma_attrs *attrs) |
713 | { | 719 | { |
714 | struct scatterlist *sg; | 720 | struct scatterlist *sg; |
715 | void *addr; | 721 | void *addr; |
@@ -727,7 +733,8 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
727 | /* Don't panic here, we expect map_sg users | 733 | /* Don't panic here, we expect map_sg users |
728 | to do proper error handling. */ | 734 | to do proper error handling. */ |
729 | swiotlb_full(hwdev, sg->length, dir, 0); | 735 | swiotlb_full(hwdev, sg->length, dir, 0); |
730 | swiotlb_unmap_sg(hwdev, sgl, i, dir); | 736 | swiotlb_unmap_sg_attrs(hwdev, sgl, i, dir, |
737 | attrs); | ||
731 | sgl[0].dma_length = 0; | 738 | sgl[0].dma_length = 0; |
732 | return 0; | 739 | return 0; |
733 | } | 740 | } |
@@ -738,14 +745,22 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
738 | } | 745 | } |
739 | return nelems; | 746 | return nelems; |
740 | } | 747 | } |
748 | EXPORT_SYMBOL(swiotlb_map_sg_attrs); | ||
749 | |||
750 | int | ||
751 | swiotlb_map_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
752 | int dir) | ||
753 | { | ||
754 | return swiotlb_map_sg_attrs(hwdev, sgl, nelems, dir, NULL); | ||
755 | } | ||
741 | 756 | ||
742 | /* | 757 | /* |
743 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules | 758 | * Unmap a set of streaming mode DMA translations. Again, cpu read rules |
744 | * concerning calls here are the same as for swiotlb_unmap_single() above. | 759 | * concerning calls here are the same as for swiotlb_unmap_single() above. |
745 | */ | 760 | */ |
746 | void | 761 | void |
747 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | 762 | swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, |
748 | int dir) | 763 | int nelems, int dir, struct dma_attrs *attrs) |
749 | { | 764 | { |
750 | struct scatterlist *sg; | 765 | struct scatterlist *sg; |
751 | int i; | 766 | int i; |
@@ -760,6 +775,14 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | |||
760 | dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); | 775 | dma_mark_clean(SG_ENT_VIRT_ADDRESS(sg), sg->dma_length); |
761 | } | 776 | } |
762 | } | 777 | } |
778 | EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); | ||
779 | |||
780 | void | ||
781 | swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sgl, int nelems, | ||
782 | int dir) | ||
783 | { | ||
784 | return swiotlb_unmap_sg_attrs(hwdev, sgl, nelems, dir, NULL); | ||
785 | } | ||
763 | 786 | ||
764 | /* | 787 | /* |
765 | * Make physical memory consistent for a set of streaming mode DMA translations | 788 | * Make physical memory consistent for a set of streaming mode DMA translations |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2c37c67ed8c9..bbf953eeb58b 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -199,7 +199,8 @@ static struct page *alloc_fresh_huge_page_node(int nid) | |||
199 | struct page *page; | 199 | struct page *page; |
200 | 200 | ||
201 | page = alloc_pages_node(nid, | 201 | page = alloc_pages_node(nid, |
202 | htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE|__GFP_NOWARN, | 202 | htlb_alloc_mask|__GFP_COMP|__GFP_THISNODE| |
203 | __GFP_REPEAT|__GFP_NOWARN, | ||
203 | HUGETLB_PAGE_ORDER); | 204 | HUGETLB_PAGE_ORDER); |
204 | if (page) { | 205 | if (page) { |
205 | if (arch_prepare_hugepage(page)) { | 206 | if (arch_prepare_hugepage(page)) { |
@@ -294,7 +295,8 @@ static struct page *alloc_buddy_huge_page(struct vm_area_struct *vma, | |||
294 | } | 295 | } |
295 | spin_unlock(&hugetlb_lock); | 296 | spin_unlock(&hugetlb_lock); |
296 | 297 | ||
297 | page = alloc_pages(htlb_alloc_mask|__GFP_COMP|__GFP_NOWARN, | 298 | page = alloc_pages(htlb_alloc_mask|__GFP_COMP| |
299 | __GFP_REPEAT|__GFP_NOWARN, | ||
298 | HUGETLB_PAGE_ORDER); | 300 | HUGETLB_PAGE_ORDER); |
299 | 301 | ||
300 | spin_lock(&hugetlb_lock); | 302 | spin_lock(&hugetlb_lock); |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 2e0bfc93484b..33add96cd5fb 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -26,15 +26,18 @@ | |||
26 | #include <linux/backing-dev.h> | 26 | #include <linux/backing-dev.h> |
27 | #include <linux/bit_spinlock.h> | 27 | #include <linux/bit_spinlock.h> |
28 | #include <linux/rcupdate.h> | 28 | #include <linux/rcupdate.h> |
29 | #include <linux/slab.h> | ||
29 | #include <linux/swap.h> | 30 | #include <linux/swap.h> |
30 | #include <linux/spinlock.h> | 31 | #include <linux/spinlock.h> |
31 | #include <linux/fs.h> | 32 | #include <linux/fs.h> |
32 | #include <linux/seq_file.h> | 33 | #include <linux/seq_file.h> |
34 | #include <linux/vmalloc.h> | ||
33 | 35 | ||
34 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
35 | 37 | ||
36 | struct cgroup_subsys mem_cgroup_subsys; | 38 | struct cgroup_subsys mem_cgroup_subsys; |
37 | static const int MEM_CGROUP_RECLAIM_RETRIES = 5; | 39 | static const int MEM_CGROUP_RECLAIM_RETRIES = 5; |
40 | static struct kmem_cache *page_cgroup_cache; | ||
38 | 41 | ||
39 | /* | 42 | /* |
40 | * Statistics for memory cgroup. | 43 | * Statistics for memory cgroup. |
@@ -236,26 +239,12 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont) | |||
236 | css); | 239 | css); |
237 | } | 240 | } |
238 | 241 | ||
239 | static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) | 242 | struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) |
240 | { | 243 | { |
241 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), | 244 | return container_of(task_subsys_state(p, mem_cgroup_subsys_id), |
242 | struct mem_cgroup, css); | 245 | struct mem_cgroup, css); |
243 | } | 246 | } |
244 | 247 | ||
245 | void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p) | ||
246 | { | ||
247 | struct mem_cgroup *mem; | ||
248 | |||
249 | mem = mem_cgroup_from_task(p); | ||
250 | css_get(&mem->css); | ||
251 | mm->mem_cgroup = mem; | ||
252 | } | ||
253 | |||
254 | void mm_free_cgroup(struct mm_struct *mm) | ||
255 | { | ||
256 | css_put(&mm->mem_cgroup->css); | ||
257 | } | ||
258 | |||
259 | static inline int page_cgroup_locked(struct page *page) | 248 | static inline int page_cgroup_locked(struct page *page) |
260 | { | 249 | { |
261 | return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); | 250 | return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
@@ -287,10 +276,10 @@ static void unlock_page_cgroup(struct page *page) | |||
287 | bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); | 276 | bit_spin_unlock(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); |
288 | } | 277 | } |
289 | 278 | ||
290 | static void __mem_cgroup_remove_list(struct page_cgroup *pc) | 279 | static void __mem_cgroup_remove_list(struct mem_cgroup_per_zone *mz, |
280 | struct page_cgroup *pc) | ||
291 | { | 281 | { |
292 | int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; | 282 | int from = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; |
293 | struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); | ||
294 | 283 | ||
295 | if (from) | 284 | if (from) |
296 | MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1; | 285 | MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_ACTIVE) -= 1; |
@@ -301,10 +290,10 @@ static void __mem_cgroup_remove_list(struct page_cgroup *pc) | |||
301 | list_del_init(&pc->lru); | 290 | list_del_init(&pc->lru); |
302 | } | 291 | } |
303 | 292 | ||
304 | static void __mem_cgroup_add_list(struct page_cgroup *pc) | 293 | static void __mem_cgroup_add_list(struct mem_cgroup_per_zone *mz, |
294 | struct page_cgroup *pc) | ||
305 | { | 295 | { |
306 | int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; | 296 | int to = pc->flags & PAGE_CGROUP_FLAG_ACTIVE; |
307 | struct mem_cgroup_per_zone *mz = page_cgroup_zoneinfo(pc); | ||
308 | 297 | ||
309 | if (!to) { | 298 | if (!to) { |
310 | MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1; | 299 | MEM_CGROUP_ZSTAT(mz, MEM_CGROUP_ZSTAT_INACTIVE) += 1; |
@@ -476,6 +465,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan, | |||
476 | int zid = zone_idx(z); | 465 | int zid = zone_idx(z); |
477 | struct mem_cgroup_per_zone *mz; | 466 | struct mem_cgroup_per_zone *mz; |
478 | 467 | ||
468 | BUG_ON(!mem_cont); | ||
479 | mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); | 469 | mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); |
480 | if (active) | 470 | if (active) |
481 | src = &mz->active_list; | 471 | src = &mz->active_list; |
@@ -560,7 +550,7 @@ retry: | |||
560 | } | 550 | } |
561 | unlock_page_cgroup(page); | 551 | unlock_page_cgroup(page); |
562 | 552 | ||
563 | pc = kzalloc(sizeof(struct page_cgroup), gfp_mask); | 553 | pc = kmem_cache_zalloc(page_cgroup_cache, gfp_mask); |
564 | if (pc == NULL) | 554 | if (pc == NULL) |
565 | goto err; | 555 | goto err; |
566 | 556 | ||
@@ -574,7 +564,7 @@ retry: | |||
574 | mm = &init_mm; | 564 | mm = &init_mm; |
575 | 565 | ||
576 | rcu_read_lock(); | 566 | rcu_read_lock(); |
577 | mem = rcu_dereference(mm->mem_cgroup); | 567 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
578 | /* | 568 | /* |
579 | * For every charge from the cgroup, increment reference count | 569 | * For every charge from the cgroup, increment reference count |
580 | */ | 570 | */ |
@@ -602,7 +592,6 @@ retry: | |||
602 | mem_cgroup_out_of_memory(mem, gfp_mask); | 592 | mem_cgroup_out_of_memory(mem, gfp_mask); |
603 | goto out; | 593 | goto out; |
604 | } | 594 | } |
605 | congestion_wait(WRITE, HZ/10); | ||
606 | } | 595 | } |
607 | 596 | ||
608 | pc->ref_cnt = 1; | 597 | pc->ref_cnt = 1; |
@@ -610,7 +599,7 @@ retry: | |||
610 | pc->page = page; | 599 | pc->page = page; |
611 | pc->flags = PAGE_CGROUP_FLAG_ACTIVE; | 600 | pc->flags = PAGE_CGROUP_FLAG_ACTIVE; |
612 | if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) | 601 | if (ctype == MEM_CGROUP_CHARGE_TYPE_CACHE) |
613 | pc->flags |= PAGE_CGROUP_FLAG_CACHE; | 602 | pc->flags = PAGE_CGROUP_FLAG_CACHE; |
614 | 603 | ||
615 | lock_page_cgroup(page); | 604 | lock_page_cgroup(page); |
616 | if (page_get_page_cgroup(page)) { | 605 | if (page_get_page_cgroup(page)) { |
@@ -622,14 +611,14 @@ retry: | |||
622 | */ | 611 | */ |
623 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 612 | res_counter_uncharge(&mem->res, PAGE_SIZE); |
624 | css_put(&mem->css); | 613 | css_put(&mem->css); |
625 | kfree(pc); | 614 | kmem_cache_free(page_cgroup_cache, pc); |
626 | goto retry; | 615 | goto retry; |
627 | } | 616 | } |
628 | page_assign_page_cgroup(page, pc); | 617 | page_assign_page_cgroup(page, pc); |
629 | 618 | ||
630 | mz = page_cgroup_zoneinfo(pc); | 619 | mz = page_cgroup_zoneinfo(pc); |
631 | spin_lock_irqsave(&mz->lru_lock, flags); | 620 | spin_lock_irqsave(&mz->lru_lock, flags); |
632 | __mem_cgroup_add_list(pc); | 621 | __mem_cgroup_add_list(mz, pc); |
633 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 622 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
634 | 623 | ||
635 | unlock_page_cgroup(page); | 624 | unlock_page_cgroup(page); |
@@ -637,7 +626,7 @@ done: | |||
637 | return 0; | 626 | return 0; |
638 | out: | 627 | out: |
639 | css_put(&mem->css); | 628 | css_put(&mem->css); |
640 | kfree(pc); | 629 | kmem_cache_free(page_cgroup_cache, pc); |
641 | err: | 630 | err: |
642 | return -ENOMEM; | 631 | return -ENOMEM; |
643 | } | 632 | } |
@@ -685,7 +674,7 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
685 | if (--(pc->ref_cnt) == 0) { | 674 | if (--(pc->ref_cnt) == 0) { |
686 | mz = page_cgroup_zoneinfo(pc); | 675 | mz = page_cgroup_zoneinfo(pc); |
687 | spin_lock_irqsave(&mz->lru_lock, flags); | 676 | spin_lock_irqsave(&mz->lru_lock, flags); |
688 | __mem_cgroup_remove_list(pc); | 677 | __mem_cgroup_remove_list(mz, pc); |
689 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 678 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
690 | 679 | ||
691 | page_assign_page_cgroup(page, NULL); | 680 | page_assign_page_cgroup(page, NULL); |
@@ -695,7 +684,7 @@ void mem_cgroup_uncharge_page(struct page *page) | |||
695 | res_counter_uncharge(&mem->res, PAGE_SIZE); | 684 | res_counter_uncharge(&mem->res, PAGE_SIZE); |
696 | css_put(&mem->css); | 685 | css_put(&mem->css); |
697 | 686 | ||
698 | kfree(pc); | 687 | kmem_cache_free(page_cgroup_cache, pc); |
699 | return; | 688 | return; |
700 | } | 689 | } |
701 | 690 | ||
@@ -747,7 +736,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage) | |||
747 | 736 | ||
748 | mz = page_cgroup_zoneinfo(pc); | 737 | mz = page_cgroup_zoneinfo(pc); |
749 | spin_lock_irqsave(&mz->lru_lock, flags); | 738 | spin_lock_irqsave(&mz->lru_lock, flags); |
750 | __mem_cgroup_remove_list(pc); | 739 | __mem_cgroup_remove_list(mz, pc); |
751 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 740 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
752 | 741 | ||
753 | page_assign_page_cgroup(page, NULL); | 742 | page_assign_page_cgroup(page, NULL); |
@@ -759,7 +748,7 @@ void mem_cgroup_page_migration(struct page *page, struct page *newpage) | |||
759 | 748 | ||
760 | mz = page_cgroup_zoneinfo(pc); | 749 | mz = page_cgroup_zoneinfo(pc); |
761 | spin_lock_irqsave(&mz->lru_lock, flags); | 750 | spin_lock_irqsave(&mz->lru_lock, flags); |
762 | __mem_cgroup_add_list(pc); | 751 | __mem_cgroup_add_list(mz, pc); |
763 | spin_unlock_irqrestore(&mz->lru_lock, flags); | 752 | spin_unlock_irqrestore(&mz->lru_lock, flags); |
764 | 753 | ||
765 | unlock_page_cgroup(newpage); | 754 | unlock_page_cgroup(newpage); |
@@ -853,13 +842,10 @@ static int mem_cgroup_write_strategy(char *buf, unsigned long long *tmp) | |||
853 | return 0; | 842 | return 0; |
854 | } | 843 | } |
855 | 844 | ||
856 | static ssize_t mem_cgroup_read(struct cgroup *cont, | 845 | static u64 mem_cgroup_read(struct cgroup *cont, struct cftype *cft) |
857 | struct cftype *cft, struct file *file, | ||
858 | char __user *userbuf, size_t nbytes, loff_t *ppos) | ||
859 | { | 846 | { |
860 | return res_counter_read(&mem_cgroup_from_cont(cont)->res, | 847 | return res_counter_read_u64(&mem_cgroup_from_cont(cont)->res, |
861 | cft->private, userbuf, nbytes, ppos, | 848 | cft->private); |
862 | NULL); | ||
863 | } | 849 | } |
864 | 850 | ||
865 | static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft, | 851 | static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft, |
@@ -871,27 +857,25 @@ static ssize_t mem_cgroup_write(struct cgroup *cont, struct cftype *cft, | |||
871 | mem_cgroup_write_strategy); | 857 | mem_cgroup_write_strategy); |
872 | } | 858 | } |
873 | 859 | ||
874 | static ssize_t mem_force_empty_write(struct cgroup *cont, | 860 | static int mem_cgroup_reset(struct cgroup *cont, unsigned int event) |
875 | struct cftype *cft, struct file *file, | ||
876 | const char __user *userbuf, | ||
877 | size_t nbytes, loff_t *ppos) | ||
878 | { | 861 | { |
879 | struct mem_cgroup *mem = mem_cgroup_from_cont(cont); | 862 | struct mem_cgroup *mem; |
880 | int ret = mem_cgroup_force_empty(mem); | 863 | |
881 | if (!ret) | 864 | mem = mem_cgroup_from_cont(cont); |
882 | ret = nbytes; | 865 | switch (event) { |
883 | return ret; | 866 | case RES_MAX_USAGE: |
867 | res_counter_reset_max(&mem->res); | ||
868 | break; | ||
869 | case RES_FAILCNT: | ||
870 | res_counter_reset_failcnt(&mem->res); | ||
871 | break; | ||
872 | } | ||
873 | return 0; | ||
884 | } | 874 | } |
885 | 875 | ||
886 | /* | 876 | static int mem_force_empty_write(struct cgroup *cont, unsigned int event) |
887 | * Note: This should be removed if cgroup supports write-only file. | ||
888 | */ | ||
889 | static ssize_t mem_force_empty_read(struct cgroup *cont, | ||
890 | struct cftype *cft, | ||
891 | struct file *file, char __user *userbuf, | ||
892 | size_t nbytes, loff_t *ppos) | ||
893 | { | 877 | { |
894 | return -EINVAL; | 878 | return mem_cgroup_force_empty(mem_cgroup_from_cont(cont)); |
895 | } | 879 | } |
896 | 880 | ||
897 | static const struct mem_cgroup_stat_desc { | 881 | static const struct mem_cgroup_stat_desc { |
@@ -902,9 +886,9 @@ static const struct mem_cgroup_stat_desc { | |||
902 | [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, }, | 886 | [MEM_CGROUP_STAT_RSS] = { "rss", PAGE_SIZE, }, |
903 | }; | 887 | }; |
904 | 888 | ||
905 | static int mem_control_stat_show(struct seq_file *m, void *arg) | 889 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, |
890 | struct cgroup_map_cb *cb) | ||
906 | { | 891 | { |
907 | struct cgroup *cont = m->private; | ||
908 | struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); | 892 | struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); |
909 | struct mem_cgroup_stat *stat = &mem_cont->stat; | 893 | struct mem_cgroup_stat *stat = &mem_cont->stat; |
910 | int i; | 894 | int i; |
@@ -914,8 +898,7 @@ static int mem_control_stat_show(struct seq_file *m, void *arg) | |||
914 | 898 | ||
915 | val = mem_cgroup_read_stat(stat, i); | 899 | val = mem_cgroup_read_stat(stat, i); |
916 | val *= mem_cgroup_stat_desc[i].unit; | 900 | val *= mem_cgroup_stat_desc[i].unit; |
917 | seq_printf(m, "%s %lld\n", mem_cgroup_stat_desc[i].msg, | 901 | cb->fill(cb, mem_cgroup_stat_desc[i].msg, val); |
918 | (long long)val); | ||
919 | } | 902 | } |
920 | /* showing # of active pages */ | 903 | /* showing # of active pages */ |
921 | { | 904 | { |
@@ -925,52 +908,43 @@ static int mem_control_stat_show(struct seq_file *m, void *arg) | |||
925 | MEM_CGROUP_ZSTAT_INACTIVE); | 908 | MEM_CGROUP_ZSTAT_INACTIVE); |
926 | active = mem_cgroup_get_all_zonestat(mem_cont, | 909 | active = mem_cgroup_get_all_zonestat(mem_cont, |
927 | MEM_CGROUP_ZSTAT_ACTIVE); | 910 | MEM_CGROUP_ZSTAT_ACTIVE); |
928 | seq_printf(m, "active %ld\n", (active) * PAGE_SIZE); | 911 | cb->fill(cb, "active", (active) * PAGE_SIZE); |
929 | seq_printf(m, "inactive %ld\n", (inactive) * PAGE_SIZE); | 912 | cb->fill(cb, "inactive", (inactive) * PAGE_SIZE); |
930 | } | 913 | } |
931 | return 0; | 914 | return 0; |
932 | } | 915 | } |
933 | 916 | ||
934 | static const struct file_operations mem_control_stat_file_operations = { | ||
935 | .read = seq_read, | ||
936 | .llseek = seq_lseek, | ||
937 | .release = single_release, | ||
938 | }; | ||
939 | |||
940 | static int mem_control_stat_open(struct inode *unused, struct file *file) | ||
941 | { | ||
942 | /* XXX __d_cont */ | ||
943 | struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; | ||
944 | |||
945 | file->f_op = &mem_control_stat_file_operations; | ||
946 | return single_open(file, mem_control_stat_show, cont); | ||
947 | } | ||
948 | |||
949 | static struct cftype mem_cgroup_files[] = { | 917 | static struct cftype mem_cgroup_files[] = { |
950 | { | 918 | { |
951 | .name = "usage_in_bytes", | 919 | .name = "usage_in_bytes", |
952 | .private = RES_USAGE, | 920 | .private = RES_USAGE, |
953 | .read = mem_cgroup_read, | 921 | .read_u64 = mem_cgroup_read, |
922 | }, | ||
923 | { | ||
924 | .name = "max_usage_in_bytes", | ||
925 | .private = RES_MAX_USAGE, | ||
926 | .trigger = mem_cgroup_reset, | ||
927 | .read_u64 = mem_cgroup_read, | ||
954 | }, | 928 | }, |
955 | { | 929 | { |
956 | .name = "limit_in_bytes", | 930 | .name = "limit_in_bytes", |
957 | .private = RES_LIMIT, | 931 | .private = RES_LIMIT, |
958 | .write = mem_cgroup_write, | 932 | .write = mem_cgroup_write, |
959 | .read = mem_cgroup_read, | 933 | .read_u64 = mem_cgroup_read, |
960 | }, | 934 | }, |
961 | { | 935 | { |
962 | .name = "failcnt", | 936 | .name = "failcnt", |
963 | .private = RES_FAILCNT, | 937 | .private = RES_FAILCNT, |
964 | .read = mem_cgroup_read, | 938 | .trigger = mem_cgroup_reset, |
939 | .read_u64 = mem_cgroup_read, | ||
965 | }, | 940 | }, |
966 | { | 941 | { |
967 | .name = "force_empty", | 942 | .name = "force_empty", |
968 | .write = mem_force_empty_write, | 943 | .trigger = mem_force_empty_write, |
969 | .read = mem_force_empty_read, | ||
970 | }, | 944 | }, |
971 | { | 945 | { |
972 | .name = "stat", | 946 | .name = "stat", |
973 | .open = mem_control_stat_open, | 947 | .read_map = mem_control_stat_show, |
974 | }, | 948 | }, |
975 | }; | 949 | }; |
976 | 950 | ||
@@ -1010,6 +984,29 @@ static void free_mem_cgroup_per_zone_info(struct mem_cgroup *mem, int node) | |||
1010 | kfree(mem->info.nodeinfo[node]); | 984 | kfree(mem->info.nodeinfo[node]); |
1011 | } | 985 | } |
1012 | 986 | ||
987 | static struct mem_cgroup *mem_cgroup_alloc(void) | ||
988 | { | ||
989 | struct mem_cgroup *mem; | ||
990 | |||
991 | if (sizeof(*mem) < PAGE_SIZE) | ||
992 | mem = kmalloc(sizeof(*mem), GFP_KERNEL); | ||
993 | else | ||
994 | mem = vmalloc(sizeof(*mem)); | ||
995 | |||
996 | if (mem) | ||
997 | memset(mem, 0, sizeof(*mem)); | ||
998 | return mem; | ||
999 | } | ||
1000 | |||
1001 | static void mem_cgroup_free(struct mem_cgroup *mem) | ||
1002 | { | ||
1003 | if (sizeof(*mem) < PAGE_SIZE) | ||
1004 | kfree(mem); | ||
1005 | else | ||
1006 | vfree(mem); | ||
1007 | } | ||
1008 | |||
1009 | |||
1013 | static struct cgroup_subsys_state * | 1010 | static struct cgroup_subsys_state * |
1014 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | 1011 | mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) |
1015 | { | 1012 | { |
@@ -1018,17 +1015,15 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
1018 | 1015 | ||
1019 | if (unlikely((cont->parent) == NULL)) { | 1016 | if (unlikely((cont->parent) == NULL)) { |
1020 | mem = &init_mem_cgroup; | 1017 | mem = &init_mem_cgroup; |
1021 | init_mm.mem_cgroup = mem; | 1018 | page_cgroup_cache = KMEM_CACHE(page_cgroup, SLAB_PANIC); |
1022 | } else | 1019 | } else { |
1023 | mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL); | 1020 | mem = mem_cgroup_alloc(); |
1024 | 1021 | if (!mem) | |
1025 | if (mem == NULL) | 1022 | return ERR_PTR(-ENOMEM); |
1026 | return ERR_PTR(-ENOMEM); | 1023 | } |
1027 | 1024 | ||
1028 | res_counter_init(&mem->res); | 1025 | res_counter_init(&mem->res); |
1029 | 1026 | ||
1030 | memset(&mem->info, 0, sizeof(mem->info)); | ||
1031 | |||
1032 | for_each_node_state(node, N_POSSIBLE) | 1027 | for_each_node_state(node, N_POSSIBLE) |
1033 | if (alloc_mem_cgroup_per_zone_info(mem, node)) | 1028 | if (alloc_mem_cgroup_per_zone_info(mem, node)) |
1034 | goto free_out; | 1029 | goto free_out; |
@@ -1038,7 +1033,7 @@ free_out: | |||
1038 | for_each_node_state(node, N_POSSIBLE) | 1033 | for_each_node_state(node, N_POSSIBLE) |
1039 | free_mem_cgroup_per_zone_info(mem, node); | 1034 | free_mem_cgroup_per_zone_info(mem, node); |
1040 | if (cont->parent != NULL) | 1035 | if (cont->parent != NULL) |
1041 | kfree(mem); | 1036 | mem_cgroup_free(mem); |
1042 | return ERR_PTR(-ENOMEM); | 1037 | return ERR_PTR(-ENOMEM); |
1043 | } | 1038 | } |
1044 | 1039 | ||
@@ -1058,7 +1053,7 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss, | |||
1058 | for_each_node_state(node, N_POSSIBLE) | 1053 | for_each_node_state(node, N_POSSIBLE) |
1059 | free_mem_cgroup_per_zone_info(mem, node); | 1054 | free_mem_cgroup_per_zone_info(mem, node); |
1060 | 1055 | ||
1061 | kfree(mem_cgroup_from_cont(cont)); | 1056 | mem_cgroup_free(mem_cgroup_from_cont(cont)); |
1062 | } | 1057 | } |
1063 | 1058 | ||
1064 | static int mem_cgroup_populate(struct cgroup_subsys *ss, | 1059 | static int mem_cgroup_populate(struct cgroup_subsys *ss, |
@@ -1098,10 +1093,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
1098 | if (!thread_group_leader(p)) | 1093 | if (!thread_group_leader(p)) |
1099 | goto out; | 1094 | goto out; |
1100 | 1095 | ||
1101 | css_get(&mem->css); | ||
1102 | rcu_assign_pointer(mm->mem_cgroup, mem); | ||
1103 | css_put(&old_mem->css); | ||
1104 | |||
1105 | out: | 1096 | out: |
1106 | mmput(mm); | 1097 | mmput(mm); |
1107 | } | 1098 | } |
@@ -230,8 +230,11 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) | |||
230 | might_sleep(); | 230 | might_sleep(); |
231 | if (vma->vm_ops && vma->vm_ops->close) | 231 | if (vma->vm_ops && vma->vm_ops->close) |
232 | vma->vm_ops->close(vma); | 232 | vma->vm_ops->close(vma); |
233 | if (vma->vm_file) | 233 | if (vma->vm_file) { |
234 | fput(vma->vm_file); | 234 | fput(vma->vm_file); |
235 | if (vma->vm_flags & VM_EXECUTABLE) | ||
236 | removed_exe_file_vma(vma->vm_mm); | ||
237 | } | ||
235 | mpol_put(vma_policy(vma)); | 238 | mpol_put(vma_policy(vma)); |
236 | kmem_cache_free(vm_area_cachep, vma); | 239 | kmem_cache_free(vm_area_cachep, vma); |
237 | return next; | 240 | return next; |
@@ -623,8 +626,11 @@ again: remove_next = 1 + (end > next->vm_end); | |||
623 | spin_unlock(&mapping->i_mmap_lock); | 626 | spin_unlock(&mapping->i_mmap_lock); |
624 | 627 | ||
625 | if (remove_next) { | 628 | if (remove_next) { |
626 | if (file) | 629 | if (file) { |
627 | fput(file); | 630 | fput(file); |
631 | if (next->vm_flags & VM_EXECUTABLE) | ||
632 | removed_exe_file_vma(mm); | ||
633 | } | ||
628 | mm->map_count--; | 634 | mm->map_count--; |
629 | mpol_put(vma_policy(next)); | 635 | mpol_put(vma_policy(next)); |
630 | kmem_cache_free(vm_area_cachep, next); | 636 | kmem_cache_free(vm_area_cachep, next); |
@@ -1154,6 +1160,8 @@ munmap_back: | |||
1154 | error = file->f_op->mmap(file, vma); | 1160 | error = file->f_op->mmap(file, vma); |
1155 | if (error) | 1161 | if (error) |
1156 | goto unmap_and_free_vma; | 1162 | goto unmap_and_free_vma; |
1163 | if (vm_flags & VM_EXECUTABLE) | ||
1164 | added_exe_file_vma(mm); | ||
1157 | } else if (vm_flags & VM_SHARED) { | 1165 | } else if (vm_flags & VM_SHARED) { |
1158 | error = shmem_zero_setup(vma); | 1166 | error = shmem_zero_setup(vma); |
1159 | if (error) | 1167 | if (error) |
@@ -1185,6 +1193,8 @@ munmap_back: | |||
1185 | mpol_put(vma_policy(vma)); | 1193 | mpol_put(vma_policy(vma)); |
1186 | kmem_cache_free(vm_area_cachep, vma); | 1194 | kmem_cache_free(vm_area_cachep, vma); |
1187 | fput(file); | 1195 | fput(file); |
1196 | if (vm_flags & VM_EXECUTABLE) | ||
1197 | removed_exe_file_vma(mm); | ||
1188 | } else { | 1198 | } else { |
1189 | vma_link(mm, vma, prev, rb_link, rb_parent); | 1199 | vma_link(mm, vma, prev, rb_link, rb_parent); |
1190 | file = vma->vm_file; | 1200 | file = vma->vm_file; |
@@ -1817,8 +1827,11 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma, | |||
1817 | } | 1827 | } |
1818 | vma_set_policy(new, pol); | 1828 | vma_set_policy(new, pol); |
1819 | 1829 | ||
1820 | if (new->vm_file) | 1830 | if (new->vm_file) { |
1821 | get_file(new->vm_file); | 1831 | get_file(new->vm_file); |
1832 | if (vma->vm_flags & VM_EXECUTABLE) | ||
1833 | added_exe_file_vma(mm); | ||
1834 | } | ||
1822 | 1835 | ||
1823 | if (new->vm_ops && new->vm_ops->open) | 1836 | if (new->vm_ops && new->vm_ops->open) |
1824 | new->vm_ops->open(new); | 1837 | new->vm_ops->open(new); |
@@ -2135,8 +2148,11 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, | |||
2135 | new_vma->vm_start = addr; | 2148 | new_vma->vm_start = addr; |
2136 | new_vma->vm_end = addr + len; | 2149 | new_vma->vm_end = addr + len; |
2137 | new_vma->vm_pgoff = pgoff; | 2150 | new_vma->vm_pgoff = pgoff; |
2138 | if (new_vma->vm_file) | 2151 | if (new_vma->vm_file) { |
2139 | get_file(new_vma->vm_file); | 2152 | get_file(new_vma->vm_file); |
2153 | if (vma->vm_flags & VM_EXECUTABLE) | ||
2154 | added_exe_file_vma(mm); | ||
2155 | } | ||
2140 | if (new_vma->vm_ops && new_vma->vm_ops->open) | 2156 | if (new_vma->vm_ops && new_vma->vm_ops->open) |
2141 | new_vma->vm_ops->open(new_vma); | 2157 | new_vma->vm_ops->open(new_vma); |
2142 | vma_link(mm, new_vma, prev, rb_link, rb_parent); | 2158 | vma_link(mm, new_vma, prev, rb_link, rb_parent); |
diff --git a/mm/nommu.c b/mm/nommu.c index 1d32fe89d57b..ef8c62cec697 100644 --- a/mm/nommu.c +++ b/mm/nommu.c | |||
@@ -966,8 +966,13 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
966 | 966 | ||
967 | INIT_LIST_HEAD(&vma->anon_vma_node); | 967 | INIT_LIST_HEAD(&vma->anon_vma_node); |
968 | atomic_set(&vma->vm_usage, 1); | 968 | atomic_set(&vma->vm_usage, 1); |
969 | if (file) | 969 | if (file) { |
970 | get_file(file); | 970 | get_file(file); |
971 | if (vm_flags & VM_EXECUTABLE) { | ||
972 | added_exe_file_vma(current->mm); | ||
973 | vma->vm_mm = current->mm; | ||
974 | } | ||
975 | } | ||
971 | vma->vm_file = file; | 976 | vma->vm_file = file; |
972 | vma->vm_flags = vm_flags; | 977 | vma->vm_flags = vm_flags; |
973 | vma->vm_start = addr; | 978 | vma->vm_start = addr; |
@@ -1022,8 +1027,11 @@ unsigned long do_mmap_pgoff(struct file *file, | |||
1022 | up_write(&nommu_vma_sem); | 1027 | up_write(&nommu_vma_sem); |
1023 | kfree(vml); | 1028 | kfree(vml); |
1024 | if (vma) { | 1029 | if (vma) { |
1025 | if (vma->vm_file) | 1030 | if (vma->vm_file) { |
1026 | fput(vma->vm_file); | 1031 | fput(vma->vm_file); |
1032 | if (vma->vm_flags & VM_EXECUTABLE) | ||
1033 | removed_exe_file_vma(vma->vm_mm); | ||
1034 | } | ||
1027 | kfree(vma); | 1035 | kfree(vma); |
1028 | } | 1036 | } |
1029 | return ret; | 1037 | return ret; |
@@ -1053,7 +1061,7 @@ EXPORT_SYMBOL(do_mmap_pgoff); | |||
1053 | /* | 1061 | /* |
1054 | * handle mapping disposal for uClinux | 1062 | * handle mapping disposal for uClinux |
1055 | */ | 1063 | */ |
1056 | static void put_vma(struct vm_area_struct *vma) | 1064 | static void put_vma(struct mm_struct *mm, struct vm_area_struct *vma) |
1057 | { | 1065 | { |
1058 | if (vma) { | 1066 | if (vma) { |
1059 | down_write(&nommu_vma_sem); | 1067 | down_write(&nommu_vma_sem); |
@@ -1075,8 +1083,11 @@ static void put_vma(struct vm_area_struct *vma) | |||
1075 | realalloc -= kobjsize(vma); | 1083 | realalloc -= kobjsize(vma); |
1076 | askedalloc -= sizeof(*vma); | 1084 | askedalloc -= sizeof(*vma); |
1077 | 1085 | ||
1078 | if (vma->vm_file) | 1086 | if (vma->vm_file) { |
1079 | fput(vma->vm_file); | 1087 | fput(vma->vm_file); |
1088 | if (vma->vm_flags & VM_EXECUTABLE) | ||
1089 | removed_exe_file_vma(mm); | ||
1090 | } | ||
1080 | kfree(vma); | 1091 | kfree(vma); |
1081 | } | 1092 | } |
1082 | 1093 | ||
@@ -1113,7 +1124,7 @@ int do_munmap(struct mm_struct *mm, unsigned long addr, size_t len) | |||
1113 | found: | 1124 | found: |
1114 | vml = *parent; | 1125 | vml = *parent; |
1115 | 1126 | ||
1116 | put_vma(vml->vma); | 1127 | put_vma(mm, vml->vma); |
1117 | 1128 | ||
1118 | *parent = vml->next; | 1129 | *parent = vml->next; |
1119 | realalloc -= kobjsize(vml); | 1130 | realalloc -= kobjsize(vml); |
@@ -1158,7 +1169,7 @@ void exit_mmap(struct mm_struct * mm) | |||
1158 | 1169 | ||
1159 | while ((tmp = mm->context.vmlist)) { | 1170 | while ((tmp = mm->context.vmlist)) { |
1160 | mm->context.vmlist = tmp->next; | 1171 | mm->context.vmlist = tmp->next; |
1161 | put_vma(tmp->vma); | 1172 | put_vma(mm, tmp->vma); |
1162 | 1173 | ||
1163 | realalloc -= kobjsize(tmp); | 1174 | realalloc -= kobjsize(tmp); |
1164 | askedalloc -= sizeof(*tmp); | 1175 | askedalloc -= sizeof(*tmp); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index d1cf4f05dcda..0a502e99ee22 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -1461,7 +1461,8 @@ __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, | |||
1461 | struct task_struct *p = current; | 1461 | struct task_struct *p = current; |
1462 | int do_retry; | 1462 | int do_retry; |
1463 | int alloc_flags; | 1463 | int alloc_flags; |
1464 | int did_some_progress; | 1464 | unsigned long did_some_progress; |
1465 | unsigned long pages_reclaimed = 0; | ||
1465 | 1466 | ||
1466 | might_sleep_if(wait); | 1467 | might_sleep_if(wait); |
1467 | 1468 | ||
@@ -1611,14 +1612,26 @@ nofail_alloc: | |||
1611 | * Don't let big-order allocations loop unless the caller explicitly | 1612 | * Don't let big-order allocations loop unless the caller explicitly |
1612 | * requests that. Wait for some write requests to complete then retry. | 1613 | * requests that. Wait for some write requests to complete then retry. |
1613 | * | 1614 | * |
1614 | * In this implementation, __GFP_REPEAT means __GFP_NOFAIL for order | 1615 | * In this implementation, order <= PAGE_ALLOC_COSTLY_ORDER |
1615 | * <= 3, but that may not be true in other implementations. | 1616 | * means __GFP_NOFAIL, but that may not be true in other |
1617 | * implementations. | ||
1618 | * | ||
1619 | * For order > PAGE_ALLOC_COSTLY_ORDER, if __GFP_REPEAT is | ||
1620 | * specified, then we retry until we no longer reclaim any pages | ||
1621 | * (above), or we've reclaimed an order of pages at least as | ||
1622 | * large as the allocation's order. In both cases, if the | ||
1623 | * allocation still fails, we stop retrying. | ||
1616 | */ | 1624 | */ |
1625 | pages_reclaimed += did_some_progress; | ||
1617 | do_retry = 0; | 1626 | do_retry = 0; |
1618 | if (!(gfp_mask & __GFP_NORETRY)) { | 1627 | if (!(gfp_mask & __GFP_NORETRY)) { |
1619 | if ((order <= PAGE_ALLOC_COSTLY_ORDER) || | 1628 | if (order <= PAGE_ALLOC_COSTLY_ORDER) { |
1620 | (gfp_mask & __GFP_REPEAT)) | ||
1621 | do_retry = 1; | 1629 | do_retry = 1; |
1630 | } else { | ||
1631 | if (gfp_mask & __GFP_REPEAT && | ||
1632 | pages_reclaimed < (1 << order)) | ||
1633 | do_retry = 1; | ||
1634 | } | ||
1622 | if (gfp_mask & __GFP_NOFAIL) | 1635 | if (gfp_mask & __GFP_NOFAIL) |
1623 | do_retry = 1; | 1636 | do_retry = 1; |
1624 | } | 1637 | } |
@@ -2524,7 +2537,9 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | |||
2524 | struct page *page; | 2537 | struct page *page; |
2525 | unsigned long end_pfn = start_pfn + size; | 2538 | unsigned long end_pfn = start_pfn + size; |
2526 | unsigned long pfn; | 2539 | unsigned long pfn; |
2540 | struct zone *z; | ||
2527 | 2541 | ||
2542 | z = &NODE_DATA(nid)->node_zones[zone]; | ||
2528 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { | 2543 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { |
2529 | /* | 2544 | /* |
2530 | * There can be holes in boot-time mem_map[]s | 2545 | * There can be holes in boot-time mem_map[]s |
@@ -2542,7 +2557,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | |||
2542 | init_page_count(page); | 2557 | init_page_count(page); |
2543 | reset_page_mapcount(page); | 2558 | reset_page_mapcount(page); |
2544 | SetPageReserved(page); | 2559 | SetPageReserved(page); |
2545 | |||
2546 | /* | 2560 | /* |
2547 | * Mark the block movable so that blocks are reserved for | 2561 | * Mark the block movable so that blocks are reserved for |
2548 | * movable at startup. This will force kernel allocations | 2562 | * movable at startup. This will force kernel allocations |
@@ -2551,8 +2565,15 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, | |||
2551 | * kernel allocations are made. Later some blocks near | 2565 | * kernel allocations are made. Later some blocks near |
2552 | * the start are marked MIGRATE_RESERVE by | 2566 | * the start are marked MIGRATE_RESERVE by |
2553 | * setup_zone_migrate_reserve() | 2567 | * setup_zone_migrate_reserve() |
2568 | * | ||
2569 | * bitmap is created for zone's valid pfn range. but memmap | ||
2570 | * can be created for invalid pages (for alignment) | ||
2571 | * check here not to call set_pageblock_migratetype() against | ||
2572 | * pfn out of zone. | ||
2554 | */ | 2573 | */ |
2555 | if ((pfn & (pageblock_nr_pages-1))) | 2574 | if ((z->zone_start_pfn <= pfn) |
2575 | && (pfn < z->zone_start_pfn + z->spanned_pages) | ||
2576 | && !(pfn & (pageblock_nr_pages - 1))) | ||
2556 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); | 2577 | set_pageblock_migratetype(page, MIGRATE_MOVABLE); |
2557 | 2578 | ||
2558 | INIT_LIST_HEAD(&page->lru); | 2579 | INIT_LIST_HEAD(&page->lru); |
@@ -4464,6 +4485,8 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags, | |||
4464 | pfn = page_to_pfn(page); | 4485 | pfn = page_to_pfn(page); |
4465 | bitmap = get_pageblock_bitmap(zone, pfn); | 4486 | bitmap = get_pageblock_bitmap(zone, pfn); |
4466 | bitidx = pfn_to_bitidx(zone, pfn); | 4487 | bitidx = pfn_to_bitidx(zone, pfn); |
4488 | VM_BUG_ON(pfn < zone->zone_start_pfn); | ||
4489 | VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages); | ||
4467 | 4490 | ||
4468 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) | 4491 | for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1) |
4469 | if (flags & value) | 4492 | if (flags & value) |
@@ -2978,7 +2978,7 @@ void __init kmem_cache_init(void) | |||
2978 | kmalloc_caches[0].refcount = -1; | 2978 | kmalloc_caches[0].refcount = -1; |
2979 | caches++; | 2979 | caches++; |
2980 | 2980 | ||
2981 | hotplug_memory_notifier(slab_memory_callback, 1); | 2981 | hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); |
2982 | #endif | 2982 | #endif |
2983 | 2983 | ||
2984 | /* Able to allocate the per node structures */ | 2984 | /* Able to allocate the per node structures */ |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 67051be7083a..bd1bb5920306 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1426,11 +1426,7 @@ static const struct file_operations proc_swaps_operations = { | |||
1426 | 1426 | ||
1427 | static int __init procswaps_init(void) | 1427 | static int __init procswaps_init(void) |
1428 | { | 1428 | { |
1429 | struct proc_dir_entry *entry; | 1429 | proc_create("swaps", 0, NULL, &proc_swaps_operations); |
1430 | |||
1431 | entry = create_proc_entry("swaps", 0, NULL); | ||
1432 | if (entry) | ||
1433 | entry->proc_fops = &proc_swaps_operations; | ||
1434 | return 0; | 1430 | return 0; |
1435 | } | 1431 | } |
1436 | __initcall(procswaps_init); | 1432 | __initcall(procswaps_init); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index eceac9f9032f..12e8627c9747 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1299,6 +1299,9 @@ static unsigned long shrink_zones(int priority, struct zonelist *zonelist, | |||
1299 | * hope that some of these pages can be written. But if the allocating task | 1299 | * hope that some of these pages can be written. But if the allocating task |
1300 | * holds filesystem locks which prevent writeout this might not work, and the | 1300 | * holds filesystem locks which prevent writeout this might not work, and the |
1301 | * allocation attempt will fail. | 1301 | * allocation attempt will fail. |
1302 | * | ||
1303 | * returns: 0, if no pages reclaimed | ||
1304 | * else, the number of pages reclaimed | ||
1302 | */ | 1305 | */ |
1303 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | 1306 | static unsigned long do_try_to_free_pages(struct zonelist *zonelist, |
1304 | struct scan_control *sc) | 1307 | struct scan_control *sc) |
@@ -1347,7 +1350,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1347 | } | 1350 | } |
1348 | total_scanned += sc->nr_scanned; | 1351 | total_scanned += sc->nr_scanned; |
1349 | if (nr_reclaimed >= sc->swap_cluster_max) { | 1352 | if (nr_reclaimed >= sc->swap_cluster_max) { |
1350 | ret = 1; | 1353 | ret = nr_reclaimed; |
1351 | goto out; | 1354 | goto out; |
1352 | } | 1355 | } |
1353 | 1356 | ||
@@ -1370,7 +1373,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
1370 | } | 1373 | } |
1371 | /* top priority shrink_caches still had more to do? don't OOM, then */ | 1374 | /* top priority shrink_caches still had more to do? don't OOM, then */ |
1372 | if (!sc->all_unreclaimable && scan_global_lru(sc)) | 1375 | if (!sc->all_unreclaimable && scan_global_lru(sc)) |
1373 | ret = 1; | 1376 | ret = nr_reclaimed; |
1374 | out: | 1377 | out: |
1375 | /* | 1378 | /* |
1376 | * Now that we've scanned all the zones at this priority level, note | 1379 | * Now that we've scanned all the zones at this priority level, note |
diff --git a/net/core/dev.c b/net/core/dev.c index e1df1ab3e04a..ed49da592051 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1524,7 +1524,7 @@ static int dev_gso_segment(struct sk_buff *skb) | |||
1524 | if (!segs) | 1524 | if (!segs) |
1525 | return 0; | 1525 | return 0; |
1526 | 1526 | ||
1527 | if (unlikely(IS_ERR(segs))) | 1527 | if (IS_ERR(segs)) |
1528 | return PTR_ERR(segs); | 1528 | return PTR_ERR(segs); |
1529 | 1529 | ||
1530 | skb->next = segs; | 1530 | skb->next = segs; |
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index f2b5270efdaa..24eca23c2db3 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c | |||
@@ -1234,7 +1234,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) | |||
1234 | segs = ops->gso_segment(skb, features); | 1234 | segs = ops->gso_segment(skb, features); |
1235 | rcu_read_unlock(); | 1235 | rcu_read_unlock(); |
1236 | 1236 | ||
1237 | if (!segs || unlikely(IS_ERR(segs))) | 1237 | if (!segs || IS_ERR(segs)) |
1238 | goto out; | 1238 | goto out; |
1239 | 1239 | ||
1240 | skb = segs; | 1240 | skb = segs; |
diff --git a/net/irda/irnet/irnet_irda.c b/net/irda/irnet/irnet_irda.c index a4f1439ffdd8..75497e55927d 100644 --- a/net/irda/irnet/irnet_irda.c +++ b/net/irda/irnet/irnet_irda.c | |||
@@ -9,6 +9,7 @@ | |||
9 | */ | 9 | */ |
10 | 10 | ||
11 | #include "irnet_irda.h" /* Private header */ | 11 | #include "irnet_irda.h" /* Private header */ |
12 | #include <linux/seq_file.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * PPP disconnect work: we need to make sure we're in | 15 | * PPP disconnect work: we need to make sure we're in |
@@ -1717,34 +1718,23 @@ irnet_expiry_indication(discinfo_t * expiry, | |||
1717 | */ | 1718 | */ |
1718 | 1719 | ||
1719 | #ifdef CONFIG_PROC_FS | 1720 | #ifdef CONFIG_PROC_FS |
1720 | /*------------------------------------------------------------------*/ | ||
1721 | /* | ||
1722 | * Function irnet_proc_read (buf, start, offset, len, unused) | ||
1723 | * | ||
1724 | * Give some info to the /proc file system | ||
1725 | */ | ||
1726 | static int | 1721 | static int |
1727 | irnet_proc_read(char * buf, | 1722 | irnet_proc_show(struct seq_file *m, void *v) |
1728 | char ** start, | ||
1729 | off_t offset, | ||
1730 | int len) | ||
1731 | { | 1723 | { |
1732 | irnet_socket * self; | 1724 | irnet_socket * self; |
1733 | char * state; | 1725 | char * state; |
1734 | int i = 0; | 1726 | int i = 0; |
1735 | 1727 | ||
1736 | len = 0; | ||
1737 | |||
1738 | /* Get the IrNET server information... */ | 1728 | /* Get the IrNET server information... */ |
1739 | len += sprintf(buf+len, "IrNET server - "); | 1729 | seq_printf(m, "IrNET server - "); |
1740 | len += sprintf(buf+len, "IrDA state: %s, ", | 1730 | seq_printf(m, "IrDA state: %s, ", |
1741 | (irnet_server.running ? "running" : "dead")); | 1731 | (irnet_server.running ? "running" : "dead")); |
1742 | len += sprintf(buf+len, "stsap_sel: %02x, ", irnet_server.s.stsap_sel); | 1732 | seq_printf(m, "stsap_sel: %02x, ", irnet_server.s.stsap_sel); |
1743 | len += sprintf(buf+len, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel); | 1733 | seq_printf(m, "dtsap_sel: %02x\n", irnet_server.s.dtsap_sel); |
1744 | 1734 | ||
1745 | /* Do we need to continue ? */ | 1735 | /* Do we need to continue ? */ |
1746 | if(!irnet_server.running) | 1736 | if(!irnet_server.running) |
1747 | return len; | 1737 | return 0; |
1748 | 1738 | ||
1749 | /* Protect access to the instance list */ | 1739 | /* Protect access to the instance list */ |
1750 | spin_lock_bh(&irnet_server.spinlock); | 1740 | spin_lock_bh(&irnet_server.spinlock); |
@@ -1754,23 +1744,23 @@ irnet_proc_read(char * buf, | |||
1754 | while(self != NULL) | 1744 | while(self != NULL) |
1755 | { | 1745 | { |
1756 | /* Start printing info about the socket. */ | 1746 | /* Start printing info about the socket. */ |
1757 | len += sprintf(buf+len, "\nIrNET socket %d - ", i++); | 1747 | seq_printf(m, "\nIrNET socket %d - ", i++); |
1758 | 1748 | ||
1759 | /* First, get the requested configuration */ | 1749 | /* First, get the requested configuration */ |
1760 | len += sprintf(buf+len, "Requested IrDA name: \"%s\", ", self->rname); | 1750 | seq_printf(m, "Requested IrDA name: \"%s\", ", self->rname); |
1761 | len += sprintf(buf+len, "daddr: %08x, ", self->rdaddr); | 1751 | seq_printf(m, "daddr: %08x, ", self->rdaddr); |
1762 | len += sprintf(buf+len, "saddr: %08x\n", self->rsaddr); | 1752 | seq_printf(m, "saddr: %08x\n", self->rsaddr); |
1763 | 1753 | ||
1764 | /* Second, get all the PPP info */ | 1754 | /* Second, get all the PPP info */ |
1765 | len += sprintf(buf+len, " PPP state: %s", | 1755 | seq_printf(m, " PPP state: %s", |
1766 | (self->ppp_open ? "registered" : "unregistered")); | 1756 | (self->ppp_open ? "registered" : "unregistered")); |
1767 | if(self->ppp_open) | 1757 | if(self->ppp_open) |
1768 | { | 1758 | { |
1769 | len += sprintf(buf+len, ", unit: ppp%d", | 1759 | seq_printf(m, ", unit: ppp%d", |
1770 | ppp_unit_number(&self->chan)); | 1760 | ppp_unit_number(&self->chan)); |
1771 | len += sprintf(buf+len, ", channel: %d", | 1761 | seq_printf(m, ", channel: %d", |
1772 | ppp_channel_index(&self->chan)); | 1762 | ppp_channel_index(&self->chan)); |
1773 | len += sprintf(buf+len, ", mru: %d", | 1763 | seq_printf(m, ", mru: %d", |
1774 | self->mru); | 1764 | self->mru); |
1775 | /* Maybe add self->flags ? Later... */ | 1765 | /* Maybe add self->flags ? Later... */ |
1776 | } | 1766 | } |
@@ -1789,10 +1779,10 @@ irnet_proc_read(char * buf, | |||
1789 | state = "weird"; | 1779 | state = "weird"; |
1790 | else | 1780 | else |
1791 | state = "idle"; | 1781 | state = "idle"; |
1792 | len += sprintf(buf+len, "\n IrDA state: %s, ", state); | 1782 | seq_printf(m, "\n IrDA state: %s, ", state); |
1793 | len += sprintf(buf+len, "daddr: %08x, ", self->daddr); | 1783 | seq_printf(m, "daddr: %08x, ", self->daddr); |
1794 | len += sprintf(buf+len, "stsap_sel: %02x, ", self->stsap_sel); | 1784 | seq_printf(m, "stsap_sel: %02x, ", self->stsap_sel); |
1795 | len += sprintf(buf+len, "dtsap_sel: %02x\n", self->dtsap_sel); | 1785 | seq_printf(m, "dtsap_sel: %02x\n", self->dtsap_sel); |
1796 | 1786 | ||
1797 | /* Next socket, please... */ | 1787 | /* Next socket, please... */ |
1798 | self = (irnet_socket *) hashbin_get_next(irnet_server.list); | 1788 | self = (irnet_socket *) hashbin_get_next(irnet_server.list); |
@@ -1801,8 +1791,21 @@ irnet_proc_read(char * buf, | |||
1801 | /* Spin lock end */ | 1791 | /* Spin lock end */ |
1802 | spin_unlock_bh(&irnet_server.spinlock); | 1792 | spin_unlock_bh(&irnet_server.spinlock); |
1803 | 1793 | ||
1804 | return len; | 1794 | return 0; |
1805 | } | 1795 | } |
1796 | |||
1797 | static int irnet_proc_open(struct inode *inode, struct file *file) | ||
1798 | { | ||
1799 | return single_open(file, irnet_proc_show, NULL); | ||
1800 | } | ||
1801 | |||
1802 | static const struct file_operations irnet_proc_fops = { | ||
1803 | .owner = THIS_MODULE, | ||
1804 | .open = irnet_proc_open, | ||
1805 | .read = seq_read, | ||
1806 | .llseek = seq_lseek, | ||
1807 | .release = single_release, | ||
1808 | }; | ||
1806 | #endif /* PROC_FS */ | 1809 | #endif /* PROC_FS */ |
1807 | 1810 | ||
1808 | 1811 | ||
@@ -1841,7 +1844,7 @@ irda_irnet_init(void) | |||
1841 | 1844 | ||
1842 | #ifdef CONFIG_PROC_FS | 1845 | #ifdef CONFIG_PROC_FS |
1843 | /* Add a /proc file for irnet infos */ | 1846 | /* Add a /proc file for irnet infos */ |
1844 | create_proc_info_entry("irnet", 0, proc_irda, irnet_proc_read); | 1847 | proc_create("irnet", 0, proc_irda, &irnet_proc_fops); |
1845 | #endif /* CONFIG_PROC_FS */ | 1848 | #endif /* CONFIG_PROC_FS */ |
1846 | 1849 | ||
1847 | /* Setup the IrNET server */ | 1850 | /* Setup the IrNET server */ |
diff --git a/net/irda/irnet/irnet_irda.h b/net/irda/irnet/irnet_irda.h index 0ba92d0d5204..3e408952a3f1 100644 --- a/net/irda/irnet/irnet_irda.h +++ b/net/irda/irnet/irnet_irda.h | |||
@@ -159,14 +159,6 @@ static void | |||
159 | DISCOVERY_MODE, | 159 | DISCOVERY_MODE, |
160 | void *); | 160 | void *); |
161 | #endif | 161 | #endif |
162 | /* -------------------------- PROC ENTRY -------------------------- */ | ||
163 | #ifdef CONFIG_PROC_FS | ||
164 | static int | ||
165 | irnet_proc_read(char *, | ||
166 | char **, | ||
167 | off_t, | ||
168 | int); | ||
169 | #endif /* CONFIG_PROC_FS */ | ||
170 | 162 | ||
171 | /**************************** VARIABLES ****************************/ | 163 | /**************************** VARIABLES ****************************/ |
172 | 164 | ||
diff --git a/net/netfilter/nf_queue.c b/net/netfilter/nf_queue.c index bbd26893c0c4..582ec3efc8a5 100644 --- a/net/netfilter/nf_queue.c +++ b/net/netfilter/nf_queue.c | |||
@@ -214,7 +214,7 @@ int nf_queue(struct sk_buff *skb, | |||
214 | 214 | ||
215 | segs = skb_gso_segment(skb, 0); | 215 | segs = skb_gso_segment(skb, 0); |
216 | kfree_skb(skb); | 216 | kfree_skb(skb); |
217 | if (unlikely(IS_ERR(segs))) | 217 | if (IS_ERR(segs)) |
218 | return 1; | 218 | return 1; |
219 | 219 | ||
220 | do { | 220 | do { |
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c index 2519129c6d21..09cd9c0c2d80 100644 --- a/net/xfrm/xfrm_output.c +++ b/net/xfrm/xfrm_output.c | |||
@@ -150,7 +150,7 @@ static int xfrm_output_gso(struct sk_buff *skb) | |||
150 | 150 | ||
151 | segs = skb_gso_segment(skb, 0); | 151 | segs = skb_gso_segment(skb, 0); |
152 | kfree_skb(skb); | 152 | kfree_skb(skb); |
153 | if (unlikely(IS_ERR(segs))) | 153 | if (IS_ERR(segs)) |
154 | return PTR_ERR(segs); | 154 | return PTR_ERR(segs); |
155 | 155 | ||
156 | do { | 156 | do { |
diff --git a/samples/markers/marker-example.c b/samples/markers/marker-example.c index 05e438f8b4e2..e90dc5d04392 100644 --- a/samples/markers/marker-example.c +++ b/samples/markers/marker-example.c | |||
@@ -33,10 +33,8 @@ static struct file_operations mark_ops = { | |||
33 | static int example_init(void) | 33 | static int example_init(void) |
34 | { | 34 | { |
35 | printk(KERN_ALERT "example init\n"); | 35 | printk(KERN_ALERT "example init\n"); |
36 | pentry_example = create_proc_entry("marker-example", 0444, NULL); | 36 | pentry_example = proc_create("marker-example", 0444, NULL, &mark_ops); |
37 | if (pentry_example) | 37 | if (!pentry_example) |
38 | pentry_example->proc_fops = &mark_ops; | ||
39 | else | ||
40 | return -EPERM; | 38 | return -EPERM; |
41 | return 0; | 39 | return 0; |
42 | } | 40 | } |
diff --git a/scripts/Lindent b/scripts/Lindent index 9468ec7971db..9c4b3e2b7098 100755 --- a/scripts/Lindent +++ b/scripts/Lindent | |||
@@ -1,2 +1,18 @@ | |||
1 | #!/bin/sh | 1 | #!/bin/sh |
2 | indent -npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1 "$@" | 2 | PARAM="-npro -kr -i8 -ts8 -sob -l80 -ss -ncs -cp1" |
3 | RES=`indent --version` | ||
4 | V1=`echo $RES | cut -d' ' -f3 | cut -d'.' -f1` | ||
5 | V2=`echo $RES | cut -d' ' -f3 | cut -d'.' -f2` | ||
6 | V3=`echo $RES | cut -d' ' -f3 | cut -d'.' -f3` | ||
7 | if [ $V1 -gt 2 ]; then | ||
8 | PARAM="$PARAM -il0" | ||
9 | elif [ $V1 -eq 2 ]; then | ||
10 | if [ $V2 -gt 2 ]; then | ||
11 | PARAM="$PARAM -il0"; | ||
12 | elif [ $V2 -eq 2 ]; then | ||
13 | if [ $V3 -ge 10 ]; then | ||
14 | PARAM="$PARAM -il0" | ||
15 | fi | ||
16 | fi | ||
17 | fi | ||
18 | indent $PARAM "$@" | ||
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index 64ec4b8a51b5..b6bbbcdc557e 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl | |||
@@ -9,7 +9,7 @@ use strict; | |||
9 | my $P = $0; | 9 | my $P = $0; |
10 | $P =~ s@.*/@@g; | 10 | $P =~ s@.*/@@g; |
11 | 11 | ||
12 | my $V = '0.16'; | 12 | my $V = '0.18'; |
13 | 13 | ||
14 | use Getopt::Long qw(:config no_auto_abbrev); | 14 | use Getopt::Long qw(:config no_auto_abbrev); |
15 | 15 | ||
@@ -131,6 +131,17 @@ our $NonptrType; | |||
131 | our $Type; | 131 | our $Type; |
132 | our $Declare; | 132 | our $Declare; |
133 | 133 | ||
134 | our $UTF8 = qr { | ||
135 | [\x09\x0A\x0D\x20-\x7E] # ASCII | ||
136 | | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte | ||
137 | | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs | ||
138 | | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte | ||
139 | | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates | ||
140 | | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 | ||
141 | | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15 | ||
142 | | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 | ||
143 | }x; | ||
144 | |||
134 | our @typeList = ( | 145 | our @typeList = ( |
135 | qr{void}, | 146 | qr{void}, |
136 | qr{char}, | 147 | qr{char}, |
@@ -692,7 +703,7 @@ sub annotate_values { | |||
692 | while (length($cur)) { | 703 | while (length($cur)) { |
693 | @av_paren_type = ('E') if ($#av_paren_type < 0); | 704 | @av_paren_type = ('E') if ($#av_paren_type < 0); |
694 | print " <" . join('', @av_paren_type) . | 705 | print " <" . join('', @av_paren_type) . |
695 | "> <$type> " if ($dbg_values > 1); | 706 | "> <$type> <$av_pending>" if ($dbg_values > 1); |
696 | if ($cur =~ /^(\s+)/o) { | 707 | if ($cur =~ /^(\s+)/o) { |
697 | print "WS($1)\n" if ($dbg_values > 1); | 708 | print "WS($1)\n" if ($dbg_values > 1); |
698 | if ($1 =~ /\n/ && $av_preprocessor) { | 709 | if ($1 =~ /\n/ && $av_preprocessor) { |
@@ -705,9 +716,18 @@ sub annotate_values { | |||
705 | $type = 'T'; | 716 | $type = 'T'; |
706 | 717 | ||
707 | } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) { | 718 | } elsif ($cur =~ /^(#\s*define\s*$Ident)(\(?)/o) { |
708 | print "DEFINE($1)\n" if ($dbg_values > 1); | 719 | print "DEFINE($1,$2)\n" if ($dbg_values > 1); |
709 | $av_preprocessor = 1; | 720 | $av_preprocessor = 1; |
710 | $av_pending = 'N'; | 721 | push(@av_paren_type, $type); |
722 | if ($2 ne '') { | ||
723 | $av_pending = 'N'; | ||
724 | } | ||
725 | $type = 'E'; | ||
726 | |||
727 | } elsif ($cur =~ /^(#\s*undef\s*$Ident)/o) { | ||
728 | print "UNDEF($1)\n" if ($dbg_values > 1); | ||
729 | $av_preprocessor = 1; | ||
730 | push(@av_paren_type, $type); | ||
711 | 731 | ||
712 | } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) { | 732 | } elsif ($cur =~ /^(#\s*(?:ifdef|ifndef|if))/o) { |
713 | print "PRE_START($1)\n" if ($dbg_values > 1); | 733 | print "PRE_START($1)\n" if ($dbg_values > 1); |
@@ -715,7 +735,7 @@ sub annotate_values { | |||
715 | 735 | ||
716 | push(@av_paren_type, $type); | 736 | push(@av_paren_type, $type); |
717 | push(@av_paren_type, $type); | 737 | push(@av_paren_type, $type); |
718 | $type = 'N'; | 738 | $type = 'E'; |
719 | 739 | ||
720 | } elsif ($cur =~ /^(#\s*(?:else|elif))/o) { | 740 | } elsif ($cur =~ /^(#\s*(?:else|elif))/o) { |
721 | print "PRE_RESTART($1)\n" if ($dbg_values > 1); | 741 | print "PRE_RESTART($1)\n" if ($dbg_values > 1); |
@@ -723,7 +743,7 @@ sub annotate_values { | |||
723 | 743 | ||
724 | push(@av_paren_type, $av_paren_type[$#av_paren_type]); | 744 | push(@av_paren_type, $av_paren_type[$#av_paren_type]); |
725 | 745 | ||
726 | $type = 'N'; | 746 | $type = 'E'; |
727 | 747 | ||
728 | } elsif ($cur =~ /^(#\s*(?:endif))/o) { | 748 | } elsif ($cur =~ /^(#\s*(?:endif))/o) { |
729 | print "PRE_END($1)\n" if ($dbg_values > 1); | 749 | print "PRE_END($1)\n" if ($dbg_values > 1); |
@@ -734,11 +754,16 @@ sub annotate_values { | |||
734 | # one does, and continue as if the #endif was not here. | 754 | # one does, and continue as if the #endif was not here. |
735 | pop(@av_paren_type); | 755 | pop(@av_paren_type); |
736 | push(@av_paren_type, $type); | 756 | push(@av_paren_type, $type); |
737 | $type = 'N'; | 757 | $type = 'E'; |
738 | 758 | ||
739 | } elsif ($cur =~ /^(\\\n)/o) { | 759 | } elsif ($cur =~ /^(\\\n)/o) { |
740 | print "PRECONT($1)\n" if ($dbg_values > 1); | 760 | print "PRECONT($1)\n" if ($dbg_values > 1); |
741 | 761 | ||
762 | } elsif ($cur =~ /^(__attribute__)\s*\(?/o) { | ||
763 | print "ATTR($1)\n" if ($dbg_values > 1); | ||
764 | $av_pending = $type; | ||
765 | $type = 'N'; | ||
766 | |||
742 | } elsif ($cur =~ /^(sizeof)\s*(\()?/o) { | 767 | } elsif ($cur =~ /^(sizeof)\s*(\()?/o) { |
743 | print "SIZEOF($1)\n" if ($dbg_values > 1); | 768 | print "SIZEOF($1)\n" if ($dbg_values > 1); |
744 | if (defined $2) { | 769 | if (defined $2) { |
@@ -930,7 +955,7 @@ sub process { | |||
930 | # edge is a close comment then we must be in a comment | 955 | # edge is a close comment then we must be in a comment |
931 | # at context start. | 956 | # at context start. |
932 | my $edge; | 957 | my $edge; |
933 | for (my $ln = $linenr; $ln < ($linenr + $realcnt); $ln++) { | 958 | for (my $ln = $linenr + 1; $ln < ($linenr + $realcnt); $ln++) { |
934 | next if ($line =~ /^-/); | 959 | next if ($line =~ /^-/); |
935 | ($edge) = ($rawlines[$ln - 1] =~ m@(/\*|\*/)@); | 960 | ($edge) = ($rawlines[$ln - 1] =~ m@(/\*|\*/)@); |
936 | last if (defined $edge); | 961 | last if (defined $edge); |
@@ -951,9 +976,9 @@ sub process { | |||
951 | ##print "COMMENT:$in_comment edge<$edge> $rawline\n"; | 976 | ##print "COMMENT:$in_comment edge<$edge> $rawline\n"; |
952 | sanitise_line_reset($in_comment); | 977 | sanitise_line_reset($in_comment); |
953 | 978 | ||
954 | } elsif ($realcnt) { | 979 | } elsif ($realcnt && $rawline =~ /^(?:\+| |$)/) { |
955 | # Standardise the strings and chars within the input to | 980 | # Standardise the strings and chars within the input to |
956 | # simplify matching. | 981 | # simplify matching -- only bother with positive lines. |
957 | $line = sanitise_line($rawline); | 982 | $line = sanitise_line($rawline); |
958 | } | 983 | } |
959 | push(@lines, $line); | 984 | push(@lines, $line); |
@@ -1066,17 +1091,14 @@ sub process { | |||
1066 | 1091 | ||
1067 | # UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php | 1092 | # UTF-8 regex found at http://www.w3.org/International/questions/qa-forms-utf-8.en.php |
1068 | if (($realfile =~ /^$/ || $line =~ /^\+/) && | 1093 | if (($realfile =~ /^$/ || $line =~ /^\+/) && |
1069 | !($rawline =~ m/^( | 1094 | $rawline !~ m/^$UTF8*$/) { |
1070 | [\x09\x0A\x0D\x20-\x7E] # ASCII | 1095 | my ($utf8_prefix) = ($rawline =~ /^($UTF8*)/); |
1071 | | [\xC2-\xDF][\x80-\xBF] # non-overlong 2-byte | 1096 | |
1072 | | \xE0[\xA0-\xBF][\x80-\xBF] # excluding overlongs | 1097 | my $blank = copy_spacing($rawline); |
1073 | | [\xE1-\xEC\xEE\xEF][\x80-\xBF]{2} # straight 3-byte | 1098 | my $ptr = substr($blank, 0, length($utf8_prefix)) . "^"; |
1074 | | \xED[\x80-\x9F][\x80-\xBF] # excluding surrogates | 1099 | my $hereptr = "$hereline$ptr\n"; |
1075 | | \xF0[\x90-\xBF][\x80-\xBF]{2} # planes 1-3 | 1100 | |
1076 | | [\xF1-\xF3][\x80-\xBF]{3} # planes 4-15 | 1101 | ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $hereptr); |
1077 | | \xF4[\x80-\x8F][\x80-\xBF]{2} # plane 16 | ||
1078 | )*$/x )) { | ||
1079 | ERROR("Invalid UTF-8, patch and commit message should be encoded in UTF-8\n" . $herecurr); | ||
1080 | } | 1102 | } |
1081 | 1103 | ||
1082 | #ignore lines being removed | 1104 | #ignore lines being removed |
@@ -1112,7 +1134,7 @@ sub process { | |||
1112 | if ($rawline =~ /^\+\s* \t\s*\S/ || | 1134 | if ($rawline =~ /^\+\s* \t\s*\S/ || |
1113 | $rawline =~ /^\+\s* \s*/) { | 1135 | $rawline =~ /^\+\s* \s*/) { |
1114 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; | 1136 | my $herevet = "$here\n" . cat_vet($rawline) . "\n"; |
1115 | ERROR("use tabs not spaces\n" . $herevet); | 1137 | ERROR("code indent should use tabs where possible\n" . $herevet); |
1116 | } | 1138 | } |
1117 | 1139 | ||
1118 | # check for RCS/CVS revision markers | 1140 | # check for RCS/CVS revision markers |
@@ -1121,35 +1143,40 @@ sub process { | |||
1121 | } | 1143 | } |
1122 | 1144 | ||
1123 | # Check for potential 'bare' types | 1145 | # Check for potential 'bare' types |
1124 | if ($realcnt) { | 1146 | my ($stat, $cond); |
1125 | my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0); | 1147 | if ($realcnt && $line =~ /.\s*\S/) { |
1126 | $s =~ s/\n./ /g; | 1148 | ($stat, $cond) = ctx_statement_block($linenr, |
1127 | $s =~ s/{.*$//; | 1149 | $realcnt, 0); |
1150 | $stat =~ s/\n./\n /g; | ||
1151 | $cond =~ s/\n./\n /g; | ||
1152 | |||
1153 | my $s = $stat; | ||
1154 | $s =~ s/{.*$//s; | ||
1128 | 1155 | ||
1129 | # Ignore goto labels. | 1156 | # Ignore goto labels. |
1130 | if ($s =~ /$Ident:\*$/) { | 1157 | if ($s =~ /$Ident:\*$/s) { |
1131 | 1158 | ||
1132 | # Ignore functions being called | 1159 | # Ignore functions being called |
1133 | } elsif ($s =~ /^.\s*$Ident\s*\(/) { | 1160 | } elsif ($s =~ /^.\s*$Ident\s*\(/s) { |
1134 | 1161 | ||
1135 | # definitions in global scope can only start with types | 1162 | # definitions in global scope can only start with types |
1136 | } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/) { | 1163 | } elsif ($s =~ /^.(?:$Storage\s+)?(?:$Inline\s+)?(?:const\s+)?($Ident)\b/s) { |
1137 | possible($1, $s); | 1164 | possible($1, $s); |
1138 | 1165 | ||
1139 | # declarations always start with types | 1166 | # declarations always start with types |
1140 | } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/) { | 1167 | } elsif ($prev_values eq 'E' && $s =~ /^.\s*(?:$Storage\s+)?(?:const\s+)?($Ident)\b(:?\s+$Sparse)?\s*\**\s*$Ident\s*(?:;|=|,)/s) { |
1141 | possible($1, $s); | 1168 | possible($1, $s); |
1142 | } | 1169 | } |
1143 | 1170 | ||
1144 | # any (foo ... *) is a pointer cast, and foo is a type | 1171 | # any (foo ... *) is a pointer cast, and foo is a type |
1145 | while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/g) { | 1172 | while ($s =~ /\(($Ident)(?:\s+$Sparse)*\s*\*+\s*\)/sg) { |
1146 | possible($1, $s); | 1173 | possible($1, $s); |
1147 | } | 1174 | } |
1148 | 1175 | ||
1149 | # Check for any sort of function declaration. | 1176 | # Check for any sort of function declaration. |
1150 | # int foo(something bar, other baz); | 1177 | # int foo(something bar, other baz); |
1151 | # void (*store_gdt)(x86_descr_ptr *); | 1178 | # void (*store_gdt)(x86_descr_ptr *); |
1152 | if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/) { | 1179 | if ($prev_values eq 'E' && $s =~ /^(.(?:typedef\s*)?(?:(?:$Storage|$Inline)\s*)*\s*$Type\s*(?:\b$Ident|\(\*\s*$Ident\))\s*)\(/s) { |
1153 | my ($name_len) = length($1); | 1180 | my ($name_len) = length($1); |
1154 | 1181 | ||
1155 | my $ctx = $s; | 1182 | my $ctx = $s; |
@@ -1282,18 +1309,19 @@ sub process { | |||
1282 | ($prevline !~ /^ }/) && | 1309 | ($prevline !~ /^ }/) && |
1283 | ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) && | 1310 | ($prevline !~ /^.DECLARE_$Ident\(\Q$name\E\)/) && |
1284 | ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) && | 1311 | ($prevline !~ /^.LIST_HEAD\(\Q$name\E\)/) && |
1312 | ($prevline !~ /^.$Type\s*\(\s*\*\s*\Q$name\E\s*\)\s*\(/) && | ||
1285 | ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) { | 1313 | ($prevline !~ /\b\Q$name\E(?:\s+$Attribute)?\s*(?:;|=|\[)/)) { |
1286 | WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr); | 1314 | WARN("EXPORT_SYMBOL(foo); should immediately follow its function/variable\n" . $herecurr); |
1287 | } | 1315 | } |
1288 | } | 1316 | } |
1289 | 1317 | ||
1290 | # check for external initialisers. | 1318 | # check for external initialisers. |
1291 | if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL);/) { | 1319 | if ($line =~ /^.$Type\s*$Ident\s*=\s*(0|NULL|false)\s*;/) { |
1292 | ERROR("do not initialise externals to 0 or NULL\n" . | 1320 | ERROR("do not initialise externals to 0 or NULL\n" . |
1293 | $herecurr); | 1321 | $herecurr); |
1294 | } | 1322 | } |
1295 | # check for static initialisers. | 1323 | # check for static initialisers. |
1296 | if ($line =~ /\s*static\s.*=\s*(0|NULL);/) { | 1324 | if ($line =~ /\s*static\s.*=\s*(0|NULL|false)\s*;/) { |
1297 | ERROR("do not initialise statics to 0 or NULL\n" . | 1325 | ERROR("do not initialise statics to 0 or NULL\n" . |
1298 | $herecurr); | 1326 | $herecurr); |
1299 | } | 1327 | } |
@@ -1512,7 +1540,10 @@ sub process { | |||
1512 | if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { | 1540 | if ($ctx !~ /[WEBC]x./ && $ca !~ /(?:\)|!|~|\*|-|\&|\||\+\+|\-\-|\{)$/) { |
1513 | ERROR("space required before that '$op' $at\n" . $hereptr); | 1541 | ERROR("space required before that '$op' $at\n" . $hereptr); |
1514 | } | 1542 | } |
1515 | if ($ctx =~ /.xW/) { | 1543 | if ($op eq '*' && $cc =~/\s*const\b/) { |
1544 | # A unary '*' may be const | ||
1545 | |||
1546 | } elsif ($ctx =~ /.xW/) { | ||
1516 | ERROR("space prohibited after that '$op' $at\n" . $hereptr); | 1547 | ERROR("space prohibited after that '$op' $at\n" . $hereptr); |
1517 | } | 1548 | } |
1518 | 1549 | ||
@@ -1617,7 +1648,7 @@ sub process { | |||
1617 | 1648 | ||
1618 | # Check for illegal assignment in if conditional. | 1649 | # Check for illegal assignment in if conditional. |
1619 | if ($line =~ /\bif\s*\(/) { | 1650 | if ($line =~ /\bif\s*\(/) { |
1620 | my ($s, $c) = ctx_statement_block($linenr, $realcnt, 0); | 1651 | my ($s, $c) = ($stat, $cond); |
1621 | 1652 | ||
1622 | if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) { | 1653 | if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/) { |
1623 | ERROR("do not use assignment in if condition\n" . $herecurr); | 1654 | ERROR("do not use assignment in if condition\n" . $herecurr); |
@@ -1695,7 +1726,7 @@ sub process { | |||
1695 | #warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line) | 1726 | #warn if <asm/foo.h> is #included and <linux/foo.h> is available (uses RAW line) |
1696 | if ($tree && $rawline =~ m{^.\#\s*include\s*\<asm\/(.*)\.h\>}) { | 1727 | if ($tree && $rawline =~ m{^.\#\s*include\s*\<asm\/(.*)\.h\>}) { |
1697 | my $checkfile = "$root/include/linux/$1.h"; | 1728 | my $checkfile = "$root/include/linux/$1.h"; |
1698 | if (-f $checkfile && $1 ne 'irq.h') { | 1729 | if (-f $checkfile && $1 ne 'irq') { |
1699 | WARN("Use #include <linux/$1.h> instead of <asm/$1.h>\n" . | 1730 | WARN("Use #include <linux/$1.h> instead of <asm/$1.h>\n" . |
1700 | $herecurr); | 1731 | $herecurr); |
1701 | } | 1732 | } |
@@ -1910,7 +1941,8 @@ sub process { | |||
1910 | } | 1941 | } |
1911 | 1942 | ||
1912 | # check for spinlock_t definitions without a comment. | 1943 | # check for spinlock_t definitions without a comment. |
1913 | if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/) { | 1944 | if ($line =~ /^.\s*(struct\s+mutex|spinlock_t)\s+\S+;/ || |
1945 | $line =~ /^.\s*(DEFINE_MUTEX)\s*\(/) { | ||
1914 | my $which = $1; | 1946 | my $which = $1; |
1915 | if (!ctx_has_comment($first_line, $linenr)) { | 1947 | if (!ctx_has_comment($first_line, $linenr)) { |
1916 | CHK("$1 definition without comment\n" . $herecurr); | 1948 | CHK("$1 definition without comment\n" . $herecurr); |
@@ -1940,7 +1972,26 @@ sub process { | |||
1940 | } | 1972 | } |
1941 | 1973 | ||
1942 | # check for new externs in .c files. | 1974 | # check for new externs in .c files. |
1943 | if ($line =~ /^.\s*extern\s/ && ($realfile =~ /\.c$/)) { | 1975 | if ($realfile =~ /\.c$/ && defined $stat && |
1976 | $stat =~ /^.\s*(?:extern\s+)?$Type\s+$Ident(\s*)\(/s) | ||
1977 | { | ||
1978 | my $paren_space = $1; | ||
1979 | |||
1980 | my $s = $stat; | ||
1981 | if (defined $cond) { | ||
1982 | substr($s, 0, length($cond), ''); | ||
1983 | } | ||
1984 | if ($s =~ /^\s*;/) { | ||
1985 | WARN("externs should be avoided in .c files\n" . $herecurr); | ||
1986 | } | ||
1987 | |||
1988 | if ($paren_space =~ /\n/) { | ||
1989 | WARN("arguments for function declarations should follow identifier\n" . $herecurr); | ||
1990 | } | ||
1991 | |||
1992 | } elsif ($realfile =~ /\.c$/ && defined $stat && | ||
1993 | $stat =~ /^.\s*extern\s+/) | ||
1994 | { | ||
1944 | WARN("externs should be avoided in .c files\n" . $herecurr); | 1995 | WARN("externs should be avoided in .c files\n" . $herecurr); |
1945 | } | 1996 | } |
1946 | 1997 | ||
@@ -1964,11 +2015,11 @@ sub process { | |||
1964 | } | 2015 | } |
1965 | 2016 | ||
1966 | # check for semaphores used as mutexes | 2017 | # check for semaphores used as mutexes |
1967 | if ($line =~ /\b(DECLARE_MUTEX|init_MUTEX)\s*\(/) { | 2018 | if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) { |
1968 | WARN("mutexes are preferred for single holder semaphores\n" . $herecurr); | 2019 | WARN("mutexes are preferred for single holder semaphores\n" . $herecurr); |
1969 | } | 2020 | } |
1970 | # check for semaphores used as mutexes | 2021 | # check for semaphores used as mutexes |
1971 | if ($line =~ /\binit_MUTEX_LOCKED\s*\(/) { | 2022 | if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) { |
1972 | WARN("consider using a completion\n" . $herecurr); | 2023 | WARN("consider using a completion\n" . $herecurr); |
1973 | } | 2024 | } |
1974 | # recommend strict_strto* over simple_strto* | 2025 | # recommend strict_strto* over simple_strto* |
@@ -1979,11 +2030,24 @@ sub process { | |||
1979 | # use of NR_CPUS is usually wrong | 2030 | # use of NR_CPUS is usually wrong |
1980 | # ignore definitions of NR_CPUS and usage to define arrays as likely right | 2031 | # ignore definitions of NR_CPUS and usage to define arrays as likely right |
1981 | if ($line =~ /\bNR_CPUS\b/ && | 2032 | if ($line =~ /\bNR_CPUS\b/ && |
1982 | $line !~ /^.#\s*define\s+NR_CPUS\s+/ && | 2033 | $line !~ /^.#\s*if\b.*\bNR_CPUS\b/ && |
1983 | $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/) | 2034 | $line !~ /^.#\s*define\b.*\bNR_CPUS\b/ && |
2035 | $line !~ /^.\s*$Declare\s.*\[[^\]]*NR_CPUS[^\]]*\]/ && | ||
2036 | $line !~ /\[[^\]]*\.\.\.[^\]]*NR_CPUS[^\]]*\]/ && | ||
2037 | $line !~ /\[[^\]]*NR_CPUS[^\]]*\.\.\.[^\]]*\]/) | ||
1984 | { | 2038 | { |
1985 | WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr); | 2039 | WARN("usage of NR_CPUS is often wrong - consider using cpu_possible(), num_possible_cpus(), for_each_possible_cpu(), etc\n" . $herecurr); |
1986 | } | 2040 | } |
2041 | |||
2042 | # check for %L{u,d,i} in strings | ||
2043 | my $string; | ||
2044 | while ($line =~ /(?:^|")([X\t]*)(?:"|$)/g) { | ||
2045 | $string = substr($rawline, $-[1], $+[1] - $-[1]); | ||
2046 | if ($string =~ /(?<!%)%L[udi]/) { | ||
2047 | WARN("\%Ld/%Lu are not-standard C, use %lld/%llu\n" . $herecurr); | ||
2048 | last; | ||
2049 | } | ||
2050 | } | ||
1987 | } | 2051 | } |
1988 | 2052 | ||
1989 | # If we have no input at all, then there is nothing to report on | 2053 | # If we have no input at all, then there is nothing to report on |
diff --git a/security/Makefile b/security/Makefile index 9e8b02525014..7ef1107a7287 100644 --- a/security/Makefile +++ b/security/Makefile | |||
@@ -18,3 +18,4 @@ obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o | |||
18 | obj-$(CONFIG_SECURITY_SMACK) += commoncap.o smack/built-in.o | 18 | obj-$(CONFIG_SECURITY_SMACK) += commoncap.o smack/built-in.o |
19 | obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o | 19 | obj-$(CONFIG_SECURITY_CAPABILITIES) += commoncap.o capability.o |
20 | obj-$(CONFIG_SECURITY_ROOTPLUG) += commoncap.o root_plug.o | 20 | obj-$(CONFIG_SECURITY_ROOTPLUG) += commoncap.o root_plug.o |
21 | obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o | ||
diff --git a/security/commoncap.c b/security/commoncap.c index e8c3f5e46705..5edabc7542ae 100644 --- a/security/commoncap.c +++ b/security/commoncap.c | |||
@@ -383,8 +383,8 @@ int cap_bprm_secureexec (struct linux_binprm *bprm) | |||
383 | current->egid != current->gid); | 383 | current->egid != current->gid); |
384 | } | 384 | } |
385 | 385 | ||
386 | int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, | 386 | int cap_inode_setxattr(struct dentry *dentry, const char *name, |
387 | size_t size, int flags) | 387 | const void *value, size_t size, int flags) |
388 | { | 388 | { |
389 | if (!strcmp(name, XATTR_NAME_CAPS)) { | 389 | if (!strcmp(name, XATTR_NAME_CAPS)) { |
390 | if (!capable(CAP_SETFCAP)) | 390 | if (!capable(CAP_SETFCAP)) |
@@ -397,7 +397,7 @@ int cap_inode_setxattr(struct dentry *dentry, char *name, void *value, | |||
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
399 | 399 | ||
400 | int cap_inode_removexattr(struct dentry *dentry, char *name) | 400 | int cap_inode_removexattr(struct dentry *dentry, const char *name) |
401 | { | 401 | { |
402 | if (!strcmp(name, XATTR_NAME_CAPS)) { | 402 | if (!strcmp(name, XATTR_NAME_CAPS)) { |
403 | if (!capable(CAP_SETFCAP)) | 403 | if (!capable(CAP_SETFCAP)) |
diff --git a/security/device_cgroup.c b/security/device_cgroup.c new file mode 100644 index 000000000000..4ea583689eec --- /dev/null +++ b/security/device_cgroup.c | |||
@@ -0,0 +1,575 @@ | |||
1 | /* | ||
2 | * dev_cgroup.c - device cgroup subsystem | ||
3 | * | ||
4 | * Copyright 2007 IBM Corp | ||
5 | */ | ||
6 | |||
7 | #include <linux/device_cgroup.h> | ||
8 | #include <linux/cgroup.h> | ||
9 | #include <linux/ctype.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/uaccess.h> | ||
12 | #include <linux/seq_file.h> | ||
13 | |||
14 | #define ACC_MKNOD 1 | ||
15 | #define ACC_READ 2 | ||
16 | #define ACC_WRITE 4 | ||
17 | #define ACC_MASK (ACC_MKNOD | ACC_READ | ACC_WRITE) | ||
18 | |||
19 | #define DEV_BLOCK 1 | ||
20 | #define DEV_CHAR 2 | ||
21 | #define DEV_ALL 4 /* this represents all devices */ | ||
22 | |||
23 | /* | ||
24 | * whitelist locking rules: | ||
25 | * cgroup_lock() cannot be taken under dev_cgroup->lock. | ||
26 | * dev_cgroup->lock can be taken with or without cgroup_lock(). | ||
27 | * | ||
28 | * modifications always require cgroup_lock | ||
29 | * modifications to a list which is visible require the | ||
30 | * dev_cgroup->lock *and* cgroup_lock() | ||
31 | * walking the list requires dev_cgroup->lock or cgroup_lock(). | ||
32 | * | ||
33 | * reasoning: dev_whitelist_copy() needs to kmalloc, so needs | ||
34 | * a mutex, which the cgroup_lock() is. Since modifying | ||
35 | * a visible list requires both locks, either lock can be | ||
36 | * taken for walking the list. | ||
37 | */ | ||
38 | |||
39 | struct dev_whitelist_item { | ||
40 | u32 major, minor; | ||
41 | short type; | ||
42 | short access; | ||
43 | struct list_head list; | ||
44 | }; | ||
45 | |||
46 | struct dev_cgroup { | ||
47 | struct cgroup_subsys_state css; | ||
48 | struct list_head whitelist; | ||
49 | spinlock_t lock; | ||
50 | }; | ||
51 | |||
52 | static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup) | ||
53 | { | ||
54 | return container_of(cgroup_subsys_state(cgroup, devices_subsys_id), | ||
55 | struct dev_cgroup, css); | ||
56 | } | ||
57 | |||
58 | struct cgroup_subsys devices_subsys; | ||
59 | |||
60 | static int devcgroup_can_attach(struct cgroup_subsys *ss, | ||
61 | struct cgroup *new_cgroup, struct task_struct *task) | ||
62 | { | ||
63 | if (current != task && !capable(CAP_SYS_ADMIN)) | ||
64 | return -EPERM; | ||
65 | |||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * called under cgroup_lock() | ||
71 | */ | ||
72 | static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig) | ||
73 | { | ||
74 | struct dev_whitelist_item *wh, *tmp, *new; | ||
75 | |||
76 | list_for_each_entry(wh, orig, list) { | ||
77 | new = kmalloc(sizeof(*wh), GFP_KERNEL); | ||
78 | if (!new) | ||
79 | goto free_and_exit; | ||
80 | new->major = wh->major; | ||
81 | new->minor = wh->minor; | ||
82 | new->type = wh->type; | ||
83 | new->access = wh->access; | ||
84 | list_add_tail(&new->list, dest); | ||
85 | } | ||
86 | |||
87 | return 0; | ||
88 | |||
89 | free_and_exit: | ||
90 | list_for_each_entry_safe(wh, tmp, dest, list) { | ||
91 | list_del(&wh->list); | ||
92 | kfree(wh); | ||
93 | } | ||
94 | return -ENOMEM; | ||
95 | } | ||
96 | |||
97 | /* Stupid prototype - don't bother combining existing entries */ | ||
98 | /* | ||
99 | * called under cgroup_lock() | ||
100 | * since the list is visible to other tasks, we need the spinlock also | ||
101 | */ | ||
102 | static int dev_whitelist_add(struct dev_cgroup *dev_cgroup, | ||
103 | struct dev_whitelist_item *wh) | ||
104 | { | ||
105 | struct dev_whitelist_item *whcopy; | ||
106 | |||
107 | whcopy = kmalloc(sizeof(*whcopy), GFP_KERNEL); | ||
108 | if (!whcopy) | ||
109 | return -ENOMEM; | ||
110 | |||
111 | memcpy(whcopy, wh, sizeof(*whcopy)); | ||
112 | spin_lock(&dev_cgroup->lock); | ||
113 | list_add_tail(&whcopy->list, &dev_cgroup->whitelist); | ||
114 | spin_unlock(&dev_cgroup->lock); | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * called under cgroup_lock() | ||
120 | * since the list is visible to other tasks, we need the spinlock also | ||
121 | */ | ||
122 | static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup, | ||
123 | struct dev_whitelist_item *wh) | ||
124 | { | ||
125 | struct dev_whitelist_item *walk, *tmp; | ||
126 | |||
127 | spin_lock(&dev_cgroup->lock); | ||
128 | list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) { | ||
129 | if (walk->type == DEV_ALL) | ||
130 | goto remove; | ||
131 | if (walk->type != wh->type) | ||
132 | continue; | ||
133 | if (walk->major != ~0 && walk->major != wh->major) | ||
134 | continue; | ||
135 | if (walk->minor != ~0 && walk->minor != wh->minor) | ||
136 | continue; | ||
137 | |||
138 | remove: | ||
139 | walk->access &= ~wh->access; | ||
140 | if (!walk->access) { | ||
141 | list_del(&walk->list); | ||
142 | kfree(walk); | ||
143 | } | ||
144 | } | ||
145 | spin_unlock(&dev_cgroup->lock); | ||
146 | } | ||
147 | |||
148 | /* | ||
149 | * called from kernel/cgroup.c with cgroup_lock() held. | ||
150 | */ | ||
151 | static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss, | ||
152 | struct cgroup *cgroup) | ||
153 | { | ||
154 | struct dev_cgroup *dev_cgroup, *parent_dev_cgroup; | ||
155 | struct cgroup *parent_cgroup; | ||
156 | int ret; | ||
157 | |||
158 | dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL); | ||
159 | if (!dev_cgroup) | ||
160 | return ERR_PTR(-ENOMEM); | ||
161 | INIT_LIST_HEAD(&dev_cgroup->whitelist); | ||
162 | parent_cgroup = cgroup->parent; | ||
163 | |||
164 | if (parent_cgroup == NULL) { | ||
165 | struct dev_whitelist_item *wh; | ||
166 | wh = kmalloc(sizeof(*wh), GFP_KERNEL); | ||
167 | if (!wh) { | ||
168 | kfree(dev_cgroup); | ||
169 | return ERR_PTR(-ENOMEM); | ||
170 | } | ||
171 | wh->minor = wh->major = ~0; | ||
172 | wh->type = DEV_ALL; | ||
173 | wh->access = ACC_MKNOD | ACC_READ | ACC_WRITE; | ||
174 | list_add(&wh->list, &dev_cgroup->whitelist); | ||
175 | } else { | ||
176 | parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup); | ||
177 | ret = dev_whitelist_copy(&dev_cgroup->whitelist, | ||
178 | &parent_dev_cgroup->whitelist); | ||
179 | if (ret) { | ||
180 | kfree(dev_cgroup); | ||
181 | return ERR_PTR(ret); | ||
182 | } | ||
183 | } | ||
184 | |||
185 | spin_lock_init(&dev_cgroup->lock); | ||
186 | return &dev_cgroup->css; | ||
187 | } | ||
188 | |||
189 | static void devcgroup_destroy(struct cgroup_subsys *ss, | ||
190 | struct cgroup *cgroup) | ||
191 | { | ||
192 | struct dev_cgroup *dev_cgroup; | ||
193 | struct dev_whitelist_item *wh, *tmp; | ||
194 | |||
195 | dev_cgroup = cgroup_to_devcgroup(cgroup); | ||
196 | list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) { | ||
197 | list_del(&wh->list); | ||
198 | kfree(wh); | ||
199 | } | ||
200 | kfree(dev_cgroup); | ||
201 | } | ||
202 | |||
203 | #define DEVCG_ALLOW 1 | ||
204 | #define DEVCG_DENY 2 | ||
205 | #define DEVCG_LIST 3 | ||
206 | |||
207 | #define MAJMINLEN 10 | ||
208 | #define ACCLEN 4 | ||
209 | |||
210 | static void set_access(char *acc, short access) | ||
211 | { | ||
212 | int idx = 0; | ||
213 | memset(acc, 0, ACCLEN); | ||
214 | if (access & ACC_READ) | ||
215 | acc[idx++] = 'r'; | ||
216 | if (access & ACC_WRITE) | ||
217 | acc[idx++] = 'w'; | ||
218 | if (access & ACC_MKNOD) | ||
219 | acc[idx++] = 'm'; | ||
220 | } | ||
221 | |||
222 | static char type_to_char(short type) | ||
223 | { | ||
224 | if (type == DEV_ALL) | ||
225 | return 'a'; | ||
226 | if (type == DEV_CHAR) | ||
227 | return 'c'; | ||
228 | if (type == DEV_BLOCK) | ||
229 | return 'b'; | ||
230 | return 'X'; | ||
231 | } | ||
232 | |||
233 | static void set_majmin(char *str, unsigned m) | ||
234 | { | ||
235 | memset(str, 0, MAJMINLEN); | ||
236 | if (m == ~0) | ||
237 | sprintf(str, "*"); | ||
238 | else | ||
239 | snprintf(str, MAJMINLEN, "%d", m); | ||
240 | } | ||
241 | |||
242 | static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft, | ||
243 | struct seq_file *m) | ||
244 | { | ||
245 | struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup); | ||
246 | struct dev_whitelist_item *wh; | ||
247 | char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; | ||
248 | |||
249 | spin_lock(&devcgroup->lock); | ||
250 | list_for_each_entry(wh, &devcgroup->whitelist, list) { | ||
251 | set_access(acc, wh->access); | ||
252 | set_majmin(maj, wh->major); | ||
253 | set_majmin(min, wh->minor); | ||
254 | seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type), | ||
255 | maj, min, acc); | ||
256 | } | ||
257 | spin_unlock(&devcgroup->lock); | ||
258 | |||
259 | return 0; | ||
260 | } | ||
261 | |||
262 | /* | ||
263 | * may_access_whitelist: | ||
264 | * does the access granted to dev_cgroup c contain the access | ||
265 | * requested in whitelist item refwh. | ||
266 | * return 1 if yes, 0 if no. | ||
267 | * call with c->lock held | ||
268 | */ | ||
269 | static int may_access_whitelist(struct dev_cgroup *c, | ||
270 | struct dev_whitelist_item *refwh) | ||
271 | { | ||
272 | struct dev_whitelist_item *whitem; | ||
273 | |||
274 | list_for_each_entry(whitem, &c->whitelist, list) { | ||
275 | if (whitem->type & DEV_ALL) | ||
276 | return 1; | ||
277 | if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK)) | ||
278 | continue; | ||
279 | if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR)) | ||
280 | continue; | ||
281 | if (whitem->major != ~0 && whitem->major != refwh->major) | ||
282 | continue; | ||
283 | if (whitem->minor != ~0 && whitem->minor != refwh->minor) | ||
284 | continue; | ||
285 | if (refwh->access & (~(whitem->access | ACC_MASK))) | ||
286 | continue; | ||
287 | return 1; | ||
288 | } | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * parent_has_perm: | ||
294 | * when adding a new allow rule to a device whitelist, the rule | ||
295 | * must be allowed in the parent device | ||
296 | */ | ||
297 | static int parent_has_perm(struct cgroup *childcg, | ||
298 | struct dev_whitelist_item *wh) | ||
299 | { | ||
300 | struct cgroup *pcg = childcg->parent; | ||
301 | struct dev_cgroup *parent; | ||
302 | int ret; | ||
303 | |||
304 | if (!pcg) | ||
305 | return 1; | ||
306 | parent = cgroup_to_devcgroup(pcg); | ||
307 | spin_lock(&parent->lock); | ||
308 | ret = may_access_whitelist(parent, wh); | ||
309 | spin_unlock(&parent->lock); | ||
310 | return ret; | ||
311 | } | ||
312 | |||
313 | /* | ||
314 | * Modify the whitelist using allow/deny rules. | ||
315 | * CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD | ||
316 | * so we can give a container CAP_MKNOD to let it create devices but not | ||
317 | * modify the whitelist. | ||
318 | * It seems likely we'll want to add a CAP_CONTAINER capability to allow | ||
319 | * us to also grant CAP_SYS_ADMIN to containers without giving away the | ||
320 | * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN | ||
321 | * | ||
322 | * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting | ||
323 | * new access is only allowed if you're in the top-level cgroup, or your | ||
324 | * parent cgroup has the access you're asking for. | ||
325 | */ | ||
326 | static ssize_t devcgroup_access_write(struct cgroup *cgroup, struct cftype *cft, | ||
327 | struct file *file, const char __user *userbuf, | ||
328 | size_t nbytes, loff_t *ppos) | ||
329 | { | ||
330 | struct cgroup *cur_cgroup; | ||
331 | struct dev_cgroup *devcgroup, *cur_devcgroup; | ||
332 | int filetype = cft->private; | ||
333 | char *buffer, *b; | ||
334 | int retval = 0, count; | ||
335 | struct dev_whitelist_item wh; | ||
336 | |||
337 | if (!capable(CAP_SYS_ADMIN)) | ||
338 | return -EPERM; | ||
339 | |||
340 | devcgroup = cgroup_to_devcgroup(cgroup); | ||
341 | cur_cgroup = task_cgroup(current, devices_subsys.subsys_id); | ||
342 | cur_devcgroup = cgroup_to_devcgroup(cur_cgroup); | ||
343 | |||
344 | buffer = kmalloc(nbytes+1, GFP_KERNEL); | ||
345 | if (!buffer) | ||
346 | return -ENOMEM; | ||
347 | |||
348 | if (copy_from_user(buffer, userbuf, nbytes)) { | ||
349 | retval = -EFAULT; | ||
350 | goto out1; | ||
351 | } | ||
352 | buffer[nbytes] = 0; /* nul-terminate */ | ||
353 | |||
354 | cgroup_lock(); | ||
355 | if (cgroup_is_removed(cgroup)) { | ||
356 | retval = -ENODEV; | ||
357 | goto out2; | ||
358 | } | ||
359 | |||
360 | memset(&wh, 0, sizeof(wh)); | ||
361 | b = buffer; | ||
362 | |||
363 | switch (*b) { | ||
364 | case 'a': | ||
365 | wh.type = DEV_ALL; | ||
366 | wh.access = ACC_MASK; | ||
367 | goto handle; | ||
368 | case 'b': | ||
369 | wh.type = DEV_BLOCK; | ||
370 | break; | ||
371 | case 'c': | ||
372 | wh.type = DEV_CHAR; | ||
373 | break; | ||
374 | default: | ||
375 | retval = -EINVAL; | ||
376 | goto out2; | ||
377 | } | ||
378 | b++; | ||
379 | if (!isspace(*b)) { | ||
380 | retval = -EINVAL; | ||
381 | goto out2; | ||
382 | } | ||
383 | b++; | ||
384 | if (*b == '*') { | ||
385 | wh.major = ~0; | ||
386 | b++; | ||
387 | } else if (isdigit(*b)) { | ||
388 | wh.major = 0; | ||
389 | while (isdigit(*b)) { | ||
390 | wh.major = wh.major*10+(*b-'0'); | ||
391 | b++; | ||
392 | } | ||
393 | } else { | ||
394 | retval = -EINVAL; | ||
395 | goto out2; | ||
396 | } | ||
397 | if (*b != ':') { | ||
398 | retval = -EINVAL; | ||
399 | goto out2; | ||
400 | } | ||
401 | b++; | ||
402 | |||
403 | /* read minor */ | ||
404 | if (*b == '*') { | ||
405 | wh.minor = ~0; | ||
406 | b++; | ||
407 | } else if (isdigit(*b)) { | ||
408 | wh.minor = 0; | ||
409 | while (isdigit(*b)) { | ||
410 | wh.minor = wh.minor*10+(*b-'0'); | ||
411 | b++; | ||
412 | } | ||
413 | } else { | ||
414 | retval = -EINVAL; | ||
415 | goto out2; | ||
416 | } | ||
417 | if (!isspace(*b)) { | ||
418 | retval = -EINVAL; | ||
419 | goto out2; | ||
420 | } | ||
421 | for (b++, count = 0; count < 3; count++, b++) { | ||
422 | switch (*b) { | ||
423 | case 'r': | ||
424 | wh.access |= ACC_READ; | ||
425 | break; | ||
426 | case 'w': | ||
427 | wh.access |= ACC_WRITE; | ||
428 | break; | ||
429 | case 'm': | ||
430 | wh.access |= ACC_MKNOD; | ||
431 | break; | ||
432 | case '\n': | ||
433 | case '\0': | ||
434 | count = 3; | ||
435 | break; | ||
436 | default: | ||
437 | retval = -EINVAL; | ||
438 | goto out2; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | handle: | ||
443 | retval = 0; | ||
444 | switch (filetype) { | ||
445 | case DEVCG_ALLOW: | ||
446 | if (!parent_has_perm(cgroup, &wh)) | ||
447 | retval = -EPERM; | ||
448 | else | ||
449 | retval = dev_whitelist_add(devcgroup, &wh); | ||
450 | break; | ||
451 | case DEVCG_DENY: | ||
452 | dev_whitelist_rm(devcgroup, &wh); | ||
453 | break; | ||
454 | default: | ||
455 | retval = -EINVAL; | ||
456 | goto out2; | ||
457 | } | ||
458 | |||
459 | if (retval == 0) | ||
460 | retval = nbytes; | ||
461 | |||
462 | out2: | ||
463 | cgroup_unlock(); | ||
464 | out1: | ||
465 | kfree(buffer); | ||
466 | return retval; | ||
467 | } | ||
468 | |||
469 | static struct cftype dev_cgroup_files[] = { | ||
470 | { | ||
471 | .name = "allow", | ||
472 | .write = devcgroup_access_write, | ||
473 | .private = DEVCG_ALLOW, | ||
474 | }, | ||
475 | { | ||
476 | .name = "deny", | ||
477 | .write = devcgroup_access_write, | ||
478 | .private = DEVCG_DENY, | ||
479 | }, | ||
480 | { | ||
481 | .name = "list", | ||
482 | .read_seq_string = devcgroup_seq_read, | ||
483 | .private = DEVCG_LIST, | ||
484 | }, | ||
485 | }; | ||
486 | |||
487 | static int devcgroup_populate(struct cgroup_subsys *ss, | ||
488 | struct cgroup *cgroup) | ||
489 | { | ||
490 | return cgroup_add_files(cgroup, ss, dev_cgroup_files, | ||
491 | ARRAY_SIZE(dev_cgroup_files)); | ||
492 | } | ||
493 | |||
494 | struct cgroup_subsys devices_subsys = { | ||
495 | .name = "devices", | ||
496 | .can_attach = devcgroup_can_attach, | ||
497 | .create = devcgroup_create, | ||
498 | .destroy = devcgroup_destroy, | ||
499 | .populate = devcgroup_populate, | ||
500 | .subsys_id = devices_subsys_id, | ||
501 | }; | ||
502 | |||
503 | int devcgroup_inode_permission(struct inode *inode, int mask) | ||
504 | { | ||
505 | struct cgroup *cgroup; | ||
506 | struct dev_cgroup *dev_cgroup; | ||
507 | struct dev_whitelist_item *wh; | ||
508 | |||
509 | dev_t device = inode->i_rdev; | ||
510 | if (!device) | ||
511 | return 0; | ||
512 | if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) | ||
513 | return 0; | ||
514 | cgroup = task_cgroup(current, devices_subsys.subsys_id); | ||
515 | dev_cgroup = cgroup_to_devcgroup(cgroup); | ||
516 | if (!dev_cgroup) | ||
517 | return 0; | ||
518 | |||
519 | spin_lock(&dev_cgroup->lock); | ||
520 | list_for_each_entry(wh, &dev_cgroup->whitelist, list) { | ||
521 | if (wh->type & DEV_ALL) | ||
522 | goto acc_check; | ||
523 | if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode)) | ||
524 | continue; | ||
525 | if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode)) | ||
526 | continue; | ||
527 | if (wh->major != ~0 && wh->major != imajor(inode)) | ||
528 | continue; | ||
529 | if (wh->minor != ~0 && wh->minor != iminor(inode)) | ||
530 | continue; | ||
531 | acc_check: | ||
532 | if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE)) | ||
533 | continue; | ||
534 | if ((mask & MAY_READ) && !(wh->access & ACC_READ)) | ||
535 | continue; | ||
536 | spin_unlock(&dev_cgroup->lock); | ||
537 | return 0; | ||
538 | } | ||
539 | spin_unlock(&dev_cgroup->lock); | ||
540 | |||
541 | return -EPERM; | ||
542 | } | ||
543 | |||
544 | int devcgroup_inode_mknod(int mode, dev_t dev) | ||
545 | { | ||
546 | struct cgroup *cgroup; | ||
547 | struct dev_cgroup *dev_cgroup; | ||
548 | struct dev_whitelist_item *wh; | ||
549 | |||
550 | cgroup = task_cgroup(current, devices_subsys.subsys_id); | ||
551 | dev_cgroup = cgroup_to_devcgroup(cgroup); | ||
552 | if (!dev_cgroup) | ||
553 | return 0; | ||
554 | |||
555 | spin_lock(&dev_cgroup->lock); | ||
556 | list_for_each_entry(wh, &dev_cgroup->whitelist, list) { | ||
557 | if (wh->type & DEV_ALL) | ||
558 | goto acc_check; | ||
559 | if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode)) | ||
560 | continue; | ||
561 | if ((wh->type & DEV_CHAR) && !S_ISCHR(mode)) | ||
562 | continue; | ||
563 | if (wh->major != ~0 && wh->major != MAJOR(dev)) | ||
564 | continue; | ||
565 | if (wh->minor != ~0 && wh->minor != MINOR(dev)) | ||
566 | continue; | ||
567 | acc_check: | ||
568 | if (!(wh->access & ACC_MKNOD)) | ||
569 | continue; | ||
570 | spin_unlock(&dev_cgroup->lock); | ||
571 | return 0; | ||
572 | } | ||
573 | spin_unlock(&dev_cgroup->lock); | ||
574 | return -EPERM; | ||
575 | } | ||
diff --git a/security/dummy.c b/security/dummy.c index 58d4dd1af5c7..48cf30226e16 100644 --- a/security/dummy.c +++ b/security/dummy.c | |||
@@ -365,8 +365,8 @@ static void dummy_inode_delete (struct inode *ino) | |||
365 | return; | 365 | return; |
366 | } | 366 | } |
367 | 367 | ||
368 | static int dummy_inode_setxattr (struct dentry *dentry, char *name, void *value, | 368 | static int dummy_inode_setxattr (struct dentry *dentry, const char *name, |
369 | size_t size, int flags) | 369 | const void *value, size_t size, int flags) |
370 | { | 370 | { |
371 | if (!strncmp(name, XATTR_SECURITY_PREFIX, | 371 | if (!strncmp(name, XATTR_SECURITY_PREFIX, |
372 | sizeof(XATTR_SECURITY_PREFIX) - 1) && | 372 | sizeof(XATTR_SECURITY_PREFIX) - 1) && |
@@ -375,12 +375,13 @@ static int dummy_inode_setxattr (struct dentry *dentry, char *name, void *value, | |||
375 | return 0; | 375 | return 0; |
376 | } | 376 | } |
377 | 377 | ||
378 | static void dummy_inode_post_setxattr (struct dentry *dentry, char *name, void *value, | 378 | static void dummy_inode_post_setxattr (struct dentry *dentry, const char *name, |
379 | size_t size, int flags) | 379 | const void *value, size_t size, |
380 | int flags) | ||
380 | { | 381 | { |
381 | } | 382 | } |
382 | 383 | ||
383 | static int dummy_inode_getxattr (struct dentry *dentry, char *name) | 384 | static int dummy_inode_getxattr (struct dentry *dentry, const char *name) |
384 | { | 385 | { |
385 | return 0; | 386 | return 0; |
386 | } | 387 | } |
@@ -390,7 +391,7 @@ static int dummy_inode_listxattr (struct dentry *dentry) | |||
390 | return 0; | 391 | return 0; |
391 | } | 392 | } |
392 | 393 | ||
393 | static int dummy_inode_removexattr (struct dentry *dentry, char *name) | 394 | static int dummy_inode_removexattr (struct dentry *dentry, const char *name) |
394 | { | 395 | { |
395 | if (!strncmp(name, XATTR_SECURITY_PREFIX, | 396 | if (!strncmp(name, XATTR_SECURITY_PREFIX, |
396 | sizeof(XATTR_SECURITY_PREFIX) - 1) && | 397 | sizeof(XATTR_SECURITY_PREFIX) - 1) && |
@@ -993,6 +994,13 @@ static inline int dummy_key_permission(key_ref_t key_ref, | |||
993 | { | 994 | { |
994 | return 0; | 995 | return 0; |
995 | } | 996 | } |
997 | |||
998 | static int dummy_key_getsecurity(struct key *key, char **_buffer) | ||
999 | { | ||
1000 | *_buffer = NULL; | ||
1001 | return 0; | ||
1002 | } | ||
1003 | |||
996 | #endif /* CONFIG_KEYS */ | 1004 | #endif /* CONFIG_KEYS */ |
997 | 1005 | ||
998 | #ifdef CONFIG_AUDIT | 1006 | #ifdef CONFIG_AUDIT |
@@ -1209,6 +1217,7 @@ void security_fixup_ops (struct security_operations *ops) | |||
1209 | set_to_dummy_if_null(ops, key_alloc); | 1217 | set_to_dummy_if_null(ops, key_alloc); |
1210 | set_to_dummy_if_null(ops, key_free); | 1218 | set_to_dummy_if_null(ops, key_free); |
1211 | set_to_dummy_if_null(ops, key_permission); | 1219 | set_to_dummy_if_null(ops, key_permission); |
1220 | set_to_dummy_if_null(ops, key_getsecurity); | ||
1212 | #endif /* CONFIG_KEYS */ | 1221 | #endif /* CONFIG_KEYS */ |
1213 | #ifdef CONFIG_AUDIT | 1222 | #ifdef CONFIG_AUDIT |
1214 | set_to_dummy_if_null(ops, audit_rule_init); | 1223 | set_to_dummy_if_null(ops, audit_rule_init); |
diff --git a/security/keys/Makefile b/security/keys/Makefile index 5145adfb6a05..747a464943af 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile | |||
@@ -14,3 +14,4 @@ obj-y := \ | |||
14 | 14 | ||
15 | obj-$(CONFIG_KEYS_COMPAT) += compat.o | 15 | obj-$(CONFIG_KEYS_COMPAT) += compat.o |
16 | obj-$(CONFIG_PROC_FS) += proc.o | 16 | obj-$(CONFIG_PROC_FS) += proc.o |
17 | obj-$(CONFIG_SYSCTL) += sysctl.o | ||
diff --git a/security/keys/compat.c b/security/keys/compat.c index e10ec995f275..c766c68a63bc 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c | |||
@@ -79,6 +79,9 @@ asmlinkage long compat_sys_keyctl(u32 option, | |||
79 | case KEYCTL_ASSUME_AUTHORITY: | 79 | case KEYCTL_ASSUME_AUTHORITY: |
80 | return keyctl_assume_authority(arg2); | 80 | return keyctl_assume_authority(arg2); |
81 | 81 | ||
82 | case KEYCTL_GET_SECURITY: | ||
83 | return keyctl_get_security(arg2, compat_ptr(arg3), arg4); | ||
84 | |||
82 | default: | 85 | default: |
83 | return -EOPNOTSUPP; | 86 | return -EOPNOTSUPP; |
84 | } | 87 | } |
diff --git a/security/keys/internal.h b/security/keys/internal.h index 7d894ef70370..8c05587f5018 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h | |||
@@ -57,10 +57,6 @@ struct key_user { | |||
57 | int qnbytes; /* number of bytes allocated to this user */ | 57 | int qnbytes; /* number of bytes allocated to this user */ |
58 | }; | 58 | }; |
59 | 59 | ||
60 | #define KEYQUOTA_MAX_KEYS 100 | ||
61 | #define KEYQUOTA_MAX_BYTES 10000 | ||
62 | #define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */ | ||
63 | |||
64 | extern struct rb_root key_user_tree; | 60 | extern struct rb_root key_user_tree; |
65 | extern spinlock_t key_user_lock; | 61 | extern spinlock_t key_user_lock; |
66 | extern struct key_user root_key_user; | 62 | extern struct key_user root_key_user; |
@@ -68,6 +64,16 @@ extern struct key_user root_key_user; | |||
68 | extern struct key_user *key_user_lookup(uid_t uid); | 64 | extern struct key_user *key_user_lookup(uid_t uid); |
69 | extern void key_user_put(struct key_user *user); | 65 | extern void key_user_put(struct key_user *user); |
70 | 66 | ||
67 | /* | ||
68 | * key quota limits | ||
69 | * - root has its own separate limits to everyone else | ||
70 | */ | ||
71 | extern unsigned key_quota_root_maxkeys; | ||
72 | extern unsigned key_quota_root_maxbytes; | ||
73 | extern unsigned key_quota_maxkeys; | ||
74 | extern unsigned key_quota_maxbytes; | ||
75 | |||
76 | #define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */ | ||
71 | 77 | ||
72 | 78 | ||
73 | extern struct rb_root key_serial_tree; | 79 | extern struct rb_root key_serial_tree; |
@@ -77,8 +83,6 @@ extern struct mutex key_construction_mutex; | |||
77 | extern wait_queue_head_t request_key_conswq; | 83 | extern wait_queue_head_t request_key_conswq; |
78 | 84 | ||
79 | 85 | ||
80 | extern void keyring_publish_name(struct key *keyring); | ||
81 | |||
82 | extern int __key_link(struct key *keyring, struct key *key); | 86 | extern int __key_link(struct key *keyring, struct key *key); |
83 | 87 | ||
84 | extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, | 88 | extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, |
@@ -102,14 +106,15 @@ extern key_ref_t search_process_keyrings(struct key_type *type, | |||
102 | key_match_func_t match, | 106 | key_match_func_t match, |
103 | struct task_struct *tsk); | 107 | struct task_struct *tsk); |
104 | 108 | ||
105 | extern struct key *find_keyring_by_name(const char *name, key_serial_t bound); | 109 | extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); |
106 | 110 | ||
107 | extern int install_thread_keyring(struct task_struct *tsk); | 111 | extern int install_thread_keyring(struct task_struct *tsk); |
108 | extern int install_process_keyring(struct task_struct *tsk); | 112 | extern int install_process_keyring(struct task_struct *tsk); |
109 | 113 | ||
110 | extern struct key *request_key_and_link(struct key_type *type, | 114 | extern struct key *request_key_and_link(struct key_type *type, |
111 | const char *description, | 115 | const char *description, |
112 | const char *callout_info, | 116 | const void *callout_info, |
117 | size_t callout_len, | ||
113 | void *aux, | 118 | void *aux, |
114 | struct key *dest_keyring, | 119 | struct key *dest_keyring, |
115 | unsigned long flags); | 120 | unsigned long flags); |
@@ -120,13 +125,15 @@ extern struct key *request_key_and_link(struct key_type *type, | |||
120 | struct request_key_auth { | 125 | struct request_key_auth { |
121 | struct key *target_key; | 126 | struct key *target_key; |
122 | struct task_struct *context; | 127 | struct task_struct *context; |
123 | char *callout_info; | 128 | void *callout_info; |
129 | size_t callout_len; | ||
124 | pid_t pid; | 130 | pid_t pid; |
125 | }; | 131 | }; |
126 | 132 | ||
127 | extern struct key_type key_type_request_key_auth; | 133 | extern struct key_type key_type_request_key_auth; |
128 | extern struct key *request_key_auth_new(struct key *target, | 134 | extern struct key *request_key_auth_new(struct key *target, |
129 | const char *callout_info); | 135 | const void *callout_info, |
136 | size_t callout_len); | ||
130 | 137 | ||
131 | extern struct key *key_get_instantiation_authkey(key_serial_t target_id); | 138 | extern struct key *key_get_instantiation_authkey(key_serial_t target_id); |
132 | 139 | ||
@@ -152,7 +159,8 @@ extern long keyctl_negate_key(key_serial_t, unsigned, key_serial_t); | |||
152 | extern long keyctl_set_reqkey_keyring(int); | 159 | extern long keyctl_set_reqkey_keyring(int); |
153 | extern long keyctl_set_timeout(key_serial_t, unsigned); | 160 | extern long keyctl_set_timeout(key_serial_t, unsigned); |
154 | extern long keyctl_assume_authority(key_serial_t); | 161 | extern long keyctl_assume_authority(key_serial_t); |
155 | 162 | extern long keyctl_get_security(key_serial_t keyid, char __user *buffer, | |
163 | size_t buflen); | ||
156 | 164 | ||
157 | /* | 165 | /* |
158 | * debugging key validation | 166 | * debugging key validation |
diff --git a/security/keys/key.c b/security/keys/key.c index 654d23baf352..14948cf83ef6 100644 --- a/security/keys/key.c +++ b/security/keys/key.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* Basic authentication token and access key management | 1 | /* Basic authentication token and access key management |
2 | * | 2 | * |
3 | * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -27,6 +27,11 @@ DEFINE_SPINLOCK(key_serial_lock); | |||
27 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ | 27 | struct rb_root key_user_tree; /* tree of quota records indexed by UID */ |
28 | DEFINE_SPINLOCK(key_user_lock); | 28 | DEFINE_SPINLOCK(key_user_lock); |
29 | 29 | ||
30 | unsigned int key_quota_root_maxkeys = 200; /* root's key count quota */ | ||
31 | unsigned int key_quota_root_maxbytes = 20000; /* root's key space quota */ | ||
32 | unsigned int key_quota_maxkeys = 200; /* general key count quota */ | ||
33 | unsigned int key_quota_maxbytes = 20000; /* general key space quota */ | ||
34 | |||
30 | static LIST_HEAD(key_types_list); | 35 | static LIST_HEAD(key_types_list); |
31 | static DECLARE_RWSEM(key_types_sem); | 36 | static DECLARE_RWSEM(key_types_sem); |
32 | 37 | ||
@@ -139,36 +144,6 @@ void key_user_put(struct key_user *user) | |||
139 | 144 | ||
140 | /*****************************************************************************/ | 145 | /*****************************************************************************/ |
141 | /* | 146 | /* |
142 | * insert a key with a fixed serial number | ||
143 | */ | ||
144 | static void __init __key_insert_serial(struct key *key) | ||
145 | { | ||
146 | struct rb_node *parent, **p; | ||
147 | struct key *xkey; | ||
148 | |||
149 | parent = NULL; | ||
150 | p = &key_serial_tree.rb_node; | ||
151 | |||
152 | while (*p) { | ||
153 | parent = *p; | ||
154 | xkey = rb_entry(parent, struct key, serial_node); | ||
155 | |||
156 | if (key->serial < xkey->serial) | ||
157 | p = &(*p)->rb_left; | ||
158 | else if (key->serial > xkey->serial) | ||
159 | p = &(*p)->rb_right; | ||
160 | else | ||
161 | BUG(); | ||
162 | } | ||
163 | |||
164 | /* we've found a suitable hole - arrange for this key to occupy it */ | ||
165 | rb_link_node(&key->serial_node, parent, p); | ||
166 | rb_insert_color(&key->serial_node, &key_serial_tree); | ||
167 | |||
168 | } /* end __key_insert_serial() */ | ||
169 | |||
170 | /*****************************************************************************/ | ||
171 | /* | ||
172 | * assign a key the next unique serial number | 147 | * assign a key the next unique serial number |
173 | * - these are assigned randomly to avoid security issues through covert | 148 | * - these are assigned randomly to avoid security issues through covert |
174 | * channel problems | 149 | * channel problems |
@@ -266,11 +241,16 @@ struct key *key_alloc(struct key_type *type, const char *desc, | |||
266 | /* check that the user's quota permits allocation of another key and | 241 | /* check that the user's quota permits allocation of another key and |
267 | * its description */ | 242 | * its description */ |
268 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { | 243 | if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { |
244 | unsigned maxkeys = (uid == 0) ? | ||
245 | key_quota_root_maxkeys : key_quota_maxkeys; | ||
246 | unsigned maxbytes = (uid == 0) ? | ||
247 | key_quota_root_maxbytes : key_quota_maxbytes; | ||
248 | |||
269 | spin_lock(&user->lock); | 249 | spin_lock(&user->lock); |
270 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { | 250 | if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) { |
271 | if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS || | 251 | if (user->qnkeys + 1 >= maxkeys || |
272 | user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES | 252 | user->qnbytes + quotalen >= maxbytes || |
273 | ) | 253 | user->qnbytes + quotalen < user->qnbytes) |
274 | goto no_quota; | 254 | goto no_quota; |
275 | } | 255 | } |
276 | 256 | ||
@@ -375,11 +355,14 @@ int key_payload_reserve(struct key *key, size_t datalen) | |||
375 | 355 | ||
376 | /* contemplate the quota adjustment */ | 356 | /* contemplate the quota adjustment */ |
377 | if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { | 357 | if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { |
358 | unsigned maxbytes = (key->user->uid == 0) ? | ||
359 | key_quota_root_maxbytes : key_quota_maxbytes; | ||
360 | |||
378 | spin_lock(&key->user->lock); | 361 | spin_lock(&key->user->lock); |
379 | 362 | ||
380 | if (delta > 0 && | 363 | if (delta > 0 && |
381 | key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES | 364 | (key->user->qnbytes + delta >= maxbytes || |
382 | ) { | 365 | key->user->qnbytes + delta < key->user->qnbytes)) { |
383 | ret = -EDQUOT; | 366 | ret = -EDQUOT; |
384 | } | 367 | } |
385 | else { | 368 | else { |
@@ -757,11 +740,11 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, | |||
757 | const char *description, | 740 | const char *description, |
758 | const void *payload, | 741 | const void *payload, |
759 | size_t plen, | 742 | size_t plen, |
743 | key_perm_t perm, | ||
760 | unsigned long flags) | 744 | unsigned long flags) |
761 | { | 745 | { |
762 | struct key_type *ktype; | 746 | struct key_type *ktype; |
763 | struct key *keyring, *key = NULL; | 747 | struct key *keyring, *key = NULL; |
764 | key_perm_t perm; | ||
765 | key_ref_t key_ref; | 748 | key_ref_t key_ref; |
766 | int ret; | 749 | int ret; |
767 | 750 | ||
@@ -806,15 +789,17 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, | |||
806 | goto found_matching_key; | 789 | goto found_matching_key; |
807 | } | 790 | } |
808 | 791 | ||
809 | /* decide on the permissions we want */ | 792 | /* if the client doesn't provide, decide on the permissions we want */ |
810 | perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; | 793 | if (perm == KEY_PERM_UNDEF) { |
811 | perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; | 794 | perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; |
795 | perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; | ||
812 | 796 | ||
813 | if (ktype->read) | 797 | if (ktype->read) |
814 | perm |= KEY_POS_READ | KEY_USR_READ; | 798 | perm |= KEY_POS_READ | KEY_USR_READ; |
815 | 799 | ||
816 | if (ktype == &key_type_keyring || ktype->update) | 800 | if (ktype == &key_type_keyring || ktype->update) |
817 | perm |= KEY_USR_WRITE; | 801 | perm |= KEY_USR_WRITE; |
802 | } | ||
818 | 803 | ||
819 | /* allocate a new key */ | 804 | /* allocate a new key */ |
820 | key = key_alloc(ktype, description, current->fsuid, current->fsgid, | 805 | key = key_alloc(ktype, description, current->fsuid, current->fsgid, |
@@ -1018,17 +1003,4 @@ void __init key_init(void) | |||
1018 | rb_insert_color(&root_key_user.node, | 1003 | rb_insert_color(&root_key_user.node, |
1019 | &key_user_tree); | 1004 | &key_user_tree); |
1020 | 1005 | ||
1021 | /* record root's user standard keyrings */ | ||
1022 | key_check(&root_user_keyring); | ||
1023 | key_check(&root_session_keyring); | ||
1024 | |||
1025 | __key_insert_serial(&root_user_keyring); | ||
1026 | __key_insert_serial(&root_session_keyring); | ||
1027 | |||
1028 | keyring_publish_name(&root_user_keyring); | ||
1029 | keyring_publish_name(&root_session_keyring); | ||
1030 | |||
1031 | /* link the two root keyrings together */ | ||
1032 | key_link(&root_session_keyring, &root_user_keyring); | ||
1033 | |||
1034 | } /* end key_init() */ | 1006 | } /* end key_init() */ |
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index d9ca15c109cc..acc9c89e40a8 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <linux/capability.h> | 19 | #include <linux/capability.h> |
20 | #include <linux/string.h> | 20 | #include <linux/string.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/vmalloc.h> | ||
23 | #include <linux/security.h> | ||
22 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
23 | #include "internal.h" | 25 | #include "internal.h" |
24 | 26 | ||
@@ -62,9 +64,10 @@ asmlinkage long sys_add_key(const char __user *_type, | |||
62 | char type[32], *description; | 64 | char type[32], *description; |
63 | void *payload; | 65 | void *payload; |
64 | long ret; | 66 | long ret; |
67 | bool vm; | ||
65 | 68 | ||
66 | ret = -EINVAL; | 69 | ret = -EINVAL; |
67 | if (plen > 32767) | 70 | if (plen > 1024 * 1024 - 1) |
68 | goto error; | 71 | goto error; |
69 | 72 | ||
70 | /* draw all the data into kernel space */ | 73 | /* draw all the data into kernel space */ |
@@ -81,11 +84,18 @@ asmlinkage long sys_add_key(const char __user *_type, | |||
81 | /* pull the payload in if one was supplied */ | 84 | /* pull the payload in if one was supplied */ |
82 | payload = NULL; | 85 | payload = NULL; |
83 | 86 | ||
87 | vm = false; | ||
84 | if (_payload) { | 88 | if (_payload) { |
85 | ret = -ENOMEM; | 89 | ret = -ENOMEM; |
86 | payload = kmalloc(plen, GFP_KERNEL); | 90 | payload = kmalloc(plen, GFP_KERNEL); |
87 | if (!payload) | 91 | if (!payload) { |
88 | goto error2; | 92 | if (plen <= PAGE_SIZE) |
93 | goto error2; | ||
94 | vm = true; | ||
95 | payload = vmalloc(plen); | ||
96 | if (!payload) | ||
97 | goto error2; | ||
98 | } | ||
89 | 99 | ||
90 | ret = -EFAULT; | 100 | ret = -EFAULT; |
91 | if (copy_from_user(payload, _payload, plen) != 0) | 101 | if (copy_from_user(payload, _payload, plen) != 0) |
@@ -102,7 +112,8 @@ asmlinkage long sys_add_key(const char __user *_type, | |||
102 | /* create or update the requested key and add it to the target | 112 | /* create or update the requested key and add it to the target |
103 | * keyring */ | 113 | * keyring */ |
104 | key_ref = key_create_or_update(keyring_ref, type, description, | 114 | key_ref = key_create_or_update(keyring_ref, type, description, |
105 | payload, plen, KEY_ALLOC_IN_QUOTA); | 115 | payload, plen, KEY_PERM_UNDEF, |
116 | KEY_ALLOC_IN_QUOTA); | ||
106 | if (!IS_ERR(key_ref)) { | 117 | if (!IS_ERR(key_ref)) { |
107 | ret = key_ref_to_ptr(key_ref)->serial; | 118 | ret = key_ref_to_ptr(key_ref)->serial; |
108 | key_ref_put(key_ref); | 119 | key_ref_put(key_ref); |
@@ -113,7 +124,10 @@ asmlinkage long sys_add_key(const char __user *_type, | |||
113 | 124 | ||
114 | key_ref_put(keyring_ref); | 125 | key_ref_put(keyring_ref); |
115 | error3: | 126 | error3: |
116 | kfree(payload); | 127 | if (!vm) |
128 | kfree(payload); | ||
129 | else | ||
130 | vfree(payload); | ||
117 | error2: | 131 | error2: |
118 | kfree(description); | 132 | kfree(description); |
119 | error: | 133 | error: |
@@ -140,6 +154,7 @@ asmlinkage long sys_request_key(const char __user *_type, | |||
140 | struct key_type *ktype; | 154 | struct key_type *ktype; |
141 | struct key *key; | 155 | struct key *key; |
142 | key_ref_t dest_ref; | 156 | key_ref_t dest_ref; |
157 | size_t callout_len; | ||
143 | char type[32], *description, *callout_info; | 158 | char type[32], *description, *callout_info; |
144 | long ret; | 159 | long ret; |
145 | 160 | ||
@@ -157,12 +172,14 @@ asmlinkage long sys_request_key(const char __user *_type, | |||
157 | 172 | ||
158 | /* pull the callout info into kernel space */ | 173 | /* pull the callout info into kernel space */ |
159 | callout_info = NULL; | 174 | callout_info = NULL; |
175 | callout_len = 0; | ||
160 | if (_callout_info) { | 176 | if (_callout_info) { |
161 | callout_info = strndup_user(_callout_info, PAGE_SIZE); | 177 | callout_info = strndup_user(_callout_info, PAGE_SIZE); |
162 | if (IS_ERR(callout_info)) { | 178 | if (IS_ERR(callout_info)) { |
163 | ret = PTR_ERR(callout_info); | 179 | ret = PTR_ERR(callout_info); |
164 | goto error2; | 180 | goto error2; |
165 | } | 181 | } |
182 | callout_len = strlen(callout_info); | ||
166 | } | 183 | } |
167 | 184 | ||
168 | /* get the destination keyring if specified */ | 185 | /* get the destination keyring if specified */ |
@@ -183,8 +200,8 @@ asmlinkage long sys_request_key(const char __user *_type, | |||
183 | } | 200 | } |
184 | 201 | ||
185 | /* do the search */ | 202 | /* do the search */ |
186 | key = request_key_and_link(ktype, description, callout_info, NULL, | 203 | key = request_key_and_link(ktype, description, callout_info, |
187 | key_ref_to_ptr(dest_ref), | 204 | callout_len, NULL, key_ref_to_ptr(dest_ref), |
188 | KEY_ALLOC_IN_QUOTA); | 205 | KEY_ALLOC_IN_QUOTA); |
189 | if (IS_ERR(key)) { | 206 | if (IS_ERR(key)) { |
190 | ret = PTR_ERR(key); | 207 | ret = PTR_ERR(key); |
@@ -714,10 +731,16 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) | |||
714 | 731 | ||
715 | /* transfer the quota burden to the new user */ | 732 | /* transfer the quota burden to the new user */ |
716 | if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { | 733 | if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { |
734 | unsigned maxkeys = (uid == 0) ? | ||
735 | key_quota_root_maxkeys : key_quota_maxkeys; | ||
736 | unsigned maxbytes = (uid == 0) ? | ||
737 | key_quota_root_maxbytes : key_quota_maxbytes; | ||
738 | |||
717 | spin_lock(&newowner->lock); | 739 | spin_lock(&newowner->lock); |
718 | if (newowner->qnkeys + 1 >= KEYQUOTA_MAX_KEYS || | 740 | if (newowner->qnkeys + 1 >= maxkeys || |
719 | newowner->qnbytes + key->quotalen >= | 741 | newowner->qnbytes + key->quotalen >= maxbytes || |
720 | KEYQUOTA_MAX_BYTES) | 742 | newowner->qnbytes + key->quotalen < |
743 | newowner->qnbytes) | ||
721 | goto quota_overrun; | 744 | goto quota_overrun; |
722 | 745 | ||
723 | newowner->qnkeys++; | 746 | newowner->qnkeys++; |
@@ -821,9 +844,10 @@ long keyctl_instantiate_key(key_serial_t id, | |||
821 | key_ref_t keyring_ref; | 844 | key_ref_t keyring_ref; |
822 | void *payload; | 845 | void *payload; |
823 | long ret; | 846 | long ret; |
847 | bool vm = false; | ||
824 | 848 | ||
825 | ret = -EINVAL; | 849 | ret = -EINVAL; |
826 | if (plen > 32767) | 850 | if (plen > 1024 * 1024 - 1) |
827 | goto error; | 851 | goto error; |
828 | 852 | ||
829 | /* the appropriate instantiation authorisation key must have been | 853 | /* the appropriate instantiation authorisation key must have been |
@@ -843,8 +867,14 @@ long keyctl_instantiate_key(key_serial_t id, | |||
843 | if (_payload) { | 867 | if (_payload) { |
844 | ret = -ENOMEM; | 868 | ret = -ENOMEM; |
845 | payload = kmalloc(plen, GFP_KERNEL); | 869 | payload = kmalloc(plen, GFP_KERNEL); |
846 | if (!payload) | 870 | if (!payload) { |
847 | goto error; | 871 | if (plen <= PAGE_SIZE) |
872 | goto error; | ||
873 | vm = true; | ||
874 | payload = vmalloc(plen); | ||
875 | if (!payload) | ||
876 | goto error; | ||
877 | } | ||
848 | 878 | ||
849 | ret = -EFAULT; | 879 | ret = -EFAULT; |
850 | if (copy_from_user(payload, _payload, plen) != 0) | 880 | if (copy_from_user(payload, _payload, plen) != 0) |
@@ -877,7 +907,10 @@ long keyctl_instantiate_key(key_serial_t id, | |||
877 | } | 907 | } |
878 | 908 | ||
879 | error2: | 909 | error2: |
880 | kfree(payload); | 910 | if (!vm) |
911 | kfree(payload); | ||
912 | else | ||
913 | vfree(payload); | ||
881 | error: | 914 | error: |
882 | return ret; | 915 | return ret; |
883 | 916 | ||
@@ -1055,6 +1088,66 @@ error: | |||
1055 | 1088 | ||
1056 | } /* end keyctl_assume_authority() */ | 1089 | } /* end keyctl_assume_authority() */ |
1057 | 1090 | ||
1091 | /* | ||
1092 | * get the security label of a key | ||
1093 | * - the key must grant us view permission | ||
1094 | * - if there's a buffer, we place up to buflen bytes of data into it | ||
1095 | * - unless there's an error, we return the amount of information available, | ||
1096 | * irrespective of how much we may have copied (including the terminal NUL) | ||
1097 | * - implements keyctl(KEYCTL_GET_SECURITY) | ||
1098 | */ | ||
1099 | long keyctl_get_security(key_serial_t keyid, | ||
1100 | char __user *buffer, | ||
1101 | size_t buflen) | ||
1102 | { | ||
1103 | struct key *key, *instkey; | ||
1104 | key_ref_t key_ref; | ||
1105 | char *context; | ||
1106 | long ret; | ||
1107 | |||
1108 | key_ref = lookup_user_key(NULL, keyid, 0, 1, KEY_VIEW); | ||
1109 | if (IS_ERR(key_ref)) { | ||
1110 | if (PTR_ERR(key_ref) != -EACCES) | ||
1111 | return PTR_ERR(key_ref); | ||
1112 | |||
1113 | /* viewing a key under construction is also permitted if we | ||
1114 | * have the authorisation token handy */ | ||
1115 | instkey = key_get_instantiation_authkey(keyid); | ||
1116 | if (IS_ERR(instkey)) | ||
1117 | return PTR_ERR(key_ref); | ||
1118 | key_put(instkey); | ||
1119 | |||
1120 | key_ref = lookup_user_key(NULL, keyid, 0, 1, 0); | ||
1121 | if (IS_ERR(key_ref)) | ||
1122 | return PTR_ERR(key_ref); | ||
1123 | } | ||
1124 | |||
1125 | key = key_ref_to_ptr(key_ref); | ||
1126 | ret = security_key_getsecurity(key, &context); | ||
1127 | if (ret == 0) { | ||
1128 | /* if no information was returned, give userspace an empty | ||
1129 | * string */ | ||
1130 | ret = 1; | ||
1131 | if (buffer && buflen > 0 && | ||
1132 | copy_to_user(buffer, "", 1) != 0) | ||
1133 | ret = -EFAULT; | ||
1134 | } else if (ret > 0) { | ||
1135 | /* return as much data as there's room for */ | ||
1136 | if (buffer && buflen > 0) { | ||
1137 | if (buflen > ret) | ||
1138 | buflen = ret; | ||
1139 | |||
1140 | if (copy_to_user(buffer, context, buflen) != 0) | ||
1141 | ret = -EFAULT; | ||
1142 | } | ||
1143 | |||
1144 | kfree(context); | ||
1145 | } | ||
1146 | |||
1147 | key_ref_put(key_ref); | ||
1148 | return ret; | ||
1149 | } | ||
1150 | |||
1058 | /*****************************************************************************/ | 1151 | /*****************************************************************************/ |
1059 | /* | 1152 | /* |
1060 | * the key control system call | 1153 | * the key control system call |
@@ -1135,6 +1228,11 @@ asmlinkage long sys_keyctl(int option, unsigned long arg2, unsigned long arg3, | |||
1135 | case KEYCTL_ASSUME_AUTHORITY: | 1228 | case KEYCTL_ASSUME_AUTHORITY: |
1136 | return keyctl_assume_authority((key_serial_t) arg2); | 1229 | return keyctl_assume_authority((key_serial_t) arg2); |
1137 | 1230 | ||
1231 | case KEYCTL_GET_SECURITY: | ||
1232 | return keyctl_get_security((key_serial_t) arg2, | ||
1233 | (char *) arg3, | ||
1234 | (size_t) arg4); | ||
1235 | |||
1138 | default: | 1236 | default: |
1139 | return -EOPNOTSUPP; | 1237 | return -EOPNOTSUPP; |
1140 | } | 1238 | } |
diff --git a/security/keys/keyring.c b/security/keys/keyring.c index 88292e3dee96..a9ab8affc092 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* keyring.c: keyring handling | 1 | /* Keyring handling |
2 | * | 2 | * |
3 | * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -79,7 +79,7 @@ static DECLARE_RWSEM(keyring_serialise_link_sem); | |||
79 | * publish the name of a keyring so that it can be found by name (if it has | 79 | * publish the name of a keyring so that it can be found by name (if it has |
80 | * one) | 80 | * one) |
81 | */ | 81 | */ |
82 | void keyring_publish_name(struct key *keyring) | 82 | static void keyring_publish_name(struct key *keyring) |
83 | { | 83 | { |
84 | int bucket; | 84 | int bucket; |
85 | 85 | ||
@@ -292,7 +292,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref, | |||
292 | 292 | ||
293 | struct keyring_list *keylist; | 293 | struct keyring_list *keylist; |
294 | struct timespec now; | 294 | struct timespec now; |
295 | unsigned long possessed; | 295 | unsigned long possessed, kflags; |
296 | struct key *keyring, *key; | 296 | struct key *keyring, *key; |
297 | key_ref_t key_ref; | 297 | key_ref_t key_ref; |
298 | long err; | 298 | long err; |
@@ -319,6 +319,32 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref, | |||
319 | err = -EAGAIN; | 319 | err = -EAGAIN; |
320 | sp = 0; | 320 | sp = 0; |
321 | 321 | ||
322 | /* firstly we should check to see if this top-level keyring is what we | ||
323 | * are looking for */ | ||
324 | key_ref = ERR_PTR(-EAGAIN); | ||
325 | kflags = keyring->flags; | ||
326 | if (keyring->type == type && match(keyring, description)) { | ||
327 | key = keyring; | ||
328 | |||
329 | /* check it isn't negative and hasn't expired or been | ||
330 | * revoked */ | ||
331 | if (kflags & (1 << KEY_FLAG_REVOKED)) | ||
332 | goto error_2; | ||
333 | if (key->expiry && now.tv_sec >= key->expiry) | ||
334 | goto error_2; | ||
335 | key_ref = ERR_PTR(-ENOKEY); | ||
336 | if (kflags & (1 << KEY_FLAG_NEGATIVE)) | ||
337 | goto error_2; | ||
338 | goto found; | ||
339 | } | ||
340 | |||
341 | /* otherwise, the top keyring must not be revoked, expired, or | ||
342 | * negatively instantiated if we are to search it */ | ||
343 | key_ref = ERR_PTR(-EAGAIN); | ||
344 | if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) || | ||
345 | (keyring->expiry && now.tv_sec >= keyring->expiry)) | ||
346 | goto error_2; | ||
347 | |||
322 | /* start processing a new keyring */ | 348 | /* start processing a new keyring */ |
323 | descend: | 349 | descend: |
324 | if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) | 350 | if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) |
@@ -331,13 +357,14 @@ descend: | |||
331 | /* iterate through the keys in this keyring first */ | 357 | /* iterate through the keys in this keyring first */ |
332 | for (kix = 0; kix < keylist->nkeys; kix++) { | 358 | for (kix = 0; kix < keylist->nkeys; kix++) { |
333 | key = keylist->keys[kix]; | 359 | key = keylist->keys[kix]; |
360 | kflags = key->flags; | ||
334 | 361 | ||
335 | /* ignore keys not of this type */ | 362 | /* ignore keys not of this type */ |
336 | if (key->type != type) | 363 | if (key->type != type) |
337 | continue; | 364 | continue; |
338 | 365 | ||
339 | /* skip revoked keys and expired keys */ | 366 | /* skip revoked keys and expired keys */ |
340 | if (test_bit(KEY_FLAG_REVOKED, &key->flags)) | 367 | if (kflags & (1 << KEY_FLAG_REVOKED)) |
341 | continue; | 368 | continue; |
342 | 369 | ||
343 | if (key->expiry && now.tv_sec >= key->expiry) | 370 | if (key->expiry && now.tv_sec >= key->expiry) |
@@ -352,8 +379,8 @@ descend: | |||
352 | context, KEY_SEARCH) < 0) | 379 | context, KEY_SEARCH) < 0) |
353 | continue; | 380 | continue; |
354 | 381 | ||
355 | /* we set a different error code if we find a negative key */ | 382 | /* we set a different error code if we pass a negative key */ |
356 | if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { | 383 | if (kflags & (1 << KEY_FLAG_NEGATIVE)) { |
357 | err = -ENOKEY; | 384 | err = -ENOKEY; |
358 | continue; | 385 | continue; |
359 | } | 386 | } |
@@ -489,10 +516,9 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref, | |||
489 | /* | 516 | /* |
490 | * find a keyring with the specified name | 517 | * find a keyring with the specified name |
491 | * - all named keyrings are searched | 518 | * - all named keyrings are searched |
492 | * - only find keyrings with search permission for the process | 519 | * - normally only finds keyrings with search permission for the current process |
493 | * - only find keyrings with a serial number greater than the one specified | ||
494 | */ | 520 | */ |
495 | struct key *find_keyring_by_name(const char *name, key_serial_t bound) | 521 | struct key *find_keyring_by_name(const char *name, bool skip_perm_check) |
496 | { | 522 | { |
497 | struct key *keyring; | 523 | struct key *keyring; |
498 | int bucket; | 524 | int bucket; |
@@ -518,15 +544,11 @@ struct key *find_keyring_by_name(const char *name, key_serial_t bound) | |||
518 | if (strcmp(keyring->description, name) != 0) | 544 | if (strcmp(keyring->description, name) != 0) |
519 | continue; | 545 | continue; |
520 | 546 | ||
521 | if (key_permission(make_key_ref(keyring, 0), | 547 | if (!skip_perm_check && |
548 | key_permission(make_key_ref(keyring, 0), | ||
522 | KEY_SEARCH) < 0) | 549 | KEY_SEARCH) < 0) |
523 | continue; | 550 | continue; |
524 | 551 | ||
525 | /* found a potential candidate, but we still need to | ||
526 | * check the serial number */ | ||
527 | if (keyring->serial <= bound) | ||
528 | continue; | ||
529 | |||
530 | /* we've got a match */ | 552 | /* we've got a match */ |
531 | atomic_inc(&keyring->usage); | 553 | atomic_inc(&keyring->usage); |
532 | read_unlock(&keyring_name_lock); | 554 | read_unlock(&keyring_name_lock); |
diff --git a/security/keys/proc.c b/security/keys/proc.c index 694126003ed3..f619170da760 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c | |||
@@ -70,19 +70,15 @@ static int __init key_proc_init(void) | |||
70 | struct proc_dir_entry *p; | 70 | struct proc_dir_entry *p; |
71 | 71 | ||
72 | #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS | 72 | #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS |
73 | p = create_proc_entry("keys", 0, NULL); | 73 | p = proc_create("keys", 0, NULL, &proc_keys_fops); |
74 | if (!p) | 74 | if (!p) |
75 | panic("Cannot create /proc/keys\n"); | 75 | panic("Cannot create /proc/keys\n"); |
76 | |||
77 | p->proc_fops = &proc_keys_fops; | ||
78 | #endif | 76 | #endif |
79 | 77 | ||
80 | p = create_proc_entry("key-users", 0, NULL); | 78 | p = proc_create("key-users", 0, NULL, &proc_key_users_fops); |
81 | if (!p) | 79 | if (!p) |
82 | panic("Cannot create /proc/key-users\n"); | 80 | panic("Cannot create /proc/key-users\n"); |
83 | 81 | ||
84 | p->proc_fops = &proc_key_users_fops; | ||
85 | |||
86 | return 0; | 82 | return 0; |
87 | 83 | ||
88 | } /* end key_proc_init() */ | 84 | } /* end key_proc_init() */ |
@@ -246,6 +242,10 @@ static int proc_key_users_show(struct seq_file *m, void *v) | |||
246 | { | 242 | { |
247 | struct rb_node *_p = v; | 243 | struct rb_node *_p = v; |
248 | struct key_user *user = rb_entry(_p, struct key_user, node); | 244 | struct key_user *user = rb_entry(_p, struct key_user, node); |
245 | unsigned maxkeys = (user->uid == 0) ? | ||
246 | key_quota_root_maxkeys : key_quota_maxkeys; | ||
247 | unsigned maxbytes = (user->uid == 0) ? | ||
248 | key_quota_root_maxbytes : key_quota_maxbytes; | ||
249 | 249 | ||
250 | seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", | 250 | seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", |
251 | user->uid, | 251 | user->uid, |
@@ -253,10 +253,9 @@ static int proc_key_users_show(struct seq_file *m, void *v) | |||
253 | atomic_read(&user->nkeys), | 253 | atomic_read(&user->nkeys), |
254 | atomic_read(&user->nikeys), | 254 | atomic_read(&user->nikeys), |
255 | user->qnkeys, | 255 | user->qnkeys, |
256 | KEYQUOTA_MAX_KEYS, | 256 | maxkeys, |
257 | user->qnbytes, | 257 | user->qnbytes, |
258 | KEYQUOTA_MAX_BYTES | 258 | maxbytes); |
259 | ); | ||
260 | 259 | ||
261 | return 0; | 260 | return 0; |
262 | 261 | ||
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index c886a2bb792a..5be6d018759a 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* process_keys.c: management of a process's keyrings | 1 | /* Management of a process's keyrings |
2 | * | 2 | * |
3 | * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
@@ -23,6 +23,9 @@ | |||
23 | /* session keyring create vs join semaphore */ | 23 | /* session keyring create vs join semaphore */ |
24 | static DEFINE_MUTEX(key_session_mutex); | 24 | static DEFINE_MUTEX(key_session_mutex); |
25 | 25 | ||
26 | /* user keyring creation semaphore */ | ||
27 | static DEFINE_MUTEX(key_user_keyring_mutex); | ||
28 | |||
26 | /* the root user's tracking struct */ | 29 | /* the root user's tracking struct */ |
27 | struct key_user root_key_user = { | 30 | struct key_user root_key_user = { |
28 | .usage = ATOMIC_INIT(3), | 31 | .usage = ATOMIC_INIT(3), |
@@ -33,78 +36,84 @@ struct key_user root_key_user = { | |||
33 | .uid = 0, | 36 | .uid = 0, |
34 | }; | 37 | }; |
35 | 38 | ||
36 | /* the root user's UID keyring */ | ||
37 | struct key root_user_keyring = { | ||
38 | .usage = ATOMIC_INIT(1), | ||
39 | .serial = 2, | ||
40 | .type = &key_type_keyring, | ||
41 | .user = &root_key_user, | ||
42 | .sem = __RWSEM_INITIALIZER(root_user_keyring.sem), | ||
43 | .perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL, | ||
44 | .flags = 1 << KEY_FLAG_INSTANTIATED, | ||
45 | .description = "_uid.0", | ||
46 | #ifdef KEY_DEBUGGING | ||
47 | .magic = KEY_DEBUG_MAGIC, | ||
48 | #endif | ||
49 | }; | ||
50 | |||
51 | /* the root user's default session keyring */ | ||
52 | struct key root_session_keyring = { | ||
53 | .usage = ATOMIC_INIT(1), | ||
54 | .serial = 1, | ||
55 | .type = &key_type_keyring, | ||
56 | .user = &root_key_user, | ||
57 | .sem = __RWSEM_INITIALIZER(root_session_keyring.sem), | ||
58 | .perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL, | ||
59 | .flags = 1 << KEY_FLAG_INSTANTIATED, | ||
60 | .description = "_uid_ses.0", | ||
61 | #ifdef KEY_DEBUGGING | ||
62 | .magic = KEY_DEBUG_MAGIC, | ||
63 | #endif | ||
64 | }; | ||
65 | |||
66 | /*****************************************************************************/ | 39 | /*****************************************************************************/ |
67 | /* | 40 | /* |
68 | * allocate the keyrings to be associated with a UID | 41 | * install user and user session keyrings for a particular UID |
69 | */ | 42 | */ |
70 | int alloc_uid_keyring(struct user_struct *user, | 43 | static int install_user_keyrings(struct task_struct *tsk) |
71 | struct task_struct *ctx) | ||
72 | { | 44 | { |
45 | struct user_struct *user = tsk->user; | ||
73 | struct key *uid_keyring, *session_keyring; | 46 | struct key *uid_keyring, *session_keyring; |
74 | char buf[20]; | 47 | char buf[20]; |
75 | int ret; | 48 | int ret; |
76 | 49 | ||
77 | /* concoct a default session keyring */ | 50 | kenter("%p{%u}", user, user->uid); |
78 | sprintf(buf, "_uid_ses.%u", user->uid); | ||
79 | 51 | ||
80 | session_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, | 52 | if (user->uid_keyring) { |
81 | KEY_ALLOC_IN_QUOTA, NULL); | 53 | kleave(" = 0 [exist]"); |
82 | if (IS_ERR(session_keyring)) { | 54 | return 0; |
83 | ret = PTR_ERR(session_keyring); | ||
84 | goto error; | ||
85 | } | 55 | } |
86 | 56 | ||
87 | /* and a UID specific keyring, pointed to by the default session | 57 | mutex_lock(&key_user_keyring_mutex); |
88 | * keyring */ | 58 | ret = 0; |
89 | sprintf(buf, "_uid.%u", user->uid); | ||
90 | 59 | ||
91 | uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, ctx, | 60 | if (!user->uid_keyring) { |
92 | KEY_ALLOC_IN_QUOTA, session_keyring); | 61 | /* get the UID-specific keyring |
93 | if (IS_ERR(uid_keyring)) { | 62 | * - there may be one in existence already as it may have been |
94 | key_put(session_keyring); | 63 | * pinned by a session, but the user_struct pointing to it |
95 | ret = PTR_ERR(uid_keyring); | 64 | * may have been destroyed by setuid */ |
96 | goto error; | 65 | sprintf(buf, "_uid.%u", user->uid); |
66 | |||
67 | uid_keyring = find_keyring_by_name(buf, true); | ||
68 | if (IS_ERR(uid_keyring)) { | ||
69 | uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, | ||
70 | tsk, KEY_ALLOC_IN_QUOTA, | ||
71 | NULL); | ||
72 | if (IS_ERR(uid_keyring)) { | ||
73 | ret = PTR_ERR(uid_keyring); | ||
74 | goto error; | ||
75 | } | ||
76 | } | ||
77 | |||
78 | /* get a default session keyring (which might also exist | ||
79 | * already) */ | ||
80 | sprintf(buf, "_uid_ses.%u", user->uid); | ||
81 | |||
82 | session_keyring = find_keyring_by_name(buf, true); | ||
83 | if (IS_ERR(session_keyring)) { | ||
84 | session_keyring = | ||
85 | keyring_alloc(buf, user->uid, (gid_t) -1, | ||
86 | tsk, KEY_ALLOC_IN_QUOTA, NULL); | ||
87 | if (IS_ERR(session_keyring)) { | ||
88 | ret = PTR_ERR(session_keyring); | ||
89 | goto error_release; | ||
90 | } | ||
91 | |||
92 | /* we install a link from the user session keyring to | ||
93 | * the user keyring */ | ||
94 | ret = key_link(session_keyring, uid_keyring); | ||
95 | if (ret < 0) | ||
96 | goto error_release_both; | ||
97 | } | ||
98 | |||
99 | /* install the keyrings */ | ||
100 | user->uid_keyring = uid_keyring; | ||
101 | user->session_keyring = session_keyring; | ||
97 | } | 102 | } |
98 | 103 | ||
99 | /* install the keyrings */ | 104 | mutex_unlock(&key_user_keyring_mutex); |
100 | user->uid_keyring = uid_keyring; | 105 | kleave(" = 0"); |
101 | user->session_keyring = session_keyring; | 106 | return 0; |
102 | ret = 0; | ||
103 | 107 | ||
108 | error_release_both: | ||
109 | key_put(session_keyring); | ||
110 | error_release: | ||
111 | key_put(uid_keyring); | ||
104 | error: | 112 | error: |
113 | mutex_unlock(&key_user_keyring_mutex); | ||
114 | kleave(" = %d", ret); | ||
105 | return ret; | 115 | return ret; |
106 | 116 | } | |
107 | } /* end alloc_uid_keyring() */ | ||
108 | 117 | ||
109 | /*****************************************************************************/ | 118 | /*****************************************************************************/ |
110 | /* | 119 | /* |
@@ -481,7 +490,7 @@ key_ref_t search_process_keyrings(struct key_type *type, | |||
481 | } | 490 | } |
482 | } | 491 | } |
483 | /* or search the user-session keyring */ | 492 | /* or search the user-session keyring */ |
484 | else { | 493 | else if (context->user->session_keyring) { |
485 | key_ref = keyring_search_aux( | 494 | key_ref = keyring_search_aux( |
486 | make_key_ref(context->user->session_keyring, 1), | 495 | make_key_ref(context->user->session_keyring, 1), |
487 | context, type, description, match); | 496 | context, type, description, match); |
@@ -614,6 +623,9 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id, | |||
614 | if (!context->signal->session_keyring) { | 623 | if (!context->signal->session_keyring) { |
615 | /* always install a session keyring upon access if one | 624 | /* always install a session keyring upon access if one |
616 | * doesn't exist yet */ | 625 | * doesn't exist yet */ |
626 | ret = install_user_keyrings(context); | ||
627 | if (ret < 0) | ||
628 | goto error; | ||
617 | ret = install_session_keyring( | 629 | ret = install_session_keyring( |
618 | context, context->user->session_keyring); | 630 | context, context->user->session_keyring); |
619 | if (ret < 0) | 631 | if (ret < 0) |
@@ -628,12 +640,24 @@ key_ref_t lookup_user_key(struct task_struct *context, key_serial_t id, | |||
628 | break; | 640 | break; |
629 | 641 | ||
630 | case KEY_SPEC_USER_KEYRING: | 642 | case KEY_SPEC_USER_KEYRING: |
643 | if (!context->user->uid_keyring) { | ||
644 | ret = install_user_keyrings(context); | ||
645 | if (ret < 0) | ||
646 | goto error; | ||
647 | } | ||
648 | |||
631 | key = context->user->uid_keyring; | 649 | key = context->user->uid_keyring; |
632 | atomic_inc(&key->usage); | 650 | atomic_inc(&key->usage); |
633 | key_ref = make_key_ref(key, 1); | 651 | key_ref = make_key_ref(key, 1); |
634 | break; | 652 | break; |
635 | 653 | ||
636 | case KEY_SPEC_USER_SESSION_KEYRING: | 654 | case KEY_SPEC_USER_SESSION_KEYRING: |
655 | if (!context->user->session_keyring) { | ||
656 | ret = install_user_keyrings(context); | ||
657 | if (ret < 0) | ||
658 | goto error; | ||
659 | } | ||
660 | |||
637 | key = context->user->session_keyring; | 661 | key = context->user->session_keyring; |
638 | atomic_inc(&key->usage); | 662 | atomic_inc(&key->usage); |
639 | key_ref = make_key_ref(key, 1); | 663 | key_ref = make_key_ref(key, 1); |
@@ -744,7 +768,7 @@ long join_session_keyring(const char *name) | |||
744 | mutex_lock(&key_session_mutex); | 768 | mutex_lock(&key_session_mutex); |
745 | 769 | ||
746 | /* look for an existing keyring of this name */ | 770 | /* look for an existing keyring of this name */ |
747 | keyring = find_keyring_by_name(name, 0); | 771 | keyring = find_keyring_by_name(name, false); |
748 | if (PTR_ERR(keyring) == -ENOKEY) { | 772 | if (PTR_ERR(keyring) == -ENOKEY) { |
749 | /* not found - try and create a new one */ | 773 | /* not found - try and create a new one */ |
750 | keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk, | 774 | keyring = keyring_alloc(name, tsk->uid, tsk->gid, tsk, |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 5ecc5057fb54..ba32ca6469bd 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/kmod.h> | 16 | #include <linux/kmod.h> |
17 | #include <linux/err.h> | 17 | #include <linux/err.h> |
18 | #include <linux/keyctl.h> | 18 | #include <linux/keyctl.h> |
19 | #include <linux/slab.h> | ||
19 | #include "internal.h" | 20 | #include "internal.h" |
20 | 21 | ||
21 | /* | 22 | /* |
@@ -161,21 +162,22 @@ error_alloc: | |||
161 | * call out to userspace for key construction | 162 | * call out to userspace for key construction |
162 | * - we ignore program failure and go on key status instead | 163 | * - we ignore program failure and go on key status instead |
163 | */ | 164 | */ |
164 | static int construct_key(struct key *key, const char *callout_info, void *aux) | 165 | static int construct_key(struct key *key, const void *callout_info, |
166 | size_t callout_len, void *aux) | ||
165 | { | 167 | { |
166 | struct key_construction *cons; | 168 | struct key_construction *cons; |
167 | request_key_actor_t actor; | 169 | request_key_actor_t actor; |
168 | struct key *authkey; | 170 | struct key *authkey; |
169 | int ret; | 171 | int ret; |
170 | 172 | ||
171 | kenter("%d,%s,%p", key->serial, callout_info, aux); | 173 | kenter("%d,%p,%zu,%p", key->serial, callout_info, callout_len, aux); |
172 | 174 | ||
173 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); | 175 | cons = kmalloc(sizeof(*cons), GFP_KERNEL); |
174 | if (!cons) | 176 | if (!cons) |
175 | return -ENOMEM; | 177 | return -ENOMEM; |
176 | 178 | ||
177 | /* allocate an authorisation key */ | 179 | /* allocate an authorisation key */ |
178 | authkey = request_key_auth_new(key, callout_info); | 180 | authkey = request_key_auth_new(key, callout_info, callout_len); |
179 | if (IS_ERR(authkey)) { | 181 | if (IS_ERR(authkey)) { |
180 | kfree(cons); | 182 | kfree(cons); |
181 | ret = PTR_ERR(authkey); | 183 | ret = PTR_ERR(authkey); |
@@ -331,6 +333,7 @@ alloc_failed: | |||
331 | static struct key *construct_key_and_link(struct key_type *type, | 333 | static struct key *construct_key_and_link(struct key_type *type, |
332 | const char *description, | 334 | const char *description, |
333 | const char *callout_info, | 335 | const char *callout_info, |
336 | size_t callout_len, | ||
334 | void *aux, | 337 | void *aux, |
335 | struct key *dest_keyring, | 338 | struct key *dest_keyring, |
336 | unsigned long flags) | 339 | unsigned long flags) |
@@ -348,7 +351,7 @@ static struct key *construct_key_and_link(struct key_type *type, | |||
348 | key_user_put(user); | 351 | key_user_put(user); |
349 | 352 | ||
350 | if (ret == 0) { | 353 | if (ret == 0) { |
351 | ret = construct_key(key, callout_info, aux); | 354 | ret = construct_key(key, callout_info, callout_len, aux); |
352 | if (ret < 0) | 355 | if (ret < 0) |
353 | goto construction_failed; | 356 | goto construction_failed; |
354 | } | 357 | } |
@@ -370,7 +373,8 @@ construction_failed: | |||
370 | */ | 373 | */ |
371 | struct key *request_key_and_link(struct key_type *type, | 374 | struct key *request_key_and_link(struct key_type *type, |
372 | const char *description, | 375 | const char *description, |
373 | const char *callout_info, | 376 | const void *callout_info, |
377 | size_t callout_len, | ||
374 | void *aux, | 378 | void *aux, |
375 | struct key *dest_keyring, | 379 | struct key *dest_keyring, |
376 | unsigned long flags) | 380 | unsigned long flags) |
@@ -378,8 +382,8 @@ struct key *request_key_and_link(struct key_type *type, | |||
378 | struct key *key; | 382 | struct key *key; |
379 | key_ref_t key_ref; | 383 | key_ref_t key_ref; |
380 | 384 | ||
381 | kenter("%s,%s,%s,%p,%p,%lx", | 385 | kenter("%s,%s,%p,%zu,%p,%p,%lx", |
382 | type->name, description, callout_info, aux, | 386 | type->name, description, callout_info, callout_len, aux, |
383 | dest_keyring, flags); | 387 | dest_keyring, flags); |
384 | 388 | ||
385 | /* search all the process keyrings for a key */ | 389 | /* search all the process keyrings for a key */ |
@@ -398,7 +402,8 @@ struct key *request_key_and_link(struct key_type *type, | |||
398 | goto error; | 402 | goto error; |
399 | 403 | ||
400 | key = construct_key_and_link(type, description, callout_info, | 404 | key = construct_key_and_link(type, description, callout_info, |
401 | aux, dest_keyring, flags); | 405 | callout_len, aux, dest_keyring, |
406 | flags); | ||
402 | } | 407 | } |
403 | 408 | ||
404 | error: | 409 | error: |
@@ -434,10 +439,13 @@ struct key *request_key(struct key_type *type, | |||
434 | const char *callout_info) | 439 | const char *callout_info) |
435 | { | 440 | { |
436 | struct key *key; | 441 | struct key *key; |
442 | size_t callout_len = 0; | ||
437 | int ret; | 443 | int ret; |
438 | 444 | ||
439 | key = request_key_and_link(type, description, callout_info, NULL, | 445 | if (callout_info) |
440 | NULL, KEY_ALLOC_IN_QUOTA); | 446 | callout_len = strlen(callout_info); |
447 | key = request_key_and_link(type, description, callout_info, callout_len, | ||
448 | NULL, NULL, KEY_ALLOC_IN_QUOTA); | ||
441 | if (!IS_ERR(key)) { | 449 | if (!IS_ERR(key)) { |
442 | ret = wait_for_key_construction(key, false); | 450 | ret = wait_for_key_construction(key, false); |
443 | if (ret < 0) { | 451 | if (ret < 0) { |
@@ -458,14 +466,15 @@ EXPORT_SYMBOL(request_key); | |||
458 | */ | 466 | */ |
459 | struct key *request_key_with_auxdata(struct key_type *type, | 467 | struct key *request_key_with_auxdata(struct key_type *type, |
460 | const char *description, | 468 | const char *description, |
461 | const char *callout_info, | 469 | const void *callout_info, |
470 | size_t callout_len, | ||
462 | void *aux) | 471 | void *aux) |
463 | { | 472 | { |
464 | struct key *key; | 473 | struct key *key; |
465 | int ret; | 474 | int ret; |
466 | 475 | ||
467 | key = request_key_and_link(type, description, callout_info, aux, | 476 | key = request_key_and_link(type, description, callout_info, callout_len, |
468 | NULL, KEY_ALLOC_IN_QUOTA); | 477 | aux, NULL, KEY_ALLOC_IN_QUOTA); |
469 | if (!IS_ERR(key)) { | 478 | if (!IS_ERR(key)) { |
470 | ret = wait_for_key_construction(key, false); | 479 | ret = wait_for_key_construction(key, false); |
471 | if (ret < 0) { | 480 | if (ret < 0) { |
@@ -485,10 +494,12 @@ EXPORT_SYMBOL(request_key_with_auxdata); | |||
485 | */ | 494 | */ |
486 | struct key *request_key_async(struct key_type *type, | 495 | struct key *request_key_async(struct key_type *type, |
487 | const char *description, | 496 | const char *description, |
488 | const char *callout_info) | 497 | const void *callout_info, |
498 | size_t callout_len) | ||
489 | { | 499 | { |
490 | return request_key_and_link(type, description, callout_info, NULL, | 500 | return request_key_and_link(type, description, callout_info, |
491 | NULL, KEY_ALLOC_IN_QUOTA); | 501 | callout_len, NULL, NULL, |
502 | KEY_ALLOC_IN_QUOTA); | ||
492 | } | 503 | } |
493 | EXPORT_SYMBOL(request_key_async); | 504 | EXPORT_SYMBOL(request_key_async); |
494 | 505 | ||
@@ -500,10 +511,11 @@ EXPORT_SYMBOL(request_key_async); | |||
500 | */ | 511 | */ |
501 | struct key *request_key_async_with_auxdata(struct key_type *type, | 512 | struct key *request_key_async_with_auxdata(struct key_type *type, |
502 | const char *description, | 513 | const char *description, |
503 | const char *callout_info, | 514 | const void *callout_info, |
515 | size_t callout_len, | ||
504 | void *aux) | 516 | void *aux) |
505 | { | 517 | { |
506 | return request_key_and_link(type, description, callout_info, aux, | 518 | return request_key_and_link(type, description, callout_info, |
507 | NULL, KEY_ALLOC_IN_QUOTA); | 519 | callout_len, aux, NULL, KEY_ALLOC_IN_QUOTA); |
508 | } | 520 | } |
509 | EXPORT_SYMBOL(request_key_async_with_auxdata); | 521 | EXPORT_SYMBOL(request_key_async_with_auxdata); |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index e42b5252486f..bd237b0a6331 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/sched.h> | 15 | #include <linux/sched.h> |
16 | #include <linux/err.h> | 16 | #include <linux/err.h> |
17 | #include <linux/seq_file.h> | 17 | #include <linux/seq_file.h> |
18 | #include <linux/slab.h> | ||
18 | #include <asm/uaccess.h> | 19 | #include <asm/uaccess.h> |
19 | #include "internal.h" | 20 | #include "internal.h" |
20 | 21 | ||
@@ -61,7 +62,7 @@ static void request_key_auth_describe(const struct key *key, | |||
61 | 62 | ||
62 | seq_puts(m, "key:"); | 63 | seq_puts(m, "key:"); |
63 | seq_puts(m, key->description); | 64 | seq_puts(m, key->description); |
64 | seq_printf(m, " pid:%d ci:%zu", rka->pid, strlen(rka->callout_info)); | 65 | seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); |
65 | 66 | ||
66 | } /* end request_key_auth_describe() */ | 67 | } /* end request_key_auth_describe() */ |
67 | 68 | ||
@@ -77,7 +78,7 @@ static long request_key_auth_read(const struct key *key, | |||
77 | size_t datalen; | 78 | size_t datalen; |
78 | long ret; | 79 | long ret; |
79 | 80 | ||
80 | datalen = strlen(rka->callout_info); | 81 | datalen = rka->callout_len; |
81 | ret = datalen; | 82 | ret = datalen; |
82 | 83 | ||
83 | /* we can return the data as is */ | 84 | /* we can return the data as is */ |
@@ -137,7 +138,8 @@ static void request_key_auth_destroy(struct key *key) | |||
137 | * create an authorisation token for /sbin/request-key or whoever to gain | 138 | * create an authorisation token for /sbin/request-key or whoever to gain |
138 | * access to the caller's security data | 139 | * access to the caller's security data |
139 | */ | 140 | */ |
140 | struct key *request_key_auth_new(struct key *target, const char *callout_info) | 141 | struct key *request_key_auth_new(struct key *target, const void *callout_info, |
142 | size_t callout_len) | ||
141 | { | 143 | { |
142 | struct request_key_auth *rka, *irka; | 144 | struct request_key_auth *rka, *irka; |
143 | struct key *authkey = NULL; | 145 | struct key *authkey = NULL; |
@@ -152,7 +154,7 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info) | |||
152 | kleave(" = -ENOMEM"); | 154 | kleave(" = -ENOMEM"); |
153 | return ERR_PTR(-ENOMEM); | 155 | return ERR_PTR(-ENOMEM); |
154 | } | 156 | } |
155 | rka->callout_info = kmalloc(strlen(callout_info) + 1, GFP_KERNEL); | 157 | rka->callout_info = kmalloc(callout_len, GFP_KERNEL); |
156 | if (!rka->callout_info) { | 158 | if (!rka->callout_info) { |
157 | kleave(" = -ENOMEM"); | 159 | kleave(" = -ENOMEM"); |
158 | kfree(rka); | 160 | kfree(rka); |
@@ -186,7 +188,8 @@ struct key *request_key_auth_new(struct key *target, const char *callout_info) | |||
186 | } | 188 | } |
187 | 189 | ||
188 | rka->target_key = key_get(target); | 190 | rka->target_key = key_get(target); |
189 | strcpy(rka->callout_info, callout_info); | 191 | memcpy(rka->callout_info, callout_info, callout_len); |
192 | rka->callout_len = callout_len; | ||
190 | 193 | ||
191 | /* allocate the auth key */ | 194 | /* allocate the auth key */ |
192 | sprintf(desc, "%x", target->serial); | 195 | sprintf(desc, "%x", target->serial); |
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c new file mode 100644 index 000000000000..b611d493c2d8 --- /dev/null +++ b/security/keys/sysctl.c | |||
@@ -0,0 +1,50 @@ | |||
1 | /* Key management controls | ||
2 | * | ||
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public Licence | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the Licence, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/key.h> | ||
13 | #include <linux/sysctl.h> | ||
14 | #include "internal.h" | ||
15 | |||
16 | ctl_table key_sysctls[] = { | ||
17 | { | ||
18 | .ctl_name = CTL_UNNUMBERED, | ||
19 | .procname = "maxkeys", | ||
20 | .data = &key_quota_maxkeys, | ||
21 | .maxlen = sizeof(unsigned), | ||
22 | .mode = 0644, | ||
23 | .proc_handler = &proc_dointvec, | ||
24 | }, | ||
25 | { | ||
26 | .ctl_name = CTL_UNNUMBERED, | ||
27 | .procname = "maxbytes", | ||
28 | .data = &key_quota_maxbytes, | ||
29 | .maxlen = sizeof(unsigned), | ||
30 | .mode = 0644, | ||
31 | .proc_handler = &proc_dointvec, | ||
32 | }, | ||
33 | { | ||
34 | .ctl_name = CTL_UNNUMBERED, | ||
35 | .procname = "root_maxkeys", | ||
36 | .data = &key_quota_root_maxkeys, | ||
37 | .maxlen = sizeof(unsigned), | ||
38 | .mode = 0644, | ||
39 | .proc_handler = &proc_dointvec, | ||
40 | }, | ||
41 | { | ||
42 | .ctl_name = CTL_UNNUMBERED, | ||
43 | .procname = "root_maxbytes", | ||
44 | .data = &key_quota_root_maxbytes, | ||
45 | .maxlen = sizeof(unsigned), | ||
46 | .mode = 0644, | ||
47 | .proc_handler = &proc_dointvec, | ||
48 | }, | ||
49 | { .ctl_name = 0 } | ||
50 | }; | ||
diff --git a/security/security.c b/security/security.c index d5cb5898d967..8e64a29dc55d 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -491,23 +491,23 @@ void security_inode_delete(struct inode *inode) | |||
491 | security_ops->inode_delete(inode); | 491 | security_ops->inode_delete(inode); |
492 | } | 492 | } |
493 | 493 | ||
494 | int security_inode_setxattr(struct dentry *dentry, char *name, | 494 | int security_inode_setxattr(struct dentry *dentry, const char *name, |
495 | void *value, size_t size, int flags) | 495 | const void *value, size_t size, int flags) |
496 | { | 496 | { |
497 | if (unlikely(IS_PRIVATE(dentry->d_inode))) | 497 | if (unlikely(IS_PRIVATE(dentry->d_inode))) |
498 | return 0; | 498 | return 0; |
499 | return security_ops->inode_setxattr(dentry, name, value, size, flags); | 499 | return security_ops->inode_setxattr(dentry, name, value, size, flags); |
500 | } | 500 | } |
501 | 501 | ||
502 | void security_inode_post_setxattr(struct dentry *dentry, char *name, | 502 | void security_inode_post_setxattr(struct dentry *dentry, const char *name, |
503 | void *value, size_t size, int flags) | 503 | const void *value, size_t size, int flags) |
504 | { | 504 | { |
505 | if (unlikely(IS_PRIVATE(dentry->d_inode))) | 505 | if (unlikely(IS_PRIVATE(dentry->d_inode))) |
506 | return; | 506 | return; |
507 | security_ops->inode_post_setxattr(dentry, name, value, size, flags); | 507 | security_ops->inode_post_setxattr(dentry, name, value, size, flags); |
508 | } | 508 | } |
509 | 509 | ||
510 | int security_inode_getxattr(struct dentry *dentry, char *name) | 510 | int security_inode_getxattr(struct dentry *dentry, const char *name) |
511 | { | 511 | { |
512 | if (unlikely(IS_PRIVATE(dentry->d_inode))) | 512 | if (unlikely(IS_PRIVATE(dentry->d_inode))) |
513 | return 0; | 513 | return 0; |
@@ -521,7 +521,7 @@ int security_inode_listxattr(struct dentry *dentry) | |||
521 | return security_ops->inode_listxattr(dentry); | 521 | return security_ops->inode_listxattr(dentry); |
522 | } | 522 | } |
523 | 523 | ||
524 | int security_inode_removexattr(struct dentry *dentry, char *name) | 524 | int security_inode_removexattr(struct dentry *dentry, const char *name) |
525 | { | 525 | { |
526 | if (unlikely(IS_PRIVATE(dentry->d_inode))) | 526 | if (unlikely(IS_PRIVATE(dentry->d_inode))) |
527 | return 0; | 527 | return 0; |
@@ -1156,6 +1156,11 @@ int security_key_permission(key_ref_t key_ref, | |||
1156 | return security_ops->key_permission(key_ref, context, perm); | 1156 | return security_ops->key_permission(key_ref, context, perm); |
1157 | } | 1157 | } |
1158 | 1158 | ||
1159 | int security_key_getsecurity(struct key *key, char **_buffer) | ||
1160 | { | ||
1161 | return security_ops->key_getsecurity(key, _buffer); | ||
1162 | } | ||
1163 | |||
1159 | #endif /* CONFIG_KEYS */ | 1164 | #endif /* CONFIG_KEYS */ |
1160 | 1165 | ||
1161 | #ifdef CONFIG_AUDIT | 1166 | #ifdef CONFIG_AUDIT |
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 04acb5af8317..4e4de98941ae 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c | |||
@@ -2619,7 +2619,7 @@ static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) | |||
2619 | return dentry_has_perm(current, mnt, dentry, FILE__GETATTR); | 2619 | return dentry_has_perm(current, mnt, dentry, FILE__GETATTR); |
2620 | } | 2620 | } |
2621 | 2621 | ||
2622 | static int selinux_inode_setotherxattr(struct dentry *dentry, char *name) | 2622 | static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) |
2623 | { | 2623 | { |
2624 | if (!strncmp(name, XATTR_SECURITY_PREFIX, | 2624 | if (!strncmp(name, XATTR_SECURITY_PREFIX, |
2625 | sizeof XATTR_SECURITY_PREFIX - 1)) { | 2625 | sizeof XATTR_SECURITY_PREFIX - 1)) { |
@@ -2638,7 +2638,8 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, char *name) | |||
2638 | return dentry_has_perm(current, NULL, dentry, FILE__SETATTR); | 2638 | return dentry_has_perm(current, NULL, dentry, FILE__SETATTR); |
2639 | } | 2639 | } |
2640 | 2640 | ||
2641 | static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value, size_t size, int flags) | 2641 | static int selinux_inode_setxattr(struct dentry *dentry, const char *name, |
2642 | const void *value, size_t size, int flags) | ||
2642 | { | 2643 | { |
2643 | struct task_security_struct *tsec = current->security; | 2644 | struct task_security_struct *tsec = current->security; |
2644 | struct inode *inode = dentry->d_inode; | 2645 | struct inode *inode = dentry->d_inode; |
@@ -2687,8 +2688,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, char *name, void *value | |||
2687 | &ad); | 2688 | &ad); |
2688 | } | 2689 | } |
2689 | 2690 | ||
2690 | static void selinux_inode_post_setxattr(struct dentry *dentry, char *name, | 2691 | static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name, |
2691 | void *value, size_t size, int flags) | 2692 | const void *value, size_t size, |
2693 | int flags) | ||
2692 | { | 2694 | { |
2693 | struct inode *inode = dentry->d_inode; | 2695 | struct inode *inode = dentry->d_inode; |
2694 | struct inode_security_struct *isec = inode->i_security; | 2696 | struct inode_security_struct *isec = inode->i_security; |
@@ -2711,7 +2713,7 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, char *name, | |||
2711 | return; | 2713 | return; |
2712 | } | 2714 | } |
2713 | 2715 | ||
2714 | static int selinux_inode_getxattr(struct dentry *dentry, char *name) | 2716 | static int selinux_inode_getxattr(struct dentry *dentry, const char *name) |
2715 | { | 2717 | { |
2716 | return dentry_has_perm(current, NULL, dentry, FILE__GETATTR); | 2718 | return dentry_has_perm(current, NULL, dentry, FILE__GETATTR); |
2717 | } | 2719 | } |
@@ -2721,7 +2723,7 @@ static int selinux_inode_listxattr(struct dentry *dentry) | |||
2721 | return dentry_has_perm(current, NULL, dentry, FILE__GETATTR); | 2723 | return dentry_has_perm(current, NULL, dentry, FILE__GETATTR); |
2722 | } | 2724 | } |
2723 | 2725 | ||
2724 | static int selinux_inode_removexattr(struct dentry *dentry, char *name) | 2726 | static int selinux_inode_removexattr(struct dentry *dentry, const char *name) |
2725 | { | 2727 | { |
2726 | if (strcmp(name, XATTR_NAME_SELINUX)) | 2728 | if (strcmp(name, XATTR_NAME_SELINUX)) |
2727 | return selinux_inode_setotherxattr(dentry, name); | 2729 | return selinux_inode_setotherxattr(dentry, name); |
@@ -5298,6 +5300,20 @@ static int selinux_key_permission(key_ref_t key_ref, | |||
5298 | SECCLASS_KEY, perm, NULL); | 5300 | SECCLASS_KEY, perm, NULL); |
5299 | } | 5301 | } |
5300 | 5302 | ||
5303 | static int selinux_key_getsecurity(struct key *key, char **_buffer) | ||
5304 | { | ||
5305 | struct key_security_struct *ksec = key->security; | ||
5306 | char *context = NULL; | ||
5307 | unsigned len; | ||
5308 | int rc; | ||
5309 | |||
5310 | rc = security_sid_to_context(ksec->sid, &context, &len); | ||
5311 | if (!rc) | ||
5312 | rc = len; | ||
5313 | *_buffer = context; | ||
5314 | return rc; | ||
5315 | } | ||
5316 | |||
5301 | #endif | 5317 | #endif |
5302 | 5318 | ||
5303 | static struct security_operations selinux_ops = { | 5319 | static struct security_operations selinux_ops = { |
@@ -5486,6 +5502,7 @@ static struct security_operations selinux_ops = { | |||
5486 | .key_alloc = selinux_key_alloc, | 5502 | .key_alloc = selinux_key_alloc, |
5487 | .key_free = selinux_key_free, | 5503 | .key_free = selinux_key_free, |
5488 | .key_permission = selinux_key_permission, | 5504 | .key_permission = selinux_key_permission, |
5505 | .key_getsecurity = selinux_key_getsecurity, | ||
5489 | #endif | 5506 | #endif |
5490 | 5507 | ||
5491 | #ifdef CONFIG_AUDIT | 5508 | #ifdef CONFIG_AUDIT |
@@ -5534,14 +5551,6 @@ static __init int selinux_init(void) | |||
5534 | else | 5551 | else |
5535 | printk(KERN_DEBUG "SELinux: Starting in permissive mode\n"); | 5552 | printk(KERN_DEBUG "SELinux: Starting in permissive mode\n"); |
5536 | 5553 | ||
5537 | #ifdef CONFIG_KEYS | ||
5538 | /* Add security information to initial keyrings */ | ||
5539 | selinux_key_alloc(&root_user_keyring, current, | ||
5540 | KEY_ALLOC_NOT_IN_QUOTA); | ||
5541 | selinux_key_alloc(&root_session_keyring, current, | ||
5542 | KEY_ALLOC_NOT_IN_QUOTA); | ||
5543 | #endif | ||
5544 | |||
5545 | return 0; | 5554 | return 0; |
5546 | } | 5555 | } |
5547 | 5556 | ||
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 6445b6440648..cdb14add27d2 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h | |||
@@ -93,7 +93,7 @@ int security_change_sid(u32 ssid, u32 tsid, | |||
93 | int security_sid_to_context(u32 sid, char **scontext, | 93 | int security_sid_to_context(u32 sid, char **scontext, |
94 | u32 *scontext_len); | 94 | u32 *scontext_len); |
95 | 95 | ||
96 | int security_context_to_sid(char *scontext, u32 scontext_len, | 96 | int security_context_to_sid(const char *scontext, u32 scontext_len, |
97 | u32 *out_sid); | 97 | u32 *out_sid); |
98 | 98 | ||
99 | int security_context_to_sid_default(char *scontext, u32 scontext_len, | 99 | int security_context_to_sid_default(char *scontext, u32 scontext_len, |
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 2daaddbb301d..25cac5a2aa8e 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -708,7 +708,7 @@ out: | |||
708 | 708 | ||
709 | } | 709 | } |
710 | 710 | ||
711 | static int security_context_to_sid_core(char *scontext, u32 scontext_len, | 711 | static int security_context_to_sid_core(const char *scontext, u32 scontext_len, |
712 | u32 *sid, u32 def_sid, gfp_t gfp_flags) | 712 | u32 *sid, u32 def_sid, gfp_t gfp_flags) |
713 | { | 713 | { |
714 | char *scontext2; | 714 | char *scontext2; |
@@ -835,7 +835,7 @@ out: | |||
835 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient | 835 | * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient |
836 | * memory is available, or 0 on success. | 836 | * memory is available, or 0 on success. |
837 | */ | 837 | */ |
838 | int security_context_to_sid(char *scontext, u32 scontext_len, u32 *sid) | 838 | int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) |
839 | { | 839 | { |
840 | return security_context_to_sid_core(scontext, scontext_len, | 840 | return security_context_to_sid_core(scontext, scontext_len, |
841 | sid, SECSID_NULL, GFP_KERNEL); | 841 | sid, SECSID_NULL, GFP_KERNEL); |
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index 77ec16a3b68b..5d2ec5650e61 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c | |||
@@ -574,8 +574,8 @@ static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) | |||
574 | * | 574 | * |
575 | * Returns 0 if access is permitted, an error code otherwise | 575 | * Returns 0 if access is permitted, an error code otherwise |
576 | */ | 576 | */ |
577 | static int smack_inode_setxattr(struct dentry *dentry, char *name, | 577 | static int smack_inode_setxattr(struct dentry *dentry, const char *name, |
578 | void *value, size_t size, int flags) | 578 | const void *value, size_t size, int flags) |
579 | { | 579 | { |
580 | int rc = 0; | 580 | int rc = 0; |
581 | 581 | ||
@@ -604,8 +604,8 @@ static int smack_inode_setxattr(struct dentry *dentry, char *name, | |||
604 | * Set the pointer in the inode blob to the entry found | 604 | * Set the pointer in the inode blob to the entry found |
605 | * in the master label list. | 605 | * in the master label list. |
606 | */ | 606 | */ |
607 | static void smack_inode_post_setxattr(struct dentry *dentry, char *name, | 607 | static void smack_inode_post_setxattr(struct dentry *dentry, const char *name, |
608 | void *value, size_t size, int flags) | 608 | const void *value, size_t size, int flags) |
609 | { | 609 | { |
610 | struct inode_smack *isp; | 610 | struct inode_smack *isp; |
611 | char *nsp; | 611 | char *nsp; |
@@ -641,7 +641,7 @@ static void smack_inode_post_setxattr(struct dentry *dentry, char *name, | |||
641 | * | 641 | * |
642 | * Returns 0 if access is permitted, an error code otherwise | 642 | * Returns 0 if access is permitted, an error code otherwise |
643 | */ | 643 | */ |
644 | static int smack_inode_getxattr(struct dentry *dentry, char *name) | 644 | static int smack_inode_getxattr(struct dentry *dentry, const char *name) |
645 | { | 645 | { |
646 | return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ); | 646 | return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ); |
647 | } | 647 | } |
@@ -655,7 +655,7 @@ static int smack_inode_getxattr(struct dentry *dentry, char *name) | |||
655 | * | 655 | * |
656 | * Returns 0 if access is permitted, an error code otherwise | 656 | * Returns 0 if access is permitted, an error code otherwise |
657 | */ | 657 | */ |
658 | static int smack_inode_removexattr(struct dentry *dentry, char *name) | 658 | static int smack_inode_removexattr(struct dentry *dentry, const char *name) |
659 | { | 659 | { |
660 | int rc = 0; | 660 | int rc = 0; |
661 | 661 | ||
diff --git a/sound/core/info.c b/sound/core/info.c index 9977ec2eace3..cb5ead3e202d 100644 --- a/sound/core/info.c +++ b/sound/core/info.c | |||
@@ -544,7 +544,7 @@ int __init snd_info_init(void) | |||
544 | { | 544 | { |
545 | struct proc_dir_entry *p; | 545 | struct proc_dir_entry *p; |
546 | 546 | ||
547 | p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, &proc_root); | 547 | p = snd_create_proc_entry("asound", S_IFDIR | S_IRUGO | S_IXUGO, NULL); |
548 | if (p == NULL) | 548 | if (p == NULL) |
549 | return -ENOMEM; | 549 | return -ENOMEM; |
550 | snd_proc_root = p; | 550 | snd_proc_root = p; |
@@ -594,7 +594,7 @@ int __exit snd_info_done(void) | |||
594 | #ifdef CONFIG_SND_OSSEMUL | 594 | #ifdef CONFIG_SND_OSSEMUL |
595 | snd_info_free_entry(snd_oss_root); | 595 | snd_info_free_entry(snd_oss_root); |
596 | #endif | 596 | #endif |
597 | snd_remove_proc_entry(&proc_root, snd_proc_root); | 597 | snd_remove_proc_entry(NULL, snd_proc_root); |
598 | } | 598 | } |
599 | return 0; | 599 | return 0; |
600 | } | 600 | } |
diff --git a/sound/core/memalloc.c b/sound/core/memalloc.c index 920e5780c228..23b7bc02728b 100644 --- a/sound/core/memalloc.c +++ b/sound/core/memalloc.c | |||
@@ -629,9 +629,8 @@ static const struct file_operations snd_mem_proc_fops = { | |||
629 | static int __init snd_mem_init(void) | 629 | static int __init snd_mem_init(void) |
630 | { | 630 | { |
631 | #ifdef CONFIG_PROC_FS | 631 | #ifdef CONFIG_PROC_FS |
632 | snd_mem_proc = create_proc_entry(SND_MEM_PROC_FILE, 0644, NULL); | 632 | snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL, |
633 | if (snd_mem_proc) | 633 | &snd_mem_proc_fops); |
634 | snd_mem_proc->proc_fops = &snd_mem_proc_fops; | ||
635 | #endif | 634 | #endif |
636 | return 0; | 635 | return 0; |
637 | } | 636 | } |
diff --git a/sound/sh/aica.c b/sound/sh/aica.c index d49417bf78c6..9ca113326143 100644 --- a/sound/sh/aica.c +++ b/sound/sh/aica.c | |||
@@ -663,7 +663,7 @@ static int __init aica_init(void) | |||
663 | return err; | 663 | return err; |
664 | pd = platform_device_register_simple(SND_AICA_DRIVER, -1, | 664 | pd = platform_device_register_simple(SND_AICA_DRIVER, -1, |
665 | aica_memory_space, 2); | 665 | aica_memory_space, 2); |
666 | if (unlikely(IS_ERR(pd))) { | 666 | if (IS_ERR(pd)) { |
667 | platform_driver_unregister(&snd_aica_driver); | 667 | platform_driver_unregister(&snd_aica_driver); |
668 | return PTR_ERR(pd); | 668 | return PTR_ERR(pd); |
669 | } | 669 | } |